Tags: configuration, notifications, monitoring, icinga2

How to get a single acknowledgement / OK notification in icinga2 from escalating notification templates?


In icinga2 monitoring, I want to be able to escalate problem notifications if the service has been down for a certain amount of time, or de-escalate when it stops being business hours. I want to get a single notification when the service comes back up.

When I have both "service-test-down-1" and "service-test-down-2" set to all types and states, I get two "OK" messages when the service becomes OK. When I set it up like below, separating the OK messages and the Not-OK messages, I never get any OKs. I feel like this should be straightforward, but I haven't been able to make any progress.

apply Notification "service-test-down-1" to Service {
  command = "dispatch-service"
  states = [ Warning, Critical, Unknown ]
  types = [ Problem, Custom, FlappingStart, FlappingEnd,
            DowntimeStart, DowntimeEnd, DowntimeRemoved ]
  users = ["russ"]
  period = "24x7"
  assign where "tests" in service.groups
  vars.priority = "medium"
  times.begin = 0m
  times.end = 3m
  interval = 1m
}

apply Notification "service-test-down-2" to Service {
  command = "dispatch-service"
  states = [ Warning, Critical, Unknown ]
  types = [ Problem, Custom, FlappingStart, FlappingEnd,
            DowntimeStart, DowntimeEnd, DowntimeRemoved ]
  period = "24x7"
  users = ["russ"]
  assign where "tests" in service.groups
  vars.priority = "medium"
  times.begin = 3m
  times.end = 12h
  interval = 2m
}
apply Notification "service-test-recovery" to Service {
  command = "dispatch-service"
  states = [ OK ]
  types = [ Acknowledgement, Recovery ]
  users = ["russ"]
  period = "24x7"
  vars.priority = "medium"
  assign where "tests" in service.groups
  interval = 1
}

apply Service "NotificationTest" {
  enable_active_checks = true
  check_command = "passive"
  max_check_attempts = 1

  ignore where host.vars.noservices == true
  groups += ["tests"]
  assign where host.name == "icinga2.acceleration.net"
  max_check_attempts = 5
  check_interval = 5m
  retry_interval = 5m
}

This configuration gets printed by icinga as :

~# icinga2 object list --name service-test-* 
Object 'icinga2.acceleration.net!NotificationTest!service-test-down-1' of type 'Notification':
  % declared in '/opt/icinga2lib/lib.conf.d//test.conf', lines 2:1-2:51
  * __name = "icinga2.acceleration.net!NotificationTest!service-test-down-1"
  * command = "dispatch-service"
    % = modified in '/opt/icinga2lib/lib.conf.d//test.conf', lines 3:3-3:30
  * command_endpoint = ""
  * host_name = "icinga2.acceleration.net"
    % = modified in '/opt/icinga2lib/lib.conf.d//test.conf', lines 2:1-2:51
  * interval = 60
    % = modified in '/opt/icinga2lib/lib.conf.d//test.conf', lines 13:3-13:15
  * name = "service-test-down-1"
  * package = "_etc"
    % = modified in '/opt/icinga2lib/lib.conf.d//test.conf', lines 2:1-2:51
  * period = "24x7"
    % = modified in '/opt/icinga2lib/lib.conf.d//test.conf', lines 8:3-8:17
  * service_name = "NotificationTest"
    % = modified in '/opt/icinga2lib/lib.conf.d//test.conf', lines 2:1-2:51
  * states = [ "Warning", "Critical", "Unknown" ]
    % = modified in '/opt/icinga2lib/lib.conf.d//test.conf', lines 4:3-4:41
  * templates = [ "service-test-down-1" ]
    % = modified in '/opt/icinga2lib/lib.conf.d//test.conf', lines 2:1-2:51
  * times
    * begin = 0
      % = modified in '/opt/icinga2lib/lib.conf.d//test.conf', lines 11:3-11:18
    * end = 180
      % = modified in '/opt/icinga2lib/lib.conf.d//test.conf', lines 12:3-12:16
  * type = "Notification"
  * types = [ "Problem", "Custom", "FlappingStart", "FlappingEnd", "DowntimeStart", "DowntimeEnd", "DowntimeRemoved" ]
    % = modified in '/opt/icinga2lib/lib.conf.d//test.conf', lines 5:3-6:57
  * user_groups = null
  * users = [ "russ" ]
    % = modified in '/opt/icinga2lib/lib.conf.d//test.conf', lines 7:3-7:18
  * vars
    * priority = "medium"
      % = modified in '/opt/icinga2lib/lib.conf.d//test.conf', lines 10:3-10:26
  * zone = ""

Object 'icinga2.acceleration.net!NotificationTest!service-test-down-2' of type 'Notification':
  % declared in '/opt/icinga2lib/lib.conf.d//test.conf', lines 16:1-16:51
  * __name = "icinga2.acceleration.net!NotificationTest!service-test-down-2"
  * command = "dispatch-service"
    % = modified in '/opt/icinga2lib/lib.conf.d//test.conf', lines 17:3-17:30
  * command_endpoint = ""
  * host_name = "icinga2.acceleration.net"
    % = modified in '/opt/icinga2lib/lib.conf.d//test.conf', lines 16:1-16:51
  * interval = 120
    % = modified in '/opt/icinga2lib/lib.conf.d//test.conf', lines 27:3-27:15
  * name = "service-test-down-2"
  * package = "_etc"
    % = modified in '/opt/icinga2lib/lib.conf.d//test.conf', lines 16:1-16:51
  * period = "24x7"
    % = modified in '/opt/icinga2lib/lib.conf.d//test.conf', lines 21:3-21:17
  * service_name = "NotificationTest"
    % = modified in '/opt/icinga2lib/lib.conf.d//test.conf', lines 16:1-16:51
  * states = [ "Warning", "Critical", "Unknown" ]
    % = modified in '/opt/icinga2lib/lib.conf.d//test.conf', lines 18:3-18:41
  * templates = [ "service-test-down-2" ]
    % = modified in '/opt/icinga2lib/lib.conf.d//test.conf', lines 16:1-16:51
  * times
    * begin = 180
      % = modified in '/opt/icinga2lib/lib.conf.d//test.conf', lines 25:3-25:18
    * end = 43200
      % = modified in '/opt/icinga2lib/lib.conf.d//test.conf', lines 26:3-26:17
  * type = "Notification"
  * types = [ "Problem", "Custom", "FlappingStart", "FlappingEnd", "DowntimeStart", "DowntimeEnd", "DowntimeRemoved" ]
    % = modified in '/opt/icinga2lib/lib.conf.d//test.conf', lines 19:3-20:57
  * user_groups = null
  * users = [ "russ" ]
    % = modified in '/opt/icinga2lib/lib.conf.d//test.conf', lines 22:3-22:18
  * vars
    * priority = "medium"
      % = modified in '/opt/icinga2lib/lib.conf.d//test.conf', lines 24:3-24:26
  * zone = ""

Object 'icinga2.acceleration.net!NotificationTest!service-test-recovery' of type 'Notification':
  % declared in '/opt/icinga2lib/lib.conf.d//test.conf', lines 29:1-29:53
  * __name = "icinga2.acceleration.net!NotificationTest!service-test-recovery"
  * command = "dispatch-service"
    % = modified in '/opt/icinga2lib/lib.conf.d//test.conf', lines 30:3-30:30
  * command_endpoint = ""
  * host_name = "icinga2.acceleration.net"
    % = modified in '/opt/icinga2lib/lib.conf.d//test.conf', lines 29:1-29:53
  * interval = 1
    % = modified in '/opt/icinga2lib/lib.conf.d//test.conf', lines 37:3-37:14
  * name = "service-test-recovery"
  * package = "_etc"
    % = modified in '/opt/icinga2lib/lib.conf.d//test.conf', lines 29:1-29:53
  * period = "24x7"
    % = modified in '/opt/icinga2lib/lib.conf.d//test.conf', lines 34:3-34:17
  * service_name = "NotificationTest"
    % = modified in '/opt/icinga2lib/lib.conf.d//test.conf', lines 29:1-29:53
  * states = [ "OK" ]
    % = modified in '/opt/icinga2lib/lib.conf.d//test.conf', lines 31:3-31:17
  * templates = [ "service-test-recovery" ]
    % = modified in '/opt/icinga2lib/lib.conf.d//test.conf', lines 29:1-29:53
  * times = null
  * type = "Notification"
  * types = [ "Acknowledgement", "Recovery" ]
    % = modified in '/opt/icinga2lib/lib.conf.d//test.conf', lines 32:3-32:39
  * user_groups = null
  * users = [ "russ" ]
    % = modified in '/opt/icinga2lib/lib.conf.d//test.conf', lines 33:3-33:18
  * vars
    * priority = "medium"
      % = modified in '/opt/icinga2lib/lib.conf.d//test.conf', lines 35:3-35:26
  * zone = ""

Relevant Doc Links: https://www.icinga.com/docs/icinga2/latest/doc/03-monitoring-basics/#notification-escalations

Cross Posted: https://github.com/Icinga/icinga2/issues/5478


Solution

  • The answer provided by the devs of icinga2 (on github: https://github.com/Icinga/icinga2/issues/5478) is that there is no way of sending a single recovery notification from within icinga2 when you have notification escalations.

    Each escalation is a separate notification object, and every notification object that notified about a PROBLEM will also be sent a RECOVERY message. Conversely, no RECOVERY notification will ever be sent through a notification object that never sent a PROBLEM notification (this seems wrong-headed, but so it is).

    The proposed solution is to have a notification proxy that deduplicates messages for you. In light of not wishing to have a stateful proxy, I made a function to set the current_escalation on hosts / services that are notifying, so that only the current escalation will actually send RECOVERY messages and our proxy can still be stateless. Example code on github.