pcmk_sched_native.c
/*
 * Copyright 2004-2019 the Pacemaker project contributors
 *
 * The version control history for this file may have further details.
 *
 * This source code is licensed under the GNU General Public License version 2
 * or later (GPLv2+) WITHOUT ANY WARRANTY.
 */

#include <crm_internal.h>

#include <crm/pengine/rules.h>
#include <crm/msg_xml.h>
#include <pacemaker-internal.h>
#include <crm/services.h>

// The controller removes the resource from the CIB, making this redundant
// #define DELETE_THEN_REFRESH 1

#define INFINITY_HACK   (INFINITY * -100)
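
/* INFINITY_HACK is a sentinel weight used while merging scores:
 * node_hash_update() parks a node at this value to exclude it from
 * "positive only" merges, and rsc_merge_weights() later resets any node
 * still at this value to 1.
 */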

#define VARIANT_NATIVE 1
#include <lib/pengine/variant.h>

void native_rsc_colocation_rh_must(resource_t * rsc_lh, gboolean update_lh,
                                   resource_t * rsc_rh, gboolean update_rh);

void native_rsc_colocation_rh_mustnot(resource_t * rsc_lh, gboolean update_lh,
                                      resource_t * rsc_rh, gboolean update_rh);

static void Recurring(resource_t *rsc, action_t *start, node_t *node,
                      pe_working_set_t *data_set);
static void RecurringOp(resource_t *rsc, action_t *start, node_t *node,
                        xmlNode *operation, pe_working_set_t *data_set);
static void Recurring_Stopped(resource_t *rsc, action_t *start, node_t *node,
                              pe_working_set_t *data_set);
static void RecurringOp_Stopped(resource_t *rsc, action_t *start, node_t *node,
                                xmlNode *operation, pe_working_set_t *data_set);

void ReloadRsc(resource_t * rsc, node_t *node, pe_working_set_t * data_set);
gboolean DeleteRsc(resource_t * rsc, node_t * node, gboolean optional, pe_working_set_t * data_set);
gboolean StopRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set);
gboolean StartRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set);
gboolean DemoteRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set);
gboolean PromoteRsc(resource_t * rsc, node_t * next, gboolean optional,
                    pe_working_set_t * data_set);
gboolean RoleError(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set);
gboolean NullOp(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set);

/* *INDENT-OFF* */
enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = {
/* Current State */
/*       Next State:    Unknown           Stopped           Started           Slave             Master */
    /* Unknown */ { RSC_ROLE_UNKNOWN, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, },
    /* Stopped */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STARTED, RSC_ROLE_SLAVE,   RSC_ROLE_SLAVE, },
    /* Started */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STARTED, RSC_ROLE_SLAVE,   RSC_ROLE_MASTER, },
    /* Slave */   { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_SLAVE,   RSC_ROLE_MASTER, },
    /* Master */  { RSC_ROLE_STOPPED, RSC_ROLE_SLAVE,   RSC_ROLE_SLAVE,   RSC_ROLE_SLAVE,   RSC_ROLE_MASTER, },
};

gboolean (*rsc_action_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX]) (resource_t *, node_t *,
                                                           gboolean, pe_working_set_t *) = {
/* Current State */
/*       Next State:    Unknown    Stopped    Started    Slave      Master */
    /* Unknown */ { RoleError, StopRsc,   RoleError, RoleError, RoleError, },
    /* Stopped */ { RoleError, NullOp,    StartRsc,  StartRsc,  RoleError, },
    /* Started */ { RoleError, StopRsc,   NullOp,    NullOp,    PromoteRsc, },
    /* Slave */   { RoleError, StopRsc,   StopRsc,   NullOp,    PromoteRsc, },
    /* Master */  { RoleError, DemoteRsc, DemoteRsc, DemoteRsc, NullOp, },
};
/* *INDENT-ON* */
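
/* How to read the tables above: to take a resource from its current role to a
 * desired next role, the scheduler repeatedly looks up
 * rsc_state_matrix[current][desired] to find the next intermediate role, and
 * runs the rsc_action_matrix entry for each hop. For example, Stopped ->
 * Master becomes Stopped -> Slave (StartRsc) followed by Slave -> Master
 * (PromoteRsc).
 */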
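/*!
 * \internal
 * \brief Choose a node for a resource, honoring a preferred node if possible
 *
 * Sort the resource's allowed nodes by weight and assign the resource to the
 * preferred node if its score is at least as good as the best allowed node's,
 * otherwise to the best-scoring node, breaking ties in favor of the node the
 * resource is already running on.
 *
 * \return TRUE if the resource ends up assigned to a node, else FALSE
 */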
static gboolean
native_choose_node(resource_t * rsc, node_t * prefer, pe_working_set_t * data_set)
{
    GListPtr nodes = NULL;
    node_t *chosen = NULL;
    node_t *best = NULL;
    int multiple = 1;
    int length = 0;
    gboolean result = FALSE;

    process_utilization(rsc, &prefer, data_set);

    if (is_not_set(rsc->flags, pe_rsc_provisional)) {
        return rsc->allocated_to ? TRUE : FALSE;
    }

    // Sort allowed nodes by weight
    if (rsc->allowed_nodes) {
        length = g_hash_table_size(rsc->allowed_nodes);
    }
    if (length > 0) {
        nodes = g_hash_table_get_values(rsc->allowed_nodes);
        nodes = sort_nodes_by_weight(nodes, pe__current_node(rsc), data_set);

        // First node in sorted list has the best score
        best = g_list_nth_data(nodes, 0);
    }

    if (prefer && nodes) {
        chosen = g_hash_table_lookup(rsc->allowed_nodes, prefer->details->id);

        if (chosen == NULL) {
            pe_rsc_trace(rsc, "Preferred node %s for %s was unknown",
                         prefer->details->uname, rsc->id);

        /* Favor the preferred node as long as its weight is at least as good as
         * the best allowed node's.
         *
         * An alternative would be to favor the preferred node even if the best
         * node is better, when the best node's weight is less than INFINITY.
         */
        } else if ((chosen->weight < 0) || (chosen->weight < best->weight)) {
            pe_rsc_trace(rsc, "Preferred node %s for %s was unsuitable",
                         chosen->details->uname, rsc->id);
            chosen = NULL;

        } else if (!can_run_resources(chosen)) {
            pe_rsc_trace(rsc, "Preferred node %s for %s was unavailable",
                         chosen->details->uname, rsc->id);
            chosen = NULL;

        } else {
            pe_rsc_trace(rsc,
                         "Chose preferred node %s for %s (ignoring %d candidates)",
                         chosen->details->uname, rsc->id, length);
        }
    }

    if ((chosen == NULL) && nodes) {
        /* Either there is no preferred node, or the preferred node is not
         * available, but there are other nodes allowed to run the resource.
         */

        chosen = best;
        pe_rsc_trace(rsc, "Chose node %s for %s from %d candidates",
                     chosen ? chosen->details->uname : "<none>", rsc->id, length);

        if (!pe_rsc_is_unique_clone(rsc->parent)
            && chosen && (chosen->weight > 0) && can_run_resources(chosen)) {
            /* If the resource is already running on a node, prefer that node if
             * it is just as good as the chosen node.
             *
             * We don't do this for unique clone instances, because
             * distribute_children() has already assigned instances to their
             * running nodes when appropriate, and if we get here, we don't want
             * remaining unallocated instances to prefer a node that's already
             * running another instance.
             */
            node_t *running = pe__current_node(rsc);

            if (running && (can_run_resources(running) == FALSE)) {
                pe_rsc_trace(rsc, "Current node for %s (%s) can't run resources",
                             rsc->id, running->details->uname);
            } else if (running) {
                for (GList *iter = nodes->next; iter; iter = iter->next) {
                    node_t *tmp = (node_t *) iter->data;

                    if (tmp->weight != chosen->weight) {
                        // The nodes are sorted by weight, so no more are equal
                        break;
                    }
                    if (tmp->details == running->details) {
                        // Scores are equal, so prefer the current node
                        chosen = tmp;
                    }
                    multiple++;
                }
            }
        }
    }

    if (multiple > 1) {
        static char score[33];
        int log_level = (chosen->weight >= INFINITY)? LOG_WARNING : LOG_INFO;

        score2char_stack(chosen->weight, score, sizeof(score));
        do_crm_log(log_level,
                   "Chose node %s for %s from %d nodes with score %s",
                   chosen->details->uname, rsc->id, multiple, score);
    }

    result = native_assign_node(rsc, nodes, chosen, FALSE);
    g_list_free(nodes);
    return result;
}
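/* Return the best weight found among nodes in the table whose value for the
 * given node attribute matches the given value (nodes that cannot currently
 * run resources count as -INFINITY).
 */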
static int
node_list_attr_score(GHashTable * list, const char *attr, const char *value)
{
    GHashTableIter iter;
    node_t *node = NULL;
    int best_score = -INFINITY;
    const char *best_node = NULL;

    if (attr == NULL) {
        attr = CRM_ATTR_UNAME;
    }

    g_hash_table_iter_init(&iter, list);
    while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
        int weight = node->weight;

        if (can_run_resources(node) == FALSE) {
            weight = -INFINITY;
        }
        if (weight > best_score || best_node == NULL) {
            const char *tmp = pe_node_attribute_raw(node, attr);

            if (safe_str_eq(value, tmp)) {
                best_score = weight;
                best_node = node->details->uname;
            }
        }
    }

    if (safe_str_neq(attr, CRM_ATTR_UNAME)) {
        crm_info("Best score for %s=%s was %s with %d",
                 attr, value, best_node ? best_node : "<none>", best_score);
    }

    return best_score;
}

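/* For each node in list1, look up the best score in list2 among nodes with a
 * matching value for the given attribute, scale it by factor, and merge the
 * result into the node's weight. With only_positive, nodes whose weight would
 * go negative are parked at INFINITY_HACK instead (see above).
 */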
static void
node_hash_update(GHashTable * list1, GHashTable * list2, const char *attr, float factor,
                 gboolean only_positive)
{
    int score = 0;
    int new_score = 0;
    GHashTableIter iter;
    node_t *node = NULL;

    if (attr == NULL) {
        attr = CRM_ATTR_UNAME;
    }

    g_hash_table_iter_init(&iter, list1);
    while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
        float weight_f = 0;
        int weight = 0;

        CRM_LOG_ASSERT(node != NULL);
        if (node == NULL) {
            continue;
        }

        score = node_list_attr_score(list2, attr, pe_node_attribute_raw(node, attr));

        weight_f = factor * score;

        // Round the number instead of truncating (see http://c-faq.com/fp/round.html)
        weight = (int)(weight_f < 0 ? weight_f - 0.5 : weight_f + 0.5);

        new_score = merge_weights(weight, node->weight);

        if (factor < 0 && score < 0) {
            /* Negative preference for a node with a negative score
             * should not become a positive preference
             *
             * TODO - Decide if we want to filter only if weight == -INFINITY
             */
            crm_trace("%s: Filtering %d + %f*%d (factor * score)",
                      node->details->uname, node->weight, factor, score);

        } else if (node->weight == INFINITY_HACK) {
            crm_trace("%s: Filtering %d + %f*%d (node < 0)",
                      node->details->uname, node->weight, factor, score);

        } else if (only_positive && new_score < 0 && node->weight > 0) {
            node->weight = INFINITY_HACK;
            crm_trace("%s: Filtering %d + %f*%d (score > 0)",
                      node->details->uname, node->weight, factor, score);

        } else if (only_positive && new_score < 0 && node->weight == 0) {
            crm_trace("%s: Filtering %d + %f*%d (score == 0)",
                      node->details->uname, node->weight, factor, score);

        } else {
            crm_trace("%s: %d + %f*%d", node->details->uname, node->weight, factor, score);
            node->weight = new_score;
        }
    }
}

GHashTable *
node_hash_dup(GHashTable * hash)
{
    /* Hack! */
    GListPtr list = g_hash_table_get_values(hash);
    GHashTable *result = node_hash_from_list(list);

    g_list_free(list);
    return result;
}

GHashTable *
native_merge_weights(resource_t * rsc, const char *rhs, GHashTable * nodes, const char *attr,
                     float factor, enum pe_weights flags)
{
    return rsc_merge_weights(rsc, rhs, nodes, attr, factor, flags);
}

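/* Merge the given resource's allowed-node scores (and, recursively, those of
 * resources colocated with it) into the given node table, scaled by factor.
 * Returns the merged table; the input table is freed, except when it is
 * returned as-is because of a dependency loop or a rollback.
 */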
GHashTable *
rsc_merge_weights(resource_t * rsc, const char *rhs, GHashTable * nodes, const char *attr,
                  float factor, enum pe_weights flags)
{
    GHashTable *work = NULL;
    int multiplier = 1;

    if (factor < 0) {
        multiplier = -1;
    }

    if (is_set(rsc->flags, pe_rsc_merging)) {
        pe_rsc_info(rsc, "%s: Breaking dependency loop at %s", rhs, rsc->id);
        return nodes;
    }

    set_bit(rsc->flags, pe_rsc_merging);

    if (is_set(flags, pe_weights_init)) {
        if (rsc->variant == pe_group && rsc->children) {
            GListPtr last = rsc->children;

            while (last->next != NULL) {
                last = last->next;
            }

            pe_rsc_trace(rsc, "Merging %s as a group %p %p", rsc->id, rsc->children, last);
            work = rsc_merge_weights(last->data, rhs, NULL, attr, factor, flags);

        } else {
            work = node_hash_dup(rsc->allowed_nodes);
        }
        clear_bit(flags, pe_weights_init);

    } else if (rsc->variant == pe_group && rsc->children) {
        GListPtr iter = rsc->children;

        pe_rsc_trace(rsc, "%s: Combining scores from %d children of %s",
                     rhs, g_list_length(iter), rsc->id);
        work = node_hash_dup(nodes);
        for (iter = rsc->children; iter->next != NULL; iter = iter->next) {
            work = rsc_merge_weights(iter->data, rhs, work, attr, factor, flags);
        }

    } else {
        pe_rsc_trace(rsc, "%s: Combining scores from %s", rhs, rsc->id);
        work = node_hash_dup(nodes);
        node_hash_update(work, rsc->allowed_nodes, attr, factor,
                         is_set(flags, pe_weights_positive));
    }

    if (is_set(flags, pe_weights_rollback) && can_run_any(work) == FALSE) {
        pe_rsc_info(rsc, "%s: Rolling back scores from %s", rhs, rsc->id);
        g_hash_table_destroy(work);
        clear_bit(rsc->flags, pe_rsc_merging);
        return nodes;
    }

    if (can_run_any(work)) {
        GListPtr gIter = NULL;

        if (is_set(flags, pe_weights_forward)) {
            gIter = rsc->rsc_cons;
            crm_trace("Checking %d additional colocation constraints", g_list_length(gIter));

        } else if (rsc->variant == pe_group && rsc->children) {
            GListPtr last = rsc->children;

            while (last->next != NULL) {
                last = last->next;
            }

            gIter = ((resource_t *) last->data)->rsc_cons_lhs;
            crm_trace("Checking %d additional optional group colocation constraints from %s",
                      g_list_length(gIter), ((resource_t *) last->data)->id);

        } else {
            gIter = rsc->rsc_cons_lhs;
            crm_trace("Checking %d additional optional colocation constraints %s",
                      g_list_length(gIter), rsc->id);
        }

        for (; gIter != NULL; gIter = gIter->next) {
            resource_t *other = NULL;
            rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data;

            if (is_set(flags, pe_weights_forward)) {
                other = constraint->rsc_rh;
            } else {
                other = constraint->rsc_lh;
            }

            pe_rsc_trace(rsc, "Applying %s (%s)", constraint->id, other->id);
            work = rsc_merge_weights(other, rhs, work, constraint->node_attribute,
                                     multiplier * (float)constraint->score / INFINITY,
                                     flags | pe_weights_rollback);
            dump_node_scores(LOG_TRACE, NULL, rhs, work);
        }
    }

    if (is_set(flags, pe_weights_positive)) {
        node_t *node = NULL;
        GHashTableIter iter;

        g_hash_table_iter_init(&iter, work);
        while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
            if (node->weight == INFINITY_HACK) {
                node->weight = 1;
            }
        }
    }

    if (nodes) {
        g_hash_table_destroy(nodes);
    }

    clear_bit(rsc->flags, pe_rsc_merging);
    return work;
}
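/* A node is considered unfenced once its CRM_ATTR_UNFENCED node attribute has
 * been given a nonzero value (set when unfencing completes).
 */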
static inline bool
node_has_been_unfenced(node_t *node)
{
    const char *unfenced = pe_node_attribute_raw(node, CRM_ATTR_UNFENCED);

    return unfenced && strcmp("0", unfenced);
}

static inline bool
is_unfence_device(resource_t *rsc, pe_working_set_t *data_set)
{
    return is_set(rsc->flags, pe_rsc_fence_device)
           && is_set(data_set->flags, pe_flag_enable_unfencing);
}

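/*!
 * \internal
 * \brief Allocate a primitive resource to a node
 *
 * Apply all relevant colocation constraints to the resource's allowed-node
 * scores, then choose and assign a node, also updating Pacemaker Remote node
 * state where the resource is a remote connection resource.
 *
 * \return Node that the resource was allocated to, or NULL if none
 */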
node_t *
native_color(resource_t * rsc, node_t * prefer, pe_working_set_t * data_set)
{
    GListPtr gIter = NULL;
    int alloc_details = scores_log_level + 1;

    if (rsc->parent && is_not_set(rsc->parent->flags, pe_rsc_allocating)) {
        /* never allocate children on their own */
        pe_rsc_debug(rsc, "Escalating allocation of %s to its parent: %s", rsc->id,
                     rsc->parent->id);
        rsc->parent->cmds->allocate(rsc->parent, prefer, data_set);
    }

    if (is_not_set(rsc->flags, pe_rsc_provisional)) {
        return rsc->allocated_to;
    }

    if (is_set(rsc->flags, pe_rsc_allocating)) {
        pe_rsc_debug(rsc, "Dependency loop detected involving %s", rsc->id);
        return NULL;
    }

    set_bit(rsc->flags, pe_rsc_allocating);
    dump_node_scores(alloc_details, rsc, "Pre-alloc", rsc->allowed_nodes);

    for (gIter = rsc->rsc_cons; gIter != NULL; gIter = gIter->next) {
        rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data;

        GHashTable *archive = NULL;
        resource_t *rsc_rh = constraint->rsc_rh;

        pe_rsc_trace(rsc, "%s: Pre-Processing %s (%s, %d, %s)",
                     rsc->id, constraint->id, rsc_rh->id,
                     constraint->score, role2text(constraint->role_lh));
        if (constraint->role_lh >= RSC_ROLE_MASTER
            || (constraint->score < 0 && constraint->score > -INFINITY)) {
            archive = node_hash_dup(rsc->allowed_nodes);
        }
        rsc_rh->cmds->allocate(rsc_rh, NULL, data_set);
        rsc->cmds->rsc_colocation_lh(rsc, rsc_rh, constraint, data_set);
        if (archive && can_run_any(rsc->allowed_nodes) == FALSE) {
            pe_rsc_info(rsc, "%s: Rolling back scores from %s", rsc->id, rsc_rh->id);
            g_hash_table_destroy(rsc->allowed_nodes);
            rsc->allowed_nodes = archive;
            archive = NULL;
        }
        if (archive) {
            g_hash_table_destroy(archive);
        }
    }

    dump_node_scores(alloc_details, rsc, "Post-coloc", rsc->allowed_nodes);

    for (gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) {
        rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data;

        rsc->allowed_nodes =
            constraint->rsc_lh->cmds->merge_weights(constraint->rsc_lh, rsc->id, rsc->allowed_nodes,
                                                    constraint->node_attribute,
                                                    (float)constraint->score / INFINITY,
                                                    pe_weights_rollback);
    }

    if (rsc->next_role == RSC_ROLE_STOPPED) {
        pe_rsc_trace(rsc, "Making sure %s doesn't get allocated", rsc->id);
        /* make sure it doesn't come up again */
        resource_location(rsc, NULL, -INFINITY, XML_RSC_ATTR_TARGET_ROLE, data_set);

    } else if (rsc->next_role > rsc->role
               && is_set(data_set->flags, pe_flag_have_quorum) == FALSE
               && data_set->no_quorum_policy == no_quorum_freeze) {
        crm_notice("Resource %s cannot be elevated from %s to %s: no-quorum-policy=freeze",
                   rsc->id, role2text(rsc->role), role2text(rsc->next_role));
        rsc->next_role = rsc->role;
    }

    dump_node_scores(show_scores ? 0 : scores_log_level, rsc, __FUNCTION__,
                     rsc->allowed_nodes);
    if (is_set(data_set->flags, pe_flag_stonith_enabled)
        && is_set(data_set->flags, pe_flag_have_stonith_resource) == FALSE) {
        clear_bit(rsc->flags, pe_rsc_managed);
    }

    if (is_not_set(rsc->flags, pe_rsc_managed)) {
        const char *reason = NULL;
        node_t *assign_to = NULL;

        rsc->next_role = rsc->role;
        assign_to = pe__current_node(rsc);
        if (assign_to == NULL) {
            reason = "inactive";
        } else if (rsc->role == RSC_ROLE_MASTER) {
            reason = "master";
        } else if (is_set(rsc->flags, pe_rsc_failed)) {
            reason = "failed";
        } else {
            reason = "active";
        }
        pe_rsc_info(rsc, "Unmanaged resource %s allocated to %s: %s", rsc->id,
                    (assign_to? assign_to->details->uname : "no node"), reason);
        native_assign_node(rsc, NULL, assign_to, TRUE);

    } else if (is_set(data_set->flags, pe_flag_stop_everything)) {
        pe_rsc_debug(rsc, "Forcing %s to stop", rsc->id);
        native_assign_node(rsc, NULL, NULL, TRUE);

    } else if (is_set(rsc->flags, pe_rsc_provisional)
               && native_choose_node(rsc, prefer, data_set)) {
        pe_rsc_trace(rsc, "Allocated resource %s to %s", rsc->id,
                     rsc->allocated_to->details->uname);

    } else if (rsc->allocated_to == NULL) {
        if (is_not_set(rsc->flags, pe_rsc_orphan)) {
            pe_rsc_info(rsc, "Resource %s cannot run anywhere", rsc->id);
        } else if (rsc->running_on != NULL) {
            pe_rsc_info(rsc, "Stopping orphan resource %s", rsc->id);
        }

    } else {
        pe_rsc_debug(rsc, "Pre-Allocated resource %s to %s", rsc->id,
                     rsc->allocated_to->details->uname);
    }

    clear_bit(rsc->flags, pe_rsc_allocating);

    if (rsc->is_remote_node) {
        node_t *remote_node = pe_find_node(data_set->nodes, rsc->id);

        CRM_ASSERT(remote_node != NULL);
        if (rsc->allocated_to && rsc->next_role != RSC_ROLE_STOPPED) {
            crm_trace("Setting Pacemaker Remote node %s to ONLINE",
                      remote_node->details->id);
            remote_node->details->online = TRUE;
            /* We shouldn't consider an unseen remote-node unclean if we are going
             * to try and connect to it. Otherwise we get an unnecessary fence */
            if (remote_node->details->unseen == TRUE) {
                remote_node->details->unclean = FALSE;
            }

        } else {
            crm_trace("Setting Pacemaker Remote node %s to SHUTDOWN (next role %s, %sallocated)",
                      remote_node->details->id, role2text(rsc->next_role),
                      (rsc->allocated_to? "" : "un"));
            remote_node->details->shutdown = TRUE;
        }
    }

    return rsc->allocated_to;
}
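/* Check whether another operation with the same name and interval as the
 * given one is already configured for this resource (logging a configuration
 * error if so).
 */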
static gboolean
is_op_dup(resource_t *rsc, const char *name, guint interval_ms)
{
    gboolean dup = FALSE;
    const char *id = NULL;
    const char *value = NULL;
    xmlNode *operation = NULL;
    guint interval2_ms = 0;

    CRM_ASSERT(rsc);
    for (operation = __xml_first_child_element(rsc->ops_xml); operation != NULL;
         operation = __xml_next_element(operation)) {

        if (crm_str_eq((const char *)operation->name, "op", TRUE)) {
            value = crm_element_value(operation, "name");
            if (safe_str_neq(value, name)) {
                continue;
            }

            value = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
            interval2_ms = crm_parse_interval_spec(value);
            if (interval_ms != interval2_ms) {
                continue;
            }

            if (id == NULL) {
                id = ID(operation);

            } else {
                crm_config_err("Operation %s is a duplicate of %s", ID(operation), id);
                crm_config_err
                    ("Do not use the same (name, interval) combination more than once per resource");
                dup = TRUE;
            }
        }
    }

    return dup;
}

static bool
op_cannot_recur(const char *name)
{
    return safe_str_eq(name, RSC_STOP)
           || safe_str_eq(name, RSC_START)
           || safe_str_eq(name, RSC_DEMOTE)
           || safe_str_eq(name, RSC_PROMOTE);
}

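/* Recurring actions come from <op> entries in the resource's configuration,
 * for example:
 *
 *     <op id="myrsc-monitor-10s" name="monitor" interval="10s" role="Master"/>
 *
 * RecurringOp() schedules a monitor for the role the resource will have on
 * the node it will be active on, cancelling any recurring monitor that no
 * longer matches the resource's next role.
 */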
static void
RecurringOp(resource_t * rsc, action_t * start, node_t * node,
            xmlNode * operation, pe_working_set_t * data_set)
{
    char *key = NULL;
    const char *name = NULL;
    const char *role = NULL;
    const char *interval_spec = NULL;
    const char *node_uname = node? node->details->uname : "n/a";

    guint interval_ms = 0;
    action_t *mon = NULL;
    gboolean is_optional = TRUE;
    GListPtr possible_matches = NULL;

    CRM_ASSERT(rsc);

    /* Only process for the operations without role="Stopped" */
    role = crm_element_value(operation, "role");
    if (role && text2role(role) == RSC_ROLE_STOPPED) {
        return;
    }

    interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
    interval_ms = crm_parse_interval_spec(interval_spec);
    if (interval_ms == 0) {
        return;
    }

    name = crm_element_value(operation, "name");
    if (is_op_dup(rsc, name, interval_ms)) {
        crm_trace("Not creating duplicate recurring action %s for %ums %s",
                  ID(operation), interval_ms, name);
        return;
    }

    if (op_cannot_recur(name)) {
        crm_config_err("Ignoring %s because action '%s' cannot be recurring",
                       ID(operation), name);
        return;
    }

    key = generate_op_key(rsc->id, name, interval_ms);
    if (find_rsc_op_entry(rsc, key) == NULL) {
        crm_trace("Not creating recurring action %s for disabled resource %s",
                  ID(operation), rsc->id);
        free(key);
        return;
    }

    pe_rsc_trace(rsc, "Creating recurring action %s for %s in role %s on %s",
                 ID(operation), rsc->id, role2text(rsc->next_role), node_uname);

    if (start != NULL) {
        pe_rsc_trace(rsc, "Marking %s %s due to %s",
                     key, is_set(start->flags, pe_action_optional) ? "optional" : "mandatory",
                     start->uuid);
        is_optional = (rsc->cmds->action_flags(start, NULL) & pe_action_optional);
    } else {
        pe_rsc_trace(rsc, "Marking %s optional", key);
        is_optional = TRUE;
    }

    /* start a monitor for an already active resource */
    possible_matches = find_actions_exact(rsc->actions, key, node);
    if (possible_matches == NULL) {
        is_optional = FALSE;
        pe_rsc_trace(rsc, "Marking %s mandatory: not active", key);

    } else {
        GListPtr gIter = NULL;

        for (gIter = possible_matches; gIter != NULL; gIter = gIter->next) {
            action_t *op = (action_t *) gIter->data;

            if (is_set(op->flags, pe_action_reschedule)) {
                is_optional = FALSE;
                break;
            }
        }
        g_list_free(possible_matches);
    }

    if ((rsc->next_role == RSC_ROLE_MASTER && role == NULL)
        || (role != NULL && text2role(role) != rsc->next_role)) {
        int log_level = LOG_TRACE;
        const char *result = "Ignoring";

        if (is_optional) {
            char *after_key = NULL;
            action_t *cancel_op = NULL;

            // It's running, so cancel it
            log_level = LOG_INFO;
            result = "Cancelling";
            cancel_op = pe_cancel_op(rsc, name, interval_ms, node, data_set);

            switch (rsc->role) {
                case RSC_ROLE_SLAVE:
                case RSC_ROLE_STARTED:
                    if (rsc->next_role == RSC_ROLE_MASTER) {
                        after_key = promote_key(rsc);

                    } else if (rsc->next_role == RSC_ROLE_STOPPED) {
                        after_key = stop_key(rsc);
                    }

                    break;
                case RSC_ROLE_MASTER:
                    after_key = demote_key(rsc);
                    break;
                default:
                    break;
            }

            if (after_key) {
                custom_action_order(rsc, NULL, cancel_op, rsc, after_key, NULL,
                                    pe_order_runnable_left, data_set);
            }
        }

        do_crm_log(log_level, "%s action %s (%s vs. %s)",
                   result, key, role ? role : role2text(RSC_ROLE_SLAVE),
                   role2text(rsc->next_role));

        free(key);
        return;
    }

    mon = custom_action(rsc, key, name, node, is_optional, TRUE, data_set);
    key = mon->uuid;
    if (is_optional) {
        pe_rsc_trace(rsc, "%s\t %s (optional)", node_uname, mon->uuid);
    }

    if (start == NULL || is_set(start->flags, pe_action_runnable) == FALSE) {
        pe_rsc_debug(rsc, "%s\t %s (cancelled : start un-runnable)",
                     node_uname, mon->uuid);
        update_action_flags(mon, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__);

    } else if (node == NULL || node->details->online == FALSE || node->details->unclean) {
        pe_rsc_debug(rsc, "%s\t %s (cancelled : no node available)",
                     node_uname, mon->uuid);
        update_action_flags(mon, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__);

    } else if (is_set(mon->flags, pe_action_optional) == FALSE) {
        pe_rsc_info(rsc, " Start recurring %s (%us) for %s on %s",
                    mon->task, interval_ms / 1000, rsc->id, node_uname);
    }

    if (rsc->next_role == RSC_ROLE_MASTER) {
        char *running_master = crm_itoa(PCMK_OCF_RUNNING_MASTER);

        add_hash_param(mon->meta, XML_ATTR_TE_TARGET_RC, running_master);
        free(running_master);
    }

    if (node == NULL || is_set(rsc->flags, pe_rsc_managed)) {
        custom_action_order(rsc, start_key(rsc), NULL,
                            NULL, strdup(key), mon,
                            pe_order_implies_then | pe_order_runnable_left, data_set);

        custom_action_order(rsc, reload_key(rsc), NULL,
                            NULL, strdup(key), mon,
                            pe_order_implies_then | pe_order_runnable_left, data_set);

        if (rsc->next_role == RSC_ROLE_MASTER) {
            custom_action_order(rsc, promote_key(rsc), NULL,
                                rsc, NULL, mon,
                                pe_order_optional | pe_order_runnable_left, data_set);

        } else if (rsc->role == RSC_ROLE_MASTER) {
            custom_action_order(rsc, demote_key(rsc), NULL,
                                rsc, NULL, mon,
                                pe_order_optional | pe_order_runnable_left, data_set);
        }
    }
}

static void
Recurring(resource_t * rsc, action_t * start, node_t * node, pe_working_set_t * data_set)
{
    if (is_not_set(rsc->flags, pe_rsc_maintenance) &&
        (node == NULL || node->details->maintenance == FALSE)) {
        xmlNode *operation = NULL;

        for (operation = __xml_first_child_element(rsc->ops_xml);
             operation != NULL;
             operation = __xml_next_element(operation)) {

            if (crm_str_eq((const char *)operation->name, "op", TRUE)) {
                RecurringOp(rsc, start, node, operation, data_set);
            }
        }
    }
}

static void
RecurringOp_Stopped(resource_t * rsc, action_t * start, node_t * node,
                    xmlNode * operation, pe_working_set_t * data_set)
{
    char *key = NULL;
    const char *name = NULL;
    const char *role = NULL;
    const char *interval_spec = NULL;
    const char *node_uname = node? node->details->uname : "n/a";

    guint interval_ms = 0;
    GListPtr possible_matches = NULL;
    GListPtr gIter = NULL;

    /* Only process for the operations with role="Stopped" */
    role = crm_element_value(operation, "role");
    if (role == NULL || text2role(role) != RSC_ROLE_STOPPED) {
        return;
    }

    interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
    interval_ms = crm_parse_interval_spec(interval_spec);
    if (interval_ms == 0) {
        return;
    }

    name = crm_element_value(operation, "name");
    if (is_op_dup(rsc, name, interval_ms)) {
        crm_trace("Not creating duplicate recurring action %s for %ums %s",
                  ID(operation), interval_ms, name);
        return;
    }

    if (op_cannot_recur(name)) {
        crm_config_err("Invalid recurring action %s with name: '%s'", ID(operation), name);
        return;
    }

    key = generate_op_key(rsc->id, name, interval_ms);
    if (find_rsc_op_entry(rsc, key) == NULL) {
        crm_trace("Not creating recurring action %s for disabled resource %s",
                  ID(operation), rsc->id);
        free(key);
        return;
    }

    // @TODO add support
    if (is_set(rsc->flags, pe_rsc_unique) == FALSE) {
        crm_notice("Ignoring %s (recurring monitors for Stopped role are "
                   "not supported for anonymous clones)",
                   ID(operation));
        return;
    }

    pe_rsc_trace(rsc,
                 "Creating recurring action %s for %s in role %s on nodes where it should not be running",
                 ID(operation), rsc->id, role2text(rsc->next_role));

    /* if the monitor exists on the node where the resource will be running, cancel it */
    if (node != NULL) {
        possible_matches = find_actions_exact(rsc->actions, key, node);
        if (possible_matches) {
            action_t *cancel_op = NULL;

            g_list_free(possible_matches);

            cancel_op = pe_cancel_op(rsc, name, interval_ms, node, data_set);

            if (rsc->next_role == RSC_ROLE_STARTED || rsc->next_role == RSC_ROLE_SLAVE) {
                /* rsc->role == RSC_ROLE_STOPPED: cancel the monitor before start */
                /* rsc->role == RSC_ROLE_STARTED: for a migration, cancel the monitor on the target node before start */
                custom_action_order(rsc, NULL, cancel_op, rsc, start_key(rsc), NULL,
                                    pe_order_runnable_left, data_set);
            }

            pe_rsc_info(rsc, "Cancel action %s (%s vs. %s) on %s",
                        key, role, role2text(rsc->next_role), node_uname);
        }
    }

    for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
        node_t *stop_node = (node_t *) gIter->data;
        const char *stop_node_uname = stop_node->details->uname;
        gboolean is_optional = TRUE;
        gboolean probe_is_optional = TRUE;
        gboolean stop_is_optional = TRUE;
        action_t *stopped_mon = NULL;
        char *rc_inactive = NULL;
        GListPtr probe_complete_ops = NULL;
        GListPtr stop_ops = NULL;
        GListPtr local_gIter = NULL;

        if (node && safe_str_eq(stop_node_uname, node_uname)) {
            continue;
        }

        pe_rsc_trace(rsc, "Creating recurring action %s for %s on %s",
                     ID(operation), rsc->id, crm_str(stop_node_uname));

        /* start a monitor for an already stopped resource */
        possible_matches = find_actions_exact(rsc->actions, key, stop_node);
        if (possible_matches == NULL) {
            pe_rsc_trace(rsc, "Marking %s mandatory on %s: not active", key,
                         crm_str(stop_node_uname));
            is_optional = FALSE;
        } else {
            pe_rsc_trace(rsc, "Marking %s optional on %s: already active", key,
                         crm_str(stop_node_uname));
            is_optional = TRUE;
            g_list_free(possible_matches);
        }

        stopped_mon = custom_action(rsc, strdup(key), name, stop_node, is_optional, TRUE, data_set);

        rc_inactive = crm_itoa(PCMK_OCF_NOT_RUNNING);
        add_hash_param(stopped_mon->meta, XML_ATTR_TE_TARGET_RC, rc_inactive);
        free(rc_inactive);

        if (is_set(rsc->flags, pe_rsc_managed)) {
            GList *probes = pe__resource_actions(rsc, stop_node, RSC_STATUS,
                                                 FALSE);
            GListPtr pIter = NULL;

            for (pIter = probes; pIter != NULL; pIter = pIter->next) {
                action_t *probe = (action_t *) pIter->data;

                order_actions(probe, stopped_mon, pe_order_runnable_left);
                crm_trace("%s then %s on %s", probe->uuid, stopped_mon->uuid,
                          stop_node->details->uname);
            }

            g_list_free(probes);
        }

        if (probe_complete_ops) {
            g_list_free(probe_complete_ops);
        }

        stop_ops = pe__resource_actions(rsc, stop_node, RSC_STOP, TRUE);

        for (local_gIter = stop_ops; local_gIter != NULL; local_gIter = local_gIter->next) {
            action_t *stop = (action_t *) local_gIter->data;

            if (is_set(stop->flags, pe_action_optional) == FALSE) {
                stop_is_optional = FALSE;
            }

            if (is_set(stop->flags, pe_action_runnable) == FALSE) {
                crm_debug("%s\t %s (cancelled : stop un-runnable)",
                          crm_str(stop_node_uname), stopped_mon->uuid);
                update_action_flags(stopped_mon, pe_action_runnable | pe_action_clear,
                                    __FUNCTION__, __LINE__);
            }

            if (is_set(rsc->flags, pe_rsc_managed)) {
                custom_action_order(rsc, stop_key(rsc), stop,
                                    NULL, strdup(key), stopped_mon,
                                    pe_order_implies_then | pe_order_runnable_left, data_set);
            }
        }

        if (stop_ops) {
            g_list_free(stop_ops);
        }

        if (is_optional == FALSE && probe_is_optional && stop_is_optional
            && is_set(rsc->flags, pe_rsc_managed) == FALSE) {
            pe_rsc_trace(rsc, "Marking %s optional on %s due to unmanaged",
                         key, crm_str(stop_node_uname));
            update_action_flags(stopped_mon, pe_action_optional, __FUNCTION__, __LINE__);
        }

        if (is_set(stopped_mon->flags, pe_action_optional)) {
            pe_rsc_trace(rsc, "%s\t %s (optional)", crm_str(stop_node_uname), stopped_mon->uuid);
        }

        if (stop_node->details->online == FALSE || stop_node->details->unclean) {
            pe_rsc_debug(rsc, "%s\t %s (cancelled : no node available)",
                         crm_str(stop_node_uname), stopped_mon->uuid);
            update_action_flags(stopped_mon, pe_action_runnable | pe_action_clear,
                                __FUNCTION__, __LINE__);
        }

        if (is_set(stopped_mon->flags, pe_action_runnable)
            && is_set(stopped_mon->flags, pe_action_optional) == FALSE) {
            crm_notice(" Start recurring %s (%us) for %s on %s", stopped_mon->task,
                       interval_ms / 1000, rsc->id, crm_str(stop_node_uname));
        }
    }

    free(key);
}

static void
Recurring_Stopped(resource_t * rsc, action_t * start, node_t * node, pe_working_set_t * data_set)
{
    if (is_not_set(rsc->flags, pe_rsc_maintenance) &&
        (node == NULL || node->details->maintenance == FALSE)) {
        xmlNode *operation = NULL;

        for (operation = __xml_first_child_element(rsc->ops_xml);
             operation != NULL;
             operation = __xml_next_element(operation)) {

            if (crm_str_eq((const char *)operation->name, "op", TRUE)) {
                RecurringOp_Stopped(rsc, start, node, operation, data_set);
            }
        }
    }
}
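/* Create the actions needed to live-migrate the resource from its current
 * node to the chosen node: migrate_to on the source and migrate_from on the
 * target (only migrate_from when continuing a partial migration), with the
 * normal start converted to a pseudo-action.
 */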
static void
handle_migration_actions(resource_t * rsc, node_t *current, node_t *chosen,
                         pe_working_set_t * data_set)
{
    action_t *migrate_to = NULL;
    action_t *migrate_from = NULL;
    action_t *start = NULL;
    action_t *stop = NULL;
    gboolean partial = rsc->partial_migration_target ? TRUE : FALSE;

    pe_rsc_trace(rsc, "Processing migration actions %s moving from %s to %s. partial migration = %s",
                 rsc->id, current->details->id, chosen->details->id, partial ? "TRUE" : "FALSE");
    start = start_action(rsc, chosen, TRUE);
    stop = stop_action(rsc, current, TRUE);

    if (partial == FALSE) {
        migrate_to = custom_action(rsc, generate_op_key(rsc->id, RSC_MIGRATE, 0),
                                   RSC_MIGRATE, current, TRUE, TRUE, data_set);
    }

    migrate_from = custom_action(rsc, generate_op_key(rsc->id, RSC_MIGRATED, 0),
                                 RSC_MIGRATED, chosen, TRUE, TRUE, data_set);

    if ((migrate_to && migrate_from) || (migrate_from && partial)) {

        set_bit(start->flags, pe_action_migrate_runnable);
        set_bit(stop->flags, pe_action_migrate_runnable);

        update_action_flags(start, pe_action_pseudo, __FUNCTION__, __LINE__);   /* easier than trying to delete it from the graph */

        /* order probes before migrations */
        if (partial) {
            set_bit(migrate_from->flags, pe_action_migrate_runnable);
            migrate_from->needs = start->needs;

            custom_action_order(rsc, generate_op_key(rsc->id, RSC_STATUS, 0), NULL,
                                rsc, generate_op_key(rsc->id, RSC_MIGRATED, 0), NULL,
                                pe_order_optional, data_set);

        } else {
            set_bit(migrate_from->flags, pe_action_migrate_runnable);
            set_bit(migrate_to->flags, pe_action_migrate_runnable);
            migrate_to->needs = start->needs;

            custom_action_order(rsc, generate_op_key(rsc->id, RSC_STATUS, 0), NULL,
                                rsc, generate_op_key(rsc->id, RSC_MIGRATE, 0), NULL,
                                pe_order_optional, data_set);
            custom_action_order(rsc, generate_op_key(rsc->id, RSC_MIGRATE, 0), NULL,
                                rsc, generate_op_key(rsc->id, RSC_MIGRATED, 0), NULL,
                                pe_order_optional | pe_order_implies_first_migratable, data_set);
        }

        custom_action_order(rsc, generate_op_key(rsc->id, RSC_MIGRATED, 0), NULL,
                            rsc, generate_op_key(rsc->id, RSC_STOP, 0), NULL,
                            pe_order_optional | pe_order_implies_first_migratable, data_set);
        custom_action_order(rsc, generate_op_key(rsc->id, RSC_MIGRATED, 0), NULL,
                            rsc, generate_op_key(rsc->id, RSC_START, 0), NULL,
                            pe_order_optional | pe_order_implies_first_migratable | pe_order_pseudo_left,
                            data_set);
    }

    if (migrate_to) {
        add_hash_param(migrate_to->meta, XML_LRM_ATTR_MIGRATE_SOURCE, current->details->uname);
        add_hash_param(migrate_to->meta, XML_LRM_ATTR_MIGRATE_TARGET, chosen->details->uname);

        /* Pacemaker Remote connections don't require pending to be recorded in
         * the CIB. We can reduce CIB writes by not setting PENDING for them.
         */
        if (rsc->is_remote_node == FALSE) {
            /* migrate_to takes place on the source node, but can
             * have an effect on the target node depending on how
             * the agent is written. Because of this, we have to maintain
             * a record that the migrate_to occurred, in case the source node
             * loses membership while the migrate_to action is still in-flight.
             */
            add_hash_param(migrate_to->meta, XML_OP_ATTR_PENDING, "true");
        }
    }

    if (migrate_from) {
        add_hash_param(migrate_from->meta, XML_LRM_ATTR_MIGRATE_SOURCE, current->details->uname);
        add_hash_param(migrate_from->meta, XML_LRM_ATTR_MIGRATE_TARGET, chosen->details->uname);
    }
}

void
native_create_actions(resource_t * rsc, pe_working_set_t * data_set)
{
    action_t *start = NULL;
    node_t *chosen = NULL;
    node_t *current = NULL;
    gboolean need_stop = FALSE;
    gboolean is_moving = FALSE;
    gboolean allow_migrate = is_set(rsc->flags, pe_rsc_allow_migrate) ? TRUE : FALSE;

    GListPtr gIter = NULL;
    unsigned int num_all_active = 0;
    unsigned int num_clean_active = 0;
    bool multiply_active = FALSE;
    enum rsc_role_e role = RSC_ROLE_UNKNOWN;
    enum rsc_role_e next_role = RSC_ROLE_UNKNOWN;

    CRM_ASSERT(rsc);
    chosen = rsc->allocated_to;
    if (chosen != NULL && rsc->next_role == RSC_ROLE_UNKNOWN) {
        rsc->next_role = RSC_ROLE_STARTED;
        pe_rsc_trace(rsc, "Fixed next_role: unknown -> %s", role2text(rsc->next_role));

    } else if (rsc->next_role == RSC_ROLE_UNKNOWN) {
        rsc->next_role = RSC_ROLE_STOPPED;
        pe_rsc_trace(rsc, "Fixed next_role: unknown -> %s", role2text(rsc->next_role));
    }

    pe_rsc_trace(rsc, "Processing state transition for %s %p: %s->%s", rsc->id, rsc,
                 role2text(rsc->role), role2text(rsc->next_role));

    current = pe__find_active_on(rsc, &num_all_active, &num_clean_active);

    for (gIter = rsc->dangling_migrations; gIter != NULL; gIter = gIter->next) {
        node_t *dangling_source = (node_t *) gIter->data;

        action_t *stop = stop_action(rsc, dangling_source, FALSE);

        set_bit(stop->flags, pe_action_dangle);
        pe_rsc_trace(rsc, "Forcing a cleanup of %s on %s",
                     rsc->id, dangling_source->details->uname);

        if (is_set(data_set->flags, pe_flag_remove_after_stop)) {
            DeleteRsc(rsc, dangling_source, FALSE, data_set);
        }
    }

    if ((num_all_active == 2) && (num_clean_active == 2) && chosen
        && rsc->partial_migration_source && rsc->partial_migration_target
        && (current->details == rsc->partial_migration_source->details)
        && (chosen->details == rsc->partial_migration_target->details)) {

        /* The chosen node is still the migration target from a partial
         * migration. Attempt to continue the migration instead of recovering
         * by stopping the resource everywhere and starting it on a single node.
         */
        pe_rsc_trace(rsc,
                     "Will attempt to continue with a partial migration to target %s from %s",
                     rsc->partial_migration_target->details->id,
                     rsc->partial_migration_source->details->id);

    } else if (is_not_set(rsc->flags, pe_rsc_needs_fencing)) {
        /* If a resource has "requires" set to nothing or quorum, don't consider
         * it active on unclean nodes (similar to how all resources behave when
         * stonith-enabled is false). We can start such resources elsewhere
         * before fencing completes, and if we considered the resource active on
         * the failed node, we would attempt recovery for being active on
         * multiple nodes.
         */
        multiply_active = (num_clean_active > 1);
    } else {
        multiply_active = (num_all_active > 1);
    }

    if (multiply_active) {
        if (rsc->partial_migration_target && rsc->partial_migration_source) {
            // Migration was in progress, but we've chosen a different target
            crm_notice("Resource %s can no longer migrate to %s. Stopping on %s too",
                       rsc->id, rsc->partial_migration_target->details->uname,
                       rsc->partial_migration_source->details->uname);

        } else {
            // Resource was incorrectly multiply active
            pe_proc_err("Resource %s is active on %u nodes (%s)",
                        rsc->id, num_all_active,
                        recovery2text(rsc->recovery_type));
            crm_notice("See https://wiki.clusterlabs.org/wiki/FAQ#Resource_is_Too_Active for more information");
        }

        if (rsc->recovery_type == recovery_stop_start) {
            need_stop = TRUE;
        }

        /* If by chance a partial migration is in process, but the migration
         * target is not chosen still, clear all partial migration data.
         */
        rsc->partial_migration_source = rsc->partial_migration_target = NULL;
        allow_migrate = FALSE;
    }

    if (is_set(rsc->flags, pe_rsc_start_pending)) {
        start = start_action(rsc, chosen, TRUE);
        set_bit(start->flags, pe_action_print_always);
    }

    if (current && chosen && current->details != chosen->details) {
        pe_rsc_trace(rsc, "Moving %s", rsc->id);
        is_moving = TRUE;
        need_stop = TRUE;

    } else if (is_set(rsc->flags, pe_rsc_failed)) {
        pe_rsc_trace(rsc, "Recovering %s", rsc->id);
        need_stop = TRUE;

    } else if (is_set(rsc->flags, pe_rsc_block)) {
        pe_rsc_trace(rsc, "Block %s", rsc->id);
        need_stop = TRUE;

    } else if (rsc->role > RSC_ROLE_STARTED && current != NULL && chosen != NULL) {
        /* Recovery of a promoted resource */
        start = start_action(rsc, chosen, TRUE);
        if (is_set(start->flags, pe_action_optional) == FALSE) {
            pe_rsc_trace(rsc, "Forced start %s", rsc->id);
            need_stop = TRUE;
        }
    }

    pe_rsc_trace(rsc, "Creating actions for %s: %s->%s", rsc->id,
                 role2text(rsc->role), role2text(rsc->next_role));

    /* Create any additional actions required when bringing resource down and
     * back up to same level.
     */
    role = rsc->role;
    while (role != RSC_ROLE_STOPPED) {
        next_role = rsc_state_matrix[role][RSC_ROLE_STOPPED];
        pe_rsc_trace(rsc, "Down: Executing: %s->%s (%s)%s", role2text(role), role2text(next_role),
                     rsc->id, need_stop ? " required" : "");
        if (rsc_action_matrix[role][next_role] (rsc, current, !need_stop, data_set) == FALSE) {
            break;
        }
        role = next_role;
    }

    while (rsc->role <= rsc->next_role && role != rsc->role && is_not_set(rsc->flags, pe_rsc_block)) {
        next_role = rsc_state_matrix[role][rsc->role];
        pe_rsc_trace(rsc, "Up: Executing: %s->%s (%s)%s", role2text(role), role2text(next_role),
                     rsc->id, need_stop ? " required" : "");
        if (rsc_action_matrix[role][next_role] (rsc, chosen, !need_stop, data_set) == FALSE) {
            break;
        }
        role = next_role;
    }
    role = rsc->role;

    /* Required steps from this role to the next */
    while (role != rsc->next_role) {
        next_role = rsc_state_matrix[role][rsc->next_role];
        pe_rsc_trace(rsc, "Role: Executing: %s->%s = (%s on %s)", role2text(role),
                     role2text(next_role), rsc->id, chosen? chosen->details->uname : "NA");
        if (rsc_action_matrix[role][next_role] (rsc, chosen, FALSE, data_set) == FALSE) {
            break;
        }
        role = next_role;
    }

    if (is_set(rsc->flags, pe_rsc_block)) {
        pe_rsc_trace(rsc, "No monitor additional ops for blocked resource");

    } else if (rsc->next_role != RSC_ROLE_STOPPED || is_set(rsc->flags, pe_rsc_managed) == FALSE) {
        pe_rsc_trace(rsc, "Monitor ops for active resource");
        start = start_action(rsc, chosen, TRUE);
        Recurring(rsc, start, chosen, data_set);
        Recurring_Stopped(rsc, start, chosen, data_set);
    } else {
        pe_rsc_trace(rsc, "Monitor ops for inactive resource");
        Recurring_Stopped(rsc, NULL, NULL, data_set);
    }

    /* if we are stuck in a partial migration, where the target
     * of the partial migration no longer matches the chosen target.
     * A full stop/start is required */
    if (rsc->partial_migration_target && (chosen == NULL || rsc->partial_migration_target->details != chosen->details)) {
        pe_rsc_trace(rsc, "Not allowing partial migration to continue. %s", rsc->id);
        allow_migrate = FALSE;

    } else if (is_moving == FALSE ||
               is_not_set(rsc->flags, pe_rsc_managed) ||
               is_set(rsc->flags, pe_rsc_failed) ||
               is_set(rsc->flags, pe_rsc_start_pending) ||
               (current && current->details->unclean) ||
               rsc->next_role < RSC_ROLE_STARTED) {

        allow_migrate = FALSE;
    }

    if (allow_migrate) {
        handle_migration_actions(rsc, current, chosen, data_set);
    }
}

static void
rsc_avoids_remote_nodes(resource_t *rsc)
{
    GHashTableIter iter;
    node_t *node = NULL;

    g_hash_table_iter_init(&iter, rsc->allowed_nodes);
    while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
        if (node->details->remote_rsc) {
            node->weight = -INFINITY;
        }
    }
}

/*!
 * \internal
 * \brief Return allowed nodes as (possibly sorted) list
 *
 * Return a list of a resource's allowed nodes. If printing to stdout, sort the
 * list, to keep action lists in a consistent order for regression tests.
 *
 * \param[in] rsc       Resource to check for allowed nodes
 * \param[in] data_set  Cluster working set
 *
 * \return List of resource's allowed nodes
 */
static GList *
allowed_nodes_as_list(pe_resource_t *rsc, pe_working_set_t *data_set)
{
    GList *allowed_nodes = NULL;

    if (rsc->allowed_nodes) {
        allowed_nodes = g_hash_table_get_values(rsc->allowed_nodes);
    }

    if (is_set(data_set->flags, pe_flag_stdout)) {
        allowed_nodes = g_list_sort(allowed_nodes, sort_node_uname);
    }
    return allowed_nodes;
}
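/* Create the implicit constraints a primitive always needs: stop before
 * start, promotable-role orderings, unfencing and utilization orderings where
 * applicable, and ordering/colocation with any container or Pacemaker Remote
 * connection the resource depends on.
 */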
void
native_internal_constraints(resource_t * rsc, pe_working_set_t * data_set)
{
    /* This function is on the critical path and worth optimizing as much as possible */

    pe_resource_t *top = NULL;
    GList *allowed_nodes = NULL;
    bool check_unfencing = FALSE;
    bool check_utilization = FALSE;

    if (is_not_set(rsc->flags, pe_rsc_managed)) {
        pe_rsc_trace(rsc,
                     "Skipping native constraints for unmanaged resource: %s",
                     rsc->id);
        return;
    }

    top = uber_parent(rsc);

    // Whether resource requires unfencing
    check_unfencing = is_not_set(rsc->flags, pe_rsc_fence_device)
                      && is_set(data_set->flags, pe_flag_enable_unfencing)
                      && is_set(rsc->flags, pe_rsc_needs_unfencing);

    // Whether a non-default placement strategy is used
    check_utilization = (g_hash_table_size(rsc->utilization) > 0)
                        && safe_str_neq(data_set->placement_strategy, "default");

    // Order stops before starts (i.e. restart)
    custom_action_order(rsc, generate_op_key(rsc->id, RSC_STOP, 0), NULL,
                        rsc, generate_op_key(rsc->id, RSC_START, 0), NULL,
                        pe_order_optional | pe_order_implies_then | pe_order_restart,
                        data_set);

    // Promotable ordering: demote before stop, start before promote
    if (is_set(top->flags, pe_rsc_promotable) || (rsc->role > RSC_ROLE_SLAVE)) {
        custom_action_order(rsc, generate_op_key(rsc->id, RSC_DEMOTE, 0), NULL,
                            rsc, generate_op_key(rsc->id, RSC_STOP, 0), NULL,
                            pe_order_implies_first_master, data_set);

        custom_action_order(rsc, generate_op_key(rsc->id, RSC_START, 0), NULL,
                            rsc, generate_op_key(rsc->id, RSC_PROMOTE, 0), NULL,
                            pe_order_runnable_left, data_set);
    }

    // Certain checks need allowed nodes
    if (check_unfencing || check_utilization || rsc->container) {
        allowed_nodes = allowed_nodes_as_list(rsc, data_set);
    }

    if (check_unfencing) {
        /* Check if the node needs to be unfenced first */

        for (GList *item = allowed_nodes; item; item = item->next) {
            pe_node_t *node = item->data;
            pe_action_t *unfence = pe_fence_op(node, "on", TRUE, NULL, data_set);

            crm_debug("Ordering any stops of %s before %s, and any starts after",
                      rsc->id, unfence->uuid);

            /*
             * It would be more efficient to order clone resources once,
             * rather than order each instance, but ordering the instance
             * allows us to avoid unnecessary dependencies that might conflict
             * with user constraints.
             *
             * @TODO: This constraint can still produce a transition loop if the
             * resource has a stop scheduled on the node being unfenced, and
             * there is a user ordering constraint to start some other resource
             * (which will be ordered after the unfence) before stopping this
             * resource. An example is "start some slow-starting cloned service
             * before stopping an associated virtual IP that may be moving to
             * it":
             *       stop this -> unfencing -> start that -> stop this
             */
            custom_action_order(rsc, stop_key(rsc), NULL,
                                NULL, strdup(unfence->uuid), unfence,
                                pe_order_optional | pe_order_same_node, data_set);

            custom_action_order(NULL, strdup(unfence->uuid), unfence,
                                rsc, start_key(rsc), NULL,
                                pe_order_implies_then_on_node | pe_order_same_node,
                                data_set);
        }
    }

    if (check_utilization) {
        GListPtr gIter = NULL;

        pe_rsc_trace(rsc, "Creating utilization constraints for %s - strategy: %s",
                     rsc->id, data_set->placement_strategy);

        for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
            node_t *current = (node_t *) gIter->data;

            char *load_stopped_task = crm_concat(LOAD_STOPPED, current->details->uname, '_');
            action_t *load_stopped = get_pseudo_op(load_stopped_task, data_set);

            if (load_stopped->node == NULL) {
                load_stopped->node = node_copy(current);
                update_action_flags(load_stopped, pe_action_optional | pe_action_clear,
                                    __FUNCTION__, __LINE__);
            }

            custom_action_order(rsc, stop_key(rsc), NULL,
                                NULL, load_stopped_task, load_stopped, pe_order_load, data_set);
        }

        for (GList *item = allowed_nodes; item; item = item->next) {
            pe_node_t *next = item->data;
            char *load_stopped_task = crm_concat(LOAD_STOPPED, next->details->uname, '_');
            action_t *load_stopped = get_pseudo_op(load_stopped_task, data_set);

            if (load_stopped->node == NULL) {
                load_stopped->node = node_copy(next);
                update_action_flags(load_stopped, pe_action_optional | pe_action_clear,
                                    __FUNCTION__, __LINE__);
            }

            custom_action_order(NULL, strdup(load_stopped_task), load_stopped,
                                rsc, start_key(rsc), NULL, pe_order_load, data_set);

            custom_action_order(NULL, strdup(load_stopped_task), load_stopped,
                                rsc, generate_op_key(rsc->id, RSC_MIGRATE, 0), NULL,
                                pe_order_load, data_set);

            free(load_stopped_task);
        }
    }

    if (rsc->container) {
        resource_t *remote_rsc = NULL;

        if (rsc->is_remote_node) {
            // rsc is the implicit remote connection for a guest or bundle node

            /* Do not allow a guest resource to live on a Pacemaker Remote node,
             * to avoid nesting remotes. However, allow bundles to run on remote
             * nodes.
             */
            if (is_not_set(rsc->flags, pe_rsc_allow_remote_remotes)) {
                rsc_avoids_remote_nodes(rsc->container);
            }

            /* If someone cleans up a guest or bundle node's container, we will
             * likely schedule a (re-)probe of the container and recovery of the
             * connection. Order the connection stop after the container probe,
             * so that if we detect the container running, we will trigger a new
             * transition and avoid the unnecessary recovery.
             */
            custom_action_order(rsc->container,
                                generate_op_key(rsc->container->id, RSC_STATUS, 0), NULL,
                                rsc, generate_op_key(rsc->id, RSC_STOP, 0), NULL,
                                pe_order_optional, data_set);

        /* A user can specify that a resource must start on a Pacemaker Remote
         * node by explicitly configuring it with the container=NODENAME
         * meta-attribute. This is of questionable merit, since location
         * constraints can accomplish the same thing. But we support it, so here
         * we check whether a resource (that is not itself a remote connection)
         * has container set to a remote node or guest node resource.
         */
        } else if (rsc->container->is_remote_node) {
            remote_rsc = rsc->container;
        } else {
            remote_rsc = pe__resource_contains_guest_node(data_set,
                                                          rsc->container);
        }

        if (remote_rsc) {
            /* Force the resource on the Pacemaker Remote node instead of
             * colocating the resource with the container resource.
             */
            for (GList *item = allowed_nodes; item; item = item->next) {
                pe_node_t *node = item->data;

                if (node->details->remote_rsc != remote_rsc) {
                    node->weight = -INFINITY;
                }
            }

        } else {
            /* This resource is either a filler for a container that does NOT
             * represent a Pacemaker Remote node, or a Pacemaker Remote
             * connection resource for a guest node or bundle.
             */
            int score;

            crm_trace("Order and colocate %s relative to its container %s",
                      rsc->id, rsc->container->id);

            custom_action_order(rsc->container,
                                generate_op_key(rsc->container->id, RSC_START, 0), NULL,
                                rsc, generate_op_key(rsc->id, RSC_START, 0), NULL,
                                pe_order_implies_then | pe_order_runnable_left, data_set);

            custom_action_order(rsc, generate_op_key(rsc->id, RSC_STOP, 0), NULL,
                                rsc->container, generate_op_key(rsc->container->id, RSC_STOP, 0), NULL,
                                pe_order_implies_first, data_set);

            if (is_set(rsc->flags, pe_rsc_allow_remote_remotes)) {
                score = 10000;    /* Highly preferred but not essential */
            } else {
                score = INFINITY; /* Force them to run on the same host */
            }
            rsc_colocation_new("resource-with-container", NULL, score, rsc,
                               rsc->container, NULL, NULL, data_set);
        }
    }

    if (rsc->is_remote_node || is_set(rsc->flags, pe_rsc_fence_device)) {
        /* don't allow remote nodes to run stonith devices
         * or remote connection resources.*/
        rsc_avoids_remote_nodes(rsc);
    }
    g_list_free(allowed_nodes);
}
1573 
1574 void
1576  rsc_colocation_t *constraint,
1577  pe_working_set_t *data_set)
1578 {
1579  if (rsc_lh == NULL) {
1580  pe_err("rsc_lh was NULL for %s", constraint->id);
1581  return;
1582 
1583  } else if (constraint->rsc_rh == NULL) {
1584  pe_err("rsc_rh was NULL for %s", constraint->id);
1585  return;
1586  }
1587 
1588  pe_rsc_trace(rsc_lh, "Processing colocation constraint between %s and %s", rsc_lh->id,
1589  rsc_rh->id);
1590 
1591  rsc_rh->cmds->rsc_colocation_rh(rsc_lh, rsc_rh, constraint, data_set);
1592 }
1593 
1596  rsc_colocation_t * constraint, gboolean preview)
1597 {
1598  if (constraint->score == 0) {
1599  return influence_nothing;
1600  }
1601 
1602  /* rh side must be allocated before we can process constraint */
1603  if (preview == FALSE && is_set(rsc_rh->flags, pe_rsc_provisional)) {
1604  return influence_nothing;
1605  }
1606 
1607  if ((constraint->role_lh >= RSC_ROLE_SLAVE) &&
1608  rsc_lh->parent && is_set(rsc_lh->parent->flags, pe_rsc_promotable)
1609  && is_not_set(rsc_lh->flags, pe_rsc_provisional)) {
1610 
1611  /* LH and RH resources have already been allocated, place the correct
1612  * priority on LH rsc for the given promotable clone resource role */
1613  return influence_rsc_priority;
1614  }
1615 
1616  if (preview == FALSE && is_not_set(rsc_lh->flags, pe_rsc_provisional)) {
1617  // Log an error if we violated a mandatory colocation constraint
1618  const pe_node_t *rh_node = rsc_rh->allocated_to;
1619 
1620  if (rsc_lh->allocated_to == NULL) {
1621  // Dependent resource isn't allocated, so constraint doesn't matter
1622  return influence_nothing;
1623  }
1624 
1625  if (constraint->score >= INFINITY) {
1626  // Dependent resource must colocate with rh_node
1627 
1628  if ((rh_node == NULL)
1629  || (rh_node->details != rsc_lh->allocated_to->details)) {
1630  crm_err("%s must be colocated with %s but is not (%s vs. %s)",
1631  rsc_lh->id, rsc_rh->id,
1632  rsc_lh->allocated_to->details->uname,
1633  (rh_node? rh_node->details->uname : "unallocated"));
1634  }
1635 
1636  } else if (constraint->score <= -INFINITY) {
1637  // Dependent resource must anti-colocate with rh_node
1638 
1639  if ((rh_node != NULL)
1640  && (rsc_lh->allocated_to->details == rh_node->details)) {
1641  crm_err("%s and %s must be anti-colocated but are allocated "
1642  "to the same node (%s)",
1643  rsc_lh->id, rsc_rh->id, rh_node->details->uname);
1644  }
1645  }
1646  return influence_nothing;
1647  }
1648 
1649  if (constraint->score > 0
1650  && constraint->role_lh != RSC_ROLE_UNKNOWN && constraint->role_lh != rsc_lh->next_role) {
1651  crm_trace("LH: Skipping constraint: \"%s\" state filter nextrole is %s",
1652  role2text(constraint->role_lh), role2text(rsc_lh->next_role));
1653  return influence_nothing;
1654  }
1655 
1656  if (constraint->score > 0
1657  && constraint->role_rh != RSC_ROLE_UNKNOWN && constraint->role_rh != rsc_rh->next_role) {
1658  crm_trace("RH: Skipping constraint: \"%s\" state filter", role2text(constraint->role_rh));
1659  return influence_nothing;
1660  }
1661 
1662  if (constraint->score < 0
1663  && constraint->role_lh != RSC_ROLE_UNKNOWN && constraint->role_lh == rsc_lh->next_role) {
1664  crm_trace("LH: Skipping negative constraint: \"%s\" state filter",
1665  role2text(constraint->role_lh));
1666  return influence_nothing;
1667  }
1668 
1669  if (constraint->score < 0
1670  && constraint->role_rh != RSC_ROLE_UNKNOWN && constraint->role_rh == rsc_rh->next_role) {
1671  crm_trace("RH: Skipping negative constraint: \"%s\" state filter",
1672  role2text(constraint->role_rh));
1673  return influence_nothing;
1674  }
1675 
1676  return influence_rsc_location;
1677 }
1678 
1679 static void
1680 influence_priority(resource_t * rsc_lh, resource_t * rsc_rh, rsc_colocation_t * constraint)
1681 {
1682  const char *rh_value = NULL;
1683  const char *lh_value = NULL;
1684  const char *attribute = CRM_ATTR_ID;
1685  int score_multiplier = 1;
1686 
1687  if (constraint->node_attribute != NULL) {
1688  attribute = constraint->node_attribute;
1689  }
1690 
1691  if (!rsc_rh->allocated_to || !rsc_lh->allocated_to) {
1692  return;
1693  }
1694 
1695  lh_value = pe_node_attribute_raw(rsc_lh->allocated_to, attribute);
1696  rh_value = pe_node_attribute_raw(rsc_rh->allocated_to, attribute);
1697 
1698  if (!safe_str_eq(lh_value, rh_value)) {
1699  if(constraint->score == INFINITY && constraint->role_lh == RSC_ROLE_MASTER) {
1700  rsc_lh->priority = -INFINITY;
1701  }
1702  return;
1703  }
1704 
1705  if (constraint->role_rh && (constraint->role_rh != rsc_rh->next_role)) {
1706  return;
1707  }
1708 
1709  if (constraint->role_lh == RSC_ROLE_SLAVE) {
1710  score_multiplier = -1;
1711  }
1712 
1713  rsc_lh->priority = merge_weights(score_multiplier * constraint->score, rsc_lh->priority);
1714 }
1715 
1716 static void
1717 colocation_match(resource_t * rsc_lh, resource_t * rsc_rh, rsc_colocation_t * constraint)
1718 {
1719  const char *tmp = NULL;
1720  const char *value = NULL;
1721  const char *attribute = CRM_ATTR_ID;
1722 
1723  GHashTable *work = NULL;
1724  gboolean do_check = FALSE;
1725 
1726  GHashTableIter iter;
1727  node_t *node = NULL;
1728 
1729  if (constraint->node_attribute != NULL) {
1730  attribute = constraint->node_attribute;
1731  }
1732 
1733  if (rsc_rh->allocated_to) {
1734  value = pe_node_attribute_raw(rsc_rh->allocated_to, attribute);
1735  do_check = TRUE;
1736 
1737  } else if (constraint->score < 0) {
1738  /* nothing to do:
1739  * anti-colocation with something that is not running
1740  */
1741  return;
1742  }
1743 
1744  work = node_hash_dup(rsc_lh->allowed_nodes);
1745 
1746  g_hash_table_iter_init(&iter, work);
1747  while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
1748  tmp = pe_node_attribute_raw(node, attribute);
1749  if (do_check && safe_str_eq(tmp, value)) {
1750  if (constraint->score < INFINITY) {
1751  pe_rsc_trace(rsc_lh, "%s: %s.%s += %d", constraint->id, rsc_lh->id,
1752  node->details->uname, constraint->score);
1753  node->weight = merge_weights(constraint->score, node->weight);
1754  }
1755 
1756  } else if (do_check == FALSE || constraint->score >= INFINITY) {
1757  pe_rsc_trace(rsc_lh, "%s: %s.%s -= %d (%s)", constraint->id, rsc_lh->id,
1758  node->details->uname, constraint->score,
1759  do_check ? "failed" : "unallocated");
1760  node->weight = merge_weights(-constraint->score, node->weight);
1761  }
1762  }
1763 
1764  if (can_run_any(work)
1765  || constraint->score <= -INFINITY || constraint->score >= INFINITY) {
1766  g_hash_table_destroy(rsc_lh->allowed_nodes);
1767  rsc_lh->allowed_nodes = work;
1768  work = NULL;
1769 
1770  } else {
1771  static char score[33];
1772 
1773  score2char_stack(constraint->score, score, sizeof(score));
1774 
1775  pe_rsc_info(rsc_lh, "%s: Rolling back scores from %s (%d, %s)",
1776  rsc_lh->id, rsc_rh->id, do_check, score);
1777  }
1778 
1779  if (work) {
1780  g_hash_table_destroy(work);
1781  }
1782 }
1783 
1784 void
1785 native_rsc_colocation_rh(resource_t *rsc_lh, resource_t *rsc_rh,
1786  rsc_colocation_t *constraint,
1787  pe_working_set_t *data_set)
1788 {
1789  enum filter_colocation_res filter_results;
1790 
1791  CRM_ASSERT(rsc_lh);
1792  CRM_ASSERT(rsc_rh);
1793  filter_results = filter_colocation_constraint(rsc_lh, rsc_rh, constraint, FALSE);
1794  pe_rsc_trace(rsc_lh, "%sColocating %s with %s (%s, weight=%d, filter=%d)",
1795  constraint->score >= 0 ? "" : "Anti-",
1796  rsc_lh->id, rsc_rh->id, constraint->id, constraint->score, filter_results);
1797 
1798  switch (filter_results) {
1799  case influence_rsc_priority:
1800  influence_priority(rsc_lh, rsc_rh, constraint);
1801  break;
1802  case influence_rsc_location:
1803  pe_rsc_trace(rsc_lh, "%sColocating %s with %s (%s, weight=%d)",
1804  constraint->score >= 0 ? "" : "Anti-",
1805  rsc_lh->id, rsc_rh->id, constraint->id, constraint->score);
1806  colocation_match(rsc_lh, rsc_rh, constraint);
1807  break;
1808  case influence_nothing:
1809  default:
1810  return;
1811  }
1812 }
1813 
1814 static gboolean
1815 filter_rsc_ticket(resource_t * rsc_lh, rsc_ticket_t * rsc_ticket)
1816 {
1817  if (rsc_ticket->role_lh != RSC_ROLE_UNKNOWN && rsc_ticket->role_lh != rsc_lh->role) {
1818  pe_rsc_trace(rsc_lh, "LH: Skipping constraint: \"%s\" state filter",
1819  role2text(rsc_ticket->role_lh));
1820  return FALSE;
1821  }
1822 
1823  return TRUE;
1824 }
1825 
1826 void
1827 rsc_ticket_constraint(resource_t * rsc_lh, rsc_ticket_t * rsc_ticket, pe_working_set_t * data_set)
1828 {
1829  if (rsc_ticket == NULL) {
1830  pe_err("rsc_ticket was NULL");
1831  return;
1832  }
1833 
1834  if (rsc_lh == NULL) {
1835  pe_err("rsc_lh was NULL for %s", rsc_ticket->id);
1836  return;
1837  }
1838 
1839  if (rsc_ticket->ticket->granted && rsc_ticket->ticket->standby == FALSE) {
1840  return;
1841  }
1842 
1843  if (rsc_lh->children) {
1844  GListPtr gIter = rsc_lh->children;
1845 
1846  pe_rsc_trace(rsc_lh, "Processing ticket dependencies from %s", rsc_lh->id);
1847 
1848  for (; gIter != NULL; gIter = gIter->next) {
1849  resource_t *child_rsc = (resource_t *) gIter->data;
1850 
1851  rsc_ticket_constraint(child_rsc, rsc_ticket, data_set);
1852  }
1853  return;
1854  }
1855 
1856  pe_rsc_trace(rsc_lh, "%s: Processing ticket dependency on %s (%s, %s)",
1857  rsc_lh->id, rsc_ticket->ticket->id, rsc_ticket->id,
1858  role2text(rsc_ticket->role_lh));
1859 
1860  if ((rsc_ticket->ticket->granted == FALSE)
1861  && (rsc_lh->running_on != NULL)) {
1862 
1863  GListPtr gIter = NULL;
1864 
1865  switch (rsc_ticket->loss_policy) {
1866  case loss_ticket_stop:
1867  resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set);
1868  break;
1869 
1870  case loss_ticket_demote:
1871  // Promotion score will be set to -INFINITY in promotion_order()
1872  if (rsc_ticket->role_lh != RSC_ROLE_MASTER) {
1873  resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set);
1874  }
1875  break;
1876 
1877  case loss_ticket_fence:
1878  if (filter_rsc_ticket(rsc_lh, rsc_ticket) == FALSE) {
1879  return;
1880  }
1881 
1882  resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set);
1883 
1884  for (gIter = rsc_lh->running_on; gIter != NULL; gIter = gIter->next) {
1885  node_t *node = (node_t *) gIter->data;
1886 
1887  pe_fence_node(data_set, node, "deadman ticket was lost");
1888  }
1889  break;
1890 
1891  case loss_ticket_freeze:
1892  if (filter_rsc_ticket(rsc_lh, rsc_ticket) == FALSE) {
1893  return;
1894  }
1895  if (rsc_lh->running_on != NULL) {
1896  clear_bit(rsc_lh->flags, pe_rsc_managed);
1897  set_bit(rsc_lh->flags, pe_rsc_block);
1898  }
1899  break;
1900  }
1901 
1902  } else if (rsc_ticket->ticket->granted == FALSE) {
1903 
1904  if (rsc_ticket->role_lh != RSC_ROLE_MASTER || rsc_ticket->loss_policy == loss_ticket_stop) {
1905  resource_location(rsc_lh, NULL, -INFINITY, "__no_ticket__", data_set);
1906  }
1907 
1908  } else if (rsc_ticket->ticket->standby) {
1909 
1910  if (rsc_ticket->role_lh != RSC_ROLE_MASTER || rsc_ticket->loss_policy == loss_ticket_stop) {
1911  resource_location(rsc_lh, NULL, -INFINITY, "__ticket_standby__", data_set);
1912  }
1913  }
1914 }
1915 
1916 enum pe_action_flags
1917 native_action_flags(action_t * action, node_t * node)
1918 {
1919  return action->flags;
1920 }
1921 
1922 static inline bool
1923 is_primitive_action(pe_action_t *action)
1924 {
1925  return action && action->rsc && (action->rsc->variant == pe_native);
1926 }
1927 
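1928 /*!
1929  * \internal
1930  * \brief Set action flags appropriately when pe_restart_order is used
1931  *
1932  * \param[in] first   'First' action in an ordering with pe_restart_order
1933  * \param[in] then    'Then' action in an ordering with pe_restart_order
1934  * \param[in] filter  Action flags to limit the scope of the updates
1935  *
1936  * pe_restart_order is set for "stop resource before starting it" and
1937  * "stop later group member before stopping earlier group member"
1938  */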
1939 static void
1940 handle_restart_ordering(pe_action_t *first, pe_action_t *then,
1941  enum pe_action_flags filter)
1942 {
1943  const char *reason = NULL;
1944 
1945  CRM_ASSERT(is_primitive_action(first));
1946  CRM_ASSERT(is_primitive_action(then));
1947 
1948  // We need to update the action in two cases:
1949 
1950  // ... if 'then' is required
1951  if (is_set(filter, pe_action_optional)
1952  && is_not_set(then->flags, pe_action_optional)) {
1953  reason = "restart";
1954  }
1955 
1956  /* ... if 'then' is unrunnable start of managed resource (if a resource
1957  * should restart but can't start, we still want to stop)
1958  */
1959  if (is_set(filter, pe_action_runnable)
1960  && is_not_set(then->flags, pe_action_runnable)
1961  && is_set(then->rsc->flags, pe_rsc_managed)
1962  && safe_str_eq(then->task, RSC_START)) {
1963  reason = "stop";
1964  }
1965 
1966  if (reason == NULL) {
1967  return;
1968  }
1969 
1970  pe_rsc_trace(first->rsc, "Handling %s -> %s for %s",
1971  first->uuid, then->uuid, reason);
1972 
1973  // Make 'first' required if it is runnable
1974  if (is_set(first->flags, pe_action_runnable)) {
1975  pe_action_implies(first, then, pe_action_optional);
1976  }
1977 
1978  // Make 'first' required if 'then' is required
1979  if (is_not_set(then->flags, pe_action_optional)) {
1980  pe_action_implies(first, then, pe_action_optional);
1981  }
1982 
1983  // Make 'first' unmigratable if 'then' is unmigratable
1984  if (is_not_set(then->flags, pe_action_migrate_runnable)) {
1985  pe_action_implies(first, then, pe_action_migrate_runnable);
1986  }
1987 
1988  // Make 'then' unrunnable if 'first' is required but unrunnable
1989  if (is_not_set(first->flags, pe_action_optional)
1990  && is_not_set(first->flags, pe_action_runnable)) {
1991  pe_action_implies(then, first, pe_action_runnable);
1992  }
1993 }
1994 
1995 enum pe_graph_flags
1996 native_update_actions(pe_action_t *first, pe_action_t *then, pe_node_t *node,
1997  enum pe_action_flags flags, enum pe_action_flags filter,
1998  enum pe_ordering type, pe_working_set_t *data_set)
1999 {
2000  /* flags == get_action_flags(first, then_node) called from update_action() */
2001  enum pe_graph_flags changed = pe_graph_none;
2002  enum pe_action_flags then_flags = then->flags;
2003  enum pe_action_flags first_flags = first->flags;
2004 
2005  crm_trace( "Testing %s on %s (0x%.6x) with %s 0x%.6x",
2006  first->uuid, first->node ? first->node->details->uname : "[none]",
2007  first->flags, then->uuid, then->flags);
2008 
2009  if (type & pe_order_asymmetrical) {
2010  resource_t *then_rsc = then->rsc;
2011  enum rsc_role_e then_rsc_role = then_rsc ? then_rsc->fns->state(then_rsc, TRUE) : 0;
2012 
2013  if (!then_rsc) {
2014  /* ignore */
2015  } else if ((then_rsc_role == RSC_ROLE_STOPPED) && safe_str_eq(then->task, RSC_STOP)) {
2016  /* ignore... if 'then' is supposed to be stopped after 'first', but
2017  * then is already stopped, there is nothing to be done when non-symmetrical. */
2018  } else if ((then_rsc_role >= RSC_ROLE_STARTED)
2019  && safe_str_eq(then->task, RSC_START)
2020  && is_set(then->flags, pe_action_optional)
2021  && then->node
2022  && g_list_length(then_rsc->running_on) == 1
2023  && then->node->details == ((node_t *) then_rsc->running_on->data)->details) {
2024  /* Ignore. If 'then' is supposed to be started after 'first', but
2025  * 'then' is already started, there is nothing to be done when
2026  * asymmetrical -- unless the start is mandatory, which indicates
2027  * the resource is restarting, and the ordering is still needed.
2028  */
2029  } else if (!(first->flags & pe_action_runnable)) {
2030  /* prevent 'then' action from happening if 'first' is not runnable and
2031  * 'then' has not yet occurred. */
2032  pe_action_implies(then, first, pe_action_optional);
2033  pe_action_implies(then, first, pe_action_runnable);
2034 
2035  pe_rsc_trace(then->rsc, "Unset optional and runnable on %s", then->uuid);
2036  } else {
2037  /* ignore... then is allowed to start/stop if it wants to. */
2038  }
2039  }
2040 
2041  if (type & pe_order_implies_first) {
2042  if (is_set(filter, pe_action_optional) && is_not_set(flags /* Should be then_flags? */, pe_action_optional)) {
2043  // Needs is_set(first_flags, pe_action_optional) too?
2044  pe_rsc_trace(first->rsc, "Unset optional on %s because of %s", first->uuid, then->uuid);
2045  pe_action_implies(first, then, pe_action_optional);
2046  }
2047 
2048  if (is_set(flags, pe_action_migrate_runnable) &&
2049  is_set(then->flags, pe_action_migrate_runnable) == FALSE &&
2050  is_set(then->flags, pe_action_optional) == FALSE) {
2051 
2052  pe_rsc_trace(first->rsc, "Unset migrate runnable on %s because of %s",
2053  first->uuid, then->uuid);
2054  pe_action_implies(first, then, pe_action_migrate_runnable);
2055  }
2056  }
2057 
2058  if (type & pe_order_implies_first_master) {
2059  if ((filter & pe_action_optional) &&
2060  ((then->flags & pe_action_optional) == FALSE) &&
2061  then->rsc && (then->rsc->role == RSC_ROLE_MASTER)) {
2062  pe_action_implies(first, then, pe_action_optional);
2063 
2064  if (is_set(first->flags, pe_action_migrate_runnable) &&
2065  is_set(then->flags, pe_action_migrate_runnable) == FALSE) {
2066 
2067  pe_rsc_trace(first->rsc, "Unset migrate runnable on %s because of %s", first->uuid, then->uuid);
2068  pe_action_implies(first, then, pe_action_migrate_runnable);
2069  }
2070  pe_rsc_trace(then->rsc, "Unset optional on %s because of %s", first->uuid, then->uuid);
2071  }
2072  }
2073 
2074  if ((type & pe_order_implies_first_migratable)
2075  && is_set(filter, pe_action_optional)) {
2076 
2077  if (((then->flags & pe_action_migrate_runnable) == FALSE) ||
2078  ((then->flags & pe_action_runnable) == FALSE)) {
2079 
2080  pe_rsc_trace(then->rsc, "Unset runnable on %s because %s is neither runnable nor migratable", first->uuid, then->uuid);
2081  pe_action_implies(first, then, pe_action_runnable);
2082  }
2083 
2084  if ((then->flags & pe_action_optional) == 0) {
2085  pe_rsc_trace(then->rsc, "Unset optional on %s because %s is not optional", first->uuid, then->uuid);
2086  pe_action_implies(first, then, pe_action_optional);
2087  }
2088  }
2089 
2090  if ((type & pe_order_pseudo_left)
2091  && is_set(filter, pe_action_optional)) {
2092 
2093  if ((first->flags & pe_action_runnable) == FALSE) {
2094  pe_action_implies(then, first, pe_action_runnable);
2095  pe_action_implies(then, first, pe_action_pseudo);
2096  pe_rsc_trace(then->rsc, "Unset pseudo on %s because %s is not runnable", then->uuid, first->uuid);
2097  }
2098 
2099  }
2100 
2101  if (is_set(type, pe_order_runnable_left)
2102  && is_set(filter, pe_action_runnable)
2103  && is_set(then->flags, pe_action_runnable)
2104  && is_set(flags, pe_action_runnable) == FALSE) {
2105  pe_rsc_trace(then->rsc, "Unset runnable on %s because of %s", then->uuid, first->uuid);
2106  pe_action_implies(then, first, pe_action_runnable);
2107  pe_action_implies(then, first, pe_action_migrate_runnable);
2108  }
2109 
2110  if (is_set(type, pe_order_implies_then)
2111  && is_set(filter, pe_action_optional)
2112  && is_set(then->flags, pe_action_optional)
2113  && is_set(flags, pe_action_optional) == FALSE) {
2114 
2115  /* in this case, treat migrate_runnable as if first is optional */
2116  if (is_set(first->flags, pe_action_migrate_runnable) == FALSE) {
2117  pe_rsc_trace(then->rsc, "Unset optional on %s because of %s", then->uuid, first->uuid);
2118  pe_action_implies(then, first, pe_action_optional);
2119  }
2120  }
2121 
2122  if (is_set(type, pe_order_restart)) {
2123  handle_restart_ordering(first, then, filter);
2124  }
2125 
2126  if (then_flags != then->flags) {
2127  changed |= pe_graph_updated_then;
2128  pe_rsc_trace(then->rsc,
2129  "Then: Flags for %s on %s are now 0x%.6x (was 0x%.6x) because of %s 0x%.6x",
2130  then->uuid, then->node ? then->node->details->uname : "[none]", then->flags,
2131  then_flags, first->uuid, first->flags);
2132 
2133  if(then->rsc && then->rsc->parent) {
2134  /* "X_stop then X_start" doesn't get handled for cloned groups unless we do this */
2135  update_action(then, data_set);
2136  }
2137  }
2138 
2139  if (first_flags != first->flags) {
2140  changed |= pe_graph_updated_first;
2141  pe_rsc_trace(first->rsc,
2142  "First: Flags for %s on %s are now 0x%.6x (was 0x%.6x) because of %s 0x%.6x",
2143  first->uuid, first->node ? first->node->details->uname : "[none]",
2144  first->flags, first_flags, then->uuid, then->flags);
2145  }
2146 
2147  return changed;
2148 }
2149 
2150 void
2151 native_rsc_location(pe_resource_t *rsc, pe__location_t *constraint)
2152 {
2153  GListPtr gIter = NULL;
2154  GHashTableIter iter;
2155  node_t *node = NULL;
2156 
2157  if (constraint == NULL) {
2158  pe_err("Constraint is NULL");
2159  return;
2160 
2161  } else if (rsc == NULL) {
2162  pe_err("LHS of rsc_to_node (%s) is NULL", constraint->id);
2163  return;
2164  }
2165 
2166  pe_rsc_trace(rsc, "Applying %s (%s) to %s", constraint->id,
2167  role2text(constraint->role_filter), rsc->id);
2168 
2169  /* take "lifetime" into account */
2170  if (constraint->role_filter > RSC_ROLE_UNKNOWN && constraint->role_filter != rsc->next_role) {
2171  pe_rsc_debug(rsc, "Constraint (%s) is not active (role : %s vs. %s)",
2172  constraint->id, role2text(constraint->role_filter), role2text(rsc->next_role));
2173  return;
2174  }
2175 
2176  if (constraint->node_list_rh == NULL) {
2177  pe_rsc_trace(rsc, "RHS of constraint %s is NULL", constraint->id);
2178  return;
2179  }
2180 
2181  for (gIter = constraint->node_list_rh; gIter != NULL; gIter = gIter->next) {
2182  node_t *node = (node_t *) gIter->data;
2183  node_t *other_node = NULL;
2184 
2185  other_node = (node_t *) pe_hash_table_lookup(rsc->allowed_nodes, node->details->id);
2186 
2187  if (other_node != NULL) {
2188  pe_rsc_trace(rsc, "%s + %s: %d + %d",
2189  node->details->uname,
2190  other_node->details->uname, node->weight, other_node->weight);
2191  other_node->weight = merge_weights(other_node->weight, node->weight);
2192 
2193  } else {
2194  other_node = node_copy(node);
2195 
2196  pe_rsc_trace(rsc, "%s: %d (insert %d)", other_node->details->uname, other_node->weight, constraint->discover_mode);
2197  g_hash_table_insert(rsc->allowed_nodes, (gpointer) other_node->details->id, other_node);
2198  }
2199 
2200  if (other_node->rsc_discover_mode < constraint->discover_mode) {
2201  if (constraint->discover_mode == pe_discover_exclusive) {
2202  rsc->exclusive_discover = TRUE;
2203  }
2204  /* exclusive > never > always... always is default */
2205  other_node->rsc_discover_mode = constraint->discover_mode;
2206  }
2207  }
2208 
2209  g_hash_table_iter_init(&iter, rsc->allowed_nodes);
2210  while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
2211  pe_rsc_trace(rsc, "%s + %s : %d", rsc->id, node->details->uname, node->weight);
2212  }
2213 }
2214 
2215 void
2216 native_expand(resource_t * rsc, pe_working_set_t * data_set)
2217 {
2218  GListPtr gIter = NULL;
2219 
2220  CRM_ASSERT(rsc);
2221  pe_rsc_trace(rsc, "Processing actions from %s", rsc->id);
2222 
2223  for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) {
2224  action_t *action = (action_t *) gIter->data;
2225 
2226  crm_trace("processing action %d for rsc=%s", action->id, rsc->id);
2227  graph_element_from_action(action, data_set);
2228  }
2229 
2230  for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
2231  resource_t *child_rsc = (resource_t *) gIter->data;
2232 
2233  child_rsc->cmds->expand(child_rsc, data_set);
2234  }
2235 }
2236 
2237 #define log_change(a, fmt, args...) do { \
2238  if(a && a->reason && terminal) { \
2239  printf(" * "fmt" \tdue to %s\n", ##args, a->reason); \
2240  } else if(a && a->reason) { \
2241  crm_notice(fmt" \tdue to %s", ##args, a->reason); \
2242  } else if(terminal) { \
2243  printf(" * "fmt"\n", ##args); \
2244  } else { \
2245  crm_notice(fmt, ##args); \
2246  } \
2247  } while(0)
2248 
2249 #define STOP_SANITY_ASSERT(lineno) do { \
2250  if(current && current->details->unclean) { \
2251  /* It will be a pseudo op */ \
2252  } else if(stop == NULL) { \
2253  crm_err("%s:%d: No stop action exists for %s", __FUNCTION__, lineno, rsc->id); \
2254  CRM_ASSERT(stop != NULL); \
2255  } else if(is_set(stop->flags, pe_action_optional)) { \
2256  crm_err("%s:%d: Action %s is still optional", __FUNCTION__, lineno, stop->uuid); \
2257  CRM_ASSERT(is_not_set(stop->flags, pe_action_optional)); \
2258  } \
2259  } while(0)
2260 
2261 static int rsc_width = 5;
2262 static int detail_width = 5;
2263 static void
2264 LogAction(const char *change, resource_t *rsc, pe_node_t *origin, pe_node_t *destination, pe_action_t *action, pe_action_t *source, gboolean terminal)
2265 {
2266  int len = 0;
2267  char *reason = NULL;
2268  char *details = NULL;
2269  bool same_host = FALSE;
2270  bool same_role = FALSE;
2271  bool need_role = FALSE;
2272 
2273  CRM_ASSERT(action);
2274  CRM_ASSERT(destination != NULL || origin != NULL);
2275 
2276  if(source == NULL) {
2277  source = action;
2278  }
2279 
2280  len = strlen(rsc->id);
2281  if(len > rsc_width) {
2282  rsc_width = len + 2;
2283  }
2284 
2285  if(rsc->role > RSC_ROLE_STARTED || rsc->next_role > RSC_ROLE_SLAVE) {
2286  need_role = TRUE;
2287  }
2288 
2289  if(origin != NULL && destination != NULL && origin->details == destination->details) {
2290  same_host = TRUE;
2291  }
2292 
2293  if(rsc->role == rsc->next_role) {
2294  same_role = TRUE;
2295  }
2296 
2297  if(need_role && origin == NULL) {
2298  /* Promoting from Stopped */
2299  details = crm_strdup_printf("%s -> %s %s", role2text(rsc->role), role2text(rsc->next_role), destination->details->uname);
2300 
2301  } else if(need_role && destination == NULL) {
2302  /* Demoting a Master or Stopping a Slave */
2303  details = crm_strdup_printf("%s %s", role2text(rsc->role), origin->details->uname);
2304 
2305  } else if(origin == NULL || destination == NULL) {
2306  /* Starting or stopping a resource */
2307  details = crm_strdup_printf("%s", origin?origin->details->uname:destination->details->uname);
2308 
2309  } else if(need_role && same_role && same_host) {
2310  /* Recovering or restarting a promotable clone resource */
2311  details = crm_strdup_printf("%s %s", role2text(rsc->role), origin->details->uname);
2312 
2313  } else if(same_role && same_host) {
2314  /* Recovering or Restarting a normal resource */
2315  details = crm_strdup_printf("%s", origin->details->uname);
2316 
2317  } else if(same_role && need_role) {
2318  /* Moving a promotable clone resource */
2319  details = crm_strdup_printf("%s -> %s %s", origin->details->uname, destination->details->uname, role2text(rsc->role));
2320 
2321  } else if(same_role) {
2322  /* Moving a normal resource */
2323  details = crm_strdup_printf("%s -> %s", origin->details->uname, destination->details->uname);
2324 
2325  } else if(same_host) {
2326  /* Promoting or demoting a promotable clone resource */
2327  details = crm_strdup_printf("%s -> %s %s", role2text(rsc->role), role2text(rsc->next_role), origin->details->uname);
2328 
2329  } else {
2330  /* Moving and promoting/demoting */
2331  details = crm_strdup_printf("%s %s -> %s %s", role2text(rsc->role), origin->details->uname, role2text(rsc->next_role), destination->details->uname);
2332  }
2333 
2334  len = strlen(details);
2335  if(len > detail_width) {
2336  detail_width = len;
2337  }
2338 
2339  if(source->reason && is_not_set(action->flags, pe_action_runnable)) {
2340  reason = crm_strdup_printf(" due to %s (blocked)", source->reason);
2341 
2342  } else if(source->reason) {
2343  reason = crm_strdup_printf(" due to %s", source->reason);
2344 
2345  } else if(is_not_set(action->flags, pe_action_runnable)) {
2346  reason = strdup(" blocked");
2347 
2348  } else {
2349  reason = strdup("");
2350  }
2351 
2352  if(terminal) {
2353  printf(" * %-8s %-*s ( %*s ) %s\n", change, rsc_width, rsc->id, detail_width, details, reason);
2354  } else {
2355  crm_notice(" * %-8s %-*s ( %*s ) %s", change, rsc_width, rsc->id, detail_width, details, reason);
2356  }
2357 
2358  free(details);
2359  free(reason);
2360 }
2361 
2362 
2363 void
2364 LogActions(resource_t * rsc, pe_working_set_t * data_set, gboolean terminal)
2365 {
2366  node_t *next = NULL;
2367  node_t *current = NULL;
2368  pe_node_t *start_node = NULL;
2369 
2370  action_t *stop = NULL;
2371  action_t *start = NULL;
2372  action_t *demote = NULL;
2373  action_t *promote = NULL;
2374 
2375  char *key = NULL;
2376  gboolean moving = FALSE;
2377  GListPtr possible_matches = NULL;
2378 
2379  if(rsc->variant == pe_container) {
2380  pcmk__bundle_log_actions(rsc, data_set, terminal);
2381  return;
2382  }
2383 
2384  if (rsc->children) {
2385  GListPtr gIter = NULL;
2386 
2387  for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
2388  resource_t *child_rsc = (resource_t *) gIter->data;
2389 
2390  LogActions(child_rsc, data_set, terminal);
2391  }
2392  return;
2393  }
2394 
2395  next = rsc->allocated_to;
2396  if (rsc->running_on) {
2397  current = pe__current_node(rsc);
2398  if (rsc->role == RSC_ROLE_STOPPED) {
2399  /*
2400  * This can occur when resources are being recovered
2401  * We fiddle with the current role in native_create_actions()
2402  */
2403  rsc->role = RSC_ROLE_STARTED;
2404  }
2405  }
2406 
2407  if (current == NULL && is_set(rsc->flags, pe_rsc_orphan)) {
2408  /* Don't log stopped orphans */
2409  return;
2410  }
2411 
2412  if (is_not_set(rsc->flags, pe_rsc_managed)
2413  || (current == NULL && next == NULL)) {
2414  pe_rsc_info(rsc, "Leave %s\t(%s%s)",
2415  rsc->id, role2text(rsc->role), is_not_set(rsc->flags,
2416  pe_rsc_managed) ? " unmanaged" : "");
2417  return;
2418  }
2419 
2420  if (current != NULL && next != NULL && safe_str_neq(current->details->id, next->details->id)) {
2421  moving = TRUE;
2422  }
2423 
2424  possible_matches = pe__resource_actions(rsc, next, RSC_START, FALSE);
2425  if (possible_matches) {
2426  start = possible_matches->data;
2427  g_list_free(possible_matches);
2428  }
2429 
2430  if ((start == NULL) || is_not_set(start->flags, pe_action_runnable)) {
2431  start_node = NULL;
2432  } else {
2433  start_node = current;
2434  }
2435  possible_matches = pe__resource_actions(rsc, start_node, RSC_STOP, FALSE);
2436  if (possible_matches) {
2437  stop = possible_matches->data;
2438  g_list_free(possible_matches);
2439  }
2440 
2441  possible_matches = pe__resource_actions(rsc, next, RSC_PROMOTE, FALSE);
2442  if (possible_matches) {
2443  promote = possible_matches->data;
2444  g_list_free(possible_matches);
2445  }
2446 
2447  possible_matches = pe__resource_actions(rsc, next, RSC_DEMOTE, FALSE);
2448  if (possible_matches) {
2449  demote = possible_matches->data;
2450  g_list_free(possible_matches);
2451  }
2452 
2453  if (rsc->role == rsc->next_role) {
2454  action_t *migrate_op = NULL;
2455 
2456  possible_matches = pe__resource_actions(rsc, next, RSC_MIGRATED, FALSE);
2457  if (possible_matches) {
2458  migrate_op = possible_matches->data;
2459  }
2460 
2461  CRM_CHECK(next != NULL,);
2462  if (next == NULL) {
2463  } else if (migrate_op && is_set(migrate_op->flags, pe_action_runnable) && current) {
2464  LogAction("Migrate", rsc, current, next, start, NULL, terminal);
2465 
2466  } else if (is_set(rsc->flags, pe_rsc_reload)) {
2467  LogAction("Reload", rsc, current, next, start, NULL, terminal);
2468 
2469  } else if (start == NULL || is_set(start->flags, pe_action_optional)) {
2470  pe_rsc_info(rsc, "Leave %s\t(%s %s)", rsc->id, role2text(rsc->role),
2471  next->details->uname);
2472 
2473  } else if (start && is_set(start->flags, pe_action_runnable) == FALSE) {
2474  LogAction("Stop", rsc, current, NULL, stop,
2475  (stop && stop->reason)? stop : start, terminal);
2476  STOP_SANITY_ASSERT(__LINE__);
2477 
2478  } else if (moving && current) {
2479  LogAction(is_set(rsc->flags, pe_rsc_failed) ? "Recover" : "Move",
2480  rsc, current, next, stop, NULL, terminal);
2481 
2482  } else if (is_set(rsc->flags, pe_rsc_failed)) {
2483  LogAction("Recover", rsc, current, NULL, stop, NULL, terminal);
2484  STOP_SANITY_ASSERT(__LINE__);
2485 
2486  } else {
2487  LogAction("Restart", rsc, current, next, start, NULL, terminal);
2488  /* STOP_SANITY_ASSERT(__LINE__); False positive for migrate-fail-7 */
2489  }
2490 
2491  g_list_free(possible_matches);
2492  return;
2493  }
2494 
2495  if(stop
2496  && (rsc->next_role == RSC_ROLE_STOPPED
2497  || (start && is_not_set(start->flags, pe_action_runnable)))) {
2498 
2499  GListPtr gIter = NULL;
2500 
2501  key = stop_key(rsc);
2502  for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
2503  node_t *node = (node_t *) gIter->data;
2504  action_t *stop_op = NULL;
2505 
2506  possible_matches = find_actions(rsc->actions, key, node);
2507  if (possible_matches) {
2508  stop_op = possible_matches->data;
2509  g_list_free(possible_matches);
2510  }
2511 
2512  if (stop_op && (stop_op->flags & pe_action_runnable)) {
2513  STOP_SANITY_ASSERT(__LINE__);
2514  }
2515 
2516  LogAction("Stop", rsc, node, NULL, stop_op,
2517  (stop_op && stop_op->reason)? stop_op : start, terminal);
2518  }
2519 
2520  free(key);
2521 
2522  } else if (stop && is_set(rsc->flags, pe_rsc_failed)) {
2523  /* 'stop' may be NULL if the failure was ignored */
2524  LogAction("Recover", rsc, current, next, stop, start, terminal);
2525  STOP_SANITY_ASSERT(__LINE__);
2526 
2527  } else if (moving) {
2528  LogAction("Move", rsc, current, next, stop, NULL, terminal);
2529  STOP_SANITY_ASSERT(__LINE__);
2530 
2531  } else if (is_set(rsc->flags, pe_rsc_reload)) {
2532  LogAction("Reload", rsc, current, next, start, NULL, terminal);
2533 
2534  } else if (stop != NULL && is_not_set(stop->flags, pe_action_optional)) {
2535  LogAction("Restart", rsc, current, next, start, NULL, terminal);
2536  STOP_SANITY_ASSERT(__LINE__);
2537 
2538  } else if (rsc->role == RSC_ROLE_MASTER) {
2539  CRM_LOG_ASSERT(current != NULL);
2540  LogAction("Demote", rsc, current, next, demote, NULL, terminal);
2541 
2542  } else if(rsc->next_role == RSC_ROLE_MASTER) {
2543  CRM_LOG_ASSERT(next);
2544  LogAction("Promote", rsc, current, next, promote, NULL, terminal);
2545 
2546  } else if (rsc->role == RSC_ROLE_STOPPED && rsc->next_role > RSC_ROLE_STOPPED) {
2547  LogAction("Start", rsc, current, next, start, NULL, terminal);
2548  }
2549 }
2550 
2551 gboolean
2552 StopRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set)
2553 {
2554  GListPtr gIter = NULL;
2555 
2556  CRM_ASSERT(rsc);
2557  pe_rsc_trace(rsc, "%s", rsc->id);
2558 
2559  for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
2560  node_t *current = (node_t *) gIter->data;
2561  action_t *stop;
2562 
2563  if (rsc->partial_migration_target) {
2564  if (rsc->partial_migration_target->details == current->details) {
2565  pe_rsc_trace(rsc, "Filtered %s -> %s %s", current->details->uname,
2566  next->details->uname, rsc->id);
2567  continue;
2568  } else {
2569  pe_rsc_trace(rsc, "Forced on %s %s", current->details->uname, rsc->id);
2570  optional = FALSE;
2571  }
2572  }
2573 
2574  pe_rsc_trace(rsc, "%s on %s", rsc->id, current->details->uname);
2575  stop = stop_action(rsc, current, optional);
2576 
2577  if(rsc->allocated_to == NULL) {
2578  pe_action_set_reason(stop, "node availability", TRUE);
2579  }
2580 
2581  if (is_not_set(rsc->flags, pe_rsc_managed)) {
2582  update_action_flags(stop, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__);
2583  }
2584 
2585  if (is_set(data_set->flags, pe_flag_remove_after_stop)) {
2586  DeleteRsc(rsc, current, optional, data_set);
2587  }
2588 
2589  if(is_set(rsc->flags, pe_rsc_needs_unfencing)) {
2590  action_t *unfence = pe_fence_op(current, "on", TRUE, NULL, data_set);
2591 
2592  order_actions(stop, unfence, pe_order_implies_first);
2593  if (!node_has_been_unfenced(current)) {
2594  pe_proc_err("Stopping %s until %s can be unfenced", rsc->id, current->details->uname);
2595  }
2596  }
2597  }
2598 
2599  return TRUE;
2600 }
2601 
2602 static void
2603 order_after_unfencing(resource_t *rsc, pe_node_t *node, action_t *action,
2604  enum pe_ordering order, pe_working_set_t *data_set)
2605 {
2606  /* When unfencing is in use, we order unfence actions before any probe or
2607  * start of resources that require unfencing, and also of fence devices.
2608  *
2609  * This might seem to violate the principle that fence devices require
2610  * only quorum. However, fence agents that unfence often don't have enough
2611  * information to even probe or start unless the node is first unfenced.
2612  */
2613  if (is_unfence_device(rsc, data_set)
2614  || is_set(rsc->flags, pe_rsc_needs_unfencing)) {
2615 
2616  /* Start with an optional ordering. Requiring unfencing would result in
2617  * the node being unfenced, and all its resources being stopped,
2618  * whenever a new resource is added -- which would be highly suboptimal.
2619  */
2620  action_t *unfence = pe_fence_op(node, "on", TRUE, NULL, data_set);
2621 
2622  order_actions(unfence, action, order);
2623 
2624  if (!node_has_been_unfenced(node)) {
2625  // But unfencing is required if it has never been done
2626  char *reason = crm_strdup_printf("required by %s %s",
2627  rsc->id, action->task);
2628 
2629  trigger_unfencing(NULL, node, reason, NULL, data_set);
2630  free(reason);
2631  }
2632  }
2633 }
2634 
2635 gboolean
2636 StartRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set)
2637 {
2638  action_t *start = NULL;
2639 
2640  CRM_ASSERT(rsc);
2641  pe_rsc_trace(rsc, "%s on %s %d %d", rsc->id, next ? next->details->uname : "N/A", optional, next ? next->weight : 0);
2642  start = start_action(rsc, next, TRUE);
2643 
2644  order_after_unfencing(rsc, next, start, pe_order_implies_then, data_set);
2645 
2646  if (is_set(start->flags, pe_action_runnable) && optional == FALSE) {
2647  update_action_flags(start, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__);
2648  }
2649 
2650 
2651  return TRUE;
2652 }
2653 
2654 gboolean
2655 PromoteRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set)
2656 {
2657  GListPtr gIter = NULL;
2658  gboolean runnable = TRUE;
2659  GListPtr action_list = NULL;
2660 
2661  CRM_ASSERT(rsc);
2662  CRM_CHECK(next != NULL, return FALSE);
2663  pe_rsc_trace(rsc, "%s on %s", rsc->id, next->details->uname);
2664 
2665  action_list = pe__resource_actions(rsc, next, RSC_START, TRUE);
2666 
2667  for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
2668  action_t *start = (action_t *) gIter->data;
2669 
2670  if (is_set(start->flags, pe_action_runnable) == FALSE) {
2671  runnable = FALSE;
2672  }
2673  }
2674  g_list_free(action_list);
2675 
2676  if (runnable) {
2677  promote_action(rsc, next, optional);
2678  return TRUE;
2679  }
2680 
2681  pe_rsc_debug(rsc, "%s\tPromote %s (canceled)", next->details->uname, rsc->id);
2682 
2683  action_list = pe__resource_actions(rsc, next, RSC_PROMOTE, TRUE);
2684 
2685  for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
2686  action_t *promote = (action_t *) gIter->data;
2687 
2688  update_action_flags(promote, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__);
2689  }
2690 
2691  g_list_free(action_list);
2692  return TRUE;
2693 }
2694 
2695 gboolean
2696 DemoteRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set)
2697 {
2698  GListPtr gIter = NULL;
2699 
2700  CRM_ASSERT(rsc);
2701  pe_rsc_trace(rsc, "%s", rsc->id);
2702 
2703 /* CRM_CHECK(rsc->next_role == RSC_ROLE_SLAVE, return FALSE); */
2704  for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
2705  node_t *current = (node_t *) gIter->data;
2706 
2707  pe_rsc_trace(rsc, "%s on %s", rsc->id, next ? next->details->uname : "N/A");
2708  demote_action(rsc, current, optional);
2709  }
2710  return TRUE;
2711 }
2712 
2713 gboolean
2714 RoleError(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set)
2715 {
2716  CRM_ASSERT(rsc);
2717  crm_err("%s on %s", rsc->id, next ? next->details->uname : "N/A");
2718  CRM_CHECK(FALSE, return FALSE);
2719  return FALSE;
2720 }
2721 
2722 gboolean
2723 NullOp(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set)
2724 {
2725  CRM_ASSERT(rsc);
2726  pe_rsc_trace(rsc, "%s", rsc->id);
2727  return FALSE;
2728 }
2729 
2730 gboolean
2731 DeleteRsc(resource_t * rsc, node_t * node, gboolean optional, pe_working_set_t * data_set)
2732 {
2733  if (is_set(rsc->flags, pe_rsc_failed)) {
2734  pe_rsc_trace(rsc, "Resource %s not deleted from %s: failed", rsc->id, node->details->uname);
2735  return FALSE;
2736 
2737  } else if (node == NULL) {
2738  pe_rsc_trace(rsc, "Resource %s not deleted: NULL node", rsc->id);
2739  return FALSE;
2740 
2741  } else if (node->details->unclean || node->details->online == FALSE) {
2742  pe_rsc_trace(rsc, "Resource %s not deleted from %s: unrunnable", rsc->id,
2743  node->details->uname);
2744  return FALSE;
2745  }
2746 
2747  crm_notice("Removing %s from %s", rsc->id, node->details->uname);
2748 
2749  delete_action(rsc, node, optional);
2750 
2751  new_rsc_order(rsc, RSC_STOP, rsc, RSC_DELETE,
2752  optional ? pe_order_implies_then : pe_order_optional, data_set);
2753 
2754  new_rsc_order(rsc, RSC_DELETE, rsc, RSC_START,
2755  optional ? pe_order_implies_then : pe_order_optional, data_set);
2756 
2757  return TRUE;
2758 }
2759 
2760 gboolean
2761 native_create_probe(resource_t * rsc, node_t * node, action_t * complete,
2762  gboolean force, pe_working_set_t * data_set)
2763 {
2764  enum pe_ordering flags = pe_order_optional;
2765  char *key = NULL;
2766  action_t *probe = NULL;
2767  node_t *running = NULL;
2768  node_t *allowed = NULL;
2769  resource_t *top = uber_parent(rsc);
2770 
2771  static const char *rc_master = NULL;
2772  static const char *rc_inactive = NULL;
2773 
2774  if (rc_inactive == NULL) {
2775  rc_inactive = crm_itoa(PCMK_OCF_NOT_RUNNING);
2776  rc_master = crm_itoa(PCMK_OCF_RUNNING_MASTER);
2777  }
2778 
2779  CRM_CHECK(node != NULL, return FALSE);
2780  if (force == FALSE && is_not_set(data_set->flags, pe_flag_startup_probes)) {
2781  pe_rsc_trace(rsc, "Skipping active resource detection for %s", rsc->id);
2782  return FALSE;
2783  }
2784 
2785  if (pe__is_guest_or_remote_node(node)) {
2786  const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
2787 
2789  pe_rsc_trace(rsc,
2790  "Skipping probe for %s on %s because Pacemaker Remote nodes cannot run stonith agents",
2791  rsc->id, node->details->id);
2792  return FALSE;
2793  } else if (pe__is_guest_node(node)
2794  && pe__resource_contains_guest_node(data_set, rsc)) {
2795  pe_rsc_trace(rsc,
2796  "Skipping probe for %s on %s because guest nodes cannot run resources containing guest nodes",
2797  rsc->id, node->details->id);
2798  return FALSE;
2799  } else if (rsc->is_remote_node) {
2800  pe_rsc_trace(rsc,
2801  "Skipping probe for %s on %s because Pacemaker Remote nodes cannot host remote connections",
2802  rsc->id, node->details->id);
2803  return FALSE;
2804  }
2805  }
2806 
2807  if (rsc->children) {
2808  GListPtr gIter = NULL;
2809  gboolean any_created = FALSE;
2810 
2811  for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
2812  resource_t *child_rsc = (resource_t *) gIter->data;
2813 
2814  any_created = child_rsc->cmds->create_probe(child_rsc, node, complete, force, data_set)
2815  || any_created;
2816  }
2817 
2818  return any_created;
2819 
2820  } else if ((rsc->container) && (!rsc->is_remote_node)) {
2821  pe_rsc_trace(rsc, "Skipping %s: it is within container %s", rsc->id, rsc->container->id);
2822  return FALSE;
2823  }
2824 
2825  if (is_set(rsc->flags, pe_rsc_orphan)) {
2826  pe_rsc_trace(rsc, "Skipping orphan: %s", rsc->id);
2827  return FALSE;
2828  }
2829 
2830  // Check whether resource is already known on node
2831  if (!force && g_hash_table_lookup(rsc->known_on, node->details->id)) {
2832  pe_rsc_trace(rsc, "Skipping known: %s on %s", rsc->id, node->details->uname);
2833  return FALSE;
2834  }
2835 
2836  allowed = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
2837 
2838  if (rsc->exclusive_discover || top->exclusive_discover) {
2839  if (allowed == NULL) {
2840  /* exclusive discover is enabled and this node is not in the allowed list. */
2841  pe_rsc_trace(rsc, "Skipping probe for %s on node %s, A", rsc->id, node->details->id);
2842  return FALSE;
2843  } else if (allowed->rsc_discover_mode != pe_discover_exclusive) {
2844  /* exclusive discover is enabled and this node is not marked
2845  * as a node this resource should be discovered on */
2846  pe_rsc_trace(rsc, "Skipping probe for %s on node %s, B", rsc->id, node->details->id);
2847  return FALSE;
2848  }
2849  }
2850 
2851  if(allowed == NULL && node->rsc_discover_mode == pe_discover_never) {
2852  /* If this node was allowed to host this resource it would
2853  * have been explicitly added to the 'allowed_nodes' list.
2854  * However it wasn't and the node has discovery disabled, so
2855  * no need to probe for this resource.
2856  */
2857  pe_rsc_trace(rsc, "Skipping probe for %s on node %s, C", rsc->id, node->details->id);
2858  return FALSE;
2859  }
2860 
2861  if (allowed && allowed->rsc_discover_mode == pe_discover_never) {
2862  /* this resource is marked as not needing to be discovered on this node */
2863  pe_rsc_trace(rsc, "Skipping probe for %s on node %s, discovery mode", rsc->id, node->details->id);
2864  return FALSE;
2865  }
2866 
2867  if (pe__is_guest_node(node)) {
2868  resource_t *remote = node->details->remote_rsc->container;
2869 
2870  if(remote->role == RSC_ROLE_STOPPED) {
2871  /* If the container is stopped, then we know anything that
2872  * might have been inside it is also stopped and there is
2873  * no need to probe.
2874  *
2875  * If we don't know the container's state on the target
2876  * either:
2877  *
2878  * - the container is running, the transition will abort
2879  * and we'll end up in a different case next time, or
2880  *
2881  * - the container is stopped
2882  *
2883  * Either way there is no need to probe.
2884  *
2885  */
2886  if(remote->allocated_to
2887  && g_hash_table_lookup(remote->known_on, remote->allocated_to->details->id) == NULL) {
2888  /* For safety, we order the 'rsc' start after 'remote'
2889  * has been probed.
2890  *
2891  * Using 'top' helps for groups, but we may need to
2892  * follow the start's ordering chain backwards.
2893  */
2894  custom_action_order(remote, generate_op_key(remote->id, RSC_STATUS, 0), NULL,
2895  top, generate_op_key(top->id, RSC_START, 0), NULL,
2896  pe_order_optional, data_set);
2897  }
2898  pe_rsc_trace(rsc, "Skipping probe for %s on node %s, %s is stopped",
2899  rsc->id, node->details->id, remote->id);
2900  return FALSE;
2901 
2902  /* Here we really want to check if remote->stop is required,
2903  * but that information doesn't exist yet
2904  */
2905  } else if(node->details->remote_requires_reset
2906  || node->details->unclean
2907  || is_set(remote->flags, pe_rsc_failed)
2908  || remote->next_role == RSC_ROLE_STOPPED
2909  || (remote->allocated_to
2910  && pe_find_node(remote->running_on, remote->allocated_to->details->uname) == NULL)
2911  ) {
2912  /* The container is stopping or restarting, don't start
2913  * 'rsc' until 'remote' stops as this also implies that
2914  * 'rsc' is stopped - avoiding the need to probe
2915  */
2916  custom_action_order(remote, generate_op_key(remote->id, RSC_STOP, 0), NULL,
2917  top, generate_op_key(top->id, RSC_START, 0), NULL,
2918  pe_order_optional, data_set);
2919  pe_rsc_trace(rsc, "Skipping probe for %s on node %s, %s is stopping, restarting or moving",
2920  rsc->id, node->details->id, remote->id);
2921  return FALSE;
2922 /* } else {
2923  * The container is running so there is no problem probing it
2924  */
2925  }
2926  }
2927 
2928  key = generate_op_key(rsc->id, RSC_STATUS, 0);
2929  probe = custom_action(rsc, key, RSC_STATUS, node, FALSE, TRUE, data_set);
2930  update_action_flags(probe, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__);
2931 
2932  order_after_unfencing(rsc, node, probe, pe_order_optional, data_set);
2933 
2934  /*
2935  * We need to know if it's running_on (not just known_on) this node
2936  * to correctly determine the target rc.
2937  */
2938  running = pe_find_node_id(rsc->running_on, node->details->id);
2939  if (running == NULL) {
2940  add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, rc_inactive);
2941 
2942  } else if (rsc->role == RSC_ROLE_MASTER) {
2943  add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, rc_master);
2944  }
2945 
2946  crm_debug("Probing %s on %s (%s) %d %p", rsc->id, node->details->uname, role2text(rsc->role),
2947  is_set(probe->flags, pe_action_runnable), rsc->running_on);
2948 
2949  if (is_unfence_device(rsc, data_set) || !pe_rsc_is_clone(top)) {
2950  top = rsc;
2951  } else {
2952  crm_trace("Probing %s on %s (%s) as %s", rsc->id, node->details->uname, role2text(rsc->role), top->id);
2953  }
2954 
2955  if(is_not_set(probe->flags, pe_action_runnable) && rsc->running_on == NULL) {
2956  /* Prevent the start from occurring if rsc isn't active, but
2957  * don't cause it to stop if it was active already
2958  */
2959  flags |= pe_order_runnable_left;
2960  }
2961 
2962  custom_action_order(rsc, NULL, probe,
2963  top, generate_op_key(top->id, RSC_START, 0), NULL,
2964  flags, data_set);
2965 
2966  /* Before any reloads, if they exist */
2967  custom_action_order(rsc, NULL, probe,
2968  top, reload_key(rsc), NULL,
2969  pe_order_optional, data_set);
2970 
2971 #if 0
2972  // complete is always null currently
2973  if (!is_unfence_device(rsc, data_set)) {
2974  /* Normally rsc.start depends on probe complete which depends
2975  * on rsc.probe. But this can't be the case for fence devices
2976  * with unfencing, as it would create graph loops.
2977  *
2978  * So instead we explicitly order 'rsc.probe then rsc.start'
2979  */
2980  order_actions(probe, complete, pe_order_implies_then);
2981  }
2982 #endif
2983  return TRUE;
2984 }
2985 
2995 static bool
2996 rsc_is_known_on(pe_resource_t *rsc, const pe_node_t *node)
2997 {
2998  if (pe_hash_table_lookup(rsc->known_on, node->details->id)) {
2999  return TRUE;
3000 
3001  } else if ((rsc->variant == pe_native)
3002  && pe_rsc_is_anon_clone(rsc->parent)
3003  && pe_hash_table_lookup(rsc->parent->known_on, node->details->id)) {
3004  /* We check only the parent, not the uber-parent, because we cannot
3005  * assume that the resource is known if it is in an anonymously cloned
3006  * group (which may be only partially known).
3007  */
3008  return TRUE;
3009  }
3010  return FALSE;
3011 }
3012 
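3013 /*!
3014  * \internal
3015  * \brief Order a resource's start and promote actions relative to fencing
3016  *
3017  * \param[in] rsc         Resource to be ordered
3018  * \param[in] stonith_op  Fencing operation
3019  * \param[in] data_set    Cluster working set
3020  */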
3021 static void
3022 native_start_constraints(resource_t * rsc, action_t * stonith_op, pe_working_set_t * data_set)
3023 {
3024  node_t *target;
3025  GListPtr gIter = NULL;
3026 
3027  CRM_CHECK(stonith_op && stonith_op->node, return);
3028  target = stonith_op->node;
3029 
3030  for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) {
3031  action_t *action = (action_t *) gIter->data;
3032 
3033  switch (action->needs) {
3034  case rsc_req_nothing:
3035  // Anything other than start or promote requires nothing
3036  break;
3037 
3038  case rsc_req_stonith:
3039  order_actions(stonith_op, action, pe_order_optional);
3040  break;
3041 
3042  case rsc_req_quorum:
3043  if (safe_str_eq(action->task, RSC_START)
3044  && pe_hash_table_lookup(rsc->allowed_nodes, target->details->id)
3045  && !rsc_is_known_on(rsc, target)) {
3046 
3047  /* If we don't know the status of the resource on the node
3048  * we're about to shoot, we have to assume it may be active
3049  * there. Order the resource start after the fencing. This
3050  * is analogous to waiting for all the probes for a resource
3051  * to complete before starting it.
3052  *
3053  * The most likely explanation is that the DC died and took
3054  * its status with it.
3055  */
3056  pe_rsc_debug(rsc, "Ordering %s after %s recovery", action->uuid,
3057  target->details->uname);
3058  order_actions(stonith_op, action,
3059  pe_order_optional | pe_order_runnable_left);
3060  }
3061  break;
3062  }
3063  }
3064 }
3065 
3066 static void
3067 native_stop_constraints(resource_t * rsc, action_t * stonith_op, pe_working_set_t * data_set)
3068 {
3069  GListPtr gIter = NULL;
3070  GListPtr action_list = NULL;
3071  bool order_implicit = false;
3072 
3073  resource_t *top = uber_parent(rsc);
3074  pe_action_t *parent_stop = NULL;
3075  node_t *target;
3076 
3077  CRM_CHECK(stonith_op && stonith_op->node, return);
3078  target = stonith_op->node;
3079 
3080  /* Get a list of stop actions potentially implied by the fencing */
3081  action_list = pe__resource_actions(rsc, target, RSC_STOP, FALSE);
3082 
3083  /* If resource requires fencing, implicit actions must occur after fencing.
3084  *
3085  * Implied stops and demotes of resources running on guest nodes are always
3086  * ordered after fencing, even if the resource does not require fencing,
3087  * because guest node "fencing" is actually just a resource stop.
3088  */
3089  if (is_set(rsc->flags, pe_rsc_needs_fencing) || pe__is_guest_node(target)) {
3090  order_implicit = true;
3091  }
3092 
3093  if (action_list && order_implicit) {
3094  parent_stop = find_first_action(top->actions, NULL, RSC_STOP, NULL);
3095  }
3096 
3097  for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
3098  action_t *action = (action_t *) gIter->data;
3099 
3100  // The stop would never complete, so convert it into a pseudo-action.
3101  update_action_flags(action, pe_action_pseudo,
3102  __FUNCTION__, __LINE__);
3103 
3104  if (order_implicit) {
3105  update_action_flags(action, pe_action_implied_by_stonith,
3106  __FUNCTION__, __LINE__);
3107 
3108  /* Order the stonith before the parent stop (if any).
3109  *
3110  * Also order the stonith before the resource stop, unless the
3111  * resource is inside a bundle -- that would cause a graph loop.
3112  * We can rely on the parent stop's ordering instead.
3113  *
3114  * User constraints must not order a resource in a guest node
3115  * relative to the guest node container resource. The
3116  * pe_order_preserve flag marks constraints as generated by the
3117  * cluster and thus immune to that check (and is irrelevant if
3118  * target is not a guest).
3119  */
3120  if (!pe_rsc_is_bundled(rsc)) {
3121  order_actions(stonith_op, action, pe_order_preserve);
3122  }
3123  order_actions(stonith_op, parent_stop, pe_order_preserve);
3124  }
3125 
3126  if (is_set(rsc->flags, pe_rsc_failed)) {
3127  crm_notice("Stop of failed resource %s is implicit %s %s is fenced",
3128  rsc->id, (order_implicit? "after" : "because"),
3129  target->details->uname);
3130  } else {
3131  crm_info("%s is implicit %s %s is fenced",
3132  action->uuid, (order_implicit? "after" : "because"),
3133  target->details->uname);
3134  }
3135 
3136  if (is_set(rsc->flags, pe_rsc_notify)) {
3137  /* Create a second notification that will be delivered
3138  * immediately after the node is fenced
3139  *
3140  * Basic problem:
3141  * - C is a clone active on the node to be shot and stopping on another
3142  * - R is a resource that depends on C
3143  *
3144  * + C.stop depends on R.stop
3145  * + C.stopped depends on STONITH
3146  * + C.notify depends on C.stopped
3147  * + C.healthy depends on C.notify
3148  * + R.stop depends on C.healthy
3149  *
3150  * The extra notification here changes
3151  * + C.healthy depends on C.notify
3152  * into:
3153  * + C.healthy depends on C.notify'
3154  * + C.notify' depends on STONITH'
3155  * thus breaking the loop
3156  */
3157  create_secondary_notification(action, rsc, stonith_op, data_set);
3158  }
3159 
3160 /* From Bug #1601, successful fencing must be an input to a failed resource's stop action.
3161 
3162  However given group(rA, rB) running on nodeX and B.stop has failed,
3163  A := stop healthy resource (rA.stop)
3164  B := stop failed resource (pseudo operation B.stop)
3165  C := stonith nodeX
3166  A requires B, B requires C, C requires A
3167  This loop would prevent the cluster from making progress.
3168 
3169  This block creates the "C requires A" dependency and therefore must (at least
3170  for now) be disabled.
3171 
3172  Instead, run the block above and treat all resources on nodeX as B would be
3173  (marked as a pseudo op depending on the STONITH).
3174 
3175  TODO: Break the "A requires B" dependency in update_action() and re-enable this block
3176 
3177  } else if(is_stonith == FALSE) {
3178  crm_info("Moving healthy resource %s"
3179  " off %s before fencing",
3180  rsc->id, node->details->uname);
3181 
3182  * stop healthy resources before the
3183  * stonith op
3184  *
3185  custom_action_order(
3186  rsc, stop_key(rsc), NULL,
3187  NULL,strdup(CRM_OP_FENCE),stonith_op,
3188  pe_order_optional, data_set);
3189 */
3190  }
3191 
3192  g_list_free(action_list);
3193 
3194  /* Get a list of demote actions potentially implied by the fencing */
3195  action_list = pe__resource_actions(rsc, target, RSC_DEMOTE, FALSE);
3196 
3197  for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
3198  action_t *action = (action_t *) gIter->data;
3199 
3200  if (action->node->details->online == FALSE || action->node->details->unclean == TRUE
3201  || is_set(rsc->flags, pe_rsc_failed)) {
3202 
3203  if (is_set(rsc->flags, pe_rsc_failed)) {
3204  pe_rsc_info(rsc,
3205  "Demote of failed resource %s is implicit after %s is fenced",
3206  rsc->id, target->details->uname);
3207  } else {
3208  pe_rsc_info(rsc, "%s is implicit after %s is fenced",
3209  action->uuid, target->details->uname);
3210  }
3211 
3212  /* The demote would never complete and is now implied by the
3213  * fencing, so convert it into a pseudo-action.
3214  */
3215  update_action_flags(action, pe_action_pseudo,
3216  __FUNCTION__, __LINE__);
3217 
3218  if (pe_rsc_is_bundled(rsc)) {
3219  /* Do nothing, let the recovery be ordered after the parent's implied stop */
3220 
3221  } else if (order_implicit) {
3222  order_actions(stonith_op, action, pe_order_preserve|pe_order_optional);
3223  }
3224  }
3225  }
3226 
3227  g_list_free(action_list);
3228 }
3229 
3230 void
3231 rsc_stonith_ordering(resource_t * rsc, action_t * stonith_op, pe_working_set_t * data_set)
3232 {
3233  if (rsc->children) {
3234  GListPtr gIter = NULL;
3235 
3236  for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
3237  resource_t *child_rsc = (resource_t *) gIter->data;
3238 
3239  rsc_stonith_ordering(child_rsc, stonith_op, data_set);
3240  }
3241 
3242  } else if (is_not_set(rsc->flags, pe_rsc_managed)) {
3243  pe_rsc_trace(rsc, "Skipping fencing constraints for unmanaged resource: %s", rsc->id);
3244 
3245  } else {
3246  native_start_constraints(rsc, stonith_op, data_set);
3247  native_stop_constraints(rsc, stonith_op, data_set);
3248  }
3249 }
3250 
3251 void
3252 ReloadRsc(resource_t * rsc, node_t *node, pe_working_set_t * data_set)
3253 {
3254  GListPtr gIter = NULL;
3255  action_t *reload = NULL;
3256 
3257  if (rsc->children) {
3258  for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
3259  resource_t *child_rsc = (resource_t *) gIter->data;
3260 
3261  ReloadRsc(child_rsc, node, data_set);
3262  }
3263  return;
3264 
3265  } else if (rsc->variant > pe_native) {
3266  /* Complex resource with no children */
3267  return;
3268 
3269  } else if (is_not_set(rsc->flags, pe_rsc_managed)) {
3270  pe_rsc_trace(rsc, "%s: unmanaged", rsc->id);
3271  return;
3272 
3273  } else if (is_set(rsc->flags, pe_rsc_failed) || is_set(rsc->flags, pe_rsc_start_pending)) {
3274  pe_rsc_trace(rsc, "%s: general resource state: flags=0x%.16llx", rsc->id, rsc->flags);
3275  stop_action(rsc, node, FALSE); /* Force a full restart, overkill? */
3276  return;
3277 
3278  } else if (node == NULL) {
3279  pe_rsc_trace(rsc, "%s: not active", rsc->id);
3280  return;
3281  }
3282 
3283  pe_rsc_trace(rsc, "Processing %s", rsc->id);
3284  set_bit(rsc->flags, pe_rsc_reload);
3285 
3286  reload = custom_action(
3287  rsc, reload_key(rsc), CRMD_ACTION_RELOAD, node, FALSE, TRUE, data_set);
3288  pe_action_set_reason(reload, "resource definition change", FALSE);
3289 
3290  custom_action_order(NULL, NULL, reload, rsc, stop_key(rsc), NULL,
3291  pe_order_optional | pe_order_then_cancels_first,
3292  data_set);
3293  custom_action_order(NULL, NULL, reload, rsc, demote_key(rsc), NULL,
3294  pe_order_optional | pe_order_then_cancels_first,
3295  data_set);
3296 }
3297 
3298 void
3299 native_append_meta(resource_t * rsc, xmlNode * xml)
3300 {
3301  char *value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION);
3302  resource_t *parent;
3303 
3304  if (value) {
3305  char *name = NULL;
3306 
3307  name = crm_meta_name(XML_RSC_ATTR_INCARNATION);
3308  crm_xml_add(xml, name, value);
3309  free(name);
3310  }
3311 
3312  value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_REMOTE_NODE);
3313  if (value) {
3314  char *name = NULL;
3315 
3316  name = crm_meta_name(XML_RSC_ATTR_REMOTE_NODE);
3317  crm_xml_add(xml, name, value);
3318  free(name);
3319  }
3320 
3321  for (parent = rsc; parent != NULL; parent = parent->parent) {
3322  if (parent->container) {
3323  crm_xml_add(xml, CRM_META "_" XML_RSC_ATTR_CONTAINER, parent->container->id);
3324  }
3325  }
3326 }
Definition: pe_types.h:421
PCMK_RESOURCE_CLASS_STONITH
#define PCMK_RESOURCE_CLASS_STONITH
Definition: services.h:49
pe_resource_s::known_on
GHashTable * known_on
Definition: pe_types.h:338
pe_resource_s::children
GListPtr children
Definition: pe_types.h:348
pe_resource_s::id
char * id
Definition: pe_types.h:292
pe_rsc_allow_remote_remotes
#define pe_rsc_allow_remote_remotes
Definition: pe_types.h:239
pe_resource_s::allocated_to
pe_node_t * allocated_to
Definition: pe_types.h:334
stop_action
#define stop_action(rsc, node, optional)
Definition: internal.h:230
new_rsc_order
int new_rsc_order(resource_t *lh_rsc, const char *lh_task, resource_t *rh_rsc, const char *rh_task, enum pe_ordering type, pe_working_set_t *data_set)
Definition: pcmk_sched_constraints.c:1398
native_create_actions
void native_create_actions(resource_t *rsc, pe_working_set_t *data_set)
Definition: pcmk_sched_native.c:1119
influence_rsc_location
@ influence_rsc_location
Definition: pcmki_sched_utils.h:66
rsc_role_e
rsc_role_e
Definition: common.h:86
pe__location_constraint_s::id
char * id
Definition: internal.h:29
pe_weights_init
@ pe_weights_init
Definition: pcmki_scheduler.h:34
RSC_MIGRATED
#define RSC_MIGRATED
Definition: crm.h:194
CRM_CHECK
#define CRM_CHECK(expr, failure_action)
Definition: logging.h:157
dump_node_scores
#define dump_node_scores(level, rsc, text, nodes)
Definition: internal.h:206
pe_node_s::weight
int weight
Definition: pe_types.h:217
pe_action_pseudo
@ pe_action_pseudo
Definition: pe_types.h:266
clear_bit
#define clear_bit(word, bit)
Definition: crm_internal.h:168
rsc_ticket_s::role_lh
int role_lh
Definition: pcmki_scheduler.h:65
CRM_ATTR_UNAME
#define CRM_ATTR_UNAME
Definition: crm.h:110
graph_element_from_action
void graph_element_from_action(action_t *action, pe_working_set_t *data_set)
Definition: pcmk_sched_graph.c:1754
pe_rsc_fence_device
#define pe_rsc_fence_device
Definition: pe_types.h:231
pe_node_s::details
struct pe_node_shared_s * details
Definition: pe_types.h:220
custom_action
action_t * custom_action(resource_t *rsc, char *key, const char *task, node_t *on_node, gboolean optional, gboolean foo, pe_working_set_t *data_set)
Definition: utils.c:455
crm_notice
#define crm_notice(fmt, args...)
Definition: logging.h:243
rsc_colocation_s::rsc_lh
resource_t * rsc_lh
Definition: pcmki_scheduler.h:43
pe_node_shared_s::id
const char * id
Definition: pe_types.h:185
rsc_colocation_s::node_attribute
const char * node_attribute
Definition: pcmki_scheduler.h:42
type
enum crm_ais_msg_types type
Definition: internal.h:5
crm_err
#define crm_err(fmt, args...)
Definition: logging.h:241
pe_weights_forward
@ pe_weights_forward
Definition: pcmki_scheduler.h:35
LogActions
void LogActions(resource_t *rsc, pe_working_set_t *data_set, gboolean terminal)
Definition: pcmk_sched_native.c:2364
native_assign_node
gboolean native_assign_node(resource_t *rsc, GListPtr candidates, node_t *chosen, gboolean force)
Definition: pcmk_sched_utils.c:212
XML_LRM_ATTR_INTERVAL
#define XML_LRM_ATTR_INTERVAL
Definition: msg_xml.h:254
crm_trace
#define crm_trace(fmt, args...)
Definition: logging.h:247
pe_resource_s::meta
GHashTable * meta
Definition: pe_types.h:344
pe_action_print_always
@ pe_action_print_always
Definition: pe_types.h:269
safe_str_eq
#define safe_str_eq(a, b)
Definition: util.h:61
rsc_colocation_s::score
int score
Definition: pcmki_scheduler.h:49
pe_action_implies
#define pe_action_implies(action, reason, flag)
Definition: internal.h:349
uber_parent
pe_resource_t * uber_parent(pe_resource_t *rsc)
Definition: complex.c:765
native_action_flags
enum pe_action_flags native_action_flags(action_t *action, node_t *node)
Definition: pcmk_sched_native.c:1917
pe__is_guest_or_remote_node
gboolean pe__is_guest_or_remote_node(pe_node_t *node)
Definition: remote.c:58
trigger_unfencing
void trigger_unfencing(resource_t *rsc, node_t *node, const char *reason, action_t *dependency, pe_working_set_t *data_set)
Definition: utils.c:2371
pe_resource_s::recovery_type
enum rsc_recovery_type recovery_type
Definition: pe_types.h:306
pe_action_s::flags
enum pe_action_flags flags
Definition: pe_types.h:382
rsc_colocation_s::id
const char * id
Definition: pcmki_scheduler.h:41
create_secondary_notification
void create_secondary_notification(pe_action_t *action, resource_t *rsc, pe_action_t *stonith_op, pe_working_set_t *data_set)
Definition: pcmk_sched_notif.c:821
pe_order_asymmetrical
@ pe_order_asymmetrical
Definition: pe_types.h:478
pe_group
@ pe_group
Definition: pe_types.h:38
pe_resource_s::running_on
GListPtr running_on
Definition: pe_types.h:337
pe_resource_s::partial_migration_target
pe_node_t * partial_migration_target
Definition: pe_types.h:335
rsc_colocation_new
gboolean rsc_colocation_new(const char *id, const char *node_attr, int score, resource_t *rsc_lh, resource_t *rsc_rh, const char *state_lh, const char *state_rh, pe_working_set_t *data_set)
Definition: pcmk_sched_constraints.c:1339
rsc_stonith_ordering
void rsc_stonith_ordering(resource_t *rsc, action_t *stonith_op, pe_working_set_t *data_set)
Definition: pcmk_sched_native.c:3231
CRM_ATTR_UNFENCED
#define CRM_ATTR_UNFENCED
Definition: crm.h:117
pe_working_set_s::placement_strategy
const char * placement_strategy
Definition: pe_types.h:125
loss_ticket_stop
@ loss_ticket_stop
Definition: pcmki_scheduler.h:53
pe_node_shared_s::remote_requires_reset
gboolean remote_requires_reset
Definition: pe_types.h:201
set_bit
#define set_bit(word, bit)
Definition: crm_internal.h:167
get_pseudo_op
action_t * get_pseudo_op(const char *name, pe_working_set_t *data_set)
Definition: utils.c:1830
rsc_colocation_s::rsc_rh
resource_t * rsc_rh
Definition: pcmki_scheduler.h:44
CRM_ATTR_ID
#define CRM_ATTR_ID
Definition: crm.h:111
custom_action_order
int custom_action_order(resource_t *lh_rsc, char *lh_task, action_t *lh_action, resource_t *rh_rsc, char *rh_task, action_t *rh_action, enum pe_ordering type, pe_working_set_t *data_set)
Definition: pcmk_sched_constraints.c:1564
native_expand
void native_expand(resource_t *rsc, pe_working_set_t *data_set)
Definition: pcmk_sched_native.c:2216
influence_rsc_priority
@ influence_rsc_priority
Definition: pcmki_sched_utils.h:67
native_update_actions
enum pe_graph_flags native_update_actions(pe_action_t *first, pe_action_t *then, pe_node_t *node, enum pe_action_flags flags, enum pe_action_flags filter, enum pe_ordering type, pe_working_set_t *data_set)
Definition: pcmk_sched_native.c:1996
pe_action_s::uuid
char * uuid
Definition: pe_types.h:378
ID
#define ID(x)
Definition: msg_xml.h:415
pe_weights
pe_weights
Definition: pcmki_scheduler.h:32
demote_key
#define demote_key(rsc)
Definition: internal.h:255
pe_order_same_node
@ pe_order_same_node
Definition: pe_types.h:473
rsc_ticket_s::loss_policy
enum loss_ticket_policy_e loss_policy
Definition: pcmki_scheduler.h:63
RSC_START
#define RSC_START
Definition: crm.h:196
pe_ticket_s::id
char * id
Definition: pe_types.h:420
promote_action
#define promote_action(rsc, node, optional)
Definition: internal.h:246
pe_order_load
@ pe_order_load
Definition: pe_types.h:479
pe_fence_node
void pe_fence_node(pe_working_set_t *data_set, node_t *node, const char *reason)
Schedule a fence action for a node.
Definition: unpack.c:78
pe_err
#define pe_err(fmt...)
Definition: internal.h:21
RSC_ROLE_SLAVE
@ RSC_ROLE_SLAVE
Definition: common.h:90
pe_action_s
Definition: pe_types.h:369
CRM_META
#define CRM_META
Definition: crm.h:71
pe_node_shared_s::shutdown
gboolean shutdown
Definition: pe_types.h:196
pe_graph_none
@ pe_graph_none
Definition: pe_types.h:258
process_utilization
void process_utilization(resource_t *rsc, node_t **prefer, pe_working_set_t *data_set)
Definition: pcmk_sched_utilization.c:331
crm_info
#define crm_info(fmt, args...)
Definition: logging.h:244
resource_alloc_functions_s::create_probe
gboolean(* create_probe)(resource_t *, node_t *, action_t *, gboolean, pe_working_set_t *)
Definition: pcmki_sched_allocate.h:25
ReloadRsc
void ReloadRsc(resource_t *rsc, node_t *node, pe_working_set_t *data_set)
Definition: pcmk_sched_native.c:3252
update_action_flags
gboolean update_action_flags(action_t *action, enum pe_action_flags flags, const char *source, int line)
Definition: pcmk_sched_allocate.c:119
pe__location_constraint_s::role_filter
enum rsc_role_e role_filter
Definition: internal.h:31
CRM_LOG_ASSERT
#define CRM_LOG_ASSERT(expr)
Definition: logging.h:143
rsc_ticket_s::ticket
ticket_t * ticket
Definition: pcmki_scheduler.h:62
XML_AGENT_ATTR_CLASS
#define XML_AGENT_ATTR_CLASS
Definition: msg_xml.h:229
XML_ATTR_TE_TARGET_RC
#define XML_ATTR_TE_TARGET_RC
Definition: msg_xml.h:361
filter_colocation_constraint
enum filter_colocation_res filter_colocation_constraint(resource_t *rsc_lh, resource_t *rsc_rh, rsc_colocation_t *constraint, gboolean preview)
Definition: pcmk_sched_native.c:1595
pe_action_set_reason
void pe_action_set_reason(pe_action_t *action, const char *reason, bool overwrite)
Definition: utils.c:2499
DemoteRsc
gboolean DemoteRsc(resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set)
Definition: pcmk_sched_native.c:2696
native_rsc_colocation_rh
void native_rsc_colocation_rh(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh, rsc_colocation_t *constraint, pe_working_set_t *data_set)
Definition: pcmk_sched_native.c:1785
demote_action
#define demote_action(rsc, node, optional)
Definition: internal.h:256
native_rsc_colocation_lh
void native_rsc_colocation_lh(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh, rsc_colocation_t *constraint, pe_working_set_t *data_set)
Definition: pcmk_sched_native.c:1575
role2text
const char * role2text(enum rsc_role_e role)
Definition: common.c:335
pe_action_implied_by_stonith
@ pe_action_implied_by_stonith
Definition: pe_types.h:272
crm_strdup_printf
char * crm_strdup_printf(char const *format,...) __attribute__((__format__(__printf__
pe_flag_stop_everything
#define pe_flag_stop_everything
Definition: pe_types.h:100
RSC_ROLE_UNKNOWN
@ RSC_ROLE_UNKNOWN
Definition: common.h:87
crm_debug
#define crm_debug(fmt, args...)
Definition: logging.h:246
resource_alloc_functions_s::merge_weights
GHashTable *(* merge_weights)(resource_t *, const char *, GHashTable *, const char *, float, enum pe_weights)
Definition: pcmki_sched_allocate.h:21
pe_order_pseudo_left
@ pe_order_pseudo_left
Definition: pe_types.h:460
pe_flag_stdout
#define pe_flag_stdout
Definition: pe_types.h:112
pe_action_s::node
pe_node_t * node
Definition: pe_types.h:374
resource_object_functions_s::state
enum rsc_role_e(* state)(const pe_resource_t *, gboolean)
Definition: pe_types.h:52
native_rsc_colocation_rh_must
void native_rsc_colocation_rh_must(resource_t *rsc_lh, gboolean update_lh, resource_t *rsc_rh, gboolean update_rh)
pe_order_optional
@ pe_order_optional
Definition: pe_types.h:448
rsc_req_stonith
@ rsc_req_stonith
Definition: common.h:83
pe_node_s::rsc_discover_mode
int rsc_discover_mode
Definition: pe_types.h:221
pe_action_optional
@ pe_action_optional
Definition: pe_types.h:268
resource_alloc_functions_s::rsc_colocation_rh
void(* rsc_colocation_rh)(pe_resource_t *, pe_resource_t *, rsc_colocation_t *, pe_working_set_t *)
Definition: pcmki_sched_allocate.h:30
pe_resource_s::partial_migration_source
pe_node_t * partial_migration_source
Definition: pe_types.h:336
pe_rsc_needs_unfencing
#define pe_rsc_needs_unfencing
Definition: pe_types.h:255
pe_order_implies_first
@ pe_order_implies_first
Definition: pe_types.h:451
resource_alloc_functions_s::expand
void(* expand)(resource_t *, pe_working_set_t *)
Definition: pcmki_sched_allocate.h:42
promote_key
#define promote_key(rsc)
Definition: internal.h:245
pe__location_constraint_s::discover_mode
enum pe_discover_e discover_mode
Definition: internal.h:32
do_crm_log
#define do_crm_log(level, fmt, args...)
Log a message.
Definition: logging.h:122
RSC_STOP
#define RSC_STOP
Definition: crm.h:199
native_rsc_location
void native_rsc_location(pe_resource_t *rsc, pe__location_t *constraint)
Definition: pcmk_sched_native.c:2151
filter_colocation_res
filter_colocation_res
Definition: pcmki_sched_utils.h:64
crm_xml_add
const char * crm_xml_add(xmlNode *node, const char *name, const char *value)
Create an XML attribute with specified name and value.
Definition: nvpair.c:313
RSC_MIGRATE
#define RSC_MIGRATE
Definition: crm.h:193
pe_working_set_s
Definition: pe_types.h:117
rsc_ticket_constraint
void rsc_ticket_constraint(resource_t *rsc_lh, rsc_ticket_t *rsc_ticket, pe_working_set_t *data_set)
Definition: pcmk_sched_native.c:1827
crm_element_value
const char * crm_element_value(const xmlNode *data, const char *name)
Retrieve the value of an XML attribute.
Definition: nvpair.c:519
pe_action_clear
@ pe_action_clear
Definition: pe_types.h:277
node_copy
node_t * node_copy(const node_t *this_node)
Definition: utils.c:132
pe__find_active_on
pe_node_t * pe__find_active_on(const pe_resource_t *rsc, unsigned int *count_all, unsigned int *count_clean)
Definition: complex.c:861
pe_action_s::reason
char * reason
Definition: pe_types.h:380
influence_nothing
@ influence_nothing
Definition: pcmki_sched_utils.h:65
node_hash_from_list
GHashTable * node_hash_from_list(GListPtr list)
Definition: utils.c:188
rsc_colocation_s::role_rh
int role_rh
Definition: pcmki_scheduler.h:47
pe_resource_s::rsc_cons
GListPtr rsc_cons
Definition: pe_types.h:328
RSC_ROLE_MAX
#define RSC_ROLE_MAX
Definition: common.h:94
sort_node_uname
gint sort_node_uname(gconstpointer a, gconstpointer b)
Definition: utils.c:231
text2role
enum rsc_role_e text2role(const char *role)
Definition: common.c:356
pe_rsc_merging
#define pe_rsc_merging
Definition: pe_types.h:236
loss_ticket_demote
@ loss_ticket_demote
Definition: pcmki_scheduler.h:54
pe_action_s::id
int id
Definition: pe_types.h:370
can_run_resources
gboolean can_run_resources(const node_t *node)
Definition: pcmk_sched_utils.c:62
pe_rsc_allocating
#define pe_rsc_allocating
Definition: pe_types.h:235
rules.h
add_hash_param
void add_hash_param(GHashTable *hash, const char *name, const char *value)
Definition: common.c:412
delete_action
#define delete_action(rsc, node, optional)
Definition: internal.h:220
pe_resource_s::container
pe_resource_t * container
Definition: pe_types.h:351
variant.h
pe__location_constraint_s::node_list_rh
GListPtr node_list_rh
Definition: internal.h:33
pe_order_implies_first_master
@ pe_order_implies_first_master
Definition: pe_types.h:453
pe_fence_op
action_t * pe_fence_op(node_t *node, const char *op, bool optional, const char *reason, pe_working_set_t *data_set)
Definition: utils.c:2289
pe_rsc_needs_fencing
#define pe_rsc_needs_fencing
Definition: pe_types.h:254
pe_flag_have_stonith_resource
#define pe_flag_have_stonith_resource
Definition: pe_types.h:94
resource_alloc_functions_s::rsc_colocation_lh
void(* rsc_colocation_lh)(pe_resource_t *, pe_resource_t *, rsc_colocation_t *, pe_working_set_t *)
Definition: pcmki_sched_allocate.h:28
XML_RSC_ATTR_TARGET_ROLE
#define XML_RSC_ATTR_TARGET_ROLE
Definition: msg_xml.h:196
pe_rsc_unique
#define pe_rsc_unique
Definition: pe_types.h:230
reload_key
#define reload_key(rsc)
Definition: internal.h:234
pe_order_implies_then
@ pe_order_implies_then
Definition: pe_types.h:452
resource_location
void resource_location(resource_t *rsc, node_t *node, int score, const char *tag, pe_working_set_t *data_set)
Definition: utils.c:1566
pe_graph_flags
pe_graph_flags
Definition: pe_types.h:257
can_run_any
gboolean can_run_any(GHashTable *nodes)
Definition: pcmk_sched_utils.c:377
find_actions
GListPtr find_actions(GListPtr input, const char *key, const node_t *on_node)
Definition: utils.c:1429
StartRsc
gboolean StartRsc(resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set)
Definition: pcmk_sched_native.c:2636
RSC_STATUS
#define RSC_STATUS
Definition: crm.h:210
pe_graph_updated_first
@ pe_graph_updated_first
Definition: pe_types.h:259
CRMD_ACTION_RELOAD
#define CRMD_ACTION_RELOAD
Definition: crm.h:167
rsc_req_nothing
@ rsc_req_nothing
Definition: common.h:81
pe_ticket_s::standby
gboolean standby
Definition: pe_types.h:423
RoleError
gboolean RoleError(resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set)
Definition: pcmk_sched_native.c:2714
pe_action_reschedule
@ pe_action_reschedule
Definition: pe_types.h:285
pe_clear_action_bit
#define pe_clear_action_bit(action, bit)
Definition: internal.h:26
safe_str_neq
gboolean safe_str_neq(const char *a, const char *b)
Definition: strings.c:161
pe_order_then_cancels_first
@ pe_order_then_cancels_first
Definition: pe_types.h:484
check_utilization
gboolean check_utilization(const char *value)
Definition: utils.c:177
pe_action_s::rsc
pe_resource_t * rsc
Definition: pe_types.h:373
native_create_probe
gboolean native_create_probe(resource_t *rsc, node_t *node, action_t *complete, gboolean force, pe_working_set_t *data_set)
Definition: pcmk_sched_native.c:2761
rsc_colocation_s
Definition: pcmki_scheduler.h:40
pe_resource_s::parent
pe_resource_t * parent
Definition: pe_types.h:299
crm_parse_interval_spec
guint crm_parse_interval_spec(const char *input)
Definition: utils.c:542
XML_LRM_ATTR_MIGRATE_TARGET
#define XML_LRM_ATTR_MIGRATE_TARGET
Definition: msg_xml.h:286
crm_str
#define crm_str(x)
Definition: logging.h:267
native_color
node_t * native_color(resource_t *rsc, node_t *prefer, pe_working_set_t *data_set)
Definition: pcmk_sched_native.c:437
pe_order_implies_first_migratable
@ pe_order_implies_first_migratable
Definition: pe_types.h:456
services.h
Services API.
STOP_SANITY_ASSERT
#define STOP_SANITY_ASSERT(lineno)
Definition: pcmk_sched_native.c:2249
rsc_state_matrix
enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX]
Definition: pcmk_sched_native.c:51
pe_resource_s::flags
unsigned long long flags
Definition: pe_types.h:319
score2char_stack
char * score2char_stack(int score, char *buf, size_t len)
Definition: utils.c:237
pe_action_migrate_runnable
@ pe_action_migrate_runnable
Definition: pe_types.h:273
pe_order_restart
@ pe_order_restart
Definition: pe_types.h:470
rsc_ticket_s::id
const char * id
Definition: pcmki_scheduler.h:60
pe_flag_enable_unfencing
#define pe_flag_enable_unfencing
Definition: pe_types.h:95
pe_order_implies_then_on_node
@ pe_order_implies_then_on_node
Definition: pe_types.h:461
pe_flag_remove_after_stop
#define pe_flag_remove_after_stop
Definition: pe_types.h:103
PCMK_OCF_RUNNING_MASTER
@ PCMK_OCF_RUNNING_MASTER
Definition: services.h:98
recovery_stop_start
@ recovery_stop_start
Definition: common.h:75
pe_resource_s::role
enum rsc_role_e role
Definition: pe_types.h:341
pe_rsc_trace
#define pe_rsc_trace(rsc, fmt, args...)
Definition: internal.h:19
native_merge_weights
GHashTable * native_merge_weights(resource_t *rsc, const char *rhs, GHashTable *nodes, const char *attr, float factor, enum pe_weights flags)
Definition: pcmk_sched_native.c:297
pe_order_preserve
@ pe_order_preserve
Definition: pe_types.h:483
pe_flag_startup_probes
#define pe_flag_startup_probes
Definition: pe_types.h:106
CRM_ASSERT
#define CRM_ASSERT(expr)
Definition: results.h:42
pcmk__bundle_log_actions
void pcmk__bundle_log_actions(pe_resource_t *rsc, pe_working_set_t *data_set, gboolean terminal)
Definition: pcmk_sched_bundle.c:1054
pe__resource_actions
GList * pe__resource_actions(const pe_resource_t *rsc, const pe_node_t *node, const char *task, bool require_node)
Find all actions of given type for a resource.
Definition: utils.c:1518
pe_rsc_start_pending
#define pe_rsc_start_pending
Definition: pe_types.h:243
pe_action_runnable
@ pe_action_runnable
Definition: pe_types.h:267
pe_action_dangle
@ pe_action_dangle
Definition: pe_types.h:278
pe_rsc_managed
#define pe_rsc_managed
Definition: pe_types.h:225
loss_ticket_fence
@ loss_ticket_fence
Definition: pcmki_scheduler.h:55
start_action
#define start_action(rsc, node, optional)
Definition: internal.h:236
PCMK_OCF_NOT_RUNNING
@ PCMK_OCF_NOT_RUNNING
Definition: services.h:97
pe_node_attribute_raw
const char * pe_node_attribute_raw(pe_node_t *node, const char *name)
Definition: common.c:468
pe_rsc_promotable
#define pe_rsc_promotable
Definition: pe_types.h:232
RSC_DELETE
#define RSC_DELETE
Definition: crm.h:190
pe_flag_stonith_enabled
#define pe_flag_stonith_enabled
Definition: pe_types.h:93
rsc_merge_weights
GHashTable * rsc_merge_weights(resource_t *rsc, const char *rhs, GHashTable *nodes, const char *attr, float factor, enum pe_weights flags)
Definition: pcmk_sched_native.c:304
pe_node_shared_s::maintenance
gboolean maintenance
Definition: pe_types.h:199
merge_weights
int merge_weights(int w1, int w2)
Definition: common.c:375
pe__is_guest_node
gboolean pe__is_guest_node(pe_node_t *node)
Definition: remote.c:47
rsc_action_matrix
gboolean(* rsc_action_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX])(resource_t *, node_t *, gboolean, pe_working_set_t *)
Definition: pcmk_sched_native.c:61
pe_resource_s
Definition: pe_types.h:291
pe_working_set_s::flags
unsigned long long flags
Definition: pe_types.h:127
pe_resource_s::allowed_nodes
GHashTable * allowed_nodes
Definition: pe_types.h:339
rsc_ticket_s
Definition: pcmki_scheduler.h:59
pe_node_shared_s::unclean
gboolean unclean
Definition: pe_types.h:194
pe__location_constraint_s
Definition: internal.h:28
pe_rsc_reload
#define pe_rsc_reload
Definition: pe_types.h:238
pe_resource_s::ops_xml
xmlNode * ops_xml
Definition: pe_types.h:296
loss_ticket_freeze
@ loss_ticket_freeze
Definition: pcmki_scheduler.h:56
RSC_ROLE_STARTED
@ RSC_ROLE_STARTED
Definition: common.h:89
pe_working_set_s::no_quorum_policy
enum pe_quorum_policy no_quorum_policy
Definition: pe_types.h:130
NullOp
gboolean NullOp(resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set)
Definition: pcmk_sched_native.c:2723
XML_OP_ATTR_PENDING
#define XML_OP_ATTR_PENDING
Definition: msg_xml.h:221
generate_op_key
char * generate_op_key(const char *rsc_id, const char *op_type, guint interval_ms)
Generate an operation key.
Definition: operations.c:39
update_action
gboolean update_action(pe_action_t *action, pe_working_set_t *data_set)
Definition: pcmk_sched_graph.c:512
PromoteRsc
gboolean PromoteRsc(resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set)
Definition: pcmk_sched_native.c:2655
XML_RSC_ATTR_INCARNATION
#define XML_RSC_ATTR_INCARNATION
Definition: msg_xml.h:186
pe_node_shared_s::online
gboolean online
Definition: pe_types.h:190
pe_graph_updated_then
@ pe_graph_updated_then
Definition: pe_types.h:260
pe_action_s::task
char * task
Definition: pe_types.h:377
pe_node_shared_s::uname
const char * uname
Definition: pe_types.h:186
pe_resource_s::cmds
resource_alloc_functions_t * cmds
Definition: pe_types.h:304
pe_weights_positive
@ pe_weights_positive
Definition: pcmki_scheduler.h:36
pe_discover_exclusive
@ pe_discover_exclusive
Definition: pe_types.h:442
pe_resource_s::is_remote_node
gboolean is_remote_node
Definition: pe_types.h:322
crm_internal.h
node_hash_dup
GHashTable * node_hash_dup(GHashTable *hash)
Definition: pcmk_sched_native.c:286
pe__resource_contains_guest_node
pe_resource_t * pe__resource_contains_guest_node(const pe_working_set_t *data_set, const pe_resource_t *rsc)
Definition: remote.c:76
stop_key
#define stop_key(rsc)
Definition: internal.h:229
pe_node_s
Definition: pe_types.h:216
pe_flag_have_quorum
#define pe_flag_have_quorum
Definition: pe_types.h:89
pe_rsc_maintenance
#define pe_rsc_maintenance
Definition: pe_types.h:250
pe_find_node_id
pe_node_t * pe_find_node_id(GListPtr node_list, const char *id)
Definition: status.c:406
crm_meta_name
char * crm_meta_name(const char *field)
Definition: utils.c:739
XML_RSC_ATTR_CONTAINER
#define XML_RSC_ATTR_CONTAINER
Definition: msg_xml.h:205
pe_discover_never
@ pe_discover_never
Definition: pe_types.h:441
pe_action_s::meta
GHashTable * meta
Definition: pe_types.h:387
pe_rsc_block
#define pe_rsc_block
Definition: pe_types.h:226
native_rsc_colocation_rh_mustnot
void native_rsc_colocation_rh_mustnot(resource_t *rsc_lh, gboolean update_lh, resource_t *rsc_rh, gboolean update_rh)
scores_log_level
int scores_log_level
Definition: pcmk_sched_messages.c:26
pe_resource_s::fns
resource_object_functions_t * fns
Definition: pe_types.h:303
native_internal_constraints
void native_internal_constraints(resource_t *rsc, pe_working_set_t *data_set)
Definition: pcmk_sched_native.c:1362
find_first_action
action_t * find_first_action(GListPtr input, const char *uuid, const char *task, node_t *on_node)
Definition: utils.c:1399
resource_alloc_functions_s::action_flags
enum pe_action_flags(* action_flags)(action_t *, node_t *)
Definition: pcmki_sched_allocate.h:35
crm_config_err
#define crm_config_err(fmt...)
Definition: crm_internal.h:179
pe_proc_err
#define pe_proc_err(fmt...)
Definition: internal.h:23
pe_rsc_failed
#define pe_rsc_failed
Definition: pe_types.h:241
pe_ordering
pe_ordering
Definition: pe_types.h:446