1 /* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
8 * Copyright (C) 2004 Oracle. All rights reserved.
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public
12 * License as published by the Free Software Foundation; either
13 * version 2 of the License, or (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
20 * You should have received a copy of the GNU General Public
21 * License along with this program; if not, write to the
22 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
23 * Boston, MA 02111-1307, USA.
28 #include <linux/module.h>
30 #include <linux/types.h>
31 #include <linux/slab.h>
32 #include <linux/highmem.h>
33 #include <linux/utsname.h>
34 #include <linux/init.h>
35 #include <linux/sysctl.h>
36 #include <linux/random.h>
37 #include <linux/blkdev.h>
38 #include <linux/socket.h>
39 #include <linux/inet.h>
40 #include <linux/timer.h>
41 #include <linux/kthread.h>
42 #include <linux/delay.h>
45 #include "cluster/heartbeat.h"
46 #include "cluster/nodemanager.h"
47 #include "cluster/tcp.h"
50 #include "dlmcommon.h"
51 #include "dlmdomain.h"
53 #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY)
54 #include "cluster/masklog.h"
56 static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node);
58 static int dlm_recovery_thread(void *data);
59 void dlm_complete_recovery_thread(struct dlm_ctxt *dlm);
60 int dlm_launch_recovery_thread(struct dlm_ctxt *dlm);
61 void dlm_kick_recovery_thread(struct dlm_ctxt *dlm);
62 static int dlm_do_recovery(struct dlm_ctxt *dlm);
64 static int dlm_pick_recovery_master(struct dlm_ctxt *dlm);
65 static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node);
66 static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);
67 static int dlm_request_all_locks(struct dlm_ctxt *dlm,
68 u8 request_from, u8 dead_node);
69 static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);
71 static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res);
72 static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
73 const char *lockname, int namelen,
74 int total_locks, u64 cookie,
75 u8 flags, u8 master);
76 static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
77 struct dlm_migratable_lockres *mres,
78 u8 send_to,
79 struct dlm_lock_resource *res,
80 int total_locks);
81 static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
82 struct dlm_lock_resource *res,
83 struct dlm_migratable_lockres *mres);
84 static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm);
85 static int dlm_send_all_done_msg(struct dlm_ctxt *dlm,
86 u8 dead_node, u8 send_to);
87 static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node);
88 static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
89 struct list_head *list, u8 dead_node);
90 static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
91 u8 dead_node, u8 new_master);
92 static void dlm_reco_ast(void *astdata);
93 static void dlm_reco_bast(void *astdata, int blocked_type);
94 static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st);
95 static void dlm_request_all_locks_worker(struct dlm_work_item *item,
96 void *data);
97 static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data);
99 static u64 dlm_get_next_mig_cookie(void);
101 static DEFINE_SPINLOCK(dlm_reco_state_lock);
102 static DEFINE_SPINLOCK(dlm_mig_cookie_lock);
103 static u64 dlm_mig_cookie = 1;
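/* a fresh cookie is handed out for each lockres migration that needs
 * more than one network message, so the receiver can tie the pieces
 * together; see the total_locks > DLM_MAX_MIGRATABLE_LOCKS case in
 * dlm_send_one_lockres() */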
105 static u64 dlm_get_next_mig_cookie(void)
106 {
107 u64 c;
108 spin_lock(&dlm_mig_cookie_lock);
109 c = dlm_mig_cookie;
110 if (dlm_mig_cookie == (~0ULL))
111 dlm_mig_cookie = 1;
112 else
113 dlm_mig_cookie++;
114 spin_unlock(&dlm_mig_cookie_lock);
115 return c;
116 }
118 static inline void dlm_set_reco_dead_node(struct dlm_ctxt *dlm,
119 u8 dead_node)
120 {
121 assert_spin_locked(&dlm->spinlock);
122 if (dlm->reco.dead_node != dead_node)
123 mlog(0, "%s: changing dead_node from %u to %u\n",
124 dlm->name, dlm->reco.dead_node, dead_node);
125 dlm->reco.dead_node = dead_node;
126 }
128 static inline void dlm_set_reco_master(struct dlm_ctxt *dlm,
129 u8 master)
130 {
131 assert_spin_locked(&dlm->spinlock);
132 mlog(0, "%s: changing new_master from %u to %u\n",
133 dlm->name, dlm->reco.new_master, master);
134 dlm->reco.new_master = master;
135 }
137 static inline void dlm_reset_recovery(struct dlm_ctxt *dlm)
138 {
139 spin_lock(&dlm->spinlock);
140 clear_bit(dlm->reco.dead_node, dlm->recovery_map);
141 dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
142 dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
143 spin_unlock(&dlm->spinlock);
144 }
146 /* Worker function used during recovery. */
147 void dlm_dispatch_work(void *data)
148 {
149 struct dlm_ctxt *dlm = (struct dlm_ctxt *)data;
150 LIST_HEAD(tmp_list);
151 struct list_head *iter, *iter2;
152 struct dlm_work_item *item;
153 dlm_workfunc_t *workfunc;
155 spin_lock(&dlm->work_lock);
156 list_splice_init(&dlm->work_list, &tmp_list);
157 spin_unlock(&dlm->work_lock);
159 list_for_each_safe(iter, iter2, &tmp_list) {
160 item = list_entry(iter, struct dlm_work_item, list);
161 workfunc = item->func;
162 list_del_init(&item->list);
164 /* already have ref on dlm to avoid having
165 * it disappear. just double-check. */
166 BUG_ON(item->dlm != dlm);
168 /* this is allowed to sleep and
169 * call network stuff */
170 workfunc(item, item->data);
181 void dlm_kick_recovery_thread(struct dlm_ctxt *dlm)
182 {
183 /* wake the recovery thread
184 * this will wake the reco thread in one of three places
185 * 1) sleeping with no recovery happening
186 * 2) sleeping with recovery mastered elsewhere
187 * 3) recovery mastered here, waiting on reco data */
189 wake_up(&dlm->dlm_reco_thread_wq);
190 }
192 /* Launch the recovery thread */
193 int dlm_launch_recovery_thread(struct dlm_ctxt *dlm)
194 {
195 mlog(0, "starting dlm recovery thread...\n");
197 dlm->dlm_reco_thread_task = kthread_run(dlm_recovery_thread, dlm,
198 "dlm_reco_thread");
199 if (IS_ERR(dlm->dlm_reco_thread_task)) {
200 mlog_errno(PTR_ERR(dlm->dlm_reco_thread_task));
201 dlm->dlm_reco_thread_task = NULL;
202 return -EINVAL;
203 }
205 return 0;
206 }
208 void dlm_complete_recovery_thread(struct dlm_ctxt *dlm)
209 {
210 if (dlm->dlm_reco_thread_task) {
211 mlog(0, "waiting for dlm recovery thread to exit\n");
212 kthread_stop(dlm->dlm_reco_thread_task);
213 dlm->dlm_reco_thread_task = NULL;
214 }
215 }
219 /*
220 * this is lame, but here's how recovery works...
221 * 1) all recovery threads cluster wide will work on recovering
222 * one node at a time
223 * 2) negotiate who will take over all the locks for the dead node.
224 * that's right... ALL the locks.
225 * 3) once a new master is chosen, everyone scans all locks
226 * and moves aside those mastered by the dead node
227 * 4) each of these locks should be locked until recovery is done
228 * 5) the new master collects up all of the secondary lock queue info
229 * one lock at a time, forcing each node to communicate back
230 * before continuing
231 * 6) each secondary lock queue responds with the full known lock info
232 * 7) once the new master has run all its locks, it sends an ALLDONE!
233 * message to everyone
234 * 8) upon receiving this message, the secondary queue node unlocks
235 * and responds to the ALLDONE
236 * 9) once the new master gets responses from everyone, it unlocks
237 * everything and recovery for this dead node is done
238 *10) go back to 2) while there are still dead nodes
239 */
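/*
 * a rough sketch (illustrative only, kept out of the build) of how the
 * steps above map onto the functions in this file; error handling is
 * omitted, and step 10 is the loop in dlm_do_recovery().
 */
#if 0
static void example_recovery_flow(struct dlm_ctxt *dlm, u8 dead_node)
{
	/* step 2: race for the recovery master role; returns 0 if this
	 * node is now the master, -EEXIST if another node won */
	if (dlm_pick_recovery_master(dlm) == 0)
		/* steps 3-9: collect lock state from every live node,
		 * then send the finalize message to everyone */
		dlm_remaster_locks(dlm, dead_node);
	/* either way, the affected lock resources stay marked
	 * RECOVERING until the new master finishes */
}
#endif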
242 static void dlm_print_reco_node_status(struct dlm_ctxt *dlm)
244 struct dlm_reco_node_data *ndata;
245 struct dlm_lock_resource *res;
247 mlog(ML_NOTICE, "%s(%d): recovery info, state=%s, dead=%u, master=%u\n",
248 dlm->name, dlm->dlm_reco_thread_task->pid,
249 dlm->reco.state & DLM_RECO_STATE_ACTIVE ? "ACTIVE" : "inactive",
250 dlm->reco.dead_node, dlm->reco.new_master);
252 list_for_each_entry(ndata, &dlm->reco.node_data, list) {
253 char *st = "unknown";
254 switch (ndata->state) {
255 case DLM_RECO_NODE_DATA_INIT:
256 st = "init";
257 break;
258 case DLM_RECO_NODE_DATA_REQUESTING:
259 st = "requesting";
260 break;
261 case DLM_RECO_NODE_DATA_DEAD:
262 st = "dead";
263 break;
264 case DLM_RECO_NODE_DATA_RECEIVING:
265 st = "receiving";
266 break;
267 case DLM_RECO_NODE_DATA_REQUESTED:
268 st = "requested";
269 break;
270 case DLM_RECO_NODE_DATA_DONE:
271 st = "done";
272 break;
273 case DLM_RECO_NODE_DATA_FINALIZE_SENT:
274 st = "finalize-sent";
275 break;
280 mlog(ML_NOTICE, "%s: reco state, node %u, state=%s\n",
281 dlm->name, ndata->node_num, st);
283 list_for_each_entry(res, &dlm->reco.resources, recovering) {
284 mlog(ML_NOTICE, "%s: lockres %.*s on recovering list\n",
285 dlm->name, res->lockname.len, res->lockname.name);
289 #define DLM_RECO_THREAD_TIMEOUT_MS (5 * 1000)
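/* the recovery thread naps for at most this long between passes;
 * dlm_kick_recovery_thread() wakes it early when recovery work
 * arrives or a node is declared dead */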
291 static int dlm_recovery_thread(void *data)
292 {
293 int status;
294 struct dlm_ctxt *dlm = data;
295 unsigned long timeout = msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS);
297 mlog(0, "dlm thread running for %s...\n", dlm->name);
299 while (!kthread_should_stop()) {
300 if (dlm_joined(dlm)) {
301 status = dlm_do_recovery(dlm);
302 if (status == -EAGAIN) {
303 /* do not sleep, recheck immediately. */
310 wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
311 kthread_should_stop(),
315 mlog(0, "quitting DLM recovery thread\n");
319 /* returns true when the recovery master has contacted us */
320 static int dlm_reco_master_ready(struct dlm_ctxt *dlm)
321 {
322 int ready;
323 spin_lock(&dlm->spinlock);
324 ready = (dlm->reco.new_master != O2NM_INVALID_NODE_NUM);
325 spin_unlock(&dlm->spinlock);
326 return ready;
327 }
329 /* returns true if node is no longer in the domain
330 * could be dead or just not joined */
331 int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node)
332 {
333 int dead;
334 spin_lock(&dlm->spinlock);
335 dead = !test_bit(node, dlm->domain_map);
336 spin_unlock(&dlm->spinlock);
337 return dead;
338 }
340 int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
341 {
342 if (timeout) {
343 mlog(ML_NOTICE, "%s: waiting %dms for notification of "
344 "death of node %u\n", dlm->name, timeout, node);
345 wait_event_timeout(dlm->dlm_reco_thread_wq,
346 dlm_is_node_dead(dlm, node),
347 msecs_to_jiffies(timeout));
348 } else {
349 mlog(ML_NOTICE, "%s: waiting indefinitely for notification "
350 "of death of node %u\n", dlm->name, node);
351 wait_event(dlm->dlm_reco_thread_wq,
352 dlm_is_node_dead(dlm, node));
353 }
354 /* for now, return 0 */
355 return 0;
356 }
358 /* callers of the top-level api calls (dlmlock/dlmunlock) should
359 * block on the dlm->reco.event when recovery is in progress.
360 * the dlm recovery thread will set this state when it begins
361 * recovering a dead node (as the new master or not) and clear
362 * the state and wake as soon as all affected lock resources have
363 * been marked with the RECOVERY flag */
364 static int dlm_in_recovery(struct dlm_ctxt *dlm)
365 {
366 int in_recovery;
367 spin_lock(&dlm->spinlock);
368 in_recovery = !!(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
369 spin_unlock(&dlm->spinlock);
370 return in_recovery;
371 }
374 void dlm_wait_for_recovery(struct dlm_ctxt *dlm)
375 {
376 wait_event(dlm->reco.event, !dlm_in_recovery(dlm));
377 }
379 static void dlm_begin_recovery(struct dlm_ctxt *dlm)
380 {
381 spin_lock(&dlm->spinlock);
382 BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
383 dlm->reco.state |= DLM_RECO_STATE_ACTIVE;
384 spin_unlock(&dlm->spinlock);
385 }
387 static void dlm_end_recovery(struct dlm_ctxt *dlm)
388 {
389 spin_lock(&dlm->spinlock);
390 BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE));
391 dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE;
392 spin_unlock(&dlm->spinlock);
393 wake_up(&dlm->reco.event);
394 }
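/*
 * illustrative only (kept out of the build): the caller pattern the
 * comment above dlm_in_recovery() describes.  top-level paths park on
 * dlm->reco.event until the ACTIVE flag is dropped.
 */
#if 0
static void example_top_level_entry(struct dlm_ctxt *dlm)
{
	dlm_wait_for_recovery(dlm); /* sleeps while DLM_RECO_STATE_ACTIVE */
	/* ... now safe to walk the dlmlock()/dlmunlock() paths ... */
}
#endif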
396 static int dlm_do_recovery(struct dlm_ctxt *dlm)
401 spin_lock(&dlm->spinlock);
403 /* check to see if the new master has died */
404 if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM &&
405 test_bit(dlm->reco.new_master, dlm->recovery_map)) {
406 mlog(0, "new master %u died while recovering %u!\n",
407 dlm->reco.new_master, dlm->reco.dead_node);
408 /* unset the new_master, leave dead_node */
409 dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
412 /* select a target to recover */
413 if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
416 bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
417 if (bit >= O2NM_MAX_NODES)
418 dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
420 dlm_set_reco_dead_node(dlm, bit);
421 } else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) {
423 mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n",
424 dlm->reco.dead_node);
425 dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
428 if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
429 // mlog(0, "nothing to recover! sleeping now!\n");
430 spin_unlock(&dlm->spinlock);
431 /* return to main thread loop and sleep. */
434 mlog(0, "%s(%d):recovery thread found node %u in the recovery map!\n",
435 dlm->name, dlm->dlm_reco_thread_task->pid,
436 dlm->reco.dead_node);
437 spin_unlock(&dlm->spinlock);
439 /* take write barrier */
440 /* (stops the list reshuffling thread, proxy ast handling) */
441 dlm_begin_recovery(dlm);
443 if (dlm->reco.new_master == dlm->node_num)
446 if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
447 /* choose a new master, returns 0 if this node
448 * is the master, -EEXIST if it's another node.
449 * this does not return until a new master is chosen
450 * or recovery completes entirely. */
451 ret = dlm_pick_recovery_master(dlm);
453 /* already notified everyone. go. */
456 mlog(0, "another node will master this recovery session.\n");
458 mlog(0, "dlm=%s (%d), new_master=%u, this node=%u, dead_node=%u\n",
459 dlm->name, dlm->dlm_reco_thread_task->pid, dlm->reco.new_master,
460 dlm->node_num, dlm->reco.dead_node);
462 /* it is safe to start everything back up here
463 * because all of the dead node's lock resources
464 * have been marked as in-recovery */
465 dlm_end_recovery(dlm);
467 /* sleep out in main dlm_recovery_thread loop. */
471 mlog(0, "(%d) mastering recovery of %s:%u here(this=%u)!\n",
472 dlm->dlm_reco_thread_task->pid,
473 dlm->name, dlm->reco.dead_node, dlm->node_num);
475 status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
476 if (status < 0) {
477 mlog(ML_ERROR, "error %d remastering locks for node %u, "
478 "retrying.\n", status, dlm->reco.dead_node);
479 /* yield a bit to allow any final network messages
480 * to get handled on remaining nodes */
481 msleep(100);
482 } else {
483 /* success! see if any other nodes need recovery */
484 mlog(0, "DONE mastering recovery of %s:%u here(this=%u)!\n",
485 dlm->name, dlm->reco.dead_node, dlm->node_num);
486 dlm_reset_recovery(dlm);
488 dlm_end_recovery(dlm);
490 /* continue and look for another dead node */
494 static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
497 struct dlm_reco_node_data *ndata;
498 struct list_head *iter;
503 status = dlm_init_recovery_area(dlm, dead_node);
507 /* safe to access the node data list without a lock, since this
508 * process is the only one to change the list */
509 list_for_each(iter, &dlm->reco.node_data) {
510 ndata = list_entry (iter, struct dlm_reco_node_data, list);
511 BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT);
512 ndata->state = DLM_RECO_NODE_DATA_REQUESTING;
514 mlog(0, "requesting lock info from node %u\n",
517 if (ndata->node_num == dlm->node_num) {
518 ndata->state = DLM_RECO_NODE_DATA_DONE;
522 status = dlm_request_all_locks(dlm, ndata->node_num, dead_node);
525 if (dlm_is_host_down(status))
526 ndata->state = DLM_RECO_NODE_DATA_DEAD;
533 switch (ndata->state) {
534 case DLM_RECO_NODE_DATA_INIT:
535 case DLM_RECO_NODE_DATA_FINALIZE_SENT:
536 case DLM_RECO_NODE_DATA_REQUESTED:
539 case DLM_RECO_NODE_DATA_DEAD:
540 mlog(0, "node %u died after requesting "
541 "recovery info for node %u\n",
542 ndata->node_num, dead_node);
547 case DLM_RECO_NODE_DATA_REQUESTING:
548 ndata->state = DLM_RECO_NODE_DATA_REQUESTED;
549 mlog(0, "now receiving recovery data from "
550 "node %u for dead node %u\n",
551 ndata->node_num, dead_node);
553 case DLM_RECO_NODE_DATA_RECEIVING:
554 mlog(0, "already receiving recovery data from "
555 "node %u for dead node %u\n",
556 ndata->node_num, dead_node);
558 case DLM_RECO_NODE_DATA_DONE:
559 mlog(0, "already DONE receiving recovery data "
560 "from node %u for dead node %u\n",
561 ndata->node_num, dead_node);
566 mlog(0, "done requesting all lock info\n");
568 /* nodes should be sending reco data now
569 * just need to wait */
572 /* check all the nodes now to see if we are
573 * done, or if anyone died */
575 spin_lock(&dlm_reco_state_lock);
576 list_for_each(iter, &dlm->reco.node_data) {
577 ndata = list_entry (iter, struct dlm_reco_node_data, list);
579 mlog(0, "checking recovery state of node %u\n",
581 switch (ndata->state) {
582 case DLM_RECO_NODE_DATA_INIT:
583 case DLM_RECO_NODE_DATA_REQUESTING:
584 mlog(ML_ERROR, "bad ndata state for "
585 "node %u: state=%d\n",
586 ndata->node_num, ndata->state);
589 case DLM_RECO_NODE_DATA_DEAD:
590 mlog(ML_NOTICE, "node %u died after "
591 "requesting recovery info for "
592 "node %u\n", ndata->node_num,
594 spin_unlock(&dlm_reco_state_lock);
598 /* instead of spinning like crazy here,
599 * wait for the domain map to catch up
600 * with the network state. otherwise this
601 * can be hit hundreds of times before
602 * the node is really seen as dead. */
603 wait_event_timeout(dlm->dlm_reco_thread_wq,
604 dlm_is_node_dead(dlm,
606 msecs_to_jiffies(1000));
607 mlog(0, "waited 1 sec for %u, "
608 "dead? %s\n", ndata->node_num,
609 dlm_is_node_dead(dlm, ndata->node_num) ?
612 case DLM_RECO_NODE_DATA_RECEIVING:
613 case DLM_RECO_NODE_DATA_REQUESTED:
614 mlog(0, "%s: node %u still in state %s\n",
615 dlm->name, ndata->node_num,
616 ndata->state==DLM_RECO_NODE_DATA_RECEIVING ?
617 "receiving" : "requested");
620 case DLM_RECO_NODE_DATA_DONE:
621 mlog(0, "%s: node %u state is done\n",
622 dlm->name, ndata->node_num);
624 case DLM_RECO_NODE_DATA_FINALIZE_SENT:
625 mlog(0, "%s: node %u state is finalize\n",
626 dlm->name, ndata->node_num);
630 spin_unlock(&dlm_reco_state_lock);
632 mlog(0, "pass #%d, all_nodes_done?: %s\n", ++pass,
633 all_nodes_done?"yes":"no");
634 if (all_nodes_done) {
637 /* all nodes are now in DLM_RECO_NODE_DATA_DONE state
638 * just send a finalize message to everyone and
640 mlog(0, "all nodes are done! send finalize\n");
641 ret = dlm_send_finalize_reco_message(dlm);
645 spin_lock(&dlm->spinlock);
646 dlm_finish_local_lockres_recovery(dlm, dead_node,
648 spin_unlock(&dlm->spinlock);
649 mlog(0, "should be done with recovery!\n");
651 mlog(0, "finishing recovery of %s at %lu, "
652 "dead=%u, this=%u, new=%u\n", dlm->name,
653 jiffies, dlm->reco.dead_node,
654 dlm->node_num, dlm->reco.new_master);
657 /* rescan everything marked dirty along the way */
658 dlm_kick_thread(dlm, NULL);
661 /* wait to be signalled, with periodic timeout
662 * to check for node death */
663 wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
664 kthread_should_stop(),
665 msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS));
671 dlm_destroy_recovery_area(dlm, dead_node);
677 static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
680 struct dlm_reco_node_data *ndata;
682 spin_lock(&dlm->spinlock);
683 memcpy(dlm->reco.node_map, dlm->domain_map, sizeof(dlm->domain_map));
684 /* nodes can only be removed (by dying) after dropping
685 * this lock, and death will be trapped later, so this should do */
686 spin_unlock(&dlm->spinlock);
689 num = find_next_bit (dlm->reco.node_map, O2NM_MAX_NODES, num);
690 if (num >= O2NM_MAX_NODES) {
693 BUG_ON(num == dead_node);
695 ndata = kcalloc(1, sizeof(*ndata), GFP_KERNEL);
697 dlm_destroy_recovery_area(dlm, dead_node);
700 ndata->node_num = num;
701 ndata->state = DLM_RECO_NODE_DATA_INIT;
702 spin_lock(&dlm_reco_state_lock);
703 list_add_tail(&ndata->list, &dlm->reco.node_data);
704 spin_unlock(&dlm_reco_state_lock);
711 static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
713 struct list_head *iter, *iter2;
714 struct dlm_reco_node_data *ndata;
717 spin_lock(&dlm_reco_state_lock);
718 list_splice_init(&dlm->reco.node_data, &tmplist);
719 spin_unlock(&dlm_reco_state_lock);
721 list_for_each_safe(iter, iter2, &tmplist) {
722 ndata = list_entry (iter, struct dlm_reco_node_data, list);
723 list_del_init(&ndata->list);
728 static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from,
731 struct dlm_lock_request lr;
737 mlog(0, "dlm_request_all_locks: dead node is %u, sending request "
738 "to %u\n", dead_node, request_from);
740 memset(&lr, 0, sizeof(lr));
741 lr.node_idx = dlm->node_num;
742 lr.dead_node = dead_node;
746 ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key,
747 &lr, sizeof(lr), request_from, NULL);
749 /* negative status is handled by caller */
753 // return from here, then
754 // sleep until all received or error
759 int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data)
761 struct dlm_ctxt *dlm = data;
762 struct dlm_lock_request *lr = (struct dlm_lock_request *)msg->buf;
764 struct dlm_work_item *item = NULL;
769 if (lr->dead_node != dlm->reco.dead_node) {
770 mlog(ML_ERROR, "%s: node %u sent dead_node=%u, but local "
771 "dead_node is %u\n", dlm->name, lr->node_idx,
772 lr->dead_node, dlm->reco.dead_node);
773 dlm_print_reco_node_status(dlm);
778 BUG_ON(lr->dead_node != dlm->reco.dead_node);
780 item = kcalloc(1, sizeof(*item), GFP_KERNEL);
786 /* this will get freed by dlm_request_all_locks_worker */
787 buf = (char *) __get_free_page(GFP_KERNEL);
794 /* queue up work for dlm_request_all_locks_worker */
795 dlm_grab(dlm); /* get an extra ref for the work item */
796 dlm_init_work_item(dlm, item, dlm_request_all_locks_worker, buf);
797 item->u.ral.reco_master = lr->node_idx;
798 item->u.ral.dead_node = lr->dead_node;
799 spin_lock(&dlm->work_lock);
800 list_add_tail(&item->list, &dlm->work_list);
801 spin_unlock(&dlm->work_lock);
802 schedule_work(&dlm->dispatched_work);
808 static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data)
810 struct dlm_migratable_lockres *mres;
811 struct dlm_lock_resource *res;
812 struct dlm_ctxt *dlm;
813 LIST_HEAD(resources);
814 struct list_head *iter;
816 u8 dead_node, reco_master;
817 int skip_all_done = 0;
819 dlm = item->dlm;
820 dead_node = item->u.ral.dead_node;
821 reco_master = item->u.ral.reco_master;
822 mres = (struct dlm_migratable_lockres *)data;
824 mlog(0, "%s: recovery worker started, dead=%u, master=%u\n",
825 dlm->name, dead_node, reco_master);
827 if (dead_node != dlm->reco.dead_node ||
828 reco_master != dlm->reco.new_master) {
829 /* show extra debug info if the recovery state is messed */
830 mlog(ML_ERROR, "%s: bad reco state: reco(dead=%u, master=%u), "
831 "request(dead=%u, master=%u)\n",
832 dlm->name, dlm->reco.dead_node, dlm->reco.new_master,
833 dead_node, reco_master);
834 mlog(ML_ERROR, "%s: name=%.*s master=%u locks=%u/%u flags=%u "
835 "entry[0]={c=%u:%llu,l=%u,f=%u,t=%d,ct=%d,hb=%d,n=%u}\n",
836 dlm->name, mres->lockname_len, mres->lockname, mres->master,
837 mres->num_locks, mres->total_locks, mres->flags,
838 dlm_get_lock_cookie_node(mres->ml[0].cookie),
839 dlm_get_lock_cookie_seq(mres->ml[0].cookie),
840 mres->ml[0].list, mres->ml[0].flags,
841 mres->ml[0].type, mres->ml[0].convert_type,
842 mres->ml[0].highest_blocked, mres->ml[0].node);
845 BUG_ON(dead_node != dlm->reco.dead_node);
846 BUG_ON(reco_master != dlm->reco.new_master);
848 /* lock resources should have already been moved to the
849 * dlm->reco.resources list. now move items from that list
850 * to a temp list if the dead owner matches. note that the
851 * whole cluster recovers only one node at a time, so we
852 * can safely move UNKNOWN lock resources for each recovery
854 dlm_move_reco_locks_to_list(dlm, &resources, dead_node);
856 /* now we can begin blasting lockreses without the dlm lock */
858 /* any errors returned will be due to the new_master dying,
859 * the dlm_reco_thread should detect this */
860 list_for_each(iter, &resources) {
861 res = list_entry (iter, struct dlm_lock_resource, recovering);
862 ret = dlm_send_one_lockres(dlm, res, mres, reco_master,
865 mlog(ML_ERROR, "%s: node %u went down while sending "
866 "recovery state for dead node %u, ret=%d\n", dlm->name,
867 reco_master, dead_node, ret);
873 /* move the resources back to the list */
874 spin_lock(&dlm->spinlock);
875 list_splice_init(&resources, &dlm->reco.resources);
876 spin_unlock(&dlm->spinlock);
878 if (!skip_all_done) {
879 ret = dlm_send_all_done_msg(dlm, dead_node, reco_master);
881 mlog(ML_ERROR, "%s: node %u went down while sending "
882 "recovery all-done for dead node %u, ret=%d\n",
883 dlm->name, reco_master, dead_node, ret);
887 free_page((unsigned long)data);
891 static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
894 struct dlm_reco_data_done done_msg;
896 memset(&done_msg, 0, sizeof(done_msg));
897 done_msg.node_idx = dlm->node_num;
898 done_msg.dead_node = dead_node;
899 mlog(0, "sending DATA DONE message to %u, "
900 "my node=%u, dead node=%u\n", send_to, done_msg.node_idx,
903 ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
904 sizeof(done_msg), send_to, &tmpret);
906 if (!dlm_is_host_down(ret)) {
908 mlog(ML_ERROR, "%s: unknown error sending data-done "
909 "to %u\n", dlm->name, send_to);
918 int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data)
920 struct dlm_ctxt *dlm = data;
921 struct dlm_reco_data_done *done = (struct dlm_reco_data_done *)msg->buf;
922 struct list_head *iter;
923 struct dlm_reco_node_data *ndata = NULL;
929 mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, "
930 "node_idx=%u, this node=%u\n", done->dead_node,
931 dlm->reco.dead_node, done->node_idx, dlm->node_num);
933 mlog_bug_on_msg((done->dead_node != dlm->reco.dead_node),
934 "Got DATA DONE: dead_node=%u, reco.dead_node=%u, "
935 "node_idx=%u, this node=%u\n", done->dead_node,
936 dlm->reco.dead_node, done->node_idx, dlm->node_num);
938 spin_lock(&dlm_reco_state_lock);
939 list_for_each(iter, &dlm->reco.node_data) {
940 ndata = list_entry (iter, struct dlm_reco_node_data, list);
941 if (ndata->node_num != done->node_idx)
944 switch (ndata->state) {
945 /* should have moved beyond INIT but not to FINALIZE yet */
946 case DLM_RECO_NODE_DATA_INIT:
947 case DLM_RECO_NODE_DATA_DEAD:
948 case DLM_RECO_NODE_DATA_FINALIZE_SENT:
949 mlog(ML_ERROR, "bad ndata state for node %u:"
950 " state=%d\n", ndata->node_num,
951 ndata->state);
954 /* these states are possible at this point, anywhere along
955 * the line of recovery */
956 case DLM_RECO_NODE_DATA_DONE:
957 case DLM_RECO_NODE_DATA_RECEIVING:
958 case DLM_RECO_NODE_DATA_REQUESTED:
959 case DLM_RECO_NODE_DATA_REQUESTING:
960 mlog(0, "node %u is DONE sending "
964 ndata->state = DLM_RECO_NODE_DATA_DONE;
969 spin_unlock(&dlm_reco_state_lock);
971 /* wake the recovery thread, some node is done */
973 dlm_kick_recovery_thread(dlm);
976 mlog(ML_ERROR, "failed to find recovery node data for node "
977 "%u\n", done->node_idx);
980 mlog(0, "leaving reco data done handler, ret=%d\n", ret);
984 static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
985 struct list_head *list,
988 struct dlm_lock_resource *res;
989 struct list_head *iter, *iter2;
990 struct dlm_lock *lock;
992 spin_lock(&dlm->spinlock);
993 list_for_each_safe(iter, iter2, &dlm->reco.resources) {
994 res = list_entry (iter, struct dlm_lock_resource, recovering);
995 /* always prune any $RECOVERY entries for dead nodes,
996 * otherwise hangs can occur during later recovery */
997 if (dlm_is_recovery_lock(res->lockname.name,
998 res->lockname.len)) {
999 spin_lock(&res->spinlock);
1000 list_for_each_entry(lock, &res->granted, list) {
1001 if (lock->ml.node == dead_node) {
1002 mlog(0, "AHA! there was "
1003 "a $RECOVERY lock for dead "
1005 dead_node, dlm->name);
1006 list_del_init(&lock->list);
1011 spin_unlock(&res->spinlock);
1015 if (res->owner == dead_node) {
1016 mlog(0, "found lockres owned by dead node while "
1017 "doing recovery for node %u. sending it.\n",
1019 list_move_tail(&res->recovering, list);
1020 } else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
1021 mlog(0, "found UNKNOWN owner while doing recovery "
1022 "for node %u. sending it.\n", dead_node);
1023 list_move_tail(&res->recovering, list);
1026 spin_unlock(&dlm->spinlock);
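/* note: this helper (and dlm_list_num_to_pointer() below) walks a
 * list_head pointer across the granted, converting, and blocked
 * queues, so those three heads must stay adjacent in
 * struct dlm_lock_resource */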
1029 static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res)
1030 {
1031 int total_locks = 0;
1032 struct list_head *iter, *queue = &res->granted;
1033 int i;
1035 for (i=0; i<3; i++) {
1036 list_for_each(iter, queue)
1037 total_locks++;
1038 queue++;
1039 }
1040 return total_locks;
1041 }
1044 static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
1045 struct dlm_migratable_lockres *mres,
1046 u8 send_to,
1047 struct dlm_lock_resource *res,
1048 int total_locks)
1049 {
1050 u64 mig_cookie = be64_to_cpu(mres->mig_cookie);
1051 int mres_total_locks = be32_to_cpu(mres->total_locks);
1052 int sz, ret = 0, status = 0;
1053 u8 orig_flags = mres->flags,
1054 orig_master = mres->master;
1056 BUG_ON(mres->num_locks > DLM_MAX_MIGRATABLE_LOCKS);
1057 if (!mres->num_locks)
1060 sz = sizeof(struct dlm_migratable_lockres) +
1061 (mres->num_locks * sizeof(struct dlm_migratable_lock));
1063 /* add an all-done flag if we reached the last lock */
1064 orig_flags = mres->flags;
1065 BUG_ON(total_locks > mres_total_locks);
1066 if (total_locks == mres_total_locks)
1067 mres->flags |= DLM_MRES_ALL_DONE;
1070 ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres,
1071 sz, send_to, &status);
1073 /* XXX: negative status is not handled.
1074 * this will end up killing this node. */
1077 /* might get an -ENOMEM back here */
1082 if (ret == -EFAULT) {
1083 mlog(ML_ERROR, "node %u told me to kill "
1084 "myself!\n", send_to);
1090 /* zero and reinit the message buffer */
1091 dlm_init_migratable_lockres(mres, res->lockname.name,
1092 res->lockname.len, mres_total_locks,
1093 mig_cookie, orig_flags, orig_master);
1097 static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
1098 const char *lockname, int namelen,
1099 int total_locks, u64 cookie,
1100 u8 flags, u8 master)
1102 /* mres here is one full page */
1103 memset(mres, 0, PAGE_SIZE);
1104 mres->lockname_len = namelen;
1105 memcpy(mres->lockname, lockname, namelen);
1106 mres->num_locks = 0;
1107 mres->total_locks = cpu_to_be32(total_locks);
1108 mres->mig_cookie = cpu_to_be64(cookie);
1109 mres->flags = flags;
1110 mres->master = master;
1114 /* returns 1 if this lock fills the network structure,
1115 * 0 otherwise */
1116 static int dlm_add_lock_to_array(struct dlm_lock *lock,
1117 struct dlm_migratable_lockres *mres, int queue)
1119 struct dlm_migratable_lock *ml;
1120 int lock_num = mres->num_locks;
1122 ml = &(mres->ml[lock_num]);
1123 ml->cookie = lock->ml.cookie;
1124 ml->type = lock->ml.type;
1125 ml->convert_type = lock->ml.convert_type;
1126 ml->highest_blocked = lock->ml.highest_blocked;
1129 ml->flags = lock->lksb->flags;
1130 /* send our current lvb */
1131 if (ml->type == LKM_EXMODE ||
1132 ml->type == LKM_PRMODE) {
1133 /* if it is already set, this had better be a PR
1134 * and it has to match */
1135 if (!dlm_lvb_is_empty(mres->lvb) &&
1136 (ml->type == LKM_EXMODE ||
1137 memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))) {
1138 mlog(ML_ERROR, "mismatched lvbs!\n");
1139 __dlm_print_one_lock_resource(lock->lockres);
1142 memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
1145 ml->node = lock->ml.node;
1147 /* we reached the max, send this network message */
1148 if (mres->num_locks == DLM_MAX_MIGRATABLE_LOCKS)
1154 int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
1155 struct dlm_migratable_lockres *mres,
1156 u8 send_to, u8 flags)
1158 struct list_head *queue, *iter;
1161 struct dlm_lock *lock;
1164 BUG_ON(!(flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));
1166 mlog(0, "sending to %u\n", send_to);
1168 total_locks = dlm_num_locks_in_lockres(res);
1169 if (total_locks > DLM_MAX_MIGRATABLE_LOCKS) {
1170 /* rare, but possible */
1171 mlog(0, "argh. lockres has %d locks. this will "
1172 "require more than one network packet to "
1173 "migrate\n", total_locks);
1174 mig_cookie = dlm_get_next_mig_cookie();
1177 dlm_init_migratable_lockres(mres, res->lockname.name,
1178 res->lockname.len, total_locks,
1179 mig_cookie, flags, res->owner);
1182 for (i=DLM_GRANTED_LIST; i<=DLM_BLOCKED_LIST; i++) {
1183 queue = dlm_list_idx_to_ptr(res, i);
1184 list_for_each(iter, queue) {
1185 lock = list_entry (iter, struct dlm_lock, list);
1187 /* add another lock. */
1189 if (!dlm_add_lock_to_array(lock, mres, i))
1192 /* this filled the lock message,
1193 * we must send it immediately. */
1194 ret = dlm_send_mig_lockres_msg(dlm, mres, send_to,
1200 /* flush any remaining locks */
1201 ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks);
1207 mlog(ML_ERROR, "%s: dlm_send_mig_lockres_msg returned %d\n",
1209 if (!dlm_is_host_down(ret))
1211 mlog(0, "%s: node %u went down while sending %s "
1212 "lockres %.*s\n", dlm->name, send_to,
1213 flags & DLM_MRES_RECOVERY ? "recovery" : "migration",
1214 res->lockname.len, res->lockname.name);
1220 /*
1221 * this message will contain no more than one page worth of
1222 * recovery data, and it will work on only one lockres.
1223 * there may be many locks in this page, and we may need to wait
1224 * for additional packets to complete all the locks (rare, but
1225 * possible).
1228 * NOTE: the allocation error cases here are scary
1229 * we really cannot afford to fail an alloc in recovery
1230 * do we spin? returning an error only delays the problem really
1231 */
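/* rough arithmetic (illustrative): with the page-sized message above,
 * a lockres carrying N locks arrives in about
 * DIV_ROUND_UP(N, DLM_MAX_MIGRATABLE_LOCKS) packets, and the sender
 * sets DLM_MRES_ALL_DONE on the packet that carries the last lock */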
1233 int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data)
1235 struct dlm_ctxt *dlm = data;
1236 struct dlm_migratable_lockres *mres =
1237 (struct dlm_migratable_lockres *)msg->buf;
1241 struct dlm_work_item *item = NULL;
1242 struct dlm_lock_resource *res = NULL;
1247 BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));
1249 real_master = mres->master;
1250 if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
1251 /* cannot migrate a lockres with no master */
1252 BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
1255 mlog(0, "%s message received from node %u\n",
1256 (mres->flags & DLM_MRES_RECOVERY) ?
1257 "recovery" : "migration", mres->master);
1258 if (mres->flags & DLM_MRES_ALL_DONE)
1259 mlog(0, "all done flag. all lockres data received!\n");
1262 buf = kmalloc(be16_to_cpu(msg->data_len), GFP_KERNEL);
1263 item = kcalloc(1, sizeof(*item), GFP_KERNEL);
1267 /* lookup the lock to see if we have a secondary queue for this
1268 * already... just add the locks in and this will have its owner
1269 * and RECOVERY flag changed when it completes. */
1270 res = dlm_lookup_lockres(dlm, mres->lockname, mres->lockname_len);
1272 /* this will get a ref on res */
1273 /* mark it as recovering/migrating and hash it */
1274 spin_lock(&res->spinlock);
1275 if (mres->flags & DLM_MRES_RECOVERY) {
1276 res->state |= DLM_LOCK_RES_RECOVERING;
1278 if (res->state & DLM_LOCK_RES_MIGRATING) {
1279 /* this is at least the second
1280 * lockres message */
1281 mlog(0, "lock %.*s is already migrating\n",
1284 } else if (res->state & DLM_LOCK_RES_RECOVERING) {
1285 /* caller should BUG */
1286 mlog(ML_ERROR, "node is attempting to migrate "
1287 "lock %.*s, but marked as recovering!\n",
1288 mres->lockname_len, mres->lockname);
1290 spin_unlock(&res->spinlock);
1293 res->state |= DLM_LOCK_RES_MIGRATING;
1295 spin_unlock(&res->spinlock);
1297 /* need to allocate, just like if it was
1298 * mastered here normally */
1299 res = dlm_new_lockres(dlm, mres->lockname, mres->lockname_len);
1303 /* to match the ref that we would have gotten if
1304 * dlm_lookup_lockres had succeeded */
1305 dlm_lockres_get(res);
1307 /* mark it as recovering/migrating and hash it */
1308 if (mres->flags & DLM_MRES_RECOVERY)
1309 res->state |= DLM_LOCK_RES_RECOVERING;
1311 res->state |= DLM_LOCK_RES_MIGRATING;
1313 spin_lock(&dlm->spinlock);
1314 __dlm_insert_lockres(dlm, res);
1315 spin_unlock(&dlm->spinlock);
1317 /* now that the new lockres is inserted,
1318 * make it usable by other processes */
1319 spin_lock(&res->spinlock);
1320 res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
1321 spin_unlock(&res->spinlock);
1323 /* add an extra ref for just-allocated lockres
1324 * otherwise the lockres will be purged immediately */
1325 dlm_lockres_get(res);
1329 /* at this point we have allocated everything we need,
1330 * and we have a hashed lockres with an extra ref and
1331 * the proper res->state flags. */
1333 if (mres->master == DLM_LOCK_RES_OWNER_UNKNOWN) {
1334 /* migration cannot have an unknown master */
1335 BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
1336 mlog(0, "recovery has passed me a lockres with an "
1337 "unknown owner.. will need to requery: "
1338 "%.*s\n", mres->lockname_len, mres->lockname);
1340 spin_lock(&res->spinlock);
1341 dlm_change_lockres_owner(dlm, res, dlm->node_num);
1342 spin_unlock(&res->spinlock);
1345 /* queue up work for dlm_mig_lockres_worker */
1346 dlm_grab(dlm); /* get an extra ref for the work item */
1347 memcpy(buf, msg->buf, be16_to_cpu(msg->data_len)); /* copy the whole message */
1348 dlm_init_work_item(dlm, item, dlm_mig_lockres_worker, buf);
1349 item->u.ml.lockres = res; /* already have a ref */
1350 item->u.ml.real_master = real_master;
1351 spin_lock(&dlm->work_lock);
1352 list_add_tail(&item->list, &dlm->work_list);
1353 spin_unlock(&dlm->work_lock);
1354 schedule_work(&dlm->dispatched_work);
1370 static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data)
1372 struct dlm_ctxt *dlm;
1373 struct dlm_migratable_lockres *mres;
1374 int ret = 0;
1375 struct dlm_lock_resource *res;
1376 u8 real_master;
1378 dlm = item->dlm;
1379 mres = (struct dlm_migratable_lockres *)data;
1381 res = item->u.ml.lockres;
1382 real_master = item->u.ml.real_master;
1384 if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
1385 /* this case is super-rare. only occurs if
1386 * node death happens during migration. */
1388 ret = dlm_lockres_master_requery(dlm, res, &real_master);
1390 mlog(0, "dlm_lockres_master_requery ret=%d\n",
1394 if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
1395 mlog(0, "lockres %.*s not claimed. "
1396 "this node will take it.\n",
1397 res->lockname.len, res->lockname.name);
1399 mlog(0, "master needs to respond to sender "
1400 "that node %u still owns %.*s\n",
1401 real_master, res->lockname.len,
1402 res->lockname.name);
1403 /* cannot touch this lockres */
1408 ret = dlm_process_recovery_data(dlm, res, mres);
1410 mlog(0, "dlm_process_recovery_data returned %d\n", ret);
1412 mlog(0, "dlm_process_recovery_data succeeded\n");
1414 if ((mres->flags & (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) ==
1415 (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) {
1416 ret = dlm_finish_migration(dlm, res, mres->master);
1428 int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
1429 struct dlm_lock_resource *res, u8 *real_master)
1431 struct dlm_node_iter iter;
1435 *real_master = DLM_LOCK_RES_OWNER_UNKNOWN;
1437 /* we only reach here if one of the two nodes in a
1438 * migration died while the migration was in progress.
1439 * at this point we need to requery the master. we
1440 * know that the new_master got as far as creating
1441 * an mle on at least one node, but we do not know
1442 * if any nodes had actually cleared the mle and set
1443 * the master to the new_master. the old master
1444 * is supposed to set the owner to UNKNOWN in the
1445 * event of a new_master death, so the only possible
1446 * responses that we can get from nodes here are
1447 * that the master is new_master, or that the master
1448 * is UNKNOWN.
1449 * if all nodes come back with UNKNOWN then we know
1450 * the lock needs remastering here.
1451 * if any node comes back with a valid master, check
1452 * to see if that master is the one that we are
1453 * recovering. if so, then the new_master died and
1454 * we need to remaster this lock. if not, then the
1455 * new_master survived and that node will respond to
1456 * other nodes about the owner.
1457 * if there is an owner, this node needs to dump this
1458 * lockres and alert the sender that this lockres
1459 * is gone. */
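	/* summary of the possible answers, per the comment above:
	 *   everyone says UNKNOWN             -> remaster the lock here
	 *   someone names the dead new_master -> remaster the lock here
	 *   someone names a live owner        -> dump this lockres and
	 *                                        defer to that owner */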
1460 spin_lock(&dlm->spinlock);
1461 dlm_node_iter_init(dlm->domain_map, &iter);
1462 spin_unlock(&dlm->spinlock);
1464 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
1465 /* do not send to self */
1466 if (nodenum == dlm->node_num)
1468 ret = dlm_do_master_requery(dlm, res, nodenum, real_master);
1471 if (!dlm_is_host_down(ret))
1473 /* host is down, so answer for that node would be
1474 * DLM_LOCK_RES_OWNER_UNKNOWN. continue. */
1476 if (*real_master != DLM_LOCK_RES_OWNER_UNKNOWN) {
1477 mlog(0, "lock master is %u\n", *real_master);
1485 int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
1486 u8 nodenum, u8 *real_master)
1489 struct dlm_master_requery req;
1490 int status = DLM_LOCK_RES_OWNER_UNKNOWN;
1492 memset(&req, 0, sizeof(req));
1493 req.node_idx = dlm->node_num;
1494 req.namelen = res->lockname.len;
1495 memcpy(req.name, res->lockname.name, res->lockname.len);
1497 ret = o2net_send_message(DLM_MASTER_REQUERY_MSG, dlm->key,
1498 &req, sizeof(req), nodenum, &status);
1499 /* XXX: negative status not handled properly here. */
1504 BUG_ON(status > DLM_LOCK_RES_OWNER_UNKNOWN);
1505 *real_master = (u8) (status & 0xff);
1506 mlog(0, "node %u responded to master requery with %u\n",
1507 nodenum, *real_master);
1514 /* this function cannot error, so unless the sending
1515 * or receiving of the message failed, the owner can
1516 * be trusted */
1517 int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data)
1519 struct dlm_ctxt *dlm = data;
1520 struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf;
1521 struct dlm_lock_resource *res = NULL;
1523 int master = DLM_LOCK_RES_OWNER_UNKNOWN;
1524 u32 flags = DLM_ASSERT_MASTER_REQUERY;
1526 if (!dlm_grab(dlm)) {
1527 /* since the domain has gone away on this
1528 * node, the proper response is UNKNOWN */
1532 hash = dlm_lockid_hash(req->name, req->namelen);
1534 spin_lock(&dlm->spinlock);
1535 res = __dlm_lookup_lockres(dlm, req->name, req->namelen, hash);
1537 spin_lock(&res->spinlock);
1538 master = res->owner;
1539 if (master == dlm->node_num) {
1540 int ret = dlm_dispatch_assert_master(dlm, res,
1543 mlog_errno(-ENOMEM);
1548 spin_unlock(&res->spinlock);
1550 spin_unlock(&dlm->spinlock);
1556 static inline struct list_head *
1557 dlm_list_num_to_pointer(struct dlm_lock_resource *res, int list_num)
1558 {
1559 struct list_head *ret;
1560 BUG_ON(list_num < 0);
1561 BUG_ON(list_num > 2);
1562 ret = &(res->granted);
1563 ret += list_num;
1564 return ret;
1565 }
1566 /* TODO: do ast flush business
1567 * TODO: do MIGRATING and RECOVERING spinning
1568 */
1570 /*
1571 * NOTE about in-flight requests during migration:
1573 * Before attempting the migrate, the master has marked the lockres as
1574 * MIGRATING and then flushed all of its pending ASTs. So any in-flight
1575 * requests either got queued before the MIGRATING flag got set, in which
1576 * case the lock data will reflect the change and a return message is on
1577 * the way, or the request failed to get in before MIGRATING got set. In
1578 * this case, the caller will be told to spin and wait for the MIGRATING
1579 * flag to be dropped, then recheck the master.
1580 * This holds true for the convert, cancel and unlock cases, and since lvb
1581 * updates are tied to these same messages, it applies to lvb updates as
1582 * well. For the lock case, there is no way a lock can be on the master
1583 * queue and not be on the secondary queue since the lock is always added
1584 * locally first. This means that the new target node will never be sent
1585 * a lock that it doesn't already have on the list.
1586 * In total, this means that the local lock is correct and should not be
1587 * updated to match the one sent by the master. Any messages sent back
1588 * from the master before the MIGRATING flag will bring the lock properly
1589 * up-to-date, and the change will be ordered properly for the waiter.
1590 * We will *not* attempt to modify the lock underneath the waiter.
1591 */
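/* concrete example: a convert that arrives after MIGRATING is set is
 * bounced back to the caller, which spins until the flag drops and
 * then rechecks the master; the lock state already sent to the new
 * target is never patched up mid-flight */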
1593 static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
1594 struct dlm_lock_resource *res,
1595 struct dlm_migratable_lockres *mres)
1597 struct dlm_migratable_lock *ml;
1598 struct list_head *queue;
1599 struct dlm_lock *newlock = NULL;
1600 struct dlm_lockstatus *lksb = NULL;
1603 struct list_head *iter;
1604 struct dlm_lock *lock = NULL;
1606 mlog(0, "running %d locks for this lockres\n", mres->num_locks);
1607 for (i=0; i<mres->num_locks; i++) {
1608 ml = &(mres->ml[i]);
1609 BUG_ON(ml->highest_blocked != LKM_IVMODE);
1613 queue = dlm_list_num_to_pointer(res, ml->list);
1615 /* if the lock is for the local node it needs to
1616 * be moved to the proper location within the queue.
1617 * do not allocate a new lock structure. */
1618 if (ml->node == dlm->node_num) {
1619 /* MIGRATION ONLY! */
1620 BUG_ON(!(mres->flags & DLM_MRES_MIGRATION));
1622 spin_lock(&res->spinlock);
1623 list_for_each(iter, queue) {
1624 lock = list_entry (iter, struct dlm_lock, list);
1625 if (lock->ml.cookie != ml->cookie)
1631 /* lock is always created locally first, and
1632 * destroyed locally last. it must be on the list */
1635 mlog(ML_ERROR, "could not find local lock "
1636 "with cookie %u:%llu!\n",
1637 dlm_get_lock_cookie_node(c),
1638 dlm_get_lock_cookie_seq(c));
1641 BUG_ON(lock->ml.node != ml->node);
1643 /* see NOTE above about why we do not update
1644 * to match the master here */
1646 /* move the lock to its proper place */
1647 /* do not alter lock refcount. switching lists. */
1648 list_move_tail(&lock->list, queue);
1649 spin_unlock(&res->spinlock);
1651 mlog(0, "just reordered a local lock!\n");
1655 /* lock is for another node. */
1656 newlock = dlm_new_lock(ml->type, ml->node,
1657 be64_to_cpu(ml->cookie), NULL);
1662 lksb = newlock->lksb;
1663 dlm_lock_attach_lockres(newlock, res);
1665 if (ml->convert_type != LKM_IVMODE) {
1666 BUG_ON(queue != &res->converting);
1667 newlock->ml.convert_type = ml->convert_type;
1669 lksb->flags |= (ml->flags &
1670 (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));
1672 if (!dlm_lvb_is_empty(mres->lvb)) {
1673 if (lksb->flags & DLM_LKSB_PUT_LVB) {
1674 /* other node was trying to update
1675 * lvb when node died. recreate the
1676 * lksb with the updated lvb. */
1677 memcpy(lksb->lvb, mres->lvb, DLM_LVB_LEN);
1679 /* otherwise, the node is sending its
1680 * most recent valid lvb info */
1681 BUG_ON(ml->type != LKM_EXMODE &&
1682 ml->type != LKM_PRMODE);
1683 if (!dlm_lvb_is_empty(res->lvb) &&
1684 (ml->type == LKM_EXMODE ||
1685 memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) {
1686 mlog(ML_ERROR, "received bad lvb!\n");
1687 __dlm_print_one_lock_resource(res);
1690 memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
1696 * wrt lock queue ordering and recovery:
1697 * 1. order of locks on granted queue is
1699 * 2. order of locks on converting queue is
1700 * LOST with the node death. sorry charlie.
1701 * 3. order of locks on the blocked queue is
1703 * order of locks does not affect integrity, it
1704 * just means that a lock request may get pushed
1705 * back in line as a result of the node death.
1706 * also note that for a given node the lock order
1707 * for its secondary queue locks is preserved
1708 * relative to each other, but clearly *not*
1709 * preserved relative to locks from other nodes.
1710 */
1712 spin_lock(&res->spinlock);
1713 list_for_each_entry(lock, queue, list) {
1714 if (lock->ml.cookie == ml->cookie) {
1715 u64 c = lock->ml.cookie;
1716 mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already "
1717 "exists on this lockres!\n", dlm->name,
1718 res->lockname.len, res->lockname.name,
1719 dlm_get_lock_cookie_node(c),
1720 dlm_get_lock_cookie_seq(c));
1722 mlog(ML_NOTICE, "sent lock: type=%d, conv=%d, "
1723 "node=%u, cookie=%u:%llu, queue=%d\n",
1724 ml->type, ml->convert_type, ml->node,
1725 dlm_get_lock_cookie_node(ml->cookie),
1726 dlm_get_lock_cookie_seq(ml->cookie),
1729 __dlm_print_one_lock_resource(res);
1735 dlm_lock_get(newlock);
1736 list_add_tail(&newlock->list, queue);
1738 spin_unlock(&res->spinlock);
1740 mlog(0, "done running all the locks\n");
1746 dlm_lock_put(newlock);
1753 void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
1754 struct dlm_lock_resource *res)
1757 struct list_head *queue, *iter, *iter2;
1758 struct dlm_lock *lock;
1760 res->state |= DLM_LOCK_RES_RECOVERING;
1761 if (!list_empty(&res->recovering))
1762 list_del_init(&res->recovering);
1763 list_add_tail(&res->recovering, &dlm->reco.resources);
1765 /* find any pending locks and put them back on proper list */
1766 for (i=DLM_BLOCKED_LIST; i>=DLM_GRANTED_LIST; i--) {
1767 queue = dlm_list_idx_to_ptr(res, i);
1768 list_for_each_safe(iter, iter2, queue) {
1769 lock = list_entry (iter, struct dlm_lock, list);
1771 if (lock->convert_pending) {
1772 /* move converting lock back to granted */
1773 BUG_ON(i != DLM_CONVERTING_LIST);
1774 mlog(0, "node died with convert pending "
1775 "on %.*s. move back to granted list.\n",
1776 res->lockname.len, res->lockname.name);
1777 dlm_revert_pending_convert(res, lock);
1778 lock->convert_pending = 0;
1779 } else if (lock->lock_pending) {
1780 /* remove pending lock requests completely */
1781 BUG_ON(i != DLM_BLOCKED_LIST);
1782 mlog(0, "node died with lock pending "
1783 "on %.*s. remove from blocked list and skip.\n",
1784 res->lockname.len, res->lockname.name);
1785 /* lock will be floating until ref in
1786 * dlmlock_remote is freed after the network
1787 * call returns. ok for it to not be on any
1788 * list since no ast can be called
1789 * (the master is dead). */
1790 dlm_revert_pending_lock(res, lock);
1791 lock->lock_pending = 0;
1792 } else if (lock->unlock_pending) {
1793 /* if an unlock was in progress, treat as
1794 * if this had completed successfully
1795 * before sending this lock state to the
1796 * new master. note that the dlm_unlock
1797 * call is still responsible for calling
1798 * the unlockast. that will happen after
1799 * the network call times out. for now,
1800 * just move lists to prepare the new
1801 * recovery master. */
1802 BUG_ON(i != DLM_GRANTED_LIST);
1803 mlog(0, "node died with unlock pending "
1804 "on %.*s. remove from blocked list and skip.\n",
1805 res->lockname.len, res->lockname.name);
1806 dlm_commit_pending_unlock(res, lock);
1807 lock->unlock_pending = 0;
1808 } else if (lock->cancel_pending) {
1809 /* if a cancel was in progress, treat as
1810 * if this had completed successfully
1811 * before sending this lock state to the
1813 BUG_ON(i != DLM_CONVERTING_LIST);
1814 mlog(0, "node died with cancel pending "
1815 "on %.*s. move back to granted list.\n",
1816 res->lockname.len, res->lockname.name);
1817 dlm_commit_pending_cancel(res, lock);
1818 lock->cancel_pending = 0;
1827 /* removes all recovered locks from the recovery list.
1828 * sets the res->owner to the new master.
1829 * unsets the RECOVERY flag and wakes waiters. */
1830 static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
1831 u8 dead_node, u8 new_master)
1834 struct list_head *iter, *iter2;
1835 struct hlist_node *hash_iter;
1836 struct hlist_head *bucket;
1838 struct dlm_lock_resource *res;
1842 assert_spin_locked(&dlm->spinlock);
1844 list_for_each_safe(iter, iter2, &dlm->reco.resources) {
1845 res = list_entry (iter, struct dlm_lock_resource, recovering);
1846 if (res->owner == dead_node) {
1847 list_del_init(&res->recovering);
1848 spin_lock(&res->spinlock);
1849 dlm_change_lockres_owner(dlm, res, new_master);
1850 res->state &= ~DLM_LOCK_RES_RECOVERING;
1851 __dlm_dirty_lockres(dlm, res);
1852 spin_unlock(&res->spinlock);
1857 /* this will become unnecessary eventually, but
1858 * for now we need to run the whole hash, clear
1859 * the RECOVERING state and set the owner
1861 for (i = 0; i < DLM_HASH_BUCKETS; i++) {
1862 bucket = dlm_lockres_hash(dlm, i);
1863 hlist_for_each_entry(res, hash_iter, bucket, hash_node) {
1864 if (res->state & DLM_LOCK_RES_RECOVERING) {
1865 if (res->owner == dead_node) {
1866 mlog(0, "(this=%u) res %.*s owner=%u "
1867 "was not on recovering list, but "
1868 "clearing state anyway\n",
1869 dlm->node_num, res->lockname.len,
1870 res->lockname.name, new_master);
1871 } else if (res->owner == dlm->node_num) {
1872 mlog(0, "(this=%u) res %.*s owner=%u "
1873 "was not on recovering list, "
1874 "owner is THIS node, clearing\n",
1875 dlm->node_num, res->lockname.len,
1876 res->lockname.name, new_master);
1880 if (!list_empty(&res->recovering)) {
1881 mlog(0, "%s:%.*s: lockres was "
1882 "marked RECOVERING, owner=%u\n",
1883 dlm->name, res->lockname.len,
1884 res->lockname.name, res->owner);
1885 list_del_init(&res->recovering);
1887 spin_lock(&res->spinlock);
1888 dlm_change_lockres_owner(dlm, res, new_master);
1889 res->state &= ~DLM_LOCK_RES_RECOVERING;
1890 __dlm_dirty_lockres(dlm, res);
1891 spin_unlock(&res->spinlock);
1898 static inline int dlm_lvb_needs_invalidation(struct dlm_lock *lock, int local)
1899 {
1900 if (local) {
1901 if (lock->ml.type != LKM_EXMODE &&
1902 lock->ml.type != LKM_PRMODE)
1903 return 1;
1904 } else if (lock->ml.type == LKM_EXMODE)
1905 return 1;
1906 return 0;
1907 }
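/* rationale, per the comments in dlm_revalidate_lvb() below: on the
 * owning node the lvb is blanked if the dead node held EX (it may
 * have written a newer value); on a secondary node the lvb is kept
 * only if we ourselves hold EX or PR, the only levels that guarantee
 * our cached copy is current */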
1909 static void dlm_revalidate_lvb(struct dlm_ctxt *dlm,
1910 struct dlm_lock_resource *res, u8 dead_node)
1912 struct list_head *iter, *queue;
1913 struct dlm_lock *lock;
1914 int blank_lvb = 0, local = 0;
1918 assert_spin_locked(&dlm->spinlock);
1919 assert_spin_locked(&res->spinlock);
1921 if (res->owner == dlm->node_num)
1922 /* if this node owned the lockres, and if the dead node
1923 * had an EX when it died, blank out the lvb */
1924 search_node = dead_node;
1926 /* if this is a secondary lockres, and we had no EX or PR
1927 * locks granted, we can no longer trust the lvb */
1928 search_node = dlm->node_num;
1929 local = 1; /* check local state for valid lvb */
1932 for (i=DLM_GRANTED_LIST; i<=DLM_CONVERTING_LIST; i++) {
1933 queue = dlm_list_idx_to_ptr(res, i);
1934 list_for_each(iter, queue) {
1935 lock = list_entry (iter, struct dlm_lock, list);
1936 if (lock->ml.node == search_node) {
1937 if (dlm_lvb_needs_invalidation(lock, local)) {
1938 /* zero the lksb lvb and lockres lvb */
1940 memset(lock->lksb->lvb, 0, DLM_LVB_LEN);
1947 mlog(0, "clearing %.*s lvb, dead node %u had EX\n",
1948 res->lockname.len, res->lockname.name, dead_node);
1949 memset(res->lvb, 0, DLM_LVB_LEN);
1953 static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
1954 struct dlm_lock_resource *res, u8 dead_node)
1956 struct list_head *iter, *tmpiter;
1957 struct dlm_lock *lock;
1959 /* this node is the lockres master:
1960 * 1) remove any stale locks for the dead node
1961 * 2) if the dead node had an EX when it died, blank out the lvb
1963 assert_spin_locked(&dlm->spinlock);
1964 assert_spin_locked(&res->spinlock);
1966 /* TODO: check pending_asts, pending_basts here */
1967 list_for_each_safe(iter, tmpiter, &res->granted) {
1968 lock = list_entry (iter, struct dlm_lock, list);
1969 if (lock->ml.node == dead_node) {
1970 list_del_init(&lock->list);
1974 list_for_each_safe(iter, tmpiter, &res->converting) {
1975 lock = list_entry (iter, struct dlm_lock, list);
1976 if (lock->ml.node == dead_node) {
1977 list_del_init(&lock->list);
1981 list_for_each_safe(iter, tmpiter, &res->blocked) {
1982 lock = list_entry (iter, struct dlm_lock, list);
1983 if (lock->ml.node == dead_node) {
1984 list_del_init(&lock->list);
1989 /* do not kick thread yet */
1990 __dlm_dirty_lockres(dlm, res);
1993 /* if this node is the recovery master, and there are no
1994 * locks for a given lockres owned by this node that are in
1995 * either PR or EX mode, zero out the lvb before requesting.
2000 static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
2002 struct hlist_node *iter;
2003 struct dlm_lock_resource *res;
2005 struct hlist_head *bucket;
2006 struct dlm_lock *lock;
2009 /* purge any stale mles */
2010 dlm_clean_master_list(dlm, dead_node);
2013 * now clean up all lock resources. there are two rules:
2015 * 1) if the dead node was the master, move the lockres
2016 * to the recovering list. set the RECOVERING flag.
2017 * this lockres needs to be cleaned up before it can
2020 * 2) if this node was the master, remove all locks from
2021 * each of the lockres queues that were owned by the
2022 * dead node. once recovery finishes, the dlm thread
2023 * can be kicked again to see if any ASTs or BASTs
2024 * need to be fired as a result.
2026 for (i = 0; i < DLM_HASH_BUCKETS; i++) {
2027 bucket = dlm_lockres_hash(dlm, i);
2028 hlist_for_each_entry(res, iter, bucket, hash_node) {
2029 /* always prune any $RECOVERY entries for dead nodes,
2030 * otherwise hangs can occur during later recovery */
2031 if (dlm_is_recovery_lock(res->lockname.name,
2032 res->lockname.len)) {
2033 spin_lock(&res->spinlock);
2034 list_for_each_entry(lock, &res->granted, list) {
2035 if (lock->ml.node == dead_node) {
2036 mlog(0, "AHA! there was "
2037 "a $RECOVERY lock for dead "
2039 dead_node, dlm->name);
2040 list_del_init(&lock->list);
2045 spin_unlock(&res->spinlock);
2048 spin_lock(&res->spinlock);
2049 /* zero the lvb if necessary */
2050 dlm_revalidate_lvb(dlm, res, dead_node);
2051 if (res->owner == dead_node)
2052 dlm_move_lockres_to_recovery_list(dlm, res);
2053 else if (res->owner == dlm->node_num) {
2054 dlm_free_dead_locks(dlm, res, dead_node);
2055 __dlm_lockres_calc_usage(dlm, res);
2057 spin_unlock(&res->spinlock);
static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx)
{
	assert_spin_locked(&dlm->spinlock);

	/* check to see if the node is already considered dead */
	if (!test_bit(idx, dlm->live_nodes_map)) {
		mlog(0, "for domain %s, node %d is already dead. "
		     "another node likely did recovery already.\n",
		     dlm->name, idx);
		return;
	}

	/* check to see if we do not care about this node */
	if (!test_bit(idx, dlm->domain_map)) {
		/* This also catches the case that we get a node down
		 * but haven't joined the domain yet. */
		mlog(0, "node %u already removed from domain!\n", idx);
		return;
	}

	clear_bit(idx, dlm->live_nodes_map);

	/* Clean up join state on node death. */
	if (dlm->joining_node == idx) {
		mlog(0, "Clearing join state for node %u\n", idx);
		__dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
	}

	/* make sure local cleanup occurs before the heartbeat events */
	if (!test_bit(idx, dlm->recovery_map))
		dlm_do_local_recovery_cleanup(dlm, idx);

	/* notify anything attached to the heartbeat events */
	dlm_hb_event_notify_attached(dlm, idx, 0);

	mlog(0, "node %u being removed from domain map!\n", idx);
	clear_bit(idx, dlm->domain_map);

	/* wake up migration waiters if a node goes down.
	 * perhaps later we can genericize this for other waiters. */
	wake_up(&dlm->migration_wq);

	if (test_bit(idx, dlm->recovery_map))
		mlog(0, "domain %s, node %u already added "
		     "to recovery map!\n", dlm->name, idx);
	else
		set_bit(idx, dlm->recovery_map);
}
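/*
 * the ordering in __dlm_hb_node_down is deliberate: local cleanup
 * runs before dlm_hb_event_notify_attached() so that anything the
 * heartbeat callbacks wake up already sees the dead node's locks
 * removed and its mastered lockres entries marked RECOVERING.
 * only then is the node dropped from domain_map and added to
 * recovery_map, which is the state the recovery thread acts on.
 */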
void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data)
{
	struct dlm_ctxt *dlm = data;

	if (!dlm_grab(dlm))
		return;

	spin_lock(&dlm->spinlock);
	__dlm_hb_node_down(dlm, idx);
	spin_unlock(&dlm->spinlock);

	dlm_put(dlm);
}
void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data)
{
	struct dlm_ctxt *dlm = data;

	if (!dlm_grab(dlm))
		return;

	spin_lock(&dlm->spinlock);
	set_bit(idx, dlm->live_nodes_map);
	/* do NOT notify mle attached to the heartbeat events.
	 * new nodes are not interested in mastery until joined. */
	spin_unlock(&dlm->spinlock);

	dlm_put(dlm);
}
static void dlm_reco_ast(void *astdata)
{
	struct dlm_ctxt *dlm = astdata;
	mlog(0, "ast for recovery lock fired, this=%u, dlm=%s\n",
	     dlm->node_num, dlm->name);
}

static void dlm_reco_bast(void *astdata, int blocked_type)
{
	struct dlm_ctxt *dlm = astdata;
	mlog(0, "bast for recovery lock fired, this=%u, dlm=%s\n",
	     dlm->node_num, dlm->name);
}

static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st)
{
	mlog(0, "unlockast for recovery lock fired\n");
}
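/*
 * the three callbacks above are intentionally just log stubs.  the
 * $RECOVERY lockres never protects data; it exists only so that a
 * dlmlock() call can serve as a cluster-wide election (see below),
 * so there is no lvb to propagate and nothing to do when an ast,
 * bast or unlock ast fires for it.
 */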
/*
 * dlm_pick_recovery_master will continually attempt to use
 * dlmlock() on the special "$RECOVERY" lockres with the
 * LKM_NOQUEUE flag to get an EX.  every thread that enters
 * this function on each node racing to become the recovery
 * master will not stop attempting this until either:
 * a) this node gets the EX (and becomes the recovery master),
 * or b) dlm->reco.new_master gets set to some nodenum
 * != O2NM_INVALID_NODE_NUM (another node will do the reco).
 * so each time a recovery master is needed, the entire cluster
 * will sync at this point.  if the new master dies, that will
 * be detected in dlm_do_recovery */
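/*
 * in outline (status values as used in the body below), a caller
 * sees one of two outcomes:
 *
 *	status = dlm_pick_recovery_master(dlm);
 *	if (!status) {
 *		// this node won the $RECOVERY EX and has already
 *		// sent begin_reco to every other domain member
 *	} else {
 *		// another node won (or recovery already finished);
 *		// dlm->reco.new_master identifies the winner
 *	}
 */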
static int dlm_pick_recovery_master(struct dlm_ctxt *dlm)
{
	enum dlm_status ret;
	struct dlm_lockstatus lksb;
	int status = -EINVAL;

	mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n",
	     dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num);
again:
	memset(&lksb, 0, sizeof(lksb));

	ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY,
		      DLM_RECOVERY_LOCK_NAME, dlm_reco_ast, dlm, dlm_reco_bast);

	mlog(0, "%s: dlmlock($RECOVERY) returned %d, lksb=%d\n",
	     dlm->name, ret, lksb.status);

	if (ret == DLM_NORMAL) {
		mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n",
		     dlm->name, dlm->node_num);

		/* got the EX lock.  check to see if another node
		 * just became the reco master */
		if (dlm_reco_master_ready(dlm)) {
			mlog(0, "%s: got reco EX lock, but %u will "
			     "do the recovery\n", dlm->name,
			     dlm->reco.new_master);
			status = -EEXIST;
		} else {
			status = 0;

			/* see if recovery was already finished elsewhere */
			spin_lock(&dlm->spinlock);
			if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
				status = -EINVAL;
				mlog(0, "%s: got reco EX lock, but "
				     "node got recovered already\n", dlm->name);
				if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
					mlog(ML_ERROR, "%s: new master is %u "
					     "but no dead node!\n",
					     dlm->name, dlm->reco.new_master);
					BUG();
				}
			}
			spin_unlock(&dlm->spinlock);
		}

		/* if this node has actually become the recovery master,
		 * set the master and send the messages to begin recovery */
		if (!status) {
			mlog(0, "%s: dead=%u, this=%u, sending "
			     "begin_reco now\n", dlm->name,
			     dlm->reco.dead_node, dlm->node_num);
			status = dlm_send_begin_reco_message(dlm,
							     dlm->reco.dead_node);
			/* this always succeeds */
			BUG_ON(status);

			/* set the new_master to this node */
			spin_lock(&dlm->spinlock);
			dlm_set_reco_master(dlm, dlm->node_num);
			spin_unlock(&dlm->spinlock);
		}

		/* recovery lock is a special case.  ast will not get fired,
		 * so just go ahead and unlock it. */
		ret = dlmunlock(dlm, &lksb, 0, dlm_reco_unlock_ast, dlm);
		if (ret == DLM_DENIED) {
			mlog(0, "got DLM_DENIED, trying LKM_CANCEL\n");
			ret = dlmunlock(dlm, &lksb, LKM_CANCEL,
					dlm_reco_unlock_ast, dlm);
		}
		if (ret != DLM_NORMAL) {
			/* this would really suck. this could only happen
			 * if there was a network error during the unlock
			 * because of node death.  this means the unlock
			 * is actually "done" and the lock structure is
			 * even freed.  we can continue, but only
			 * because this specific lock name is special. */
			mlog(ML_ERROR, "dlmunlock returned %d\n", ret);
		}
	} else if (ret == DLM_NOTQUEUED) {
		mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n",
		     dlm->name, dlm->node_num);
		/* another node is master.  wait on
		 * reco.new_master != O2NM_INVALID_NODE_NUM
		 * for at most one second */
		wait_event_timeout(dlm->dlm_reco_thread_wq,
				   dlm_reco_master_ready(dlm),
				   msecs_to_jiffies(1000));
		if (!dlm_reco_master_ready(dlm)) {
			mlog(0, "%s: reco master taking awhile\n",
			     dlm->name);
			goto again;
		}
		/* another node has informed this one that it is reco master */
		mlog(0, "%s: reco master %u is ready to recover %u\n",
		     dlm->name, dlm->reco.new_master, dlm->reco.dead_node);
		status = -EEXIST;
	} else {
		struct dlm_lock_resource *res;

		/* dlmlock returned something other than NOTQUEUED or NORMAL */
		mlog(ML_ERROR, "%s: got %s from dlmlock($RECOVERY), "
		     "lksb.status=%s\n", dlm->name, dlm_errname(ret),
		     dlm_errname(lksb.status));
		res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
					 DLM_RECOVERY_LOCK_NAME_LEN);
		if (res) {
			dlm_print_one_lock_resource(res);
			dlm_lockres_put(res);
		} else {
			mlog(ML_ERROR, "recovery lock not found\n");
		}
		BUG();
	}

	return status;
}
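/*
 * note the retry behavior above: on DLM_NOTQUEUED this node waits
 * up to one second for dlm_reco_master_ready() and then jumps back
 * to "again", so a loser of the election can lap here repeatedly
 * until the winner's begin_reco message sets reco.new_master.
 * any other dlmlock() result is treated as a fatal inconsistency.
 */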
static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct dlm_begin_reco br;
	int ret = 0;
	struct dlm_node_iter iter;
	int nodenum;
	int status;

	mlog_entry("%u\n", dead_node);

	mlog(0, "%s: dead node is %u\n", dlm->name, dead_node);

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

	clear_bit(dead_node, iter.node_map);

	memset(&br, 0, sizeof(br));
	br.node_idx = dlm->node_num;
	br.dead_node = dead_node;

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		ret = 0;
		if (nodenum == dead_node) {
			mlog(0, "not sending begin reco to dead node "
			     "%u\n", dead_node);
			continue;
		}
		if (nodenum == dlm->node_num) {
			mlog(0, "not sending begin reco to self\n");
			continue;
		}
retry:
		ret = -EINVAL;
		mlog(0, "attempting to send begin reco msg to %d\n",
		     nodenum);
		ret = o2net_send_message(DLM_BEGIN_RECO_MSG, dlm->key,
					 &br, sizeof(br), nodenum, &status);
		/* negative status is handled ok by caller here */
		if (ret >= 0)
			ret = status;
		if (dlm_is_host_down(ret)) {
			/* node is down.  not involved in recovery
			 * so just keep going */
			mlog(0, "%s: node %u was down when sending "
			     "begin reco msg (%d)\n", dlm->name, nodenum, ret);
			ret = 0;
		}
		if (ret < 0) {
			struct dlm_lock_resource *res;

			/* this is now a serious problem, possibly ENOMEM
			 * in the network stack.  must retry */
			mlog(ML_ERROR, "begin reco of dlm %s to node %u "
			     "returned %d\n", dlm->name, nodenum, ret);
			res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
						 DLM_RECOVERY_LOCK_NAME_LEN);
			if (res) {
				dlm_print_one_lock_resource(res);
				dlm_lockres_put(res);
			} else {
				mlog(ML_ERROR, "recovery lock not found\n");
			}
			/* sleep for a bit in hopes that we can avoid
			 * another ENOMEM */
			msleep(100);
			goto retry;
		}
	}

	return ret;
}
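/*
 * dlm_send_begin_reco_message never gives up on a live node: a
 * node that is down is simply skipped (it cannot participate in
 * recovery), but any other send failure, e.g. ENOMEM inside the
 * network stack, is retried forever after a short msleep().  this
 * is what allows the caller to treat a return from this function
 * as success.
 */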
int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_begin_reco *br = (struct dlm_begin_reco *)msg->buf;

	/* ok to return 0, domain has gone away */
	if (!dlm_grab(dlm))
		return 0;

	mlog(0, "%s: node %u wants to recover node %u (%u:%u)\n",
	     dlm->name, br->node_idx, br->dead_node,
	     dlm->reco.dead_node, dlm->reco.new_master);

	dlm_fire_domain_eviction_callbacks(dlm, br->dead_node);

	spin_lock(&dlm->spinlock);
	if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
		if (test_bit(dlm->reco.new_master, dlm->recovery_map)) {
			mlog(0, "%s: new_master %u died, changing "
			     "to %u\n", dlm->name, dlm->reco.new_master,
			     br->node_idx);
		} else {
			mlog(0, "%s: new_master %u NOT DEAD, changing "
			     "to %u\n", dlm->name, dlm->reco.new_master,
			     br->node_idx);
			/* may not have seen the new master as dead yet */
		}
	}
	if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) {
		mlog(ML_NOTICE, "%s: dead_node previously set to %u, "
		     "node %u changing it to %u\n", dlm->name,
		     dlm->reco.dead_node, br->node_idx, br->dead_node);
	}
	dlm_set_reco_master(dlm, br->node_idx);
	dlm_set_reco_dead_node(dlm, br->dead_node);
	if (!test_bit(br->dead_node, dlm->recovery_map)) {
		mlog(0, "recovery master %u sees %u as dead, but this "
		     "node has not yet.  marking %u as dead\n",
		     br->node_idx, br->dead_node, br->dead_node);
		if (!test_bit(br->dead_node, dlm->domain_map) ||
		    !test_bit(br->dead_node, dlm->live_nodes_map))
			mlog(0, "%u not in domain/live_nodes map "
			     "so setting it in reco map manually\n",
			     br->dead_node);
		/* force the recovery cleanup in __dlm_hb_node_down;
		 * both of these bits will be cleared in a moment */
		set_bit(br->dead_node, dlm->domain_map);
		set_bit(br->dead_node, dlm->live_nodes_map);
		__dlm_hb_node_down(dlm, br->dead_node);
	}
	spin_unlock(&dlm->spinlock);

	dlm_kick_recovery_thread(dlm);

	mlog(0, "%s: recovery started by node %u, for %u (%u:%u)\n",
	     dlm->name, br->node_idx, br->dead_node,
	     dlm->reco.dead_node, dlm->reco.new_master);

	dlm_put(dlm);
	return 0;
}
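/*
 * one subtle point in the handler above: if this node has not yet
 * noticed the dead node die, it temporarily *sets* the dead node's
 * bits in domain_map and live_nodes_map before calling
 * __dlm_hb_node_down(), forcing the full local cleanup path to run
 * immediately (both bits are cleared again inside that call)
 * instead of waiting for the local heartbeat callback.
 */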
static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm)
{
	int ret = 0;
	struct dlm_finalize_reco fr;
	struct dlm_node_iter iter;
	int nodenum;
	int status;

	mlog(0, "finishing recovery for node %s:%u\n",
	     dlm->name, dlm->reco.dead_node);

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

	memset(&fr, 0, sizeof(fr));
	fr.node_idx = dlm->node_num;
	fr.dead_node = dlm->reco.dead_node;

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		if (nodenum == dlm->node_num)
			continue;
		ret = o2net_send_message(DLM_FINALIZE_RECO_MSG, dlm->key,
					 &fr, sizeof(fr), nodenum, &status);
		if (ret >= 0)
			ret = status;
		if (dlm_is_host_down(ret)) {
			/* this has no effect on this recovery
			 * session, so set the status to zero to
			 * finish out the last recovery */
			mlog(ML_ERROR, "node %u went down after this "
			     "node finished recovery.\n", nodenum);
			ret = 0;
		}
		if (ret < 0) {
			mlog_errno(ret);
			break;
		}
	}

	return ret;
}
int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf;

	/* ok to return 0, domain has gone away */
	if (!dlm_grab(dlm))
		return 0;

	mlog(0, "%s: node %u finalizing recovery of node %u (%u:%u)\n",
	     dlm->name, fr->node_idx, fr->dead_node,
	     dlm->reco.dead_node, dlm->reco.new_master);

	spin_lock(&dlm->spinlock);

	if (dlm->reco.new_master != fr->node_idx) {
		mlog(ML_ERROR, "node %u sent recovery finalize msg, but node "
		     "%u is supposed to be the new master, dead=%u\n",
		     fr->node_idx, dlm->reco.new_master, fr->dead_node);
		BUG();
	}
	if (dlm->reco.dead_node != fr->dead_node) {
		mlog(ML_ERROR, "node %u sent recovery finalize msg for dead "
		     "node %u, but node %u is supposed to be dead\n",
		     fr->node_idx, fr->dead_node, dlm->reco.dead_node);
		BUG();
	}

	dlm_finish_local_lockres_recovery(dlm, fr->dead_node, fr->node_idx);

	spin_unlock(&dlm->spinlock);

	dlm_reset_recovery(dlm);

	dlm_kick_recovery_thread(dlm);

	mlog(0, "%s: recovery done, reco master was %u, dead now %u, "
	     "master now %u\n", dlm->name, fr->node_idx,
	     dlm->reco.dead_node, dlm->reco.new_master);

	dlm_put(dlm);
	return 0;
}
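/*
 * taken together, one recovery pass flows roughly like this:
 *
 *	reco master			other live nodes
 *	-----------			----------------
 *	win $RECOVERY EX lock
 *	send DLM_BEGIN_RECO_MSG   --->	mark dead node, kick local
 *					recovery thread
 *	remaster/migrate the dead
 *	node's locks
 *	send DLM_FINALIZE_RECO_MSG --->	finish local lockres
 *					recovery, reset reco state
 */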