/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmcommon.h
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 */

#ifndef DLMCOMMON_H
#define DLMCOMMON_H

#include <linux/kref.h>

#define DLM_HB_NODE_DOWN_PRI     (0xf000000)
#define DLM_HB_NODE_UP_PRI       (0x8000000)

#define DLM_LOCKID_NAME_MAX    32

#define DLM_DOMAIN_NAME_MAX_LEN    255
#define DLM_LOCK_RES_OWNER_UNKNOWN     O2NM_MAX_NODES
#define DLM_THREAD_SHUFFLE_INTERVAL    5     // flush everything every 5 passes
#define DLM_THREAD_MS                  200   // flush at least every 200 ms

#define DLM_HASH_SIZE_DEFAULT   (1 << 14)
#if DLM_HASH_SIZE_DEFAULT < PAGE_SIZE
# define DLM_HASH_PAGES         1
#else
# define DLM_HASH_PAGES         (DLM_HASH_SIZE_DEFAULT / PAGE_SIZE)
#endif
#define DLM_BUCKETS_PER_PAGE    (PAGE_SIZE / sizeof(struct hlist_head))
#define DLM_HASH_BUCKETS        (DLM_HASH_PAGES * DLM_BUCKETS_PER_PAGE)

/* Intended to make it easier for us to switch out hash functions */
#define dlm_lockid_hash(_n, _l) full_name_hash(_n, _l)

enum dlm_ast_type {
        DLM_AST = 0,
        DLM_BAST,
        DLM_ASTUNLOCK
};


#define LKM_VALID_FLAGS (LKM_VALBLK | LKM_CONVERT | LKM_UNLOCK | \
                         LKM_CANCEL | LKM_INVVALBLK | LKM_FORCE | \
                         LKM_RECOVERY | LKM_LOCAL | LKM_NOQUEUE)

#define DLM_RECOVERY_LOCK_NAME       "$RECOVERY"
#define DLM_RECOVERY_LOCK_NAME_LEN   9

static inline int dlm_is_recovery_lock(const char *lock_name, int name_len)
{
        if (name_len == DLM_RECOVERY_LOCK_NAME_LEN &&
            memcmp(lock_name, DLM_RECOVERY_LOCK_NAME, name_len) == 0)
                return 1;
        return 0;
}

#define DLM_RECO_STATE_ACTIVE    0x0001
#define DLM_RECO_STATE_FINALIZE  0x0002

struct dlm_recovery_ctxt
{
        struct list_head resources;
        struct list_head received;
        struct list_head node_data;
        u8  new_master;
        u8  dead_node;
        u16 state;
        unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
        wait_queue_head_t event;
};

enum dlm_ctxt_state {
        DLM_CTXT_NEW = 0,
        DLM_CTXT_JOINED,
        DLM_CTXT_IN_SHUTDOWN,
        DLM_CTXT_LEAVING,
};

struct dlm_ctxt
{
        struct list_head list;
        struct hlist_head **lockres_hash;
        struct list_head dirty_list;
        struct list_head purge_list;
        struct list_head pending_asts;
        struct list_head pending_basts;
        unsigned int purge_count;
        spinlock_t spinlock;
        spinlock_t ast_lock;
        char *name;
        u8 node_num;
        u32 key;
        u8  joining_node;
        wait_queue_head_t dlm_join_events;
        unsigned long live_nodes_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
        unsigned long domain_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
        unsigned long recovery_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
        struct dlm_recovery_ctxt reco;
        spinlock_t master_lock;
        struct list_head master_list;
        struct list_head mle_hb_events;

        /* these give a really vague idea of the system load */
        atomic_t local_resources;
        atomic_t remote_resources;
        atomic_t unknown_resources;

        /* NOTE: Next three are protected by dlm_domain_lock */
        struct kref dlm_refs;
        enum dlm_ctxt_state dlm_state;
        unsigned int num_joins;

        struct o2hb_callback_func dlm_hb_up;
        struct o2hb_callback_func dlm_hb_down;
        struct task_struct *dlm_thread_task;
        struct task_struct *dlm_reco_thread_task;
        struct workqueue_struct *dlm_worker;
        wait_queue_head_t dlm_thread_wq;
        wait_queue_head_t dlm_reco_thread_wq;
        wait_queue_head_t ast_wq;
        wait_queue_head_t migration_wq;

        struct work_struct dispatched_work;
        struct list_head work_list;
        spinlock_t work_lock;
        struct list_head dlm_domain_handlers;
        struct list_head dlm_eviction_callbacks;
};

static inline struct hlist_head *dlm_lockres_hash(struct dlm_ctxt *dlm, unsigned i)
{
        return dlm->lockres_hash[(i / DLM_BUCKETS_PER_PAGE) % DLM_HASH_PAGES] + (i % DLM_BUCKETS_PER_PAGE);
}
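
/*
 * Illustrative sketch only (the real lookup helpers are declared further
 * down in this header): a lookup hashes the lock name, picks a bucket with
 * dlm_lockres_hash(), and walks the hlist comparing names, with
 * dlm->spinlock held by the caller:
 *
 *	unsigned int hash = dlm_lockid_hash(name, len);
 *	struct hlist_head *bucket = dlm_lockres_hash(dlm, hash);
 *	struct hlist_node *iter;
 *	struct dlm_lock_resource *res;
 *
 *	hlist_for_each_entry(res, iter, bucket, hash_node)
 *		if (res->lockname.len == len &&
 *		    !memcmp(res->lockname.name, name, len))
 *			return res;	// found
 */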

/* these keventd work queue items are for less-frequently
 * called functions that cannot be directly called from the
 * net message handlers for some reason, usually because
 * they need to send net messages of their own. */
void dlm_dispatch_work(struct work_struct *work);

struct dlm_lock_resource;
struct dlm_work_item;

typedef void (dlm_workfunc_t)(struct dlm_work_item *, void *);

struct dlm_request_all_locks_priv
{
        u8 reco_master;
        u8 dead_node;
};

struct dlm_mig_lockres_priv
{
        struct dlm_lock_resource *lockres;
        u8 real_master;
};

struct dlm_assert_master_priv
{
        struct dlm_lock_resource *lockres;
        u8 request_from;
        u32 flags;
        unsigned ignore_higher:1;
};


struct dlm_work_item
{
        struct list_head list;
        dlm_workfunc_t *func;
        struct dlm_ctxt *dlm;
        void *data;
        union {
                struct dlm_request_all_locks_priv ral;
                struct dlm_mig_lockres_priv ml;
                struct dlm_assert_master_priv am;
        } u;
};

static inline void dlm_init_work_item(struct dlm_ctxt *dlm,
                                      struct dlm_work_item *i,
                                      dlm_workfunc_t *f, void *data)
{
        memset(i, 0, sizeof(*i));
        i->func = f;
        INIT_LIST_HEAD(&i->list);
        i->data = data;
        i->dlm = dlm;  /* must have already done a dlm_grab on this! */
}



static inline void __dlm_set_joining_node(struct dlm_ctxt *dlm,
                                          u8 node)
{
        assert_spin_locked(&dlm->spinlock);

        dlm->joining_node = node;
        wake_up(&dlm->dlm_join_events);
}

#define DLM_LOCK_RES_UNINITED             0x00000001
#define DLM_LOCK_RES_RECOVERING           0x00000002
#define DLM_LOCK_RES_READY                0x00000004
#define DLM_LOCK_RES_DIRTY                0x00000008
#define DLM_LOCK_RES_IN_PROGRESS          0x00000010
#define DLM_LOCK_RES_MIGRATING            0x00000020
#define DLM_LOCK_RES_DROPPING_REF         0x00000040

/* max milliseconds to wait to sync up a network failure with a node death */
#define DLM_NODE_DEATH_WAIT_MAX (5 * 1000)

#define DLM_PURGE_INTERVAL_MS   (8 * 1000)

struct dlm_lock_resource
{
        /* WARNING: Please see the comment in dlm_init_lockres before
         * adding fields here. */
        struct hlist_node hash_node;
        struct qstr lockname;
        struct kref refs;

        /*
         * Please keep granted, converting, and blocked in this order,
         * as some funcs want to iterate over all lists.
         *
         * All four lists are protected by the hash's reference.
         */
        struct list_head granted;
        struct list_head converting;
        struct list_head blocked;
        struct list_head purge;

        /*
         * These two lists require you to hold an additional reference
         * while they are on the list.
         */
        struct list_head dirty;
        struct list_head recovering; // dlm_recovery_ctxt.resources list

        /* unused lock resources have their last_used stamped and are
         * put on a list for the dlm thread to run. */
        unsigned long    last_used;

        unsigned migration_pending:1;
        atomic_t asts_reserved;
        spinlock_t spinlock;
        wait_queue_head_t wq;
        u8  owner;              // node which owns the lock resource, or unknown
        u16 state;
        char lvb[DLM_LVB_LEN];
        unsigned int inflight_locks;
        unsigned long refmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
};

struct dlm_migratable_lock
{
        __be64 cookie;

        /* these 3 are just padding for the in-memory structure, but
         * list and flags are actually used when sent over the wire */
        __be16 pad1;
        u8 list;  // 0=granted, 1=converting, 2=blocked
        u8 flags;

        s8 type;
        s8 convert_type;
        s8 highest_blocked;
        u8 node;
};  // 16 bytes

struct dlm_lock
{
        struct dlm_migratable_lock ml;

        struct list_head list;
        struct list_head ast_list;
        struct list_head bast_list;
        struct dlm_lock_resource *lockres;
        spinlock_t spinlock;
        struct kref lock_refs;

        // ast and bast must be callable while holding a spinlock!
        dlm_astlockfunc_t *ast;
        dlm_bastlockfunc_t *bast;
        void *astdata;
        struct dlm_lockstatus *lksb;
        unsigned ast_pending:1,
                 bast_pending:1,
                 convert_pending:1,
                 lock_pending:1,
                 cancel_pending:1,
                 unlock_pending:1,
                 lksb_kernel_allocated:1;
};


#define DLM_LKSB_UNUSED1           0x01
#define DLM_LKSB_PUT_LVB           0x02
#define DLM_LKSB_GET_LVB           0x04
#define DLM_LKSB_UNUSED2           0x08
#define DLM_LKSB_UNUSED3           0x10
#define DLM_LKSB_UNUSED4           0x20
#define DLM_LKSB_UNUSED5           0x40
#define DLM_LKSB_UNUSED6           0x80


enum dlm_lockres_list {
        DLM_GRANTED_LIST = 0,
        DLM_CONVERTING_LIST,
        DLM_BLOCKED_LIST
};

static inline int dlm_lvb_is_empty(char *lvb)
{
        int i;
        for (i = 0; i < DLM_LVB_LEN; i++)
                if (lvb[i])
                        return 0;
        return 1;
}

static inline struct list_head *
dlm_list_idx_to_ptr(struct dlm_lock_resource *res, enum dlm_lockres_list idx)
{
        struct list_head *ret = NULL;
        if (idx == DLM_GRANTED_LIST)
                ret = &res->granted;
        else if (idx == DLM_CONVERTING_LIST)
                ret = &res->converting;
        else if (idx == DLM_BLOCKED_LIST)
                ret = &res->blocked;
        else
                BUG();
        return ret;
}




struct dlm_node_iter
{
        unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
        int curnode;
};


enum {
        DLM_MASTER_REQUEST_MSG    = 500,
        DLM_UNUSED_MSG1,          /* 501 */
        DLM_ASSERT_MASTER_MSG,    /* 502 */
        DLM_CREATE_LOCK_MSG,      /* 503 */
        DLM_CONVERT_LOCK_MSG,     /* 504 */
        DLM_PROXY_AST_MSG,        /* 505 */
        DLM_UNLOCK_LOCK_MSG,      /* 506 */
        DLM_DEREF_LOCKRES_MSG,    /* 507 */
        DLM_MIGRATE_REQUEST_MSG,  /* 508 */
        DLM_MIG_LOCKRES_MSG,      /* 509 */
        DLM_QUERY_JOIN_MSG,       /* 510 */
        DLM_ASSERT_JOINED_MSG,    /* 511 */
        DLM_CANCEL_JOIN_MSG,      /* 512 */
        DLM_EXIT_DOMAIN_MSG,      /* 513 */
        DLM_MASTER_REQUERY_MSG,   /* 514 */
        DLM_LOCK_REQUEST_MSG,     /* 515 */
        DLM_RECO_DATA_DONE_MSG,   /* 516 */
        DLM_BEGIN_RECO_MSG,       /* 517 */
        DLM_FINALIZE_RECO_MSG     /* 518 */
};

struct dlm_reco_node_data
{
        int state;
        u8 node_num;
        struct list_head list;
};

enum {
        DLM_RECO_NODE_DATA_DEAD = -1,
        DLM_RECO_NODE_DATA_INIT = 0,
        DLM_RECO_NODE_DATA_REQUESTING,
        DLM_RECO_NODE_DATA_REQUESTED,
        DLM_RECO_NODE_DATA_RECEIVING,
        DLM_RECO_NODE_DATA_DONE,
        DLM_RECO_NODE_DATA_FINALIZE_SENT,
};


enum {
        DLM_MASTER_RESP_NO = 0,
        DLM_MASTER_RESP_YES,
        DLM_MASTER_RESP_MAYBE,
        DLM_MASTER_RESP_ERROR
};


struct dlm_master_request
{
        u8 node_idx;
        u8 namelen;
        __be16 pad1;
        __be32 flags;

        u8 name[O2NM_MAX_NAME_LEN];
};

#define DLM_ASSERT_RESPONSE_REASSERT       0x00000001
#define DLM_ASSERT_RESPONSE_MASTERY_REF    0x00000002

#define DLM_ASSERT_MASTER_MLE_CLEANUP      0x00000001
#define DLM_ASSERT_MASTER_REQUERY          0x00000002
#define DLM_ASSERT_MASTER_FINISH_MIGRATION 0x00000004
struct dlm_assert_master
{
        u8 node_idx;
        u8 namelen;
        __be16 pad1;
        __be32 flags;

        u8 name[O2NM_MAX_NAME_LEN];
};

#define DLM_MIGRATE_RESPONSE_MASTERY_REF   0x00000001

struct dlm_migrate_request
{
        u8 master;
        u8 new_master;
        u8 namelen;
        u8 pad1;
        __be32 pad2;
        u8 name[O2NM_MAX_NAME_LEN];
};

struct dlm_master_requery
{
        u8 pad1;
        u8 pad2;
        u8 node_idx;
        u8 namelen;
        __be32 pad3;
        u8 name[O2NM_MAX_NAME_LEN];
};

#define DLM_MRES_RECOVERY   0x01
#define DLM_MRES_MIGRATION  0x02
#define DLM_MRES_ALL_DONE   0x04

/*
 * We would like to get one whole lockres into a single network
 * message whenever possible.  Generally speaking, there will be
 * at most one dlm_lock on a lockres for each node in the cluster,
 * plus (infrequently) any additional locks coming in from userdlm.
 *
 * struct _dlm_lockres_page
 * {
 *	dlm_migratable_lockres mres;
 *	dlm_migratable_lock ml[DLM_MAX_MIGRATABLE_LOCKS];
 *	u8 pad[DLM_MIG_LOCKRES_RESERVED];
 * };
 *
 * from ../cluster/tcp.h
 *    NET_MAX_PAYLOAD_BYTES  (4096 - sizeof(net_msg))
 *    (roughly 4080 bytes)
 * and sizeof(dlm_migratable_lockres) = 112 bytes
 * and sizeof(dlm_migratable_lock) = 16 bytes
 *
 * Choosing DLM_MAX_MIGRATABLE_LOCKS=240 and
 * DLM_MIG_LOCKRES_RESERVED=128 means we have this:
 *
 *  (DLM_MAX_MIGRATABLE_LOCKS * sizeof(dlm_migratable_lock)) +
 *     sizeof(dlm_migratable_lockres) + DLM_MIG_LOCKRES_RESERVED =
 *        NET_MAX_PAYLOAD_BYTES
 *  (240 * 16) + 112 + 128 = 4080
 *
 * So a lockres would need more than 240 locks before it would
 * use more than one network packet to recover.  Not too bad.
 */
#define DLM_MAX_MIGRATABLE_LOCKS   240

struct dlm_migratable_lockres
{
        u8 master;
        u8 lockname_len;
        u8 num_locks;       // locks sent in this structure
        u8 flags;
        __be32 total_locks; // locks to be sent for this migration cookie
        __be64 mig_cookie;  // cookie for this lockres migration
                            // or zero if not needed
        // 16 bytes
        u8 lockname[DLM_LOCKID_NAME_MAX];
        // 48 bytes
        u8 lvb[DLM_LVB_LEN];
        // 112 bytes
        struct dlm_migratable_lock ml[0];  // 16 bytes each, begins at byte 112
};
#define DLM_MIG_LOCKRES_MAX_LEN \
        (sizeof(struct dlm_migratable_lockres) + \
         (sizeof(struct dlm_migratable_lock) * \
          DLM_MAX_MIGRATABLE_LOCKS) )

/* from above, 128 bytes
 * for some undetermined future use */
#define DLM_MIG_LOCKRES_RESERVED   (NET_MAX_PAYLOAD_BYTES - \
                                    DLM_MIG_LOCKRES_MAX_LEN)
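
/*
 * Illustrative sanity check (sketch, not part of the build): if the
 * constants above ever change, a compile-time assert along these lines
 * would catch an overflow of the o2net payload:
 *
 *	BUILD_BUG_ON(DLM_MIG_LOCKRES_MAX_LEN > NET_MAX_PAYLOAD_BYTES);
 */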

struct dlm_create_lock
{
        __be64 cookie;

        __be32 flags;
        u8 pad1;
        u8 node_idx;
        s8 requested_type;
        u8 namelen;

        u8 name[O2NM_MAX_NAME_LEN];
};

struct dlm_convert_lock
{
        __be64 cookie;

        __be32 flags;
        u8 pad1;
        u8 node_idx;
        s8 requested_type;
        u8 namelen;

        u8 name[O2NM_MAX_NAME_LEN];

        s8 lvb[0];
};
#define DLM_CONVERT_LOCK_MAX_LEN  (sizeof(struct dlm_convert_lock)+DLM_LVB_LEN)

struct dlm_unlock_lock
{
        __be64 cookie;

        __be32 flags;
        __be16 pad1;
        u8 node_idx;
        u8 namelen;

        u8 name[O2NM_MAX_NAME_LEN];

        s8 lvb[0];
};
#define DLM_UNLOCK_LOCK_MAX_LEN  (sizeof(struct dlm_unlock_lock)+DLM_LVB_LEN)

struct dlm_proxy_ast
{
        __be64 cookie;

        __be32 flags;
        u8 node_idx;
        u8 type;
        u8 blocked_type;
        u8 namelen;

        u8 name[O2NM_MAX_NAME_LEN];

        s8 lvb[0];
};
#define DLM_PROXY_AST_MAX_LEN  (sizeof(struct dlm_proxy_ast)+DLM_LVB_LEN)

#define DLM_MOD_KEY (0x666c6172)
enum dlm_query_join_response {
        JOIN_DISALLOW = 0,
        JOIN_OK,
        JOIN_OK_NO_MAP,
};

struct dlm_lock_request
{
        u8 node_idx;
        u8 dead_node;
        __be16 pad1;
        __be32 pad2;
};

struct dlm_reco_data_done
{
        u8 node_idx;
        u8 dead_node;
        __be16 pad1;
        __be32 pad2;

        /* unused for now */
        /* eventually we can use this to attempt
         * lvb recovery based on each node's info */
        u8 reco_lvb[DLM_LVB_LEN];
};

struct dlm_begin_reco
{
        u8 node_idx;
        u8 dead_node;
        __be16 pad1;
        __be32 pad2;
};


struct dlm_query_join_request
{
        u8 node_idx;
        u8 pad1[2];
        u8 name_len;
        u8 domain[O2NM_MAX_NAME_LEN];
};

struct dlm_assert_joined
{
        u8 node_idx;
        u8 pad1[2];
        u8 name_len;
        u8 domain[O2NM_MAX_NAME_LEN];
};

struct dlm_cancel_join
{
        u8 node_idx;
        u8 pad1[2];
        u8 name_len;
        u8 domain[O2NM_MAX_NAME_LEN];
};

struct dlm_exit_domain
{
        u8 node_idx;
        u8 pad1[3];
};

struct dlm_finalize_reco
{
        u8 node_idx;
        u8 dead_node;
        u8 flags;
        u8 pad1;
        __be32 pad2;
};

struct dlm_deref_lockres
{
        u32 pad1;
        u16 pad2;
        u8 node_idx;
        u8 namelen;

        u8 name[O2NM_MAX_NAME_LEN];
};

static inline enum dlm_status
__dlm_lockres_state_to_status(struct dlm_lock_resource *res)
{
        enum dlm_status status = DLM_NORMAL;

        assert_spin_locked(&res->spinlock);

        if (res->state & DLM_LOCK_RES_RECOVERING)
                status = DLM_RECOVERING;
        else if (res->state & DLM_LOCK_RES_MIGRATING)
                status = DLM_MIGRATING;
        else if (res->state & DLM_LOCK_RES_IN_PROGRESS)
                status = DLM_FORWARD;

        return status;
}

static inline u8 dlm_get_lock_cookie_node(u64 cookie)
{
        u8 ret;
        cookie >>= 56;
        ret = (u8)(cookie & 0xffULL);
        return ret;
}

static inline unsigned long long dlm_get_lock_cookie_seq(u64 cookie)
{
        unsigned long long ret;
        ret = ((unsigned long long)cookie) & 0x00ffffffffffffffULL;
        return ret;
}

struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie,
                               struct dlm_lockstatus *lksb);
void dlm_lock_get(struct dlm_lock *lock);
void dlm_lock_put(struct dlm_lock *lock);

void dlm_lock_attach_lockres(struct dlm_lock *lock,
                             struct dlm_lock_resource *res);

int dlm_create_lock_handler(struct o2net_msg *msg, u32 len, void *data);
int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data);
int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data);

void dlm_revert_pending_convert(struct dlm_lock_resource *res,
                                struct dlm_lock *lock);
void dlm_revert_pending_lock(struct dlm_lock_resource *res,
                             struct dlm_lock *lock);

int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data);
void dlm_commit_pending_cancel(struct dlm_lock_resource *res,
                               struct dlm_lock *lock);
void dlm_commit_pending_unlock(struct dlm_lock_resource *res,
                               struct dlm_lock *lock);

int dlm_launch_thread(struct dlm_ctxt *dlm);
void dlm_complete_thread(struct dlm_ctxt *dlm);
int dlm_launch_recovery_thread(struct dlm_ctxt *dlm);
void dlm_complete_recovery_thread(struct dlm_ctxt *dlm);
void dlm_wait_for_recovery(struct dlm_ctxt *dlm);
void dlm_kick_recovery_thread(struct dlm_ctxt *dlm);
int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node);
int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout);
int dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout);

void dlm_put(struct dlm_ctxt *dlm);
struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm);
int dlm_domain_fully_joined(struct dlm_ctxt *dlm);

void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
                              struct dlm_lock_resource *res);
void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
                            struct dlm_lock_resource *res);
static inline void dlm_lockres_get(struct dlm_lock_resource *res)
{
        /* This is called on every lookup, so it might be worth
         * inlining. */
        kref_get(&res->refs);
}
void dlm_lockres_put(struct dlm_lock_resource *res);
void __dlm_unhash_lockres(struct dlm_lock_resource *res);
void __dlm_insert_lockres(struct dlm_ctxt *dlm,
                          struct dlm_lock_resource *res);
struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm,
                                                     const char *name,
                                                     unsigned int len,
                                                     unsigned int hash);
struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm,
                                                const char *name,
                                                unsigned int len,
                                                unsigned int hash);
struct dlm_lock_resource * dlm_lookup_lockres(struct dlm_ctxt *dlm,
                                              const char *name,
                                              unsigned int len);

int dlm_is_host_down(int errno);
void dlm_change_lockres_owner(struct dlm_ctxt *dlm,
                              struct dlm_lock_resource *res,
                              u8 owner);
struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
                                                 const char *lockid,
                                                 int namelen,
                                                 int flags);
struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
                                          const char *name,
                                          unsigned int namelen);

#define dlm_lockres_set_refmap_bit(bit,res) \
        __dlm_lockres_set_refmap_bit(bit,res,__FILE__,__LINE__)
#define dlm_lockres_clear_refmap_bit(bit,res) \
        __dlm_lockres_clear_refmap_bit(bit,res,__FILE__,__LINE__)

static inline void __dlm_lockres_set_refmap_bit(int bit,
                                                struct dlm_lock_resource *res,
                                                const char *file,
                                                int line)
{
        //printk("%s:%d:%.*s: setting bit %d\n", file, line,
        //       res->lockname.len, res->lockname.name, bit);
        set_bit(bit, res->refmap);
}

static inline void __dlm_lockres_clear_refmap_bit(int bit,
                                                  struct dlm_lock_resource *res,
                                                  const char *file,
                                                  int line)
{
        //printk("%s:%d:%.*s: clearing bit %d\n", file, line,
        //       res->lockname.len, res->lockname.name, bit);
        clear_bit(bit, res->refmap);
}

void __dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
                                     struct dlm_lock_resource *res,
                                     const char *file,
                                     int line);
void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
                                     struct dlm_lock_resource *res,
                                     int new_lockres,
                                     const char *file,
                                     int line);
#define dlm_lockres_drop_inflight_ref(d,r) \
        __dlm_lockres_drop_inflight_ref(d,r,__FILE__,__LINE__)
#define dlm_lockres_grab_inflight_ref(d,r) \
        __dlm_lockres_grab_inflight_ref(d,r,0,__FILE__,__LINE__)
#define dlm_lockres_grab_inflight_ref_new(d,r) \
        __dlm_lockres_grab_inflight_ref(d,r,1,__FILE__,__LINE__)

void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
void dlm_do_local_ast(struct dlm_ctxt *dlm,
                      struct dlm_lock_resource *res,
                      struct dlm_lock *lock);
int dlm_do_remote_ast(struct dlm_ctxt *dlm,
                      struct dlm_lock_resource *res,
                      struct dlm_lock *lock);
void dlm_do_local_bast(struct dlm_ctxt *dlm,
                       struct dlm_lock_resource *res,
                       struct dlm_lock *lock,
                       int blocked_type);
int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm,
                           struct dlm_lock_resource *res,
                           struct dlm_lock *lock,
                           int msg_type,
                           int blocked_type, int flags);
static inline int dlm_send_proxy_bast(struct dlm_ctxt *dlm,
                                      struct dlm_lock_resource *res,
                                      struct dlm_lock *lock,
                                      int blocked_type)
{
        return dlm_send_proxy_ast_msg(dlm, res, lock, DLM_BAST,
                                      blocked_type, 0);
}

static inline int dlm_send_proxy_ast(struct dlm_ctxt *dlm,
                                     struct dlm_lock_resource *res,
                                     struct dlm_lock *lock,
                                     int flags)
{
        return dlm_send_proxy_ast_msg(dlm, res, lock, DLM_AST,
                                      0, flags);
}

void dlm_print_one_lock_resource(struct dlm_lock_resource *res);
void __dlm_print_one_lock_resource(struct dlm_lock_resource *res);

u8 dlm_nm_this_node(struct dlm_ctxt *dlm);
void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);


int dlm_nm_init(struct dlm_ctxt *dlm);
int dlm_heartbeat_init(struct dlm_ctxt *dlm);
void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data);
void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data);

int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
int dlm_finish_migration(struct dlm_ctxt *dlm,
                         struct dlm_lock_resource *res,
                         u8 old_master);
void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
                             struct dlm_lock_resource *res);
void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res);

int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data);
int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data);
int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data);
int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data);
int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data);
int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data);
int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data);
int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data);
int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data);
int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data);
int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
                          u8 nodenum, u8 *real_master);


int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
                               struct dlm_lock_resource *res,
                               int ignore_higher,
                               u8 request_from,
                               u32 flags);


int dlm_send_one_lockres(struct dlm_ctxt *dlm,
                         struct dlm_lock_resource *res,
                         struct dlm_migratable_lockres *mres,
                         u8 send_to,
                         u8 flags);
void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
                                       struct dlm_lock_resource *res);

/* will exit holding res->spinlock, but may drop in function */
void __dlm_wait_on_lockres_flags(struct dlm_lock_resource *res, int flags);
void __dlm_wait_on_lockres_flags_set(struct dlm_lock_resource *res, int flags);

/* will exit holding res->spinlock, but may drop in function */
static inline void __dlm_wait_on_lockres(struct dlm_lock_resource *res)
{
        __dlm_wait_on_lockres_flags(res, (DLM_LOCK_RES_IN_PROGRESS|
                                          DLM_LOCK_RES_RECOVERING|
                                          DLM_LOCK_RES_MIGRATING));
}


int dlm_init_mle_cache(void);
void dlm_destroy_mle_cache(void);
void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up);
int dlm_drop_lockres_ref(struct dlm_ctxt *dlm,
                         struct dlm_lock_resource *res);
void dlm_clean_master_list(struct dlm_ctxt *dlm,
                           u8 dead_node);
int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock);
int __dlm_lockres_has_locks(struct dlm_lock_resource *res);
int __dlm_lockres_unused(struct dlm_lock_resource *res);

static inline const char * dlm_lock_mode_name(int mode)
{
        switch (mode) {
                case LKM_EXMODE:
                        return "EX";
                case LKM_PRMODE:
                        return "PR";
                case LKM_NLMODE:
                        return "NL";
        }
        return "UNKNOWN";
}


static inline int dlm_lock_compatible(int existing, int request)
{
        /* NO_LOCK compatible with all */
        if (request == LKM_NLMODE ||
            existing == LKM_NLMODE)
                return 1;

        /* EX incompatible with all non-NO_LOCK */
        if (request == LKM_EXMODE)
                return 0;

        /* request must be PR, which is compatible with PR */
        if (existing == LKM_PRMODE)
                return 1;

        return 0;
}
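
/*
 * Resulting compatibility matrix (1 = compatible), request vs. existing:
 *
 *                 existing
 *                 NL  PR  EX
 *   request  NL    1   1   1
 *            PR    1   1   0
 *            EX    1   0   0
 */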

static inline int dlm_lock_on_list(struct list_head *head,
                                   struct dlm_lock *lock)
{
        struct list_head *iter;
        struct dlm_lock *tmplock;

        list_for_each(iter, head) {
                tmplock = list_entry(iter, struct dlm_lock, list);
                if (tmplock == lock)
                        return 1;
        }
        return 0;
}


static inline enum dlm_status dlm_err_to_dlm_status(int err)
{
        enum dlm_status ret;
        if (err == -ENOMEM)
                ret = DLM_SYSERR;
        else if (err == -ETIMEDOUT || o2net_link_down(err, NULL))
                ret = DLM_NOLOCKMGR;
        else if (err == -EINVAL)
                ret = DLM_BADPARAM;
        else if (err == -ENAMETOOLONG)
                ret = DLM_IVBUFLEN;
        else
                ret = DLM_BADARGS;
        return ret;
}


static inline void dlm_node_iter_init(unsigned long *map,
                                      struct dlm_node_iter *iter)
{
        memcpy(iter->node_map, map, sizeof(iter->node_map));
        iter->curnode = -1;
}

static inline int dlm_node_iter_next(struct dlm_node_iter *iter)
{
        int bit;
        bit = find_next_bit(iter->node_map, O2NM_MAX_NODES, iter->curnode+1);
        if (bit >= O2NM_MAX_NODES) {
                iter->curnode = O2NM_MAX_NODES;
                return -ENOENT;
        }
        iter->curnode = bit;
        return bit;
}
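
/*
 * Typical usage (sketch): snapshot a node bitmap, then walk its set bits
 * until dlm_node_iter_next() returns a negative value:
 *
 *	struct dlm_node_iter iter;
 *	int node;
 *
 *	dlm_node_iter_init(dlm->domain_map, &iter);
 *	while ((node = dlm_node_iter_next(&iter)) >= 0) {
 *		... contact 'node' ...
 *	}
 */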



#endif /* DLMCOMMON_H */