[IA64-SGI] Fix XPC code which sleeps with spin_lock_irqsave().
[linux-2.6.git] arch/ia64/sn/kernel/xpc_main.c
1 /*
2  * This file is subject to the terms and conditions of the GNU General Public
3  * License.  See the file "COPYING" in the main directory of this archive
4  * for more details.
5  *
6  * Copyright (c) 2004-2006 Silicon Graphics, Inc.  All Rights Reserved.
7  */
8
9
10 /*
11  * Cross Partition Communication (XPC) support - standard version.
12  *
13  *      XPC provides a message passing capability that crosses partition
14  *      boundaries. This module is made up of two parts:
15  *
16  *          partition   This part detects the presence/absence of other
17  *                      partitions. It provides a heartbeat and monitors
18  *                      the heartbeats of other partitions.
19  *
20  *          channel     This part manages the channels and sends/receives
21  *                      messages across them to/from other partitions.
22  *
23  *      There are a couple of additional functions residing in XP, which
24  *      provide an interface to XPC for its users.
25  *
26  *
27  *      Caveats:
28  *
29  *        . We currently have no way to determine which nasid an IPI came
30  *          from. Thus, xpc_IPI_send() does a remote AMO write followed by
31  *          an IPI. The AMO indicates where data is to be pulled from, so
32  *          after the IPI arrives, the remote partition checks the AMO word.
33  *          The IPI can actually arrive before the AMO, however, so other code
34  *          must periodically check for this case. Also, remote AMO operations
35  *          do not reliably time out. Thus we do a remote PIO read solely to
36  *          know whether the remote partition is down and whether we should
37  *          stop sending IPIs to it. This remote PIO read operation is set up
38  *          in a special nofault region so SAL knows to ignore (and clean up)
39  *          any errors due to the remote AMO write, PIO read, and/or PIO
40  *          write operations.
41  *
42  *          If/when new hardware solves this IPI problem, we should abandon
43  *          the current approach.
44  *
45  */
46
47
48 #include <linux/kernel.h>
49 #include <linux/module.h>
50 #include <linux/init.h>
51 #include <linux/sched.h>
52 #include <linux/syscalls.h>
53 #include <linux/cache.h>
54 #include <linux/interrupt.h>
55 #include <linux/slab.h>
56 #include <linux/delay.h>
57 #include <linux/reboot.h>
58 #include <linux/completion.h>
59 #include <asm/sn/intr.h>
60 #include <asm/sn/sn_sal.h>
61 #include <asm/kdebug.h>
62 #include <asm/uaccess.h>
63 #include <asm/sn/xpc.h>
64
65
66 /* define two XPC debug device structures to be used with dev_dbg() et al */
67
68 struct device_driver xpc_dbg_name = {
69         .name = "xpc"
70 };
71
72 struct device xpc_part_dbg_subname = {
73         .bus_id = {0},          /* set to "part" at xpc_init() time */
74         .driver = &xpc_dbg_name
75 };
76
77 struct device xpc_chan_dbg_subname = {
78         .bus_id = {0},          /* set to "chan" at xpc_init() time */
79         .driver = &xpc_dbg_name
80 };
81
82 struct device *xpc_part = &xpc_part_dbg_subname;
83 struct device *xpc_chan = &xpc_chan_dbg_subname;
84
85
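/*
 * Non-zero means other partitions should ignore our lack of a heartbeat
 * while this partition is dropped into kdebug (settable as a module_param;
 * see the bottom of this file).
 */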
86 static int xpc_kdebug_ignore;
87
88
89 /* systune related variables for /proc/sys directories */
90
91 static int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL;
92 static int xpc_hb_min_interval = 1;
93 static int xpc_hb_max_interval = 10;
94
95 static int xpc_hb_check_interval = XPC_HB_CHECK_DEFAULT_INTERVAL;
96 static int xpc_hb_check_min_interval = 10;
97 static int xpc_hb_check_max_interval = 120;
98
99 int xpc_disengage_request_timelimit = XPC_DISENGAGE_REQUEST_DEFAULT_TIMELIMIT;
100 static int xpc_disengage_request_min_timelimit = 0;
101 static int xpc_disengage_request_max_timelimit = 120;
102
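/*
 * Note: the ctl_table entries below use positional initializers; assuming the
 * 2.6-era struct ctl_table layout, the values correspond to ctl_name,
 * procname, data, maxlen, mode, child, proc_handler, strategy, de, extra1
 * and extra2.
 */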
103 static ctl_table xpc_sys_xpc_hb_dir[] = {
104         {
105                 1,
106                 "hb_interval",
107                 &xpc_hb_interval,
108                 sizeof(int),
109                 0644,
110                 NULL,
111                 &proc_dointvec_minmax,
112                 &sysctl_intvec,
113                 NULL,
114                 &xpc_hb_min_interval,
115                 &xpc_hb_max_interval
116         },
117         {
118                 2,
119                 "hb_check_interval",
120                 &xpc_hb_check_interval,
121                 sizeof(int),
122                 0644,
123                 NULL,
124                 &proc_dointvec_minmax,
125                 &sysctl_intvec,
126                 NULL,
127                 &xpc_hb_check_min_interval,
128                 &xpc_hb_check_max_interval
129         },
130         {0}
131 };
132 static ctl_table xpc_sys_xpc_dir[] = {
133         {
134                 1,
135                 "hb",
136                 NULL,
137                 0,
138                 0555,
139                 xpc_sys_xpc_hb_dir
140         },
141         {
142                 2,
143                 "disengage_request_timelimit",
144                 &xpc_disengage_request_timelimit,
145                 sizeof(int),
146                 0644,
147                 NULL,
148                 &proc_dointvec_minmax,
149                 &sysctl_intvec,
150                 NULL,
151                 &xpc_disengage_request_min_timelimit,
152                 &xpc_disengage_request_max_timelimit
153         },
154         {0}
155 };
156 static ctl_table xpc_sys_dir[] = {
157         {
158                 1,
159                 "xpc",
160                 NULL,
161                 0,
162                 0555,
163                 xpc_sys_xpc_dir
164         },
165         {0}
166 };
167 static struct ctl_table_header *xpc_sysctl;
168
169 /* non-zero if any remote partition disengage request timed out */
170 int xpc_disengage_request_timedout;
171
172 /* #of IRQs received */
173 static atomic_t xpc_act_IRQ_rcvd;
174
175 /* IRQ handler notifies this wait queue on receipt of an IRQ */
176 static DECLARE_WAIT_QUEUE_HEAD(xpc_act_IRQ_wq);
177
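/* jiffies value at which the next check of the remote heartbeats is due */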
178 static unsigned long xpc_hb_check_timeout;
179
180 /* notification that the xpc_hb_checker thread has exited */
181 static DECLARE_COMPLETION(xpc_hb_checker_exited);
182
183 /* notification that the xpc_discovery thread has exited */
184 static DECLARE_COMPLETION(xpc_discovery_exited);
185
186
187 static struct timer_list xpc_hb_timer;
188
189
190 static void xpc_kthread_waitmsgs(struct xpc_partition *, struct xpc_channel *);
191
192
193 static int xpc_system_reboot(struct notifier_block *, unsigned long, void *);
194 static struct notifier_block xpc_reboot_notifier = {
195         .notifier_call = xpc_system_reboot,
196 };
197
198 static int xpc_system_die(struct notifier_block *, unsigned long, void *);
199 static struct notifier_block xpc_die_notifier = {
200         .notifier_call = xpc_system_die,
201 };
202
203
204 /*
205  * Timer function to enforce the timelimit on the partition disengage request.
206  */
207 static void
208 xpc_timeout_partition_disengage_request(unsigned long data)
209 {
210         struct xpc_partition *part = (struct xpc_partition *) data;
211
212
213         DBUG_ON(jiffies < part->disengage_request_timeout);
214
215         (void) xpc_partition_disengaged(part);
216
217         DBUG_ON(part->disengage_request_timeout != 0);
218         DBUG_ON(xpc_partition_engaged(1UL << XPC_PARTID(part)) != 0);
219 }
220
221
222 /*
223  * Notify the heartbeat check thread that an IRQ has been received.
224  */
225 static irqreturn_t
226 xpc_act_IRQ_handler(int irq, void *dev_id, struct pt_regs *regs)
227 {
228         atomic_inc(&xpc_act_IRQ_rcvd);
229         wake_up_interruptible(&xpc_act_IRQ_wq);
230         return IRQ_HANDLED;
231 }
232
233
234 /*
235  * Timer to produce the heartbeat.  The timer structure's function is
236  * already set when this is initially called.  A tunable is used to
237  * specify when the next timeout should occur.
238  */
239 static void
240 xpc_hb_beater(unsigned long dummy)
241 {
242         xpc_vars->heartbeat++;
243
244         if (jiffies >= xpc_hb_check_timeout) {
245                 wake_up_interruptible(&xpc_act_IRQ_wq);
246         }
247
248         xpc_hb_timer.expires = jiffies + (xpc_hb_interval * HZ);
249         add_timer(&xpc_hb_timer);
250 }
251
252
253 /*
254  * This thread is responsible for nearly all of the partition
255  * activation/deactivation.
256  */
257 static int
258 xpc_hb_checker(void *ignore)
259 {
260         int last_IRQ_count = 0;
261         int new_IRQ_count;
262         int force_IRQ = 0;
263
264
265         /* this thread was marked active by xpc_hb_init() */
266
267         daemonize(XPC_HB_CHECK_THREAD_NAME);
268
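        /* restrict this thread to the CPU designated for heartbeat checking */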
269         set_cpus_allowed(current, cpumask_of_cpu(XPC_HB_CHECK_CPU));
270
271         xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
272
273         while (!(volatile int) xpc_exiting) {
274
275                 dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have "
276                         "been received\n",
277                         (int) (xpc_hb_check_timeout - jiffies),
278                         atomic_read(&xpc_act_IRQ_rcvd) - last_IRQ_count);
279
280
281                 /* checking of remote heartbeats is skewed by IRQ handling */
282                 if (jiffies >= xpc_hb_check_timeout) {
283                         dev_dbg(xpc_part, "checking remote heartbeats\n");
284                         xpc_check_remote_hb();
285
286                         /*
287                          * We need to periodically recheck to ensure no
288                          * IPI/AMO pairs have been missed.  That check
289                          * must always reset xpc_hb_check_timeout.
290                          */
291                         force_IRQ = 1;
292                 }
293
294
295                 /* check for outstanding IRQs */
296                 new_IRQ_count = atomic_read(&xpc_act_IRQ_rcvd);
297                 if (last_IRQ_count < new_IRQ_count || force_IRQ != 0) {
298                         force_IRQ = 0;
299
300                         dev_dbg(xpc_part, "found an IRQ to process; will be "
301                                 "resetting xpc_hb_check_timeout\n");
302
303                         last_IRQ_count += xpc_identify_act_IRQ_sender();
304                         if (last_IRQ_count < new_IRQ_count) {
305                                 /* retry once to help avoid missing AMO */
306                                 (void) xpc_identify_act_IRQ_sender();
307                         }
308                         last_IRQ_count = new_IRQ_count;
309
310                         xpc_hb_check_timeout = jiffies +
311                                            (xpc_hb_check_interval * HZ);
312                 }
313
314                 /* wait for IRQ or timeout */
315                 (void) wait_event_interruptible(xpc_act_IRQ_wq,
316                             (last_IRQ_count < atomic_read(&xpc_act_IRQ_rcvd) ||
317                                         jiffies >= xpc_hb_check_timeout ||
318                                                 (volatile int) xpc_exiting));
319         }
320
321         dev_dbg(xpc_part, "heartbeat checker is exiting\n");
322
323
324         /* mark this thread as having exited */
325         complete(&xpc_hb_checker_exited);
326         return 0;
327 }
328
329
330 /*
331  * This thread will attempt to discover other partitions to activate
332  * based on info provided by SAL. This new thread is short lived and
333  * will exit once discovery is complete.
334  */
335 static int
336 xpc_initiate_discovery(void *ignore)
337 {
338         daemonize(XPC_DISCOVERY_THREAD_NAME);
339
340         xpc_discovery();
341
342         dev_dbg(xpc_part, "discovery thread is exiting\n");
343
344         /* mark this thread as having exited */
345         complete(&xpc_discovery_exited);
346         return 0;
347 }
348
349
350 /*
351  * Establish first contact with the remote partition. This involves pulling
352  * the XPC per partition variables from the remote partition and waiting for
353  * the remote partition to pull ours.
354  */
355 static enum xpc_retval
356 xpc_make_first_contact(struct xpc_partition *part)
357 {
358         enum xpc_retval ret;
359
360
361         while ((ret = xpc_pull_remote_vars_part(part)) != xpcSuccess) {
362                 if (ret != xpcRetry) {
363                         XPC_DEACTIVATE_PARTITION(part, ret);
364                         return ret;
365                 }
366
367                 dev_dbg(xpc_chan, "waiting to make first contact with "
368                         "partition %d\n", XPC_PARTID(part));
369
370                 /* wait a 1/4 of a second or so */
371                 (void) msleep_interruptible(250);
372
373                 if (part->act_state == XPC_P_DEACTIVATING) {
374                         return part->reason;
375                 }
376         }
377
378         return xpc_mark_partition_active(part);
379 }
380
381
382 /*
383  * The first kthread assigned to a newly activated partition is the one
384  * created by XPC HB with which it calls xpc_partition_up(). XPC hangs on to
385  * that kthread until the partition is brought down, at which time that kthread
386  * returns back to XPC HB. (The return of that kthread will signify to XPC HB
387  * that XPC has dismantled all communication infrastructure for the associated
388  * partition.) This kthread becomes the channel manager for that partition.
389  *
390  * Each active partition has a channel manager, who, besides connecting and
391  * disconnecting channels, will ensure that each of the partition's connected
392  * channels has the required number of assigned kthreads to get the work done.
393  */
394 static void
395 xpc_channel_mgr(struct xpc_partition *part)
396 {
397         while (part->act_state != XPC_P_DEACTIVATING ||
398                         atomic_read(&part->nchannels_active) > 0 ||
399                                         !xpc_partition_disengaged(part)) {
400
401                 xpc_process_channel_activity(part);
402
403
404                 /*
405                  * Wait until we've been requested to activate kthreads or
406                  * all of the channel's message queues have been torn down or
407                  * a signal is pending.
408                  *
409                  * channel_mgr_requests is set to 1 after being awakened.
410                  * This prevents the channel mgr from making one pass
411                  * through the loop for each request, since it will
412                  * service all of the requests in one pass. The reason it's
413                  * set to 1 instead of 0 is so that other kthreads will know
414                  * that the channel mgr is running and won't bother trying
415                  * to wake it up.
416                  */
417                 atomic_dec(&part->channel_mgr_requests);
418                 (void) wait_event_interruptible(part->channel_mgr_wq,
419                                 (atomic_read(&part->channel_mgr_requests) > 0 ||
420                                 (volatile u64) part->local_IPI_amo != 0 ||
421                                 ((volatile u8) part->act_state ==
422                                                         XPC_P_DEACTIVATING &&
423                                 atomic_read(&part->nchannels_active) == 0 &&
424                                 xpc_partition_disengaged(part))));
425                 atomic_set(&part->channel_mgr_requests, 1);
426
427                 // >>> Does it need to wakeup periodically as well? In case we
428                 // >>> miscalculated the #of kthreads to wakeup or create?
429         }
430 }
431
432
433 /*
434  * When XPC HB determines that a partition has come up, it will create a new
435  * kthread and that kthread will call this function to attempt to set up the
436  * basic infrastructure used for Cross Partition Communication with the newly
437  * upped partition.
438  *
439  * The kthread that was created by XPC HB and which set up the XPC
440  * infrastructure will remain assigned to the partition until the partition
441  * goes down, at which time the kthread will tear down the XPC infrastructure
442  * and then exit.
443  *
444  * XPC HB will put the remote partition's XPC per partition specific variables
445  * physical address into xpc_partitions[partid].remote_vars_part_pa prior to
446  * calling xpc_partition_up().
447  */
448 static void
449 xpc_partition_up(struct xpc_partition *part)
450 {
451         DBUG_ON(part->channels != NULL);
452
453         dev_dbg(xpc_chan, "activating partition %d\n", XPC_PARTID(part));
454
455         if (xpc_setup_infrastructure(part) != xpcSuccess) {
456                 return;
457         }
458
459         /*
460          * The kthread that XPC HB called us with will become the
461          * channel manager for this partition. It will not return
462          * back to XPC HB until the partition's XPC infrastructure
463          * has been dismantled.
464          */
465
466         (void) xpc_part_ref(part);      /* this will always succeed */
467
468         if (xpc_make_first_contact(part) == xpcSuccess) {
469                 xpc_channel_mgr(part);
470         }
471
472         xpc_part_deref(part);
473
474         xpc_teardown_infrastructure(part);
475 }
476
477
478 static int
479 xpc_activating(void *__partid)
480 {
481         partid_t partid = (u64) __partid;
482         struct xpc_partition *part = &xpc_partitions[partid];
483         unsigned long irq_flags;
484         struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
485         int ret;
486
487
488         DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
489
490         spin_lock_irqsave(&part->act_lock, irq_flags);
491
492         if (part->act_state == XPC_P_DEACTIVATING) {
493                 part->act_state = XPC_P_INACTIVE;
494                 spin_unlock_irqrestore(&part->act_lock, irq_flags);
495                 part->remote_rp_pa = 0;
496                 return 0;
497         }
498
499         /* indicate the thread is activating */
500         DBUG_ON(part->act_state != XPC_P_ACTIVATION_REQ);
501         part->act_state = XPC_P_ACTIVATING;
502
503         XPC_SET_REASON(part, 0, 0);
504         spin_unlock_irqrestore(&part->act_lock, irq_flags);
505
506         dev_dbg(xpc_part, "bringing partition %d up\n", partid);
507
508         daemonize("xpc%02d", partid);
509
510         /*
511          * This thread needs to run at a realtime priority to prevent a
512          * significant performance degradation.
513          */
514         ret = sched_setscheduler(current, SCHED_FIFO, &param);
515         if (ret != 0) {
516                 dev_warn(xpc_part, "unable to set pid %d to a realtime "
517                         "priority, ret=%d\n", current->pid, ret);
518         }
519
520         /* allow this thread and its children to run on any CPU */
521         set_cpus_allowed(current, CPU_MASK_ALL);
522
523         /*
524          * Register the remote partition's AMOs with SAL so it can handle
525          * and cleanup errors within that address range should the remote
526          * partition go down. We don't unregister this range because it is
527          * difficult to tell when outstanding writes to the remote partition
528          * are finished and thus when it is safe to unregister. This should
529          * not result in wasted space in the SAL xp_addr_region table because
530          * we should get the same page for remote_amos_page_pa after module
531          * reloads and system reboots.
532          */
533         if (sn_register_xp_addr_region(part->remote_amos_page_pa,
534                                                         PAGE_SIZE, 1) < 0) {
535                 dev_warn(xpc_part, "xpc_partition_up(%d) failed to register "
536                         "xp_addr region\n", partid);
537
538                 spin_lock_irqsave(&part->act_lock, irq_flags);
539                 part->act_state = XPC_P_INACTIVE;
540                 XPC_SET_REASON(part, xpcPhysAddrRegFailed, __LINE__);
541                 spin_unlock_irqrestore(&part->act_lock, irq_flags);
542                 part->remote_rp_pa = 0;
543                 return 0;
544         }
545
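        /*
         * Begin heartbeating to the remote partition and notify it that
         * we have activated.
         */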
546         xpc_allow_hb(partid, xpc_vars);
547         xpc_IPI_send_activated(part);
548
549
550         /*
551          * xpc_partition_up() holds this thread and marks this partition as
552          * XPC_P_ACTIVE by calling xpc_mark_partition_active().
553          */
554         (void) xpc_partition_up(part);
555
556         xpc_disallow_hb(partid, xpc_vars);
557         xpc_mark_partition_inactive(part);
558
559         if (part->reason == xpcReactivating) {
560                 /* interrupting ourselves results in activating partition */
561                 xpc_IPI_send_reactivate(part);
562         }
563
564         return 0;
565 }
566
567
568 void
569 xpc_activate_partition(struct xpc_partition *part)
570 {
571         partid_t partid = XPC_PARTID(part);
572         unsigned long irq_flags;
573         pid_t pid;
574
575
576         spin_lock_irqsave(&part->act_lock, irq_flags);
577
578         DBUG_ON(part->act_state != XPC_P_INACTIVE);
579
580         part->act_state = XPC_P_ACTIVATION_REQ;
581         XPC_SET_REASON(part, xpcCloneKThread, __LINE__);
582
583         spin_unlock_irqrestore(&part->act_lock, irq_flags);
584
585         pid = kernel_thread(xpc_activating, (void *) ((u64) partid), 0);
586
587         if (unlikely(pid <= 0)) {
588                 spin_lock_irqsave(&part->act_lock, irq_flags);
589                 part->act_state = XPC_P_INACTIVE;
590                 XPC_SET_REASON(part, xpcCloneKThreadFailed, __LINE__);
591                 spin_unlock_irqrestore(&part->act_lock, irq_flags);
592         }
593 }
594
595
596 /*
597  * Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified
598  * partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more
599  * than one partition, we use an AMO_t structure per partition to indicate
600  * whether a partition has sent an IPI or not.  >>> If it has, then wake up the
601  * associated kthread to handle it.
602  *
603  * All SGI_XPC_NOTIFY IRQs received by XPC are the result of IPIs sent by XPC
604  * running on other partitions.
605  *
606  * Noteworthy Arguments:
607  *
608  *      irq - Interrupt ReQuest number. NOT USED.
609  *
610  *      dev_id - partid of IPI's potential sender.
611  *
612  *      regs - processor's context before the processor entered
613  *             interrupt code. NOT USED.
614  */
615 irqreturn_t
616 xpc_notify_IRQ_handler(int irq, void *dev_id, struct pt_regs *regs)
617 {
618         partid_t partid = (partid_t) (u64) dev_id;
619         struct xpc_partition *part = &xpc_partitions[partid];
620
621
622         DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
623
624         if (xpc_part_ref(part)) {
625                 xpc_check_for_channel_activity(part);
626
627                 xpc_part_deref(part);
628         }
629         return IRQ_HANDLED;
630 }
631
632
633 /*
634  * Check to see if xpc_notify_IRQ_handler() dropped any IPIs on the floor
635  * because the write to their associated IPI amo completed after the IRQ/IPI
636  * was received.
637  */
638 void
639 xpc_dropped_IPI_check(struct xpc_partition *part)
640 {
641         if (xpc_part_ref(part)) {
642                 xpc_check_for_channel_activity(part);
643
644                 part->dropped_IPI_timer.expires = jiffies +
645                                                         XPC_P_DROPPED_IPI_WAIT;
646                 add_timer(&part->dropped_IPI_timer);
647                 xpc_part_deref(part);
648         }
649 }
650
651
652 void
653 xpc_activate_kthreads(struct xpc_channel *ch, int needed)
654 {
655         int idle = atomic_read(&ch->kthreads_idle);
656         int assigned = atomic_read(&ch->kthreads_assigned);
657         int wakeup;
658
659
660         DBUG_ON(needed <= 0);
661
662         if (idle > 0) {
663                 wakeup = (needed > idle) ? idle : needed;
664                 needed -= wakeup;
665
666                 dev_dbg(xpc_chan, "wakeup %d idle kthreads, partid=%d, "
667                         "channel=%d\n", wakeup, ch->partid, ch->number);
668
669                 /* only wakeup the requested number of kthreads */
670                 wake_up_nr(&ch->idle_wq, wakeup);
671         }
672
673         if (needed <= 0) {
674                 return;
675         }
676
677         if (needed + assigned > ch->kthreads_assigned_limit) {
678                 needed = ch->kthreads_assigned_limit - assigned;
679                 // >>>should never be less than 0
680                 if (needed <= 0) {
681                         return;
682                 }
683         }
684
685         dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n",
686                 needed, ch->partid, ch->number);
687
688         xpc_create_kthreads(ch, needed);
689 }
690
691
692 /*
693  * This function is where XPC's kthreads wait for messages to deliver.
694  */
695 static void
696 xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
697 {
698         do {
699                 /* deliver messages to their intended recipients */
700
701                 while ((volatile s64) ch->w_local_GP.get <
702                                 (volatile s64) ch->w_remote_GP.put &&
703                                         !((volatile u32) ch->flags &
704                                                 XPC_C_DISCONNECTING)) {
705                         xpc_deliver_msg(ch);
706                 }
707
708                 if (atomic_inc_return(&ch->kthreads_idle) >
709                                                 ch->kthreads_idle_limit) {
710                         /* too many idle kthreads on this channel */
711                         atomic_dec(&ch->kthreads_idle);
712                         break;
713                 }
714
715                 dev_dbg(xpc_chan, "idle kthread calling "
716                         "wait_event_interruptible_exclusive()\n");
717
718                 (void) wait_event_interruptible_exclusive(ch->idle_wq,
719                                 ((volatile s64) ch->w_local_GP.get <
720                                         (volatile s64) ch->w_remote_GP.put ||
721                                 ((volatile u32) ch->flags &
722                                                 XPC_C_DISCONNECTING)));
723
724                 atomic_dec(&ch->kthreads_idle);
725
726         } while (!((volatile u32) ch->flags & XPC_C_DISCONNECTING));
727 }
728
729
730 static int
731 xpc_daemonize_kthread(void *args)
732 {
733         partid_t partid = XPC_UNPACK_ARG1(args);
734         u16 ch_number = XPC_UNPACK_ARG2(args);
735         struct xpc_partition *part = &xpc_partitions[partid];
736         struct xpc_channel *ch;
737         int n_needed;
738         unsigned long irq_flags;
739
740
741         daemonize("xpc%02dc%d", partid, ch_number);
742
743         dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n",
744                 partid, ch_number);
745
746         ch = &part->channels[ch_number];
747
748         if (!(ch->flags & XPC_C_DISCONNECTING)) {
749
750                 /* let registerer know that connection has been established */
751
752                 spin_lock_irqsave(&ch->lock, irq_flags);
753                 if (!(ch->flags & XPC_C_CONNECTCALLOUT)) {
754                         ch->flags |= XPC_C_CONNECTCALLOUT;
755                         spin_unlock_irqrestore(&ch->lock, irq_flags);
756
757                         xpc_connected_callout(ch);
758
759                         /*
760                          * It is possible that, while the callout was being
761                          * made, the remote partition sent some messages.
762                          * If that is the case, we may need to activate
763                          * additional kthreads to help deliver them. We only
764                          * need one less than total #of messages to deliver.
765                          */
766                         n_needed = ch->w_remote_GP.put - ch->w_local_GP.get - 1;
767                         if (n_needed > 0 &&
768                                         !(ch->flags & XPC_C_DISCONNECTING)) {
769                                 xpc_activate_kthreads(ch, n_needed);
770                         }
771                 } else {
772                         spin_unlock_irqrestore(&ch->lock, irq_flags);
773                 }
774
775                 xpc_kthread_waitmsgs(part, ch);
776         }
777
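        /*
         * If this was the channel's last assigned kthread, make the disconnect
         * callout (provided the connect callout was made) and, if no other
         * channels remain engaged, disengage from the remote partition.
         */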
778         if (atomic_dec_return(&ch->kthreads_assigned) == 0) {
779                 spin_lock_irqsave(&ch->lock, irq_flags);
780                 if ((ch->flags & XPC_C_CONNECTCALLOUT) &&
781                                 !(ch->flags & XPC_C_DISCONNECTCALLOUT)) {
782                         ch->flags |= XPC_C_DISCONNECTCALLOUT;
783                         spin_unlock_irqrestore(&ch->lock, irq_flags);
784
785                         xpc_disconnect_callout(ch, xpcDisconnecting);
786                 } else {
787                         spin_unlock_irqrestore(&ch->lock, irq_flags);
788                 }
789                 if (atomic_dec_return(&part->nchannels_engaged) == 0) {
790                         xpc_mark_partition_disengaged(part);
791                         xpc_IPI_send_disengage(part);
792                 }
793         }
794
795
796         xpc_msgqueue_deref(ch);
797
798         dev_dbg(xpc_chan, "kthread exiting, partid=%d, channel=%d\n",
799                 partid, ch_number);
800
801         xpc_part_deref(part);
802         return 0;
803 }
804
805
806 /*
807  * For each partition that XPC has established communications with, there is
808  * a minimum of one kernel thread assigned to perform any operation that
809  * may potentially sleep or block (basically the callouts to the asynchronous
810  * functions registered via xpc_connect()).
811  *
812  * Additional kthreads are created and destroyed by XPC as the workload
813  * demands.
814  *
815  * A kthread is assigned to one of the active channels that exists for a given
816  * partition.
817  */
818 void
819 xpc_create_kthreads(struct xpc_channel *ch, int needed)
820 {
821         unsigned long irq_flags;
822         pid_t pid;
823         u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
824         struct xpc_partition *part = &xpc_partitions[ch->partid];
825
826
827         while (needed-- > 0) {
828
829                 /*
830                  * The following is done on behalf of the newly created
831                  * kthread. That kthread is responsible for doing the
832                  * counterpart to the following before it exits.
833                  */
834                 (void) xpc_part_ref(part);
835                 xpc_msgqueue_ref(ch);
836                 if (atomic_inc_return(&ch->kthreads_assigned) == 1 &&
837                     atomic_inc_return(&part->nchannels_engaged) == 1) {
838                         xpc_mark_partition_engaged(part);
839                 }
840
841                 pid = kernel_thread(xpc_daemonize_kthread, (void *) args, 0);
842                 if (pid < 0) {
843                         /* the fork failed */
844                         if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
845                             atomic_dec_return(&part->nchannels_engaged) == 0) {
846                                 xpc_mark_partition_disengaged(part);
847                                 xpc_IPI_send_disengage(part);
848                         }
849                         xpc_msgqueue_deref(ch);
850                         xpc_part_deref(part);
851
852                         if (atomic_read(&ch->kthreads_assigned) <
853                                                 ch->kthreads_idle_limit) {
854                                 /*
855                                  * Flag this as an error only if we have an
856                                  * insufficient #of kthreads for the channel
857                                  * to function.
858                                  *
859                                  * No xpc_msgqueue_ref() is needed here since
860                                  * the channel mgr is doing this.
861                                  */
862                                 spin_lock_irqsave(&ch->lock, irq_flags);
863                                 XPC_DISCONNECT_CHANNEL(ch, xpcLackOfResources,
864                                                                 &irq_flags);
865                                 spin_unlock_irqrestore(&ch->lock, irq_flags);
866                         }
867                         break;
868                 }
869
870                 ch->kthreads_created++; // >>> temporary debug only!!!
871         }
872 }
873
874
875 void
876 xpc_disconnect_wait(int ch_number)
877 {
878         unsigned long irq_flags;
879         partid_t partid;
880         struct xpc_partition *part;
881         struct xpc_channel *ch;
882         int wakeup_channel_mgr;
883
884
885         /* now wait for all callouts to the caller's function to cease */
886         for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
887                 part = &xpc_partitions[partid];
888
889                 if (!xpc_part_ref(part)) {
890                         continue;
891                 }
892
893                 ch = &part->channels[ch_number];
894
895                 if (!(ch->flags & XPC_C_WDISCONNECT)) {
896                         xpc_part_deref(part);
897                         continue;
898                 }
899
900                 wait_for_completion(&ch->wdisconnect_wait);
901
902                 spin_lock_irqsave(&ch->lock, irq_flags);
903                 DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
904                 wakeup_channel_mgr = 0;
905
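                /*
                 * Pass along any IPI flags that arrived while waiting for the
                 * disconnect so that the channel mgr will process them.
                 */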
906                 if (ch->delayed_IPI_flags) {
907                         if (part->act_state != XPC_P_DEACTIVATING) {
908                                 spin_lock(&part->IPI_lock);
909                                 XPC_SET_IPI_FLAGS(part->local_IPI_amo,
910                                         ch->number, ch->delayed_IPI_flags);
911                                 spin_unlock(&part->IPI_lock);
912                                 wakeup_channel_mgr = 1;
913                         }
914                         ch->delayed_IPI_flags = 0;
915                 }
916
917                 ch->flags &= ~XPC_C_WDISCONNECT;
918                 spin_unlock_irqrestore(&ch->lock, irq_flags);
919
920                 if (wakeup_channel_mgr) {
921                         xpc_wakeup_channel_mgr(part);
922                 }
923
924                 xpc_part_deref(part);
925         }
926 }
927
928
929 static void
930 xpc_do_exit(enum xpc_retval reason)
931 {
932         partid_t partid;
933         int active_part_count, printed_waiting_msg = 0;
934         struct xpc_partition *part;
935         unsigned long printmsg_time, disengage_request_timeout = 0;
936
937
938         /* a 'rmmod XPC' and a 'reboot' cannot both end up here together */
939         DBUG_ON(xpc_exiting == 1);
940
941         /*
942          * Let the heartbeat checker thread and the discovery thread
943          * (if one is running) know that they should exit. Also wake up
944          * the heartbeat checker thread in case it's sleeping.
945          */
946         xpc_exiting = 1;
947         wake_up_interruptible(&xpc_act_IRQ_wq);
948
949         /* ignore all incoming interrupts */
950         free_irq(SGI_XPC_ACTIVATE, NULL);
951
952         /* wait for the discovery thread to exit */
953         wait_for_completion(&xpc_discovery_exited);
954
955         /* wait for the heartbeat checker thread to exit */
956         wait_for_completion(&xpc_hb_checker_exited);
957
958
959         /* sleep for a 1/3 of a second or so */
960         (void) msleep_interruptible(300);
961
962
963         /* wait for all partitions to become inactive */
964
965         printmsg_time = jiffies + (XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ);
966         xpc_disengage_request_timedout = 0;
967
968         do {
969                 active_part_count = 0;
970
971                 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
972                         part = &xpc_partitions[partid];
973
974                         if (xpc_partition_disengaged(part) &&
975                                         part->act_state == XPC_P_INACTIVE) {
976                                 continue;
977                         }
978
979                         active_part_count++;
980
981                         XPC_DEACTIVATE_PARTITION(part, reason);
982
983                         if (part->disengage_request_timeout >
984                                                 disengage_request_timeout) {
985                                 disengage_request_timeout =
986                                                 part->disengage_request_timeout;
987                         }
988                 }
989
990                 if (xpc_partition_engaged(-1UL)) {
991                         if (time_after(jiffies, printmsg_time)) {
992                                 dev_info(xpc_part, "waiting for remote "
993                                         "partitions to disengage, timeout in "
994                                         "%ld seconds\n",
995                                         (disengage_request_timeout - jiffies)
996                                                                         / HZ);
997                                 printmsg_time = jiffies +
998                                         (XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ);
999                                 printed_waiting_msg = 1;
1000                         }
1001
1002                 } else if (active_part_count > 0) {
1003                         if (printed_waiting_msg) {
1004                                 dev_info(xpc_part, "waiting for local partition"
1005                                         " to disengage\n");
1006                                 printed_waiting_msg = 0;
1007                         }
1008
1009                 } else {
1010                         if (!xpc_disengage_request_timedout) {
1011                                 dev_info(xpc_part, "all partitions have "
1012                                         "disengaged\n");
1013                         }
1014                         break;
1015                 }
1016
1017                 /* sleep for a 1/3 of a second or so */
1018                 (void) msleep_interruptible(300);
1019
1020         } while (1);
1021
1022         DBUG_ON(xpc_partition_engaged(-1UL));
1023
1024
1025         /* indicate to others that our reserved page is uninitialized */
1026         xpc_rsvd_page->vars_pa = 0;
1027
1028         /* now it's time to eliminate our heartbeat */
1029         del_timer_sync(&xpc_hb_timer);
1030         DBUG_ON(xpc_vars->heartbeating_to_mask != 0);
1031
1032         if (reason == xpcUnloading) {
1033                 /* take ourselves off of the reboot_notifier_list */
1034                 (void) unregister_reboot_notifier(&xpc_reboot_notifier);
1035
1036                 /* take ourselves off of the die_notifier list */
1037                 (void) unregister_die_notifier(&xpc_die_notifier);
1038         }
1039
1040         /* close down protections for IPI operations */
1041         xpc_restrict_IPI_ops();
1042
1043
1044         /* clear the interface to XPC's functions */
1045         xpc_clear_interface();
1046
1047         if (xpc_sysctl) {
1048                 unregister_sysctl_table(xpc_sysctl);
1049         }
1050 }
1051
1052
1053 /*
1054  * This function is called when the system is being rebooted.
1055  */
1056 static int
1057 xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
1058 {
1059         enum xpc_retval reason;
1060
1061
1062         switch (event) {
1063         case SYS_RESTART:
1064                 reason = xpcSystemReboot;
1065                 break;
1066         case SYS_HALT:
1067                 reason = xpcSystemHalt;
1068                 break;
1069         case SYS_POWER_OFF:
1070                 reason = xpcSystemPoweroff;
1071                 break;
1072         default:
1073                 reason = xpcSystemGoingDown;
1074         }
1075
1076         xpc_do_exit(reason);
1077         return NOTIFY_DONE;
1078 }
1079
1080
1081 /*
1082  * Notify other partitions to disengage from all references to our memory.
1083  */
1084 static void
1085 xpc_die_disengage(void)
1086 {
1087         struct xpc_partition *part;
1088         partid_t partid;
1089         unsigned long engaged;
1090         long time, printmsg_time, disengage_request_timeout;
1091
1092
1093         /* keep xpc_hb_checker thread from doing anything (just in case) */
1094         xpc_exiting = 1;
1095
1096         xpc_vars->heartbeating_to_mask = 0;  /* indicate we're deactivated */
1097
1098         for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
1099                 part = &xpc_partitions[partid];
1100
1101                 if (!XPC_SUPPORTS_DISENGAGE_REQUEST(part->
1102                                                         remote_vars_version)) {
1103
1104                         /* just in case it was left set by an earlier XPC */
1105                         xpc_clear_partition_engaged(1UL << partid);
1106                         continue;
1107                 }
1108
1109                 if (xpc_partition_engaged(1UL << partid) ||
1110                                         part->act_state != XPC_P_INACTIVE) {
1111                         xpc_request_partition_disengage(part);
1112                         xpc_mark_partition_disengaged(part);
1113                         xpc_IPI_send_disengage(part);
1114                 }
1115         }
1116
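        /*
         * The times below are measured in SN RTC cycles
         * (sn_rtc_cycles_per_second of them per second).
         */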
1117         time = rtc_time();
1118         printmsg_time = time +
1119                 (XPC_DISENGAGE_PRINTMSG_INTERVAL * sn_rtc_cycles_per_second);
1120         disengage_request_timeout = time +
1121                 (xpc_disengage_request_timelimit * sn_rtc_cycles_per_second);
1122
1123         /* wait for all other partitions to disengage from us */
1124
1125         while (1) {
1126                 engaged = xpc_partition_engaged(-1UL);
1127                 if (!engaged) {
1128                         dev_info(xpc_part, "all partitions have disengaged\n");
1129                         break;
1130                 }
1131
1132                 time = rtc_time();
1133                 if (time >= disengage_request_timeout) {
1134                         for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
1135                                 if (engaged & (1UL << partid)) {
1136                                         dev_info(xpc_part, "disengage from "
1137                                                 "remote partition %d timed "
1138                                                 "out\n", partid);
1139                                 }
1140                         }
1141                         break;
1142                 }
1143
1144                 if (time >= printmsg_time) {
1145                         dev_info(xpc_part, "waiting for remote partitions to "
1146                                 "disengage, timeout in %ld seconds\n",
1147                                 (disengage_request_timeout - time) /
1148                                                 sn_rtc_cycles_per_second);
1149                         printmsg_time = time +
1150                                         (XPC_DISENGAGE_PRINTMSG_INTERVAL *
1151                                                 sn_rtc_cycles_per_second);
1152                 }
1153         }
1154 }
1155
1156
1157 /*
1158  * This function is called when the system is being restarted or halted due
1159  * to some sort of system failure. If this is the case, we need to notify the
1160  * other partitions to disengage from all references to our memory.
1161  * This function can also be called when our heartbeat may be offline
1162  * for a time. In this case we need to notify other partitions not to worry
1163  * about the lack of a heartbeat.
1164  */
1165 static int
1166 xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
1167 {
1168         switch (event) {
1169         case DIE_MACHINE_RESTART:
1170         case DIE_MACHINE_HALT:
1171                 xpc_die_disengage();
1172                 break;
1173
1174         case DIE_KDEBUG_ENTER:
1175                 /* Should lack of heartbeat be ignored by other partitions? */
1176                 if (!xpc_kdebug_ignore) {
1177                         break;
1178                 }
1179                 /* fall through */
1180         case DIE_MCA_MONARCH_ENTER:
1181         case DIE_INIT_MONARCH_ENTER:
1182                 xpc_vars->heartbeat++;
1183                 xpc_vars->heartbeat_offline = 1;
1184                 break;
1185
1186         case DIE_KDEBUG_LEAVE:
1187                 /* Is lack of heartbeat being ignored by other partitions? */
1188                 if (!xpc_kdebug_ignore) {
1189                         break;
1190                 }
1191                 /* fall through */
1192         case DIE_MCA_MONARCH_LEAVE:
1193         case DIE_INIT_MONARCH_LEAVE:
1194                 xpc_vars->heartbeat++;
1195                 xpc_vars->heartbeat_offline = 0;
1196                 break;
1197         }
1198
1199         return NOTIFY_DONE;
1200 }
1201
1202
1203 int __init
1204 xpc_init(void)
1205 {
1206         int ret;
1207         partid_t partid;
1208         struct xpc_partition *part;
1209         pid_t pid;
1210
1211
1212         if (!ia64_platform_is("sn2")) {
1213                 return -ENODEV;
1214         }
1215
1216         /*
1217          * xpc_remote_copy_buffer is used as a temporary buffer for bte_copy'ng
1218          * various portions of a partition's reserved page. Its size is based
1219          * on the size of the reserved page header and part_nasids mask. So we
1220          * need to ensure that the other items will fit as well.
1221          */
1222         if (XPC_RP_VARS_SIZE > XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES) {
1223                 dev_err(xpc_part, "xpc_remote_copy_buffer is not big enough\n");
1224                 return -EPERM;
1225         }
1226         DBUG_ON((u64) xpc_remote_copy_buffer !=
1227                                 L1_CACHE_ALIGN((u64) xpc_remote_copy_buffer));
1228
1229         snprintf(xpc_part->bus_id, BUS_ID_SIZE, "part");
1230         snprintf(xpc_chan->bus_id, BUS_ID_SIZE, "chan");
1231
1232         xpc_sysctl = register_sysctl_table(xpc_sys_dir, 1);
1233
1234         /*
1235          * The first few fields of each entry of xpc_partitions[] need to
1236          * be initialized now so that calls to xpc_connect() and
1237          * xpc_disconnect() can be made prior to the activation of any remote
1238          * partition. NOTE THAT NONE OF THE OTHER FIELDS BELONGING TO THESE
1239          * ENTRIES ARE MEANINGFUL UNTIL AFTER AN ENTRY'S CORRESPONDING
1240          * PARTITION HAS BEEN ACTIVATED.
1241          */
1242         for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
1243                 part = &xpc_partitions[partid];
1244
1245                 DBUG_ON((u64) part != L1_CACHE_ALIGN((u64) part));
1246
1247                 part->act_IRQ_rcvd = 0;
1248                 spin_lock_init(&part->act_lock);
1249                 part->act_state = XPC_P_INACTIVE;
1250                 XPC_SET_REASON(part, 0, 0);
1251
1252                 init_timer(&part->disengage_request_timer);
1253                 part->disengage_request_timer.function =
1254                                 xpc_timeout_partition_disengage_request;
1255                 part->disengage_request_timer.data = (unsigned long) part;
1256
1257                 part->setup_state = XPC_P_UNSET;
1258                 init_waitqueue_head(&part->teardown_wq);
1259                 atomic_set(&part->references, 0);
1260         }
1261
1262         /*
1263          * Open up protections for IPI operations (and AMO operations on
1264          * Shub 1.1 systems).
1265          */
1266         xpc_allow_IPI_ops();
1267
1268         /*
1269          * The activate IRQ handler will increment this atomic variable and
1270          * awaken the heartbeat checker thread, which will process the interrupts.
1271          */
1272         atomic_set(&xpc_act_IRQ_rcvd, 0);
1273
1274         /*
1275          * This is safe to do before the xpc_hb_checker thread has started
1276          * because the handler merely wakes up a wait queue.  If an interrupt
1277          * is received before the thread is waiting, the thread will not go to
1278          * sleep, but rather will immediately process the interrupt.
1279          */
1280         ret = request_irq(SGI_XPC_ACTIVATE, xpc_act_IRQ_handler, 0,
1281                                                         "xpc hb", NULL);
1282         if (ret != 0) {
1283                 dev_err(xpc_part, "can't register ACTIVATE IRQ handler, "
1284                         "errno=%d\n", -ret);
1285
1286                 xpc_restrict_IPI_ops();
1287
1288                 if (xpc_sysctl) {
1289                         unregister_sysctl_table(xpc_sysctl);
1290                 }
1291                 return -EBUSY;
1292         }
1293
1294         /*
1295          * Fill the partition reserved page with the information needed by
1296          * other partitions to discover we are alive and establish initial
1297          * communications.
1298          */
1299         xpc_rsvd_page = xpc_rsvd_page_init();
1300         if (xpc_rsvd_page == NULL) {
1301                 dev_err(xpc_part, "could not setup our reserved page\n");
1302
1303                 free_irq(SGI_XPC_ACTIVATE, NULL);
1304                 xpc_restrict_IPI_ops();
1305
1306                 if (xpc_sysctl) {
1307                         unregister_sysctl_table(xpc_sysctl);
1308                 }
1309                 return -EBUSY;
1310         }
1311
1312
1313         /* add ourselves to the reboot_notifier_list */
1314         ret = register_reboot_notifier(&xpc_reboot_notifier);
1315         if (ret != 0) {
1316                 dev_warn(xpc_part, "can't register reboot notifier\n");
1317         }
1318
1319         /* add ourselves to the die_notifier list (i.e., ia64die_chain) */
1320         ret = register_die_notifier(&xpc_die_notifier);
1321         if (ret != 0) {
1322                 dev_warn(xpc_part, "can't register die notifier\n");
1323         }
1324
1325
1326         /*
1327          * Set the beating to other partitions into motion.  This is
1328          * the last requirement for other partitions' discovery to
1329          * initiate communications with us.
1330          */
1331         init_timer(&xpc_hb_timer);
1332         xpc_hb_timer.function = xpc_hb_beater;
1333         xpc_hb_beater(0);
1334
1335
1336         /*
1337          * The real work-horse behind xpc.  This processes incoming
1338          * interrupts and monitors remote heartbeats.
1339          */
1340         pid = kernel_thread(xpc_hb_checker, NULL, 0);
1341         if (pid < 0) {
1342                 dev_err(xpc_part, "failed while forking hb check thread\n");
1343
1344                 /* indicate to others that our reserved page is uninitialized */
1345                 xpc_rsvd_page->vars_pa = 0;
1346
1347                 /* take ourselves off of the reboot_notifier_list */
1348                 (void) unregister_reboot_notifier(&xpc_reboot_notifier);
1349
1350                 /* take ourselves off of the die_notifier list */
1351                 (void) unregister_die_notifier(&xpc_die_notifier);
1352
1353                 del_timer_sync(&xpc_hb_timer);
1354                 free_irq(SGI_XPC_ACTIVATE, NULL);
1355                 xpc_restrict_IPI_ops();
1356
1357                 if (xpc_sysctl) {
1358                         unregister_sysctl_table(xpc_sysctl);
1359                 }
1360                 return -EBUSY;
1361         }
1362
1363
1364         /*
1365          * Startup a thread that will attempt to discover other partitions to
1366          * activate based on info provided by SAL. This new thread is short
1367          * lived and will exit once discovery is complete.
1368          */
1369         pid = kernel_thread(xpc_initiate_discovery, NULL, 0);
1370         if (pid < 0) {
1371                 dev_err(xpc_part, "failed while forking discovery thread\n");
1372
1373                 /* mark this new thread as a non-starter */
1374                 complete(&xpc_discovery_exited);
1375
1376                 xpc_do_exit(xpcUnloading);
1377                 return -EBUSY;
1378         }
1379
1380
1381         /* set the interface to point at XPC's functions */
1382         xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect,
1383                           xpc_initiate_allocate, xpc_initiate_send,
1384                           xpc_initiate_send_notify, xpc_initiate_received,
1385                           xpc_initiate_partid_to_nasids);
1386
1387         return 0;
1388 }
1389 module_init(xpc_init);
1390
1391
1392 void __exit
1393 xpc_exit(void)
1394 {
1395         xpc_do_exit(xpcUnloading);
1396 }
1397 module_exit(xpc_exit);
1398
1399
1400 MODULE_AUTHOR("Silicon Graphics, Inc.");
1401 MODULE_DESCRIPTION("Cross Partition Communication (XPC) support");
1402 MODULE_LICENSE("GPL");
1403
1404 module_param(xpc_hb_interval, int, 0);
1405 MODULE_PARM_DESC(xpc_hb_interval, "Number of seconds between "
1406                 "heartbeat increments.");
1407
1408 module_param(xpc_hb_check_interval, int, 0);
1409 MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between "
1410                 "heartbeat checks.");
1411
1412 module_param(xpc_disengage_request_timelimit, int, 0);
1413 MODULE_PARM_DESC(xpc_disengage_request_timelimit, "Number of seconds to wait "
1414                 "for disengage request to complete.");
1415
1416 module_param(xpc_kdebug_ignore, int, 0);
1417 MODULE_PARM_DESC(xpc_kdebug_ignore, "Should lack of heartbeat be ignored by "
1418                 "other partitions when dropping into kdebug.");
1419