[S390] Get rid of a bunch of sparse warnings again.
drivers/s390/char/sclp.c
/*
 *  drivers/s390/char/sclp.c
 *     core function to access sclp interface
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Peschke <mpeschke@de.ibm.com>
 *               Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/module.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/reboot.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <asm/types.h>
#include <asm/s390_ext.h>

#include "sclp.h"

#define SCLP_HEADER             "sclp: "

/* Structure for register_early_external_interrupt. */
static ext_int_info_t ext_int_info_hwc;

/* Lock to protect internal data consistency. */
static DEFINE_SPINLOCK(sclp_lock);

/* Mask of events that we can receive from the sclp interface. */
static sccb_mask_t sclp_receive_mask;

/* Mask of events that we can send to the sclp interface. */
static sccb_mask_t sclp_send_mask;

/* List of registered event listeners and senders. */
static struct list_head sclp_reg_list;

/* List of queued requests. */
static struct list_head sclp_req_queue;

/* Data for read and init requests. */
static struct sclp_req sclp_read_req;
static struct sclp_req sclp_init_req;
static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));

/* Timer for request retries. */
static struct timer_list sclp_request_timer;

/* Internal state: is the driver initialized? */
static volatile enum sclp_init_state_t {
        sclp_init_state_uninitialized,
        sclp_init_state_initializing,
        sclp_init_state_initialized
} sclp_init_state = sclp_init_state_uninitialized;

/* Internal state: is a request active at the sclp? */
static volatile enum sclp_running_state_t {
        sclp_running_state_idle,
        sclp_running_state_running,
        sclp_running_state_reset_pending
} sclp_running_state = sclp_running_state_idle;

/* Internal state: is a read request pending? */
static volatile enum sclp_reading_state_t {
        sclp_reading_state_idle,
        sclp_reading_state_reading
} sclp_reading_state = sclp_reading_state_idle;

/* Internal state: is the driver currently serving requests? */
static volatile enum sclp_activation_state_t {
        sclp_activation_state_active,
        sclp_activation_state_deactivating,
        sclp_activation_state_inactive,
        sclp_activation_state_activating
} sclp_activation_state = sclp_activation_state_active;

/* Internal state: is an init mask request pending? */
static volatile enum sclp_mask_state_t {
        sclp_mask_state_idle,
        sclp_mask_state_initializing
} sclp_mask_state = sclp_mask_state_idle;

/* Maximum retry counts */
#define SCLP_INIT_RETRY         3
#define SCLP_MASK_RETRY         3

/* Timeout intervals in seconds. */
#define SCLP_BUSY_INTERVAL      10
#define SCLP_RETRY_INTERVAL     30

static void sclp_process_queue(void);
static void __sclp_make_read_req(void);
static int sclp_init_mask(int calculate);
static int sclp_init(void);

/* Perform service call. Return 0 on success, non-zero otherwise. */
int
sclp_service_call(sclp_cmdw_t command, void *sccb)
{
        int cc;

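        /* Condition code 2 (busy) and 3 (not operational) from the service
         * call are mapped to -EBUSY and -EIO below. */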
        asm volatile(
                "       .insn   rre,0xb2200000,%1,%2\n"  /* servc %1,%2 */
                "       ipm     %0\n"
                "       srl     %0,28"
                : "=&d" (cc) : "d" (command), "a" (__pa(sccb))
                : "cc", "memory");
        if (cc == 3)
                return -EIO;
        if (cc == 2)
                return -EBUSY;
        return 0;
}

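/* Queue the read event data request unless one is already pending.
 * Called while sclp_lock is locked. */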
static void
__sclp_queue_read_req(void)
{
        if (sclp_reading_state == sclp_reading_state_idle) {
                sclp_reading_state = sclp_reading_state_reading;
                __sclp_make_read_req();
                /* Add request to head of queue */
                list_add(&sclp_read_req.list, &sclp_req_queue);
        }
}

/* Set up request retry timer. Called while sclp_lock is locked. */
static inline void
__sclp_set_request_timer(unsigned long time, void (*function)(unsigned long),
                         unsigned long data)
{
        del_timer(&sclp_request_timer);
        sclp_request_timer.function = function;
        sclp_request_timer.data = data;
        sclp_request_timer.expires = jiffies + time;
        add_timer(&sclp_request_timer);
}

/* Request timeout handler. Restart the request queue. If DATA is non-zero,
 * force restart of running request. */
static void
sclp_request_timeout(unsigned long data)
{
        unsigned long flags;

        spin_lock_irqsave(&sclp_lock, flags);
        if (data) {
                if (sclp_running_state == sclp_running_state_running) {
                        /* Break running state and queue NOP read event request
                         * to get a defined interface state. */
                        __sclp_queue_read_req();
                        sclp_running_state = sclp_running_state_idle;
                }
        } else {
                __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
                                         sclp_request_timeout, 0);
        }
        spin_unlock_irqrestore(&sclp_lock, flags);
        sclp_process_queue();
}

/* Try to start a request. Return zero if the request was successfully
 * started or if it will be started at a later time. Return non-zero otherwise.
 * Called while sclp_lock is locked. */
static int
__sclp_start_request(struct sclp_req *req)
{
        int rc;

        if (sclp_running_state != sclp_running_state_idle)
                return 0;
        del_timer(&sclp_request_timer);
        rc = sclp_service_call(req->command, req->sccb);
        req->start_count++;

        if (rc == 0) {
                /* Successfully started request */
                req->status = SCLP_REQ_RUNNING;
                sclp_running_state = sclp_running_state_running;
                __sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
                                         sclp_request_timeout, 1);
                return 0;
        } else if (rc == -EBUSY) {
                /* Try again later */
                __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
                                         sclp_request_timeout, 0);
                return 0;
        }
        /* Request failed */
        req->status = SCLP_REQ_FAILED;
        return rc;
}

/* Try to start queued requests. */
static void
sclp_process_queue(void)
{
        struct sclp_req *req;
        int rc;
        unsigned long flags;

        spin_lock_irqsave(&sclp_lock, flags);
        if (sclp_running_state != sclp_running_state_idle) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return;
        }
        del_timer(&sclp_request_timer);
        while (!list_empty(&sclp_req_queue)) {
                req = list_entry(sclp_req_queue.next, struct sclp_req, list);
                rc = __sclp_start_request(req);
                if (rc == 0)
                        break;
                /* Request failed */
                if (req->start_count > 1) {
                        /* Cannot abort already submitted request - could still
                         * be active at the SCLP */
                        __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
                                                 sclp_request_timeout, 0);
                        break;
                }
                /* Post-processing for aborted request */
                list_del(&req->list);
                if (req->callback) {
                        spin_unlock_irqrestore(&sclp_lock, flags);
                        req->callback(req, req->callback_data);
                        spin_lock_irqsave(&sclp_lock, flags);
                }
        }
        spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Queue a new request. Return zero on success, non-zero otherwise. */
int
sclp_add_request(struct sclp_req *req)
{
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&sclp_lock, flags);
        if ((sclp_init_state != sclp_init_state_initialized ||
             sclp_activation_state != sclp_activation_state_active) &&
            req != &sclp_init_req) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EIO;
        }
        req->status = SCLP_REQ_QUEUED;
        req->start_count = 0;
        list_add_tail(&req->list, &sclp_req_queue);
        rc = 0;
        /* Start if request is first in list */
        if (sclp_running_state == sclp_running_state_idle &&
            req->list.prev == &sclp_req_queue) {
                rc = __sclp_start_request(req);
                if (rc)
                        list_del(&req->list);
        }
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}

EXPORT_SYMBOL(sclp_add_request);

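/* Usage sketch (hypothetical caller, not part of this file): fill in a
 * struct sclp_req with a command word and a page-aligned SCCB, then queue
 * it; the callback runs once the request has been post-processed.
 *
 *      static void my_callback(struct sclp_req *req, void *data)
 *      {
 *              // inspect req->status and the SCCB response code here
 *      }
 *
 *      my_req.command = SCLP_CMDW_READ_EVENT_DATA;
 *      my_req.sccb = my_sccb;                  // page-aligned buffer
 *      my_req.status = SCLP_REQ_FILLED;
 *      my_req.callback = my_callback;
 *      my_req.callback_data = NULL;
 *      rc = sclp_add_request(&my_req);
 */
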
/* Dispatch events found in request buffer to registered listeners. Return 0
 * if all events were dispatched, non-zero otherwise. */
static int
sclp_dispatch_evbufs(struct sccb_header *sccb)
{
        unsigned long flags;
        struct evbuf_header *evbuf;
        struct list_head *l;
        struct sclp_register *reg;
        int offset;
        int rc;

        spin_lock_irqsave(&sclp_lock, flags);
        rc = 0;
        for (offset = sizeof(struct sccb_header); offset < sccb->length;
             offset += evbuf->length) {
                /* Search for event handler */
                evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
                reg = NULL;
                list_for_each(l, &sclp_reg_list) {
                        reg = list_entry(l, struct sclp_register, list);
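                        /* Event type n is represented by mask bit
                         * 1 << (32 - n), i.e. bits are counted from the
                         * most significant end. */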
                        if (reg->receive_mask & (1 << (32 - evbuf->type)))
                                break;
                        else
                                reg = NULL;
                }
                if (reg && reg->receiver_fn) {
                        spin_unlock_irqrestore(&sclp_lock, flags);
                        reg->receiver_fn(evbuf);
                        spin_lock_irqsave(&sclp_lock, flags);
                } else if (reg == NULL)
                        rc = -ENOSYS;
        }
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}

/* Read event data request callback. */
static void
sclp_read_cb(struct sclp_req *req, void *data)
{
        unsigned long flags;
        struct sccb_header *sccb;

        sccb = (struct sccb_header *) req->sccb;
        if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 ||
            sccb->response_code == 0x220))
                sclp_dispatch_evbufs(sccb);
        spin_lock_irqsave(&sclp_lock, flags);
        sclp_reading_state = sclp_reading_state_idle;
        spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Prepare read event data request. Called while sclp_lock is locked. */
static void __sclp_make_read_req(void)
{
        struct sccb_header *sccb;

        sccb = (struct sccb_header *) sclp_read_sccb;
        clear_page(sccb);
        memset(&sclp_read_req, 0, sizeof(struct sclp_req));
        sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA;
        sclp_read_req.status = SCLP_REQ_QUEUED;
        sclp_read_req.start_count = 0;
        sclp_read_req.callback = sclp_read_cb;
        sclp_read_req.sccb = sccb;
        sccb->length = PAGE_SIZE;
        sccb->function_code = 0;
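        /* Bit 0x80 in control mask byte 2 presumably requests a
         * variable-length response. */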
        sccb->control_mask[2] = 0x80;
}

/* Search request list for request with matching sccb. Return request if found,
 * NULL otherwise. Called while sclp_lock is locked. */
static inline struct sclp_req *
__sclp_find_req(u32 sccb)
{
        struct list_head *l;
        struct sclp_req *req;

        list_for_each(l, &sclp_req_queue) {
                req = list_entry(l, struct sclp_req, list);
                if (sccb == (u32) (addr_t) req->sccb)
                        return req;
        }
        return NULL;
}

/* Handler for external interruption. Perform request post-processing.
 * Prepare read event data request if necessary. Start processing of next
 * request on queue. */
static void
sclp_interrupt_handler(__u16 code)
{
        struct sclp_req *req;
        u32 finished_sccb;
        u32 evbuf_pending;

        spin_lock(&sclp_lock);
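        /* The interruption parameter carries the address of the finished
         * SCCB in its upper bits; the two low-order bits flag pending
         * event buffers. */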
        finished_sccb = S390_lowcore.ext_params & 0xfffffff8;
        evbuf_pending = S390_lowcore.ext_params & 0x3;
        if (finished_sccb) {
                del_timer(&sclp_request_timer);
                sclp_running_state = sclp_running_state_reset_pending;
                req = __sclp_find_req(finished_sccb);
                if (req) {
                        /* Request post-processing */
                        list_del(&req->list);
                        req->status = SCLP_REQ_DONE;
                        if (req->callback) {
                                spin_unlock(&sclp_lock);
                                req->callback(req, req->callback_data);
                                spin_lock(&sclp_lock);
                        }
                }
                sclp_running_state = sclp_running_state_idle;
        }
        if (evbuf_pending && sclp_receive_mask != 0 &&
            sclp_activation_state == sclp_activation_state_active)
                __sclp_queue_read_req();
        spin_unlock(&sclp_lock);
        sclp_process_queue();
}

/* Convert interval in jiffies to TOD ticks. */
static inline u64
sclp_tod_from_jiffies(unsigned long jiffies)
{
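        /* 2^32 TOD clock units correspond to roughly one second
         * (1.048576 s), hence the shift by 32. */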
        return (u64) (jiffies / HZ) << 32;
}

/* Wait until a currently running request has finished. Note: while this
 * function is running, no timers are served on the calling CPU. */
void
sclp_sync_wait(void)
{
        unsigned long flags;
        unsigned long cr0, cr0_sync;
        u64 timeout;
        int irq_context;

        /* We'll be disabling timer interrupts, so we need a custom timeout
         * mechanism */
        timeout = 0;
        if (timer_pending(&sclp_request_timer)) {
                /* Get timeout TOD value */
                timeout = get_clock() +
                          sclp_tod_from_jiffies(sclp_request_timer.expires -
                                                jiffies);
        }
        local_irq_save(flags);
        /* Prevent bottom half from executing once we force interrupts open */
        irq_context = in_interrupt();
        if (!irq_context)
                local_bh_disable();
        /* Enable service-signal interruption, disable timer interrupts */
        trace_hardirqs_on();
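        /* 0x200 is the service-signal subclass mask bit in CR0; the AND
         * mask clears, among others, the clock-comparator and CPU-timer
         * subclass bits. */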
        __ctl_store(cr0, 0, 0);
        cr0_sync = cr0;
        cr0_sync |= 0x00000200;
        cr0_sync &= 0xFFFFF3AC;
        __ctl_load(cr0_sync, 0, 0);
        __raw_local_irq_stosm(0x01);
        /* Loop until driver state indicates finished request */
        while (sclp_running_state != sclp_running_state_idle) {
                /* Check for expired request timer */
                if (timer_pending(&sclp_request_timer) &&
                    get_clock() > timeout &&
                    del_timer(&sclp_request_timer))
                        sclp_request_timer.function(sclp_request_timer.data);
                cpu_relax();
        }
        local_irq_disable();
        __ctl_load(cr0, 0, 0);
        if (!irq_context)
                _local_bh_enable();
        local_irq_restore(flags);
}

EXPORT_SYMBOL(sclp_sync_wait);

/* Dispatch changes in send and receive mask to registered listeners. */
static void
sclp_dispatch_state_change(void)
{
        struct list_head *l;
        struct sclp_register *reg;
        unsigned long flags;
        sccb_mask_t receive_mask;
        sccb_mask_t send_mask;

        do {
                spin_lock_irqsave(&sclp_lock, flags);
                reg = NULL;
                list_for_each(l, &sclp_reg_list) {
                        reg = list_entry(l, struct sclp_register, list);
                        receive_mask = reg->receive_mask & sclp_receive_mask;
                        send_mask = reg->send_mask & sclp_send_mask;
                        if (reg->sclp_receive_mask != receive_mask ||
                            reg->sclp_send_mask != send_mask) {
                                reg->sclp_receive_mask = receive_mask;
                                reg->sclp_send_mask = send_mask;
                                break;
                        } else
                                reg = NULL;
                }
                spin_unlock_irqrestore(&sclp_lock, flags);
                if (reg && reg->state_change_fn)
                        reg->state_change_fn(reg);
        } while (reg);
}

struct sclp_statechangebuf {
        struct evbuf_header     header;
        u8              validity_sclp_active_facility_mask : 1;
        u8              validity_sclp_receive_mask : 1;
        u8              validity_sclp_send_mask : 1;
        u8              validity_read_data_function_mask : 1;
        u16             _zeros : 12;
        u16             mask_length;
        u64             sclp_active_facility_mask;
        sccb_mask_t     sclp_receive_mask;
        sccb_mask_t     sclp_send_mask;
        u32             read_data_function_mask;
} __attribute__((packed));

/* State change event callback. Inform listeners of changes. */
static void
sclp_state_change_cb(struct evbuf_header *evbuf)
{
        unsigned long flags;
        struct sclp_statechangebuf *scbuf;

        scbuf = (struct sclp_statechangebuf *) evbuf;
        if (scbuf->mask_length != sizeof(sccb_mask_t))
                return;
        spin_lock_irqsave(&sclp_lock, flags);
        if (scbuf->validity_sclp_receive_mask)
                sclp_receive_mask = scbuf->sclp_receive_mask;
        if (scbuf->validity_sclp_send_mask)
                sclp_send_mask = scbuf->sclp_send_mask;
        spin_unlock_irqrestore(&sclp_lock, flags);
        sclp_dispatch_state_change();
}

static struct sclp_register sclp_state_change_event = {
        .receive_mask = EVTYP_STATECHANGE_MASK,
        .receiver_fn = sclp_state_change_cb
};

/* Calculate receive and send mask of currently registered listeners.
 * Called while sclp_lock is locked. */
static inline void
__sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
{
        struct list_head *l;
        struct sclp_register *t;

        *receive_mask = 0;
        *send_mask = 0;
        list_for_each(l, &sclp_reg_list) {
                t = list_entry(l, struct sclp_register, list);
                *receive_mask |= t->receive_mask;
                *send_mask |= t->send_mask;
        }
}

/* Register event listener. Return 0 on success, non-zero otherwise. */
int
sclp_register(struct sclp_register *reg)
{
        unsigned long flags;
        sccb_mask_t receive_mask;
        sccb_mask_t send_mask;
        int rc;

        rc = sclp_init();
        if (rc)
                return rc;
        spin_lock_irqsave(&sclp_lock, flags);
        /* Check event mask for collisions */
        __sclp_get_mask(&receive_mask, &send_mask);
        if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EBUSY;
        }
        /* Trigger initial state change callback */
        reg->sclp_receive_mask = 0;
        reg->sclp_send_mask = 0;
        list_add(&reg->list, &sclp_reg_list);
        spin_unlock_irqrestore(&sclp_lock, flags);
        rc = sclp_init_mask(1);
        if (rc) {
                spin_lock_irqsave(&sclp_lock, flags);
                list_del(&reg->list);
                spin_unlock_irqrestore(&sclp_lock, flags);
        }
        return rc;
}

EXPORT_SYMBOL(sclp_register);

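/* Usage sketch (hypothetical listener, not part of this file): declare a
 * struct sclp_register with the event masks of interest, then register it.
 * receiver_fn is invoked for each matching event buffer.
 *
 *      static void my_receiver(struct evbuf_header *evbuf)
 *      {
 *              // handle the event buffer
 *      }
 *
 *      static struct sclp_register my_listener = {
 *              .receive_mask = EVTYP_MSG_MASK, // assumed mask from sclp.h
 *              .receiver_fn = my_receiver,
 *      };
 *
 *      rc = sclp_register(&my_listener);
 */
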
/* Unregister event listener. */
void
sclp_unregister(struct sclp_register *reg)
{
        unsigned long flags;

        spin_lock_irqsave(&sclp_lock, flags);
        list_del(&reg->list);
        spin_unlock_irqrestore(&sclp_lock, flags);
        sclp_init_mask(1);
}

EXPORT_SYMBOL(sclp_unregister);

/* Remove event buffers which are marked processed. Return the number of
 * remaining event buffers. */
int
sclp_remove_processed(struct sccb_header *sccb)
{
        struct evbuf_header *evbuf;
        int unprocessed;
        u16 remaining;

        evbuf = (struct evbuf_header *) (sccb + 1);
        unprocessed = 0;
        remaining = sccb->length - sizeof(struct sccb_header);
        while (remaining > 0) {
                remaining -= evbuf->length;
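                /* Flag bit 0x80 marks an event buffer as processed. */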
                if (evbuf->flags & 0x80) {
                        sccb->length -= evbuf->length;
                        memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length),
                               remaining);
                } else {
                        unprocessed++;
                        evbuf = (struct evbuf_header *)
                                        ((addr_t) evbuf + evbuf->length);
                }
        }
        return unprocessed;
}

EXPORT_SYMBOL(sclp_remove_processed);

struct init_sccb {
        struct sccb_header header;
        u16 _reserved;
        u16 mask_length;
        sccb_mask_t receive_mask;
        sccb_mask_t send_mask;
        sccb_mask_t sclp_send_mask;
        sccb_mask_t sclp_receive_mask;
} __attribute__((packed));

/* Prepare init mask request. Called while sclp_lock is locked. */
static inline void
__sclp_make_init_req(u32 receive_mask, u32 send_mask)
{
        struct init_sccb *sccb;

        sccb = (struct init_sccb *) sclp_init_sccb;
        clear_page(sccb);
        memset(&sclp_init_req, 0, sizeof(struct sclp_req));
        sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK;
        sclp_init_req.status = SCLP_REQ_FILLED;
        sclp_init_req.start_count = 0;
        sclp_init_req.callback = NULL;
        sclp_init_req.callback_data = NULL;
        sclp_init_req.sccb = sccb;
        sccb->header.length = sizeof(struct init_sccb);
        sccb->mask_length = sizeof(sccb_mask_t);
        sccb->receive_mask = receive_mask;
        sccb->send_mask = send_mask;
        sccb->sclp_receive_mask = 0;
        sccb->sclp_send_mask = 0;
}

/* Start init mask request. If calculate is non-zero, calculate the mask as
 * requested by registered listeners. Use zero mask otherwise. Return 0 on
 * success, non-zero otherwise. */
static int
sclp_init_mask(int calculate)
{
        unsigned long flags;
        struct init_sccb *sccb = (struct init_sccb *) sclp_init_sccb;
        sccb_mask_t receive_mask;
        sccb_mask_t send_mask;
        int retry;
        int rc;
        unsigned long wait;

        spin_lock_irqsave(&sclp_lock, flags);
        /* Check if interface is in appropriate state */
        if (sclp_mask_state != sclp_mask_state_idle) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EBUSY;
        }
        if (sclp_activation_state == sclp_activation_state_inactive) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EINVAL;
        }
        sclp_mask_state = sclp_mask_state_initializing;
        /* Determine mask */
        if (calculate)
                __sclp_get_mask(&receive_mask, &send_mask);
        else {
                receive_mask = 0;
                send_mask = 0;
        }
        rc = -EIO;
        for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) {
                /* Prepare request */
                __sclp_make_init_req(receive_mask, send_mask);
                spin_unlock_irqrestore(&sclp_lock, flags);
                if (sclp_add_request(&sclp_init_req)) {
                        /* Try again later */
                        wait = jiffies + SCLP_BUSY_INTERVAL * HZ;
                        while (time_before(jiffies, wait))
                                sclp_sync_wait();
                        spin_lock_irqsave(&sclp_lock, flags);
                        continue;
                }
                while (sclp_init_req.status != SCLP_REQ_DONE &&
                       sclp_init_req.status != SCLP_REQ_FAILED)
                        sclp_sync_wait();
                spin_lock_irqsave(&sclp_lock, flags);
                if (sclp_init_req.status == SCLP_REQ_DONE &&
                    sccb->header.response_code == 0x20) {
                        /* Successful request */
                        if (calculate) {
                                sclp_receive_mask = sccb->sclp_receive_mask;
                                sclp_send_mask = sccb->sclp_send_mask;
                        } else {
                                sclp_receive_mask = 0;
                                sclp_send_mask = 0;
                        }
                        spin_unlock_irqrestore(&sclp_lock, flags);
                        sclp_dispatch_state_change();
                        spin_lock_irqsave(&sclp_lock, flags);
                        rc = 0;
                        break;
                }
        }
        sclp_mask_state = sclp_mask_state_idle;
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}

/* Deactivate SCLP interface. On success, new requests will be rejected,
 * events will no longer be dispatched. Return 0 on success, non-zero
 * otherwise. */
int
sclp_deactivate(void)
{
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&sclp_lock, flags);
        /* Deactivate can only be called when active */
        if (sclp_activation_state != sclp_activation_state_active) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EINVAL;
        }
        sclp_activation_state = sclp_activation_state_deactivating;
        spin_unlock_irqrestore(&sclp_lock, flags);
        rc = sclp_init_mask(0);
        spin_lock_irqsave(&sclp_lock, flags);
        if (rc == 0)
                sclp_activation_state = sclp_activation_state_inactive;
        else
                sclp_activation_state = sclp_activation_state_active;
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}

EXPORT_SYMBOL(sclp_deactivate);

/* Reactivate SCLP interface after sclp_deactivate. On success, new
 * requests will be accepted, events will be dispatched again. Return 0 on
 * success, non-zero otherwise. */
int
sclp_reactivate(void)
{
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&sclp_lock, flags);
        /* Reactivate can only be called when inactive */
        if (sclp_activation_state != sclp_activation_state_inactive) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EINVAL;
        }
        sclp_activation_state = sclp_activation_state_activating;
        spin_unlock_irqrestore(&sclp_lock, flags);
        rc = sclp_init_mask(1);
        spin_lock_irqsave(&sclp_lock, flags);
        if (rc == 0)
                sclp_activation_state = sclp_activation_state_active;
        else
                sclp_activation_state = sclp_activation_state_inactive;
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}

EXPORT_SYMBOL(sclp_reactivate);

/* Handler for external interruption used during initialization. Modify
 * request state to done. */
static void
sclp_check_handler(__u16 code)
{
        u32 finished_sccb;

        finished_sccb = S390_lowcore.ext_params & 0xfffffff8;
        /* Is this the interrupt we are waiting for? */
        if (finished_sccb == 0)
                return;
        if (finished_sccb != (u32) (addr_t) sclp_init_sccb) {
                printk(KERN_WARNING SCLP_HEADER "unsolicited interrupt "
                       "for buffer at 0x%x\n", finished_sccb);
                return;
        }
        spin_lock(&sclp_lock);
        if (sclp_running_state == sclp_running_state_running) {
                sclp_init_req.status = SCLP_REQ_DONE;
                sclp_running_state = sclp_running_state_idle;
        }
        spin_unlock(&sclp_lock);
}

/* Initial init mask request timed out. Modify request state to failed. */
static void
sclp_check_timeout(unsigned long data)
{
        unsigned long flags;

        spin_lock_irqsave(&sclp_lock, flags);
        if (sclp_running_state == sclp_running_state_running) {
                sclp_init_req.status = SCLP_REQ_FAILED;
                sclp_running_state = sclp_running_state_idle;
        }
        spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Perform a check of the SCLP interface. Return zero if the interface is
 * available and there are no pending requests from a previous instance.
 * Return non-zero otherwise. */
static int
sclp_check_interface(void)
{
        struct init_sccb *sccb;
        unsigned long flags;
        int retry;
        int rc;

        spin_lock_irqsave(&sclp_lock, flags);
        /* Register temporary check handler for the service-signal
         * external interrupt (code 0x2401) */
        rc = register_early_external_interrupt(0x2401, sclp_check_handler,
                                               &ext_int_info_hwc);
        if (rc) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return rc;
        }
        for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
                __sclp_make_init_req(0, 0);
                sccb = (struct init_sccb *) sclp_init_req.sccb;
                rc = sclp_service_call(sclp_init_req.command, sccb);
                if (rc == -EIO)
                        break;
                sclp_init_req.status = SCLP_REQ_RUNNING;
                sclp_running_state = sclp_running_state_running;
                __sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
                                         sclp_check_timeout, 0);
                spin_unlock_irqrestore(&sclp_lock, flags);
                /* Enable service-signal interruption - needs to happen
                 * with IRQs enabled. */
                ctl_set_bit(0, 9);
                /* Wait for signal from interrupt or timeout */
                sclp_sync_wait();
                /* Disable service-signal interruption - needs to happen
                 * with IRQs enabled. */
                ctl_clear_bit(0, 9);
                spin_lock_irqsave(&sclp_lock, flags);
                del_timer(&sclp_request_timer);
                if (sclp_init_req.status == SCLP_REQ_DONE &&
                    sccb->header.response_code == 0x20) {
                        rc = 0;
                        break;
                } else
                        rc = -EBUSY;
        }
        unregister_early_external_interrupt(0x2401, sclp_check_handler,
                                            &ext_int_info_hwc);
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}

/* Reboot event handler. Reset send and receive mask to prevent pending SCLP
 * events from interfering with rebooted system. */
static int
sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
{
        sclp_deactivate();
        return NOTIFY_DONE;
}

static struct notifier_block sclp_reboot_notifier = {
        .notifier_call = sclp_reboot_event
};

/* Initialize SCLP driver. Return zero if driver is operational, non-zero
 * otherwise. */
static int
sclp_init(void)
{
        unsigned long flags;
        int rc;

        if (!MACHINE_HAS_SCLP)
                return -ENODEV;
        spin_lock_irqsave(&sclp_lock, flags);
        /* Check for previous or running initialization */
        if (sclp_init_state != sclp_init_state_uninitialized) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return 0;
        }
        sclp_init_state = sclp_init_state_initializing;
        /* Set up variables */
        INIT_LIST_HEAD(&sclp_req_queue);
        INIT_LIST_HEAD(&sclp_reg_list);
        list_add(&sclp_state_change_event.list, &sclp_reg_list);
        init_timer(&sclp_request_timer);
        /* Check interface */
        spin_unlock_irqrestore(&sclp_lock, flags);
        rc = sclp_check_interface();
        spin_lock_irqsave(&sclp_lock, flags);
        if (rc) {
                sclp_init_state = sclp_init_state_uninitialized;
                spin_unlock_irqrestore(&sclp_lock, flags);
                return rc;
        }
        /* Register reboot handler */
        rc = register_reboot_notifier(&sclp_reboot_notifier);
        if (rc) {
                sclp_init_state = sclp_init_state_uninitialized;
                spin_unlock_irqrestore(&sclp_lock, flags);
                return rc;
        }
        /* Register interrupt handler */
        rc = register_early_external_interrupt(0x2401, sclp_interrupt_handler,
                                               &ext_int_info_hwc);
        if (rc) {
                unregister_reboot_notifier(&sclp_reboot_notifier);
                sclp_init_state = sclp_init_state_uninitialized;
                spin_unlock_irqrestore(&sclp_lock, flags);
                return rc;
        }
        sclp_init_state = sclp_init_state_initialized;
        spin_unlock_irqrestore(&sclp_lock, flags);
        /* Enable service-signal external interruption - needs to happen with
         * IRQs enabled. */
        ctl_set_bit(0, 9);
        sclp_init_mask(1);
        return 0;
}

static __init int sclp_initcall(void)
{
        return sclp_init();
}

arch_initcall(sclp_initcall);