[S390] qdio: convert global statistics to per-device stats
[linux-2.6.git] / drivers / s390 / cio / qdio_thinint.c
1 /*
 * linux/drivers/s390/cio/qdio_thinint.c
3  *
4  * Copyright 2000,2009 IBM Corp.
5  * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
6  *            Cornelia Huck <cornelia.huck@de.ibm.com>
7  *            Jan Glauber <jang@linux.vnet.ibm.com>
8  */
9 #include <linux/io.h>
10 #include <asm/atomic.h>
11 #include <asm/debug.h>
12 #include <asm/qdio.h>
13 #include <asm/airq.h>
14 #include <asm/isc.h>
15
16 #include "cio.h"
17 #include "ioasm.h"
18 #include "qdio.h"
19 #include "qdio_debug.h"
20
21 /*
22  * Restriction: only 63 iqdio subchannels would have its own indicator,
23  * after that, subsequent subchannels share one indicator
24  */
25 #define TIQDIO_NR_NONSHARED_IND         63
26 #define TIQDIO_NR_INDICATORS            (TIQDIO_NR_NONSHARED_IND + 1)
27 #define TIQDIO_SHARED_IND               63
28
29 /* list of thin interrupt input queues */
30 static LIST_HEAD(tiq_list);
31 DEFINE_MUTEX(tiq_list_lock);
32
33 /* adapter local summary indicator */
34 static unsigned char *tiqdio_alsi;
35
36 /* device state change indicators */
37 struct indicator_t {
38         u32 ind;        /* u32 because of compare-and-swap performance */
39         atomic_t count; /* use count, 0 or 1 for non-shared indicators */
40 };
41 static struct indicator_t *q_indicators;
42
43 static int css_qdio_omit_svs;
44
/*
 * Issue the s390 instruction 0xb265 with function code 3 (loaded into r1)
 * to clear the global summary indicator.  Callers use this for iqdio
 * interrupt avoidance; per the caller's comment, it clears the adapter
 * interrupt suppression overwrite.
 *
 * Returns the value the instruction leaves in register 3
 * (NOTE(review): presumably a time value, judging by the variable name —
 * confirm against the instruction description).
 */
static inline unsigned long do_clear_global_summary(void)
{
	register unsigned long __fn asm("1") = 3;	/* function code */
	register unsigned long __tmp asm("2");
	register unsigned long __time asm("3");

	asm volatile(
		"	.insn	rre,0xb2650000,2,0"
		: "+d" (__fn), "=d" (__tmp), "=d" (__time));
	return __time;
}
56
57 /* returns addr for the device state change indicator */
58 static u32 *get_indicator(void)
59 {
60         int i;
61
62         for (i = 0; i < TIQDIO_NR_NONSHARED_IND; i++)
63                 if (!atomic_read(&q_indicators[i].count)) {
64                         atomic_set(&q_indicators[i].count, 1);
65                         return &q_indicators[i].ind;
66                 }
67
68         /* use the shared indicator */
69         atomic_inc(&q_indicators[TIQDIO_SHARED_IND].count);
70         return &q_indicators[TIQDIO_SHARED_IND].ind;
71 }
72
73 static void put_indicator(u32 *addr)
74 {
75         int i;
76
77         if (!addr)
78                 return;
79         i = ((unsigned long)addr - (unsigned long)q_indicators) /
80                 sizeof(struct indicator_t);
81         atomic_dec(&q_indicators[i].count);
82 }
83
/*
 * tiqdio_add_input_queues - link a device's input queues into tiq_list
 * @irq_ptr: qdio IRQ description
 *
 * Called for thinint devices on activation.  After linking, the device
 * state change indicator is set once so already pending work gets
 * picked up by the next thin interrupt pass.
 */
void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	/* No TDD facility? If we must use SIGA-s we can also omit SVS. */
	if (!css_qdio_omit_svs && irq_ptr->siga_flag.sync)
		css_qdio_omit_svs = 1;

	/* tiq_list writers are serialized by the mutex; readers use RCU */
	mutex_lock(&tiq_list_lock);
	for_each_input_queue(irq_ptr, q, i)
		list_add_rcu(&q->entry, &tiq_list);
	mutex_unlock(&tiq_list_lock);
	/* kick initial processing of this device's queues */
	xchg(irq_ptr->dsci, 1);
}
99
100 void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
101 {
102         struct qdio_q *q;
103         int i;
104
105         for (i = 0; i < irq_ptr->nr_input_qs; i++) {
106                 q = irq_ptr->input_qs[i];
107                 /* if establish triggered an error */
108                 if (!q || !q->entry.prev || !q->entry.next)
109                         continue;
110
111                 mutex_lock(&tiq_list_lock);
112                 list_del_rcu(&q->entry);
113                 mutex_unlock(&tiq_list_lock);
114                 synchronize_rcu();
115         }
116 }
117
118 static inline int shared_ind(struct qdio_irq *irq_ptr)
119 {
120         return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
121 }
122
/**
 * tiqdio_thinint_handler - thin interrupt handler for qdio
 * @ind: pointer to adapter local summary indicator
 * @drv_data: NULL
 *
 * Runs as the adapter interrupt handler: resets the local summary
 * indicator, then schedules the inbound tasklet of every thinint queue
 * whose device state change indicator is set.
 */
static void tiqdio_thinint_handler(void *ind, void *drv_data)
{
	struct qdio_q *q;

	/*
	 * SVS only when needed: issue SVS to benefit from iqdio interrupt
	 * avoidance (SVS clears adapter interrupt suppression overwrite)
	 */
	if (!css_qdio_omit_svs)
		do_clear_global_summary();

	/*
	 * reset local summary indicator (tiqdio_alsi) to stop adapter
	 * interrupts for now
	 */
	xchg((u8 *)ind, 0);

	/* protect tiq_list entries, only changed in activate or shutdown */
	rcu_read_lock();

	/* check for work on all inbound thinint queues */
	list_for_each_entry_rcu(q, &tiq_list, entry)
		/* only process queues from changed sets */
		if (*q->irq_ptr->dsci) {
			qperf_inc(q, adapter_int);

			/* only clear it if the indicator is non-shared */
			if (!shared_ind(q->irq_ptr))
				xchg(q->irq_ptr->dsci, 0);
			/*
			 * don't call inbound processing directly since
			 * that could starve other thinint queues
			 */
			tasklet_schedule(&q->tasklet);
		}

	rcu_read_unlock();

	/*
	 * if we used the shared indicator clear it now after all queues
	 * were processed
	 */
	if (atomic_read(&q_indicators[TIQDIO_SHARED_IND].count)) {
		xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);

		/*
		 * prevent racing: if the summary indicator got set again
		 * while we were scanning, re-arm the shared indicator so
		 * the next interrupt rescans the queues
		 */
		if (*tiqdio_alsi)
			xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 1);
	}
}
178
/*
 * set_subchannel_ind - (dis)associate indicators with the subchannel
 * @irq_ptr: qdio IRQ description
 * @reset: non-zero to clear the indicator addresses instead of setting them
 *
 * Builds and issues a CHSC command (code 0x0021, "set subchannel
 * indicator") on the per-device chsc_page, pointing the channel
 * subsystem at the adapter local summary indicator and this device's
 * state change indicator — or at address 0 for both when resetting.
 *
 * Returns 0 on success, -EIO if the chsc instruction itself fails, or
 * the error code derived from the command's response block.
 */
static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
{
	struct scssc_area *scssc_area;
	int rc;

	scssc_area = (struct scssc_area *)irq_ptr->chsc_page;
	memset(scssc_area, 0, PAGE_SIZE);

	if (reset) {
		scssc_area->summary_indicator_addr = 0;
		scssc_area->subchannel_indicator_addr = 0;
	} else {
		/* hardware needs physical, not virtual, addresses */
		scssc_area->summary_indicator_addr = virt_to_phys(tiqdio_alsi);
		scssc_area->subchannel_indicator_addr =
			virt_to_phys(irq_ptr->dsci);
	}

	scssc_area->request = (struct chsc_header) {
		.length = 0x0fe0,
		.code   = 0x0021,
	};
	scssc_area->operation_code = 0;
	scssc_area->ks = PAGE_DEFAULT_KEY;
	scssc_area->kc = PAGE_DEFAULT_KEY;
	scssc_area->isc = QDIO_AIRQ_ISC;
	scssc_area->schid = irq_ptr->schid;

	/* enable the time delay disablement facility */
	if (css_general_characteristics.aif_tdd)
		scssc_area->word_with_d_bit = 0x10000000;

	rc = chsc(scssc_area);
	if (rc)
		return -EIO;

	rc = chsc_error_from_response(scssc_area->response.code);
	if (rc) {
		DBF_ERROR("%4x SSI r:%4x", irq_ptr->schid.sch_no,
			  scssc_area->response.code);
		DBF_ERROR_HEX(&scssc_area->response, sizeof(void *));
		return rc;
	}

	DBF_EVENT("setscind");
	DBF_HEX(&scssc_area->summary_indicator_addr, sizeof(unsigned long));
	DBF_HEX(&scssc_area->subchannel_indicator_addr, sizeof(unsigned long));
	return 0;
}
227
228 /* allocate non-shared indicators and shared indicator */
229 int __init tiqdio_allocate_memory(void)
230 {
231         q_indicators = kzalloc(sizeof(struct indicator_t) * TIQDIO_NR_INDICATORS,
232                              GFP_KERNEL);
233         if (!q_indicators)
234                 return -ENOMEM;
235         return 0;
236 }
237
238 void tiqdio_free_memory(void)
239 {
240         kfree(q_indicators);
241 }
242
243 int __init tiqdio_register_thinints(void)
244 {
245         isc_register(QDIO_AIRQ_ISC);
246         tiqdio_alsi = s390_register_adapter_interrupt(&tiqdio_thinint_handler,
247                                                       NULL, QDIO_AIRQ_ISC);
248         if (IS_ERR(tiqdio_alsi)) {
249                 DBF_EVENT("RTI:%lx", PTR_ERR(tiqdio_alsi));
250                 tiqdio_alsi = NULL;
251                 isc_unregister(QDIO_AIRQ_ISC);
252                 return -ENOMEM;
253         }
254         return 0;
255 }
256
/*
 * qdio_establish_thinint - enable adapter interrupts for a device
 * @irq_ptr: qdio IRQ description
 *
 * No-op (returns 0) for non-thinint devices.  Otherwise points the
 * channel subsystem at this device's indicators; returns the result of
 * set_subchannel_ind().
 */
int qdio_establish_thinint(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return 0;

	/* Check for aif time delay disablement. If installed,
	 * omit SVS even under LPAR
	 */
	if (css_general_characteristics.aif_tdd)
		css_qdio_omit_svs = 1;
	return set_subchannel_ind(irq_ptr, 0);
}
269
/*
 * qdio_setup_thinint - reserve a device state change indicator
 * @irq_ptr: qdio IRQ description
 *
 * No-op for non-thinint devices.  May hand out the shared indicator
 * when all non-shared ones are taken (see get_indicator()).
 */
void qdio_setup_thinint(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return;
	irq_ptr->dsci = get_indicator();
	DBF_HEX(&irq_ptr->dsci, sizeof(void *));
}
277
278 void qdio_shutdown_thinint(struct qdio_irq *irq_ptr)
279 {
280         if (!is_thinint_irq(irq_ptr))
281                 return;
282
283         /* reset adapter interrupt indicators */
284         put_indicator(irq_ptr->dsci);
285         set_subchannel_ind(irq_ptr, 1);
286 }
287
288 void __exit tiqdio_unregister_thinints(void)
289 {
290         WARN_ON(!list_empty(&tiq_list));
291
292         if (tiqdio_alsi) {
293                 s390_unregister_adapter_interrupt(tiqdio_alsi, QDIO_AIRQ_ISC);
294                 isc_unregister(QDIO_AIRQ_ISC);
295         }
296 }