ieee1394: ohci1394: fix cosmetic problem in error logging
[linux-2.6.git] / drivers / ieee1394 / ohci1394.c
1 /*
2  * ohci1394.c - driver for OHCI 1394 boards
3  * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
4  *                        Gord Peters <GordPeters@smarttech.com>
5  *              2001      Ben Collins <bcollins@debian.org>
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License, or
10  * (at your option) any later version.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program; if not, write to the Free Software Foundation,
19  * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20  */
21
22 /*
23  * Things known to be working:
24  * . Async Request Transmit
25  * . Async Response Receive
26  * . Async Request Receive
27  * . Async Response Transmit
28  * . Iso Receive
29  * . DMA mmap for iso receive
30  * . Config ROM generation
31  *
32  * Things implemented, but still in test phase:
33  * . Iso Transmit
34  * . Async Stream Packets Transmit (Receive done via Iso interface)
35  *
36  * Things not implemented:
37  * . DMA error recovery
38  *
39  * Known bugs:
40  * . devctl BUS_RESET arg confusion (reset type or root holdoff?)
41  *   added LONG_RESET_ROOT and SHORT_RESET_ROOT for root holdoff --kk
42  */
43
44 /*
45  * Acknowledgments:
46  *
47  * Adam J Richter <adam@yggdrasil.com>
48  *  . Use of pci_class to find device
49  *
50  * Emilie Chung <emilie.chung@axis.com>
51  *  . Tip on Async Request Filter
52  *
53  * Pascal Drolet <pascal.drolet@informission.ca>
54  *  . Various tips for optimization and functionnalities
55  *
56  * Robert Ficklin <rficklin@westengineering.com>
57  *  . Loop in irq_handler
58  *
59  * James Goodwin <jamesg@Filanet.com>
60  *  . Various tips on initialization, self-id reception, etc.
61  *
62  * Albrecht Dress <ad@mpifr-bonn.mpg.de>
63  *  . Apple PowerBook detection
64  *
65  * Daniel Kobras <daniel.kobras@student.uni-tuebingen.de>
66  *  . Reset the board properly before leaving + misc cleanups
67  *
68  * Leon van Stuivenberg <leonvs@iae.nl>
69  *  . Bug fixes
70  *
71  * Ben Collins <bcollins@debian.org>
72  *  . Working big-endian support
73  *  . Updated to 2.4.x module scheme (PCI aswell)
74  *  . Config ROM generation
75  *
76  * Manfred Weihs <weihs@ict.tuwien.ac.at>
77  *  . Reworked code for initiating bus resets
78  *    (long, short, with or without hold-off)
79  *
80  * Nandu Santhi <contactnandu@users.sourceforge.net>
81  *  . Added support for nVidia nForce2 onboard Firewire chipset
82  *
83  */
84
85 #include <linux/kernel.h>
86 #include <linux/list.h>
87 #include <linux/slab.h>
88 #include <linux/interrupt.h>
89 #include <linux/wait.h>
90 #include <linux/errno.h>
91 #include <linux/module.h>
92 #include <linux/moduleparam.h>
93 #include <linux/pci.h>
94 #include <linux/fs.h>
95 #include <linux/poll.h>
96 #include <asm/byteorder.h>
97 #include <asm/atomic.h>
98 #include <asm/uaccess.h>
99 #include <linux/delay.h>
100 #include <linux/spinlock.h>
101
102 #include <asm/pgtable.h>
103 #include <asm/page.h>
104 #include <asm/irq.h>
105 #include <linux/types.h>
106 #include <linux/vmalloc.h>
107 #include <linux/init.h>
108
109 #ifdef CONFIG_PPC_PMAC
110 #include <asm/machdep.h>
111 #include <asm/pmac_feature.h>
112 #include <asm/prom.h>
113 #include <asm/pci-bridge.h>
114 #endif
115
116 #include "csr1212.h"
117 #include "ieee1394.h"
118 #include "ieee1394_types.h"
119 #include "hosts.h"
120 #include "dma.h"
121 #include "iso.h"
122 #include "ieee1394_core.h"
123 #include "highlevel.h"
124 #include "ohci1394.h"
125
126 #ifdef CONFIG_IEEE1394_VERBOSEDEBUG
127 #define OHCI1394_DEBUG
128 #endif
129
130 #ifdef DBGMSG
131 #undef DBGMSG
132 #endif
133
134 #ifdef OHCI1394_DEBUG
135 #define DBGMSG(fmt, args...) \
136 printk(KERN_INFO "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
137 #else
138 #define DBGMSG(fmt, args...) do {} while (0)
139 #endif
140
141 #ifdef CONFIG_IEEE1394_OHCI_DMA_DEBUG
142 #define OHCI_DMA_ALLOC(fmt, args...) \
143         HPSB_ERR("%s(%s)alloc(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
144                 ++global_outstanding_dmas, ## args)
145 #define OHCI_DMA_FREE(fmt, args...) \
146         HPSB_ERR("%s(%s)free(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
147                 --global_outstanding_dmas, ## args)
148 static int global_outstanding_dmas = 0;
149 #else
150 #define OHCI_DMA_ALLOC(fmt, args...) do {} while (0)
151 #define OHCI_DMA_FREE(fmt, args...) do {} while (0)
152 #endif
153
154 /* print general (card independent) information */
155 #define PRINT_G(level, fmt, args...) \
156 printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
157
158 /* print card specific information */
159 #define PRINT(level, fmt, args...) \
160 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
161
162 /* Module Parameters */
163 static int phys_dma = 1;
164 module_param(phys_dma, int, 0444);
165 MODULE_PARM_DESC(phys_dma, "Enable physical dma (default = 1).");
166
167 static void dma_trm_tasklet(unsigned long data);
168 static void dma_trm_reset(struct dma_trm_ctx *d);
169
170 static int alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
171                              enum context_type type, int ctx, int num_desc,
172                              int buf_size, int split_buf_size, int context_base);
173 static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d);
174 static void free_dma_rcv_ctx(struct dma_rcv_ctx *d);
175
176 static int alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
177                              enum context_type type, int ctx, int num_desc,
178                              int context_base);
179
180 static void ohci1394_pci_remove(struct pci_dev *pdev);
181
#ifndef __LITTLE_ENDIAN
/* Number of header quadlets that must be byte-swapped before transmission,
 * indexed by IEEE-1394 transaction code (tcode).  Entries marked reserved
 * are never swapped (size 0). */
static const size_t hdr_sizes[] = {
	3,	/* TCODE_WRITEQ */
	4,	/* TCODE_WRITEB */
	3,	/* TCODE_WRITE_RESPONSE */
	0,	/* reserved */
	3,	/* TCODE_READQ */
	4,	/* TCODE_READB */
	3,	/* TCODE_READQ_RESPONSE */
	4,	/* TCODE_READB_RESPONSE */
	1,	/* TCODE_CYCLE_START */
	4,	/* TCODE_LOCK_REQUEST */
	2,	/* TCODE_ISO_DATA */
	4,	/* TCODE_LOCK_RESPONSE */
		/* rest is reserved or link-internal */
};

/*
 * Convert a packet header from little-endian (the OHCI wire/DMA layout)
 * to CPU byte order, in place.  Only the quadlets that hdr_sizes[] lists
 * for the given tcode are touched; tcodes beyond the table (reserved or
 * link-internal) are left untouched.
 */
static inline void header_le32_to_cpu(quadlet_t *data, unsigned char tcode)
{
	size_t size;

	/* Out-of-table tcodes carry nothing we need to swap. */
	if (unlikely(tcode >= ARRAY_SIZE(hdr_sizes)))
		return;

	size = hdr_sizes[tcode];
	while (size--)
		data[size] = le32_to_cpu(data[size]);
}
#else
/* On little-endian hosts the header is already in wire order: no-op. */
#define header_le32_to_cpu(w,x) do {} while (0)
#endif /* !LITTLE_ENDIAN */
213
214 /***********************************
215  * IEEE-1394 functionality section *
216  ***********************************/
217
218 static u8 get_phy_reg(struct ti_ohci *ohci, u8 addr)
219 {
220         int i;
221         unsigned long flags;
222         quadlet_t r;
223
224         spin_lock_irqsave (&ohci->phy_reg_lock, flags);
225
226         reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000);
227
228         for (i = 0; i < OHCI_LOOP_COUNT; i++) {
229                 if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000)
230                         break;
231
232                 mdelay(1);
233         }
234
235         r = reg_read(ohci, OHCI1394_PhyControl);
236
237         if (i >= OHCI_LOOP_COUNT)
238                 PRINT (KERN_ERR, "Get PHY Reg timeout [0x%08x/0x%08x/%d]",
239                        r, r & 0x80000000, i);
240
241         spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);
242
243         return (r & 0x00ff0000) >> 16;
244 }
245
246 static void set_phy_reg(struct ti_ohci *ohci, u8 addr, u8 data)
247 {
248         int i;
249         unsigned long flags;
250         u32 r = 0;
251
252         spin_lock_irqsave (&ohci->phy_reg_lock, flags);
253
254         reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000);
255
256         for (i = 0; i < OHCI_LOOP_COUNT; i++) {
257                 r = reg_read(ohci, OHCI1394_PhyControl);
258                 if (!(r & 0x00004000))
259                         break;
260
261                 mdelay(1);
262         }
263
264         if (i == OHCI_LOOP_COUNT)
265                 PRINT (KERN_ERR, "Set PHY Reg timeout [0x%08x/0x%08x/%d]",
266                        r, r & 0x00004000, i);
267
268         spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);
269
270         return;
271 }
272
273 /* Or's our value into the current value */
274 static void set_phy_reg_mask(struct ti_ohci *ohci, u8 addr, u8 data)
275 {
276         u8 old;
277
278         old = get_phy_reg (ohci, addr);
279         old |= data;
280         set_phy_reg (ohci, addr, old);
281
282         return;
283 }
284
285 static void handle_selfid(struct ti_ohci *ohci, struct hpsb_host *host,
286                                 int phyid, int isroot)
287 {
288         quadlet_t *q = ohci->selfid_buf_cpu;
289         quadlet_t self_id_count=reg_read(ohci, OHCI1394_SelfIDCount);
290         size_t size;
291         quadlet_t q0, q1;
292
293         /* Check status of self-id reception */
294
295         if (ohci->selfid_swap)
296                 q0 = le32_to_cpu(q[0]);
297         else
298                 q0 = q[0];
299
300         if ((self_id_count & 0x80000000) ||
301             ((self_id_count & 0x00FF0000) != (q0 & 0x00FF0000))) {
302                 PRINT(KERN_ERR,
303                       "Error in reception of SelfID packets [0x%08x/0x%08x] (count: %d)",
304                       self_id_count, q0, ohci->self_id_errors);
305
306                 /* Tip by James Goodwin <jamesg@Filanet.com>:
307                  * We had an error, generate another bus reset in response.  */
308                 if (ohci->self_id_errors<OHCI1394_MAX_SELF_ID_ERRORS) {
309                         set_phy_reg_mask (ohci, 1, 0x40);
310                         ohci->self_id_errors++;
311                 } else {
312                         PRINT(KERN_ERR,
313                               "Too many errors on SelfID error reception, giving up!");
314                 }
315                 return;
316         }
317
318         /* SelfID Ok, reset error counter. */
319         ohci->self_id_errors = 0;
320
321         size = ((self_id_count & 0x00001FFC) >> 2) - 1;
322         q++;
323
324         while (size > 0) {
325                 if (ohci->selfid_swap) {
326                         q0 = le32_to_cpu(q[0]);
327                         q1 = le32_to_cpu(q[1]);
328                 } else {
329                         q0 = q[0];
330                         q1 = q[1];
331                 }
332
333                 if (q0 == ~q1) {
334                         DBGMSG ("SelfID packet 0x%x received", q0);
335                         hpsb_selfid_received(host, cpu_to_be32(q0));
336                         if (((q0 & 0x3f000000) >> 24) == phyid)
337                                 DBGMSG ("SelfID for this node is 0x%08x", q0);
338                 } else {
339                         PRINT(KERN_ERR,
340                               "SelfID is inconsistent [0x%08x/0x%08x]", q0, q1);
341                 }
342                 q += 2;
343                 size -= 2;
344         }
345
346         DBGMSG("SelfID complete");
347
348         return;
349 }
350
351 static void ohci_soft_reset(struct ti_ohci *ohci) {
352         int i;
353
354         reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
355
356         for (i = 0; i < OHCI_LOOP_COUNT; i++) {
357                 if (!(reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_softReset))
358                         break;
359                 mdelay(1);
360         }
361         DBGMSG ("Soft reset finished");
362 }
363
364
365 /* Generate the dma receive prgs and start the context */
366 static void initialize_dma_rcv_ctx(struct dma_rcv_ctx *d, int generate_irq)
367 {
368         struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
369         int i;
370
371         ohci1394_stop_context(ohci, d->ctrlClear, NULL);
372
373         for (i=0; i<d->num_desc; i++) {
374                 u32 c;
375
376                 c = DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE | DMA_CTL_BRANCH;
377                 if (generate_irq)
378                         c |= DMA_CTL_IRQ;
379
380                 d->prg_cpu[i]->control = cpu_to_le32(c | d->buf_size);
381
382                 /* End of descriptor list? */
383                 if (i + 1 < d->num_desc) {
384                         d->prg_cpu[i]->branchAddress =
385                                 cpu_to_le32((d->prg_bus[i+1] & 0xfffffff0) | 0x1);
386                 } else {
387                         d->prg_cpu[i]->branchAddress =
388                                 cpu_to_le32((d->prg_bus[0] & 0xfffffff0));
389                 }
390
391                 d->prg_cpu[i]->address = cpu_to_le32(d->buf_bus[i]);
392                 d->prg_cpu[i]->status = cpu_to_le32(d->buf_size);
393         }
394
395         d->buf_ind = 0;
396         d->buf_offset = 0;
397
398         if (d->type == DMA_CTX_ISO) {
399                 /* Clear contextControl */
400                 reg_write(ohci, d->ctrlClear, 0xffffffff);
401
402                 /* Set bufferFill, isochHeader, multichannel for IR context */
403                 reg_write(ohci, d->ctrlSet, 0xd0000000);
404
405                 /* Set the context match register to match on all tags */
406                 reg_write(ohci, d->ctxtMatch, 0xf0000000);
407
408                 /* Clear the multi channel mask high and low registers */
409                 reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, 0xffffffff);
410                 reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, 0xffffffff);
411
412                 /* Set up isoRecvIntMask to generate interrupts */
413                 reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << d->ctx);
414         }
415
416         /* Tell the controller where the first AR program is */
417         reg_write(ohci, d->cmdPtr, d->prg_bus[0] | 0x1);
418
419         /* Run context */
420         reg_write(ohci, d->ctrlSet, 0x00008000);
421
422         DBGMSG("Receive DMA ctx=%d initialized", d->ctx);
423 }
424
425 /* Initialize the dma transmit context */
426 static void initialize_dma_trm_ctx(struct dma_trm_ctx *d)
427 {
428         struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
429
430         /* Stop the context */
431         ohci1394_stop_context(ohci, d->ctrlClear, NULL);
432
433         d->prg_ind = 0;
434         d->sent_ind = 0;
435         d->free_prgs = d->num_desc;
436         d->branchAddrPtr = NULL;
437         INIT_LIST_HEAD(&d->fifo_list);
438         INIT_LIST_HEAD(&d->pending_list);
439
440         if (d->type == DMA_CTX_ISO) {
441                 /* enable interrupts */
442                 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << d->ctx);
443         }
444
445         DBGMSG("Transmit DMA ctx=%d initialized", d->ctx);
446 }
447
448 /* Count the number of available iso contexts */
449 static int get_nb_iso_ctx(struct ti_ohci *ohci, int reg)
450 {
451         int i,ctx=0;
452         u32 tmp;
453
454         reg_write(ohci, reg, 0xffffffff);
455         tmp = reg_read(ohci, reg);
456
457         DBGMSG("Iso contexts reg: %08x implemented: %08x", reg, tmp);
458
459         /* Count the number of contexts */
460         for (i=0; i<32; i++) {
461                 if (tmp & 1) ctx++;
462                 tmp >>= 1;
463         }
464         return ctx;
465 }
466
467 /* Global initialization */
468 static void ohci_initialize(struct ti_ohci *ohci)
469 {
470         quadlet_t buf;
471         int num_ports, i;
472
473         spin_lock_init(&ohci->phy_reg_lock);
474
475         /* Put some defaults to these undefined bus options */
476         buf = reg_read(ohci, OHCI1394_BusOptions);
477         buf |=  0x60000000; /* Enable CMC and ISC */
478         if (hpsb_disable_irm)
479                 buf &= ~0x80000000;
480         else
481                 buf |=  0x80000000; /* Enable IRMC */
482         buf &= ~0x00ff0000; /* XXX: Set cyc_clk_acc to zero for now */
483         buf &= ~0x18000000; /* Disable PMC and BMC */
484         reg_write(ohci, OHCI1394_BusOptions, buf);
485
486         /* Set the bus number */
487         reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0);
488
489         /* Enable posted writes */
490         reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_postedWriteEnable);
491
492         /* Clear link control register */
493         reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
494
495         /* Enable cycle timer and cycle master and set the IRM
496          * contender bit in our self ID packets if appropriate. */
497         reg_write(ohci, OHCI1394_LinkControlSet,
498                   OHCI1394_LinkControl_CycleTimerEnable |
499                   OHCI1394_LinkControl_CycleMaster);
500         i = get_phy_reg(ohci, 4) | PHY_04_LCTRL;
501         if (hpsb_disable_irm)
502                 i &= ~PHY_04_CONTENDER;
503         else
504                 i |= PHY_04_CONTENDER;
505         set_phy_reg(ohci, 4, i);
506
507         /* Set up self-id dma buffer */
508         reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->selfid_buf_bus);
509
510         /* enable self-id and phys */
511         reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_RcvSelfID |
512                   OHCI1394_LinkControl_RcvPhyPkt);
513
514         /* Set the Config ROM mapping register */
515         reg_write(ohci, OHCI1394_ConfigROMmap, ohci->csr_config_rom_bus);
516
517         /* Now get our max packet size */
518         ohci->max_packet_size =
519                 1<<(((reg_read(ohci, OHCI1394_BusOptions)>>12)&0xf)+1);
520                 
521         /* Don't accept phy packets into AR request context */
522         reg_write(ohci, OHCI1394_LinkControlClear, 0x00000400);
523
524         /* Clear the interrupt mask */
525         reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
526         reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
527
528         /* Clear the interrupt mask */
529         reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
530         reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
531
532         /* Initialize AR dma */
533         initialize_dma_rcv_ctx(&ohci->ar_req_context, 0);
534         initialize_dma_rcv_ctx(&ohci->ar_resp_context, 0);
535
536         /* Initialize AT dma */
537         initialize_dma_trm_ctx(&ohci->at_req_context);
538         initialize_dma_trm_ctx(&ohci->at_resp_context);
539         
540         /* Initialize IR Legacy DMA channel mask */
541         ohci->ir_legacy_channels = 0;
542
543         /* Accept AR requests from all nodes */
544         reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);
545
546         /* Set the address range of the physical response unit.
547          * Most controllers do not implement it as a writable register though.
548          * They will keep a hardwired offset of 0x00010000 and show 0x0 as
549          * register content.
550          * To actually enable physical responses is the job of our interrupt
551          * handler which programs the physical request filter. */
552         reg_write(ohci, OHCI1394_PhyUpperBound,
553                   OHCI1394_PHYS_UPPER_BOUND_PROGRAMMED >> 16);
554
555         DBGMSG("physUpperBoundOffset=%08x",
556                reg_read(ohci, OHCI1394_PhyUpperBound));
557
558         /* Specify AT retries */
559         reg_write(ohci, OHCI1394_ATRetries,
560                   OHCI1394_MAX_AT_REQ_RETRIES |
561                   (OHCI1394_MAX_AT_RESP_RETRIES<<4) |
562                   (OHCI1394_MAX_PHYS_RESP_RETRIES<<8));
563
564         /* We don't want hardware swapping */
565         reg_write(ohci, OHCI1394_HCControlClear, OHCI1394_HCControl_noByteSwap);
566
567         /* Enable interrupts */
568         reg_write(ohci, OHCI1394_IntMaskSet,
569                   OHCI1394_unrecoverableError |
570                   OHCI1394_masterIntEnable |
571                   OHCI1394_busReset |
572                   OHCI1394_selfIDComplete |
573                   OHCI1394_RSPkt |
574                   OHCI1394_RQPkt |
575                   OHCI1394_respTxComplete |
576                   OHCI1394_reqTxComplete |
577                   OHCI1394_isochRx |
578                   OHCI1394_isochTx |
579                   OHCI1394_postedWriteErr |
580                   OHCI1394_cycleTooLong |
581                   OHCI1394_cycleInconsistent);
582
583         /* Enable link */
584         reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable);
585
586         buf = reg_read(ohci, OHCI1394_Version);
587         PRINT(KERN_INFO, "OHCI-1394 %d.%d (PCI): IRQ=[%d]  "
588               "MMIO=[%llx-%llx]  Max Packet=[%d]  IR/IT contexts=[%d/%d]",
589               ((((buf) >> 16) & 0xf) + (((buf) >> 20) & 0xf) * 10),
590               ((((buf) >> 4) & 0xf) + ((buf) & 0xf) * 10), ohci->dev->irq,
591               (unsigned long long)pci_resource_start(ohci->dev, 0),
592               (unsigned long long)pci_resource_start(ohci->dev, 0) + OHCI1394_REGISTER_SIZE - 1,
593               ohci->max_packet_size,
594               ohci->nb_iso_rcv_ctx, ohci->nb_iso_xmit_ctx);
595
596         /* Check all of our ports to make sure that if anything is
597          * connected, we enable that port. */
598         num_ports = get_phy_reg(ohci, 2) & 0xf;
599         for (i = 0; i < num_ports; i++) {
600                 unsigned int status;
601
602                 set_phy_reg(ohci, 7, i);
603                 status = get_phy_reg(ohci, 8);
604
605                 if (status & 0x20)
606                         set_phy_reg(ohci, 8, status & ~1);
607         }
608
609         /* Serial EEPROM Sanity check. */
610         if ((ohci->max_packet_size < 512) ||
611             (ohci->max_packet_size > 4096)) {
612                 /* Serial EEPROM contents are suspect, set a sane max packet
613                  * size and print the raw contents for bug reports if verbose
614                  * debug is enabled. */
615 #ifdef CONFIG_IEEE1394_VERBOSEDEBUG
616                 int i;
617 #endif
618
619                 PRINT(KERN_DEBUG, "Serial EEPROM has suspicious values, "
620                       "attempting to setting max_packet_size to 512 bytes");
621                 reg_write(ohci, OHCI1394_BusOptions,
622                           (reg_read(ohci, OHCI1394_BusOptions) & 0xf007) | 0x8002);
623                 ohci->max_packet_size = 512;
624 #ifdef CONFIG_IEEE1394_VERBOSEDEBUG
625                 PRINT(KERN_DEBUG, "    EEPROM Present: %d",
626                       (reg_read(ohci, OHCI1394_Version) >> 24) & 0x1);
627                 reg_write(ohci, OHCI1394_GUID_ROM, 0x80000000);
628
629                 for (i = 0;
630                      ((i < 1000) &&
631                       (reg_read(ohci, OHCI1394_GUID_ROM) & 0x80000000)); i++)
632                         udelay(10);
633
634                 for (i = 0; i < 0x20; i++) {
635                         reg_write(ohci, OHCI1394_GUID_ROM, 0x02000000);
636                         PRINT(KERN_DEBUG, "    EEPROM %02x: %02x", i,
637                               (reg_read(ohci, OHCI1394_GUID_ROM) >> 16) & 0xff);
638                 }
639 #endif
640         }
641 }
642
/*
 * Insert a packet in the DMA fifo and generate the DMA prg
 * FIXME: rewrite the program in order to accept packets crossing
 *        page boundaries.
 *        check also that a single dma descriptor doesn't cross a
 *        page boundary.
 *
 * Builds an OUTPUT_MORE-IMMEDIATE / OUTPUT_LAST descriptor pair (or a
 * single OUTPUT_LAST-IMMEDIATE for quadlet transmits) for the packet in
 * the next free slot of the context's program ring, chains it to the
 * previous program via *branchAddrPtr, and queues the packet on the
 * context's fifo_list.
 *
 * NOTE(review): this consumes a descriptor unconditionally (free_prgs--),
 * so the caller presumably must have checked d->free_prgs > 0 and hold
 * the context lock — not visible in this chunk, confirm against callers.
 */
static void insert_packet(struct ti_ohci *ohci,
			  struct dma_trm_ctx *d, struct hpsb_packet *packet)
{
	u32 cycleTimer;
	int idx = d->prg_ind;	/* next free slot in the program ring */

	DBGMSG("Inserting packet for node " NODE_BUS_FMT
	       ", tlabel=%d, tcode=0x%x, speed=%d",
	       NODE_BUS_ARGS(ohci->host, packet->node_id), packet->tlabel,
	       packet->tcode, packet->speed_code);

	d->prg_cpu[idx]->begin.address = 0;
	d->prg_cpu[idx]->begin.branchAddress = 0;

	if (d->type == DMA_CTX_ASYNC_RESP) {
		/*
		 * For response packets, we need to put a timeout value in
		 * the 16 lower bits of the status... let's try 1 sec timeout
		 */
		cycleTimer = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		d->prg_cpu[idx]->begin.status = cpu_to_le32(
			(((((cycleTimer>>25)&0x7)+1)&0x7)<<13) |
			((cycleTimer&0x01fff000)>>12));

		DBGMSG("cycleTimer: %08x timeStamp: %08x",
		       cycleTimer, d->prg_cpu[idx]->begin.status);
	} else 
		d->prg_cpu[idx]->begin.status = 0;

	if ( (packet->type == hpsb_async) || (packet->type == hpsb_raw) ) {

		if (packet->type == hpsb_raw) {
			/* Raw (PHY) packets: PHY tcode plus the two header
			 * quadlets, sent verbatim. */
			d->prg_cpu[idx]->data[0] = cpu_to_le32(OHCI1394_TCODE_PHY<<4);
			d->prg_cpu[idx]->data[1] = cpu_to_le32(packet->header[0]);
			d->prg_cpu[idx]->data[2] = cpu_to_le32(packet->header[1]);
		} else {
			/* First immediate quadlet carries the speed code plus
			 * the low 16 header bits. */
			d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
				(packet->header[0] & 0xFFFF);

			if (packet->tcode == TCODE_ISO_DATA) {
				/* Sending an async stream packet */
				d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
			} else {
				/* Sending a normal async request or response */
				d->prg_cpu[idx]->data[1] =
					(packet->header[1] & 0xFFFF) |
					(packet->header[0] & 0xFFFF0000);
				d->prg_cpu[idx]->data[2] = packet->header[2];
				d->prg_cpu[idx]->data[3] = packet->header[3];
			}
			/* On big-endian hosts, swap the header quadlets the
			 * controller expects in little-endian order. */
			header_le32_to_cpu(d->prg_cpu[idx]->data, packet->tcode);
		}

		if (packet->data_size) { /* block transmit */
			/* Immediate header size: 8 bytes for stream packets,
			 * 16 bytes for normal block requests/responses. */
			if (packet->tcode == TCODE_STREAM_DATA){
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_MORE |
						    DMA_CTL_IMMEDIATE | 0x8);
			} else {
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_MORE |
						    DMA_CTL_IMMEDIATE | 0x10);
			}
			d->prg_cpu[idx]->end.control =
				cpu_to_le32(DMA_CTL_OUTPUT_LAST |
					    DMA_CTL_IRQ |
					    DMA_CTL_BRANCH |
					    packet->data_size);
			/*
			 * Check that the packet data buffer
			 * does not cross a page boundary.
			 *
			 * XXX Fix this some day. eth1394 seems to trigger
			 * it, but ignoring it doesn't seem to cause a
			 * problem.
			 */
#if 0
			if (cross_bound((unsigned long)packet->data,
					packet->data_size)>0) {
				/* FIXME: do something about it */
				PRINT(KERN_ERR,
				      "%s: packet data addr: %p size %Zd bytes "
				      "cross page boundary", __FUNCTION__,
				      packet->data, packet->data_size);
			}
#endif
			/* Map the payload for device reads; unmapping is done
			 * elsewhere after transmission completes (not visible
			 * in this chunk). */
			d->prg_cpu[idx]->end.address = cpu_to_le32(
				pci_map_single(ohci->dev, packet->data,
					       packet->data_size,
					       PCI_DMA_TODEVICE));
			OHCI_DMA_ALLOC("single, block transmit packet");

			d->prg_cpu[idx]->end.branchAddress = 0;
			d->prg_cpu[idx]->end.status = 0;
			/* Chain the previous program to this one (Z=3:
			 * two-descriptor program). */
			if (d->branchAddrPtr)
				*(d->branchAddrPtr) =
					cpu_to_le32(d->prg_bus[idx] | 0x3);
			d->branchAddrPtr =
				&(d->prg_cpu[idx]->end.branchAddress);
		} else { /* quadlet transmit */
			/* No payload: a single OUTPUT_LAST-IMMEDIATE
			 * descriptor carries the whole packet.  Raw packets
			 * include 4 extra bytes (the PHY tcode quadlet). */
			if (packet->type == hpsb_raw)
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_LAST |
						    DMA_CTL_IMMEDIATE |
						    DMA_CTL_IRQ |
						    DMA_CTL_BRANCH |
						    (packet->header_size + 4));
			else
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_LAST |
						    DMA_CTL_IMMEDIATE |
						    DMA_CTL_IRQ |
						    DMA_CTL_BRANCH |
						    packet->header_size);

			/* Chain the previous program here (Z=2:
			 * single-descriptor program). */
			if (d->branchAddrPtr)
				*(d->branchAddrPtr) =
					cpu_to_le32(d->prg_bus[idx] | 0x2);
			d->branchAddrPtr =
				&(d->prg_cpu[idx]->begin.branchAddress);
		}

	} else { /* iso packet */
		d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
			(packet->header[0] & 0xFFFF);
		d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
		header_le32_to_cpu(d->prg_cpu[idx]->data, packet->tcode);

		/* 8-byte immediate iso header, then the payload. */
		d->prg_cpu[idx]->begin.control =
			cpu_to_le32(DMA_CTL_OUTPUT_MORE |
				    DMA_CTL_IMMEDIATE | 0x8);
		d->prg_cpu[idx]->end.control =
			cpu_to_le32(DMA_CTL_OUTPUT_LAST |
				    DMA_CTL_UPDATE |
				    DMA_CTL_IRQ |
				    DMA_CTL_BRANCH |
				    packet->data_size);
		d->prg_cpu[idx]->end.address = cpu_to_le32(
				pci_map_single(ohci->dev, packet->data,
				packet->data_size, PCI_DMA_TODEVICE));
		OHCI_DMA_ALLOC("single, iso transmit packet");

		d->prg_cpu[idx]->end.branchAddress = 0;
		d->prg_cpu[idx]->end.status = 0;
		DBGMSG("Iso xmit context info: header[%08x %08x]\n"
		       "                       begin=%08x %08x %08x %08x\n"
		       "                             %08x %08x %08x %08x\n"
		       "                       end  =%08x %08x %08x %08x",
		       d->prg_cpu[idx]->data[0], d->prg_cpu[idx]->data[1],
		       d->prg_cpu[idx]->begin.control,
		       d->prg_cpu[idx]->begin.address,
		       d->prg_cpu[idx]->begin.branchAddress,
		       d->prg_cpu[idx]->begin.status,
		       d->prg_cpu[idx]->data[0],
		       d->prg_cpu[idx]->data[1],
		       d->prg_cpu[idx]->data[2],
		       d->prg_cpu[idx]->data[3],
		       d->prg_cpu[idx]->end.control,
		       d->prg_cpu[idx]->end.address,
		       d->prg_cpu[idx]->end.branchAddress,
		       d->prg_cpu[idx]->end.status);
		if (d->branchAddrPtr)
			*(d->branchAddrPtr) = cpu_to_le32(d->prg_bus[idx] | 0x3);
		d->branchAddrPtr = &(d->prg_cpu[idx]->end.branchAddress);
	}
	d->free_prgs--;

	/* queue the packet in the appropriate context queue */
	list_add_tail(&packet->driver_list, &d->fifo_list);
	d->prg_ind = (d->prg_ind + 1) % d->num_desc;
}
821
822 /*
823  * This function fills the FIFO with the (eventual) pending packets
824  * and runs or wakes up the DMA prg if necessary.
825  *
826  * The function MUST be called with the d->lock held.
827  */
static void dma_trm_flush(struct ti_ohci *ohci, struct dma_trm_ctx *d)
{
	struct hpsb_packet *packet, *ptmp;
	int idx = d->prg_ind;	/* first program descriptor queued below */
	int z = 0;		/* Z (descriptor count) OR'd into CommandPtr */

	/* insert the packets into the dma fifo */
	list_for_each_entry_safe(packet, ptmp, &d->pending_list, driver_list) {
		if (!d->free_prgs)
			break;	/* out of descriptors; rest stays pending */

		/* For the first packet only: Z is 3 when the packet has a
		 * data payload (begin + end descriptors), 2 otherwise */
		if (!z)
			z = (packet->data_size) ? 3 : 2;

		/* Insert the packet */
		list_del_init(&packet->driver_list);
		insert_packet(ohci, d, packet);
	}

	/* Nothing must have been done, either no free_prgs or no packets */
	if (z == 0)
		return;

	/* Is the context running ? (should be unless it is
	   the first packet to be sent in this context) */
	if (!(reg_read(ohci, d->ctrlSet) & 0x8000)) {
		u32 nodeId = reg_read(ohci, OHCI1394_NodeID);

		DBGMSG("Starting transmit DMA ctx=%d",d->ctx);
		reg_write(ohci, d->cmdPtr, d->prg_bus[idx] | z);

		/* Check that the node id is valid, and not 63 */
		if (!(nodeId & 0x80000000) || (nodeId & 0x3f) == 63)
			PRINT(KERN_ERR, "Running dma failed because Node ID is not valid");
		else
			reg_write(ohci, d->ctrlSet, 0x8000); /* set RUN */
	} else {
		/* Wake up the dma context if necessary */
		if (!(reg_read(ohci, d->ctrlSet) & 0x400))
			DBGMSG("Waking transmit DMA ctx=%d",d->ctx);

		/* do this always, to avoid race condition */
		reg_write(ohci, d->ctrlSet, 0x1000); /* set WAKE */
	}

	return;
}
876
/* Transmission of an async or iso packet - hpsb_host driver hook.
 *
 * Queues the packet on the matching transmit DMA context (AT request,
 * AT response, or the lazily-initialized legacy IT context) and kicks
 * the context.  Returns 0 on success, -EOVERFLOW if the payload is
 * larger than the host supports, -EINVAL if the legacy IT context
 * would have to be set up from interrupt context, or -ENOMEM if its
 * allocation fails. */
static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
{
	struct ti_ohci *ohci = host->hostdata;
	struct dma_trm_ctx *d;
	unsigned long flags;

	if (packet->data_size > ohci->max_packet_size) {
		PRINT(KERN_ERR,
		      "Transmit packet size %Zd is too big",
		      packet->data_size);
		return -EOVERFLOW;
	}

	/* Decide whether we have an iso, a request, or a response packet */
	if (packet->type == hpsb_raw)
		d = &ohci->at_req_context;
	else if ((packet->tcode == TCODE_ISO_DATA) && (packet->type == hpsb_iso)) {
		/* The legacy IT DMA context is initialized on first
		 * use.  However, the alloc cannot be run from
		 * interrupt context, so we bail out if that is the
		 * case. I don't see anyone sending ISO packets from
		 * interrupt context anyway... */

		if (ohci->it_legacy_context.ohci == NULL) {
			if (in_interrupt()) {
				PRINT(KERN_ERR,
				      "legacy IT context cannot be initialized during interrupt");
				return -EINVAL;
			}

			if (alloc_dma_trm_ctx(ohci, &ohci->it_legacy_context,
					      DMA_CTX_ISO, 0, IT_NUM_DESC,
					      OHCI1394_IsoXmitContextBase) < 0) {
				PRINT(KERN_ERR,
				      "error initializing legacy IT context");
				return -ENOMEM;
			}

			initialize_dma_trm_ctx(&ohci->it_legacy_context);
		}

		d = &ohci->it_legacy_context;
	} else if ((packet->tcode & 0x02) && (packet->tcode != TCODE_ISO_DATA))
		/* bit 1 of the tcode marks responses; TCODE_ISO_DATA also
		 * has it set but was handled above, hence the exclusion */
		d = &ohci->at_resp_context;
	else
		d = &ohci->at_req_context;

	spin_lock_irqsave(&d->lock,flags);

	list_add_tail(&packet->driver_list, &d->pending_list);

	dma_trm_flush(ohci, d);

	spin_unlock_irqrestore(&d->lock,flags);

	return 0;
}
935
936 static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
937 {
938         struct ti_ohci *ohci = host->hostdata;
939         int retval = 0;
940         unsigned long flags;
941         int phy_reg;
942
943         switch (cmd) {
944         case RESET_BUS:
945                 switch (arg) {
946                 case SHORT_RESET:
947                         phy_reg = get_phy_reg(ohci, 5);
948                         phy_reg |= 0x40;
949                         set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
950                         break;
951                 case LONG_RESET:
952                         phy_reg = get_phy_reg(ohci, 1);
953                         phy_reg |= 0x40;
954                         set_phy_reg(ohci, 1, phy_reg); /* set IBR */
955                         break;
956                 case SHORT_RESET_NO_FORCE_ROOT:
957                         phy_reg = get_phy_reg(ohci, 1);
958                         if (phy_reg & 0x80) {
959                                 phy_reg &= ~0x80;
960                                 set_phy_reg(ohci, 1, phy_reg); /* clear RHB */
961                         }
962
963                         phy_reg = get_phy_reg(ohci, 5);
964                         phy_reg |= 0x40;
965                         set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
966                         break;
967                 case LONG_RESET_NO_FORCE_ROOT:
968                         phy_reg = get_phy_reg(ohci, 1);
969                         phy_reg &= ~0x80;
970                         phy_reg |= 0x40;
971                         set_phy_reg(ohci, 1, phy_reg); /* clear RHB, set IBR */
972                         break;
973                 case SHORT_RESET_FORCE_ROOT:
974                         phy_reg = get_phy_reg(ohci, 1);
975                         if (!(phy_reg & 0x80)) {
976                                 phy_reg |= 0x80;
977                                 set_phy_reg(ohci, 1, phy_reg); /* set RHB */
978                         }
979
980                         phy_reg = get_phy_reg(ohci, 5);
981                         phy_reg |= 0x40;
982                         set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
983                         break;
984                 case LONG_RESET_FORCE_ROOT:
985                         phy_reg = get_phy_reg(ohci, 1);
986                         phy_reg |= 0xc0;
987                         set_phy_reg(ohci, 1, phy_reg); /* set RHB and IBR */
988                         break;
989                 default:
990                         retval = -1;
991                 }
992                 break;
993
994         case GET_CYCLE_COUNTER:
995                 retval = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
996                 break;
997
998         case SET_CYCLE_COUNTER:
999                 reg_write(ohci, OHCI1394_IsochronousCycleTimer, arg);
1000                 break;
1001
1002         case SET_BUS_ID:
1003                 PRINT(KERN_ERR, "devctl command SET_BUS_ID err");
1004                 break;
1005
1006         case ACT_CYCLE_MASTER:
1007                 if (arg) {
1008                         /* check if we are root and other nodes are present */
1009                         u32 nodeId = reg_read(ohci, OHCI1394_NodeID);
1010                         if ((nodeId & (1<<30)) && (nodeId & 0x3f)) {
1011                                 /*
1012                                  * enable cycleTimer, cycleMaster
1013                                  */
1014                                 DBGMSG("Cycle master enabled");
1015                                 reg_write(ohci, OHCI1394_LinkControlSet,
1016                                           OHCI1394_LinkControl_CycleTimerEnable |
1017                                           OHCI1394_LinkControl_CycleMaster);
1018                         }
1019                 } else {
1020                         /* disable cycleTimer, cycleMaster, cycleSource */
1021                         reg_write(ohci, OHCI1394_LinkControlClear,
1022                                   OHCI1394_LinkControl_CycleTimerEnable |
1023                                   OHCI1394_LinkControl_CycleMaster |
1024                                   OHCI1394_LinkControl_CycleSource);
1025                 }
1026                 break;
1027
1028         case CANCEL_REQUESTS:
1029                 DBGMSG("Cancel request received");
1030                 dma_trm_reset(&ohci->at_req_context);
1031                 dma_trm_reset(&ohci->at_resp_context);
1032                 break;
1033
1034         case ISO_LISTEN_CHANNEL:
1035         {
1036                 u64 mask;
1037                 struct dma_rcv_ctx *d = &ohci->ir_legacy_context;
1038                 int ir_legacy_active;
1039
1040                 if (arg<0 || arg>63) {
1041                         PRINT(KERN_ERR,
1042                               "%s: IS0 listen channel %d is out of range",
1043                               __FUNCTION__, arg);
1044                         return -EFAULT;
1045                 }
1046
1047                 mask = (u64)0x1<<arg;
1048
1049                 spin_lock_irqsave(&ohci->IR_channel_lock, flags);
1050
1051                 if (ohci->ISO_channel_usage & mask) {
1052                         PRINT(KERN_ERR,
1053                               "%s: IS0 listen channel %d is already used",
1054                               __FUNCTION__, arg);
1055                         spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1056                         return -EFAULT;
1057                 }
1058
1059                 ir_legacy_active = ohci->ir_legacy_channels;
1060
1061                 ohci->ISO_channel_usage |= mask;
1062                 ohci->ir_legacy_channels |= mask;
1063
1064                 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1065
1066                 if (!ir_legacy_active) {
1067                         if (ohci1394_register_iso_tasklet(ohci,
1068                                           &ohci->ir_legacy_tasklet) < 0) {
1069                                 PRINT(KERN_ERR, "No IR DMA context available");
1070                                 return -EBUSY;
1071                         }
1072
1073                         /* the IR context can be assigned to any DMA context
1074                          * by ohci1394_register_iso_tasklet */
1075                         d->ctx = ohci->ir_legacy_tasklet.context;
1076                         d->ctrlSet = OHCI1394_IsoRcvContextControlSet +
1077                                 32*d->ctx;
1078                         d->ctrlClear = OHCI1394_IsoRcvContextControlClear +
1079                                 32*d->ctx;
1080                         d->cmdPtr = OHCI1394_IsoRcvCommandPtr + 32*d->ctx;
1081                         d->ctxtMatch = OHCI1394_IsoRcvContextMatch + 32*d->ctx;
1082
1083                         initialize_dma_rcv_ctx(&ohci->ir_legacy_context, 1);
1084
1085                         if (printk_ratelimit())
1086                                 DBGMSG("IR legacy activated");
1087                 }
1088
1089                 spin_lock_irqsave(&ohci->IR_channel_lock, flags);
1090
1091                 if (arg>31)
1092                         reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet,
1093                                   1<<(arg-32));
1094                 else
1095                         reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet,
1096                                   1<<arg);
1097
1098                 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1099                 DBGMSG("Listening enabled on channel %d", arg);
1100                 break;
1101         }
1102         case ISO_UNLISTEN_CHANNEL:
1103         {
1104                 u64 mask;
1105
1106                 if (arg<0 || arg>63) {
1107                         PRINT(KERN_ERR,
1108                               "%s: IS0 unlisten channel %d is out of range",
1109                               __FUNCTION__, arg);
1110                         return -EFAULT;
1111                 }
1112
1113                 mask = (u64)0x1<<arg;
1114
1115                 spin_lock_irqsave(&ohci->IR_channel_lock, flags);
1116
1117                 if (!(ohci->ISO_channel_usage & mask)) {
1118                         PRINT(KERN_ERR,
1119                               "%s: IS0 unlisten channel %d is not used",
1120                               __FUNCTION__, arg);
1121                         spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1122                         return -EFAULT;
1123                 }
1124
1125                 ohci->ISO_channel_usage &= ~mask;
1126                 ohci->ir_legacy_channels &= ~mask;
1127
1128                 if (arg>31)
1129                         reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear,
1130                                   1<<(arg-32));
1131                 else
1132                         reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear,
1133                                   1<<arg);
1134
1135                 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1136                 DBGMSG("Listening disabled on channel %d", arg);
1137
1138                 if (ohci->ir_legacy_channels == 0) {
1139                         stop_dma_rcv_ctx(&ohci->ir_legacy_context);
1140                         DBGMSG("ISO legacy receive context stopped");
1141                 }
1142
1143                 break;
1144         }
1145         default:
1146                 PRINT_G(KERN_ERR, "ohci_devctl cmd %d not implemented yet",
1147                         cmd);
1148                 break;
1149         }
1150         return retval;
1151 }
1152
1153 /***********************************
1154  * rawiso ISO reception            *
1155  ***********************************/
1156
1157 /*
1158   We use either buffer-fill or packet-per-buffer DMA mode. The DMA
1159   buffer is split into "blocks" (regions described by one DMA
1160   descriptor). Each block must be one page or less in size, and
1161   must not cross a page boundary.
1162
1163   There is one little wrinkle with buffer-fill mode: a packet that
1164   starts in the final block may wrap around into the first block. But
1165   the user API expects all packets to be contiguous. Our solution is
1166   to keep the very last page of the DMA buffer in reserve - if a
1167   packet spans the gap, we copy its tail into this page.
1168 */
1169
/* Per-hpsb_iso state for rawiso reception; stored in iso->hostdata. */
struct ohci_iso_recv {
	struct ti_ohci *ohci;

	struct ohci1394_iso_tasklet task;
	int task_active;	/* non-zero once the iso tasklet is registered */

	/* buffer-fill packs packets back to back; packet-per-buffer
	   gives each packet its own block */
	enum { BUFFER_FILL_MODE = 0,
	       PACKET_PER_BUFFER_MODE = 1 } dma_mode;

	/* memory and PCI mapping for the DMA descriptors */
	struct dma_prog_region prog;
	struct dma_cmd *block; /* = (struct dma_cmd*) prog.virt */

	/* how many DMA blocks fit in the buffer */
	unsigned int nblocks;

	/* stride of DMA blocks */
	unsigned int buf_stride;

	/* number of blocks to batch between interrupts */
	int block_irq_interval;

	/* block that DMA will finish next */
	int block_dma;

	/* (buffer-fill only) block that the reader will release next */
	int block_reader;

	/* (buffer-fill only) bytes of buffer the reader has released,
	   less than one block */
	int released_bytes;

	/* (buffer-fill only) buffer offset at which the next packet will appear */
	int dma_offset;

	/* OHCI DMA context control registers (offsets for this context) */
	u32 ContextControlSet;
	u32 ContextControlClear;
	u32 CommandPtr;
	u32 ContextMatch;
};
1211
1212 static void ohci_iso_recv_task(unsigned long data);
1213 static void ohci_iso_recv_stop(struct hpsb_iso *iso);
1214 static void ohci_iso_recv_shutdown(struct hpsb_iso *iso);
1215 static int  ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync);
1216 static void ohci_iso_recv_program(struct hpsb_iso *iso);
1217
/* Set up rawiso reception - hpsb_iso "init" hook.
 *
 * Chooses the DMA mode, sizes the block ring, allocates the DMA
 * program, claims an IR context via the iso tasklet machinery and
 * writes the descriptor program.  Returns 0 on success, -ENOMEM or
 * -EBUSY on failure; anything already set up is torn down again via
 * ohci_iso_recv_shutdown on the error path. */
static int ohci_iso_recv_init(struct hpsb_iso *iso)
{
	struct ti_ohci *ohci = iso->host->hostdata;
	struct ohci_iso_recv *recv;
	int ctx;
	int ret = -ENOMEM;

	recv = kmalloc(sizeof(*recv), GFP_KERNEL);
	if (!recv)
		return -ENOMEM;

	/* initialize enough state that ohci_iso_recv_shutdown is safe */
	iso->hostdata = recv;
	recv->ohci = ohci;
	recv->task_active = 0;
	dma_prog_region_init(&recv->prog);
	recv->block = NULL;

	/* use buffer-fill mode, unless irq_interval is 1
	   (note: multichannel requires buffer-fill) */

	if (((iso->irq_interval == 1 && iso->dma_mode == HPSB_ISO_DMA_OLD_ABI) ||
	     iso->dma_mode == HPSB_ISO_DMA_PACKET_PER_BUFFER) && iso->channel != -1) {
		recv->dma_mode = PACKET_PER_BUFFER_MODE;
	} else {
		recv->dma_mode = BUFFER_FILL_MODE;
	}

	/* set nblocks, buf_stride, block_irq_interval */

	if (recv->dma_mode == BUFFER_FILL_MODE) {
		recv->buf_stride = PAGE_SIZE;

		/* one block per page of data in the DMA buffer, minus the final guard page */
		recv->nblocks = iso->buf_size/PAGE_SIZE - 1;
		if (recv->nblocks < 3) {
			DBGMSG("ohci_iso_recv_init: DMA buffer too small");
			goto err;
		}

		/* iso->irq_interval is in packets - translate that to blocks */
		if (iso->irq_interval == 1)
			recv->block_irq_interval = 1;
		else
			recv->block_irq_interval = iso->irq_interval *
							((recv->nblocks+1)/iso->buf_packets);
		/* clamp: at most one interrupt per quarter ring, at least 1 */
		if (recv->block_irq_interval*4 > recv->nblocks)
			recv->block_irq_interval = recv->nblocks/4;
		if (recv->block_irq_interval < 1)
			recv->block_irq_interval = 1;

	} else {
		int max_packet_size;

		/* packet-per-buffer: one block per packet */
		recv->nblocks = iso->buf_packets;
		recv->block_irq_interval = iso->irq_interval;
		if (recv->block_irq_interval * 4 > iso->buf_packets)
			recv->block_irq_interval = iso->buf_packets / 4;
		if (recv->block_irq_interval < 1)
		recv->block_irq_interval = 1;

		/* choose a buffer stride */
		/* must be a power of 2, and <= PAGE_SIZE */

		max_packet_size = iso->buf_size / iso->buf_packets;

		/* smallest power of two >= max_packet_size (min 8) */
		for (recv->buf_stride = 8; recv->buf_stride < max_packet_size;
		    recv->buf_stride *= 2);

		if (recv->buf_stride*iso->buf_packets > iso->buf_size ||
		   recv->buf_stride > PAGE_SIZE) {
			/* this shouldn't happen, but anyway... */
			DBGMSG("ohci_iso_recv_init: problem choosing a buffer stride");
			goto err;
		}
	}

	recv->block_reader = 0;
	recv->released_bytes = 0;
	recv->block_dma = 0;
	recv->dma_offset = 0;

	/* size of DMA program = one descriptor per block */
	if (dma_prog_region_alloc(&recv->prog,
				 sizeof(struct dma_cmd) * recv->nblocks,
				 recv->ohci->dev))
		goto err;

	recv->block = (struct dma_cmd*) recv->prog.kvirt;

	ohci1394_init_iso_tasklet(&recv->task,
				  iso->channel == -1 ? OHCI_ISO_MULTICHANNEL_RECEIVE :
						       OHCI_ISO_RECEIVE,
				  ohci_iso_recv_task, (unsigned long) iso);

	if (ohci1394_register_iso_tasklet(recv->ohci, &recv->task) < 0) {
		ret = -EBUSY;
		goto err;
	}

	recv->task_active = 1;

	/* recv context registers are spaced 32 bytes apart */
	ctx = recv->task.context;
	recv->ContextControlSet = OHCI1394_IsoRcvContextControlSet + 32 * ctx;
	recv->ContextControlClear = OHCI1394_IsoRcvContextControlClear + 32 * ctx;
	recv->CommandPtr = OHCI1394_IsoRcvCommandPtr + 32 * ctx;
	recv->ContextMatch = OHCI1394_IsoRcvContextMatch + 32 * ctx;

	if (iso->channel == -1) {
		/* clear multi-channel selection mask */
		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, 0xFFFFFFFF);
		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, 0xFFFFFFFF);
	}

	/* write the DMA program */
	ohci_iso_recv_program(iso);

	DBGMSG("ohci_iso_recv_init: %s mode, DMA buffer is %lu pages"
	       " (%u bytes), using %u blocks, buf_stride %u, block_irq_interval %d",
	       recv->dma_mode == BUFFER_FILL_MODE ?
	       "buffer-fill" : "packet-per-buffer",
	       iso->buf_size/PAGE_SIZE, iso->buf_size,
	       recv->nblocks, recv->buf_stride, recv->block_irq_interval);

	return 0;

err:
	ohci_iso_recv_shutdown(iso);
	return ret;
}
1348
/* Halt rawiso reception: mask this context's interrupt, then stop the
 * IR DMA context itself. */
static void ohci_iso_recv_stop(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;

	/* disable interrupts */
	reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << recv->task.context);

	/* halt DMA */
	ohci1394_stop_context(recv->ohci, recv->ContextControlClear, NULL);
}
1359
/* Tear down rawiso reception and free all resources.
 * Also serves as the error path of ohci_iso_recv_init, so it must
 * cope with a partially initialized *recv (hence the task_active
 * guard; dma_prog_region_free handles the never-allocated case). */
static void ohci_iso_recv_shutdown(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;

	if (recv->task_active) {
		/* stop the hardware before giving the context back */
		ohci_iso_recv_stop(iso);
		ohci1394_unregister_iso_tasklet(recv->ohci, &recv->task);
		recv->task_active = 0;
	}

	dma_prog_region_free(&recv->prog);
	kfree(recv);
	iso->hostdata = NULL;
}
1374
/* Set up a "gapped" ring buffer DMA program: one INPUT_MORE (buffer-fill)
 * or INPUT_LAST (packet-per-buffer) descriptor per block, each chained to
 * the next via branchAddress.  The final descriptor is left unbranched;
 * the chain is extended as the reader releases blocks. */
static void ohci_iso_recv_program(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	int blk;

	/* address of 'branch' field in previous DMA descriptor */
	u32 *prev_branch = NULL;

	for (blk = 0; blk < recv->nblocks; blk++) {
		u32 control;

		/* the DMA descriptor */
		struct dma_cmd *cmd = &recv->block[blk];

		/* offset of the DMA descriptor relative to the DMA prog buffer */
		unsigned long prog_offset = blk * sizeof(struct dma_cmd);

		/* offset of this packet's data within the DMA buffer */
		unsigned long buf_offset = blk * recv->buf_stride;

		if (recv->dma_mode == BUFFER_FILL_MODE) {
			control = 2 << 28; /* INPUT_MORE */
		} else {
			control = 3 << 28; /* INPUT_LAST */
		}

		control |= 8 << 24; /* s = 1, update xferStatus and resCount */

		/* interrupt on last block, and at intervals */
		if (blk == recv->nblocks-1 || (blk % recv->block_irq_interval) == 0) {
			control |= 3 << 20; /* want interrupt */
		}

		control |= 3 << 18; /* enable branch to address */
		control |= recv->buf_stride; /* low bits: bytes this block holds */

		cmd->control = cpu_to_le32(control);
		cmd->address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, buf_offset));
		cmd->branchAddress = 0; /* filled in on next loop */
		cmd->status = cpu_to_le32(recv->buf_stride); /* resCount starts full */

		/* link the previous descriptor to this one (Z=1) */
		if (prev_branch) {
			*prev_branch = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog, prog_offset) | 1);
		}

		prev_branch = &cmd->branchAddress;
	}

	/* the final descriptor's branch address and Z should be left at 0 */
}
1427
1428 /* listen or unlisten to a specific channel (multi-channel mode only) */
1429 static void ohci_iso_recv_change_channel(struct hpsb_iso *iso, unsigned char channel, int listen)
1430 {
1431         struct ohci_iso_recv *recv = iso->hostdata;
1432         int reg, i;
1433
1434         if (channel < 32) {
1435                 reg = listen ? OHCI1394_IRMultiChanMaskLoSet : OHCI1394_IRMultiChanMaskLoClear;
1436                 i = channel;
1437         } else {
1438                 reg = listen ? OHCI1394_IRMultiChanMaskHiSet : OHCI1394_IRMultiChanMaskHiClear;
1439                 i = channel - 32;
1440         }
1441
1442         reg_write(recv->ohci, reg, (1 << i));
1443
1444         /* issue a dummy read to force all PCI writes to be posted immediately */
1445         mb();
1446         reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1447 }
1448
1449 static void ohci_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
1450 {
1451         struct ohci_iso_recv *recv = iso->hostdata;
1452         int i;
1453
1454         for (i = 0; i < 64; i++) {
1455                 if (mask & (1ULL << i)) {
1456                         if (i < 32)
1457                                 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoSet, (1 << i));
1458                         else
1459                                 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiSet, (1 << (i-32)));
1460                 } else {
1461                         if (i < 32)
1462                                 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, (1 << i));
1463                         else
1464                                 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, (1 << (i-32)));
1465                 }
1466         }
1467
1468         /* issue a dummy read to force all PCI writes to be posted immediately */
1469         mb();
1470         reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1471 }
1472
1473 static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
1474 {
1475         struct ohci_iso_recv *recv = iso->hostdata;
1476         struct ti_ohci *ohci = recv->ohci;
1477         u32 command, contextMatch;
1478
1479         reg_write(recv->ohci, recv->ContextControlClear, 0xFFFFFFFF);
1480         wmb();
1481
1482         /* always keep ISO headers */
1483         command = (1 << 30);
1484
1485         if (recv->dma_mode == BUFFER_FILL_MODE)
1486                 command |= (1 << 31);
1487
1488         reg_write(recv->ohci, recv->ContextControlSet, command);
1489
1490         /* match on specified tags */
1491         contextMatch = tag_mask << 28;
1492
1493         if (iso->channel == -1) {
1494                 /* enable multichannel reception */
1495                 reg_write(recv->ohci, recv->ContextControlSet, (1 << 28));
1496         } else {
1497                 /* listen on channel */
1498                 contextMatch |= iso->channel;
1499         }
1500
1501         if (cycle != -1) {
1502                 u32 seconds;
1503
1504                 /* enable cycleMatch */
1505                 reg_write(recv->ohci, recv->ContextControlSet, (1 << 29));
1506
1507                 /* set starting cycle */
1508                 cycle &= 0x1FFF;
1509
1510                 /* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
1511                    just snarf them from the current time */
1512                 seconds = reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer) >> 25;
1513
1514                 /* advance one second to give some extra time for DMA to start */
1515                 seconds += 1;
1516
1517                 cycle |= (seconds & 3) << 13;
1518
1519                 contextMatch |= cycle << 12;
1520         }
1521
1522         if (sync != -1) {
1523                 /* set sync flag on first DMA descriptor */
1524                 struct dma_cmd *cmd = &recv->block[recv->block_dma];
1525                 cmd->control |= cpu_to_le32(DMA_CTL_WAIT);
1526
1527                 /* match sync field */
1528                 contextMatch |= (sync&0xf)<<8;
1529         }
1530
1531         reg_write(recv->ohci, recv->ContextMatch, contextMatch);
1532
1533         /* address of first descriptor block */
1534         command = dma_prog_region_offset_to_bus(&recv->prog,
1535                                                 recv->block_dma * sizeof(struct dma_cmd));
1536         command |= 1; /* Z=1 */
1537
1538         reg_write(recv->ohci, recv->CommandPtr, command);
1539
1540         /* enable interrupts */
1541         reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskSet, 1 << recv->task.context);
1542
1543         wmb();
1544
1545         /* run */
1546         reg_write(recv->ohci, recv->ContextControlSet, 0x8000);
1547
1548         /* issue a dummy read of the cycle timer register to force
1549            all PCI writes to be posted immediately */
1550         mb();
1551         reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1552
1553         /* check RUN */
1554         if (!(reg_read(recv->ohci, recv->ContextControlSet) & 0x8000)) {
1555                 PRINT(KERN_ERR,
1556                       "Error starting IR DMA (ContextControl 0x%08x)\n",
1557                       reg_read(recv->ohci, recv->ContextControlSet));
1558                 return -1;
1559         }
1560
1561         return 0;
1562 }
1563
1564 static void ohci_iso_recv_release_block(struct ohci_iso_recv *recv, int block)
1565 {
1566         /* re-use the DMA descriptor for the block */
1567         /* by linking the previous descriptor to it */
1568
1569         int next_i = block;
1570         int prev_i = (next_i == 0) ? (recv->nblocks - 1) : (next_i - 1);
1571
1572         struct dma_cmd *next = &recv->block[next_i];
1573         struct dma_cmd *prev = &recv->block[prev_i];
1574         
1575         /* ignore out-of-range requests */
1576         if ((block < 0) || (block > recv->nblocks))
1577                 return;
1578
1579         /* 'next' becomes the new end of the DMA chain,
1580            so disable branch and enable interrupt */
1581         next->branchAddress = 0;
1582         next->control |= cpu_to_le32(3 << 20);
1583         next->status = cpu_to_le32(recv->buf_stride);
1584
1585         /* link prev to next */
1586         prev->branchAddress = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog,
1587                                                                         sizeof(struct dma_cmd) * next_i)
1588                                           | 1); /* Z=1 */
1589
1590         /* disable interrupt on previous DMA descriptor, except at intervals */
1591         if ((prev_i % recv->block_irq_interval) == 0) {
1592                 prev->control |= cpu_to_le32(3 << 20); /* enable interrupt */
1593         } else {
1594                 prev->control &= cpu_to_le32(~(3<<20)); /* disable interrupt */
1595         }
1596         wmb();
1597
1598         /* wake up DMA in case it fell asleep */
1599         reg_write(recv->ohci, recv->ContextControlSet, (1 << 12));
1600 }
1601
1602 static void ohci_iso_recv_bufferfill_release(struct ohci_iso_recv *recv,
1603                                              struct hpsb_iso_packet_info *info)
1604 {
1605         /* release the memory where the packet was */
1606         recv->released_bytes += info->total_len;
1607
1608         /* have we released enough memory for one block? */
1609         while (recv->released_bytes > recv->buf_stride) {
1610                 ohci_iso_recv_release_block(recv, recv->block_reader);
1611                 recv->block_reader = (recv->block_reader + 1) % recv->nblocks;
1612                 recv->released_bytes -= recv->buf_stride;
1613         }
1614 }
1615
1616 static inline void ohci_iso_recv_release(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
1617 {
1618         struct ohci_iso_recv *recv = iso->hostdata;
1619         if (recv->dma_mode == BUFFER_FILL_MODE) {
1620                 ohci_iso_recv_bufferfill_release(recv, info);
1621         } else {
1622                 ohci_iso_recv_release_block(recv, info - iso->infos);
1623         }
1624 }
1625
1626 /* parse all packets from blocks that have been fully received */
1627 static void ohci_iso_recv_bufferfill_parse(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1628 {
1629         int wake = 0;
1630         int runaway = 0;
1631         struct ti_ohci *ohci = recv->ohci;
1632
1633         while (1) {
1634                 /* we expect the next parsable packet to begin at recv->dma_offset */
1635                 /* note: packet layout is as shown in section 10.6.1.1 of the OHCI spec */
1636
1637                 unsigned int offset;
1638                 unsigned short len, cycle, total_len;
1639                 unsigned char channel, tag, sy;
1640
1641                 unsigned char *p = iso->data_buf.kvirt;
1642
1643                 unsigned int this_block = recv->dma_offset/recv->buf_stride;
1644
1645                 /* don't loop indefinitely */
1646                 if (runaway++ > 100000) {
1647                         atomic_inc(&iso->overflows);
1648                         PRINT(KERN_ERR,
1649                               "IR DMA error - Runaway during buffer parsing!\n");
1650                         break;
1651                 }
1652
1653                 /* stop parsing once we arrive at block_dma (i.e. don't get ahead of DMA) */
1654                 if (this_block == recv->block_dma)
1655                         break;
1656
1657                 wake = 1;
1658
1659                 /* parse data length, tag, channel, and sy */
1660
1661                 /* note: we keep our own local copies of 'len' and 'offset'
1662                    so the user can't mess with them by poking in the mmap area */
1663
1664                 len = p[recv->dma_offset+2] | (p[recv->dma_offset+3] << 8);
1665
1666                 if (len > 4096) {
1667                         PRINT(KERN_ERR,
1668                               "IR DMA error - bogus 'len' value %u\n", len);
1669                 }
1670
1671                 channel = p[recv->dma_offset+1] & 0x3F;
1672                 tag = p[recv->dma_offset+1] >> 6;
1673                 sy = p[recv->dma_offset+0] & 0xF;
1674
1675                 /* advance to data payload */
1676                 recv->dma_offset += 4;
1677
1678                 /* check for wrap-around */
1679                 if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
1680                         recv->dma_offset -= recv->buf_stride*recv->nblocks;
1681                 }
1682
1683                 /* dma_offset now points to the first byte of the data payload */
1684                 offset = recv->dma_offset;
1685
1686                 /* advance to xferStatus/timeStamp */
1687                 recv->dma_offset += len;
1688
1689                 total_len = len + 8; /* 8 bytes header+trailer in OHCI packet */
1690                 /* payload is padded to 4 bytes */
1691                 if (len % 4) {
1692                         recv->dma_offset += 4 - (len%4);
1693                         total_len += 4 - (len%4);
1694                 }
1695
1696                 /* check for wrap-around */
1697                 if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
1698                         /* uh oh, the packet data wraps from the last
1699                            to the first DMA block - make the packet
1700                            contiguous by copying its "tail" into the
1701                            guard page */
1702
1703                         int guard_off = recv->buf_stride*recv->nblocks;
1704                         int tail_len = len - (guard_off - offset);
1705
1706                         if (tail_len > 0  && tail_len < recv->buf_stride) {
1707                                 memcpy(iso->data_buf.kvirt + guard_off,
1708                                        iso->data_buf.kvirt,
1709                                        tail_len);
1710                         }
1711
1712                         recv->dma_offset -= recv->buf_stride*recv->nblocks;
1713                 }
1714
1715                 /* parse timestamp */
1716                 cycle = p[recv->dma_offset+0] | (p[recv->dma_offset+1]<<8);
1717                 cycle &= 0x1FFF;
1718
1719                 /* advance to next packet */
1720                 recv->dma_offset += 4;
1721
1722                 /* check for wrap-around */
1723                 if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
1724                         recv->dma_offset -= recv->buf_stride*recv->nblocks;
1725                 }
1726
1727                 hpsb_iso_packet_received(iso, offset, len, total_len, cycle, channel, tag, sy);
1728         }
1729
1730         if (wake)
1731                 hpsb_iso_wake(iso);
1732 }
1733
1734 static void ohci_iso_recv_bufferfill_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1735 {
1736         int loop;
1737         struct ti_ohci *ohci = recv->ohci;
1738
1739         /* loop over all blocks */
1740         for (loop = 0; loop < recv->nblocks; loop++) {
1741
1742                 /* check block_dma to see if it's done */
1743                 struct dma_cmd *im = &recv->block[recv->block_dma];
1744
1745                 /* check the DMA descriptor for new writes to xferStatus */
1746                 u16 xferstatus = le32_to_cpu(im->status) >> 16;
1747
1748                 /* rescount is the number of bytes *remaining to be written* in the block */
1749                 u16 rescount = le32_to_cpu(im->status) & 0xFFFF;
1750
1751                 unsigned char event = xferstatus & 0x1F;
1752
1753                 if (!event) {
1754                         /* nothing has happened to this block yet */
1755                         break;
1756                 }
1757
1758                 if (event != 0x11) {
1759                         atomic_inc(&iso->overflows);
1760                         PRINT(KERN_ERR,
1761                               "IR DMA error - OHCI error code 0x%02x\n", event);
1762                 }
1763
1764                 if (rescount != 0) {
1765                         /* the card is still writing to this block;
1766                            we can't touch it until it's done */
1767                         break;
1768                 }
1769
1770                 /* OK, the block is finished... */
1771
1772                 /* sync our view of the block */
1773                 dma_region_sync_for_cpu(&iso->data_buf, recv->block_dma*recv->buf_stride, recv->buf_stride);
1774
1775                 /* reset the DMA descriptor */
1776                 im->status = recv->buf_stride;
1777
1778                 /* advance block_dma */
1779                 recv->block_dma = (recv->block_dma + 1) % recv->nblocks;
1780
1781                 if ((recv->block_dma+1) % recv->nblocks == recv->block_reader) {
1782                         atomic_inc(&iso->overflows);
1783                         DBGMSG("ISO reception overflow - "
1784                                "ran out of DMA blocks");
1785                 }
1786         }
1787
1788         /* parse any packets that have arrived */
1789         ohci_iso_recv_bufferfill_parse(iso, recv);
1790 }
1791
1792 static void ohci_iso_recv_packetperbuf_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1793 {
1794         int count;
1795         int wake = 0;
1796         struct ti_ohci *ohci = recv->ohci;
1797
1798         /* loop over the entire buffer */
1799         for (count = 0; count < recv->nblocks; count++) {
1800                 u32 packet_len = 0;
1801
1802                 /* pointer to the DMA descriptor */
1803                 struct dma_cmd *il = ((struct dma_cmd*) recv->prog.kvirt) + iso->pkt_dma;
1804
1805                 /* check the DMA descriptor for new writes to xferStatus */
1806                 u16 xferstatus = le32_to_cpu(il->status) >> 16;
1807                 u16 rescount = le32_to_cpu(il->status) & 0xFFFF;
1808
1809                 unsigned char event = xferstatus & 0x1F;
1810
1811                 if (!event) {
1812                         /* this packet hasn't come in yet; we are done for now */
1813                         goto out;
1814                 }
1815
1816                 if (event == 0x11) {
1817                         /* packet received successfully! */
1818
1819                         /* rescount is the number of bytes *remaining* in the packet buffer,
1820                            after the packet was written */
1821                         packet_len = recv->buf_stride - rescount;
1822
1823                 } else if (event == 0x02) {
1824                         PRINT(KERN_ERR, "IR DMA error - packet too long for buffer\n");
1825                 } else if (event) {
1826                         PRINT(KERN_ERR, "IR DMA error - OHCI error code 0x%02x\n", event);
1827                 }
1828
1829                 /* sync our view of the buffer */
1830                 dma_region_sync_for_cpu(&iso->data_buf, iso->pkt_dma * recv->buf_stride, recv->buf_stride);
1831
1832                 /* record the per-packet info */
1833                 {
1834                         /* iso header is 8 bytes ahead of the data payload */
1835                         unsigned char *hdr;
1836
1837                         unsigned int offset;
1838                         unsigned short cycle;
1839                         unsigned char channel, tag, sy;
1840
1841                         offset = iso->pkt_dma * recv->buf_stride;
1842                         hdr = iso->data_buf.kvirt + offset;
1843
1844                         /* skip iso header */
1845                         offset += 8;
1846                         packet_len -= 8;
1847
1848                         cycle = (hdr[0] | (hdr[1] << 8)) & 0x1FFF;
1849                         channel = hdr[5] & 0x3F;
1850                         tag = hdr[5] >> 6;
1851                         sy = hdr[4] & 0xF;
1852
1853                         hpsb_iso_packet_received(iso, offset, packet_len,
1854                                         recv->buf_stride, cycle, channel, tag, sy);
1855                 }
1856
1857                 /* reset the DMA descriptor */
1858                 il->status = recv->buf_stride;
1859
1860                 wake = 1;
1861                 recv->block_dma = iso->pkt_dma;
1862         }
1863
1864 out:
1865         if (wake)
1866                 hpsb_iso_wake(iso);
1867 }
1868
1869 static void ohci_iso_recv_task(unsigned long data)
1870 {
1871         struct hpsb_iso *iso = (struct hpsb_iso*) data;
1872         struct ohci_iso_recv *recv = iso->hostdata;
1873
1874         if (recv->dma_mode == BUFFER_FILL_MODE)
1875                 ohci_iso_recv_bufferfill_task(iso, recv);
1876         else
1877                 ohci_iso_recv_packetperbuf_task(iso, recv);
1878 }
1879
1880 /***********************************
1881  * rawiso ISO transmission         *
1882  ***********************************/
1883
/* per-stream state for rawiso transmission */
struct ohci_iso_xmit {
	struct ti_ohci *ohci;             /* the card this context runs on */
	struct dma_prog_region prog;      /* DMA program: one iso_xmit_cmd per buffered packet */
	struct ohci1394_iso_tasklet task; /* bottom half scheduled from the ISR */
	int task_active;                  /* non-zero once 'task' is registered (checked on shutdown) */

	/* register offsets of the IT context claimed for this stream,
	   computed in ohci_iso_xmit_init (contexts are 16 bytes apart) */
	u32 ContextControlSet;
	u32 ContextControlClear;
	u32 CommandPtr;
};
1894
1895 /* transmission DMA program:
1896    one OUTPUT_MORE_IMMEDIATE for the IT header
1897    one OUTPUT_LAST for the buffer data */
1898
struct iso_xmit_cmd {
	struct dma_cmd output_more_immediate; /* carries the IT header as immediate data */
	u8 iso_hdr[8];                        /* IT packet header: tcode/sy, tag/channel, speed, length */
	u32 unused[2];                        /* pads the immediate area out to a full descriptor slot */
	struct dma_cmd output_last;           /* points at the payload; hardware writes xferStatus/timeStamp back here */
};
1905
1906 static int ohci_iso_xmit_init(struct hpsb_iso *iso);
1907 static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle);
1908 static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso);
1909 static void ohci_iso_xmit_task(unsigned long data);
1910
/* allocate per-stream transmit state and its DMA program, and claim a
 * hardware IT context.  Returns 0 on success, -ENOMEM on allocation
 * failure, or -EBUSY if no IT context is free.  On error, everything
 * allocated so far is torn down via ohci_iso_xmit_shutdown(). */
static int ohci_iso_xmit_init(struct hpsb_iso *iso)
{
	struct ohci_iso_xmit *xmit;
	unsigned int prog_size;
	int ctx;
	int ret = -ENOMEM;

	xmit = kmalloc(sizeof(*xmit), GFP_KERNEL);
	if (!xmit)
		return -ENOMEM;

	iso->hostdata = xmit;
	xmit->ohci = iso->host->hostdata;
	/* must be cleared before the first error path: shutdown keys off it */
	xmit->task_active = 0;

	dma_prog_region_init(&xmit->prog);

	/* one descriptor pair (header + payload) per queueable packet */
	prog_size = sizeof(struct iso_xmit_cmd) * iso->buf_packets;

	if (dma_prog_region_alloc(&xmit->prog, prog_size, xmit->ohci->dev))
		goto err;

	ohci1394_init_iso_tasklet(&xmit->task, OHCI_ISO_TRANSMIT,
				  ohci_iso_xmit_task, (unsigned long) iso);

	if (ohci1394_register_iso_tasklet(xmit->ohci, &xmit->task) < 0) {
		ret = -EBUSY;
		goto err;
	}

	xmit->task_active = 1;

	/* xmit context registers are spaced 16 bytes apart */
	ctx = xmit->task.context;
	xmit->ContextControlSet = OHCI1394_IsoXmitContextControlSet + 16 * ctx;
	xmit->ContextControlClear = OHCI1394_IsoXmitContextControlClear + 16 * ctx;
	xmit->CommandPtr = OHCI1394_IsoXmitCommandPtr + 16 * ctx;

	return 0;

err:
	/* frees xmit->prog and xmit itself, and clears iso->hostdata */
	ohci_iso_xmit_shutdown(iso);
	return ret;
}
1955
/* mask this stream's IT interrupt and halt its DMA context */
static void ohci_iso_xmit_stop(struct hpsb_iso *iso)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci; /* referenced by the PRINT macro below */

	/* disable interrupts */
	reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskClear, 1 << xmit->task.context);

	/* halt DMA */
	if (ohci1394_stop_context(xmit->ohci, xmit->ContextControlClear, NULL)) {
		/* XXX the DMA context will lock up if you try to send too much data! */
		PRINT(KERN_ERR,
		      "you probably exceeded the OHCI card's bandwidth limit - "
		      "reload the module and reduce xmit bandwidth");
	}
}
1972
/* tear down a transmit stream: stop DMA, release the IT context, and
 * free the DMA program and per-stream state.  Also used as the error
 * path of ohci_iso_xmit_init(), so it tolerates partial initialization. */
static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;

	/* only stop/unregister if init got far enough to claim a context */
	if (xmit->task_active) {
		ohci_iso_xmit_stop(iso);
		ohci1394_unregister_iso_tasklet(xmit->ohci, &xmit->task);
		xmit->task_active = 0;
	}

	dma_prog_region_free(&xmit->prog);
	kfree(xmit);
	iso->hostdata = NULL;
}
1987
1988 static void ohci_iso_xmit_task(unsigned long data)
1989 {
1990         struct hpsb_iso *iso = (struct hpsb_iso*) data;
1991         struct ohci_iso_xmit *xmit = iso->hostdata;
1992         struct ti_ohci *ohci = xmit->ohci;
1993         int wake = 0;
1994         int count;
1995
1996         /* check the whole buffer if necessary, starting at pkt_dma */
1997         for (count = 0; count < iso->buf_packets; count++) {
1998                 int cycle;
1999
2000                 /* DMA descriptor */
2001                 struct iso_xmit_cmd *cmd = dma_region_i(&xmit->prog, struct iso_xmit_cmd, iso->pkt_dma);
2002
2003                 /* check for new writes to xferStatus */
2004                 u16 xferstatus = le32_to_cpu(cmd->output_last.status) >> 16;
2005                 u8  event = xferstatus & 0x1F;
2006
2007                 if (!event) {
2008                         /* packet hasn't been sent yet; we are done for now */
2009                         break;
2010                 }
2011
2012                 if (event != 0x11)
2013                         PRINT(KERN_ERR,
2014                               "IT DMA error - OHCI error code 0x%02x\n", event);
2015
2016                 /* at least one packet went out, so wake up the writer */
2017                 wake = 1;
2018
2019                 /* parse cycle */
2020                 cycle = le32_to_cpu(cmd->output_last.status) & 0x1FFF;
2021
2022                 /* tell the subsystem the packet has gone out */
2023                 hpsb_iso_packet_sent(iso, cycle, event != 0x11);
2024
2025                 /* reset the DMA descriptor for next time */
2026                 cmd->output_last.status = 0;
2027         }
2028
2029         if (wake)
2030                 hpsb_iso_wake(iso);
2031 }
2032
/* queue one packet for transmission by appending a descriptor pair
 * (OUTPUT_MORE_IMMEDIATE header + OUTPUT_LAST payload) to the circular
 * DMA program.  The payload must already be in iso->data_buf at
 * info->offset.  Returns 0 on success, -EINVAL if the payload crosses
 * a page boundary (not supported without OUTPUT_MORE descriptors). */
static int ohci_iso_xmit_queue(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci;

	int next_i, prev_i;
	struct iso_xmit_cmd *next, *prev;

	unsigned int offset;
	unsigned short len;
	unsigned char tag, sy;

	/* check that the packet doesn't cross a page boundary
	   (we could allow this if we added OUTPUT_MORE descriptor support) */
	if (cross_bound(info->offset, info->len)) {
		PRINT(KERN_ERR,
		      "rawiso xmit: packet %u crosses a page boundary",
		      iso->first_packet);
		return -EINVAL;
	}

	/* local copies so the caller can't change them under us */
	offset = info->offset;
	len = info->len;
	tag = info->tag;
	sy = info->sy;

	/* sync up the card's view of the buffer */
	dma_region_sync_for_device(&iso->data_buf, offset, len);

	/* append first_packet to the DMA chain */
	/* by linking the previous descriptor to it */
	/* (next will become the new end of the DMA chain) */

	next_i = iso->first_packet;
	prev_i = (next_i == 0) ? (iso->buf_packets - 1) : (next_i - 1);

	next = dma_region_i(&xmit->prog, struct iso_xmit_cmd, next_i);
	prev = dma_region_i(&xmit->prog, struct iso_xmit_cmd, prev_i);

	/* set up the OUTPUT_MORE_IMMEDIATE descriptor */
	memset(next, 0, sizeof(struct iso_xmit_cmd));
	next->output_more_immediate.control = cpu_to_le32(0x02000008);

	/* ISO packet header is embedded in the OUTPUT_MORE_IMMEDIATE */

	/* tcode = 0xA, and sy */
	next->iso_hdr[0] = 0xA0 | (sy & 0xF);

	/* tag and channel number */
	next->iso_hdr[1] = (tag << 6) | (iso->channel & 0x3F);

	/* transmission speed */
	next->iso_hdr[2] = iso->speed & 0x7;

	/* payload size */
	next->iso_hdr[6] = len & 0xFF;
	next->iso_hdr[7] = len >> 8;

	/* set up the OUTPUT_LAST */
	next->output_last.control = cpu_to_le32(1 << 28);
	next->output_last.control |= cpu_to_le32(1 << 27); /* update timeStamp */
	next->output_last.control |= cpu_to_le32(3 << 20); /* want interrupt */
	next->output_last.control |= cpu_to_le32(3 << 18); /* enable branch */
	next->output_last.control |= cpu_to_le32(len);

	/* payload bus address */
	next->output_last.address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, offset));

	/* leave branchAddress at zero for now */

	/* re-write the previous DMA descriptor to chain to this one */

	/* set prev branch address to point to next (Z=3) */
	prev->output_last.branchAddress = cpu_to_le32(
		dma_prog_region_offset_to_bus(&xmit->prog, sizeof(struct iso_xmit_cmd) * next_i) | 3);

	/* disable interrupt, unless required by the IRQ interval */
	if (prev_i % iso->irq_interval) {
		prev->output_last.control &= cpu_to_le32(~(3 << 20)); /* no interrupt */
	} else {
		prev->output_last.control |= cpu_to_le32(3 << 20); /* enable interrupt */
	}

	wmb();

	/* wake DMA in case it is sleeping */
	reg_write(xmit->ohci, xmit->ContextControlSet, 1 << 12);

	/* issue a dummy read of the cycle timer to force all PCI
	   writes to be posted immediately */
	mb();
	reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer);

	return 0;
}
2128
2129 static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle)
2130 {
2131         struct ohci_iso_xmit *xmit = iso->hostdata;
2132         struct ti_ohci *ohci = xmit->ohci;
2133
2134         /* clear out the control register */
2135         reg_write(xmit->ohci, xmit->ContextControlClear, 0xFFFFFFFF);
2136         wmb();
2137
2138         /* address and length of first descriptor block (Z=3) */
2139         reg_write(xmit->ohci, xmit->CommandPtr,
2140                   dma_prog_region_offset_to_bus(&xmit->prog, iso->pkt_dma * sizeof(struct iso_xmit_cmd)) | 3);
2141
2142         /* cycle match */
2143         if (cycle != -1) {
2144                 u32 start = cycle & 0x1FFF;
2145
2146                 /* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
2147                    just snarf them from the current time */
2148                 u32 seconds = reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer) >> 25;
2149
2150                 /* advance one second to give some extra time for DMA to start */
2151                 seconds += 1;
2152
2153                 start |= (seconds & 3) << 13;
2154
2155                 reg_write(xmit->ohci, xmit->ContextControlSet, 0x80000000 | (start << 16));
2156         }
2157
2158         /* enable interrupts */
2159         reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskSet, 1 << xmit->task.context);
2160
2161         /* run */
2162         reg_write(xmit->ohci, xmit->ContextControlSet, 0x8000);
2163         mb();
2164
2165         /* wait 100 usec to give the card time to go active */
2166         udelay(100);
2167
2168         /* check the RUN bit */
2169         if (!(reg_read(xmit->ohci, xmit->ContextControlSet) & 0x8000)) {
2170                 PRINT(KERN_ERR, "Error starting IT DMA (ContextControl 0x%08x)\n",
2171                       reg_read(xmit->ohci, xmit->ContextControlSet));
2172                 return -1;
2173         }
2174
2175         return 0;
2176 }
2177
/* host-driver isoctl hook: dispatch each rawiso command to the
 * xmit/recv implementation above.  'arg' is command-dependent (a cycle
 * number, a pointer to packet info, an int[3], or a channel/mask).
 * Returns the handler's result, or -EINVAL for unknown commands. */
static int ohci_isoctl(struct hpsb_iso *iso, enum isoctl_cmd cmd, unsigned long arg)
{

	switch(cmd) {
	case XMIT_INIT:
		return ohci_iso_xmit_init(iso);
	case XMIT_START:
		return ohci_iso_xmit_start(iso, arg);
	case XMIT_STOP:
		ohci_iso_xmit_stop(iso);
		return 0;
	case XMIT_QUEUE:
		return ohci_iso_xmit_queue(iso, (struct hpsb_iso_packet_info*) arg);
	case XMIT_SHUTDOWN:
		ohci_iso_xmit_shutdown(iso);
		return 0;

	case RECV_INIT:
		return ohci_iso_recv_init(iso);
	case RECV_START: {
		/* args are: cycle, tag_mask, sync */
		int *args = (int*) arg;
		return ohci_iso_recv_start(iso, args[0], args[1], args[2]);
	}
	case RECV_STOP:
		ohci_iso_recv_stop(iso);
		return 0;
	case RECV_RELEASE:
		ohci_iso_recv_release(iso, (struct hpsb_iso_packet_info*) arg);
		return 0;
	case RECV_FLUSH:
		/* run the bottom half synchronously to drain pending packets */
		ohci_iso_recv_task((unsigned long) iso);
		return 0;
	case RECV_SHUTDOWN:
		ohci_iso_recv_shutdown(iso);
		return 0;
	case RECV_LISTEN_CHANNEL:
		ohci_iso_recv_change_channel(iso, arg, 1);
		return 0;
	case RECV_UNLISTEN_CHANNEL:
		ohci_iso_recv_change_channel(iso, arg, 0);
		return 0;
	case RECV_SET_CHANNEL_MASK:
		ohci_iso_recv_set_channel_mask(iso, *((u64*) arg));
		return 0;

	default:
		PRINT_G(KERN_ERR, "ohci_isoctl cmd %d not implemented yet",
			cmd);
		break;
	}
	return -EINVAL;
}
2230
2231 /***************************************
2232  * IEEE-1394 functionality section END *
2233  ***************************************/
2234
2235
2236 /********************************************************
2237  * Global stuff (interrupt handler, init/shutdown code) *
2238  ********************************************************/
2239
/* reset an async transmit DMA context and abort every packet queued on
 * it, notifying the subsystem of each with ACKX_ABORTED.  Callbacks run
 * outside d->lock. */
static void dma_trm_reset(struct dma_trm_ctx *d)
{
	unsigned long flags;
	LIST_HEAD(packet_list);
	struct ti_ohci *ohci = d->ohci;
	struct hpsb_packet *packet, *ptmp;

	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	/* Lock the context, reset it and release it. Move the packets
	 * that were pending in the context to packet_list and free
	 * them after releasing the lock. */

	spin_lock_irqsave(&d->lock, flags);

	list_splice(&d->fifo_list, &packet_list);
	list_splice(&d->pending_list, &packet_list);
	INIT_LIST_HEAD(&d->fifo_list);
	INIT_LIST_HEAD(&d->pending_list);

	/* reset the context's program/descriptor bookkeeping */
	d->branchAddrPtr = NULL;
	d->sent_ind = d->prg_ind;
	d->free_prgs = d->num_desc;

	spin_unlock_irqrestore(&d->lock, flags);

	if (list_empty(&packet_list))
		return;

	PRINT(KERN_INFO, "AT dma reset ctx=%d, aborting transmission", d->ctx);

	/* Now process subsystem callbacks for the packets from this
	 * context. */
	list_for_each_entry_safe(packet, ptmp, &packet_list, driver_list) {
		list_del_init(&packet->driver_list);
		hpsb_packet_sent(ohci->host, packet, ACKX_ABORTED);
	}
}
2278
2279 static void ohci_schedule_iso_tasklets(struct ti_ohci *ohci,
2280                                        quadlet_t rx_event,
2281                                        quadlet_t tx_event)
2282 {
2283         struct ohci1394_iso_tasklet *t;
2284         unsigned long mask;
2285         unsigned long flags;
2286
2287         spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
2288
2289         list_for_each_entry(t, &ohci->iso_tasklet_list, link) {
2290                 mask = 1 << t->context;
2291
2292                 if (t->type == OHCI_ISO_TRANSMIT && tx_event & mask)
2293                         tasklet_schedule(&t->tasklet);
2294                 else if (rx_event & mask)
2295                         tasklet_schedule(&t->tasklet);
2296         }
2297
2298         spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
2299 }
2300
/* Top-level interrupt handler for the controller.
 * Reads and acknowledges IntEvent under event_lock, then dispatches each
 * pending event bit in turn, clearing the bit from the local "event" copy
 * as it is handled.  Any bit still set at the end is logged as an
 * unhandled interrupt.  Returns IRQ_NONE when the interrupt was not ours
 * (shared IRQ line) or the card was ejected, IRQ_HANDLED otherwise. */
static irqreturn_t ohci_irq_handler(int irq, void *dev_id)
{
	quadlet_t event, node_id;
	struct ti_ohci *ohci = (struct ti_ohci *)dev_id;
	struct hpsb_host *host = ohci->host;
	int phyid = -1, isroot = 0;
	unsigned long flags;

	/* Read and clear the interrupt event register.  Don't clear
	 * the busReset event, though. This is done when we get the
	 * selfIDComplete interrupt. */
	spin_lock_irqsave(&ohci->event_lock, flags);
	event = reg_read(ohci, OHCI1394_IntEventClear);
	reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
	spin_unlock_irqrestore(&ohci->event_lock, flags);

	/* No bits set: the interrupt was raised by another device on a
	 * shared IRQ line. */
	if (!event)
		return IRQ_NONE;

	/* If event is ~(u32)0 cardbus card was ejected.  In this case
	 * we just return, and clean up in the ohci1394_pci_remove
	 * function. */
	if (event == ~(u32) 0) {
		DBGMSG("Device removed.");
		return IRQ_NONE;
	}

	DBGMSG("IntEvent: %08x", event);

	if (event & OHCI1394_unrecoverableError) {
		int ctx;
		PRINT(KERN_ERR, "Unrecoverable error!");

		/* Bit 0x800 of a ContextControl register is the "dead"
		 * flag.  Log control word and command pointer of every
		 * context that died; async contexts first, then the iso
		 * transmit contexts (register stride 16 bytes) and iso
		 * receive contexts (stride 32 bytes, they additionally
		 * have a ContextMatch register). */
		if (reg_read(ohci, OHCI1394_AsReqTrContextControlSet) & 0x800)
			PRINT(KERN_ERR, "Async Req Tx Context died: "
				"ctrl[%08x] cmdptr[%08x]",
				reg_read(ohci, OHCI1394_AsReqTrContextControlSet),
				reg_read(ohci, OHCI1394_AsReqTrCommandPtr));

		if (reg_read(ohci, OHCI1394_AsRspTrContextControlSet) & 0x800)
			PRINT(KERN_ERR, "Async Rsp Tx Context died: "
				"ctrl[%08x] cmdptr[%08x]",
				reg_read(ohci, OHCI1394_AsRspTrContextControlSet),
				reg_read(ohci, OHCI1394_AsRspTrCommandPtr));

		if (reg_read(ohci, OHCI1394_AsReqRcvContextControlSet) & 0x800)
			PRINT(KERN_ERR, "Async Req Rcv Context died: "
				"ctrl[%08x] cmdptr[%08x]",
				reg_read(ohci, OHCI1394_AsReqRcvContextControlSet),
				reg_read(ohci, OHCI1394_AsReqRcvCommandPtr));

		if (reg_read(ohci, OHCI1394_AsRspRcvContextControlSet) & 0x800)
			PRINT(KERN_ERR, "Async Rsp Rcv Context died: "
				"ctrl[%08x] cmdptr[%08x]",
				reg_read(ohci, OHCI1394_AsRspRcvContextControlSet),
				reg_read(ohci, OHCI1394_AsRspRcvCommandPtr));

		for (ctx = 0; ctx < ohci->nb_iso_xmit_ctx; ctx++) {
			if (reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)) & 0x800)
				PRINT(KERN_ERR, "Iso Xmit %d Context died: "
					"ctrl[%08x] cmdptr[%08x]", ctx,
					reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)),
					reg_read(ohci, OHCI1394_IsoXmitCommandPtr + (16 * ctx)));
		}

		for (ctx = 0; ctx < ohci->nb_iso_rcv_ctx; ctx++) {
			if (reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)) & 0x800)
				PRINT(KERN_ERR, "Iso Recv %d Context died: "
					"ctrl[%08x] cmdptr[%08x] match[%08x]", ctx,
					reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)),
					reg_read(ohci, OHCI1394_IsoRcvCommandPtr + (32 * ctx)),
					reg_read(ohci, OHCI1394_IsoRcvContextMatch + (32 * ctx)));
		}

		event &= ~OHCI1394_unrecoverableError;
	}
	if (event & OHCI1394_postedWriteErr) {
		PRINT(KERN_ERR, "physical posted write error");
		/* no recovery strategy yet, had to involve protocol drivers */
		event &= ~OHCI1394_postedWriteErr;
	}
	if (event & OHCI1394_cycleTooLong) {
		/* Rate-limit the warning; this event can fire repeatedly. */
		if(printk_ratelimit())
			PRINT(KERN_WARNING, "isochronous cycle too long");
		else
			DBGMSG("OHCI1394_cycleTooLong");
		/* The controller drops cycle-master duty on this event;
		 * re-enable it so isochronous traffic can continue. */
		reg_write(ohci, OHCI1394_LinkControlSet,
			  OHCI1394_LinkControl_CycleMaster);
		event &= ~OHCI1394_cycleTooLong;
	}
	if (event & OHCI1394_cycleInconsistent) {
		/* We subscribe to the cycleInconsistent event only to
		 * clear the corresponding event bit... otherwise,
		 * isochronous cycleMatch DMA won't work. */
		DBGMSG("OHCI1394_cycleInconsistent");
		event &= ~OHCI1394_cycleInconsistent;
	}
	if (event & OHCI1394_busReset) {
		/* The busReset event bit can't be cleared during the
		 * selfID phase, so we disable busReset interrupts, to
		 * avoid burying the cpu in interrupt requests. */
		spin_lock_irqsave(&ohci->event_lock, flags);
		reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);

		if (ohci->check_busreset) {
			int loop_count = 0;

			udelay(10);

			/* Poll until the controller lets go of the busReset
			 * event bit, dropping the lock around each delay. */
			while (reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
				reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);

				spin_unlock_irqrestore(&ohci->event_lock, flags);
				udelay(10);
				spin_lock_irqsave(&ohci->event_lock, flags);

				/* The loop counter check is to prevent the driver
				 * from remaining in this state forever. For the
				 * initial bus reset, the loop continues for ever
				 * and the system hangs, until some device is plugged-in
				 * or out manually into a port! The forced reset seems
				 * to solve this problem. This mainly effects nForce2. */
				if (loop_count > 10000) {
					ohci_devctl(host, RESET_BUS, LONG_RESET);
					DBGMSG("Detected bus-reset loop. Forced a bus reset!");
					loop_count = 0;
				}

				loop_count++;
			}
		}
		spin_unlock_irqrestore(&ohci->event_lock, flags);
		if (!host->in_bus_reset) {
			DBGMSG("irq_handler: Bus reset requested");

			/* Subsystem call */
			hpsb_bus_reset(ohci->host);
		}
		event &= ~OHCI1394_busReset;
	}
	if (event & OHCI1394_reqTxComplete) {
		struct dma_trm_ctx *d = &ohci->at_req_context;
		DBGMSG("Got reqTxComplete interrupt "
		       "status=0x%08X", reg_read(ohci, d->ctrlSet));
		/* 0x800 = context dead: stop it; otherwise run the AT
		 * completion work directly instead of deferring it. */
		if (reg_read(ohci, d->ctrlSet) & 0x800)
			ohci1394_stop_context(ohci, d->ctrlClear,
					      "reqTxComplete");
		else
			dma_trm_tasklet((unsigned long)d);
			//tasklet_schedule(&d->task);
		event &= ~OHCI1394_reqTxComplete;
	}
	if (event & OHCI1394_respTxComplete) {
		struct dma_trm_ctx *d = &ohci->at_resp_context;
		DBGMSG("Got respTxComplete interrupt "
		       "status=0x%08X", reg_read(ohci, d->ctrlSet));
		if (reg_read(ohci, d->ctrlSet) & 0x800)
			ohci1394_stop_context(ohci, d->ctrlClear,
					      "respTxComplete");
		else
			tasklet_schedule(&d->task);
		event &= ~OHCI1394_respTxComplete;
	}
	if (event & OHCI1394_RQPkt) {
		struct dma_rcv_ctx *d = &ohci->ar_req_context;
		DBGMSG("Got RQPkt interrupt status=0x%08X",
		       reg_read(ohci, d->ctrlSet));
		if (reg_read(ohci, d->ctrlSet) & 0x800)
			ohci1394_stop_context(ohci, d->ctrlClear, "RQPkt");
		else
			tasklet_schedule(&d->task);
		event &= ~OHCI1394_RQPkt;
	}
	if (event & OHCI1394_RSPkt) {
		struct dma_rcv_ctx *d = &ohci->ar_resp_context;
		DBGMSG("Got RSPkt interrupt status=0x%08X",
		       reg_read(ohci, d->ctrlSet));
		if (reg_read(ohci, d->ctrlSet) & 0x800)
			ohci1394_stop_context(ohci, d->ctrlClear, "RSPkt");
		else
			tasklet_schedule(&d->task);
		event &= ~OHCI1394_RSPkt;
	}
	if (event & OHCI1394_isochRx) {
		quadlet_t rx_event;

		/* Hand the per-context event bits to the registered iso
		 * tasklets; the per-context register is acked here. */
		rx_event = reg_read(ohci, OHCI1394_IsoRecvIntEventSet);
		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, rx_event);
		ohci_schedule_iso_tasklets(ohci, rx_event, 0);
		event &= ~OHCI1394_isochRx;
	}
	if (event & OHCI1394_isochTx) {
		quadlet_t tx_event;

		tx_event = reg_read(ohci, OHCI1394_IsoXmitIntEventSet);
		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, tx_event);
		ohci_schedule_iso_tasklets(ohci, 0, tx_event);
		event &= ~OHCI1394_isochTx;
	}
	if (event & OHCI1394_selfIDComplete) {
		if (host->in_bus_reset) {
			node_id = reg_read(ohci, OHCI1394_NodeID);

			/* Bit 31 of NodeID is the idValid flag; if it is
			 * clear the ID was invalidated, probably by yet
			 * another bus reset. */
			if (!(node_id & 0x80000000)) {
				PRINT(KERN_ERR,
				      "SelfID received, but NodeID invalid "
				      "(probably new bus reset occurred): %08X",
				      node_id);
				goto selfid_not_valid;
			}

			phyid =  node_id & 0x0000003f;
			isroot = (node_id & 0x40000000) != 0;

			DBGMSG("SelfID interrupt received "
			      "(phyid %d, %s)", phyid,
			      (isroot ? "root" : "not root"));

			handle_selfid(ohci, host, phyid, isroot);

			/* Clear the bus reset event and re-enable the
			 * busReset interrupt.  */
			spin_lock_irqsave(&ohci->event_lock, flags);
			reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
			reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
			spin_unlock_irqrestore(&ohci->event_lock, flags);

			/* Turn on phys dma reception.
			 *
			 * TODO: Enable some sort of filtering management.
			 */
			if (phys_dma) {
				reg_write(ohci, OHCI1394_PhyReqFilterHiSet,
					  0xffffffff);
				reg_write(ohci, OHCI1394_PhyReqFilterLoSet,
					  0xffffffff);
			}

			DBGMSG("PhyReqFilter=%08x%08x",
			       reg_read(ohci, OHCI1394_PhyReqFilterHiSet),
			       reg_read(ohci, OHCI1394_PhyReqFilterLoSet));

			hpsb_selfid_complete(host, phyid, isroot);
		} else
			PRINT(KERN_ERR,
			      "SelfID received outside of bus reset sequence");

selfid_not_valid:
		event &= ~OHCI1394_selfIDComplete;
	}

	/* Make sure we handle everything, just in case we accidentally
	 * enabled an interrupt that we didn't write a handler for.  */
	if (event)
		PRINT(KERN_ERR, "Unhandled interrupt(s) 0x%08x",
		      event);

	return IRQ_HANDLED;
}
2560
2561 /* Put the buffer back into the dma context */
2562 static void insert_dma_buffer(struct dma_rcv_ctx *d, int idx)
2563 {
2564         struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2565         DBGMSG("Inserting dma buf ctx=%d idx=%d", d->ctx, idx);
2566
2567         d->prg_cpu[idx]->status = cpu_to_le32(d->buf_size);
2568         d->prg_cpu[idx]->branchAddress &= le32_to_cpu(0xfffffff0);
2569         idx = (idx + d->num_desc - 1 ) % d->num_desc;
2570         d->prg_cpu[idx]->branchAddress |= le32_to_cpu(0x00000001);
2571
2572         /* To avoid a race, ensure 1394 interface hardware sees the inserted
2573          * context program descriptors before it sees the wakeup bit set. */
2574         wmb();
2575         
2576         /* wake up the dma context if necessary */
2577         if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
2578                 PRINT(KERN_INFO,
2579                       "Waking dma ctx=%d ... processing is probably too slow",
2580                       d->ctx);
2581         }
2582
2583         /* do this always, to avoid race condition */
2584         reg_write(ohci, d->ctrlSet, 0x1000);
2585 }
2586
/* Conditionally convert a little-endian quadlet to host order.
 * "noswap" is set when the controller already delivers incoming data in
 * host order (ohci->no_swap_incoming).  Arguments are parenthesized so
 * the macro expands safely when callers pass compound expressions; note
 * that "data" is evaluated twice, so callers must not pass expressions
 * with side effects. */
#define cond_le32_to_cpu(data, noswap) \
	((noswap) ? (data) : le32_to_cpu(data))
2589
/* Total length in bytes, indexed by IEEE 1394 transaction code, of an
 * async packet as it appears in the AR buffer (header plus trailing
 * status quadlet — presumably; confirm against the OHCI spec).
 * 0 marks block tcodes whose total length depends on the data_length
 * field and is computed in packet_length(); -1 marks tcodes that are
 * invalid in an async receive context (packet_length() returning < 4
 * makes the caller stop the context). */
static const int TCODE_SIZE[16] = {20, 0, 16, -1, 16, 20, 20, 0,
			    -1, 0, -1, 0, -1, -1, 16, -1};
2592
2593 /*
2594  * Determine the length of a packet in the buffer
2595  * Optimization suggested by Pascal Drolet <pascal.drolet@informission.ca>
2596  */
2597 static inline int packet_length(struct dma_rcv_ctx *d, int idx,
2598                                 quadlet_t *buf_ptr, int offset,
2599                                 unsigned char tcode, int noswap)
2600 {
2601         int length = -1;
2602
2603         if (d->type == DMA_CTX_ASYNC_REQ || d->type == DMA_CTX_ASYNC_RESP) {
2604                 length = TCODE_SIZE[tcode];
2605                 if (length == 0) {
2606                         if (offset + 12 >= d->buf_size) {
2607                                 length = (cond_le32_to_cpu(d->buf_cpu[(idx + 1) % d->num_desc]
2608                                                 [3 - ((d->buf_size - offset) >> 2)], noswap) >> 16);
2609                         } else {
2610                                 length = (cond_le32_to_cpu(buf_ptr[3], noswap) >> 16);
2611                         }
2612                         length += 20;
2613                 }
2614         } else if (d->type == DMA_CTX_ISO) {
2615                 /* Assumption: buffer fill mode with header/trailer */
2616                 length = (cond_le32_to_cpu(buf_ptr[0], noswap) >> 16) + 8;
2617         }
2618
2619         if (length > 0 && length % 4)
2620                 length += 4 - (length % 4);
2621
2622         return length;
2623 }
2624
/* Tasklet that processes dma receive buffers.
 * Walks the ring of receive buffers from the position saved in
 * d->buf_ind/d->buf_offset, copies each complete packet into the split
 * buffer d->spb, hands it to the ieee1394 core, and recycles fully
 * consumed buffers back into the DMA program via insert_dma_buffer(). */
static void dma_rcv_tasklet (unsigned long data)
{
	struct dma_rcv_ctx *d = (struct dma_rcv_ctx*)data;
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
	unsigned int split_left, idx, offset, rescount;
	unsigned char tcode;
	int length, bytes_left, ack;
	unsigned long flags;
	quadlet_t *buf_ptr;
	char *split_ptr;
	char msg[256];

	spin_lock_irqsave(&d->lock, flags);

	/* Resume where the previous run left off. */
	idx = d->buf_ind;
	offset = d->buf_offset;
	buf_ptr = d->buf_cpu[idx] + offset/4;

	/* The low 16 bits of the descriptor status word are resCount:
	 * the number of bytes the controller has NOT yet filled.  The
	 * region between "offset" and buf_size - rescount is received,
	 * unprocessed data. */
	rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
	bytes_left = d->buf_size - rescount - offset;

	while (bytes_left > 0) {
		/* tcode sits in bits 4..7 of the first header quadlet. */
		tcode = (cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming) >> 4) & 0xf;

		/* packet_length() will return < 4 for an error */
		length = packet_length(d, idx, buf_ptr, offset, tcode, ohci->no_swap_incoming);

		if (length < 4) { /* something is wrong */
			sprintf(msg,"Unexpected tcode 0x%x(0x%08x) in AR ctx=%d, length=%d",
				tcode, cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming),
				d->ctx, length);
			ohci1394_stop_context(ohci, d->ctrlClear, msg);
			spin_unlock_irqrestore(&d->lock, flags);
			return;
		}

		/* The first case is where we have a packet that crosses
		 * over more than one descriptor. The next case is where
		 * it's all in the first descriptor.  */
		if ((offset + length) > d->buf_size) {
			DBGMSG("Split packet rcv'd");
			if (length > d->split_buf_size) {
				ohci1394_stop_context(ohci, d->ctrlClear,
					     "Split packet size exceeded");
				d->buf_ind = idx;
				d->buf_offset = offset;
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}

			if (le32_to_cpu(d->prg_cpu[(idx+1)%d->num_desc]->status)
			    == d->buf_size) {
				/* Other part of packet not written yet.
				 * this should never happen I think
				 * anyway we'll get it on the next call.  */
				PRINT(KERN_INFO,
				      "Got only half a packet!");
				d->buf_ind = idx;
				d->buf_offset = offset;
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}

			/* Reassemble the packet into d->spb, recycling
			 * each fully consumed ring buffer as we go. */
			split_left = length;
			split_ptr = (char *)d->spb;
			memcpy(split_ptr,buf_ptr,d->buf_size-offset);
			split_left -= d->buf_size-offset;
			split_ptr += d->buf_size-offset;
			insert_dma_buffer(d, idx);
			idx = (idx+1) % d->num_desc;
			buf_ptr = d->buf_cpu[idx];
			offset=0;

			while (split_left >= d->buf_size) {
				memcpy(split_ptr,buf_ptr,d->buf_size);
				split_ptr += d->buf_size;
				split_left -= d->buf_size;
				insert_dma_buffer(d, idx);
				idx = (idx+1) % d->num_desc;
				buf_ptr = d->buf_cpu[idx];
			}

			if (split_left > 0) {
				memcpy(split_ptr, buf_ptr, split_left);
				offset = split_left;
				buf_ptr += offset/4;
			}
		} else {
			DBGMSG("Single packet rcv'd");
			/* Contiguous packet: copy it out, advance, and
			 * recycle the buffer if it was consumed exactly. */
			memcpy(d->spb, buf_ptr, length);
			offset += length;
			buf_ptr += length/4;
			if (offset==d->buf_size) {
				insert_dma_buffer(d, idx);
				idx = (idx+1) % d->num_desc;
				buf_ptr = d->buf_cpu[idx];
				offset=0;
			}
		}

		/* We get one phy packet to the async descriptor for each
		 * bus reset. We always ignore it.  */
		if (tcode != OHCI1394_TCODE_PHY) {
			if (!ohci->no_swap_incoming)
				header_le32_to_cpu(d->spb, tcode);
			DBGMSG("Packet received from node"
				" %d ack=0x%02X spd=%d tcode=0x%X"
				" length=%d ctx=%d tlabel=%d",
				(d->spb[1]>>16)&0x3f,
				(cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f,
				(cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>21)&0x3,
				tcode, length, d->ctx,
				(d->spb[0]>>10)&0x3f);

			/* ack=1 only when the trailer's ack code equals
			 * 0x11 (apparently ack_complete — confirm against
			 * the OHCI specification). */
			ack = (((cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f)
				== 0x11) ? 1 : 0;

			/* length-4: strip the trailing status quadlet
			 * before handing the packet to the subsystem. */
			hpsb_packet_received(ohci->host, d->spb,
					     length-4, ack);
		}
#ifdef OHCI1394_DEBUG
		else
			PRINT (KERN_DEBUG, "Got phy packet ctx=%d ... discarded",
			       d->ctx);
#endif

		/* Re-read resCount: the controller may have written more
		 * data into the current buffer while we were busy. */
		rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;

		bytes_left = d->buf_size - rescount - offset;

	}

	/* Remember our position for the next invocation. */
	d->buf_ind = idx;
	d->buf_offset = offset;

	spin_unlock_irqrestore(&d->lock, flags);
}
2763
2764 /* Bottom half that processes sent packets */
2765 static void dma_trm_tasklet (unsigned long data)
2766 {
2767         struct dma_trm_ctx *d = (struct dma_trm_ctx*)data;
2768         struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2769         struct hpsb_packet *packet, *ptmp;
2770         unsigned long flags;
2771         u32 status, ack;
2772         size_t datasize;
2773
2774         spin_lock_irqsave(&d->lock, flags);
2775
2776         list_for_each_entry_safe(packet, ptmp, &d->fifo_list, driver_list) {
2777                 datasize = packet->data_size;
2778                 if (datasize && packet->type != hpsb_raw)
2779                         status = le32_to_cpu(
2780                                 d->prg_cpu[d->sent_ind]->end.status) >> 16;
2781                 else
2782                         status = le32_to_cpu(
2783                                 d->prg_cpu[d->sent_ind]->begin.status) >> 16;
2784
2785                 if (status == 0)
2786                         /* this packet hasn't been sent yet*/
2787                         break;
2788
2789 #ifdef OHCI1394_DEBUG
2790                 if (datasize)
2791                         if (((le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf) == 0xa)
2792                                 DBGMSG("Stream packet sent to channel %d tcode=0x%X "
2793                                        "ack=0x%X spd=%d dataLength=%d ctx=%d",
2794                                        (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>8)&0x3f,
2795                                        (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
2796                                        status&0x1f, (status>>5)&0x3,
2797                                        le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16,
2798                                        d->ctx);
2799                         else
2800                                 DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
2801                                        "%d ack=0x%X spd=%d dataLength=%d ctx=%d",
2802                                        (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16)&0x3f,
2803                                        (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
2804                                        (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>10)&0x3f,
2805                                        status&0x1f, (status>>5)&0x3,
2806                                        le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3])>>16,
2807                                        d->ctx);
2808                 else
2809                         DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
2810                                "%d ack=0x%X spd=%d data=0x%08X ctx=%d",
2811                                 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])
2812                                         >>16)&0x3f,
2813                                 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
2814                                         >>4)&0xf,
2815                                 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
2816                                         >>10)&0x3f,
2817                                 status&0x1f, (status>>5)&0x3,
2818                                 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3]),
2819                                 d->ctx);
2820 #endif
2821
2822                 if (status & 0x10) {
2823                         ack = status & 0xf;
2824                 } else {
2825                         switch (status & 0x1f) {
2826                         case EVT_NO_STATUS: /* that should never happen */
2827                         case EVT_RESERVED_A: /* that should never happen */
2828                         case EVT_LONG_PACKET: /* that should never happen */
2829                                 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2830                                 ack = ACKX_SEND_ERROR;
2831                                 break;
2832                         case EVT_MISSING_ACK:
2833                                 ack = ACKX_TIMEOUT;
2834                                 break;
2835                         case EVT_UNDERRUN:
2836                                 ack = ACKX_SEND_ERROR;
2837                                 break;
2838                         case EVT_OVERRUN: /* that should never happen */
2839                                 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2840                                 ack = ACKX_SEND_ERROR;
2841                                 break;
2842                         case EVT_DESCRIPTOR_READ:
2843                         case EVT_DATA_READ:
2844                         case EVT_DATA_WRITE:
2845                                 ack = ACKX_SEND_ERROR;
2846                                 break;
2847                         case EVT_BUS_RESET: /* that should never happen */
2848                                 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2849                                 ack = ACKX_SEND_ERROR;
2850                                 break;
2851                         case EVT_TIMEOUT:
2852                                 ack = ACKX_TIMEOUT;
2853                                 break;
2854                         case EVT_TCODE_ERR:
2855                                 ack = ACKX_SEND_ERROR;
2856                                 break;
2857                         case EVT_RESERVED_B: /* that should never happen */
2858                         case EVT_RESERVED_C: /* that should never happen */
2859                                 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2860                                 ack = ACKX_SEND_ERROR;
2861                                 break;
2862                         case EVT_UNKNOWN:
2863                         case EVT_FLUSHED:
2864                                 ack = ACKX_SEND_ERROR;
2865                                 break;
2866                         default:
2867                                 PRINT(KERN_ERR, "Unhandled OHCI evt_* error 0x%x", status & 0x1f);
2868                                 ack = ACKX_SEND_ERROR;
2869                                 BUG();
2870                         }
2871                 }
2872
2873                 list_del_init(&packet->driver_list);
2874                 hpsb_packet_sent(ohci->host, packet, ack);
2875
2876                 if (datasize) {
2877                         pci_unmap_single(ohci->dev,
2878                                          cpu_to_le32(d->prg_cpu[d->sent_ind]->end.address),
2879                                          datasize, PCI_DMA_TODEVICE);
2880                         OHCI_DMA_FREE("single Xmit data packet");
2881                 }
2882
2883                 d->sent_ind = (d->sent_ind+1)%d->num_desc;
2884                 d->free_prgs++;
2885         }
2886
2887         dma_trm_flush(ohci, d);
2888
2889         spin_unlock_irqrestore(&d->lock, flags);
2890 }
2891
2892 static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d)
2893 {
2894         if (d->ctrlClear) {
2895                 ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);
2896
2897                 if (d->type == DMA_CTX_ISO) {
2898                         /* disable interrupts */
2899                         reg_write(d->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << d->ctx);
2900                         ohci1394_unregister_iso_tasklet(d->ohci, &d->ohci->ir_legacy_tasklet);
2901                 } else {
2902                         tasklet_kill(&d->task);
2903                 }
2904         }
2905 }
2906
2907
2908 static void free_dma_rcv_ctx(struct dma_rcv_ctx *d)
2909 {
2910         int i;
2911         struct ti_ohci *ohci = d->ohci;
2912
2913         if (ohci == NULL)
2914                 return;
2915
2916         DBGMSG("Freeing dma_rcv_ctx %d", d->ctx);
2917
2918         if (d->buf_cpu) {
2919                 for (i=0; i<d->num_desc; i++)
2920                         if (d->buf_cpu[i] && d->buf_bus[i]) {
2921                                 pci_free_consistent(
2922                                         ohci->dev, d->buf_size,
2923                                         d->buf_cpu[i], d->buf_bus[i]);
2924                                 OHCI_DMA_FREE("consistent dma_rcv buf[%d]", i);
2925                         }
2926                 kfree(d->buf_cpu);
2927                 kfree(d->buf_bus);
2928         }
2929         if (d->prg_cpu) {
2930                 for (i=0; i<d->num_desc; i++)
2931                         if (d->prg_cpu[i] && d->prg_bus[i]) {
2932                                 pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
2933                                 OHCI_DMA_FREE("consistent dma_rcv prg[%d]", i);
2934                         }
2935                 pci_pool_destroy(d->prg_pool);
2936                 OHCI_DMA_FREE("dma_rcv prg pool");
2937                 kfree(d->prg_cpu);
2938                 kfree(d->prg_bus);
2939         }
2940         kfree(d->spb);
2941
2942         /* Mark this context as freed. */
2943         d->ohci = NULL;
2944 }
2945
2946 static int
2947 alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
2948                   enum context_type type, int ctx, int num_desc,
2949                   int buf_size, int split_buf_size, int context_base)
2950 {
2951         int i, len;
2952         static int num_allocs;
2953         static char pool_name[20];
2954
2955         d->ohci = ohci;
2956         d->type = type;
2957         d->ctx = ctx;
2958
2959         d->num_desc = num_desc;
2960         d->buf_size = buf_size;
2961         d->split_buf_size = split_buf_size;
2962
2963         d->ctrlSet = 0;
2964         d->ctrlClear = 0;
2965         d->cmdPtr = 0;
2966
2967         d->buf_cpu = kzalloc(d->num_desc * sizeof(*d->buf_cpu), GFP_ATOMIC);
2968         d->buf_bus = kzalloc(d->num_desc * sizeof(*d->buf_bus), GFP_ATOMIC);
2969
2970         if (d->buf_cpu == NULL || d->buf_bus == NULL) {
2971                 PRINT(KERN_ERR, "Failed to allocate dma buffer");
2972                 free_dma_rcv_ctx(d);
2973                 return -ENOMEM;
2974         }
2975
2976         d->prg_cpu = kzalloc(d->num_desc * sizeof(*d->prg_cpu), GFP_ATOMIC);
2977         d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_ATOMIC);
2978
2979         if (d->prg_cpu == NULL || d->prg_bus == NULL) {
2980                 PRINT(KERN_ERR, "Failed to allocate dma prg");
2981                 free_dma_rcv_ctx(d);
2982                 return -ENOMEM;
2983         }
2984
2985         d->spb = kmalloc(d->split_buf_size, GFP_ATOMIC);
2986
2987         if (d->spb == NULL) {
2988                 PRINT(KERN_ERR, "Failed to allocate split buffer");
2989                 free_dma_rcv_ctx(d);
2990                 return -ENOMEM;
2991         }
2992         
2993         len = sprintf(pool_name, "ohci1394_rcv_prg");
2994         sprintf(pool_name+len, "%d", num_allocs);
2995         d->prg_pool = pci_pool_create(pool_name, ohci->dev,
2996                                 sizeof(struct dma_cmd), 4, 0);
2997         if(d->prg_pool == NULL)
2998         {
2999                 PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
3000                 free_dma_rcv_ctx(d);
3001                 return -ENOMEM;
3002         }
3003         num_allocs++;
3004
3005         OHCI_DMA_ALLOC("dma_rcv prg pool");
3006
3007         for (i=0; i<d->num_desc; i++) {
3008                 d->buf_cpu[i] = pci_alloc_consistent(ohci->dev,
3009                                                      d->buf_size,
3010                                                      d->buf_bus+i);
3011                 OHCI_DMA_ALLOC("consistent dma_rcv buf[%d]", i);
3012
3013                 if (d->buf_cpu[i] != NULL) {
3014                         memset(d->buf_cpu[i], 0, d->buf_size);
3015                 } else {
3016                         PRINT(KERN_ERR,
3017                               "Failed to allocate dma buffer");
3018                         free_dma_rcv_ctx(d);
3019                         return -ENOMEM;
3020                 }
3021
3022                 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, GFP_KERNEL, d->prg_bus+i);
3023                 OHCI_DMA_ALLOC("pool dma_rcv prg[%d]", i);
3024
3025                 if (d->prg_cpu[i] != NULL) {
3026                         memset(d->prg_cpu[i], 0, sizeof(struct dma_cmd));
3027                 } else {
3028                         PRINT(KERN_ERR,
3029                               "Failed to allocate dma prg");
3030                         free_dma_rcv_ctx(d);
3031                         return -ENOMEM;
3032                 }
3033         }
3034
3035         spin_lock_init(&d->lock);
3036
3037         if (type == DMA_CTX_ISO) {
3038                 ohci1394_init_iso_tasklet(&ohci->ir_legacy_tasklet,
3039                                           OHCI_ISO_MULTICHANNEL_RECEIVE,
3040                                           dma_rcv_tasklet, (unsigned long) d);
3041         } else {
3042                 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
3043                 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
3044                 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
3045
3046                 tasklet_init (&d->task, dma_rcv_tasklet, (unsigned long) d);
3047         }
3048
3049         return 0;
3050 }
3051
3052 static void free_dma_trm_ctx(struct dma_trm_ctx *d)
3053 {
3054         int i;
3055         struct ti_ohci *ohci = d->ohci;
3056
3057         if (ohci == NULL)
3058                 return;
3059
3060         DBGMSG("Freeing dma_trm_ctx %d", d->ctx);
3061
3062         if (d->prg_cpu) {
3063                 for (i=0; i<d->num_desc; i++)
3064                         if (d->prg_cpu[i] && d->prg_bus[i]) {
3065                                 pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
3066                                 OHCI_DMA_FREE("pool dma_trm prg[%d]", i);
3067                         }
3068                 pci_pool_destroy(d->prg_pool);
3069                 OHCI_DMA_FREE("dma_trm prg pool");
3070                 kfree(d->prg_cpu);
3071                 kfree(d->prg_bus);
3072         }
3073
3074         /* Mark this context as freed. */
3075         d->ohci = NULL;
3076 }
3077
3078 static int
3079 alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
3080                   enum context_type type, int ctx, int num_desc,
3081                   int context_base)
3082 {
3083         int i, len;
3084         static char pool_name[20];
3085         static int num_allocs=0;
3086
3087         d->ohci = ohci;
3088         d->type = type;
3089         d->ctx = ctx;
3090         d->num_desc = num_desc;
3091         d->ctrlSet = 0;
3092         d->ctrlClear = 0;
3093         d->cmdPtr = 0;
3094
3095         d->prg_cpu = kzalloc(d->num_desc * sizeof(*d->prg_cpu), GFP_KERNEL);
3096         d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_KERNEL);
3097
3098         if (d->prg_cpu == NULL || d->prg_bus == NULL) {
3099                 PRINT(KERN_ERR, "Failed to allocate at dma prg");
3100                 free_dma_trm_ctx(d);
3101                 return -ENOMEM;
3102         }
3103
3104         len = sprintf(pool_name, "ohci1394_trm_prg");
3105         sprintf(pool_name+len, "%d", num_allocs);
3106         d->prg_pool = pci_pool_create(pool_name, ohci->dev,
3107                                 sizeof(struct at_dma_prg), 4, 0);
3108         if (d->prg_pool == NULL) {
3109                 PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
3110                 free_dma_trm_ctx(d);
3111                 return -ENOMEM;
3112         }
3113         num_allocs++;
3114
3115         OHCI_DMA_ALLOC("dma_rcv prg pool");
3116
3117         for (i = 0; i < d->num_desc; i++) {
3118                 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, GFP_KERNEL, d->prg_bus+i);
3119                 OHCI_DMA_ALLOC("pool dma_trm prg[%d]", i);
3120
3121                 if (d->prg_cpu[i] != NULL) {
3122                         memset(d->prg_cpu[i], 0, sizeof(struct at_dma_prg));
3123                 } else {
3124                         PRINT(KERN_ERR,
3125                               "Failed to allocate at dma prg");
3126                         free_dma_trm_ctx(d);
3127                         return -ENOMEM;
3128                 }
3129         }
3130
3131         spin_lock_init(&d->lock);
3132
3133         /* initialize tasklet */
3134         if (type == DMA_CTX_ISO) {
3135                 ohci1394_init_iso_tasklet(&ohci->it_legacy_tasklet, OHCI_ISO_TRANSMIT,
3136                                           dma_trm_tasklet, (unsigned long) d);
3137                 if (ohci1394_register_iso_tasklet(ohci,
3138                                                   &ohci->it_legacy_tasklet) < 0) {
3139                         PRINT(KERN_ERR, "No IT DMA context available");
3140                         free_dma_trm_ctx(d);
3141                         return -EBUSY;
3142                 }
3143
3144                 /* IT can be assigned to any context by register_iso_tasklet */
3145                 d->ctx = ohci->it_legacy_tasklet.context;
3146                 d->ctrlSet = OHCI1394_IsoXmitContextControlSet + 16 * d->ctx;
3147                 d->ctrlClear = OHCI1394_IsoXmitContextControlClear + 16 * d->ctx;
3148                 d->cmdPtr = OHCI1394_IsoXmitCommandPtr + 16 * d->ctx;
3149         } else {
3150                 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
3151                 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
3152                 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
3153                 tasklet_init (&d->task, dma_trm_tasklet, (unsigned long)d);
3154         }
3155
3156         return 0;
3157 }
3158
3159 static void ohci_set_hw_config_rom(struct hpsb_host *host, quadlet_t *config_rom)
3160 {
3161         struct ti_ohci *ohci = host->hostdata;
3162
3163         reg_write(ohci, OHCI1394_ConfigROMhdr, be32_to_cpu(config_rom[0]));
3164         reg_write(ohci, OHCI1394_BusOptions, be32_to_cpu(config_rom[2]));
3165
3166         memcpy(ohci->csr_config_rom_cpu, config_rom, OHCI_CONFIG_ROM_LEN);
3167 }
3168
3169
3170 static quadlet_t ohci_hw_csr_reg(struct hpsb_host *host, int reg,
3171                                  quadlet_t data, quadlet_t compare)
3172 {
3173         struct ti_ohci *ohci = host->hostdata;
3174         int i;
3175
3176         reg_write(ohci, OHCI1394_CSRData, data);
3177         reg_write(ohci, OHCI1394_CSRCompareData, compare);
3178         reg_write(ohci, OHCI1394_CSRControl, reg & 0x3);
3179
3180         for (i = 0; i < OHCI_LOOP_COUNT; i++) {
3181                 if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
3182                         break;
3183
3184                 mdelay(1);
3185         }
3186
3187         return reg_read(ohci, OHCI1394_CSRData);
3188 }
3189
/* Operations table handed to the ieee1394 core for hosts driven by
 * this controller driver. */
static struct hpsb_host_driver ohci1394_driver = {
	.owner =		THIS_MODULE,
	.name =			OHCI1394_DRIVER_NAME,
	.set_hw_config_rom =	ohci_set_hw_config_rom,
	.transmit_packet =	ohci_transmit,
	.devctl =		ohci_devctl,
	.isoctl =		ohci_isoctl,
	.hw_csr_reg =		ohci_hw_csr_reg,
};
3199
3200 /***********************************
3201  * PCI Driver Interface functions  *
3202  ***********************************/
3203
3204 #define FAIL(err, fmt, args...)                 \
3205 do {                                            \
3206         PRINT_G(KERN_ERR, fmt , ## args);       \
3207         ohci1394_pci_remove(dev);               \
3208         return err;                             \
3209 } while (0)
3210
3211 static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
3212                                         const struct pci_device_id *ent)
3213 {
3214         struct hpsb_host *host;
3215         struct ti_ohci *ohci;   /* shortcut to currently handled device */
3216         resource_size_t ohci_base;
3217
3218 #ifdef CONFIG_PPC_PMAC
3219         /* Necessary on some machines if ohci1394 was loaded/ unloaded before */
3220         if (machine_is(powermac)) {
3221                 struct device_node *ofn = pci_device_to_OF_node(dev);
3222
3223                 if (ofn) {
3224                         pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
3225                         pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
3226                 }
3227         }
3228 #endif /* CONFIG_PPC_PMAC */
3229
3230         if (pci_enable_device(dev))
3231                 FAIL(-ENXIO, "Failed to enable OHCI hardware");
3232         pci_set_master(dev);
3233
3234         host = hpsb_alloc_host(&ohci1394_driver, sizeof(struct ti_ohci), &dev->dev);
3235         if (!host) FAIL(-ENOMEM, "Failed to allocate host structure");
3236
3237         ohci = host->hostdata;
3238         ohci->dev = dev;
3239         ohci->host = host;
3240         ohci->init_state = OHCI_INIT_ALLOC_HOST;
3241         host->pdev = dev;
3242         pci_set_drvdata(dev, ohci);
3243
3244         /* We don't want hardware swapping */
3245         pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
3246
3247         /* Some oddball Apple controllers do not order the selfid
3248          * properly, so we make up for it here.  */
3249 #ifndef __LITTLE_ENDIAN
3250         /* XXX: Need a better way to check this. I'm wondering if we can
3251          * read the values of the OHCI1394_PCI_HCI_Control and the
3252          * noByteSwapData registers to see if they were not cleared to
3253          * zero. Should this work? Obviously it's not defined what these
3254          * registers will read when they aren't supported. Bleh! */
3255         if (dev->vendor == PCI_VENDOR_ID_APPLE &&
3256             dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) {
3257                 ohci->no_swap_incoming = 1;
3258                 ohci->selfid_swap = 0;
3259         } else
3260                 ohci->selfid_swap = 1;
3261 #endif
3262
3263
3264 #ifndef PCI_DEVICE_ID_NVIDIA_NFORCE2_FW
3265 #define PCI_DEVICE_ID_NVIDIA_NFORCE2_FW 0x006e
3266 #endif
3267
3268         /* These chipsets require a bit of extra care when checking after
3269          * a busreset.  */
3270         if ((dev->vendor == PCI_VENDOR_ID_APPLE &&
3271              dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) ||
3272             (dev->vendor ==  PCI_VENDOR_ID_NVIDIA &&
3273              dev->device == PCI_DEVICE_ID_NVIDIA_NFORCE2_FW))
3274                 ohci->check_busreset = 1;
3275
3276         /* We hardwire the MMIO length, since some CardBus adaptors
3277          * fail to report the right length.  Anyway, the ohci spec
3278          * clearly says it's 2kb, so this shouldn't be a problem. */
3279         ohci_base = pci_resource_start(dev, 0);
3280         if (pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE)
3281                 PRINT(KERN_WARNING, "PCI resource length of 0x%llx too small!",
3282                       (unsigned long long)pci_resource_len(dev, 0));
3283
3284         if (!request_mem_region(ohci_base, OHCI1394_REGISTER_SIZE,
3285                                 OHCI1394_DRIVER_NAME))
3286                 FAIL(-ENOMEM, "MMIO resource (0x%llx - 0x%llx) unavailable",
3287                         (unsigned long long)ohci_base,
3288                         (unsigned long long)ohci_base + OHCI1394_REGISTER_SIZE);
3289         ohci->init_state = OHCI_INIT_HAVE_MEM_REGION;
3290
3291         ohci->registers = ioremap(ohci_base, OHCI1394_REGISTER_SIZE);
3292         if (ohci->registers == NULL)
3293                 FAIL(-ENXIO, "Failed to remap registers - card not accessible");
3294         ohci->init_state = OHCI_INIT_HAVE_IOMAPPING;
3295         DBGMSG("Remapped memory spaces reg 0x%p", ohci->registers);
3296
3297         /* csr_config rom allocation */
3298         ohci->csr_config_rom_cpu =
3299                 pci_alloc_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
3300                                      &ohci->csr_config_rom_bus);
3301         OHCI_DMA_ALLOC("consistent csr_config_rom");
3302         if (ohci->csr_config_rom_cpu == NULL)
3303                 FAIL(-ENOMEM, "Failed to allocate buffer config rom");
3304         ohci->init_state = OHCI_INIT_HAVE_CONFIG_ROM_BUFFER;
3305
3306         /* self-id dma buffer allocation */
3307         ohci->selfid_buf_cpu =
3308                 pci_alloc_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
3309                       &ohci->selfid_buf_bus);
3310         OHCI_DMA_ALLOC("consistent selfid_buf");
3311
3312         if (ohci->selfid_buf_cpu == NULL)
3313                 FAIL(-ENOMEM, "Failed to allocate DMA buffer for self-id packets");
3314         ohci->init_state = OHCI_INIT_HAVE_SELFID_BUFFER;
3315
3316         if ((unsigned long)ohci->selfid_buf_cpu & 0x1fff)
3317                 PRINT(KERN_INFO, "SelfID buffer %p is not aligned on "
3318                       "8Kb boundary... may cause problems on some CXD3222 chip",
3319                       ohci->selfid_buf_cpu);
3320
3321         /* No self-id errors at startup */
3322         ohci->self_id_errors = 0;
3323
3324         ohci->init_state = OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE;
3325         /* AR DMA request context allocation */
3326         if (alloc_dma_rcv_ctx(ohci, &ohci->ar_req_context,
3327                               DMA_CTX_ASYNC_REQ, 0, AR_REQ_NUM_DESC,
3328                               AR_REQ_BUF_SIZE, AR_REQ_SPLIT_BUF_SIZE,
3329                               OHCI1394_AsReqRcvContextBase) < 0)
3330                 FAIL(-ENOMEM, "Failed to allocate AR Req context");
3331
3332         /* AR DMA response context allocation */
3333         if (alloc_dma_rcv_ctx(ohci, &ohci->ar_resp_context,
3334                               DMA_CTX_ASYNC_RESP, 0, AR_RESP_NUM_DESC,
3335                               AR_RESP_BUF_SIZE, AR_RESP_SPLIT_BUF_SIZE,
3336                               OHCI1394_AsRspRcvContextBase) < 0)
3337                 FAIL(-ENOMEM, "Failed to allocate AR Resp context");
3338
3339         /* AT DMA request context */
3340         if (alloc_dma_trm_ctx(ohci, &ohci->at_req_context,
3341                               DMA_CTX_ASYNC_REQ, 0, AT_REQ_NUM_DESC,
3342                               OHCI1394_AsReqTrContextBase) < 0)
3343                 FAIL(-ENOMEM, "Failed to allocate AT Req context");
3344
3345         /* AT DMA response context */
3346         if (alloc_dma_trm_ctx(ohci, &ohci->at_resp_context,
3347                               DMA_CTX_ASYNC_RESP, 1, AT_RESP_NUM_DESC,
3348                               OHCI1394_AsRspTrContextBase) < 0)
3349                 FAIL(-ENOMEM, "Failed to allocate AT Resp context");
3350
3351         /* Start off with a soft reset, to clear everything to a sane
3352          * state. */
3353         ohci_soft_reset(ohci);
3354
3355         /* Now enable LPS, which we need in order to start accessing
3356          * most of the registers.  In fact, on some cards (ALI M5251),
3357          * accessing registers in the SClk domain without LPS enabled
3358          * will lock up the machine.  Wait 50msec to make sure we have
3359          * full link enabled.  */
3360         reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);
3361
3362         /* Disable and clear interrupts */
3363         reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3364         reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3365
3366         mdelay(50);
3367
3368         /* Determine the number of available IR and IT contexts. */
3369         ohci->nb_iso_rcv_ctx =
3370                 get_nb_iso_ctx(ohci, OHCI1394_IsoRecvIntMaskSet);
3371         ohci->nb_iso_xmit_ctx =
3372                 get_nb_iso_ctx(ohci, OHCI1394_IsoXmitIntMaskSet);
3373
3374         /* Set the usage bits for non-existent contexts so they can't
3375          * be allocated */
3376         ohci->ir_ctx_usage = ~0 << ohci->nb_iso_rcv_ctx;
3377         ohci->it_ctx_usage = ~0 << ohci->nb_iso_xmit_ctx;
3378
3379         INIT_LIST_HEAD(&ohci->iso_tasklet_list);
3380         spin_lock_init(&ohci->iso_tasklet_list_lock);
3381         ohci->ISO_channel_usage = 0;
3382         spin_lock_init(&ohci->IR_channel_lock);
3383
3384         /* Allocate the IR DMA context right here so we don't have
3385          * to do it in interrupt path - note that this doesn't
3386          * waste much memory and avoids the jugglery required to
3387          * allocate it in IRQ path. */
3388         if (alloc_dma_rcv_ctx(ohci, &ohci->ir_legacy_context,
3389                               DMA_CTX_ISO, 0, IR_NUM_DESC,
3390                               IR_BUF_SIZE, IR_SPLIT_BUF_SIZE,
3391                               OHCI1394_IsoRcvContextBase) < 0) {
3392                 FAIL(-ENOMEM, "Cannot allocate IR Legacy DMA context");
3393         }
3394
3395         /* We hopefully don't have to pre-allocate IT DMA like we did
3396          * for IR DMA above. Allocate it on-demand and mark inactive. */
3397         ohci->it_legacy_context.ohci = NULL;
3398         spin_lock_init(&ohci->event_lock);
3399
3400         /*
3401          * interrupts are disabled, all right, but... due to IRQF_SHARED we
3402          * might get called anyway.  We'll see no event, of course, but
3403          * we need to get to that "no event", so enough should be initialized
3404          * by that point.
3405          */
3406         if (request_irq(dev->irq, ohci_irq_handler, IRQF_SHARED,
3407                          OHCI1394_DRIVER_NAME, ohci))
3408                 FAIL(-ENOMEM, "Failed to allocate shared interrupt %d", dev->irq);
3409
3410         ohci->init_state = OHCI_INIT_HAVE_IRQ;
3411         ohci_initialize(ohci);
3412
3413         /* Set certain csr values */
3414         host->csr.guid_hi = reg_read(ohci, OHCI1394_GUIDHi);
3415         host->csr.guid_lo = reg_read(ohci, OHCI1394_GUIDLo);
3416         host->csr.cyc_clk_acc = 100;  /* how do we determine clk accuracy? */
3417         host->csr.max_rec = (reg_read(ohci, OHCI1394_BusOptions) >> 12) & 0xf;
3418         host->csr.lnk_spd = reg_read(ohci, OHCI1394_BusOptions) & 0x7;
3419
3420         if (phys_dma) {
3421                 host->low_addr_space =
3422                         (u64) reg_read(ohci, OHCI1394_PhyUpperBound) << 16;
3423                 if (!host->low_addr_space)
3424                         host->low_addr_space = OHCI1394_PHYS_UPPER_BOUND_FIXED;
3425         }
3426         host->middle_addr_space = OHCI1394_MIDDLE_ADDRESS_SPACE;
3427
3428         /* Tell the highlevel this host is ready */
3429         if (hpsb_add_host(host))
3430                 FAIL(-ENOMEM, "Failed to register host with highlevel");
3431
3432         ohci->init_state = OHCI_INIT_DONE;
3433
3434         return 0;
3435 #undef FAIL
3436 }
3437
/* Remove callback, also used as the unified error-unwind path of
 * ohci1394_pci_probe().  Teardown is staged by ohci->init_state: each
 * switch case deliberately falls through to the next, so a partially
 * initialized card is unwound exactly as far as probe got. */
static void ohci1394_pci_remove(struct pci_dev *pdev)
{
	struct ti_ohci *ohci;
	struct device *dev;

	ohci = pci_get_drvdata(pdev);
	/* Nothing was set up yet (probe failed before pci_set_drvdata). */
	if (!ohci)
		return;

	/* Pin the host device so it outlives the teardown below. */
	dev = get_device(&ohci->host->device);

	switch (ohci->init_state) {
	case OHCI_INIT_DONE:
		hpsb_remove_host(ohci->host);

		/* Clear out BUS Options */
		reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
		reg_write(ohci, OHCI1394_BusOptions,
			  (reg_read(ohci, OHCI1394_BusOptions) & 0x0000f007) |
			  0x00ff0000);
		memset(ohci->csr_config_rom_cpu, 0, OHCI_CONFIG_ROM_LEN);

		/* fall through */
	case OHCI_INIT_HAVE_IRQ:
		/* Clear interrupt registers */
		reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);

		/* Disable IRM Contender */
		set_phy_reg(ohci, 4, ~0xc0 & get_phy_reg(ohci, 4));

		/* Clear link control register */
		reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);

		/* Let all other nodes know to ignore us */
		ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);

		/* Soft reset before we start - this disables
		 * interrupts and clears linkEnable and LPS. */
		ohci_soft_reset(ohci);
		free_irq(ohci->dev->irq, ohci);

		/* fall through */
	case OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE:
		/* The ohci_soft_reset() stops all DMA contexts, so we
		 * don't need to do this.  */
		free_dma_rcv_ctx(&ohci->ar_req_context);
		free_dma_rcv_ctx(&ohci->ar_resp_context);
		free_dma_trm_ctx(&ohci->at_req_context);
		free_dma_trm_ctx(&ohci->at_resp_context);
		free_dma_rcv_ctx(&ohci->ir_legacy_context);
		free_dma_trm_ctx(&ohci->it_legacy_context);

		/* fall through */
	case OHCI_INIT_HAVE_SELFID_BUFFER:
		pci_free_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
				    ohci->selfid_buf_cpu,
				    ohci->selfid_buf_bus);
		OHCI_DMA_FREE("consistent selfid_buf");

		/* fall through */
	case OHCI_INIT_HAVE_CONFIG_ROM_BUFFER:
		pci_free_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
				    ohci->csr_config_rom_cpu,
				    ohci->csr_config_rom_bus);
		OHCI_DMA_FREE("consistent csr_config_rom");

		/* fall through */
	case OHCI_INIT_HAVE_IOMAPPING:
		iounmap(ohci->registers);

		/* fall through */
	case OHCI_INIT_HAVE_MEM_REGION:
		release_mem_region(pci_resource_start(ohci->dev, 0),
				   OHCI1394_REGISTER_SIZE);

	/* NOTE: this PMAC block is written at function indentation but it
	 * still lives inside the switch, i.e. it runs on the fallthrough
	 * path of every case above. */
#ifdef CONFIG_PPC_PMAC
	/* On UniNorth, power down the cable and turn off the chip clock
	 * to save power on laptops */
	if (machine_is(powermac)) {
		struct device_node* ofn = pci_device_to_OF_node(ohci->dev);

		if (ofn) {
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
		}
	}
#endif /* CONFIG_PPC_PMAC */

		/* fall through */
	case OHCI_INIT_ALLOC_HOST:
		pci_set_drvdata(ohci->dev, NULL);
	}

	/* Drop the reference taken above. */
	if (dev)
		put_device(dev);
}
3532
3533 #ifdef CONFIG_PM
/* PCI suspend callback: quiesce the controller (mirroring the teardown
 * register sequence in ohci1394_pci_remove()), save PCI state and power
 * down.  On PowerMacs the FireWire cell is additionally switched off
 * last.  Returns 0 on success or a negative error code. */
static int ohci1394_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int err;
	struct ti_ohci *ohci = pci_get_drvdata(pdev);

	if (!ohci) {
		printk(KERN_ERR "%s: tried to suspend nonexisting host\n",
		       OHCI1394_DRIVER_NAME);
		return -ENXIO;
	}
	DBGMSG("suspend called");

	/* Clear the async DMA contexts and stop using the controller */
	hpsb_bus_reset(ohci->host);

	/* See ohci1394_pci_remove() for comments on this sequence */
	reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
	reg_write(ohci, OHCI1394_BusOptions,
		  (reg_read(ohci, OHCI1394_BusOptions) & 0x0000f007) |
		  0x00ff0000);
	reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
	set_phy_reg(ohci, 4, ~0xc0 & get_phy_reg(ohci, 4));
	reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
	ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
	ohci_soft_reset(ohci);

	err = pci_save_state(pdev);
	if (err) {
		PRINT(KERN_ERR, "pci_save_state failed with %d", err);
		return err;
	}
	/* Failing to enter a low-power state is not fatal; log and proceed. */
	err = pci_set_power_state(pdev, pci_choose_state(pdev, state));
	if (err)
		DBGMSG("pci_set_power_state failed with %d", err);

/* PowerMac suspend code comes last */
#ifdef CONFIG_PPC_PMAC
	if (machine_is(powermac)) {
		struct device_node *ofn = pci_device_to_OF_node(pdev);

		if (ofn)
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
	}
#endif /* CONFIG_PPC_PMAC */

	return 0;
}
3586
/* PCI resume callback: re-power the FireWire cell (PowerMac first, since
 * the chip is unreachable until then), restore PCI state, and rerun the
 * chip bring-up sequence from ohci1394_pci_probe() before telling the
 * ieee1394 core the host is back.  Returns 0 or a negative error code. */
static int ohci1394_pci_resume(struct pci_dev *pdev)
{
	int err;
	struct ti_ohci *ohci = pci_get_drvdata(pdev);

	if (!ohci) {
		printk(KERN_ERR "%s: tried to resume nonexisting host\n",
		       OHCI1394_DRIVER_NAME);
		return -ENXIO;
	}
	DBGMSG("resume called");

/* PowerMac resume code comes first */
#ifdef CONFIG_PPC_PMAC
	if (machine_is(powermac)) {
		struct device_node *ofn = pci_device_to_OF_node(pdev);

		if (ofn)
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
	}
#endif /* CONFIG_PPC_PMAC */

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		PRINT(KERN_ERR, "pci_enable_device failed with %d", err);
		return err;
	}

	/* See ohci1394_pci_probe() for comments on this sequence */
	ohci_soft_reset(ohci);
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);
	reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
	mdelay(50);
	ohci_initialize(ohci);

	hpsb_resume_host(ohci->host);
	return 0;
}
3628 #endif /* CONFIG_PM */
3629
/* Match any PCI device advertising the FireWire OHCI class code,
 * regardless of vendor/device ID — the OHCI register model is
 * vendor-neutral. */
static struct pci_device_id ohci1394_pci_tbl[] = {
	{
		.class =	PCI_CLASS_SERIAL_FIREWIRE_OHCI,
		.class_mask =	PCI_ANY_ID,
		.vendor =	PCI_ANY_ID,
		.device =	PCI_ANY_ID,
		.subvendor =	PCI_ANY_ID,
		.subdevice =	PCI_ANY_ID,
	},
	{ 0, },		/* terminator */
};

MODULE_DEVICE_TABLE(pci, ohci1394_pci_tbl);
3643
/* PCI driver glue; the suspend/resume hooks are only wired up when
 * power management support is configured in. */
static struct pci_driver ohci1394_pci_driver = {
	.name =		OHCI1394_DRIVER_NAME,
	.id_table =	ohci1394_pci_tbl,
	.probe =	ohci1394_pci_probe,
	.remove =	ohci1394_pci_remove,
#ifdef CONFIG_PM
	.resume =	ohci1394_pci_resume,
	.suspend =	ohci1394_pci_suspend,
#endif
};
3654
3655 /***********************************
3656  * OHCI1394 Video Interface        *
3657  ***********************************/
3658
3659 /* essentially the only purpose of this code is to allow another
3660    module to hook into ohci's interrupt handler */
3661
3662 /* returns zero if successful, one if DMA context is locked up */
3663 int ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg)
3664 {
3665         int i=0;
3666
3667         /* stop the channel program if it's still running */
3668         reg_write(ohci, reg, 0x8000);
3669
3670         /* Wait until it effectively stops */
3671         while (reg_read(ohci, reg) & 0x400) {
3672                 i++;
3673                 if (i>5000) {
3674                         PRINT(KERN_ERR,
3675                               "Runaway loop while stopping context: %s...", msg ? msg : "");
3676                         return 1;
3677                 }
3678
3679                 mb();
3680                 udelay(10);
3681         }
3682         if (msg) PRINT(KERN_ERR, "%s: dma prg stopped", msg);
3683         return 0;
3684 }
3685
3686 void ohci1394_init_iso_tasklet(struct ohci1394_iso_tasklet *tasklet, int type,
3687                                void (*func)(unsigned long), unsigned long data)
3688 {
3689         tasklet_init(&tasklet->tasklet, func, data);
3690         tasklet->type = type;
3691         /* We init the tasklet->link field, so we can list_del() it
3692          * without worrying whether it was added to the list or not. */
3693         INIT_LIST_HEAD(&tasklet->link);
3694 }
3695
3696 int ohci1394_register_iso_tasklet(struct ti_ohci *ohci,
3697                                   struct ohci1394_iso_tasklet *tasklet)
3698 {
3699         unsigned long flags, *usage;
3700         int n, i, r = -EBUSY;
3701
3702         if (tasklet->type == OHCI_ISO_TRANSMIT) {
3703                 n = ohci->nb_iso_xmit_ctx;
3704                 usage = &ohci->it_ctx_usage;
3705         }
3706         else {
3707                 n = ohci->nb_iso_rcv_ctx;
3708                 usage = &ohci->ir_ctx_usage;
3709
3710                 /* only one receive context can be multichannel (OHCI sec 10.4.1) */
3711                 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3712                         if (test_and_set_bit(0, &ohci->ir_multichannel_used)) {
3713                                 return r;
3714                         }
3715                 }
3716         }
3717
3718         spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3719
3720         for (i = 0; i < n; i++)
3721                 if (!test_and_set_bit(i, usage)) {
3722                         tasklet->context = i;
3723                         list_add_tail(&tasklet->link, &ohci->iso_tasklet_list);
3724                         r = 0;
3725                         break;
3726                 }
3727
3728         spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
3729
3730         return r;
3731 }
3732
3733 void ohci1394_unregister_iso_tasklet(struct ti_ohci *ohci,
3734                                      struct ohci1394_iso_tasklet *tasklet)
3735 {
3736         unsigned long flags;
3737
3738         tasklet_kill(&tasklet->tasklet);
3739
3740         spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3741
3742         if (tasklet->type == OHCI_ISO_TRANSMIT)
3743                 clear_bit(tasklet->context, &ohci->it_ctx_usage);
3744         else {
3745                 clear_bit(tasklet->context, &ohci->ir_ctx_usage);
3746
3747                 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3748                         clear_bit(0, &ohci->ir_multichannel_used);
3749                 }
3750         }
3751
3752         list_del(&tasklet->link);
3753
3754         spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
3755 }
3756
/* Exported so other modules can hook into this driver (see the video
 * interface note above). */
EXPORT_SYMBOL(ohci1394_stop_context);
EXPORT_SYMBOL(ohci1394_init_iso_tasklet);
EXPORT_SYMBOL(ohci1394_register_iso_tasklet);
EXPORT_SYMBOL(ohci1394_unregister_iso_tasklet);
3761
3762 /***********************************
3763  * General module initialization   *
3764  ***********************************/
3765
/* Module metadata. */
MODULE_AUTHOR("Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>");
MODULE_DESCRIPTION("Driver for PCI OHCI IEEE-1394 controllers");
MODULE_LICENSE("GPL");
3769
/* Module exit: unhook the PCI driver (per-device teardown happens in
 * ohci1394_pci_remove via the driver's .remove callback). */
static void __exit ohci1394_cleanup (void)
{
	pci_unregister_driver(&ohci1394_pci_driver);
}
3774
/* Module init: register the PCI driver; returns the negative errno from
 * pci_register_driver() on failure. */
static int __init ohci1394_init(void)
{
	return pci_register_driver(&ohci1394_pci_driver);
}
3779
/* Hooked via fs_initcall rather than module_init so this driver registers
 * before most other device drivers.
 * Useful for remote debugging via physical DMA, e.g. using firescope. */
fs_initcall(ohci1394_init);
module_exit(ohci1394_cleanup);