[PATCH] Sync up ieee-1394
[linux-2.6.git] / drivers / ieee1394 / ohci1394.c
1 /*
2  * ohci1394.c - driver for OHCI 1394 boards
3  * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
4  *                        Gord Peters <GordPeters@smarttech.com>
5  *              2001      Ben Collins <bcollins@debian.org>
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License, or
10  * (at your option) any later version.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program; if not, write to the Free Software Foundation,
19  * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20  */
21
22 /*
23  * Things known to be working:
24  * . Async Request Transmit
25  * . Async Response Receive
26  * . Async Request Receive
27  * . Async Response Transmit
28  * . Iso Receive
29  * . DMA mmap for iso receive
30  * . Config ROM generation
31  *
32  * Things implemented, but still in test phase:
33  * . Iso Transmit
34  * . Async Stream Packets Transmit (Receive done via Iso interface)
35  *
36  * Things not implemented:
37  * . DMA error recovery
38  *
39  * Known bugs:
40  * . devctl BUS_RESET arg confusion (reset type or root holdoff?)
41  *   added LONG_RESET_ROOT and SHORT_RESET_ROOT for root holdoff --kk
42  */
43
44 /*
45  * Acknowledgments:
46  *
47  * Adam J Richter <adam@yggdrasil.com>
48  *  . Use of pci_class to find device
49  *
50  * Emilie Chung <emilie.chung@axis.com>
51  *  . Tip on Async Request Filter
52  *
53  * Pascal Drolet <pascal.drolet@informission.ca>
 54  *  . Various tips for optimization and functionalities
55  *
56  * Robert Ficklin <rficklin@westengineering.com>
57  *  . Loop in irq_handler
58  *
59  * James Goodwin <jamesg@Filanet.com>
60  *  . Various tips on initialization, self-id reception, etc.
61  *
62  * Albrecht Dress <ad@mpifr-bonn.mpg.de>
63  *  . Apple PowerBook detection
64  *
65  * Daniel Kobras <daniel.kobras@student.uni-tuebingen.de>
66  *  . Reset the board properly before leaving + misc cleanups
67  *
68  * Leon van Stuivenberg <leonvs@iae.nl>
69  *  . Bug fixes
70  *
71  * Ben Collins <bcollins@debian.org>
72  *  . Working big-endian support
 73  *  . Updated to 2.4.x module scheme (PCI as well)
74  *  . Config ROM generation
75  *
76  * Manfred Weihs <weihs@ict.tuwien.ac.at>
77  *  . Reworked code for initiating bus resets
78  *    (long, short, with or without hold-off)
79  *
80  * Nandu Santhi <contactnandu@users.sourceforge.net>
81  *  . Added support for nVidia nForce2 onboard Firewire chipset
82  *
83  */
84
85 #include <linux/config.h>
86 #include <linux/kernel.h>
87 #include <linux/list.h>
88 #include <linux/slab.h>
89 #include <linux/interrupt.h>
90 #include <linux/wait.h>
91 #include <linux/errno.h>
92 #include <linux/module.h>
93 #include <linux/moduleparam.h>
94 #include <linux/pci.h>
95 #include <linux/fs.h>
96 #include <linux/poll.h>
97 #include <asm/byteorder.h>
98 #include <asm/atomic.h>
99 #include <asm/uaccess.h>
100 #include <linux/delay.h>
101 #include <linux/spinlock.h>
102
103 #include <asm/pgtable.h>
104 #include <asm/page.h>
105 #include <asm/irq.h>
106 #include <linux/sched.h>
107 #include <linux/types.h>
108 #include <linux/vmalloc.h>
109 #include <linux/init.h>
110
111 #ifdef CONFIG_PPC_PMAC
112 #include <asm/machdep.h>
113 #include <asm/pmac_feature.h>
114 #include <asm/prom.h>
115 #include <asm/pci-bridge.h>
116 #endif
117
118 #include "csr1212.h"
119 #include "ieee1394.h"
120 #include "ieee1394_types.h"
121 #include "hosts.h"
122 #include "dma.h"
123 #include "iso.h"
124 #include "ieee1394_core.h"
125 #include "highlevel.h"
126 #include "ohci1394.h"
127
/* Map the subsystem-wide verbose-debug config option onto this
 * driver's local debug switch. */
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
#define OHCI1394_DEBUG
#endif

/* Make sure we provide our own DBGMSG definition below. */
#ifdef DBGMSG
#undef DBGMSG
#endif

/* Per-card debug message; compiled out unless OHCI1394_DEBUG is set.
 * NOTE: expands to a reference to a variable named 'ohci', which must
 * be in scope at every call site. */
#ifdef OHCI1394_DEBUG
#define DBGMSG(fmt, args...) \
printk(KERN_INFO "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
#else
#define DBGMSG(fmt, args...)
#endif

/* DMA mapping debug helpers: when enabled, every map/unmap logs and
 * adjusts global_outstanding_dmas, so a non-zero count at shutdown
 * indicates leaked DMA mappings. */
#ifdef CONFIG_IEEE1394_OHCI_DMA_DEBUG
#define OHCI_DMA_ALLOC(fmt, args...) \
	HPSB_ERR("%s(%s)alloc(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
		++global_outstanding_dmas, ## args)
#define OHCI_DMA_FREE(fmt, args...) \
	HPSB_ERR("%s(%s)free(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
		--global_outstanding_dmas, ## args)
static int global_outstanding_dmas = 0;
#else
#define OHCI_DMA_ALLOC(fmt, args...)
#define OHCI_DMA_FREE(fmt, args...)
#endif

/* print general (card independent) information */
#define PRINT_G(level, fmt, args...) \
printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)

/* print card specific information (needs 'ohci' in scope, like DBGMSG) */
#define PRINT(level, fmt, args...) \
printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
163
/* Driver version banner, printed at probe time; lives in init data. */
static char version[] __devinitdata =
	"$Rev: 1299 $ Ben Collins <bcollins@debian.org>";

/* Module Parameters */
/* phys_dma: module option (default 1), runtime-writable via sysfs
 * (mode 0644); see MODULE_PARM_DESC below. */
static int phys_dma = 1;
module_param(phys_dma, int, 0644);
MODULE_PARM_DESC(phys_dma, "Enable physical dma (default = 1).");

/* Forward declarations for the DMA context helpers defined later in
 * this file. */
static void dma_trm_tasklet(unsigned long data);
static void dma_trm_reset(struct dma_trm_ctx *d);

static int alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
			     enum context_type type, int ctx, int num_desc,
			     int buf_size, int split_buf_size, int context_base);
static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d);
static void free_dma_rcv_ctx(struct dma_rcv_ctx *d);

static int alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
			     enum context_type type, int ctx, int num_desc,
			     int context_base);

static void ohci1394_pci_remove(struct pci_dev *pdev);
186
187 #ifndef __LITTLE_ENDIAN
/* Number of header quadlets to byte-swap, indexed by transaction code
 * (tcode).  Used by packet_swab() below on big-endian machines.  A
 * value of 0 marks a tcode with nothing to swap.  Note the table has
 * only 12 entries while tcode is a 4-bit field; callers must bounds
 * check before indexing. */
static unsigned hdr_sizes[] =
{
	3,	/* TCODE_WRITEQ */
	4,	/* TCODE_WRITEB */
	3,	/* TCODE_WRITE_RESPONSE */
	0,	/* ??? */
	3,	/* TCODE_READQ */
	4,	/* TCODE_READB */
	3,	/* TCODE_READQ_RESPONSE */
	4,	/* TCODE_READB_RESPONSE */
	1,	/* TCODE_CYCLE_START (???) */
	4,	/* TCODE_LOCK_REQUEST */
	2,	/* TCODE_ISO_DATA */
	4,	/* TCODE_LOCK_RESPONSE */
};
203
204 /* Swap headers */
205 static inline void packet_swab(quadlet_t *data, int tcode)
206 {
207         size_t size = hdr_sizes[tcode];
208
209         if (tcode > TCODE_LOCK_RESPONSE || hdr_sizes[tcode] == 0)
210                 return;
211
212         while (size--)
213                 data[size] = swab32(data[size]);
214 }
215 #else
216 /* Don't waste cycles on same sex byte swaps */
217 #define packet_swab(w,x)
218 #endif /* !LITTLE_ENDIAN */
219
220 /***********************************
221  * IEEE-1394 functionality section *
222  ***********************************/
223
224 static u8 get_phy_reg(struct ti_ohci *ohci, u8 addr)
225 {
226         int i;
227         unsigned long flags;
228         quadlet_t r;
229
230         spin_lock_irqsave (&ohci->phy_reg_lock, flags);
231
232         reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000);
233
234         for (i = 0; i < OHCI_LOOP_COUNT; i++) {
235                 if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000)
236                         break;
237
238                 mdelay(1);
239         }
240
241         r = reg_read(ohci, OHCI1394_PhyControl);
242
243         if (i >= OHCI_LOOP_COUNT)
244                 PRINT (KERN_ERR, "Get PHY Reg timeout [0x%08x/0x%08x/%d]",
245                        r, r & 0x80000000, i);
246
247         spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);
248
249         return (r & 0x00ff0000) >> 16;
250 }
251
252 static void set_phy_reg(struct ti_ohci *ohci, u8 addr, u8 data)
253 {
254         int i;
255         unsigned long flags;
256         u32 r = 0;
257
258         spin_lock_irqsave (&ohci->phy_reg_lock, flags);
259
260         reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000);
261
262         for (i = 0; i < OHCI_LOOP_COUNT; i++) {
263                 r = reg_read(ohci, OHCI1394_PhyControl);
264                 if (!(r & 0x00004000))
265                         break;
266
267                 mdelay(1);
268         }
269
270         if (i == OHCI_LOOP_COUNT)
271                 PRINT (KERN_ERR, "Set PHY Reg timeout [0x%08x/0x%08x/%d]",
272                        r, r & 0x00004000, i);
273
274         spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);
275
276         return;
277 }
278
279 /* Or's our value into the current value */
280 static void set_phy_reg_mask(struct ti_ohci *ohci, u8 addr, u8 data)
281 {
282         u8 old;
283
284         old = get_phy_reg (ohci, addr);
285         old |= data;
286         set_phy_reg (ohci, addr, old);
287
288         return;
289 }
290
/* Parse the self-ID buffer the controller filled during the last bus
 * reset and hand each valid self-ID packet to the ieee1394 core.
 * On a reception error, force another bus reset (up to
 * OHCI1394_MAX_SELF_ID_ERRORS times) by setting the IBR bit in PHY
 * register 1.  'isroot' is currently unused here. */
static void handle_selfid(struct ti_ohci *ohci, struct hpsb_host *host,
				int phyid, int isroot)
{
	quadlet_t *q = ohci->selfid_buf_cpu;
	quadlet_t self_id_count=reg_read(ohci, OHCI1394_SelfIDCount);
	size_t size;
	quadlet_t q0, q1;

	/* Check status of self-id reception */

	/* Some chips deliver the buffer little-endian regardless of
	 * host endianness; selfid_swap says whether we must convert. */
	if (ohci->selfid_swap)
		q0 = le32_to_cpu(q[0]);
	else
		q0 = q[0];

	/* Error bit set, or generation count in SelfIDCount disagrees
	 * with the generation stored in the buffer header quadlet. */
	if ((self_id_count & 0x80000000) ||
	    ((self_id_count & 0x00FF0000) != (q0 & 0x00FF0000))) {
		PRINT(KERN_ERR,
		      "Error in reception of SelfID packets [0x%08x/0x%08x] (count: %d)",
		      self_id_count, q0, ohci->self_id_errors);

		/* Tip by James Goodwin <jamesg@Filanet.com>:
		 * We had an error, generate another bus reset in response.  */
		if (ohci->self_id_errors<OHCI1394_MAX_SELF_ID_ERRORS) {
			set_phy_reg_mask (ohci, 1, 0x40);
			ohci->self_id_errors++;
		} else {
			PRINT(KERN_ERR,
			      "Too many errors on SelfID error reception, giving up!");
		}
		return;
	}

	/* SelfID Ok, reset error counter. */
	ohci->self_id_errors = 0;

	/* Size in quadlets, minus the buffer header quadlet we skip
	 * with q++ below.  Self-ID data arrives as pairs (packet,
	 * inverted-packet), so size is expected to be even. */
	size = ((self_id_count & 0x00001FFC) >> 2) - 1;
	q++;

	while (size > 0) {
		if (ohci->selfid_swap) {
			q0 = le32_to_cpu(q[0]);
			q1 = le32_to_cpu(q[1]);
		} else {
			q0 = q[0];
			q1 = q[1];
		}

		/* The second quadlet is the bit-inverse of the first;
		 * use it as an integrity check before forwarding. */
		if (q0 == ~q1) {
			DBGMSG ("SelfID packet 0x%x received", q0);
			hpsb_selfid_received(host, cpu_to_be32(q0));
			if (((q0 & 0x3f000000) >> 24) == phyid)
				DBGMSG ("SelfID for this node is 0x%08x", q0);
		} else {
			PRINT(KERN_ERR,
			      "SelfID is inconsistent [0x%08x/0x%08x]", q0, q1);
		}
		q += 2;
		size -= 2;
	}

	DBGMSG("SelfID complete");

	return;
}
356
357 static void ohci_soft_reset(struct ti_ohci *ohci) {
358         int i;
359
360         reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
361
362         for (i = 0; i < OHCI_LOOP_COUNT; i++) {
363                 if (!(reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_softReset))
364                         break;
365                 mdelay(1);
366         }
367         DBGMSG ("Soft reset finished");
368 }
369
370
371 /* Generate the dma receive prgs and start the context */
/* Generate the dma receive prgs and start the context.
 * Builds a circular INPUT_MORE descriptor chain over the context's
 * pre-allocated buffers, programs the context registers, and sets the
 * context running. */
static void initialize_dma_rcv_ctx(struct dma_rcv_ctx *d, int generate_irq)
{
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
	int i;

	/* The context must be stopped before its program is rewritten */
	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	for (i=0; i<d->num_desc; i++) {
		u32 c;

		c = DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE | DMA_CTL_BRANCH;
		if (generate_irq)
			c |= DMA_CTL_IRQ;

		d->prg_cpu[i]->control = cpu_to_le32(c | d->buf_size);

		/* End of descriptor list?  Each descriptor branches to
		 * the next; the last wraps back to the first (without
		 * the Z=1 valid-branch bit) to close the ring. */
		if (i + 1 < d->num_desc) {
			d->prg_cpu[i]->branchAddress =
				cpu_to_le32((d->prg_bus[i+1] & 0xfffffff0) | 0x1);
		} else {
			d->prg_cpu[i]->branchAddress =
				cpu_to_le32((d->prg_bus[0] & 0xfffffff0));
		}

		d->prg_cpu[i]->address = cpu_to_le32(d->buf_bus[i]);
		/* status holds resCount = full buffer size (nothing
		 * received yet) */
		d->prg_cpu[i]->status = cpu_to_le32(d->buf_size);
	}

	/* Start consuming from the first buffer at offset 0 */
	d->buf_ind = 0;
	d->buf_offset = 0;

	if (d->type == DMA_CTX_ISO) {
		/* Clear contextControl */
		reg_write(ohci, d->ctrlClear, 0xffffffff);

		/* Set bufferFill, isochHeader, multichannel for IR context */
		reg_write(ohci, d->ctrlSet, 0xd0000000);

		/* Set the context match register to match on all tags */
		reg_write(ohci, d->ctxtMatch, 0xf0000000);

		/* Clear the multi channel mask high and low registers */
		reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, 0xffffffff);

		/* Set up isoRecvIntMask to generate interrupts */
		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << d->ctx);
	}

	/* Tell the controller where the first AR program is */
	reg_write(ohci, d->cmdPtr, d->prg_bus[0] | 0x1);

	/* Run context */
	reg_write(ohci, d->ctrlSet, 0x00008000);

	DBGMSG("Receive DMA ctx=%d initialized", d->ctx);
}
430
431 /* Initialize the dma transmit context */
432 static void initialize_dma_trm_ctx(struct dma_trm_ctx *d)
433 {
434         struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
435
436         /* Stop the context */
437         ohci1394_stop_context(ohci, d->ctrlClear, NULL);
438
439         d->prg_ind = 0;
440         d->sent_ind = 0;
441         d->free_prgs = d->num_desc;
442         d->branchAddrPtr = NULL;
443         INIT_LIST_HEAD(&d->fifo_list);
444         INIT_LIST_HEAD(&d->pending_list);
445
446         if (d->type == DMA_CTX_ISO) {
447                 /* enable interrupts */
448                 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << d->ctx);
449         }
450
451         DBGMSG("Transmit DMA ctx=%d initialized", d->ctx);
452 }
453
454 /* Count the number of available iso contexts */
455 static int get_nb_iso_ctx(struct ti_ohci *ohci, int reg)
456 {
457         int i,ctx=0;
458         u32 tmp;
459
460         reg_write(ohci, reg, 0xffffffff);
461         tmp = reg_read(ohci, reg);
462
463         DBGMSG("Iso contexts reg: %08x implemented: %08x", reg, tmp);
464
465         /* Count the number of contexts */
466         for (i=0; i<32; i++) {
467                 if (tmp & 1) ctx++;
468                 tmp >>= 1;
469         }
470         return ctx;
471 }
472
473 /* Global initialization */
/* Global initialization: bring the controller from soft-reset state to
 * a fully operating link.  The order of register programming below is
 * deliberate (e.g. interrupts are enabled only after the DMA contexts
 * are initialized, and the link is enabled last before the banner). */
static void ohci_initialize(struct ti_ohci *ohci)
{
	char irq_buf[16];
	quadlet_t buf;
	int num_ports, i;

	spin_lock_init(&ohci->phy_reg_lock);
	spin_lock_init(&ohci->event_lock);

	/* Put some defaults to these undefined bus options */
	buf = reg_read(ohci, OHCI1394_BusOptions);
	buf |=  0x60000000; /* Enable CMC and ISC */
	if (hpsb_disable_irm)
		buf &= ~0x80000000;
	else
		buf |=  0x80000000; /* Enable IRMC */
	buf &= ~0x00ff0000; /* XXX: Set cyc_clk_acc to zero for now */
	buf &= ~0x18000000; /* Disable PMC and BMC */
	reg_write(ohci, OHCI1394_BusOptions, buf);

	/* Set the bus number */
	reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0);

	/* Enable posted writes */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_postedWriteEnable);

	/* Clear link control register */
	reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);

	/* Enable cycle timer and cycle master and set the IRM
	 * contender bit in our self ID packets if appropriate. */
	reg_write(ohci, OHCI1394_LinkControlSet,
		  OHCI1394_LinkControl_CycleTimerEnable |
		  OHCI1394_LinkControl_CycleMaster);
	i = get_phy_reg(ohci, 4) | PHY_04_LCTRL;
	if (hpsb_disable_irm)
		i &= ~PHY_04_CONTENDER;
	else
		i |= PHY_04_CONTENDER;
	set_phy_reg(ohci, 4, i);

	/* Set up self-id dma buffer */
	reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->selfid_buf_bus);

	/* enable self-id and phys */
	reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_RcvSelfID |
		  OHCI1394_LinkControl_RcvPhyPkt);

	/* Set the Config ROM mapping register */
	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->csr_config_rom_bus);

	/* Now get our max packet size: BusOptions.max_rec encodes it
	 * as 2^(max_rec+1) bytes */
	ohci->max_packet_size =
		1<<(((reg_read(ohci, OHCI1394_BusOptions)>>12)&0xf)+1);

	/* Don't accept phy packets into AR request context */
	reg_write(ohci, OHCI1394_LinkControlClear, 0x00000400);

	/* Clear the iso receive interrupt mask and pending events */
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);

	/* Clear the iso transmit interrupt mask and pending events */
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);

	/* Initialize AR dma */
	initialize_dma_rcv_ctx(&ohci->ar_req_context, 0);
	initialize_dma_rcv_ctx(&ohci->ar_resp_context, 0);

	/* Initialize AT dma */
	initialize_dma_trm_ctx(&ohci->at_req_context);
	initialize_dma_trm_ctx(&ohci->at_resp_context);

	/* Initialize IR Legacy DMA channel mask */
	ohci->ir_legacy_channels = 0;

	/*
	 * Accept AT requests from all nodes. This probably
	 * will have to be controlled from the subsystem
	 * on a per node basis.
	 */
	reg_write(ohci,OHCI1394_AsReqFilterHiSet, 0x80000000);

	/* Specify AT retries */
	reg_write(ohci, OHCI1394_ATRetries,
		  OHCI1394_MAX_AT_REQ_RETRIES |
		  (OHCI1394_MAX_AT_RESP_RETRIES<<4) |
		  (OHCI1394_MAX_PHYS_RESP_RETRIES<<8));

	/* We don't want hardware swapping */
	reg_write(ohci, OHCI1394_HCControlClear, OHCI1394_HCControl_noByteSwap);

	/* Enable interrupts */
	reg_write(ohci, OHCI1394_IntMaskSet,
		  OHCI1394_unrecoverableError |
		  OHCI1394_masterIntEnable |
		  OHCI1394_busReset |
		  OHCI1394_selfIDComplete |
		  OHCI1394_RSPkt |
		  OHCI1394_RQPkt |
		  OHCI1394_respTxComplete |
		  OHCI1394_reqTxComplete |
		  OHCI1394_isochRx |
		  OHCI1394_isochTx |
		  OHCI1394_cycleInconsistent);

	/* Enable link */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable);

	buf = reg_read(ohci, OHCI1394_Version);
#ifndef __sparc__
	sprintf (irq_buf, "%d", ohci->dev->irq);
#else
	sprintf (irq_buf, "%s", __irq_itoa(ohci->dev->irq));
#endif
	/* OHCI version register encodes the spec revision in BCD */
	PRINT(KERN_INFO, "OHCI-1394 %d.%d (PCI): IRQ=[%s]  "
	      "MMIO=[%lx-%lx]  Max Packet=[%d]",
	      ((((buf) >> 16) & 0xf) + (((buf) >> 20) & 0xf) * 10),
	      ((((buf) >> 4) & 0xf) + ((buf) & 0xf) * 10), irq_buf,
	      pci_resource_start(ohci->dev, 0),
	      pci_resource_start(ohci->dev, 0) + OHCI1394_REGISTER_SIZE - 1,
	      ohci->max_packet_size);

	/* Check all of our ports to make sure that if anything is
	 * connected, we enable that port.  PHY reg 7 selects the port,
	 * reg 8 reports/controls its status (bit 0x20 = connected,
	 * bit 0 = disabled). */
	num_ports = get_phy_reg(ohci, 2) & 0xf;
	for (i = 0; i < num_ports; i++) {
		unsigned int status;

		set_phy_reg(ohci, 7, i);
		status = get_phy_reg(ohci, 8);

		if (status & 0x20)
			set_phy_reg(ohci, 8, status & ~1);
	}

	/* Serial EEPROM Sanity check. */
	if ((ohci->max_packet_size < 512) ||
	    (ohci->max_packet_size > 4096)) {
		/* Serial EEPROM contents are suspect, set a sane max packet
		 * size and print the raw contents for bug reports if verbose
		 * debug is enabled. */
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
		int i;
#endif

		PRINT(KERN_DEBUG, "Serial EEPROM has suspicious values, "
		      "attempting to setting max_packet_size to 512 bytes");
		reg_write(ohci, OHCI1394_BusOptions,
			  (reg_read(ohci, OHCI1394_BusOptions) & 0xf007) | 0x8002);
		ohci->max_packet_size = 512;
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
		PRINT(KERN_DEBUG, "    EEPROM Present: %d",
		      (reg_read(ohci, OHCI1394_Version) >> 24) & 0x1);
		reg_write(ohci, OHCI1394_GUID_ROM, 0x80000000);

		/* Wait (bounded) for the GUID ROM address reset to finish */
		for (i = 0;
		     ((i < 1000) &&
		      (reg_read(ohci, OHCI1394_GUID_ROM) & 0x80000000)); i++)
			udelay(10);

		/* Dump the first 32 EEPROM bytes for the bug report */
		for (i = 0; i < 0x20; i++) {
			reg_write(ohci, OHCI1394_GUID_ROM, 0x02000000);
			PRINT(KERN_DEBUG, "    EEPROM %02x: %02x", i,
			      (reg_read(ohci, OHCI1394_GUID_ROM) >> 16) & 0xff);
		}
#endif
	}
}
644
645 /*
646  * Insert a packet in the DMA fifo and generate the DMA prg
647  * FIXME: rewrite the program in order to accept packets crossing
648  *        page boundaries.
649  *        check also that a single dma descriptor doesn't cross a
650  *        page boundary.
651  */
/* Build the OUTPUT_MORE/OUTPUT_LAST descriptor program for 'packet'
 * in the next free slot of context 'd', link it onto the previous
 * program via branchAddrPtr, and queue the packet on d->fifo_list.
 * NOTE(review): callers appear to hold d->lock (see dma_trm_flush,
 * which documents that requirement) — confirm before adding callers. */
static void insert_packet(struct ti_ohci *ohci,
			  struct dma_trm_ctx *d, struct hpsb_packet *packet)
{
	u32 cycleTimer;
	int idx = d->prg_ind;

	DBGMSG("Inserting packet for node " NODE_BUS_FMT
	       ", tlabel=%d, tcode=0x%x, speed=%d",
	       NODE_BUS_ARGS(ohci->host, packet->node_id), packet->tlabel,
	       packet->tcode, packet->speed_code);

	d->prg_cpu[idx]->begin.address = 0;
	d->prg_cpu[idx]->begin.branchAddress = 0;

	if (d->type == DMA_CTX_ASYNC_RESP) {
		/*
		 * For response packets, we need to put a timeout value in
		 * the 16 lower bits of the status... let's try 1 sec timeout
		 */
		cycleTimer = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		d->prg_cpu[idx]->begin.status = cpu_to_le32(
			(((((cycleTimer>>25)&0x7)+1)&0x7)<<13) |
			((cycleTimer&0x01fff000)>>12));

		DBGMSG("cycleTimer: %08x timeStamp: %08x",
		       cycleTimer, d->prg_cpu[idx]->begin.status);
	} else
		d->prg_cpu[idx]->begin.status = 0;

	if ( (packet->type == hpsb_async) || (packet->type == hpsb_raw) ) {

		if (packet->type == hpsb_raw) {
			/* Raw (PHY) packet: tcode 0xE plus the two
			 * header quadlets, pre-swapped to little endian */
			d->prg_cpu[idx]->data[0] = cpu_to_le32(OHCI1394_TCODE_PHY<<4);
			d->prg_cpu[idx]->data[1] = cpu_to_le32(packet->header[0]);
			d->prg_cpu[idx]->data[2] = cpu_to_le32(packet->header[1]);
		} else {
			/* First quadlet carries the speed code for the
			 * controller plus the low header bits */
			d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
				(packet->header[0] & 0xFFFF);

			if (packet->tcode == TCODE_ISO_DATA) {
				/* Sending an async stream packet */
				d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
			} else {
				/* Sending a normal async request or response */
				d->prg_cpu[idx]->data[1] =
					(packet->header[1] & 0xFFFF) |
					(packet->header[0] & 0xFFFF0000);
				d->prg_cpu[idx]->data[2] = packet->header[2];
				d->prg_cpu[idx]->data[3] = packet->header[3];
			}
			/* no-op on little-endian hosts */
			packet_swab(d->prg_cpu[idx]->data, packet->tcode);
		}

		if (packet->data_size) { /* block transmit */
			/* begin: OUTPUT_MORE-Immediate for the header
			 * (8 bytes for stream packets, 16 otherwise);
			 * end: OUTPUT_LAST for the payload */
			if (packet->tcode == TCODE_STREAM_DATA){
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_MORE |
						    DMA_CTL_IMMEDIATE | 0x8);
			} else {
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_MORE |
						    DMA_CTL_IMMEDIATE | 0x10);
			}
			d->prg_cpu[idx]->end.control =
				cpu_to_le32(DMA_CTL_OUTPUT_LAST |
					    DMA_CTL_IRQ |
					    DMA_CTL_BRANCH |
					    packet->data_size);
			/*
			 * Check that the packet data buffer
			 * does not cross a page boundary.
			 *
			 * XXX Fix this some day. eth1394 seems to trigger
			 * it, but ignoring it doesn't seem to cause a
			 * problem.
			 */
#if 0
			if (cross_bound((unsigned long)packet->data,
					packet->data_size)>0) {
				/* FIXME: do something about it */
				PRINT(KERN_ERR,
				      "%s: packet data addr: %p size %Zd bytes "
				      "cross page boundary", __FUNCTION__,
				      packet->data, packet->data_size);
			}
#endif
			/* Mapping is undone when transmission completes
			 * (not in this function) */
			d->prg_cpu[idx]->end.address = cpu_to_le32(
				pci_map_single(ohci->dev, packet->data,
					       packet->data_size,
					       PCI_DMA_TODEVICE));
			OHCI_DMA_ALLOC("single, block transmit packet");

			d->prg_cpu[idx]->end.branchAddress = 0;
			d->prg_cpu[idx]->end.status = 0;
			/* Chain the previous program's branch to us
			 * (Z=3: this program has three descriptors) */
			if (d->branchAddrPtr)
				*(d->branchAddrPtr) =
					cpu_to_le32(d->prg_bus[idx] | 0x3);
			d->branchAddrPtr =
				&(d->prg_cpu[idx]->end.branchAddress);
		} else { /* quadlet transmit */
			if (packet->type == hpsb_raw)
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_LAST |
						    DMA_CTL_IMMEDIATE |
						    DMA_CTL_IRQ |
						    DMA_CTL_BRANCH |
						    (packet->header_size + 4));
			else
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_LAST |
						    DMA_CTL_IMMEDIATE |
						    DMA_CTL_IRQ |
						    DMA_CTL_BRANCH |
						    packet->header_size);

			/* Z=2: single immediate descriptor program */
			if (d->branchAddrPtr)
				*(d->branchAddrPtr) =
					cpu_to_le32(d->prg_bus[idx] | 0x2);
			d->branchAddrPtr =
				&(d->prg_cpu[idx]->begin.branchAddress);
		}

	} else { /* iso packet */
		d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
			(packet->header[0] & 0xFFFF);
		d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
		packet_swab(d->prg_cpu[idx]->data, packet->tcode);

		/* 8-byte immediate header, then the payload */
		d->prg_cpu[idx]->begin.control =
			cpu_to_le32(DMA_CTL_OUTPUT_MORE |
				    DMA_CTL_IMMEDIATE | 0x8);
		d->prg_cpu[idx]->end.control =
			cpu_to_le32(DMA_CTL_OUTPUT_LAST |
				    DMA_CTL_UPDATE |
				    DMA_CTL_IRQ |
				    DMA_CTL_BRANCH |
				    packet->data_size);
		d->prg_cpu[idx]->end.address = cpu_to_le32(
				pci_map_single(ohci->dev, packet->data,
				packet->data_size, PCI_DMA_TODEVICE));
		OHCI_DMA_ALLOC("single, iso transmit packet");

		d->prg_cpu[idx]->end.branchAddress = 0;
		d->prg_cpu[idx]->end.status = 0;
		DBGMSG("Iso xmit context info: header[%08x %08x]\n"
		       "                       begin=%08x %08x %08x %08x\n"
		       "                             %08x %08x %08x %08x\n"
		       "                       end  =%08x %08x %08x %08x",
		       d->prg_cpu[idx]->data[0], d->prg_cpu[idx]->data[1],
		       d->prg_cpu[idx]->begin.control,
		       d->prg_cpu[idx]->begin.address,
		       d->prg_cpu[idx]->begin.branchAddress,
		       d->prg_cpu[idx]->begin.status,
		       d->prg_cpu[idx]->data[0],
		       d->prg_cpu[idx]->data[1],
		       d->prg_cpu[idx]->data[2],
		       d->prg_cpu[idx]->data[3],
		       d->prg_cpu[idx]->end.control,
		       d->prg_cpu[idx]->end.address,
		       d->prg_cpu[idx]->end.branchAddress,
		       d->prg_cpu[idx]->end.status);
		if (d->branchAddrPtr)
			*(d->branchAddrPtr) = cpu_to_le32(d->prg_bus[idx] | 0x3);
		d->branchAddrPtr = &(d->prg_cpu[idx]->end.branchAddress);
	}
	d->free_prgs--;

	/* queue the packet in the appropriate context queue */
	list_add_tail(&packet->driver_list, &d->fifo_list);
	d->prg_ind = (d->prg_ind + 1) % d->num_desc;
}
823
/*
 * This function fills the FIFO with the (eventual) pending packets
 * and runs or wakes up the DMA prg if necessary.
 *
 * The function MUST be called with the d->lock held.
 */
static void dma_trm_flush(struct ti_ohci *ohci, struct dma_trm_ctx *d)
{
	struct hpsb_packet *packet, *ptmp;
	/* idx/z describe the first descriptor program queued by this call:
	 * if the context has to be (re)started, the command pointer below
	 * must point at it with the matching Z (descriptor count) value */
	int idx = d->prg_ind;
	int z = 0;

	/* insert the packets into the dma fifo */
	list_for_each_entry_safe(packet, ptmp, &d->pending_list, driver_list) {
		if (!d->free_prgs)
			break;

		/* For the first packet only: a packet carrying a data block
		 * uses a 3-descriptor program (Z=3), header-only uses 2 */
		if (!z)
			z = (packet->data_size) ? 3 : 2;

		/* Insert the packet */
		list_del_init(&packet->driver_list);
		insert_packet(ohci, d, packet);
	}

	/* Nothing must have been done, either no free_prgs or no packets */
	if (z == 0)
		return;

	/* Is the context running ? (should be unless it is
	   the first packet to be sent in this context) */
	if (!(reg_read(ohci, d->ctrlSet) & 0x8000)) {
		u32 nodeId = reg_read(ohci, OHCI1394_NodeID);

		DBGMSG("Starting transmit DMA ctx=%d",d->ctx);
		/* program the command pointer BEFORE setting RUN below */
		reg_write(ohci, d->cmdPtr, d->prg_bus[idx] | z);

		/* Check that the node id is valid, and not 63 */
		if (!(nodeId & 0x80000000) || (nodeId & 0x3f) == 63)
			PRINT(KERN_ERR, "Running dma failed because Node ID is not valid");
		else
			reg_write(ohci, d->ctrlSet, 0x8000);	/* set RUN */
	} else {
		/* Wake up the dma context if necessary */
		if (!(reg_read(ohci, d->ctrlSet) & 0x400))
			DBGMSG("Waking transmit DMA ctx=%d",d->ctx);

		/* do this always, to avoid race condition */
		reg_write(ohci, d->ctrlSet, 0x1000);	/* set WAKE */
	}

	return;
}
878
879 /* Transmission of an async or iso packet */
880 static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
881 {
882         struct ti_ohci *ohci = host->hostdata;
883         struct dma_trm_ctx *d;
884         unsigned long flags;
885
886         if (packet->data_size > ohci->max_packet_size) {
887                 PRINT(KERN_ERR,
888                       "Transmit packet size %Zd is too big",
889                       packet->data_size);
890                 return -EOVERFLOW;
891         }
892
893         /* Decide whether we have an iso, a request, or a response packet */
894         if (packet->type == hpsb_raw)
895                 d = &ohci->at_req_context;
896         else if ((packet->tcode == TCODE_ISO_DATA) && (packet->type == hpsb_iso)) {
897                 /* The legacy IT DMA context is initialized on first
898                  * use.  However, the alloc cannot be run from
899                  * interrupt context, so we bail out if that is the
900                  * case. I don't see anyone sending ISO packets from
901                  * interrupt context anyway... */
902
903                 if (ohci->it_legacy_context.ohci == NULL) {
904                         if (in_interrupt()) {
905                                 PRINT(KERN_ERR,
906                                       "legacy IT context cannot be initialized during interrupt");
907                                 return -EINVAL;
908                         }
909
910                         if (alloc_dma_trm_ctx(ohci, &ohci->it_legacy_context,
911                                               DMA_CTX_ISO, 0, IT_NUM_DESC,
912                                               OHCI1394_IsoXmitContextBase) < 0) {
913                                 PRINT(KERN_ERR,
914                                       "error initializing legacy IT context");
915                                 return -ENOMEM;
916                         }
917
918                         initialize_dma_trm_ctx(&ohci->it_legacy_context);
919                 }
920
921                 d = &ohci->it_legacy_context;
922         } else if ((packet->tcode & 0x02) && (packet->tcode != TCODE_ISO_DATA))
923                 d = &ohci->at_resp_context;
924         else
925                 d = &ohci->at_req_context;
926
927         spin_lock_irqsave(&d->lock,flags);
928
929         list_add_tail(&packet->driver_list, &d->pending_list);
930
931         dma_trm_flush(ohci, d);
932
933         spin_unlock_irqrestore(&d->lock,flags);
934
935         return 0;
936 }
937
/*
 * hpsb_host_driver devctl hook: miscellaneous host control commands.
 * Handles the bus reset variants (via PHY register 1/5 bit twiddling),
 * cycle counter access, cycle master enable/disable, request
 * cancellation and the legacy ISO listen/unlisten channel interface.
 * Returns 0 on success (GET_CYCLE_COUNTER returns the raw timer value),
 * negative on error.
 */
static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
{
	struct ti_ohci *ohci = host->hostdata;
	int retval = 0;
	unsigned long flags;
	int phy_reg;

	switch (cmd) {
	case RESET_BUS:
		/* Each variant sets IBR (PHY reg 1, bit 0x40) for a long
		 * reset or ISBR (PHY reg 5, bit 0x40) for a short reset,
		 * and optionally forces/clears the root holdoff bit RHB
		 * (PHY reg 1, bit 0x80). */
		switch (arg) {
		case SHORT_RESET:
			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 1, phy_reg); /* set IBR */
			break;
		case SHORT_RESET_NO_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			if (phy_reg & 0x80) {
				phy_reg &= ~0x80;
				set_phy_reg(ohci, 1, phy_reg); /* clear RHB */
			}

			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET_NO_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg &= ~0x80;
			phy_reg |= 0x40;
			set_phy_reg(ohci, 1, phy_reg); /* clear RHB, set IBR */
			break;
		case SHORT_RESET_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			if (!(phy_reg & 0x80)) {
				phy_reg |= 0x80;
				set_phy_reg(ohci, 1, phy_reg); /* set RHB */
			}

			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg |= 0xc0;
			set_phy_reg(ohci, 1, phy_reg); /* set RHB and IBR */
			break;
		default:
			retval = -1;
		}
		break;

	case GET_CYCLE_COUNTER:
		/* raw isochronous cycle timer value is returned directly */
		retval = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		break;

	case SET_CYCLE_COUNTER:
		reg_write(ohci, OHCI1394_IsochronousCycleTimer, arg);
		break;

	case SET_BUS_ID:
		/* not supported by this driver */
		PRINT(KERN_ERR, "devctl command SET_BUS_ID err");
		break;

	case ACT_CYCLE_MASTER:
		if (arg) {
			/* check if we are root and other nodes are present */
			u32 nodeId = reg_read(ohci, OHCI1394_NodeID);
			if ((nodeId & (1<<30)) && (nodeId & 0x3f)) {
				/*
				 * enable cycleTimer, cycleMaster
				 */
				DBGMSG("Cycle master enabled");
				reg_write(ohci, OHCI1394_LinkControlSet,
					  OHCI1394_LinkControl_CycleTimerEnable |
					  OHCI1394_LinkControl_CycleMaster);
			}
		} else {
			/* disable cycleTimer, cycleMaster, cycleSource */
			reg_write(ohci, OHCI1394_LinkControlClear,
				  OHCI1394_LinkControl_CycleTimerEnable |
				  OHCI1394_LinkControl_CycleMaster |
				  OHCI1394_LinkControl_CycleSource);
		}
		break;

	case CANCEL_REQUESTS:
		/* flush both async transmit contexts */
		DBGMSG("Cancel request received");
		dma_trm_reset(&ohci->at_req_context);
		dma_trm_reset(&ohci->at_resp_context);
		break;

	case ISO_LISTEN_CHANNEL:
	{
		u64 mask;
		struct dma_rcv_ctx *d = &ohci->ir_legacy_context;
		int ir_legacy_active;

		/* NOTE(review): "IS0" in the messages below looks like a
		 * typo for "ISO"; left untouched here (comments only). */
		if (arg<0 || arg>63) {
			PRINT(KERN_ERR,
			      "%s: IS0 listen channel %d is out of range",
			      __FUNCTION__, arg);
			return -EFAULT;
		}

		mask = (u64)0x1<<arg;

		spin_lock_irqsave(&ohci->IR_channel_lock, flags);

		if (ohci->ISO_channel_usage & mask) {
			PRINT(KERN_ERR,
			      "%s: IS0 listen channel %d is already used",
			      __FUNCTION__, arg);
			spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
			return -EFAULT;
		}

		ir_legacy_active = ohci->ir_legacy_channels;

		ohci->ISO_channel_usage |= mask;
		ohci->ir_legacy_channels |= mask;

		/* NOTE(review): the lock is dropped here (tasklet
		 * registration may not be done under it) and retaken below;
		 * the channel is already marked used so concurrent listens
		 * on the same channel are still rejected. */
		spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);

		if (!ir_legacy_active) {
			/* first legacy channel: claim an IR DMA context */
			if (ohci1394_register_iso_tasklet(ohci,
					  &ohci->ir_legacy_tasklet) < 0) {
				PRINT(KERN_ERR, "No IR DMA context available");
				return -EBUSY;
			}

			/* the IR context can be assigned to any DMA context
			 * by ohci1394_register_iso_tasklet */
			d->ctx = ohci->ir_legacy_tasklet.context;
			d->ctrlSet = OHCI1394_IsoRcvContextControlSet +
				32*d->ctx;
			d->ctrlClear = OHCI1394_IsoRcvContextControlClear +
				32*d->ctx;
			d->cmdPtr = OHCI1394_IsoRcvCommandPtr + 32*d->ctx;
			d->ctxtMatch = OHCI1394_IsoRcvContextMatch + 32*d->ctx;

			initialize_dma_rcv_ctx(&ohci->ir_legacy_context, 1);

			PRINT(KERN_ERR, "IR legacy activated");
		}

		spin_lock_irqsave(&ohci->IR_channel_lock, flags);

		/* enable reception on the channel in the multi-channel mask */
		if (arg>31)
			reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet,
				  1<<(arg-32));
		else
			reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet,
				  1<<arg);

		spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
		DBGMSG("Listening enabled on channel %d", arg);
		break;
	}
	case ISO_UNLISTEN_CHANNEL:
	{
		u64 mask;

		if (arg<0 || arg>63) {
			PRINT(KERN_ERR,
			      "%s: IS0 unlisten channel %d is out of range",
			      __FUNCTION__, arg);
			return -EFAULT;
		}

		mask = (u64)0x1<<arg;

		spin_lock_irqsave(&ohci->IR_channel_lock, flags);

		if (!(ohci->ISO_channel_usage & mask)) {
			PRINT(KERN_ERR,
			      "%s: IS0 unlisten channel %d is not used",
			      __FUNCTION__, arg);
			spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
			return -EFAULT;
		}

		ohci->ISO_channel_usage &= ~mask;
		ohci->ir_legacy_channels &= ~mask;

		/* disable reception on the channel in the multi-channel mask */
		if (arg>31)
			reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear,
				  1<<(arg-32));
		else
			reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear,
				  1<<arg);

		spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
		DBGMSG("Listening disabled on channel %d", arg);

		/* last legacy channel gone: stop the receive context */
		if (ohci->ir_legacy_channels == 0) {
			stop_dma_rcv_ctx(&ohci->ir_legacy_context);
			DBGMSG("ISO legacy receive context stopped");
		}

		break;
	}
	default:
		PRINT_G(KERN_ERR, "ohci_devctl cmd %d not implemented yet",
			cmd);
		break;
	}
	return retval;
}
1153
1154 /***********************************
1155  * rawiso ISO reception            *
1156  ***********************************/
1157
1158 /*
1159   We use either buffer-fill or packet-per-buffer DMA mode. The DMA
1160   buffer is split into "blocks" (regions described by one DMA
1161   descriptor). Each block must be one page or less in size, and
1162   must not cross a page boundary.
1163
1164   There is one little wrinkle with buffer-fill mode: a packet that
1165   starts in the final block may wrap around into the first block. But
1166   the user API expects all packets to be contiguous. Our solution is
1167   to keep the very last page of the DMA buffer in reserve - if a
1168   packet spans the gap, we copy its tail into this page.
1169 */
1170
/* per-context state for rawiso reception */
struct ohci_iso_recv {
	/* the host controller this context belongs to */
	struct ti_ohci *ohci;

	/* bottom-half handling; task_active is nonzero once the tasklet
	   has been registered with the controller */
	struct ohci1394_iso_tasklet task;
	int task_active;

	/* hardware DMA mode selected for this context */
	enum { BUFFER_FILL_MODE = 0,
	       PACKET_PER_BUFFER_MODE = 1 } dma_mode;

	/* memory and PCI mapping for the DMA descriptors */
	struct dma_prog_region prog;
	struct dma_cmd *block; /* = (struct dma_cmd*) prog.virt */

	/* how many DMA blocks fit in the buffer */
	unsigned int nblocks;

	/* stride of DMA blocks */
	unsigned int buf_stride;

	/* number of blocks to batch between interrupts */
	int block_irq_interval;

	/* block that DMA will finish next */
	int block_dma;

	/* (buffer-fill only) block that the reader will release next */
	int block_reader;

	/* (buffer-fill only) bytes of buffer the reader has released,
	   less than one block */
	int released_bytes;

	/* (buffer-fill only) buffer offset at which the next packet will appear */
	int dma_offset;

	/* OHCI DMA context control register offsets for this context */
	u32 ContextControlSet;
	u32 ContextControlClear;
	u32 CommandPtr;
	u32 ContextMatch;
};
1212
1213 static void ohci_iso_recv_task(unsigned long data);
1214 static void ohci_iso_recv_stop(struct hpsb_iso *iso);
1215 static void ohci_iso_recv_shutdown(struct hpsb_iso *iso);
1216 static int  ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync);
1217 static void ohci_iso_recv_program(struct hpsb_iso *iso);
1218
1219 static int ohci_iso_recv_init(struct hpsb_iso *iso)
1220 {
1221         struct ti_ohci *ohci = iso->host->hostdata;
1222         struct ohci_iso_recv *recv;
1223         int ctx;
1224         int ret = -ENOMEM;
1225
1226         recv = kmalloc(sizeof(*recv), SLAB_KERNEL);
1227         if (!recv)
1228                 return -ENOMEM;
1229
1230         iso->hostdata = recv;
1231         recv->ohci = ohci;
1232         recv->task_active = 0;
1233         dma_prog_region_init(&recv->prog);
1234         recv->block = NULL;
1235
1236         /* use buffer-fill mode, unless irq_interval is 1
1237            (note: multichannel requires buffer-fill) */
1238
1239         if (((iso->irq_interval == 1 && iso->dma_mode == HPSB_ISO_DMA_OLD_ABI) ||
1240              iso->dma_mode == HPSB_ISO_DMA_PACKET_PER_BUFFER) && iso->channel != -1) {
1241                 recv->dma_mode = PACKET_PER_BUFFER_MODE;
1242         } else {
1243                 recv->dma_mode = BUFFER_FILL_MODE;
1244         }
1245
1246         /* set nblocks, buf_stride, block_irq_interval */
1247
1248         if (recv->dma_mode == BUFFER_FILL_MODE) {
1249                 recv->buf_stride = PAGE_SIZE;
1250
1251                 /* one block per page of data in the DMA buffer, minus the final guard page */
1252                 recv->nblocks = iso->buf_size/PAGE_SIZE - 1;
1253                 if (recv->nblocks < 3) {
1254                         DBGMSG("ohci_iso_recv_init: DMA buffer too small");
1255                         goto err;
1256                 }
1257
1258                 /* iso->irq_interval is in packets - translate that to blocks */
1259                 if (iso->irq_interval == 1)
1260                         recv->block_irq_interval = 1;
1261                 else
1262                         recv->block_irq_interval = iso->irq_interval *
1263                                                         ((recv->nblocks+1)/iso->buf_packets);
1264                 if (recv->block_irq_interval*4 > recv->nblocks)
1265                         recv->block_irq_interval = recv->nblocks/4;
1266                 if (recv->block_irq_interval < 1)
1267                         recv->block_irq_interval = 1;
1268
1269         } else {
1270                 int max_packet_size;
1271
1272                 recv->nblocks = iso->buf_packets;
1273                 recv->block_irq_interval = iso->irq_interval;
1274                 if (recv->block_irq_interval * 4 > iso->buf_packets)
1275                         recv->block_irq_interval = iso->buf_packets / 4;
1276                 if (recv->block_irq_interval < 1)
1277                 recv->block_irq_interval = 1;
1278
1279                 /* choose a buffer stride */
1280                 /* must be a power of 2, and <= PAGE_SIZE */
1281
1282                 max_packet_size = iso->buf_size / iso->buf_packets;
1283
1284                 for (recv->buf_stride = 8; recv->buf_stride < max_packet_size;
1285                     recv->buf_stride *= 2);
1286
1287                 if (recv->buf_stride*iso->buf_packets > iso->buf_size ||
1288                    recv->buf_stride > PAGE_SIZE) {
1289                         /* this shouldn't happen, but anyway... */
1290                         DBGMSG("ohci_iso_recv_init: problem choosing a buffer stride");
1291                         goto err;
1292                 }
1293         }
1294
1295         recv->block_reader = 0;
1296         recv->released_bytes = 0;
1297         recv->block_dma = 0;
1298         recv->dma_offset = 0;
1299
1300         /* size of DMA program = one descriptor per block */
1301         if (dma_prog_region_alloc(&recv->prog,
1302                                  sizeof(struct dma_cmd) * recv->nblocks,
1303                                  recv->ohci->dev))
1304                 goto err;
1305
1306         recv->block = (struct dma_cmd*) recv->prog.kvirt;
1307
1308         ohci1394_init_iso_tasklet(&recv->task,
1309                                   iso->channel == -1 ? OHCI_ISO_MULTICHANNEL_RECEIVE :
1310                                                        OHCI_ISO_RECEIVE,
1311                                   ohci_iso_recv_task, (unsigned long) iso);
1312
1313         if (ohci1394_register_iso_tasklet(recv->ohci, &recv->task) < 0) {
1314                 ret = -EBUSY;
1315                 goto err;
1316         }
1317
1318         recv->task_active = 1;
1319
1320         /* recv context registers are spaced 32 bytes apart */
1321         ctx = recv->task.context;
1322         recv->ContextControlSet = OHCI1394_IsoRcvContextControlSet + 32 * ctx;
1323         recv->ContextControlClear = OHCI1394_IsoRcvContextControlClear + 32 * ctx;
1324         recv->CommandPtr = OHCI1394_IsoRcvCommandPtr + 32 * ctx;
1325         recv->ContextMatch = OHCI1394_IsoRcvContextMatch + 32 * ctx;
1326
1327         if (iso->channel == -1) {
1328                 /* clear multi-channel selection mask */
1329                 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, 0xFFFFFFFF);
1330                 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, 0xFFFFFFFF);
1331         }
1332
1333         /* write the DMA program */
1334         ohci_iso_recv_program(iso);
1335
1336         DBGMSG("ohci_iso_recv_init: %s mode, DMA buffer is %lu pages"
1337                " (%u bytes), using %u blocks, buf_stride %u, block_irq_interval %d",
1338                recv->dma_mode == BUFFER_FILL_MODE ?
1339                "buffer-fill" : "packet-per-buffer",
1340                iso->buf_size/PAGE_SIZE, iso->buf_size,
1341                recv->nblocks, recv->buf_stride, recv->block_irq_interval);
1342
1343         return 0;
1344
1345 err:
1346         ohci_iso_recv_shutdown(iso);
1347         return ret;
1348 }
1349
1350 static void ohci_iso_recv_stop(struct hpsb_iso *iso)
1351 {
1352         struct ohci_iso_recv *recv = iso->hostdata;
1353
1354         /* disable interrupts */
1355         reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << recv->task.context);
1356
1357         /* halt DMA */
1358         ohci1394_stop_context(recv->ohci, recv->ContextControlClear, NULL);
1359 }
1360
1361 static void ohci_iso_recv_shutdown(struct hpsb_iso *iso)
1362 {
1363         struct ohci_iso_recv *recv = iso->hostdata;
1364
1365         if (recv->task_active) {
1366                 ohci_iso_recv_stop(iso);
1367                 ohci1394_unregister_iso_tasklet(recv->ohci, &recv->task);
1368                 recv->task_active = 0;
1369         }
1370
1371         dma_prog_region_free(&recv->prog);
1372         kfree(recv);
1373         iso->hostdata = NULL;
1374 }
1375
1376 /* set up a "gapped" ring buffer DMA program */
1377 static void ohci_iso_recv_program(struct hpsb_iso *iso)
1378 {
1379         struct ohci_iso_recv *recv = iso->hostdata;
1380         int blk;
1381
1382         /* address of 'branch' field in previous DMA descriptor */
1383         u32 *prev_branch = NULL;
1384
1385         for (blk = 0; blk < recv->nblocks; blk++) {
1386                 u32 control;
1387
1388                 /* the DMA descriptor */
1389                 struct dma_cmd *cmd = &recv->block[blk];
1390
1391                 /* offset of the DMA descriptor relative to the DMA prog buffer */
1392                 unsigned long prog_offset = blk * sizeof(struct dma_cmd);
1393
1394                 /* offset of this packet's data within the DMA buffer */
1395                 unsigned long buf_offset = blk * recv->buf_stride;
1396
1397                 if (recv->dma_mode == BUFFER_FILL_MODE) {
1398                         control = 2 << 28; /* INPUT_MORE */
1399                 } else {
1400                         control = 3 << 28; /* INPUT_LAST */
1401                 }
1402
1403                 control |= 8 << 24; /* s = 1, update xferStatus and resCount */
1404
1405                 /* interrupt on last block, and at intervals */
1406                 if (blk == recv->nblocks-1 || (blk % recv->block_irq_interval) == 0) {
1407                         control |= 3 << 20; /* want interrupt */
1408                 }
1409
1410                 control |= 3 << 18; /* enable branch to address */
1411                 control |= recv->buf_stride;
1412
1413                 cmd->control = cpu_to_le32(control);
1414                 cmd->address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, buf_offset));
1415                 cmd->branchAddress = 0; /* filled in on next loop */
1416                 cmd->status = cpu_to_le32(recv->buf_stride);
1417
1418                 /* link the previous descriptor to this one */
1419                 if (prev_branch) {
1420                         *prev_branch = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog, prog_offset) | 1);
1421                 }
1422
1423                 prev_branch = &cmd->branchAddress;
1424         }
1425
1426         /* the final descriptor's branch address and Z should be left at 0 */
1427 }
1428
1429 /* listen or unlisten to a specific channel (multi-channel mode only) */
1430 static void ohci_iso_recv_change_channel(struct hpsb_iso *iso, unsigned char channel, int listen)
1431 {
1432         struct ohci_iso_recv *recv = iso->hostdata;
1433         int reg, i;
1434
1435         if (channel < 32) {
1436                 reg = listen ? OHCI1394_IRMultiChanMaskLoSet : OHCI1394_IRMultiChanMaskLoClear;
1437                 i = channel;
1438         } else {
1439                 reg = listen ? OHCI1394_IRMultiChanMaskHiSet : OHCI1394_IRMultiChanMaskHiClear;
1440                 i = channel - 32;
1441         }
1442
1443         reg_write(recv->ohci, reg, (1 << i));
1444
1445         /* issue a dummy read to force all PCI writes to be posted immediately */
1446         mb();
1447         reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1448 }
1449
1450 static void ohci_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
1451 {
1452         struct ohci_iso_recv *recv = iso->hostdata;
1453         int i;
1454
1455         for (i = 0; i < 64; i++) {
1456                 if (mask & (1ULL << i)) {
1457                         if (i < 32)
1458                                 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoSet, (1 << i));
1459                         else
1460                                 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiSet, (1 << (i-32)));
1461                 } else {
1462                         if (i < 32)
1463                                 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, (1 << i));
1464                         else
1465                                 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, (1 << (i-32)));
1466                 }
1467         }
1468
1469         /* issue a dummy read to force all PCI writes to be posted immediately */
1470         mb();
1471         reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1472 }
1473
/*
 * Program the context control/match registers and start IR DMA.
 * cycle: isochronous cycle to start on, or -1 to start immediately.
 * tag_mask: tag values to match (written to contextMatch bits 28-31).
 * sync: sy field to wait for on the first descriptor, or -1 for none.
 * Returns 0 on success, -1 when the context failed to enter RUN.
 */
static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	struct ti_ohci *ohci = recv->ohci;
	u32 command, contextMatch;

	/* clear all context control bits before reprogramming */
	reg_write(recv->ohci, recv->ContextControlClear, 0xFFFFFFFF);
	wmb();

	/* always keep ISO headers */
	command = (1 << 30);

	if (recv->dma_mode == BUFFER_FILL_MODE)
		command |= (1 << 31);

	reg_write(recv->ohci, recv->ContextControlSet, command);

	/* match on specified tags */
	contextMatch = tag_mask << 28;

	if (iso->channel == -1) {
		/* enable multichannel reception */
		reg_write(recv->ohci, recv->ContextControlSet, (1 << 28));
	} else {
		/* listen on channel */
		contextMatch |= iso->channel;
	}

	if (cycle != -1) {
		u32 seconds;

		/* enable cycleMatch */
		reg_write(recv->ohci, recv->ContextControlSet, (1 << 29));

		/* set starting cycle */
		cycle &= 0x1FFF;

		/* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
		   just snarf them from the current time */
		seconds = reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer) >> 25;

		/* advance one second to give some extra time for DMA to start */
		seconds += 1;

		cycle |= (seconds & 3) << 13;

		contextMatch |= cycle << 12;
	}

	if (sync != -1) {
		/* set sync flag on first DMA descriptor */
		struct dma_cmd *cmd = &recv->block[recv->block_dma];
		cmd->control |= cpu_to_le32(DMA_CTL_WAIT);

		/* match sync field */
		contextMatch |= (sync&0xf)<<8;
	}

	reg_write(recv->ohci, recv->ContextMatch, contextMatch);

	/* address of first descriptor block */
	command = dma_prog_region_offset_to_bus(&recv->prog,
						recv->block_dma * sizeof(struct dma_cmd));
	command |= 1; /* Z=1 */

	reg_write(recv->ohci, recv->CommandPtr, command);

	/* enable interrupts */
	reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskSet, 1 << recv->task.context);

	/* make sure all programming above is posted before RUN is set */
	wmb();

	/* run */
	reg_write(recv->ohci, recv->ContextControlSet, 0x8000);

	/* issue a dummy read of the cycle timer register to force
	   all PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);

	/* check RUN */
	if (!(reg_read(recv->ohci, recv->ContextControlSet) & 0x8000)) {
		PRINT(KERN_ERR,
		      "Error starting IR DMA (ContextControl 0x%08x)\n",
		      reg_read(recv->ohci, recv->ContextControlSet));
		return -1;
	}

	return 0;
}
1564
1565 static void ohci_iso_recv_release_block(struct ohci_iso_recv *recv, int block)
1566 {
1567         /* re-use the DMA descriptor for the block */
1568         /* by linking the previous descriptor to it */
1569
1570         int next_i = block;
1571         int prev_i = (next_i == 0) ? (recv->nblocks - 1) : (next_i - 1);
1572
1573         struct dma_cmd *next = &recv->block[next_i];
1574         struct dma_cmd *prev = &recv->block[prev_i];
1575         
1576         /* ignore out-of-range requests */
1577         if ((block < 0) || (block > recv->nblocks))
1578                 return;
1579
1580         /* 'next' becomes the new end of the DMA chain,
1581            so disable branch and enable interrupt */
1582         next->branchAddress = 0;
1583         next->control |= cpu_to_le32(3 << 20);
1584         next->status = cpu_to_le32(recv->buf_stride);
1585
1586         /* link prev to next */
1587         prev->branchAddress = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog,
1588                                                                         sizeof(struct dma_cmd) * next_i)
1589                                           | 1); /* Z=1 */
1590
1591         /* disable interrupt on previous DMA descriptor, except at intervals */
1592         if ((prev_i % recv->block_irq_interval) == 0) {
1593                 prev->control |= cpu_to_le32(3 << 20); /* enable interrupt */
1594         } else {
1595                 prev->control &= cpu_to_le32(~(3<<20)); /* disable interrupt */
1596         }
1597         wmb();
1598
1599         /* wake up DMA in case it fell asleep */
1600         reg_write(recv->ohci, recv->ContextControlSet, (1 << 12));
1601 }
1602
1603 static void ohci_iso_recv_bufferfill_release(struct ohci_iso_recv *recv,
1604                                              struct hpsb_iso_packet_info *info)
1605 {
1606         /* release the memory where the packet was */
1607         recv->released_bytes += info->total_len;
1608
1609         /* have we released enough memory for one block? */
1610         while (recv->released_bytes > recv->buf_stride) {
1611                 ohci_iso_recv_release_block(recv, recv->block_reader);
1612                 recv->block_reader = (recv->block_reader + 1) % recv->nblocks;
1613                 recv->released_bytes -= recv->buf_stride;
1614         }
1615 }
1616
1617 static inline void ohci_iso_recv_release(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
1618 {
1619         struct ohci_iso_recv *recv = iso->hostdata;
1620         if (recv->dma_mode == BUFFER_FILL_MODE) {
1621                 ohci_iso_recv_bufferfill_release(recv, info);
1622         } else {
1623                 ohci_iso_recv_release_block(recv, info - iso->infos);
1624         }
1625 }
1626
/* Parse all packets from blocks that have been fully received, reporting
 * each one to hpsb_iso_packet_received().  Walks recv->dma_offset through
 * the circular buffer until it catches up with the block the controller is
 * currently writing (recv->block_dma). */
static void ohci_iso_recv_bufferfill_parse(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
{
	int wake = 0;
	int runaway = 0;
	struct ti_ohci *ohci = recv->ohci;	/* referenced by the PRINT() macro */

	while (1) {
		/* we expect the next parsable packet to begin at recv->dma_offset */
		/* note: packet layout is as shown in section 10.6.1.1 of the OHCI spec */

		unsigned int offset;
		unsigned short len, cycle, total_len;
		unsigned char channel, tag, sy;

		unsigned char *p = iso->data_buf.kvirt;

		/* which ring block dma_offset currently falls in */
		unsigned int this_block = recv->dma_offset/recv->buf_stride;

		/* don't loop indefinitely - bail out if the buffer contents
		   never let us make progress */
		if (runaway++ > 100000) {
			atomic_inc(&iso->overflows);
			PRINT(KERN_ERR,
			      "IR DMA error - Runaway during buffer parsing!\n");
			break;
		}

		/* stop parsing once we arrive at block_dma (i.e. don't get ahead of DMA) */
		if (this_block == recv->block_dma)
			break;

		wake = 1;

		/* parse data length, tag, channel, and sy */

		/* note: we keep our own local copies of 'len' and 'offset'
		   so the user can't mess with them by poking in the mmap area */

		/* quadlet 0 of the OHCI iso header: dataLength in bytes 2-3
		   (little-endian) */
		len = p[recv->dma_offset+2] | (p[recv->dma_offset+3] << 8);

		if (len > 4096) {
			/* bogus length - report it, but keep parsing so
			   dma_offset stays in sync with the stream */
			PRINT(KERN_ERR,
			      "IR DMA error - bogus 'len' value %u\n", len);
		}

		channel = p[recv->dma_offset+1] & 0x3F;
		tag = p[recv->dma_offset+1] >> 6;
		sy = p[recv->dma_offset+0] & 0xF;

		/* advance past the 4-byte iso header to the data payload */
		recv->dma_offset += 4;

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		/* dma_offset now points to the first byte of the data payload */
		offset = recv->dma_offset;

		/* advance to xferStatus/timeStamp */
		recv->dma_offset += len;

		total_len = len + 8; /* 8 bytes header+trailer in OHCI packet */
		/* payload is padded to 4 bytes */
		if (len % 4) {
			recv->dma_offset += 4 - (len%4);
			total_len += 4 - (len%4);
		}

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			/* uh oh, the packet data wraps from the last
			   to the first DMA block - make the packet
			   contiguous by copying its "tail" into the
			   guard page */

			int guard_off = recv->buf_stride*recv->nblocks;
			int tail_len = len - (guard_off - offset);

			/* NOTE(review): assumes a guard page of at least
			   buf_stride bytes exists past the ring - allocated
			   elsewhere in this file; confirm in recv init */
			if (tail_len > 0  && tail_len < recv->buf_stride) {
				memcpy(iso->data_buf.kvirt + guard_off,
				       iso->data_buf.kvirt,
				       tail_len);
			}

			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		/* parse timestamp from the 4-byte trailer: cycleCount is the
		   low 13 bits */
		cycle = p[recv->dma_offset+0] | (p[recv->dma_offset+1]<<8);
		cycle &= 0x1FFF;

		/* advance past the trailer to the next packet */
		recv->dma_offset += 4;

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		hpsb_iso_packet_received(iso, offset, len, total_len, cycle, channel, tag, sy);
	}

	/* wake any reader sleeping on the iso stream if we delivered packets */
	if (wake)
		hpsb_iso_wake(iso);
}
1734
1735 static void ohci_iso_recv_bufferfill_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1736 {
1737         int loop;
1738         struct ti_ohci *ohci = recv->ohci;
1739
1740         /* loop over all blocks */
1741         for (loop = 0; loop < recv->nblocks; loop++) {
1742
1743                 /* check block_dma to see if it's done */
1744                 struct dma_cmd *im = &recv->block[recv->block_dma];
1745
1746                 /* check the DMA descriptor for new writes to xferStatus */
1747                 u16 xferstatus = le32_to_cpu(im->status) >> 16;
1748
1749                 /* rescount is the number of bytes *remaining to be written* in the block */
1750                 u16 rescount = le32_to_cpu(im->status) & 0xFFFF;
1751
1752                 unsigned char event = xferstatus & 0x1F;
1753
1754                 if (!event) {
1755                         /* nothing has happened to this block yet */
1756                         break;
1757                 }
1758
1759                 if (event != 0x11) {
1760                         atomic_inc(&iso->overflows);
1761                         PRINT(KERN_ERR,
1762                               "IR DMA error - OHCI error code 0x%02x\n", event);
1763                 }
1764
1765                 if (rescount != 0) {
1766                         /* the card is still writing to this block;
1767                            we can't touch it until it's done */
1768                         break;
1769                 }
1770
1771                 /* OK, the block is finished... */
1772
1773                 /* sync our view of the block */
1774                 dma_region_sync_for_cpu(&iso->data_buf, recv->block_dma*recv->buf_stride, recv->buf_stride);
1775
1776                 /* reset the DMA descriptor */
1777                 im->status = recv->buf_stride;
1778
1779                 /* advance block_dma */
1780                 recv->block_dma = (recv->block_dma + 1) % recv->nblocks;
1781
1782                 if ((recv->block_dma+1) % recv->nblocks == recv->block_reader) {
1783                         atomic_inc(&iso->overflows);
1784                         DBGMSG("ISO reception overflow - "
1785                                "ran out of DMA blocks");
1786                 }
1787         }
1788
1789         /* parse any packets that have arrived */
1790         ohci_iso_recv_bufferfill_parse(iso, recv);
1791 }
1792
1793 static void ohci_iso_recv_packetperbuf_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1794 {
1795         int count;
1796         int wake = 0;
1797         struct ti_ohci *ohci = recv->ohci;
1798
1799         /* loop over the entire buffer */
1800         for (count = 0; count < recv->nblocks; count++) {
1801                 u32 packet_len = 0;
1802
1803                 /* pointer to the DMA descriptor */
1804                 struct dma_cmd *il = ((struct dma_cmd*) recv->prog.kvirt) + iso->pkt_dma;
1805
1806                 /* check the DMA descriptor for new writes to xferStatus */
1807                 u16 xferstatus = le32_to_cpu(il->status) >> 16;
1808                 u16 rescount = le32_to_cpu(il->status) & 0xFFFF;
1809
1810                 unsigned char event = xferstatus & 0x1F;
1811
1812                 if (!event) {
1813                         /* this packet hasn't come in yet; we are done for now */
1814                         goto out;
1815                 }
1816
1817                 if (event == 0x11) {
1818                         /* packet received successfully! */
1819
1820                         /* rescount is the number of bytes *remaining* in the packet buffer,
1821                            after the packet was written */
1822                         packet_len = recv->buf_stride - rescount;
1823
1824                 } else if (event == 0x02) {
1825                         PRINT(KERN_ERR, "IR DMA error - packet too long for buffer\n");
1826                 } else if (event) {
1827                         PRINT(KERN_ERR, "IR DMA error - OHCI error code 0x%02x\n", event);
1828                 }
1829
1830                 /* sync our view of the buffer */
1831                 dma_region_sync_for_cpu(&iso->data_buf, iso->pkt_dma * recv->buf_stride, recv->buf_stride);
1832
1833                 /* record the per-packet info */
1834                 {
1835                         /* iso header is 8 bytes ahead of the data payload */
1836                         unsigned char *hdr;
1837
1838                         unsigned int offset;
1839                         unsigned short cycle;
1840                         unsigned char channel, tag, sy;
1841
1842                         offset = iso->pkt_dma * recv->buf_stride;
1843                         hdr = iso->data_buf.kvirt + offset;
1844
1845                         /* skip iso header */
1846                         offset += 8;
1847                         packet_len -= 8;
1848
1849                         cycle = (hdr[0] | (hdr[1] << 8)) & 0x1FFF;
1850                         channel = hdr[5] & 0x3F;
1851                         tag = hdr[5] >> 6;
1852                         sy = hdr[4] & 0xF;
1853
1854                         hpsb_iso_packet_received(iso, offset, packet_len,
1855                                         recv->buf_stride, cycle, channel, tag, sy);
1856                 }
1857
1858                 /* reset the DMA descriptor */
1859                 il->status = recv->buf_stride;
1860
1861                 wake = 1;
1862                 recv->block_dma = iso->pkt_dma;
1863         }
1864
1865 out:
1866         if (wake)
1867                 hpsb_iso_wake(iso);
1868 }
1869
1870 static void ohci_iso_recv_task(unsigned long data)
1871 {
1872         struct hpsb_iso *iso = (struct hpsb_iso*) data;
1873         struct ohci_iso_recv *recv = iso->hostdata;
1874
1875         if (recv->dma_mode == BUFFER_FILL_MODE)
1876                 ohci_iso_recv_bufferfill_task(iso, recv);
1877         else
1878                 ohci_iso_recv_packetperbuf_task(iso, recv);
1879 }
1880
1881 /***********************************
1882  * rawiso ISO transmission         *
1883  ***********************************/
1884
/* per-stream state for rawiso transmission */
struct ohci_iso_xmit {
	struct ti_ohci *ohci;			/* card this context lives on */
	struct dma_prog_region prog;		/* array of iso_xmit_cmd descriptors */
	struct ohci1394_iso_tasklet task;	/* bottom half for xmit completions */
	int task_active;			/* nonzero once the tasklet is registered */

	/* register offsets for this IT context (filled in by
	   ohci_iso_xmit_init; contexts are spaced 16 bytes apart) */
	u32 ContextControlSet;
	u32 ContextControlClear;
	u32 CommandPtr;
};
1895
/* transmission DMA program:
   one OUTPUT_MORE_IMMEDIATE for the IT header
   one OUTPUT_LAST for the buffer data */

struct iso_xmit_cmd {
	struct dma_cmd output_more_immediate;	/* immediate descriptor carrying the header */
	u8 iso_hdr[8];		/* the 1394 iso packet header, sent as immediate data */
	u32 unused[2];		/* pads the immediate data to a full 16-byte
				   descriptor slot, making the program Z=3
				   (see the "| 3" branch encoding in
				   ohci_iso_xmit_queue) */
	struct dma_cmd output_last;	/* points at the payload in the user buffer */
};
1906
1907 static int ohci_iso_xmit_init(struct hpsb_iso *iso);
1908 static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle);
1909 static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso);
1910 static void ohci_iso_xmit_task(unsigned long data);
1911
/* Allocate and set up an isochronous transmit context for 'iso':
 * per-stream state, the DMA program, and a completion tasklet.
 * Returns 0 on success, -ENOMEM or -EBUSY on failure. */
static int ohci_iso_xmit_init(struct hpsb_iso *iso)
{
	struct ohci_iso_xmit *xmit;
	unsigned int prog_size;
	int ctx;
	int ret = -ENOMEM;

	xmit = kmalloc(sizeof(*xmit), SLAB_KERNEL);
	if (!xmit)
		return -ENOMEM;

	/* hostdata must be assigned before any goto err:
	   ohci_iso_xmit_shutdown() fetches xmit back from iso->hostdata */
	iso->hostdata = xmit;
	xmit->ohci = iso->host->hostdata;
	xmit->task_active = 0;

	dma_prog_region_init(&xmit->prog);

	/* one iso_xmit_cmd descriptor program per packet slot */
	prog_size = sizeof(struct iso_xmit_cmd) * iso->buf_packets;

	if (dma_prog_region_alloc(&xmit->prog, prog_size, xmit->ohci->dev))
		goto err;

	ohci1394_init_iso_tasklet(&xmit->task, OHCI_ISO_TRANSMIT,
				  ohci_iso_xmit_task, (unsigned long) iso);

	/* claim a free IT context on the card; fails if all are in use */
	if (ohci1394_register_iso_tasklet(xmit->ohci, &xmit->task) < 0) {
		ret = -EBUSY;
		goto err;
	}

	/* only set after successful registration, so the error path knows
	   whether to stop/unregister the context */
	xmit->task_active = 1;

	/* xmit context registers are spaced 16 bytes apart */
	ctx = xmit->task.context;
	xmit->ContextControlSet = OHCI1394_IsoXmitContextControlSet + 16 * ctx;
	xmit->ContextControlClear = OHCI1394_IsoXmitContextControlClear + 16 * ctx;
	xmit->CommandPtr = OHCI1394_IsoXmitCommandPtr + 16 * ctx;

	return 0;

err:
	ohci_iso_xmit_shutdown(iso);
	return ret;
}
1956
1957 static void ohci_iso_xmit_stop(struct hpsb_iso *iso)
1958 {
1959         struct ohci_iso_xmit *xmit = iso->hostdata;
1960         struct ti_ohci *ohci = xmit->ohci;
1961
1962         /* disable interrupts */
1963         reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskClear, 1 << xmit->task.context);
1964
1965         /* halt DMA */
1966         if (ohci1394_stop_context(xmit->ohci, xmit->ContextControlClear, NULL)) {
1967                 /* XXX the DMA context will lock up if you try to send too much data! */
1968                 PRINT(KERN_ERR,
1969                       "you probably exceeded the OHCI card's bandwidth limit - "
1970                       "reload the module and reduce xmit bandwidth");
1971         }
1972 }
1973
/* Tear down a transmit context created by ohci_iso_xmit_init().
 * Also used as that function's error path, so it must tolerate a
 * partially initialized xmit (task_active == 0, empty prog region). */
static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;

	/* stop DMA and give the IT context back before freeing its program */
	if (xmit->task_active) {
		ohci_iso_xmit_stop(iso);
		ohci1394_unregister_iso_tasklet(xmit->ohci, &xmit->task);
		xmit->task_active = 0;
	}

	dma_prog_region_free(&xmit->prog);
	kfree(xmit);
	iso->hostdata = NULL;	/* signal the caller the context is gone */
}
1988
/* tasklet body for isochronous transmit: scan descriptors for packets the
 * controller has completed, report each to the iso subsystem, and recycle
 * the descriptors */
static void ohci_iso_xmit_task(unsigned long data)
{
	struct hpsb_iso *iso = (struct hpsb_iso*) data;
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci;	/* referenced by the PRINT() macro */
	int wake = 0;
	int count;

	/* check the whole buffer if necessary, starting at pkt_dma */
	for (count = 0; count < iso->buf_packets; count++) {
		int cycle;

		/* DMA descriptor */
		struct iso_xmit_cmd *cmd = dma_region_i(&xmit->prog, struct iso_xmit_cmd, iso->pkt_dma);

		/* check for new writes to xferStatus */
		u16 xferstatus = le32_to_cpu(cmd->output_last.status) >> 16;
		u8  event = xferstatus & 0x1F;

		if (!event) {
			/* packet hasn't been sent yet; we are done for now */
			break;
		}

		/* 0x11 == ack_complete; anything else is an error */
		if (event != 0x11)
			PRINT(KERN_ERR,
			      "IT DMA error - OHCI error code 0x%02x\n", event);

		/* at least one packet went out, so wake up the writer */
		wake = 1;

		/* parse cycle: low 13 bits of the timeStamp the controller
		   wrote back into the descriptor */
		cycle = le32_to_cpu(cmd->output_last.status) & 0x1FFF;

		/* tell the subsystem the packet has gone out
		   (presumably this advances iso->pkt_dma, or the loop above
		   would rescan the same slot - confirm in the iso core) */
		hpsb_iso_packet_sent(iso, cycle, event != 0x11);

		/* reset the DMA descriptor for next time */
		cmd->output_last.status = 0;
	}

	if (wake)
		hpsb_iso_wake(iso);
}
2033
/* Queue one packet for transmission: build its two-command DMA program
 * (OUTPUT_MORE_IMMEDIATE header + OUTPUT_LAST payload) in the slot
 * iso->first_packet, then splice it onto the end of the running chain.
 * Returns 0 on success, -EINVAL if the payload crosses a page boundary. */
static int ohci_iso_xmit_queue(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci;	/* referenced by the PRINT() macro */

	int next_i, prev_i;
	struct iso_xmit_cmd *next, *prev;

	unsigned int offset;
	unsigned short len;
	unsigned char tag, sy;

	/* check that the packet doesn't cross a page boundary
	   (we could allow this if we added OUTPUT_MORE descriptor support) */
	if (cross_bound(info->offset, info->len)) {
		PRINT(KERN_ERR,
		      "rawiso xmit: packet %u crosses a page boundary",
		      iso->first_packet);
		return -EINVAL;
	}

	/* local copies so the user can't change them under us via mmap */
	offset = info->offset;
	len = info->len;
	tag = info->tag;
	sy = info->sy;

	/* sync up the card's view of the buffer */
	dma_region_sync_for_device(&iso->data_buf, offset, len);

	/* append first_packet to the DMA chain */
	/* by linking the previous descriptor to it */
	/* (next will become the new end of the DMA chain) */

	next_i = iso->first_packet;
	prev_i = (next_i == 0) ? (iso->buf_packets - 1) : (next_i - 1);

	next = dma_region_i(&xmit->prog, struct iso_xmit_cmd, next_i);
	prev = dma_region_i(&xmit->prog, struct iso_xmit_cmd, prev_i);

	/* set up the OUTPUT_MORE_IMMEDIATE descriptor
	   (0x02000008: OUTPUT_MORE immediate, reqCount = 8 header bytes) */
	memset(next, 0, sizeof(struct iso_xmit_cmd));
	next->output_more_immediate.control = cpu_to_le32(0x02000008);

	/* ISO packet header is embedded in the OUTPUT_MORE_IMMEDIATE */

	/* tcode = 0xA, and sy */
	next->iso_hdr[0] = 0xA0 | (sy & 0xF);

	/* tag and channel number */
	next->iso_hdr[1] = (tag << 6) | (iso->channel & 0x3F);

	/* transmission speed */
	next->iso_hdr[2] = iso->speed & 0x7;

	/* payload size */
	next->iso_hdr[6] = len & 0xFF;
	next->iso_hdr[7] = len >> 8;

	/* set up the OUTPUT_LAST */
	next->output_last.control = cpu_to_le32(1 << 28);
	next->output_last.control |= cpu_to_le32(1 << 27); /* update timeStamp */
	next->output_last.control |= cpu_to_le32(3 << 20); /* want interrupt */
	next->output_last.control |= cpu_to_le32(3 << 18); /* enable branch */
	next->output_last.control |= cpu_to_le32(len);

	/* payload bus address */
	next->output_last.address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, offset));

	/* leave branchAddress at zero for now - this descriptor is the
	   new tail of the chain */

	/* re-write the previous DMA descriptor to chain to this one */

	/* set prev branch address to point to next (Z=3) */
	prev->output_last.branchAddress = cpu_to_le32(
		dma_prog_region_offset_to_bus(&xmit->prog, sizeof(struct iso_xmit_cmd) * next_i) | 3);

	/* disable interrupt, unless required by the IRQ interval */
	if (prev_i % iso->irq_interval) {
		prev->output_last.control &= cpu_to_le32(~(3 << 20)); /* no interrupt */
	} else {
		prev->output_last.control |= cpu_to_le32(3 << 20); /* enable interrupt */
	}

	/* descriptors must be visible to the card before we poke it */
	wmb();

	/* wake DMA in case it is sleeping */
	reg_write(xmit->ohci, xmit->ContextControlSet, 1 << 12);

	/* issue a dummy read of the cycle timer to force all PCI
	   writes to be posted immediately */
	mb();
	reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer);

	return 0;
}
2129
/* Start the transmit DMA context, optionally synchronized to a bus cycle.
 * 'cycle' is the starting cycle number (mod 8000), or -1 to start
 * immediately.  Returns 0 on success, -1 if the context failed to run. */
static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci;	/* referenced by the PRINT() macro */

	/* clear out the control register */
	reg_write(xmit->ohci, xmit->ContextControlClear, 0xFFFFFFFF);
	wmb();

	/* address and length of first descriptor block (Z=3) */
	reg_write(xmit->ohci, xmit->CommandPtr,
		  dma_prog_region_offset_to_bus(&xmit->prog, iso->pkt_dma * sizeof(struct iso_xmit_cmd)) | 3);

	/* cycle match */
	if (cycle != -1) {
		u32 start = cycle & 0x1FFF;

		/* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
		   just snarf them from the current time */
		u32 seconds = reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer) >> 25;

		/* advance one second to give some extra time for DMA to start */
		seconds += 1;

		start |= (seconds & 3) << 13;

		/* bit 31 = cycleMatchEnable, cycle match value in bits 16-30 */
		reg_write(xmit->ohci, xmit->ContextControlSet, 0x80000000 | (start << 16));
	}

	/* enable interrupts */
	reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskSet, 1 << xmit->task.context);

	/* run (set the RUN bit) */
	reg_write(xmit->ohci, xmit->ContextControlSet, 0x8000);
	mb();

	/* wait 100 usec to give the card time to go active */
	udelay(100);

	/* check the RUN bit */
	if (!(reg_read(xmit->ohci, xmit->ContextControlSet) & 0x8000)) {
		PRINT(KERN_ERR, "Error starting IT DMA (ContextControl 0x%08x)\n",
		      reg_read(xmit->ohci, xmit->ContextControlSet));
		return -1;
	}

	return 0;
}
2178
/* hpsb_host_driver isoctl entry point: dispatch rawiso control operations
 * to the transmit/receive implementations above.  'arg' is overloaded per
 * command (integer, pointer to packet info, pointer to int array, or
 * pointer to u64 channel mask).  Returns 0 or a negative errno. */
static int ohci_isoctl(struct hpsb_iso *iso, enum isoctl_cmd cmd, unsigned long arg)
{

	switch(cmd) {
	case XMIT_INIT:
		return ohci_iso_xmit_init(iso);
	case XMIT_START:
		/* arg is the starting cycle, or -1 */
		return ohci_iso_xmit_start(iso, arg);
	case XMIT_STOP:
		ohci_iso_xmit_stop(iso);
		return 0;
	case XMIT_QUEUE:
		return ohci_iso_xmit_queue(iso, (struct hpsb_iso_packet_info*) arg);
	case XMIT_SHUTDOWN:
		ohci_iso_xmit_shutdown(iso);
		return 0;

	case RECV_INIT:
		return ohci_iso_recv_init(iso);
	case RECV_START: {
		/* arg points to three ints: cycle, tag mask, sync */
		int *args = (int*) arg;
		return ohci_iso_recv_start(iso, args[0], args[1], args[2]);
	}
	case RECV_STOP:
		ohci_iso_recv_stop(iso);
		return 0;
	case RECV_RELEASE:
		ohci_iso_recv_release(iso, (struct hpsb_iso_packet_info*) arg);
		return 0;
	case RECV_FLUSH:
		/* run the bottom half synchronously to drain pending packets */
		ohci_iso_recv_task((unsigned long) iso);
		return 0;
	case RECV_SHUTDOWN:
		ohci_iso_recv_shutdown(iso);
		return 0;
	case RECV_LISTEN_CHANNEL:
		ohci_iso_recv_change_channel(iso, arg, 1);
		return 0;
	case RECV_UNLISTEN_CHANNEL:
		ohci_iso_recv_change_channel(iso, arg, 0);
		return 0;
	case RECV_SET_CHANNEL_MASK:
		ohci_iso_recv_set_channel_mask(iso, *((u64*) arg));
		return 0;

	default:
		PRINT_G(KERN_ERR, "ohci_isoctl cmd %d not implemented yet",
			cmd);
		break;
	}
	return -EINVAL;
}
2231
2232 /***************************************
2233  * IEEE-1394 functionality section END *
2234  ***************************************/
2235
2236
2237 /********************************************************
2238  * Global stuff (interrupt handler, init/shutdown code) *
2239  ********************************************************/
2240
/* Abort an async transmit DMA context: stop the hardware, reset the
 * software ring state, and complete every queued packet with
 * ACKX_ABORTED.  Callbacks are invoked after dropping d->lock. */
static void dma_trm_reset(struct dma_trm_ctx *d)
{
	unsigned long flags;
	LIST_HEAD(packet_list);
	struct ti_ohci *ohci = d->ohci;	/* referenced by the PRINT() macro */
	struct hpsb_packet *packet, *ptmp;

	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	/* Lock the context, reset it and release it. Move the packets
	 * that were pending in the context to packet_list and free
	 * them after releasing the lock. */

	spin_lock_irqsave(&d->lock, flags);

	/* take both the in-flight and not-yet-programmed packets */
	list_splice(&d->fifo_list, &packet_list);
	list_splice(&d->pending_list, &packet_list);
	INIT_LIST_HEAD(&d->fifo_list);
	INIT_LIST_HEAD(&d->pending_list);

	/* reset the descriptor-program bookkeeping to "empty" */
	d->branchAddrPtr = NULL;
	d->sent_ind = d->prg_ind;
	d->free_prgs = d->num_desc;

	spin_unlock_irqrestore(&d->lock, flags);

	if (list_empty(&packet_list))
		return;

	PRINT(KERN_INFO, "AT dma reset ctx=%d, aborting transmission", d->ctx);

	/* Now process subsystem callbacks for the packets from this
	 * context. */
	list_for_each_entry_safe(packet, ptmp, &packet_list, driver_list) {
		list_del_init(&packet->driver_list);
		hpsb_packet_sent(ohci->host, packet, ACKX_ABORTED);
	}
}
2279
2280 static void ohci_schedule_iso_tasklets(struct ti_ohci *ohci,
2281                                        quadlet_t rx_event,
2282                                        quadlet_t tx_event)
2283 {
2284         struct ohci1394_iso_tasklet *t;
2285         unsigned long mask;
2286
2287         spin_lock(&ohci->iso_tasklet_list_lock);
2288
2289         list_for_each_entry(t, &ohci->iso_tasklet_list, link) {
2290                 mask = 1 << t->context;
2291
2292                 if (t->type == OHCI_ISO_TRANSMIT && tx_event & mask)
2293                         tasklet_schedule(&t->tasklet);
2294                 else if (rx_event & mask)
2295                         tasklet_schedule(&t->tasklet);
2296         }
2297
2298         spin_unlock(&ohci->iso_tasklet_list_lock);
2299
2300 }
2301
/*
 * Top-level interrupt handler for the OHCI-1394 controller.
 *
 * Reads and acknowledges IntEventClear (leaving busReset pending, see
 * below), then dispatches each asserted event bit in turn: unrecoverable
 * DMA errors, cycle inconsistencies, bus resets, async transmit/receive
 * completions, isochronous rx/tx, and selfID completion.  Each handled
 * bit is cleared from 'event' so anything left over at the end can be
 * reported as an unhandled interrupt.
 *
 * Returns IRQ_NONE when the interrupt was not ours or the card was
 * ejected, IRQ_HANDLED otherwise.
 */
static irqreturn_t ohci_irq_handler(int irq, void *dev_id,
                             struct pt_regs *regs_are_unused)
{
	quadlet_t event, node_id;
	struct ti_ohci *ohci = (struct ti_ohci *)dev_id;
	struct hpsb_host *host = ohci->host;
	int phyid = -1, isroot = 0;
	unsigned long flags;

	/* Read and clear the interrupt event register.  Don't clear
	 * the busReset event, though. This is done when we get the
	 * selfIDComplete interrupt. */
	spin_lock_irqsave(&ohci->event_lock, flags);
	event = reg_read(ohci, OHCI1394_IntEventClear);
	reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
	spin_unlock_irqrestore(&ohci->event_lock, flags);

	/* No bits set: shared interrupt line, not ours. */
	if (!event)
		return IRQ_NONE;

	/* If event is ~(u32)0 cardbus card was ejected.  In this case
	 * we just return, and clean up in the ohci1394_pci_remove
	 * function. */
	if (event == ~(u32) 0) {
		DBGMSG("Device removed.");
		return IRQ_NONE;
	}

	DBGMSG("IntEvent: %08x", event);

	if (event & OHCI1394_unrecoverableError) {
		int ctx;
		PRINT(KERN_ERR, "Unrecoverable error!");

		/* Bit 0x800 of a ContextControl register marks that
		 * context as dead; report every context that shows it. */
		if (reg_read(ohci, OHCI1394_AsReqTrContextControlSet) & 0x800)
			PRINT(KERN_ERR, "Async Req Tx Context died: "
				"ctrl[%08x] cmdptr[%08x]",
				reg_read(ohci, OHCI1394_AsReqTrContextControlSet),
				reg_read(ohci, OHCI1394_AsReqTrCommandPtr));

		if (reg_read(ohci, OHCI1394_AsRspTrContextControlSet) & 0x800)
			PRINT(KERN_ERR, "Async Rsp Tx Context died: "
				"ctrl[%08x] cmdptr[%08x]",
				reg_read(ohci, OHCI1394_AsRspTrContextControlSet),
				reg_read(ohci, OHCI1394_AsRspTrCommandPtr));

		if (reg_read(ohci, OHCI1394_AsReqRcvContextControlSet) & 0x800)
			PRINT(KERN_ERR, "Async Req Rcv Context died: "
				"ctrl[%08x] cmdptr[%08x]",
				reg_read(ohci, OHCI1394_AsReqRcvContextControlSet),
				reg_read(ohci, OHCI1394_AsReqRcvCommandPtr));

		if (reg_read(ohci, OHCI1394_AsRspRcvContextControlSet) & 0x800)
			PRINT(KERN_ERR, "Async Rsp Rcv Context died: "
				"ctrl[%08x] cmdptr[%08x]",
				reg_read(ohci, OHCI1394_AsRspRcvContextControlSet),
				reg_read(ohci, OHCI1394_AsRspRcvCommandPtr));

		/* Iso xmit contexts have a 16-byte register stride... */
		for (ctx = 0; ctx < ohci->nb_iso_xmit_ctx; ctx++) {
			if (reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)) & 0x800)
				PRINT(KERN_ERR, "Iso Xmit %d Context died: "
					"ctrl[%08x] cmdptr[%08x]", ctx,
					reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)),
					reg_read(ohci, OHCI1394_IsoXmitCommandPtr + (16 * ctx)));
		}

		/* ...while iso receive contexts use a 32-byte stride. */
		for (ctx = 0; ctx < ohci->nb_iso_rcv_ctx; ctx++) {
			if (reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)) & 0x800)
				PRINT(KERN_ERR, "Iso Recv %d Context died: "
					"ctrl[%08x] cmdptr[%08x] match[%08x]", ctx,
					reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)),
					reg_read(ohci, OHCI1394_IsoRcvCommandPtr + (32 * ctx)),
					reg_read(ohci, OHCI1394_IsoRcvContextMatch + (32 * ctx)));
		}

		event &= ~OHCI1394_unrecoverableError;
	}

	if (event & OHCI1394_cycleInconsistent) {
		/* We subscribe to the cycleInconsistent event only to
		 * clear the corresponding event bit... otherwise,
		 * isochronous cycleMatch DMA won't work. */
		DBGMSG("OHCI1394_cycleInconsistent");
		event &= ~OHCI1394_cycleInconsistent;
	}

	if (event & OHCI1394_busReset) {
		/* The busReset event bit can't be cleared during the
		 * selfID phase, so we disable busReset interrupts, to
		 * avoid burying the cpu in interrupt requests. */
		spin_lock_irqsave(&ohci->event_lock, flags);
		reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);

		if (ohci->check_busreset) {
			int loop_count = 0;

			udelay(10);

			/* Spin until the hardware stops re-asserting
			 * busReset; the lock is dropped around each
			 * udelay() so other CPUs are not starved. */
			while (reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
				reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);

				spin_unlock_irqrestore(&ohci->event_lock, flags);
				udelay(10);
				spin_lock_irqsave(&ohci->event_lock, flags);

				/* The loop counter check is to prevent the driver
				 * from remaining in this state forever. For the
				 * initial bus reset, the loop continues for ever
				 * and the system hangs, until some device is plugged-in
				 * or out manually into a port! The forced reset seems
				 * to solve this problem. This mainly effects nForce2. */
				if (loop_count > 10000) {
					ohci_devctl(host, RESET_BUS, LONG_RESET);
					DBGMSG("Detected bus-reset loop. Forced a bus reset!");
					loop_count = 0;
				}

				loop_count++;
			}
		}
		spin_unlock_irqrestore(&ohci->event_lock, flags);
		if (!host->in_bus_reset) {
			DBGMSG("irq_handler: Bus reset requested");

			/* Subsystem call */
			hpsb_bus_reset(ohci->host);
		}
		event &= ~OHCI1394_busReset;
	}

	if (event & OHCI1394_reqTxComplete) {
		struct dma_trm_ctx *d = &ohci->at_req_context;
		DBGMSG("Got reqTxComplete interrupt "
		       "status=0x%08X", reg_read(ohci, d->ctrlSet));
		/* 0x800 set means the context died; stop it instead of
		 * processing.  Otherwise the transmit bottom half is run
		 * synchronously here rather than via the tasklet. */
		if (reg_read(ohci, d->ctrlSet) & 0x800)
			ohci1394_stop_context(ohci, d->ctrlClear,
					      "reqTxComplete");
		else
			dma_trm_tasklet((unsigned long)d);
			//tasklet_schedule(&d->task);
		event &= ~OHCI1394_reqTxComplete;
	}
	if (event & OHCI1394_respTxComplete) {
		struct dma_trm_ctx *d = &ohci->at_resp_context;
		DBGMSG("Got respTxComplete interrupt "
		       "status=0x%08X", reg_read(ohci, d->ctrlSet));
		if (reg_read(ohci, d->ctrlSet) & 0x800)
			ohci1394_stop_context(ohci, d->ctrlClear,
					      "respTxComplete");
		else
			tasklet_schedule(&d->task);
		event &= ~OHCI1394_respTxComplete;
	}
	if (event & OHCI1394_RQPkt) {
		struct dma_rcv_ctx *d = &ohci->ar_req_context;
		DBGMSG("Got RQPkt interrupt status=0x%08X",
		       reg_read(ohci, d->ctrlSet));
		if (reg_read(ohci, d->ctrlSet) & 0x800)
			ohci1394_stop_context(ohci, d->ctrlClear, "RQPkt");
		else
			tasklet_schedule(&d->task);
		event &= ~OHCI1394_RQPkt;
	}
	if (event & OHCI1394_RSPkt) {
		struct dma_rcv_ctx *d = &ohci->ar_resp_context;
		DBGMSG("Got RSPkt interrupt status=0x%08X",
		       reg_read(ohci, d->ctrlSet));
		if (reg_read(ohci, d->ctrlSet) & 0x800)
			ohci1394_stop_context(ohci, d->ctrlClear, "RSPkt");
		else
			tasklet_schedule(&d->task);
		event &= ~OHCI1394_RSPkt;
	}
	if (event & OHCI1394_isochRx) {
		quadlet_t rx_event;

		/* Ack the per-context iso rx bits and fan out to the
		 * registered iso tasklets. */
		rx_event = reg_read(ohci, OHCI1394_IsoRecvIntEventSet);
		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, rx_event);
		ohci_schedule_iso_tasklets(ohci, rx_event, 0);
		event &= ~OHCI1394_isochRx;
	}
	if (event & OHCI1394_isochTx) {
		quadlet_t tx_event;

		tx_event = reg_read(ohci, OHCI1394_IsoXmitIntEventSet);
		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, tx_event);
		ohci_schedule_iso_tasklets(ohci, 0, tx_event);
		event &= ~OHCI1394_isochTx;
	}
	if (event & OHCI1394_selfIDComplete) {
		if (host->in_bus_reset) {
			node_id = reg_read(ohci, OHCI1394_NodeID);

			/* Top bit of NodeID is the idValid flag; without it
			 * the register contents are stale. */
			if (!(node_id & 0x80000000)) {
				PRINT(KERN_ERR,
				      "SelfID received, but NodeID invalid "
				      "(probably new bus reset occurred): %08X",
				      node_id);
				goto selfid_not_valid;
			}

			phyid =  node_id & 0x0000003f;
			isroot = (node_id & 0x40000000) != 0;

			DBGMSG("SelfID interrupt received "
			      "(phyid %d, %s)", phyid,
			      (isroot ? "root" : "not root"));

			handle_selfid(ohci, host, phyid, isroot);

			/* Clear the bus reset event and re-enable the
			 * busReset interrupt.  */
			spin_lock_irqsave(&ohci->event_lock, flags);
			reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
			reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
			spin_unlock_irqrestore(&ohci->event_lock, flags);

			/* Accept Physical requests from all nodes. */
			reg_write(ohci,OHCI1394_AsReqFilterHiSet, 0xffffffff);
			reg_write(ohci,OHCI1394_AsReqFilterLoSet, 0xffffffff);

			/* Turn on phys dma reception.
			 *
			 * TODO: Enable some sort of filtering management.
			 */
			if (phys_dma) {
				reg_write(ohci,OHCI1394_PhyReqFilterHiSet, 0xffffffff);
				reg_write(ohci,OHCI1394_PhyReqFilterLoSet, 0xffffffff);
				reg_write(ohci,OHCI1394_PhyUpperBound, 0xffff0000);
			} else {
				reg_write(ohci,OHCI1394_PhyReqFilterHiSet, 0x00000000);
				reg_write(ohci,OHCI1394_PhyReqFilterLoSet, 0x00000000);
			}

			DBGMSG("PhyReqFilter=%08x%08x",
			       reg_read(ohci,OHCI1394_PhyReqFilterHiSet),
			       reg_read(ohci,OHCI1394_PhyReqFilterLoSet));

			hpsb_selfid_complete(host, phyid, isroot);
		} else
			PRINT(KERN_ERR,
			      "SelfID received outside of bus reset sequence");

selfid_not_valid:
		event &= ~OHCI1394_selfIDComplete;
	}

	/* Make sure we handle everything, just in case we accidentally
	 * enabled an interrupt that we didn't write a handler for.  */
	if (event)
		PRINT(KERN_ERR, "Unhandled interrupt(s) 0x%08x",
		      event);

	return IRQ_HANDLED;
}
2557
2558 /* Put the buffer back into the dma context */
2559 static void insert_dma_buffer(struct dma_rcv_ctx *d, int idx)
2560 {
2561         struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2562         DBGMSG("Inserting dma buf ctx=%d idx=%d", d->ctx, idx);
2563
2564         d->prg_cpu[idx]->status = cpu_to_le32(d->buf_size);
2565         d->prg_cpu[idx]->branchAddress &= le32_to_cpu(0xfffffff0);
2566         idx = (idx + d->num_desc - 1 ) % d->num_desc;
2567         d->prg_cpu[idx]->branchAddress |= le32_to_cpu(0x00000001);
2568
2569         /* To avoid a race, ensure 1394 interface hardware sees the inserted
2570          * context program descriptors before it sees the wakeup bit set. */
2571         wmb();
2572         
2573         /* wake up the dma context if necessary */
2574         if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
2575                 PRINT(KERN_INFO,
2576                       "Waking dma ctx=%d ... processing is probably too slow",
2577                       d->ctx);
2578         }
2579
2580         /* do this always, to avoid race condition */
2581         reg_write(ohci, d->ctrlSet, 0x1000);
2582 }
2583
/* Conditionally byte-swap a little-endian quadlet read from a DMA buffer:
 * controllers flagged no_swap_incoming deliver data already in CPU order.
 * Arguments and the whole expansion are parenthesized so the macro is safe
 * inside larger expressions (e.g. "cond_le32_to_cpu(x, s) >> 16") and with
 * compound arguments; each argument is still evaluated at most once. */
#define cond_le32_to_cpu(data, noswap) \
	((noswap) ? (data) : le32_to_cpu(data))
2586
/* Per-tcode packet length in bytes, indexed by IEEE 1394 transaction code.
 * 0 means the length is variable and must be computed from the packet's
 * data_length field; -1 marks a tcode not expected here (packet_length()
 * returns it unchanged and the caller treats lengths < 4 as an error). */
static const int TCODE_SIZE[16] = {20, 0, 16, -1, 16, 20, 20, 0,
                            -1, 0, -1, 0, -1, -1, 16, -1};
2589
/*
 * Determine the length of a packet in the buffer
 * Optimization suggested by Pascal Drolet <pascal.drolet@informission.ca>
 *
 * For async contexts the length comes from TCODE_SIZE; a 0 entry means
 * the packet carries a data block, so the data_length field (upper half
 * of the 4th header quadlet) is read and the 20-byte fixed part added.
 * That 4th quadlet may already have wrapped into the next descriptor's
 * buffer, which the (offset + 12 >= buf_size) branch handles.  For iso
 * (buffer-fill) contexts the length sits in the first quadlet.  The
 * result is rounded up to a quadlet multiple; -1/negative values signal
 * an unknown tcode to the caller.
 */
static __inline__ int packet_length(struct dma_rcv_ctx *d, int idx, quadlet_t *buf_ptr,
                         int offset, unsigned char tcode, int noswap)
{
	int length = -1;

	if (d->type == DMA_CTX_ASYNC_REQ || d->type == DMA_CTX_ASYNC_RESP) {
		length = TCODE_SIZE[tcode];
		if (length == 0) {
			if (offset + 12 >= d->buf_size) {
				/* 4th quadlet lives in the next buffer;
				 * index back from its start by however
				 * many quadlets still fit in this one. */
				length = (cond_le32_to_cpu(d->buf_cpu[(idx + 1) % d->num_desc]
						[3 - ((d->buf_size - offset) >> 2)], noswap) >> 16);
			} else {
				length = (cond_le32_to_cpu(buf_ptr[3], noswap) >> 16);
			}
			length += 20;
		}
	} else if (d->type == DMA_CTX_ISO) {
		/* Assumption: buffer fill mode with header/trailer */
		length = (cond_le32_to_cpu(buf_ptr[0], noswap) >> 16) + 8;
	}

	/* Round up to the next quadlet boundary. */
	if (length > 0 && length % 4)
		length += 4 - (length % 4);

	return length;
}
2620
/* Tasklet that processes dma receive buffers.
 *
 * Walks the receive buffer ring from (buf_ind, buf_offset), copying each
 * complete packet into the per-context staging buffer d->spb and handing
 * it to hpsb_packet_received().  Packets may straddle descriptor buffers
 * (the "split packet" path); consumed buffers are re-queued to the
 * hardware via insert_dma_buffer().  Runs entirely under d->lock.
 */
static void dma_rcv_tasklet (unsigned long data)
{
	struct dma_rcv_ctx *d = (struct dma_rcv_ctx*)data;
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
	unsigned int split_left, idx, offset, rescount;
	unsigned char tcode;
	int length, bytes_left, ack;
	unsigned long flags;
	quadlet_t *buf_ptr;
	char *split_ptr;
	char msg[256];

	spin_lock_irqsave(&d->lock, flags);

	/* Resume where the previous run left off. */
	idx = d->buf_ind;
	offset = d->buf_offset;
	buf_ptr = d->buf_cpu[idx] + offset/4;

	/* The descriptor status low half is resCount: bytes the hardware
	 * has NOT yet filled in this buffer. */
	rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
	bytes_left = d->buf_size - rescount - offset;

	while (bytes_left > 0) {
		tcode = (cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming) >> 4) & 0xf;

		/* packet_length() will return < 4 for an error */
		length = packet_length(d, idx, buf_ptr, offset, tcode, ohci->no_swap_incoming);

		if (length < 4) { /* something is wrong */
			sprintf(msg,"Unexpected tcode 0x%x(0x%08x) in AR ctx=%d, length=%d",
				tcode, cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming),
				d->ctx, length);
			ohci1394_stop_context(ohci, d->ctrlClear, msg);
			spin_unlock_irqrestore(&d->lock, flags);
			return;
		}

		/* The first case is where we have a packet that crosses
		 * over more than one descriptor. The next case is where
		 * it's all in the first descriptor.  */
		if ((offset + length) > d->buf_size) {
			DBGMSG("Split packet rcv'd");
			if (length > d->split_buf_size) {
				ohci1394_stop_context(ohci, d->ctrlClear,
					     "Split packet size exceeded");
				d->buf_ind = idx;
				d->buf_offset = offset;
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}

			/* status == buf_size means resCount is still full,
			 * i.e. nothing written into the next buffer yet. */
			if (le32_to_cpu(d->prg_cpu[(idx+1)%d->num_desc]->status)
			    == d->buf_size) {
				/* Other part of packet not written yet.
				 * this should never happen I think
				 * anyway we'll get it on the next call.  */
				PRINT(KERN_INFO,
				      "Got only half a packet!");
				d->buf_ind = idx;
				d->buf_offset = offset;
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}

			/* Gather the packet piecewise into d->spb, giving
			 * each drained buffer back to the hardware. */
			split_left = length;
			split_ptr = (char *)d->spb;
			memcpy(split_ptr,buf_ptr,d->buf_size-offset);
			split_left -= d->buf_size-offset;
			split_ptr += d->buf_size-offset;
			insert_dma_buffer(d, idx);
			idx = (idx+1) % d->num_desc;
			buf_ptr = d->buf_cpu[idx];
			offset=0;

			while (split_left >= d->buf_size) {
				memcpy(split_ptr,buf_ptr,d->buf_size);
				split_ptr += d->buf_size;
				split_left -= d->buf_size;
				insert_dma_buffer(d, idx);
				idx = (idx+1) % d->num_desc;
				buf_ptr = d->buf_cpu[idx];
			}

			if (split_left > 0) {
				memcpy(split_ptr, buf_ptr, split_left);
				offset = split_left;
				buf_ptr += offset/4;
			}
		} else {
			DBGMSG("Single packet rcv'd");
			memcpy(d->spb, buf_ptr, length);
			offset += length;
			buf_ptr += length/4;
			if (offset==d->buf_size) {
				insert_dma_buffer(d, idx);
				idx = (idx+1) % d->num_desc;
				buf_ptr = d->buf_cpu[idx];
				offset=0;
			}
		}

		/* We get one phy packet to the async descriptor for each
		 * bus reset. We always ignore it.  */
		if (tcode != OHCI1394_TCODE_PHY) {
			if (!ohci->no_swap_incoming)
				packet_swab(d->spb, tcode);
			DBGMSG("Packet received from node"
				" %d ack=0x%02X spd=%d tcode=0x%X"
				" length=%d ctx=%d tlabel=%d",
				(d->spb[1]>>16)&0x3f,
				(cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f,
				(cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>21)&0x3,
				tcode, length, d->ctx,
				(cond_le32_to_cpu(d->spb[0], ohci->no_swap_incoming)>>10)&0x3f);

			/* Last quadlet holds the xferStatus; ack code 0x11
			 * is ack_complete-with-data (see caller usage). */
			ack = (((cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f)
				== 0x11) ? 1 : 0;

			/* length-4: strip the trailing status quadlet. */
			hpsb_packet_received(ohci->host, d->spb,
					     length-4, ack);
		}
#ifdef OHCI1394_DEBUG
		else
			PRINT (KERN_DEBUG, "Got phy packet ctx=%d ... discarded",
			       d->ctx);
#endif

		/* Re-read resCount: the hardware may have delivered more
		 * data while we were processing. */
		rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;

		bytes_left = d->buf_size - rescount - offset;

	}

	/* Save position for the next invocation. */
	d->buf_ind = idx;
	d->buf_offset = offset;

	spin_unlock_irqrestore(&d->lock, flags);
}
2759
/* Bottom half that processes sent packets.
 *
 * Drains the transmit fifo_list in order: for each packet whose hardware
 * status has been written back, translates the OHCI event/ack code into
 * an hpsb ACK(X) value, notifies the stack via hpsb_packet_sent(),
 * unmaps any payload DMA mapping, and recycles the program descriptor.
 * Stops at the first packet whose status is still zero (not sent yet).
 * Runs under d->lock.
 */
static void dma_trm_tasklet (unsigned long data)
{
	struct dma_trm_ctx *d = (struct dma_trm_ctx*)data;
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
	struct hpsb_packet *packet, *ptmp;
	unsigned long flags;
	u32 status, ack;
	size_t datasize;

	spin_lock_irqsave(&d->lock, flags);

	list_for_each_entry_safe(packet, ptmp, &d->fifo_list, driver_list) {
		datasize = packet->data_size;
		/* Packets with a payload use a two-descriptor program;
		 * status then lives in the 'end' descriptor. */
		if (datasize && packet->type != hpsb_raw)
			status = le32_to_cpu(
				d->prg_cpu[d->sent_ind]->end.status) >> 16;
		else
			status = le32_to_cpu(
				d->prg_cpu[d->sent_ind]->begin.status) >> 16;

		if (status == 0)
			/* this packet hasn't been sent yet*/
			break;

#ifdef OHCI1394_DEBUG
		if (datasize)
			/* tcode 0xa == stream packet */
			if (((le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf) == 0xa)
				DBGMSG("Stream packet sent to channel %d tcode=0x%X "
				       "ack=0x%X spd=%d dataLength=%d ctx=%d",
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>8)&0x3f,
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
				       status&0x1f, (status>>5)&0x3,
				       le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16,
				       d->ctx);
			else
				DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
				       "%d ack=0x%X spd=%d dataLength=%d ctx=%d",
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16)&0x3f,
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>10)&0x3f,
				       status&0x1f, (status>>5)&0x3,
				       le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3])>>16,
				       d->ctx);
		else
			DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
			       "%d ack=0x%X spd=%d data=0x%08X ctx=%d",
				(le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])
					>>16)&0x3f,
				(le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
					>>4)&0xf,
				(le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
					>>10)&0x3f,
				status&0x1f, (status>>5)&0x3,
				le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3]),
				d->ctx);
#endif

		/* Bit 0x10 set: the low nibble is a real 1394 ack code.
		 * Otherwise the low 5 bits are an OHCI evt_* error that we
		 * map onto the stack's ACKX_* pseudo-acks. */
		if (status & 0x10) {
			ack = status & 0xf;
		} else {
			switch (status & 0x1f) {
			case EVT_NO_STATUS: /* that should never happen */
			case EVT_RESERVED_A: /* that should never happen */
			case EVT_LONG_PACKET: /* that should never happen */
				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_MISSING_ACK:
				ack = ACKX_TIMEOUT;
				break;
			case EVT_UNDERRUN:
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_OVERRUN: /* that should never happen */
				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_DESCRIPTOR_READ:
			case EVT_DATA_READ:
			case EVT_DATA_WRITE:
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_BUS_RESET: /* that should never happen */
				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_TIMEOUT:
				ack = ACKX_TIMEOUT;
				break;
			case EVT_TCODE_ERR:
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_RESERVED_B: /* that should never happen */
			case EVT_RESERVED_C: /* that should never happen */
				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_UNKNOWN:
			case EVT_FLUSHED:
				ack = ACKX_SEND_ERROR;
				break;
			default:
				PRINT(KERN_ERR, "Unhandled OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				BUG();
			}
		}

		list_del_init(&packet->driver_list);
		hpsb_packet_sent(ohci->host, packet, ack);

		if (datasize) {
			pci_unmap_single(ohci->dev,
					 cpu_to_le32(d->prg_cpu[d->sent_ind]->end.address),
					 datasize, PCI_DMA_TODEVICE);
			OHCI_DMA_FREE("single Xmit data packet");
		}

		d->sent_ind = (d->sent_ind+1)%d->num_desc;
		d->free_prgs++;
	}

	/* Queue up anything that was waiting for a free descriptor. */
	dma_trm_flush(ohci, d);

	spin_unlock_irqrestore(&d->lock, flags);
}
2887
2888 static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d)
2889 {
2890         if (d->ctrlClear) {
2891                 ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);
2892
2893                 if (d->type == DMA_CTX_ISO) {
2894                         /* disable interrupts */
2895                         reg_write(d->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << d->ctx);
2896                         ohci1394_unregister_iso_tasklet(d->ohci, &d->ohci->ir_legacy_tasklet);
2897                 } else {
2898                         tasklet_kill(&d->task);
2899                 }
2900         }
2901 }
2902
2903
2904 static void free_dma_rcv_ctx(struct dma_rcv_ctx *d)
2905 {
2906         int i;
2907         struct ti_ohci *ohci = d->ohci;
2908
2909         if (ohci == NULL)
2910                 return;
2911
2912         DBGMSG("Freeing dma_rcv_ctx %d", d->ctx);
2913
2914         if (d->buf_cpu) {
2915                 for (i=0; i<d->num_desc; i++)
2916                         if (d->buf_cpu[i] && d->buf_bus[i]) {
2917                                 pci_free_consistent(
2918                                         ohci->dev, d->buf_size,
2919                                         d->buf_cpu[i], d->buf_bus[i]);
2920                                 OHCI_DMA_FREE("consistent dma_rcv buf[%d]", i);
2921                         }
2922                 kfree(d->buf_cpu);
2923                 kfree(d->buf_bus);
2924         }
2925         if (d->prg_cpu) {
2926                 for (i=0; i<d->num_desc; i++)
2927                         if (d->prg_cpu[i] && d->prg_bus[i]) {
2928                                 pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
2929                                 OHCI_DMA_FREE("consistent dma_rcv prg[%d]", i);
2930                         }
2931                 pci_pool_destroy(d->prg_pool);
2932                 OHCI_DMA_FREE("dma_rcv prg pool");
2933                 kfree(d->prg_cpu);
2934                 kfree(d->prg_bus);
2935         }
2936         kfree(d->spb);
2937
2938         /* Mark this context as freed. */
2939         d->ohci = NULL;
2940 }
2941
2942 static int
2943 alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
2944                   enum context_type type, int ctx, int num_desc,
2945                   int buf_size, int split_buf_size, int context_base)
2946 {
2947         int i, len;
2948         static int num_allocs;
2949         static char pool_name[20];
2950
2951         d->ohci = ohci;
2952         d->type = type;
2953         d->ctx = ctx;
2954
2955         d->num_desc = num_desc;
2956         d->buf_size = buf_size;
2957         d->split_buf_size = split_buf_size;
2958
2959         d->ctrlSet = 0;
2960         d->ctrlClear = 0;
2961         d->cmdPtr = 0;
2962
2963         d->buf_cpu = kmalloc(d->num_desc * sizeof(quadlet_t*), GFP_ATOMIC);
2964         d->buf_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_ATOMIC);
2965
2966         if (d->buf_cpu == NULL || d->buf_bus == NULL) {
2967                 PRINT(KERN_ERR, "Failed to allocate dma buffer");
2968                 free_dma_rcv_ctx(d);
2969                 return -ENOMEM;
2970         }
2971         memset(d->buf_cpu, 0, d->num_desc * sizeof(quadlet_t*));
2972         memset(d->buf_bus, 0, d->num_desc * sizeof(dma_addr_t));
2973
2974         d->prg_cpu = kmalloc(d->num_desc * sizeof(struct dma_cmd*),
2975                                 GFP_ATOMIC);
2976         d->prg_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_ATOMIC);
2977
2978         if (d->prg_cpu == NULL || d->prg_bus == NULL) {
2979                 PRINT(KERN_ERR, "Failed to allocate dma prg");
2980                 free_dma_rcv_ctx(d);
2981                 return -ENOMEM;
2982         }
2983         memset(d->prg_cpu, 0, d->num_desc * sizeof(struct dma_cmd*));
2984         memset(d->prg_bus, 0, d->num_desc * sizeof(dma_addr_t));
2985
2986         d->spb = kmalloc(d->split_buf_size, GFP_ATOMIC);
2987
2988         if (d->spb == NULL) {
2989                 PRINT(KERN_ERR, "Failed to allocate split buffer");
2990                 free_dma_rcv_ctx(d);
2991                 return -ENOMEM;
2992         }
2993         
2994         len = sprintf(pool_name, "ohci1394_rcv_prg");
2995         sprintf(pool_name+len, "%d", num_allocs);
2996         d->prg_pool = pci_pool_create(pool_name, ohci->dev,
2997                                 sizeof(struct dma_cmd), 4, 0);
2998         if(d->prg_pool == NULL)
2999         {
3000                 PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
3001                 free_dma_rcv_ctx(d);
3002                 return -ENOMEM;
3003         }
3004         num_allocs++;
3005
3006         OHCI_DMA_ALLOC("dma_rcv prg pool");
3007
3008         for (i=0; i<d->num_desc; i++) {
3009                 d->buf_cpu[i] = pci_alloc_consistent(ohci->dev,
3010                                                      d->buf_size,
3011                                                      d->buf_bus+i);
3012                 OHCI_DMA_ALLOC("consistent dma_rcv buf[%d]", i);
3013
3014                 if (d->buf_cpu[i] != NULL) {
3015                         memset(d->buf_cpu[i], 0, d->buf_size);
3016                 } else {
3017                         PRINT(KERN_ERR,
3018                               "Failed to allocate dma buffer");
3019                         free_dma_rcv_ctx(d);
3020                         return -ENOMEM;
3021                 }
3022
3023                 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
3024                 OHCI_DMA_ALLOC("pool dma_rcv prg[%d]", i);
3025
3026                 if (d->prg_cpu[i] != NULL) {
3027                         memset(d->prg_cpu[i], 0, sizeof(struct dma_cmd));
3028                 } else {
3029                         PRINT(KERN_ERR,
3030                               "Failed to allocate dma prg");
3031                         free_dma_rcv_ctx(d);
3032                         return -ENOMEM;
3033                 }
3034         }
3035
3036         spin_lock_init(&d->lock);
3037
3038         if (type == DMA_CTX_ISO) {
3039                 ohci1394_init_iso_tasklet(&ohci->ir_legacy_tasklet,
3040                                           OHCI_ISO_MULTICHANNEL_RECEIVE,
3041                                           dma_rcv_tasklet, (unsigned long) d);
3042         } else {
3043                 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
3044                 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
3045                 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
3046
3047                 tasklet_init (&d->task, dma_rcv_tasklet, (unsigned long) d);
3048         }
3049
3050         return 0;
3051 }
3052
3053 static void free_dma_trm_ctx(struct dma_trm_ctx *d)
3054 {
3055         int i;
3056         struct ti_ohci *ohci = d->ohci;
3057
3058         if (ohci == NULL)
3059                 return;
3060
3061         DBGMSG("Freeing dma_trm_ctx %d", d->ctx);
3062
3063         if (d->prg_cpu) {
3064                 for (i=0; i<d->num_desc; i++)
3065                         if (d->prg_cpu[i] && d->prg_bus[i]) {
3066                                 pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
3067                                 OHCI_DMA_FREE("pool dma_trm prg[%d]", i);
3068                         }
3069                 pci_pool_destroy(d->prg_pool);
3070                 OHCI_DMA_FREE("dma_trm prg pool");
3071                 kfree(d->prg_cpu);
3072                 kfree(d->prg_bus);
3073         }
3074
3075         /* Mark this context as freed. */
3076         d->ohci = NULL;
3077 }
3078
3079 static int
3080 alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
3081                   enum context_type type, int ctx, int num_desc,
3082                   int context_base)
3083 {
3084         int i, len;
3085         static char pool_name[20];
3086         static int num_allocs=0;
3087
3088         d->ohci = ohci;
3089         d->type = type;
3090         d->ctx = ctx;
3091         d->num_desc = num_desc;
3092         d->ctrlSet = 0;
3093         d->ctrlClear = 0;
3094         d->cmdPtr = 0;
3095
3096         d->prg_cpu = kmalloc(d->num_desc * sizeof(struct at_dma_prg*),
3097                              GFP_KERNEL);
3098         d->prg_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_KERNEL);
3099
3100         if (d->prg_cpu == NULL || d->prg_bus == NULL) {
3101                 PRINT(KERN_ERR, "Failed to allocate at dma prg");
3102                 free_dma_trm_ctx(d);
3103                 return -ENOMEM;
3104         }
3105         memset(d->prg_cpu, 0, d->num_desc * sizeof(struct at_dma_prg*));
3106         memset(d->prg_bus, 0, d->num_desc * sizeof(dma_addr_t));
3107
3108         len = sprintf(pool_name, "ohci1394_trm_prg");
3109         sprintf(pool_name+len, "%d", num_allocs);
3110         d->prg_pool = pci_pool_create(pool_name, ohci->dev,
3111                                 sizeof(struct at_dma_prg), 4, 0);
3112         if (d->prg_pool == NULL) {
3113                 PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
3114                 free_dma_trm_ctx(d);
3115                 return -ENOMEM;
3116         }
3117         num_allocs++;
3118
3119         OHCI_DMA_ALLOC("dma_rcv prg pool");
3120
3121         for (i = 0; i < d->num_desc; i++) {
3122                 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
3123                 OHCI_DMA_ALLOC("pool dma_trm prg[%d]", i);
3124
3125                 if (d->prg_cpu[i] != NULL) {
3126                         memset(d->prg_cpu[i], 0, sizeof(struct at_dma_prg));
3127                 } else {
3128                         PRINT(KERN_ERR,
3129                               "Failed to allocate at dma prg");
3130                         free_dma_trm_ctx(d);
3131                         return -ENOMEM;
3132                 }
3133         }
3134
3135         spin_lock_init(&d->lock);
3136
3137         /* initialize tasklet */
3138         if (type == DMA_CTX_ISO) {
3139                 ohci1394_init_iso_tasklet(&ohci->it_legacy_tasklet, OHCI_ISO_TRANSMIT,
3140                                           dma_trm_tasklet, (unsigned long) d);
3141                 if (ohci1394_register_iso_tasklet(ohci,
3142                                                   &ohci->it_legacy_tasklet) < 0) {
3143                         PRINT(KERN_ERR, "No IT DMA context available");
3144                         free_dma_trm_ctx(d);
3145                         return -EBUSY;
3146                 }
3147
3148                 /* IT can be assigned to any context by register_iso_tasklet */
3149                 d->ctx = ohci->it_legacy_tasklet.context;
3150                 d->ctrlSet = OHCI1394_IsoXmitContextControlSet + 16 * d->ctx;
3151                 d->ctrlClear = OHCI1394_IsoXmitContextControlClear + 16 * d->ctx;
3152                 d->cmdPtr = OHCI1394_IsoXmitCommandPtr + 16 * d->ctx;
3153         } else {
3154                 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
3155                 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
3156                 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
3157                 tasklet_init (&d->task, dma_trm_tasklet, (unsigned long)d);
3158         }
3159
3160         return 0;
3161 }
3162
/* Install a new config ROM image in the controller: update the ROM
 * header and bus-options registers immediately, and copy the full image
 * into the DMA-visible buffer the controller fetches ROM reads from. */
static void ohci_set_hw_config_rom(struct hpsb_host *host, quadlet_t *config_rom)
{
	struct ti_ohci *ohci = host->hostdata;

	/* config_rom[0] is the ROM header, config_rom[2] the bus options;
	 * the image is big-endian, hence the be32_to_cpu() conversions. */
	reg_write(ohci, OHCI1394_ConfigROMhdr, be32_to_cpu(config_rom[0]));
	reg_write(ohci, OHCI1394_BusOptions, be32_to_cpu(config_rom[2]));

	memcpy(ohci->csr_config_rom_cpu, config_rom, OHCI_CONFIG_ROM_LEN);
}
3172
3173
3174 static quadlet_t ohci_hw_csr_reg(struct hpsb_host *host, int reg,
3175                                  quadlet_t data, quadlet_t compare)
3176 {
3177         struct ti_ohci *ohci = host->hostdata;
3178         int i;
3179
3180         reg_write(ohci, OHCI1394_CSRData, data);
3181         reg_write(ohci, OHCI1394_CSRCompareData, compare);
3182         reg_write(ohci, OHCI1394_CSRControl, reg & 0x3);
3183
3184         for (i = 0; i < OHCI_LOOP_COUNT; i++) {
3185                 if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
3186                         break;
3187
3188                 mdelay(1);
3189         }
3190
3191         return reg_read(ohci, OHCI1394_CSRData);
3192 }
3193
/* Host-driver operations exported to the ieee1394 core. */
static struct hpsb_host_driver ohci1394_driver = {
	.owner =		THIS_MODULE,
	.name =			OHCI1394_DRIVER_NAME,
	.set_hw_config_rom =	ohci_set_hw_config_rom,
	.transmit_packet =	ohci_transmit,
	.devctl =		ohci_devctl,
	.isoctl =               ohci_isoctl,
	.hw_csr_reg =		ohci_hw_csr_reg,
};
3203
3204 \f
3205
3206 /***********************************
3207  * PCI Driver Interface functions  *
3208  ***********************************/
3209
/* Probe-time error exit: log the message, unwind everything done so far
 * (ohci1394_pci_remove() consults ohci->init_state to know how far we
 * got), and return 'err' from ohci1394_pci_probe().  Only usable inside
 * the probe function, where 'dev' is in scope; #undef'd at its end. */
#define FAIL(err, fmt, args...)			\
do {						\
	PRINT_G(KERN_ERR, fmt , ## args);	\
	ohci1394_pci_remove(dev);		\
	return err;				\
} while (0)
3216
/* Probe one OHCI-1394 controller: enable the PCI device, map its MMIO
 * registers, allocate the config-ROM / self-ID / DMA-context buffers,
 * install the shared interrupt handler, and register the host with the
 * ieee1394 core.  ohci->init_state records each completed stage so that
 * the FAIL() macro (via ohci1394_pci_remove()) unwinds exactly as far
 * as we got on error.  Returns 0 on success, negative errno on
 * failure. */
static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
					const struct pci_device_id *ent)
{
	static int version_printed = 0;

	struct hpsb_host *host;
	struct ti_ohci *ohci;	/* shortcut to currently handled device */
	unsigned long ohci_base;

	/* Print the driver banner once, on the first probe only. */
	if (version_printed++ == 0)
		PRINT_G(KERN_INFO, "%s", version);

	if (pci_enable_device(dev))
		FAIL(-ENXIO, "Failed to enable OHCI hardware");
	pci_set_master(dev);

	host = hpsb_alloc_host(&ohci1394_driver, sizeof(struct ti_ohci), &dev->dev);
	if (!host) FAIL(-ENOMEM, "Failed to allocate host structure");

	ohci = host->hostdata;
	ohci->dev = dev;
	ohci->host = host;
	ohci->init_state = OHCI_INIT_ALLOC_HOST;
	host->pdev = dev;
	pci_set_drvdata(dev, ohci);

	/* We don't want hardware swapping */
	pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);

	/* Some oddball Apple controllers do not order the selfid
	 * properly, so we make up for it here.  */
#ifndef __LITTLE_ENDIAN
	/* XXX: Need a better way to check this. I'm wondering if we can
	 * read the values of the OHCI1394_PCI_HCI_Control and the
	 * noByteSwapData registers to see if they were not cleared to
	 * zero. Should this work? Obviously it's not defined what these
	 * registers will read when they aren't supported. Bleh! */
	if (dev->vendor == PCI_VENDOR_ID_APPLE &&
	    dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) {
		ohci->no_swap_incoming = 1;
		ohci->selfid_swap = 0;
	} else
		ohci->selfid_swap = 1;
#endif


#ifndef PCI_DEVICE_ID_NVIDIA_NFORCE2_FW
#define PCI_DEVICE_ID_NVIDIA_NFORCE2_FW 0x006e
#endif

	/* These chipsets require a bit of extra care when checking after
	 * a busreset.  */
	if ((dev->vendor == PCI_VENDOR_ID_APPLE &&
	     dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) ||
	    (dev->vendor ==  PCI_VENDOR_ID_NVIDIA &&
	     dev->device == PCI_DEVICE_ID_NVIDIA_NFORCE2_FW))
		ohci->check_busreset = 1;

	/* We hardwire the MMIO length, since some CardBus adaptors
	 * fail to report the right length.  Anyway, the ohci spec
	 * clearly says it's 2kb, so this shouldn't be a problem. */
	ohci_base = pci_resource_start(dev, 0);
	if (pci_resource_len(dev, 0) != OHCI1394_REGISTER_SIZE)
		PRINT(KERN_WARNING, "Unexpected PCI resource length of %lx!",
		      pci_resource_len(dev, 0));

	/* Seems PCMCIA handles this internally. Not sure why. Seems
	 * pretty bogus to force a driver to special case this.  */
#ifndef PCMCIA
	if (!request_mem_region (ohci_base, OHCI1394_REGISTER_SIZE, OHCI1394_DRIVER_NAME))
		FAIL(-ENOMEM, "MMIO resource (0x%lx - 0x%lx) unavailable",
		     ohci_base, ohci_base + OHCI1394_REGISTER_SIZE);
#endif
	ohci->init_state = OHCI_INIT_HAVE_MEM_REGION;

	ohci->registers = ioremap(ohci_base, OHCI1394_REGISTER_SIZE);
	if (ohci->registers == NULL)
		FAIL(-ENXIO, "Failed to remap registers - card not accessible");
	ohci->init_state = OHCI_INIT_HAVE_IOMAPPING;
	DBGMSG("Remapped memory spaces reg 0x%p", ohci->registers);

	/* csr_config rom allocation */
	ohci->csr_config_rom_cpu =
		pci_alloc_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
				     &ohci->csr_config_rom_bus);
	OHCI_DMA_ALLOC("consistent csr_config_rom");
	if (ohci->csr_config_rom_cpu == NULL)
		FAIL(-ENOMEM, "Failed to allocate buffer config rom");
	ohci->init_state = OHCI_INIT_HAVE_CONFIG_ROM_BUFFER;

	/* self-id dma buffer allocation */
	ohci->selfid_buf_cpu =
		pci_alloc_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
		      &ohci->selfid_buf_bus);
	OHCI_DMA_ALLOC("consistent selfid_buf");

	if (ohci->selfid_buf_cpu == NULL)
		FAIL(-ENOMEM, "Failed to allocate DMA buffer for self-id packets");
	ohci->init_state = OHCI_INIT_HAVE_SELFID_BUFFER;

	/* Warn (but proceed) if the buffer crosses the alignment some
	 * Sony CXD3222 chips are reported to need. */
	if ((unsigned long)ohci->selfid_buf_cpu & 0x1fff)
		PRINT(KERN_INFO, "SelfID buffer %p is not aligned on "
		      "8Kb boundary... may cause problems on some CXD3222 chip",
		      ohci->selfid_buf_cpu);

	/* No self-id errors at startup */
	ohci->self_id_errors = 0;

	ohci->init_state = OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE;
	/* AR DMA request context allocation */
	if (alloc_dma_rcv_ctx(ohci, &ohci->ar_req_context,
			      DMA_CTX_ASYNC_REQ, 0, AR_REQ_NUM_DESC,
			      AR_REQ_BUF_SIZE, AR_REQ_SPLIT_BUF_SIZE,
			      OHCI1394_AsReqRcvContextBase) < 0)
		FAIL(-ENOMEM, "Failed to allocate AR Req context");

	/* AR DMA response context allocation */
	if (alloc_dma_rcv_ctx(ohci, &ohci->ar_resp_context,
			      DMA_CTX_ASYNC_RESP, 0, AR_RESP_NUM_DESC,
			      AR_RESP_BUF_SIZE, AR_RESP_SPLIT_BUF_SIZE,
			      OHCI1394_AsRspRcvContextBase) < 0)
		FAIL(-ENOMEM, "Failed to allocate AR Resp context");

	/* AT DMA request context */
	if (alloc_dma_trm_ctx(ohci, &ohci->at_req_context,
			      DMA_CTX_ASYNC_REQ, 0, AT_REQ_NUM_DESC,
			      OHCI1394_AsReqTrContextBase) < 0)
		FAIL(-ENOMEM, "Failed to allocate AT Req context");

	/* AT DMA response context */
	if (alloc_dma_trm_ctx(ohci, &ohci->at_resp_context,
			      DMA_CTX_ASYNC_RESP, 1, AT_RESP_NUM_DESC,
			      OHCI1394_AsRspTrContextBase) < 0)
		FAIL(-ENOMEM, "Failed to allocate AT Resp context");

	/* Start off with a soft reset, to clear everything to a sane
	 * state. */
	ohci_soft_reset(ohci);

	/* Now enable LPS, which we need in order to start accessing
	 * most of the registers.  In fact, on some cards (ALI M5251),
	 * accessing registers in the SClk domain without LPS enabled
	 * will lock up the machine.  Wait 50msec to make sure we have
	 * full link enabled.  */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);

	/* Disable and clear interrupts */
	reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);

	mdelay(50);

	/* Determine the number of available IR and IT contexts. */
	ohci->nb_iso_rcv_ctx =
		get_nb_iso_ctx(ohci, OHCI1394_IsoRecvIntMaskSet);
	DBGMSG("%d iso receive contexts available",
	       ohci->nb_iso_rcv_ctx);

	ohci->nb_iso_xmit_ctx =
		get_nb_iso_ctx(ohci, OHCI1394_IsoXmitIntMaskSet);
	DBGMSG("%d iso transmit contexts available",
	       ohci->nb_iso_xmit_ctx);

	/* Set the usage bits for non-existent contexts so they can't
	 * be allocated */
	ohci->ir_ctx_usage = ~0 << ohci->nb_iso_rcv_ctx;
	ohci->it_ctx_usage = ~0 << ohci->nb_iso_xmit_ctx;

	INIT_LIST_HEAD(&ohci->iso_tasklet_list);
	spin_lock_init(&ohci->iso_tasklet_list_lock);
	ohci->ISO_channel_usage = 0;
	spin_lock_init(&ohci->IR_channel_lock);

	/* Allocate the IR DMA context right here so we don't have
	 * to do it in interrupt path - note that this doesn't
	 * waste much memory and avoids the jugglery required to
	 * allocate it in IRQ path. */
	if (alloc_dma_rcv_ctx(ohci, &ohci->ir_legacy_context,
			      DMA_CTX_ISO, 0, IR_NUM_DESC,
			      IR_BUF_SIZE, IR_SPLIT_BUF_SIZE,
			      OHCI1394_IsoRcvContextBase) < 0) {
		FAIL(-ENOMEM, "Cannot allocate IR Legacy DMA context");
	}

	/* We hopefully don't have to pre-allocate IT DMA like we did
	 * for IR DMA above. Allocate it on-demand and mark inactive. */
	ohci->it_legacy_context.ohci = NULL;

	if (request_irq(dev->irq, ohci_irq_handler, SA_SHIRQ,
			 OHCI1394_DRIVER_NAME, ohci))
		FAIL(-ENOMEM, "Failed to allocate shared interrupt %d", dev->irq);

	ohci->init_state = OHCI_INIT_HAVE_IRQ;
	ohci_initialize(ohci);

	/* Set certain csr values */
	host->csr.guid_hi = reg_read(ohci, OHCI1394_GUIDHi);
	host->csr.guid_lo = reg_read(ohci, OHCI1394_GUIDLo);
	host->csr.cyc_clk_acc = 100;  /* how do we determine clk accuracy? */
	host->csr.max_rec = (reg_read(ohci, OHCI1394_BusOptions) >> 12) & 0xf;
	host->csr.lnk_spd = reg_read(ohci, OHCI1394_BusOptions) & 0x7;

	/* Tell the highlevel this host is ready */
	if (hpsb_add_host(host))
		FAIL(-ENOMEM, "Failed to register host with highlevel");

	ohci->init_state = OHCI_INIT_DONE;

	return 0;
#undef FAIL
}
3428
3429 static void ohci1394_pci_remove(struct pci_dev *pdev)
3430 {
3431         struct ti_ohci *ohci;
3432         struct device *dev;
3433
3434         ohci = pci_get_drvdata(pdev);
3435         if (!ohci)
3436                 return;
3437
3438         dev = get_device(&ohci->host->device);
3439
3440         switch (ohci->init_state) {
3441         case OHCI_INIT_DONE:
3442                 hpsb_remove_host(ohci->host);
3443
3444                 /* Clear out BUS Options */
3445                 reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
3446                 reg_write(ohci, OHCI1394_BusOptions,
3447                           (reg_read(ohci, OHCI1394_BusOptions) & 0x0000f007) |
3448                           0x00ff0000);
3449                 memset(ohci->csr_config_rom_cpu, 0, OHCI_CONFIG_ROM_LEN);
3450
3451         case OHCI_INIT_HAVE_IRQ:
3452                 /* Clear interrupt registers */
3453                 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3454                 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3455                 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
3456                 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
3457                 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
3458                 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
3459
3460                 /* Disable IRM Contender */
3461                 set_phy_reg(ohci, 4, ~0xc0 & get_phy_reg(ohci, 4));
3462
3463                 /* Clear link control register */
3464                 reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
3465
3466                 /* Let all other nodes know to ignore us */
3467                 ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
3468
3469                 /* Soft reset before we start - this disables
3470                  * interrupts and clears linkEnable and LPS. */
3471                 ohci_soft_reset(ohci);
3472                 free_irq(ohci->dev->irq, ohci);
3473
3474         case OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE:
3475                 /* The ohci_soft_reset() stops all DMA contexts, so we
3476                  * dont need to do this.  */
3477                 /* Free AR dma */
3478                 free_dma_rcv_ctx(&ohci->ar_req_context);
3479                 free_dma_rcv_ctx(&ohci->ar_resp_context);
3480
3481                 /* Free AT dma */
3482                 free_dma_trm_ctx(&ohci->at_req_context);
3483                 free_dma_trm_ctx(&ohci->at_resp_context);
3484
3485                 /* Free IR dma */
3486                 free_dma_rcv_ctx(&ohci->ir_legacy_context);
3487
3488                 /* Free IT dma */
3489                 free_dma_trm_ctx(&ohci->it_legacy_context);
3490
3491                 /* Free IR legacy dma */
3492                 free_dma_rcv_ctx(&ohci->ir_legacy_context);
3493
3494
3495         case OHCI_INIT_HAVE_SELFID_BUFFER:
3496                 pci_free_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
3497                                     ohci->selfid_buf_cpu,
3498                                     ohci->selfid_buf_bus);
3499                 OHCI_DMA_FREE("consistent selfid_buf");
3500
3501         case OHCI_INIT_HAVE_CONFIG_ROM_BUFFER:
3502                 pci_free_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
3503                                     ohci->csr_config_rom_cpu,
3504                                     ohci->csr_config_rom_bus);
3505                 OHCI_DMA_FREE("consistent csr_config_rom");
3506
3507         case OHCI_INIT_HAVE_IOMAPPING:
3508                 iounmap(ohci->registers);
3509
3510         case OHCI_INIT_HAVE_MEM_REGION:
3511 #ifndef PCMCIA
3512                 release_mem_region(pci_resource_start(ohci->dev, 0),
3513                                    OHCI1394_REGISTER_SIZE);
3514 #endif
3515
3516 #ifdef CONFIG_PPC_PMAC
3517         /* On UniNorth, power down the cable and turn off the chip
3518          * clock when the module is removed to save power on
3519          * laptops. Turning it back ON is done by the arch code when
3520          * pci_enable_device() is called */
3521         {
3522                 struct device_node* of_node;
3523
3524                 of_node = pci_device_to_OF_node(ohci->dev);
3525                 if (of_node) {
3526                         pmac_call_feature(PMAC_FTR_1394_ENABLE, of_node, 0, 0);
3527                         pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, of_node, 0, 0);
3528                 }
3529         }
3530 #endif /* CONFIG_PPC_PMAC */
3531
3532         case OHCI_INIT_ALLOC_HOST:
3533                 pci_set_drvdata(ohci->dev, NULL);
3534         }
3535
3536         if (dev)
3537                 put_device(dev);
3538 }
3539
3540
/* PCI resume callback.  On pmac hardware, re-enable the FireWire cell
 * (the suspend hook powered it down) before touching the device, then
 * re-enable the PCI device itself.
 *
 * NOTE(review): no controller state is restored here; recovery
 * presumably relies on a subsequent bus reset -- confirm. */
static int ohci1394_pci_resume (struct pci_dev *pdev)
{
#ifdef CONFIG_PPC_PMAC
	if (_machine == _MACH_Pmac) {
		struct device_node *of_node;

		/* Re-enable 1394 */
		of_node = pci_device_to_OF_node (pdev);
		if (of_node)
			pmac_call_feature (PMAC_FTR_1394_ENABLE, of_node, 0, 1);
	}
#endif /* CONFIG_PPC_PMAC */

	/* Propagate a pci_enable_device() failure instead of silently
	 * returning success -- resuming with a disabled device would
	 * fault on the first register access. */
	return pci_enable_device(pdev);
}
3558
3559
/* PCI suspend callback.  No controller state is saved here; on pmac
 * hardware the FireWire cell is powered down to save energy, otherwise
 * this is a no-op.  Always reports success. */
static int ohci1394_pci_suspend (struct pci_dev *pdev, pm_message_t state)
{
#ifdef CONFIG_PPC_PMAC
	if (_machine == _MACH_Pmac) {
		struct device_node *of_node;

		/* Disable 1394 */
		of_node = pci_device_to_OF_node (pdev);
		if (of_node)
			pmac_call_feature(PMAC_FTR_1394_ENABLE, of_node, 0, 0);
	}
#endif

	return 0;
}
3575
3576
/* Match any PCI device advertising the OHCI FireWire programming
 * interface (class 0x0c00, prog-if 0x10), regardless of vendor and
 * device IDs -- the OHCI register layout is standardized. */
#define PCI_CLASS_FIREWIRE_OHCI     ((PCI_CLASS_SERIAL_FIREWIRE << 8) | 0x10)

static struct pci_device_id ohci1394_pci_tbl[] = {
	{
		.class =	PCI_CLASS_FIREWIRE_OHCI,
		.class_mask =	PCI_ANY_ID,
		.vendor =	PCI_ANY_ID,
		.device =	PCI_ANY_ID,
		.subvendor =	PCI_ANY_ID,
		.subdevice =	PCI_ANY_ID,
	},
	{ 0, },		/* terminator */
};
3590
3591 MODULE_DEVICE_TABLE(pci, ohci1394_pci_tbl);
3592
/* PCI driver glue binding the class-based ID table to the probe,
 * remove and power-management entry points above. */
static struct pci_driver ohci1394_pci_driver = {
	.name =		OHCI1394_DRIVER_NAME,
	.id_table =	ohci1394_pci_tbl,
	.probe =	ohci1394_pci_probe,
	.remove =	ohci1394_pci_remove,
	.resume =	ohci1394_pci_resume,
	.suspend =	ohci1394_pci_suspend,
};
3601
3602 \f
3603
3604 /***********************************
3605  * OHCI1394 Video Interface        *
3606  ***********************************/
3607
3608 /* essentially the only purpose of this code is to allow another
3609    module to hook into ohci's interrupt handler */
3610
3611 int ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg)
3612 {
3613         int i=0;
3614
3615         /* stop the channel program if it's still running */
3616         reg_write(ohci, reg, 0x8000);
3617
3618         /* Wait until it effectively stops */
3619         while (reg_read(ohci, reg) & 0x400) {
3620                 i++;
3621                 if (i>5000) {
3622                         PRINT(KERN_ERR,
3623                               "Runaway loop while stopping context: %s...", msg ? msg : "");
3624                         return 1;
3625                 }
3626
3627                 mb();
3628                 udelay(10);
3629         }
3630         if (msg) PRINT(KERN_ERR, "%s: dma prg stopped", msg);
3631         return 0;
3632 }
3633
3634 void ohci1394_init_iso_tasklet(struct ohci1394_iso_tasklet *tasklet, int type,
3635                                void (*func)(unsigned long), unsigned long data)
3636 {
3637         tasklet_init(&tasklet->tasklet, func, data);
3638         tasklet->type = type;
3639         /* We init the tasklet->link field, so we can list_del() it
3640          * without worrying whether it was added to the list or not. */
3641         INIT_LIST_HEAD(&tasklet->link);
3642 }
3643
3644 int ohci1394_register_iso_tasklet(struct ti_ohci *ohci,
3645                                   struct ohci1394_iso_tasklet *tasklet)
3646 {
3647         unsigned long flags, *usage;
3648         int n, i, r = -EBUSY;
3649
3650         if (tasklet->type == OHCI_ISO_TRANSMIT) {
3651                 n = ohci->nb_iso_xmit_ctx;
3652                 usage = &ohci->it_ctx_usage;
3653         }
3654         else {
3655                 n = ohci->nb_iso_rcv_ctx;
3656                 usage = &ohci->ir_ctx_usage;
3657
3658                 /* only one receive context can be multichannel (OHCI sec 10.4.1) */
3659                 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3660                         if (test_and_set_bit(0, &ohci->ir_multichannel_used)) {
3661                                 return r;
3662                         }
3663                 }
3664         }
3665
3666         spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3667
3668         for (i = 0; i < n; i++)
3669                 if (!test_and_set_bit(i, usage)) {
3670                         tasklet->context = i;
3671                         list_add_tail(&tasklet->link, &ohci->iso_tasklet_list);
3672                         r = 0;
3673                         break;
3674                 }
3675
3676         spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
3677
3678         return r;
3679 }
3680
3681 void ohci1394_unregister_iso_tasklet(struct ti_ohci *ohci,
3682                                      struct ohci1394_iso_tasklet *tasklet)
3683 {
3684         unsigned long flags;
3685
3686         tasklet_kill(&tasklet->tasklet);
3687
3688         spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3689
3690         if (tasklet->type == OHCI_ISO_TRANSMIT)
3691                 clear_bit(tasklet->context, &ohci->it_ctx_usage);
3692         else {
3693                 clear_bit(tasklet->context, &ohci->ir_ctx_usage);
3694
3695                 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3696                         clear_bit(0, &ohci->ir_multichannel_used);
3697                 }
3698         }
3699
3700         list_del(&tasklet->link);
3701
3702         spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
3703 }
3704
3705 EXPORT_SYMBOL(ohci1394_stop_context);
3706 EXPORT_SYMBOL(ohci1394_init_iso_tasklet);
3707 EXPORT_SYMBOL(ohci1394_register_iso_tasklet);
3708 EXPORT_SYMBOL(ohci1394_unregister_iso_tasklet);
3709
3710
3711 /***********************************
3712  * General module initialization   *
3713  ***********************************/
3714
3715 MODULE_AUTHOR("Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>");
3716 MODULE_DESCRIPTION("Driver for PCI OHCI IEEE-1394 controllers");
3717 MODULE_LICENSE("GPL");
3718
/* Module exit: unregister the PCI driver; the PCI core then invokes
 * ohci1394_pci_remove() for every bound device. */
static void __exit ohci1394_cleanup (void)
{
	pci_unregister_driver(&ohci1394_pci_driver);
}
3723
/* Module init: register with the PCI core; matching devices are then
 * probed through ohci1394_pci_probe(). */
static int __init ohci1394_init(void)
{
	return pci_register_driver(&ohci1394_pci_driver);
}
3728
3729 module_init(ohci1394_init);
3730 module_exit(ohci1394_cleanup);