1 /*
2  * Support for the Tundra TSI148 VME-PCI Bridge Chip
3  *
4  * Author: Martyn Welch <martyn.welch@gefanuc.com>
5  * Copyright 2008 GE Fanuc Intelligent Platforms Embedded Systems, Inc.
6  *
7  * Based on work by Tom Armistead and Ajit Prem
8  * Copyright 2004 Motorola Inc.
9  *
10  * This program is free software; you can redistribute  it and/or modify it
11  * under  the terms of  the GNU General  Public License as published by the
12  * Free Software Foundation;  either version 2 of the  License, or (at your
13  * option) any later version.
14  */
15
16 #include <linux/module.h>
17 #include <linux/moduleparam.h>
18 #include <linux/mm.h>
19 #include <linux/types.h>
20 #include <linux/errno.h>
21 #include <linux/proc_fs.h>
22 #include <linux/pci.h>
23 #include <linux/poll.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/interrupt.h>
26 #include <linux/spinlock.h>
27 #include <linux/sched.h>
28 #include <asm/time.h>
29 #include <asm/io.h>
30 #include <asm/uaccess.h>
31
32 #include "../vme.h"
33 #include "../vme_bridge.h"
34 #include "vme_tsi148.h"
35
36 static int __init tsi148_init(void);
37 static int tsi148_probe(struct pci_dev *, const struct pci_device_id *);
38 static void tsi148_remove(struct pci_dev *);
39 static void __exit tsi148_exit(void);
40
41
42 int tsi148_slave_set(struct vme_slave_resource *, int, unsigned long long,
43         unsigned long long, dma_addr_t, vme_address_t, vme_cycle_t);
44 int tsi148_slave_get(struct vme_slave_resource *, int *, unsigned long long *,
45         unsigned long long *, dma_addr_t *, vme_address_t *, vme_cycle_t *);
46
47 int tsi148_master_get(struct vme_master_resource *, int *, unsigned long long *,
48         unsigned long long *, vme_address_t *, vme_cycle_t *, vme_width_t *);
49 int tsi148_master_set(struct vme_master_resource *, int, unsigned long long,
50         unsigned long long, vme_address_t, vme_cycle_t, vme_width_t);
51 ssize_t tsi148_master_read(struct vme_master_resource *, void *, size_t,
52         loff_t);
53 ssize_t tsi148_master_write(struct vme_master_resource *, void *, size_t,
54         loff_t);
55 unsigned int tsi148_master_rmw(struct vme_master_resource *, unsigned int,
56         unsigned int, unsigned int, loff_t);
57 int tsi148_dma_list_add(struct vme_dma_list *, struct vme_dma_attr *,
58         struct vme_dma_attr *, size_t);
59 int tsi148_dma_list_exec(struct vme_dma_list *);
60 int tsi148_dma_list_empty(struct vme_dma_list *);
61 int tsi148_generate_irq(int, int);
62 int tsi148_slot_get(void);
63
64 /* Module parameters */
65 static int err_chk;             /* Perform error checking on VME reads and writes */
66 static int geoid;               /* Override geographical addressing (slot number) */
67
68 /* XXX These should all be in a per device structure */
69 static struct vme_bridge *tsi148_bridge;
70 static wait_queue_head_t dma_queue[2];
71 static wait_queue_head_t iack_queue;
72 static void (*lm_callback[4])(int);     /* Called in interrupt handler */
73 static void *crcsr_kernel;      /* Kernel virtual address of the CR/CSR image buffer */
74 static dma_addr_t crcsr_bus;    /* Bus address of the CR/CSR image buffer */
75 static struct vme_master_resource *flush_image; /* Window used to flush posted writes */
76 static struct mutex vme_rmw;    /* Only one RMW cycle at a time */
77 static struct mutex vme_int;    /*
78                                  * Only one VME interrupt can be
79                                  * generated at a time, provide locking
80                                  */
81
82 static char driver_name[] = "vme_tsi148";
83
84 static struct pci_device_id tsi148_ids[] = {
85         { PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_TSI148) },
86         { },
87 };
88
89 static struct pci_driver tsi148_driver = {
90         .name = driver_name,
91         .id_table = tsi148_ids,
92         .probe = tsi148_probe,
93         .remove = tsi148_remove,
94 };
95
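/*
 * Helpers to combine/split 64-bit values to/from the upper/lower 32-bit
 * register pairs used throughout the TSI148 register set.
 */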
96 static void reg_join(unsigned int high, unsigned int low,
97         unsigned long long *variable)
98 {
99         *variable = (unsigned long long)high << 32;
100         *variable |= (unsigned long long)low;
101 }
102
103 static void reg_split(unsigned long long variable, unsigned int *high,
104         unsigned int *low)
105 {
106         *low = (unsigned int)variable & 0xFFFFFFFF;
107         *high = (unsigned int)(variable >> 32);
108 }
109
110 /*
111  * Wakes up DMA queue.
112  */
113 static u32 tsi148_DMA_irqhandler(int channel_mask)
114 {
115         u32 serviced = 0;
116
117         if (channel_mask & TSI148_LCSR_INTS_DMA0S) {
118                 wake_up(&dma_queue[0]);
119                 serviced |= TSI148_LCSR_INTC_DMA0C;
120         }
121         if (channel_mask & TSI148_LCSR_INTS_DMA1S) {
122                 wake_up(&dma_queue[1]);
123                 serviced |= TSI148_LCSR_INTC_DMA1C;
124         }
125
126         return serviced;
127 }
128
129 /*
130  * Wake up location monitor queue
131  */
132 static u32 tsi148_LM_irqhandler(u32 stat)
133 {
134         int i;
135         u32 serviced = 0;
136
137         for (i = 0; i < 4; i++) {
138                 if (stat & TSI148_LCSR_INTS_LMS[i]) {
139                         /* We only enable interrupts if the callback is set */
140                         lm_callback[i](i);
141                         serviced |= TSI148_LCSR_INTC_LMC[i];
142                 }
143         }
144
145         return serviced;
146 }
147
148 /*
149  * Wake up mail box queue.
150  *
151  * XXX This functionality is not exposed through the API.
152  */
153 static u32 tsi148_MB_irqhandler(u32 stat)
154 {
155         int i;
156         u32 val;
157         u32 serviced = 0;
158
159         for (i = 0; i < 4; i++) {
160                 if (stat & TSI148_LCSR_INTS_MBS[i]) {
161                         val = ioread32be(tsi148_bridge->base +
162                                 TSI148_GCSR_MBOX[i]);
163                         printk(KERN_INFO "VME Mailbox %d received: 0x%x\n", i, val);
164                         serviced |= TSI148_LCSR_INTC_MBC[i];
165                 }
166         }
167
168         return serviced;
169 }
170
171 /*
172  * Display error & status message when PERR (PCI) exception interrupt occurs.
173  */
174 static u32 tsi148_PERR_irqhandler(void)
175 {
176         printk(KERN_ERR
177                 "PCI Exception at address: 0x%08x:%08x, attributes: %08x\n",
178                 ioread32be(tsi148_bridge->base + TSI148_LCSR_EDPAU),
179                 ioread32be(tsi148_bridge->base + TSI148_LCSR_EDPAL),
180                 ioread32be(tsi148_bridge->base + TSI148_LCSR_EDPAT)
181                 );
182         printk(KERN_ERR
183                 "PCI-X attribute reg: %08x, PCI-X split completion reg: %08x\n",
184                 ioread32be(tsi148_bridge->base + TSI148_LCSR_EDPXA),
185                 ioread32be(tsi148_bridge->base + TSI148_LCSR_EDPXS)
186                 );
187
188         iowrite32be(TSI148_LCSR_EDPAT_EDPCL,
189                 tsi148_bridge->base + TSI148_LCSR_EDPAT);
190
191         return TSI148_LCSR_INTC_PERRC;
192 }
193
194 /*
195  * Save address and status when VME error interrupt occurs.
196  */
197 static u32 tsi148_VERR_irqhandler(void)
198 {
199         unsigned int error_addr_high, error_addr_low;
200         unsigned long long error_addr;
201         u32 error_attrib;
202         struct vme_bus_error *error;
203
204         error_addr_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_VEAU);
205         error_addr_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_VEAL);
206         error_attrib = ioread32be(tsi148_bridge->base + TSI148_LCSR_VEAT);
207
208         reg_join(error_addr_high, error_addr_low, &error_addr);
209
210         /* Check for exception register overflow (we have lost error data) */
211         if (error_attrib & TSI148_LCSR_VEAT_VEOF) {
212                 printk(KERN_ERR "VME Bus Exception Overflow Occurred\n");
213         }
214
215         /* In interrupt context, so the allocation must not sleep */
216         error = kmalloc(sizeof(struct vme_bus_error), GFP_ATOMIC);
217         if (error) {
218                 error->address = error_addr;
219                 error->attributes = error_attrib;
220                 list_add_tail(&(error->list), &(tsi148_bridge->vme_errors));
221         } else {
222                 printk(KERN_ERR
223                         "Unable to alloc memory for VMEbus Error reporting\n");
224                 printk(KERN_ERR
225                         "VME Bus Error at address: 0x%llx, attributes: %08x\n",
226                         error_addr, error_attrib);
227         }
228
229         /* Clear Status */
230         iowrite32be(TSI148_LCSR_VEAT_VESCL,
231                 tsi148_bridge->base + TSI148_LCSR_VEAT);
232
233         return TSI148_LCSR_INTC_VERRC;
234 }
235
236 /*
237  * Wake up IACK queue.
238  */
239 static u32 tsi148_IACK_irqhandler(void)
240 {
241         wake_up(&iack_queue);
242
243         return TSI148_LCSR_INTC_IACKC;
244 }
245
246 /*
247  * Call the VME bus interrupt callback if one is provided.
248  */
249 static u32 tsi148_VIRQ_irqhandler(u32 stat)
250 {
251         int vec, i, serviced = 0;
252
253         for (i = 7; i > 0; i--) {
254                 if (stat & (1 << i)) {
255                         /*
256                          *      Note:   Even though the registers are defined
257                          *      as 32-bits in the spec, we only want to issue
258                          *      8-bit IACK cycles on the bus, read from offset
259                          *      3.
260                          */
261                         vec = ioread8(tsi148_bridge->base +
262                                 TSI148_LCSR_VIACK[i] + 3);
263
264                         vme_irq_handler(tsi148_bridge, i, vec);
265
266                         serviced |= (1 << i);
267                 }
268         }
269
270         return serviced;
271 }
272
273 /*
274  * Top level interrupt handler.  Clears appropriate interrupt status bits and
275  * then calls appropriate sub handler(s).
276  */
277 static irqreturn_t tsi148_irqhandler(int irq, void *dev_id)
278 {
279         u32 stat, enable, serviced = 0;
280
281         /* Determine which interrupts are unmasked and set */
282         enable = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEO);
283         stat = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTS);
284
285         /* Only look at unmasked interrupts */
286         stat &= enable;
287
288         if (unlikely(!stat)) {
289                 return IRQ_NONE;
290         }
291
292         /* Call subhandlers as appropriate */
293         /* DMA irqs */
294         if (stat & (TSI148_LCSR_INTS_DMA1S | TSI148_LCSR_INTS_DMA0S))
295                 serviced |= tsi148_DMA_irqhandler(stat);
296
297         /* Location monitor irqs */
298         if (stat & (TSI148_LCSR_INTS_LM3S | TSI148_LCSR_INTS_LM2S |
299                         TSI148_LCSR_INTS_LM1S | TSI148_LCSR_INTS_LM0S))
300                 serviced |= tsi148_LM_irqhandler(stat);
301
302         /* Mail box irqs */
303         if (stat & (TSI148_LCSR_INTS_MB3S | TSI148_LCSR_INTS_MB2S |
304                         TSI148_LCSR_INTS_MB1S | TSI148_LCSR_INTS_MB0S))
305                 serviced |= tsi148_MB_irqhandler(stat);
306
307         /* PCI bus error */
308         if (stat & TSI148_LCSR_INTS_PERRS)
309                 serviced |= tsi148_PERR_irqhandler();
310
311         /* VME bus error */
312         if (stat & TSI148_LCSR_INTS_VERRS)
313                 serviced |= tsi148_VERR_irqhandler();
314
315         /* IACK irq */
316         if (stat & TSI148_LCSR_INTS_IACKS)
317                 serviced |= tsi148_IACK_irqhandler();
318
319         /* VME bus irqs */
320         if (stat & (TSI148_LCSR_INTS_IRQ7S | TSI148_LCSR_INTS_IRQ6S |
321                         TSI148_LCSR_INTS_IRQ5S | TSI148_LCSR_INTS_IRQ4S |
322                         TSI148_LCSR_INTS_IRQ3S | TSI148_LCSR_INTS_IRQ2S |
323                         TSI148_LCSR_INTS_IRQ1S))
324                 serviced |= tsi148_VIRQ_irqhandler(stat);
325
326         /* Clear serviced interrupts */
327         iowrite32be(serviced, tsi148_bridge->base + TSI148_LCSR_INTC);
328
329         return IRQ_HANDLED;
330 }
331
332 static int tsi148_irq_init(struct vme_bridge *bridge)
333 {
334         int result;
335         unsigned int tmp;
336         struct pci_dev *pdev;
337
338         /* Need pdev */
339         pdev = container_of(bridge->parent, struct pci_dev, dev);
340
341         /* Initialise list for VME bus errors */
342         INIT_LIST_HEAD(&(bridge->vme_errors));
343
344         mutex_init(&(bridge->irq_mtx));
345
346         result = request_irq(pdev->irq,
347                              tsi148_irqhandler,
348                              IRQF_SHARED,
349                              driver_name, pdev);
350         if (result) {
351                 dev_err(&pdev->dev, "Can't get assigned pci irq vector %02X\n",
352                         pdev->irq);
353                 return result;
354         }
355
356         /* Enable and unmask interrupts */
357         tmp = TSI148_LCSR_INTEO_DMA1EO | TSI148_LCSR_INTEO_DMA0EO |
358                 TSI148_LCSR_INTEO_MB3EO | TSI148_LCSR_INTEO_MB2EO |
359                 TSI148_LCSR_INTEO_MB1EO | TSI148_LCSR_INTEO_MB0EO |
360                 TSI148_LCSR_INTEO_PERREO | TSI148_LCSR_INTEO_VERREO |
361                 TSI148_LCSR_INTEO_IACKEO;
362
363         /* XXX This leaves the following interrupts masked.
364          * TSI148_LCSR_INTEO_VIEEO
365          * TSI148_LCSR_INTEO_SYSFLEO
366          * TSI148_LCSR_INTEO_ACFLEO
367          */
368
369         /* Don't enable Location Monitor interrupts here - they will be
370          * enabled when the location monitors are properly configured and
371          * a callback has been attached.
372          * TSI148_LCSR_INTEO_LM0EO
373          * TSI148_LCSR_INTEO_LM1EO
374          * TSI148_LCSR_INTEO_LM2EO
375          * TSI148_LCSR_INTEO_LM3EO
376          */
377
378         /* Don't enable VME interrupts until we add a handler, else the board
379          * will respond to it and we don't want that unless it knows how to
380          * properly deal with it.
381          * TSI148_LCSR_INTEO_IRQ7EO
382          * TSI148_LCSR_INTEO_IRQ6EO
383          * TSI148_LCSR_INTEO_IRQ5EO
384          * TSI148_LCSR_INTEO_IRQ4EO
385          * TSI148_LCSR_INTEO_IRQ3EO
386          * TSI148_LCSR_INTEO_IRQ2EO
387          * TSI148_LCSR_INTEO_IRQ1EO
388          */
389
390         iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
391         iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
392
393         return 0;
394 }
395
396 static void tsi148_irq_exit(struct pci_dev *pdev)
397 {
398         /* Turn off interrupts */
399         iowrite32be(0x0, tsi148_bridge->base + TSI148_LCSR_INTEO);
400         iowrite32be(0x0, tsi148_bridge->base + TSI148_LCSR_INTEN);
401
402         /* Clear all interrupts */
403         iowrite32be(0xFFFFFFFF, tsi148_bridge->base + TSI148_LCSR_INTC);
404
405         /* Detach interrupt handler */
406         free_irq(pdev->irq, pdev);
407 }
408
409 /*
410  * Check to see if an IACK has been received, return true (1) or false (0).
411  */
412 int tsi148_iack_received(void)
413 {
414         u32 tmp;
415
416         tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_VICR);
417
418         if (tmp & TSI148_LCSR_VICR_IRQS)
419                 return 0;
420         else
421                 return 1;
422 }
423
424 /*
425  * Configure VME interrupt
426  */
427 void tsi148_irq_set(int level, int state, int sync)
428 {
429         struct pci_dev *pdev;
430         u32 tmp;
431
432         /* We need to do the ordering differently for enabling and disabling */
433         if (state == 0) {
434                 tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEN);
435                 tmp &= ~TSI148_LCSR_INTEN_IRQEN[level - 1];
436                 iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_INTEN);
437
438                 tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEO);
439                 tmp &= ~TSI148_LCSR_INTEO_IRQEO[level - 1];
440                 iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_INTEO);
441
442                 if (sync != 0) {
443                         pdev = container_of(tsi148_bridge->parent,
444                                 struct pci_dev, dev);
445
446                         synchronize_irq(pdev->irq);
447                 }
448         } else {
449                 tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEO);
450                 tmp |= TSI148_LCSR_INTEO_IRQEO[level - 1];
451                 iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_INTEO);
452
453                 tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEN);
454                 tmp |= TSI148_LCSR_INTEN_IRQEN[level - 1];
455                 iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_INTEN);
456         }
457 }
458
459 /*
460  * Generate a VME bus interrupt at the requested level & vector. Wait for
461  * interrupt to be acked.
462  */
463 int tsi148_irq_generate(int level, int statid)
464 {
465         u32 tmp;
466
467         mutex_lock(&(vme_int));
468
469         /* Read VICR register */
470         tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_VICR);
471
472         /* Set Status/ID */
473         tmp = (tmp & ~TSI148_LCSR_VICR_STID_M) |
474                 (statid & TSI148_LCSR_VICR_STID_M);
475         iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_VICR);
476
477         /* Assert VMEbus IRQ */
478         tmp = tmp | TSI148_LCSR_VICR_IRQL[level];
479         iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_VICR);
480
481         /* XXX Consider implementing a timeout? */
482         wait_event_interruptible(iack_queue, tsi148_iack_received());
483
484         mutex_unlock(&(vme_int));
485
486         return 0;
487 }
488
489 /*
490  * Find the first error in this address range
491  */
492 static struct vme_bus_error *tsi148_find_error(vme_address_t aspace,
493         unsigned long long address, size_t count)
494 {
495         struct list_head *err_pos;
496         struct vme_bus_error *vme_err, *valid = NULL;
497         unsigned long long bound;
498
499         bound = address + count;
500
501         /*
502          * XXX We are currently not looking at the address space when parsing
503          *     for errors. This is because parsing the Address Modifier Codes
504          *     is going to be quite resource intensive to do properly. We
505          *     should be OK just looking at the addresses and this is certainly
506          *     much better than what we had before.
507          */
508         err_pos = NULL;
509         /* Iterate through errors */
510         list_for_each(err_pos, &(tsi148_bridge->vme_errors)) {
511                 vme_err = list_entry(err_pos, struct vme_bus_error, list);
512                 if ((vme_err->address >= address) && (vme_err->address < bound)) {
513                         valid = vme_err;
514                         break;
515                 }
516         }
517
518         return valid;
519 }
520
521 /*
522  * Clear errors in the provided address range.
523  */
524 static void tsi148_clear_errors(vme_address_t aspace,
525         unsigned long long address, size_t count)
526 {
527         struct list_head *err_pos, *temp;
528         struct vme_bus_error *vme_err;
529         unsigned long long bound;
530
531         bound = address + count;
532
533         /*
534          * XXX We are currently not looking at the address space when parsing
535          *     for errors. This is because parsing the Address Modifier Codes
536          *     is going to be quite resource intensive to do properly. We
537          *     should be OK just looking at the addresses and this is certainly
538          *     much better than what we had before.
539          */
540         err_pos = NULL;
541         /* Iterate through errors */
542         list_for_each_safe(err_pos, temp, &(tsi148_bridge->vme_errors)) {
543                 vme_err = list_entry(err_pos, struct vme_bus_error, list);
544
545                 if ((vme_err->address >= address) && (vme_err->address < bound)) {
546                         list_del(err_pos);
547                         kfree(vme_err);
548                 }
549         }
550 }
551
552 /*
553  * Initialize a slave window with the requested attributes.
554  */
555 int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
556         unsigned long long vme_base, unsigned long long size,
557         dma_addr_t pci_base, vme_address_t aspace, vme_cycle_t cycle)
558 {
559         unsigned int i, addr = 0, granularity = 0;
560         unsigned int temp_ctl = 0;
561         unsigned int vme_base_low, vme_base_high;
562         unsigned int vme_bound_low, vme_bound_high;
563         unsigned int pci_offset_low, pci_offset_high;
564         unsigned long long vme_bound, pci_offset;
565
566 #if 0
567         printk("Set slave image %d to:\n", image->number);
568         printk("\tEnabled: %s\n", (enabled == 1)? "yes" : "no");
569         printk("\tVME Base:0x%llx\n", vme_base);
570         printk("\tWindow Size:0x%llx\n", size);
571         printk("\tPCI Base:0x%lx\n", (unsigned long)pci_base);
572         printk("\tAddress Space:0x%x\n", aspace);
573         printk("\tTransfer Cycle Properties:0x%x\n", cycle);
574 #endif
575
576         i = image->number;
577
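        /*
         * Each address space has a minimum granularity; the window base,
         * bound and PCI offset must all be aligned to it (checked below).
         */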
578         switch (aspace) {
579         case VME_A16:
580                 granularity = 0x10;
581                 addr |= TSI148_LCSR_ITAT_AS_A16;
582                 break;
583         case VME_A24:
584                 granularity = 0x1000;
585                 addr |= TSI148_LCSR_ITAT_AS_A24;
586                 break;
587         case VME_A32:
588                 granularity = 0x10000;
589                 addr |= TSI148_LCSR_ITAT_AS_A32;
590                 break;
591         case VME_A64:
592                 granularity = 0x10000;
593                 addr |= TSI148_LCSR_ITAT_AS_A64;
594                 break;
595         case VME_CRCSR:
596         case VME_USER1:
597         case VME_USER2:
598         case VME_USER3:
599         case VME_USER4:
600         default:
601                 printk(KERN_ERR "Invalid address space\n");
602                 return -EINVAL;
603                 break;
604         }
605
606         /* Convert 64-bit variables to 2x 32-bit variables */
607         reg_split(vme_base, &vme_base_high, &vme_base_low);
608
609         /*
610          * Bound address is a valid address for the window, adjust
611          * accordingly
612          */
613         vme_bound = vme_base + size - granularity;
614         reg_split(vme_bound, &vme_bound_high, &vme_bound_low);
615         pci_offset = (unsigned long long)pci_base - vme_base;
616         reg_split(pci_offset, &pci_offset_high, &pci_offset_low);
617
618         if (vme_base_low & (granularity - 1)) {
619                 printk(KERN_ERR "Invalid VME base alignment\n");
620                 return -EINVAL;
621         }
622         if (vme_bound_low & (granularity - 1)) {
623                 printk(KERN_ERR "Invalid VME bound alignment\n");
624                 return -EINVAL;
625         }
626         if (pci_offset_low & (granularity - 1)) {
627                 printk(KERN_ERR "Invalid PCI Offset alignment\n");
628                 return -EINVAL;
629         }
630
631 #if 0
632         printk("\tVME Bound:0x%llx\n", vme_bound);
633         printk("\tPCI Offset:0x%llx\n", pci_offset);
634 #endif
635
636         /*  Disable while we are mucking around */
637         temp_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_IT[i] +
638                 TSI148_LCSR_OFFSET_ITAT);
639         temp_ctl &= ~TSI148_LCSR_ITAT_EN;
640         iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_IT[i] +
641                 TSI148_LCSR_OFFSET_ITAT);
642
643         /* Setup mapping */
644         iowrite32be(vme_base_high, tsi148_bridge->base + TSI148_LCSR_IT[i] +
645                 TSI148_LCSR_OFFSET_ITSAU);
646         iowrite32be(vme_base_low, tsi148_bridge->base + TSI148_LCSR_IT[i] +
647                 TSI148_LCSR_OFFSET_ITSAL);
648         iowrite32be(vme_bound_high, tsi148_bridge->base + TSI148_LCSR_IT[i] +
649                 TSI148_LCSR_OFFSET_ITEAU);
650         iowrite32be(vme_bound_low, tsi148_bridge->base + TSI148_LCSR_IT[i] +
651                 TSI148_LCSR_OFFSET_ITEAL);
652         iowrite32be(pci_offset_high, tsi148_bridge->base + TSI148_LCSR_IT[i] +
653                 TSI148_LCSR_OFFSET_ITOFU);
654         iowrite32be(pci_offset_low, tsi148_bridge->base + TSI148_LCSR_IT[i] +
655                 TSI148_LCSR_OFFSET_ITOFL);
656
657 /* XXX Prefetch stuff currently unsupported */
658 #if 0
659
660         for (x = 0; x < 4; x++) {
661                 if ((64 << x) >= vmeIn->prefetchSize) {
662                         break;
663                 }
664         }
665         if (x == 4)
666                 x--;
667         temp_ctl |= (x << 16);
668
669         if (vmeIn->prefetchThreshold)
670                 temp_ctl |= 0x40000;
672 #endif
673
674         /* Setup 2eSST speeds */
675         temp_ctl &= ~TSI148_LCSR_ITAT_2eSSTM_M;
676         switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
677         case VME_2eSST160:
678                 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_160;
679                 break;
680         case VME_2eSST267:
681                 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_267;
682                 break;
683         case VME_2eSST320:
684                 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_320;
685                 break;
686         }
687
688         /* Setup cycle types */
689         temp_ctl &= ~(0x1F << 7);
690         if (cycle & VME_BLT)
691                 temp_ctl |= TSI148_LCSR_ITAT_BLT;
692         if (cycle & VME_MBLT)
693                 temp_ctl |= TSI148_LCSR_ITAT_MBLT;
694         if (cycle & VME_2eVME)
695                 temp_ctl |= TSI148_LCSR_ITAT_2eVME;
696         if (cycle & VME_2eSST)
697                 temp_ctl |= TSI148_LCSR_ITAT_2eSST;
698         if (cycle & VME_2eSSTB)
699                 temp_ctl |= TSI148_LCSR_ITAT_2eSSTB;
700
701         /* Setup address space */
702         temp_ctl &= ~TSI148_LCSR_ITAT_AS_M;
703         temp_ctl |= addr;
704
705         temp_ctl &= ~0xF;
706         if (cycle & VME_SUPER)
707                 temp_ctl |= TSI148_LCSR_ITAT_SUPR;
708         if (cycle & VME_USER)
709                 temp_ctl |= TSI148_LCSR_ITAT_NPRIV;
710         if (cycle & VME_PROG)
711                 temp_ctl |= TSI148_LCSR_ITAT_PGM;
712         if (cycle & VME_DATA)
713                 temp_ctl |= TSI148_LCSR_ITAT_DATA;
714
715         /* Write ctl reg without enable */
716         iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_IT[i] +
717                 TSI148_LCSR_OFFSET_ITAT);
718
719         if (enabled)
720                 temp_ctl |= TSI148_LCSR_ITAT_EN;
721
722         iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_IT[i] +
723                 TSI148_LCSR_OFFSET_ITAT);
724
725         return 0;
726 }
727
728 /*
729  * Get slave window configuration.
730  *
731  * XXX Prefetch currently unsupported.
732  */
733 int tsi148_slave_get(struct vme_slave_resource *image, int *enabled,
734         unsigned long long *vme_base, unsigned long long *size,
735         dma_addr_t *pci_base, vme_address_t *aspace, vme_cycle_t *cycle)
736 {
737         unsigned int i, granularity = 0, ctl = 0;
738         unsigned int vme_base_low, vme_base_high;
739         unsigned int vme_bound_low, vme_bound_high;
740         unsigned int pci_offset_low, pci_offset_high;
741         unsigned long long vme_bound, pci_offset;
742
743
744         i = image->number;
745
746         /* Read registers */
747         ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_IT[i] +
748                 TSI148_LCSR_OFFSET_ITAT);
749
750         vme_base_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_IT[i] +
751                 TSI148_LCSR_OFFSET_ITSAU);
752         vme_base_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_IT[i] +
753                 TSI148_LCSR_OFFSET_ITSAL);
754         vme_bound_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_IT[i] +
755                 TSI148_LCSR_OFFSET_ITEAU);
756         vme_bound_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_IT[i] +
757                 TSI148_LCSR_OFFSET_ITEAL);
758         pci_offset_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_IT[i] +
759                 TSI148_LCSR_OFFSET_ITOFU);
760         pci_offset_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_IT[i] +
761                 TSI148_LCSR_OFFSET_ITOFL);
762
763         /* Convert 64-bit variables to 2x 32-bit variables */
764         reg_join(vme_base_high, vme_base_low, vme_base);
765         reg_join(vme_bound_high, vme_bound_low, &vme_bound);
766         reg_join(pci_offset_high, pci_offset_low, &pci_offset);
767
768         *pci_base = (dma_addr_t)vme_base + pci_offset;
769
770         *enabled = 0;
771         *aspace = 0;
772         *cycle = 0;
773
774         if (ctl & TSI148_LCSR_ITAT_EN)
775                 *enabled = 1;
776
777         if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A16) {
778                 granularity = 0x10;
779                 *aspace |= VME_A16;
780         }
781         if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A24) {
782                 granularity = 0x1000;
783                 *aspace |= VME_A24;
784         }
785         if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A32) {
786                 granularity = 0x10000;
787                 *aspace |= VME_A32;
788         }
789         if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A64) {
790                 granularity = 0x10000;
791                 *aspace |= VME_A64;
792         }
793
794         /* Need granularity before we set the size */
795         *size = (unsigned long long)((vme_bound - *vme_base) + granularity);
796
797
798         if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_160)
799                 *cycle |= VME_2eSST160;
800         if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_267)
801                 *cycle |= VME_2eSST267;
802         if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_320)
803                 *cycle |= VME_2eSST320;
804
805         if (ctl & TSI148_LCSR_ITAT_BLT)
806                 *cycle |= VME_BLT;
807         if (ctl & TSI148_LCSR_ITAT_MBLT)
808                 *cycle |= VME_MBLT;
809         if (ctl & TSI148_LCSR_ITAT_2eVME)
810                 *cycle |= VME_2eVME;
811         if (ctl & TSI148_LCSR_ITAT_2eSST)
812                 *cycle |= VME_2eSST;
813         if (ctl & TSI148_LCSR_ITAT_2eSSTB)
814                 *cycle |= VME_2eSSTB;
815
816         if (ctl & TSI148_LCSR_ITAT_SUPR)
817                 *cycle |= VME_SUPER;
818         if (ctl & TSI148_LCSR_ITAT_NPRIV)
819                 *cycle |= VME_USER;
820         if (ctl & TSI148_LCSR_ITAT_PGM)
821                 *cycle |= VME_PROG;
822         if (ctl & TSI148_LCSR_ITAT_DATA)
823                 *cycle |= VME_DATA;
824
825         return 0;
826 }
827
828 /*
829  * Allocate and map PCI Resource
830  */
831 static int tsi148_alloc_resource(struct vme_master_resource *image,
832         unsigned long long size)
833 {
834         unsigned long long existing_size;
835         int retval = 0;
836         struct pci_dev *pdev;
837
838         /* Find pci_dev container of dev */
839         if (tsi148_bridge->parent == NULL) {
840                 printk(KERN_ERR "Dev entry NULL\n");
841                 return -EINVAL;
842         }
843         pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);
844
845         existing_size = (unsigned long long)(image->pci_resource.end -
846                 image->pci_resource.start);
847
848         /* If the existing size is OK, return (resource end is inclusive, hence size - 1) */
849         if ((size != 0) && (existing_size == (size - 1)))
850                 return 0;
851
852         if (existing_size != 0) {
853                 iounmap(image->kern_base);
854                 image->kern_base = NULL;
855                 if (image->pci_resource.name != NULL)
856                         kfree(image->pci_resource.name);
857                 release_resource(&(image->pci_resource));
858                 memset(&(image->pci_resource), 0, sizeof(struct resource));
859         }
860
861         /* Exit here if size is zero */
862         if (size == 0) {
863                 return 0;
864         }
865
866         if (image->pci_resource.name == NULL) {
867                 image->pci_resource.name = kmalloc(VMENAMSIZ+3, GFP_KERNEL);
868                 if (image->pci_resource.name == NULL) {
869                         printk(KERN_ERR "Unable to allocate memory for resource"
870                                 " name\n");
871                         retval = -ENOMEM;
872                         goto err_name;
873                 }
874         }
875
876         sprintf((char *)image->pci_resource.name, "%s.%d", tsi148_bridge->name,
877                 image->number);
878
879         image->pci_resource.start = 0;
880         image->pci_resource.end = (unsigned long)size;
881         image->pci_resource.flags = IORESOURCE_MEM;
882
883         retval = pci_bus_alloc_resource(pdev->bus,
884                 &(image->pci_resource), size, size, PCIBIOS_MIN_MEM,
885                 0, NULL, NULL);
886         if (retval) {
887                 printk(KERN_ERR "Failed to allocate mem resource for "
888                         "window %d size 0x%lx start 0x%lx\n",
889                         image->number, (unsigned long)size,
890                         (unsigned long)image->pci_resource.start);
891                 goto err_resource;
892         }
893
894         image->kern_base = ioremap_nocache(
895                 image->pci_resource.start, size);
896         if (image->kern_base == NULL) {
897                 printk(KERN_ERR "Failed to remap resource\n");
898                 retval = -ENOMEM;
899                 goto err_remap;
900         }
901
902         return 0;
903
904         iounmap(image->kern_base);
905         image->kern_base = NULL;
906 err_remap:
907         release_resource(&(image->pci_resource));
908 err_resource:
909         kfree(image->pci_resource.name);
910         memset(&(image->pci_resource), 0, sizeof(struct resource));
911 err_name:
912         return retval;
913 }
914
915 /*
916  * Free and unmap PCI Resource
917  */
918 static void tsi148_free_resource(struct vme_master_resource *image)
919 {
920         iounmap(image->kern_base);
921         image->kern_base = NULL;
922         release_resource(&(image->pci_resource));
923         kfree(image->pci_resource.name);
924         memset(&(image->pci_resource), 0, sizeof(struct resource));
925 }
926
927 /*
928  * Set the attributes of an outbound window.
929  */
930 int tsi148_master_set(struct vme_master_resource *image, int enabled,
931         unsigned long long vme_base, unsigned long long size,
932         vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
933 {
934         int retval = 0;
935         unsigned int i;
936         unsigned int temp_ctl = 0;
937         unsigned int pci_base_low, pci_base_high;
938         unsigned int pci_bound_low, pci_bound_high;
939         unsigned int vme_offset_low, vme_offset_high;
940         unsigned long long pci_bound, vme_offset, pci_base;
941
942         /* Verify input data */
943         if (vme_base & 0xFFFF) {
944                 printk(KERN_ERR "Invalid VME Window alignment\n");
945                 retval = -EINVAL;
946                 goto err_window;
947         }
948
949         if ((size == 0) && (enabled != 0)) {
950                 printk(KERN_ERR "Size must be non-zero for enabled windows\n");
951                 retval = -EINVAL;
952                 goto err_window;
953         }
954
955         spin_lock(&(image->lock));
956
957         /* Let's allocate the resource here rather than further up the stack as
958          * it avoids pushing loads of bus dependent stuff up the stack. If size
959          * is zero, any existing resource will be freed.
960          */
961         retval = tsi148_alloc_resource(image, size);
962         if (retval) {
963                 spin_unlock(&(image->lock));
964                 printk(KERN_ERR "Unable to allocate memory for "
965                         "resource\n");
966                 goto err_res;
967         }
968
969         if (size == 0) {
970                 pci_base = 0;
971                 pci_bound = 0;
972                 vme_offset = 0;
973         } else {
974                 pci_base = (unsigned long long)image->pci_resource.start;
975
976                 /*
977                  * Bound address is a valid address for the window, adjust
978                  * according to window granularity.
979                  */
980                 pci_bound = pci_base + (size - 0x10000);
981                 vme_offset = vme_base - pci_base;
982         }
983
984         /* Convert 64-bit variables to 2x 32-bit variables */
985         reg_split(pci_base, &pci_base_high, &pci_base_low);
986         reg_split(pci_bound, &pci_bound_high, &pci_bound_low);
987         reg_split(vme_offset, &vme_offset_high, &vme_offset_low);
988
989         if (pci_base_low & 0xFFFF) {
990                 spin_unlock(&(image->lock));
991                 printk(KERN_ERR "Invalid PCI base alignment\n");
992                 retval = -EINVAL;
993                 goto err_gran;
994         }
995         if (pci_bound_low & 0xFFFF) {
996                 spin_unlock(&(image->lock));
997                 printk(KERN_ERR "Invalid PCI bound alignment\n");
998                 retval = -EINVAL;
999                 goto err_gran;
1000         }
1001         if (vme_offset_low & 0xFFFF) {
1002                 spin_unlock(&(image->lock));
1003                 printk(KERN_ERR "Invalid VME Offset alignment\n");
1004                 retval = -EINVAL;
1005                 goto err_gran;
1006         }
1007
1008         i = image->number;
1009
1010         /* Disable while we are mucking around */
1011         temp_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] +
1012                 TSI148_LCSR_OFFSET_OTAT);
1013         temp_ctl &= ~TSI148_LCSR_OTAT_EN;
1014         iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_OT[i] +
1015                 TSI148_LCSR_OFFSET_OTAT);
1016
1017 /* XXX Prefetch stuff currently unsupported */
1018 #if 0
1019         if (vmeOut->prefetchEnable) {
1020                 temp_ctl |= 0x40000;
1021                 for (x = 0; x < 4; x++) {
1022                         if ((2 << x) >= vmeOut->prefetchSize)
1023                                 break;
1024                 }
1025                 if (x == 4)
1026                         x = 3;
1027                 temp_ctl |= (x << 16);
1028         }
1029 #endif
1030
1031         /* Setup 2eSST speeds */
1032         temp_ctl &= ~TSI148_LCSR_OTAT_2eSSTM_M;
1033         switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1034         case VME_2eSST160:
1035                 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_160;
1036                 break;
1037         case VME_2eSST267:
1038                 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_267;
1039                 break;
1040         case VME_2eSST320:
1041                 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_320;
1042                 break;
1043         }
1044
1045         /* Setup cycle types */
1046         if (cycle & VME_BLT) {
1047                 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1048                 temp_ctl |= TSI148_LCSR_OTAT_TM_BLT;
1049         }
1050         if (cycle & VME_MBLT) {
1051                 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1052                 temp_ctl |= TSI148_LCSR_OTAT_TM_MBLT;
1053         }
1054         if (cycle & VME_2eVME) {
1055                 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1056                 temp_ctl |= TSI148_LCSR_OTAT_TM_2eVME;
1057         }
1058         if (cycle & VME_2eSST) {
1059                 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1060                 temp_ctl |= TSI148_LCSR_OTAT_TM_2eSST;
1061         }
1062         if (cycle & VME_2eSSTB) {
1063                 printk(KERN_WARNING "Currently not setting Broadcast Select "
1064                         "Registers\n");
1065                 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1066                 temp_ctl |= TSI148_LCSR_OTAT_TM_2eSSTB;
1067         }
1068
1069         /* Setup data width */
1070         temp_ctl &= ~TSI148_LCSR_OTAT_DBW_M;
1071         switch (dwidth) {
1072         case VME_D16:
1073                 temp_ctl |= TSI148_LCSR_OTAT_DBW_16;
1074                 break;
1075         case VME_D32:
1076                 temp_ctl |= TSI148_LCSR_OTAT_DBW_32;
1077                 break;
1078         default:
1079                 spin_unlock(&(image->lock));
1080                 printk(KERN_ERR "Invalid data width\n");
1081                 retval = -EINVAL;
1082                 goto err_dwidth;
1083         }
1084
1085         /* Setup address space */
1086         temp_ctl &= ~TSI148_LCSR_OTAT_AMODE_M;
1087         switch (aspace) {
1088         case VME_A16:
1089                 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A16;
1090                 break;
1091         case VME_A24:
1092                 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A24;
1093                 break;
1094         case VME_A32:
1095                 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A32;
1096                 break;
1097         case VME_A64:
1098                 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A64;
1099                 break;
1100         case VME_CRCSR:
1101                 temp_ctl |= TSI148_LCSR_OTAT_AMODE_CRCSR;
1102                 break;
1103         case VME_USER1:
1104                 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER1;
1105                 break;
1106         case VME_USER2:
1107                 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER2;
1108                 break;
1109         case VME_USER3:
1110                 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER3;
1111                 break;
1112         case VME_USER4:
1113                 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER4;
1114                 break;
1115         default:
1116                 spin_unlock(&(image->lock));
1117                 printk(KERN_ERR "Invalid address space\n");
1118                 retval = -EINVAL;
1119                 goto err_aspace;
1120                 break;
1121         }
1122
1123         temp_ctl &= ~(3<<4);
1124         if (cycle & VME_SUPER)
1125                 temp_ctl |= TSI148_LCSR_OTAT_SUP;
1126         if (cycle & VME_PROG)
1127                 temp_ctl |= TSI148_LCSR_OTAT_PGM;
1128
1129         /* Setup mapping */
1130         iowrite32be(pci_base_high, tsi148_bridge->base + TSI148_LCSR_OT[i] +
1131                 TSI148_LCSR_OFFSET_OTSAU);
1132         iowrite32be(pci_base_low, tsi148_bridge->base + TSI148_LCSR_OT[i] +
1133                 TSI148_LCSR_OFFSET_OTSAL);
1134         iowrite32be(pci_bound_high, tsi148_bridge->base + TSI148_LCSR_OT[i] +
1135                 TSI148_LCSR_OFFSET_OTEAU);
1136         iowrite32be(pci_bound_low, tsi148_bridge->base + TSI148_LCSR_OT[i] +
1137                 TSI148_LCSR_OFFSET_OTEAL);
1138         iowrite32be(vme_offset_high, tsi148_bridge->base + TSI148_LCSR_OT[i] +
1139                 TSI148_LCSR_OFFSET_OTOFU);
1140         iowrite32be(vme_offset_low, tsi148_bridge->base + TSI148_LCSR_OT[i] +
1141                 TSI148_LCSR_OFFSET_OTOFL);
1142
1143 /* XXX We need to deal with OTBS */
1144 #if 0
1145         iowrite32be(vmeOut->bcastSelect2esst, tsi148_bridge->base +
1146                 TSI148_LCSR_OT[i] + TSI148_LCSR_OFFSET_OTBS);
1147 #endif
1148
1149         /* Write ctl reg without enable */
1150         iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_OT[i] +
1151                 TSI148_LCSR_OFFSET_OTAT);
1152
1153         if (enabled)
1154                 temp_ctl |= TSI148_LCSR_OTAT_EN;
1155
1156         iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_OT[i] +
1157                 TSI148_LCSR_OFFSET_OTAT);
1158
1159         spin_unlock(&(image->lock));
1160         return 0;
1161
1162 err_aspace:
1163 err_dwidth:
1164 err_gran:
1165         tsi148_free_resource(image);
1166 err_res:
1167 err_window:
1168         return retval;
1169
1170 }
1171
1172 /*
1173  * Get the attributes of an outbound window.
1174  *
1175  * XXX Not parsing prefetch information.
1176  */
1177 int __tsi148_master_get(struct vme_master_resource *image, int *enabled,
1178         unsigned long long *vme_base, unsigned long long *size,
1179         vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
1180 {
1181         unsigned int i, ctl;
1182         unsigned int pci_base_low, pci_base_high;
1183         unsigned int pci_bound_low, pci_bound_high;
1184         unsigned int vme_offset_low, vme_offset_high;
1185
1186         unsigned long long pci_base, pci_bound, vme_offset;
1187
1188         i = image->number;
1189
1190         ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] +
1191                 TSI148_LCSR_OFFSET_OTAT);
1192
1193         pci_base_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] +
1194                 TSI148_LCSR_OFFSET_OTSAU);
1195         pci_base_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] +
1196                 TSI148_LCSR_OFFSET_OTSAL);
1197         pci_bound_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] +
1198                 TSI148_LCSR_OFFSET_OTEAU);
1199         pci_bound_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] +
1200                 TSI148_LCSR_OFFSET_OTEAL);
1201         vme_offset_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] +
1202                 TSI148_LCSR_OFFSET_OTOFU);
1203         vme_offset_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] +
1204                 TSI148_LCSR_OFFSET_OTOFL);
1205
1206         /* Convert 64-bit variables to 2x 32-bit variables */
1207         reg_join(pci_base_high, pci_base_low, &pci_base);
1208         reg_join(pci_bound_high, pci_bound_low, &pci_bound);
1209         reg_join(vme_offset_high, vme_offset_low, &vme_offset);
1210
1211         *vme_base = pci_base + vme_offset;
1212         *size = (unsigned long long)(pci_bound - pci_base) + 0x10000;
1213
1214         *enabled = 0;
1215         *aspace = 0;
1216         *cycle = 0;
1217         *dwidth = 0;
1218
1219         if (ctl & TSI148_LCSR_OTAT_EN)
1220                 *enabled = 1;
1221
1222         /* Setup address space */
1223         if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A16)
1224                 *aspace |= VME_A16;
1225         if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A24)
1226                 *aspace |= VME_A24;
1227         if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A32)
1228                 *aspace |= VME_A32;
1229         if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A64)
1230                 *aspace |= VME_A64;
1231         if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_CRCSR)
1232                 *aspace |= VME_CRCSR;
1233         if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER1)
1234                 *aspace |= VME_USER1;
1235         if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER2)
1236                 *aspace |= VME_USER2;
1237         if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER3)
1238                 *aspace |= VME_USER3;
1239         if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER4)
1240                 *aspace |= VME_USER4;
1241
1242         /* Setup 2eSST speeds */
1243         if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_160)
1244                 *cycle |= VME_2eSST160;
1245         if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_267)
1246                 *cycle |= VME_2eSST267;
1247         if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_320)
1248                 *cycle |= VME_2eSST320;
1249
1250         /* Setup cycle types */
1251         if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_SCT)
1252                 *cycle |= VME_SCT;
1253         if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_BLT)
1254                 *cycle |= VME_BLT;
1255         if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_MBLT)
1256                 *cycle |= VME_MBLT;
1257         if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eVME)
1258                 *cycle |= VME_2eVME;
1259         if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSST)
1260                 *cycle |= VME_2eSST;
1261         if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSSTB)
1262                 *cycle |= VME_2eSSTB;
1263
1264         if (ctl & TSI148_LCSR_OTAT_SUP)
1265                 *cycle |= VME_SUPER;
1266         else
1267                 *cycle |= VME_USER;
1268
1269         if (ctl & TSI148_LCSR_OTAT_PGM)
1270                 *cycle |= VME_PROG;
1271         else
1272                 *cycle |= VME_DATA;
1273
1274         /* Setup data width */
1275         if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_16)
1276                 *dwidth = VME_D16;
1277         if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_32)
1278                 *dwidth = VME_D32;
1279
1280         return 0;
1281 }
1282
1283
1284 int tsi148_master_get(struct vme_master_resource *image, int *enabled,
1285         unsigned long long *vme_base, unsigned long long *size,
1286         vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
1287 {
1288         int retval;
1289
1290         spin_lock(&(image->lock));
1291
1292         retval = __tsi148_master_get(image, enabled, vme_base, size, aspace,
1293                 cycle, dwidth);
1294
1295         spin_unlock(&(image->lock));
1296
1297         return retval;
1298 }
1299
1300 ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
1301         size_t count, loff_t offset)
1302 {
1303         int retval, enabled;
1304         unsigned long long vme_base, size;
1305         vme_address_t aspace;
1306         vme_cycle_t cycle;
1307         vme_width_t dwidth;
1308         struct vme_bus_error *vme_err = NULL;
1309
1310         spin_lock(&(image->lock));
1311
1312         memcpy_fromio(buf, image->kern_base + offset, (unsigned int)count);
1313         retval = count;
1314
1315         if (!err_chk)
1316                 goto skip_chk;
1317
1318         __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
1319                 &dwidth);
1320
1321         vme_err = tsi148_find_error(aspace, vme_base + offset, count);
1322         if (vme_err != NULL) {
1323                 dev_err(image->parent->parent, "First VME read error detected "
1324                         "at address 0x%llx\n", vme_err->address);
1325                 retval = vme_err->address - (vme_base + offset);
1326                 /* Clear down saved errors in this address range */
1327                 tsi148_clear_errors(aspace, vme_base + offset, count);
1328         }
1329
1330 skip_chk:
1331         spin_unlock(&(image->lock));
1332
1333         return retval;
1334 }
1335
1336
1337 /* XXX We need to change vme_master_resource->mtx to a spinlock so that read
1338  *     and write functions can be used in an interrupt context
1339  */
1340 ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
1341         size_t count, loff_t offset)
1342 {
1343         int retval = 0, enabled;
1344         unsigned long long vme_base, size;
1345         vme_address_t aspace;
1346         vme_cycle_t cycle;
1347         vme_width_t dwidth;
1348
1349         struct vme_bus_error *vme_err = NULL;
1350
1351         spin_lock(&(image->lock));
1352
1353         memcpy_toio(image->kern_base + offset, buf, (unsigned int)count);
1354         retval = count;
1355
1356         /*
1357          * Writes are posted. We need to do a read on the VME bus to flush out
1358          * all of the writes before we check for errors. We can't guarantee
1359          * that reading the data we have just written is safe. It is believed
1360          * that there isn't any read/write re-ordering, so we can read any
1361          * location in VME space, so let's read the Device ID from the tsi148's
1362          * own registers as mapped into CR/CSR space.
1363          *
1364          * We check for saved errors in the written address range/space.
1365          */
1366
1367         if (!err_chk)
1368                 goto skip_chk;
1369
1370         /*
1371          * Get window info first, to maximise the time that the buffers may
1372          * flush on their own.
1373          */
1374         __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
1375                 &dwidth);
1376
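        /*
         * Dummy read to flush the posted writes. flush_image is assumed to be
         * set up at probe time to map the bridge's own CR/CSR space; offset
         * 0x7F000 falls within the tsi148's register image, so this read
         * should always be safe.
         */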
1377         ioread16(flush_image->kern_base + 0x7F000);
1378
1379         vme_err = tsi148_find_error(aspace, vme_base + offset, count);
1380         if (vme_err != NULL) {
1381                 printk(KERN_ERR "First VME write error detected at address 0x%llx\n",
1382                         vme_err->address);
1383                 retval = vme_err->address - (vme_base + offset);
1384                 /* Clear down saved errors in this address range */
1385                 tsi148_clear_errors(aspace, vme_base + offset, count);
1386         }
1387
1388 skip_chk:
1389         spin_unlock(&(image->lock));
1390
1391         return retval;
1392 }
1393
1394 /*
1395  * Perform an RMW cycle on the VME bus.
1396  *
1397  * Requires a previously configured master window, returns final value.
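 *
 * A rough usage sketch (assuming 'image' already maps the target VME
 * address via tsi148_master_set()):
 *
 *      old = tsi148_master_rmw(image, 0x1, 0x0, 0x1, offset);
 *
 * For each bit enabled in 'mask' the bridge compares the data read from the
 * location with 'compare' and, where the bits match, writes back the
 * corresponding 'swap' bit in a single indivisible cycle; the call above
 * attempts to atomically set bit 0 if it was previously clear.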
1398  */
1399 unsigned int tsi148_master_rmw(struct vme_master_resource *image,
1400         unsigned int mask, unsigned int compare, unsigned int swap,
1401         loff_t offset)
1402 {
1403         unsigned long long pci_addr;
1404         unsigned int pci_addr_high, pci_addr_low;
1405         u32 tmp, result;
1406         int i;
1407
1408
1409         /* Find the PCI address that maps to the desired VME address */
1410         i = image->number;
1411
1412         /* Locking as we can only do one of these at a time */
1413         mutex_lock(&(vme_rmw));
1414
1415         /* Lock image */
1416         spin_lock(&(image->lock));
1417
1418         pci_addr_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] +
1419                 TSI148_LCSR_OFFSET_OTSAU);
1420         pci_addr_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] +
1421                 TSI148_LCSR_OFFSET_OTSAL);
1422
1423         reg_join(pci_addr_high, pci_addr_low, &pci_addr);
1424         reg_split(pci_addr + offset, &pci_addr_high, &pci_addr_low);
1425
1426         /* Configure registers */
1427         iowrite32be(mask, tsi148_bridge->base + TSI148_LCSR_RMWEN);
1428         iowrite32be(compare, tsi148_bridge->base + TSI148_LCSR_RMWC);
1429         iowrite32be(swap, tsi148_bridge->base + TSI148_LCSR_RMWS);
1430         iowrite32be(pci_addr_high, tsi148_bridge->base + TSI148_LCSR_RMWAU);
1431         iowrite32be(pci_addr_low, tsi148_bridge->base + TSI148_LCSR_RMWAL);
1432
1433         /* Enable RMW */
1434         tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_VMCTRL);
1435         tmp |= TSI148_LCSR_VMCTRL_RMWEN;
1436         iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_VMCTRL);
1437
1438         /* Kick process off with a read to the required address. */
1439         result = ioread32be(image->kern_base + offset);
1440
1441         /* Disable RMW */
1442         tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_VMCTRL);
1443         tmp &= ~TSI148_LCSR_VMCTRL_RMWEN;
1444         iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_VMCTRL);
1445
1446         spin_unlock(&(image->lock));
1447
1448         mutex_unlock(&(vme_rmw));
1449
1450         return result;
1451 }
1452
1453 static int tsi148_dma_set_vme_src_attributes (u32 *attr, vme_address_t aspace,
1454         vme_cycle_t cycle, vme_width_t dwidth)
1455 {
1456         /* Setup 2eSST speeds */
1457         switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1458         case VME_2eSST160:
1459                 *attr |= TSI148_LCSR_DSAT_2eSSTM_160;
1460                 break;
1461         case VME_2eSST267:
1462                 *attr |= TSI148_LCSR_DSAT_2eSSTM_267;
1463                 break;
1464         case VME_2eSST320:
1465                 *attr |= TSI148_LCSR_DSAT_2eSSTM_320;
1466                 break;
1467         }
1468
1469         /* Setup cycle types */
1470         if (cycle & VME_SCT) {
1471                 *attr |= TSI148_LCSR_DSAT_TM_SCT;
1472         }
1473         if (cycle & VME_BLT) {
1474                 *attr |= TSI148_LCSR_DSAT_TM_BLT;
1475         }
1476         if (cycle & VME_MBLT) {
1477                 *attr |= TSI148_LCSR_DSAT_TM_MBLT;
1478         }
1479         if (cycle & VME_2eVME) {
1480                 *attr |= TSI148_LCSR_DSAT_TM_2eVME;
1481         }
1482         if (cycle & VME_2eSST) {
1483                 *attr |= TSI148_LCSR_DSAT_TM_2eSST;
1484         }
1485         if (cycle & VME_2eSSTB) {
1486                 printk(KERN_WARNING "Currently not setting Broadcast Select Registers\n");
1487                 *attr |= TSI148_LCSR_DSAT_TM_2eSSTB;
1488         }
1489
1490         /* Setup data width */
1491         switch (dwidth) {
1492         case VME_D16:
1493                 *attr |= TSI148_LCSR_DSAT_DBW_16;
1494                 break;
1495         case VME_D32:
1496                 *attr |= TSI148_LCSR_DSAT_DBW_32;
1497                 break;
1498         default:
1499                 printk(KERN_ERR "Invalid data width\n");
1500                 return -EINVAL;
1501         }
1502
1503         /* Setup address space */
1504         switch (aspace) {
1505         case VME_A16:
1506                 *attr |= TSI148_LCSR_DSAT_AMODE_A16;
1507                 break;
1508         case VME_A24:
1509                 *attr |= TSI148_LCSR_DSAT_AMODE_A24;
1510                 break;
1511         case VME_A32:
1512                 *attr |= TSI148_LCSR_DSAT_AMODE_A32;
1513                 break;
1514         case VME_A64:
1515                 *attr |= TSI148_LCSR_DSAT_AMODE_A64;
1516                 break;
1517         case VME_CRCSR:
1518                 *attr |= TSI148_LCSR_DSAT_AMODE_CRCSR;
1519                 break;
1520         case VME_USER1:
1521                 *attr |= TSI148_LCSR_DSAT_AMODE_USER1;
1522                 break;
1523         case VME_USER2:
1524                 *attr |= TSI148_LCSR_DSAT_AMODE_USER2;
1525                 break;
1526         case VME_USER3:
1527                 *attr |= TSI148_LCSR_DSAT_AMODE_USER3;
1528                 break;
1529         case VME_USER4:
1530                 *attr |= TSI148_LCSR_DSAT_AMODE_USER4;
1531                 break;
1532         default:
1533                 printk(KERN_ERR "Invalid address space\n");
1534                 return -EINVAL;
1536         }
1537
1538         if (cycle & VME_SUPER)
1539                 *attr |= TSI148_LCSR_DSAT_SUP;
1540         if (cycle & VME_PROG)
1541                 *attr |= TSI148_LCSR_DSAT_PGM;
1542
1543         return 0;
1544 }
1545
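/*
 * As an illustration, a single-cycle (VME_SCT), supervisory (VME_SUPER)
 * source in VME_A32 space with VME_D32 width leaves *attr holding
 * TSI148_LCSR_DSAT_TM_SCT | TSI148_LCSR_DSAT_DBW_32 |
 * TSI148_LCSR_DSAT_AMODE_A32 | TSI148_LCSR_DSAT_SUP, on top of the
 * TSI148_LCSR_DSAT_TYP_VME bit already set by tsi148_dma_list_add().
 */
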
1546 static int tsi148_dma_set_vme_dest_attributes(u32 *attr, vme_address_t aspace,
1547         vme_cycle_t cycle, vme_width_t dwidth)
1548 {
1549         /* Setup 2eSST speeds */
1550         switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1551         case VME_2eSST160:
1552                 *attr |= TSI148_LCSR_DDAT_2eSSTM_160;
1553                 break;
1554         case VME_2eSST267:
1555                 *attr |= TSI148_LCSR_DDAT_2eSSTM_267;
1556                 break;
1557         case VME_2eSST320:
1558                 *attr |= TSI148_LCSR_DDAT_2eSSTM_320;
1559                 break;
1560         }
1561
1562         /* Setup cycle types */
1563         if (cycle & VME_SCT) {
1564                 *attr |= TSI148_LCSR_DDAT_TM_SCT;
1565         }
1566         if (cycle & VME_BLT) {
1567                 *attr |= TSI148_LCSR_DDAT_TM_BLT;
1568         }
1569         if (cycle & VME_MBLT) {
1570                 *attr |= TSI148_LCSR_DDAT_TM_MBLT;
1571         }
1572         if (cycle & VME_2eVME) {
1573                 *attr |= TSI148_LCSR_DDAT_TM_2eVME;
1574         }
1575         if (cycle & VME_2eSST) {
1576                 *attr |= TSI148_LCSR_DDAT_TM_2eSST;
1577         }
1578         if (cycle & VME_2eSSTB) {
1579                 printk(KERN_WARNING "Currently not setting Broadcast Select Registers\n");
1580                 *attr |= TSI148_LCSR_DDAT_TM_2eSSTB;
1581         }
1582
1583         /* Setup data width */
1584         switch (dwidth) {
1585         case VME_D16:
1586                 *attr |= TSI148_LCSR_DDAT_DBW_16;
1587                 break;
1588         case VME_D32:
1589                 *attr |= TSI148_LCSR_DDAT_DBW_32;
1590                 break;
1591         default:
1592                 printk(KERN_ERR "Invalid data width\n");
1593                 return -EINVAL;
1594         }
1595
1596         /* Setup address space */
1597         switch (aspace) {
1598         case VME_A16:
1599                 *attr |= TSI148_LCSR_DDAT_AMODE_A16;
1600                 break;
1601         case VME_A24:
1602                 *attr |= TSI148_LCSR_DDAT_AMODE_A24;
1603                 break;
1604         case VME_A32:
1605                 *attr |= TSI148_LCSR_DDAT_AMODE_A32;
1606                 break;
1607         case VME_A64:
1608                 *attr |= TSI148_LCSR_DDAT_AMODE_A64;
1609                 break;
1610         case VME_CRCSR:
1611                 *attr |= TSI148_LCSR_DDAT_AMODE_CRCSR;
1612                 break;
1613         case VME_USER1:
1614                 *attr |= TSI148_LCSR_DDAT_AMODE_USER1;
1615                 break;
1616         case VME_USER2:
1617                 *attr |= TSI148_LCSR_DDAT_AMODE_USER2;
1618                 break;
1619         case VME_USER3:
1620                 *attr |= TSI148_LCSR_DDAT_AMODE_USER3;
1621                 break;
1622         case VME_USER4:
1623                 *attr |= TSI148_LCSR_DDAT_AMODE_USER4;
1624                 break;
1625         default:
1626                 printk(KERN_ERR "Invalid address space\n");
1627                 return -EINVAL;
1629         }
1630
1631         if (cycle & VME_SUPER)
1632                 *attr |= TSI148_LCSR_DDAT_SUP;
1633         if (cycle & VME_PROG)
1634                 *attr |= TSI148_LCSR_DDAT_PGM;
1635
1636         return 0;
1637 }
1638
1639 /*
1640  * Add a link list descriptor to the list
1641  *
1642  * XXX Need to handle 2eSST Broadcast select bits
1643  */
1644 int tsi148_dma_list_add (struct vme_dma_list *list, struct vme_dma_attr *src,
1645         struct vme_dma_attr *dest, size_t count)
1646 {
1647         struct tsi148_dma_entry *entry, *prev;
1648         u32 address_high, address_low;
1649         struct vme_dma_pattern *pattern_attr;
1650         struct vme_dma_pci *pci_attr;
1651         struct vme_dma_vme *vme_attr;
1652         dma_addr_t desc_ptr;
1653         int retval = 0;
1654
1655         /* XXX descriptor must be aligned on 64-bit boundaries */
1656         entry = (struct tsi148_dma_entry *)kmalloc(
1657                 sizeof(struct tsi148_dma_entry), GFP_KERNEL);
1658         if (entry == NULL) {
1659                 printk(KERN_ERR "Failed to allocate memory for dma "
1660                         "resource structure\n");
1661                 retval = -ENOMEM;
1662                 goto err_mem;
1663         }
1664
1665         /* Test descriptor alignment */
1666         if ((unsigned long)&(entry->descriptor) & 0x7) {
1667                 printk(KERN_ERR "Descriptor not aligned to 8 byte boundary "
1668                         "as required: %p\n", &(entry->descriptor));
1669                 retval = -EINVAL;
1670                 goto err_align;
1671         }
1672
1673         /* Given we are going to fill out the structure, we probably don't
1674          * need to zero it, but better safe than sorry for now.
1675          */
1676         memset(&(entry->descriptor), 0, sizeof(struct tsi148_dma_descriptor));
1677
1678         /* Fill out source part */
1679         switch (src->type) {
1680         case VME_DMA_PATTERN:
1681                 pattern_attr = (struct vme_dma_pattern *)src->private;
1682
1683                 entry->descriptor.dsal = pattern_attr->pattern;
1684                 entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_PAT;
1685                 /* Default behaviour is 32 bit pattern */
1686                 if (pattern_attr->type & VME_DMA_PATTERN_BYTE) {
1687                         entry->descriptor.dsat |= TSI148_LCSR_DSAT_PSZ;
1688                 }
1689                 /* It seems that the default behaviour is to increment */
1690                 if ((pattern_attr->type & VME_DMA_PATTERN_INCREMENT) == 0) {
1691                         entry->descriptor.dsat |= TSI148_LCSR_DSAT_NIN;
1692                 }
1693                 break;
1694         case VME_DMA_PCI:
1695                 pci_attr = (struct vme_dma_pci *)src->private;
1696
1697                 reg_split((unsigned long long)pci_attr->address, &address_high,
1698                         &address_low);
1699                 entry->descriptor.dsau = address_high;
1700                 entry->descriptor.dsal = address_low;
1701                 entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_PCI;
1702                 break;
1703         case VME_DMA_VME:
1704                 vme_attr = (struct vme_dma_vme *)src->private;
1705
1706                 reg_split((unsigned long long)vme_attr->address, &address_high,
1707                         &address_low);
1708                 entry->descriptor.dsau = address_high;
1709                 entry->descriptor.dsal = address_low;
1710                 entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_VME;
1711
1712                 retval = tsi148_dma_set_vme_src_attributes(
1713                         &(entry->descriptor.dsat), vme_attr->aspace,
1714                         vme_attr->cycle, vme_attr->dwidth);
1715                 if (retval < 0)
1716                         goto err_source;
1717                 break;
1718         default:
1719                 printk(KERN_ERR "Invalid source type\n");
1720                 retval = -EINVAL;
1721                 goto err_source;
1723         }
1724
1725         /* Assume last link - this will be over-written by adding another */
1726         entry->descriptor.dnlau = 0;
1727         entry->descriptor.dnlal = TSI148_LCSR_DNLAL_LLA;
1728
1729
1730         /* Fill out destination part */
1731         switch (dest->type) {
1732         case VME_DMA_PCI:
1733                 pci_attr = (struct vme_dma_pci *)dest->private;
1734
1735                 reg_split((unsigned long long)pci_attr->address, &address_high,
1736                         &address_low);
1737                 entry->descriptor.ddau = address_high;
1738                 entry->descriptor.ddal = address_low;
1739                 entry->descriptor.ddat = TSI148_LCSR_DDAT_TYP_PCI;
1740                 break;
1741         case VME_DMA_VME:
1742                 vme_attr = (struct vme_dma_vme *)dest->private;
1743
1744                 reg_split((unsigned long long)vme_attr->address, &address_high,
1745                         &address_low);
1746                 entry->descriptor.ddau = address_high;
1747                 entry->descriptor.ddal = address_low;
1748                 entry->descriptor.ddat = TSI148_LCSR_DDAT_TYP_VME;
1749
1750                 retval = tsi148_dma_set_vme_dest_attributes(
1751                         &(entry->descriptor.ddat), vme_attr->aspace,
1752                         vme_attr->cycle, vme_attr->dwidth);
1753                 if (retval < 0)
1754                         goto err_dest;
1755                 break;
1756         default:
1757                 printk(KERN_ERR "Invalid destination type\n");
1758                 retval = -EINVAL;
1759                 goto err_dest;
1761         }
1762
1763         /* Fill out count */
1764         entry->descriptor.dcnt = (u32)count;
1765
1766         /* Add to list */
1767         list_add_tail(&(entry->list), &(list->entries));
1768
1769         /* Fill out previous descriptors "Next Address" */
1770         if (entry->list.prev != &(list->entries)) {
1771                 prev = list_entry(entry->list.prev, struct tsi148_dma_entry,
1772                         list);
1773                 /* We need the bus address for the pointer */
1774                 desc_ptr = virt_to_bus(&(entry->descriptor));
1775                 reg_split(desc_ptr, &(prev->descriptor.dnlau),
1776                         &(prev->descriptor.dnlal));
1777         }
1778
1779         return 0;
1780
1781 err_dest:
1782 err_source:
1783 err_align:
1784                 kfree(entry);
1785 err_mem:
1786         return retval;
1787 }
1788
1789 /*
1790  * Check if the provided DMA channel is busy: returns 0 while busy, 1 once idle.
1791  */
1792 static int tsi148_dma_busy(int channel)
1793 {
1794         u32 tmp;
1795
1796         tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_DMA[channel] +
1797                 TSI148_LCSR_OFFSET_DSTA);
1798
1799         if (tmp & TSI148_LCSR_DSTA_BSY)
1800                 return 0;
1801         else
1802                 return 1;
1803
1804 }
1805
1806 /*
1807  * Execute a previously generated link list
1808  *
1809  * XXX Need to provide control register configuration.
1810  */
1811 int tsi148_dma_list_exec(struct vme_dma_list *list)
1812 {
1813         struct vme_dma_resource *ctrlr;
1814         int channel, retval = 0;
1815         struct tsi148_dma_entry *entry;
1816         dma_addr_t bus_addr;
1817         u32 bus_addr_high, bus_addr_low;
1818         u32 val, dctlreg = 0;
1819 #if 0
1820         int x;
1821 #endif
1822
1823         ctrlr = list->parent;
1824
1825         mutex_lock(&(ctrlr->mtx));
1826
1827         channel = ctrlr->number;
1828
1829         if (!list_empty(&(ctrlr->running))) {
1830                 /*
1831                  * XXX We have an active DMA transfer and currently haven't
1832                  *     sorted out the mechanism for "pending" DMA transfers.
1833                  *     Return busy.
1834                  */
1835                 /* Need to add to pending here */
1836                 mutex_unlock(&(ctrlr->mtx));
1837                 return -EBUSY;
1838         } else {
1839                 list_add(&(list->list), &(ctrlr->running));
1840         }
1841 #if 0
1842         /* XXX Still todo */
1843         for (x = 0; x < 8; x++) {       /* vme block size */
1844                 if ((32 << x) >= vmeDma->maxVmeBlockSize) {
1845                         break;
1846                 }
1847         }
1848         if (x == 8)
1849                 x = 7;
1850         dctlreg |= (x << 12);
1851
1852         for (x = 0; x < 8; x++) {       /* pci block size */
1853                 if ((32 << x) >= vmeDma->maxPciBlockSize) {
1854                         break;
1855                 }
1856         }
1857         if (x == 8)
1858                 x = 7;
1859         dctlreg |= (x << 4);
1860
1861         if (vmeDma->vmeBackOffTimer) {
1862                 for (x = 1; x < 8; x++) {       /* vme timer */
1863                         if ((1 << (x - 1)) >= vmeDma->vmeBackOffTimer) {
1864                                 break;
1865                         }
1866                 }
1867                 if (x == 8)
1868                         x = 7;
1869                 dctlreg |= (x << 8);
1870         }
1871
1872         if (vmeDma->pciBackOffTimer) {
1873                 for (x = 1; x < 8; x++) {       /* pci timer */
1874                         if ((1 << (x - 1)) >= vmeDma->pciBackOffTimer) {
1875                                 break;
1876                         }
1877                 }
1878                 if (x == 8)
1879                         x = 7;
1880                 dctlreg |= (x << 0);
1881         }
1882 #endif
1883
1884         /* Get first bus address and write into registers */
1885         entry = list_first_entry(&(list->entries), struct tsi148_dma_entry,
1886                 list);
1887
1888         bus_addr = virt_to_bus(&(entry->descriptor));
1889
1890         mutex_unlock(&(ctrlr->mtx));
1891
1892         reg_split(bus_addr, &bus_addr_high, &bus_addr_low);
1893
1894         iowrite32be(bus_addr_high, tsi148_bridge->base +
1895                 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAU);
1896         iowrite32be(bus_addr_low, tsi148_bridge->base +
1897                 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAL);
1898
1899         /* Start the operation */
1900         iowrite32be(dctlreg | TSI148_LCSR_DCTL_DGO, tsi148_bridge->base +
1901                 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);
1902
1903         wait_event_interruptible(dma_queue[channel], tsi148_dma_busy(channel));
1904         /*
1905          * Read the status register; it remains valid until we kick off a
1906          * new transfer.
1907          */
1908         val = ioread32be(tsi148_bridge->base + TSI148_LCSR_DMA[channel] +
1909                 TSI148_LCSR_OFFSET_DSTA);
1910
1911         if (val & TSI148_LCSR_DSTA_VBE) {
1912                 printk(KERN_ERR "tsi148: DMA Error. DSTA=%08X\n", val);
1913                 retval = -EIO;
1914         }
1915
1916         /* Remove list from running list */
1917         mutex_lock(&(ctrlr->mtx));
1918         list_del(&(list->list));
1919         mutex_unlock(&(ctrlr->mtx));
1920
1921         return retval;
1922 }
1923
1924 /*
1925  * Clean up a previously generated link list
1926  *
1927  * Kept as a separate function - do not assume that the chain cannot be reused.
1928  */
1929 int tsi148_dma_list_empty(struct vme_dma_list *list)
1930 {
1931         struct list_head *pos, *temp;
1932         struct tsi148_dma_entry *entry;
1933
1934         /* detach and free each entry */
1935         list_for_each_safe(pos, temp, &(list->entries)) {
1936                 list_del(pos);
1937                 entry = list_entry(pos, struct tsi148_dma_entry, list);
1938                 kfree(entry);
1939         }
1940
1941         return (0);
1942 }
1943
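/*
 * Sketch of how the three DMA list entry points above chain together
 * (illustrative only; the vme_dma_*_attribute() helpers and error handling
 * are assumed to come from the VME core in ../vme.c and are not shown):
 *
 *	src = vme_dma_pci_attribute(buf_bus);
 *	dest = vme_dma_vme_attribute(vme_addr, VME_A32, VME_SCT, VME_D32);
 *	tsi148_dma_list_add(list, src, dest, count);
 *	tsi148_dma_list_exec(list);
 *	tsi148_dma_list_empty(list);
 */
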
1944 /*
1945  * All 4 location monitors reside at the same base - this is therefore a
1946  * system-wide configuration.
1947  *
1948  * This does not enable the location monitor - that should be done when the first
1949  * callback is attached and disabled when the last callback is removed.
1950  */
1951 int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
1952         vme_address_t aspace, vme_cycle_t cycle)
1953 {
1954         u32 lm_base_high, lm_base_low, lm_ctl = 0;
1955         int i;
1956
1957         mutex_lock(&(lm->mtx));
1958
1959         /* If we already have a callback attached, we can't move it! */
1960         for (i = 0; i < lm->monitors; i++) {
1961                 if (lm_callback[i] != NULL) {
1962                         mutex_unlock(&(lm->mtx));
1963                         printk(KERN_ERR "Location monitor callback attached, "
1964                                 "can't reset\n");
1965                         return -EBUSY;
1966                 }
1967         }
1968
1969         switch (aspace) {
1970         case VME_A16:
1971                 lm_ctl |= TSI148_LCSR_LMAT_AS_A16;
1972                 break;
1973         case VME_A24:
1974                 lm_ctl |= TSI148_LCSR_LMAT_AS_A24;
1975                 break;
1976         case VME_A32:
1977                 lm_ctl |= TSI148_LCSR_LMAT_AS_A32;
1978                 break;
1979         case VME_A64:
1980                 lm_ctl |= TSI148_LCSR_LMAT_AS_A64;
1981                 break;
1982         default:
1983                 mutex_unlock(&(lm->mtx));
1984                 printk(KERN_ERR "Invalid address space\n");
1985                 return -EINVAL;
1987         }
1988
1989         if (cycle & VME_SUPER)
1990                 lm_ctl |= TSI148_LCSR_LMAT_SUPR ;
1991         if (cycle & VME_USER)
1992                 lm_ctl |= TSI148_LCSR_LMAT_NPRIV;
1993         if (cycle & VME_PROG)
1994                 lm_ctl |= TSI148_LCSR_LMAT_PGM;
1995         if (cycle & VME_DATA)
1996                 lm_ctl |= TSI148_LCSR_LMAT_DATA;
1997
1998         reg_split(lm_base, &lm_base_high, &lm_base_low);
1999
2000         iowrite32be(lm_base_high, tsi148_bridge->base + TSI148_LCSR_LMBAU);
2001         iowrite32be(lm_base_low, tsi148_bridge->base + TSI148_LCSR_LMBAL);
2002         iowrite32be(lm_ctl, tsi148_bridge->base + TSI148_LCSR_LMAT);
2003
2004         mutex_unlock(&(lm->mtx));
2005
2006         return 0;
2007 }
2008
2009 /* Get the configuration of the location monitor and return whether it is
2010  * enabled or disabled.
2011  */
2012 int tsi148_lm_get(struct vme_lm_resource *lm, unsigned long long *lm_base,
2013         vme_address_t *aspace, vme_cycle_t *cycle)
2014 {
2015         u32 lm_base_high, lm_base_low, lm_ctl, enabled = 0;
2016
2017         mutex_lock(&(lm->mtx));
2018
2019         lm_base_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_LMBAU);
2020         lm_base_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_LMBAL);
2021         lm_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_LMAT);
2022
2023         reg_join(lm_base_high, lm_base_low, lm_base);
2024
2025         if (lm_ctl & TSI148_LCSR_LMAT_EN)
2026                 enabled = 1;
2027
2028         if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A16) {
2029                 *aspace |= VME_A16;
2030         }
2031         if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A24) {
2032                 *aspace |= VME_A24;
2033         }
2034         if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A32) {
2035                 *aspace |= VME_A32;
2036         }
2037         if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A64) {
2038                 *aspace |= VME_A64;
2039         }
2040
2041         if (lm_ctl & TSI148_LCSR_LMAT_SUPR)
2042                 *cycle |= VME_SUPER;
2043         if (lm_ctl & TSI148_LCSR_LMAT_NPRIV)
2044                 *cycle |= VME_USER;
2045         if (lm_ctl & TSI148_LCSR_LMAT_PGM)
2046                 *cycle |= VME_PROG;
2047         if (lm_ctl & TSI148_LCSR_LMAT_DATA)
2048                 *cycle |= VME_DATA;
2049
2050         mutex_unlock(&(lm->mtx));
2051
2052         return enabled;
2053 }
2054
2055 /*
2056  * Attach a callback to a specific location monitor.
2057  *
2058  * Callback will be passed the monitor triggered.
2059  */
2060 int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
2061         void (*callback)(int))
2062 {
2063         u32 lm_ctl, tmp;
2064
2065         mutex_lock(&(lm->mtx));
2066
2067         /* Ensure that the location monitor is configured - need PGM or DATA */
2068         lm_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_LMAT);
2069         if ((lm_ctl & (TSI148_LCSR_LMAT_PGM | TSI148_LCSR_LMAT_DATA)) == 0) {
2070                 mutex_unlock(&(lm->mtx));
2071                 printk(KERN_ERR "Location monitor not properly configured\n");
2072                 return -EINVAL;
2073         }
2074
2075         /* Check that a callback isn't already attached */
2076         if (lm_callback[monitor] != NULL) {
2077                 mutex_unlock(&(lm->mtx));
2078                 printk(KERN_ERR "Existing callback attached\n");
2079                 return -EBUSY;
2080         }
2081
2082         /* Attach callback */
2083         lm_callback[monitor] = callback;
2084
2085         /* Enable Location Monitor interrupt */
2086         tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEN);
2087         tmp |= TSI148_LCSR_INTEN_LMEN[monitor];
2088         iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_INTEN);
2089
2090         tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEO);
2091         tmp |= TSI148_LCSR_INTEO_LMEO[monitor];
2092         iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_INTEO);
2093
2094         /* Ensure that the global Location Monitor Enable is set */
2095         if ((lm_ctl & TSI148_LCSR_LMAT_EN) == 0) {
2096                 lm_ctl |= TSI148_LCSR_LMAT_EN;
2097                 iowrite32be(lm_ctl, tsi148_bridge->base + TSI148_LCSR_LMAT);
2098         }
2099
2100         mutex_unlock(&(lm->mtx));
2101
2102         return 0;
2103 }
2104
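/*
 * Illustrative sequence for the location monitor calls above: watch
 * non-privileged data accesses to an A24 region and have a hypothetical
 * my_handler(monitor) called from the interrupt handler when location 0
 * is hit.
 *
 *	tsi148_lm_set(lm, 0x200000, VME_A24, VME_USER | VME_DATA);
 *	tsi148_lm_attach(lm, 0, my_handler);
 *	...
 *	tsi148_lm_detach(lm, 0);
 */
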
2105 /*
2106  * Detach a callback function from a specific location monitor.
2107  */
2108 int tsi148_lm_detach(struct vme_lm_resource *lm, int monitor)
2109 {
2110         u32 lm_en, tmp;
2111
2112         mutex_lock(&(lm->mtx));
2113
2114         /* Disable Location Monitor and ensure previous interrupts are clear */
2115         lm_en = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEN);
2116         lm_en &= ~TSI148_LCSR_INTEN_LMEN[monitor];
2117         iowrite32be(lm_en, tsi148_bridge->base + TSI148_LCSR_INTEN);
2118
2119         tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEO);
2120         tmp &= ~TSI148_LCSR_INTEO_LMEO[monitor];
2121         iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_INTEO);
2122
2123         iowrite32be(TSI148_LCSR_INTC_LMC[monitor],
2124                  tsi148_bridge->base + TSI148_LCSR_INTC);
2125
2126         /* Detach callback */
2127         lm_callback[monitor] = NULL;
2128
2129         /* If all location monitors are disabled, clear the global LM enable */
2130         if ((lm_en & (TSI148_LCSR_INTS_LM0S | TSI148_LCSR_INTS_LM1S |
2131                         TSI148_LCSR_INTS_LM2S | TSI148_LCSR_INTS_LM3S)) == 0) {
2132                 tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_LMAT);
2133                 tmp &= ~TSI148_LCSR_LMAT_EN;
2134                 iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_LMAT);
2135         }
2136
2137         mutex_unlock(&(lm->mtx));
2138
2139         return 0;
2140 }
2141
2142 /*
2143  * Determine Geographical Addressing
2144  */
2145 int tsi148_slot_get(void)
2146 {
2147         u32 slot = 0;
2148
2149         if (!geoid) {
2150                 slot = ioread32be(tsi148_bridge->base + TSI148_LCSR_VSTAT);
2151                 slot = slot & TSI148_LCSR_VSTAT_GA_M;
2152         } else
2153                 slot = geoid;
2154
2155         return (int)slot;
2156 }
2157
2158 static int __init tsi148_init(void)
2159 {
2160         return pci_register_driver(&tsi148_driver);
2161 }
2162
2163 /*
2164  * Configure CR/CSR space
2165  *
2166  * Access to the CR/CSR can be configured at power-up. The location of the
2167  * CR/CSR registers in the CR/CSR address space is determined by the board's
2168  * Auto-ID or geographic address. This function ensures that the window is
2169  * enabled at an offset consistent with the board's geographic address.
2170  *
2171  * Each board has a 512kB window, with the highest 4kB being used for the
2172  * board's registers; this means there is a fixed-length 508kB window which
2173  * must be mapped onto PCI memory.
2174  */
2175 static int tsi148_crcsr_init(struct pci_dev *pdev)
2176 {
2177         u32 cbar, crat, vstat;
2178         u32 crcsr_bus_high, crcsr_bus_low;
2179         int retval;
2180
2181         /* Allocate mem for CR/CSR image */
2182         crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
2183                 &crcsr_bus);
2184         if (crcsr_kernel == NULL) {
2185                 dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR "
2186                         "image\n");
2187                 return -ENOMEM;
2188         }
2189
2190         memset(crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);
2191
2192         reg_split(crcsr_bus, &crcsr_bus_high, &crcsr_bus_low);
2193
2194         iowrite32be(crcsr_bus_high, tsi148_bridge->base + TSI148_LCSR_CROU);
2195         iowrite32be(crcsr_bus_low, tsi148_bridge->base + TSI148_LCSR_CROL);
2196
2197         /* Ensure that the CR/CSR is configured at the correct offset */
2198         cbar = ioread32be(tsi148_bridge->base + TSI148_CBAR);
2199         cbar = (cbar & TSI148_CRCSR_CBAR_M)>>3;
2200
2201         vstat = tsi148_slot_get();
2202
2203         if (cbar != vstat) {
2204                 cbar = vstat;
2205                 dev_info(&pdev->dev, "Setting CR/CSR offset\n");
2206                 iowrite32be(cbar<<3, tsi148_bridge->base + TSI148_CBAR);
2207         }
2208         dev_info(&pdev->dev, "CR/CSR Offset: %d\n", cbar);
2209
2210         crat = ioread32be(tsi148_bridge->base + TSI148_LCSR_CRAT);
2211         if (!(crat & TSI148_LCSR_CRAT_EN)) {
2212                 dev_info(&pdev->dev, "Enabling CR/CSR space\n");
2213                 iowrite32be(crat | TSI148_LCSR_CRAT_EN,
2214                         tsi148_bridge->base + TSI148_LCSR_CRAT);
2215         } else
2216                 dev_info(&pdev->dev, "CR/CSR already enabled\n");
2217
2218         /* If we want flushed, error-checked writes, set up a window
2219          * over the CR/CSR registers. We read from here to safely flush
2220          * through VME writes.
2221          */
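        /* Each slot's CR/CSR image is 512kB, so slot n responds at n * 0x80000 */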
2222         if (err_chk) {
2223                 retval = tsi148_master_set(flush_image, 1, (vstat * 0x80000),
2224                         0x80000, VME_CRCSR, VME_SCT, VME_D16);
2225                 if (retval)
2226                         dev_err(&pdev->dev, "Configuring flush image failed\n");
2227         }
2228
2229         return 0;
2230
2231 }
2232
2233 static void tsi148_crcsr_exit(struct pci_dev *pdev)
2234 {
2235         u32 crat;
2236
2237         /* Turn off CR/CSR space */
2238         crat = ioread32be(tsi148_bridge->base + TSI148_LCSR_CRAT);
2239         iowrite32be(crat & ~TSI148_LCSR_CRAT_EN,
2240                 tsi148_bridge->base + TSI148_LCSR_CRAT);
2241
2242         /* Free image */
2243         iowrite32be(0, tsi148_bridge->base + TSI148_LCSR_CROU);
2244         iowrite32be(0, tsi148_bridge->base + TSI148_LCSR_CROL);
2245
2246         pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, crcsr_kernel, crcsr_bus);
2247 }
2248
2249 static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2250 {
2251         int retval, i, master_num;
2252         u32 data;
2253         struct list_head *pos = NULL;
2254         struct vme_master_resource *master_image;
2255         struct vme_slave_resource *slave_image;
2256         struct vme_dma_resource *dma_ctrlr;
2257         struct vme_lm_resource *lm;
2258
2259         /* If we want to support more than one of each bridge, we need to
2260          * dynamically generate this so we get one per device
2261          */
2262         tsi148_bridge = (struct vme_bridge *)kmalloc(sizeof(struct vme_bridge),
2263                 GFP_KERNEL);
2264         if (tsi148_bridge == NULL) {
2265                 dev_err(&pdev->dev, "Failed to allocate memory for device "
2266                         "structure\n");
2267                 retval = -ENOMEM;
2268                 goto err_struct;
2269         }
2270
2271         memset(tsi148_bridge, 0, sizeof(struct vme_bridge));
2272
2273         /* Enable the device */
2274         retval = pci_enable_device(pdev);
2275         if (retval) {
2276                 dev_err(&pdev->dev, "Unable to enable device\n");
2277                 goto err_enable;
2278         }
2279
2280         /* Map Registers */
2281         retval = pci_request_regions(pdev, driver_name);
2282         if (retval) {
2283                 dev_err(&pdev->dev, "Unable to reserve resources\n");
2284                 goto err_resource;
2285         }
2286
2287         /* map registers in BAR 0 */
2288         tsi148_bridge->base = ioremap_nocache(pci_resource_start(pdev, 0), 4096);
2289         if (!tsi148_bridge->base) {
2290                 dev_err(&pdev->dev, "Unable to remap CRG region\n");
2291                 retval = -EIO;
2292                 goto err_remap;
2293         }
2294
2295         /* Check to see if the mapping worked out */
2296         data = ioread32(tsi148_bridge->base + TSI148_PCFS_ID) & 0x0000FFFF;
2297         if (data != PCI_VENDOR_ID_TUNDRA) {
2298                 dev_err(&pdev->dev, "CRG region check failed\n");
2299                 retval = -EIO;
2300                 goto err_test;
2301         }
2302
2303         /* Initialize wait queues & mutual exclusion flags */
2304         /* XXX These need to be moved to the vme_bridge structure */
2305         init_waitqueue_head(&dma_queue[0]);
2306         init_waitqueue_head(&dma_queue[1]);
2307         init_waitqueue_head(&iack_queue);
2308         mutex_init(&(vme_int));
2309         mutex_init(&(vme_rmw));
2310
2311         tsi148_bridge->parent = &(pdev->dev);
2312         strcpy(tsi148_bridge->name, driver_name);
2313
2314         /* Setup IRQ */
2315         retval = tsi148_irq_init(tsi148_bridge);
2316         if (retval != 0) {
2317                 dev_err(&pdev->dev, "Chip Initialization failed.\n");
2318                 goto err_irq;
2319         }
2320
2321         /* If we are going to flush writes, we need to read from the VME bus.
2322          * We need to do this safely, thus we read the device's own CR/CSR
2323          * register. To do this we must set up a window in CR/CSR space and
2324          * hence have one less master window resource available.
2325          */
2326         master_num = TSI148_MAX_MASTER;
2327         if (err_chk) {
2328                 master_num--;
2329                 /* XXX */
2330                 flush_image = (struct vme_master_resource *)kmalloc(
2331                         sizeof(struct vme_master_resource), GFP_KERNEL);
2332                 if (flush_image == NULL) {
2333                         dev_err(&pdev->dev, "Failed to allocate memory for "
2334                         "flush resource structure\n");
2335                         retval = -ENOMEM;
2336                         goto err_master;
2337                 }
2338                 flush_image->parent = tsi148_bridge;
2339                 spin_lock_init(&(flush_image->lock));
2340                 flush_image->locked = 1;
2341                 flush_image->number = master_num;
2342                 flush_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2343                         VME_A64;
2344                 flush_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2345                         VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2346                         VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2347                         VME_PROG | VME_DATA;
2348                 flush_image->width_attr = VME_D16 | VME_D32;
2349                 memset(&(flush_image->pci_resource), 0,
2350                         sizeof(struct resource));
2351                 flush_image->kern_base  = NULL;
2352         }
2353
2354         /* Add master windows to list */
2355         INIT_LIST_HEAD(&(tsi148_bridge->master_resources));
2356         for (i = 0; i < master_num; i++) {
2357                 master_image = (struct vme_master_resource *)kmalloc(
2358                         sizeof(struct vme_master_resource), GFP_KERNEL);
2359                 if (master_image == NULL) {
2360                         dev_err(&pdev->dev, "Failed to allocate memory for "
2361                         "master resource structure\n");
2362                         retval = -ENOMEM;
2363                         goto err_master;
2364                 }
2365                 master_image->parent = tsi148_bridge;
2366                 spin_lock_init(&(master_image->lock));
2367                 master_image->locked = 0;
2368                 master_image->number = i;
2369                 master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2370                         VME_A64;
2371                 master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2372                         VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2373                         VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2374                         VME_PROG | VME_DATA;
2375                 master_image->width_attr = VME_D16 | VME_D32;
2376                 memset(&(master_image->pci_resource), 0,
2377                         sizeof(struct resource));
2378                 master_image->kern_base  = NULL;
2379                 list_add_tail(&(master_image->list),
2380                         &(tsi148_bridge->master_resources));
2381         }
2382
2383         /* Add slave windows to list */
2384         INIT_LIST_HEAD(&(tsi148_bridge->slave_resources));
2385         for (i = 0; i < TSI148_MAX_SLAVE; i++) {
2386                 slave_image = (struct vme_slave_resource *)kmalloc(
2387                         sizeof(struct vme_slave_resource), GFP_KERNEL);
2388                 if (slave_image == NULL) {
2389                         dev_err(&pdev->dev, "Failed to allocate memory for "
2390                         "slave resource structure\n");
2391                         retval = -ENOMEM;
2392                         goto err_slave;
2393                 }
2394                 slave_image->parent = tsi148_bridge;
2395                 mutex_init(&(slave_image->mtx));
2396                 slave_image->locked = 0;
2397                 slave_image->number = i;
2398                 slave_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2399                         VME_A64 | VME_CRCSR | VME_USER1 | VME_USER2 |
2400                         VME_USER3 | VME_USER4;
2401                 slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2402                         VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2403                         VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2404                         VME_PROG | VME_DATA;
2405                 list_add_tail(&(slave_image->list),
2406                         &(tsi148_bridge->slave_resources));
2407         }
2408
2409         /* Add dma engines to list */
2410         INIT_LIST_HEAD(&(tsi148_bridge->dma_resources));
2411         for (i = 0; i < TSI148_MAX_DMA; i++) {
2412                 dma_ctrlr = (struct vme_dma_resource *)kmalloc(
2413                         sizeof(struct vme_dma_resource), GFP_KERNEL);
2414                 if (dma_ctrlr == NULL) {
2415                         dev_err(&pdev->dev, "Failed to allocate memory for "
2416                         "dma resource structure\n");
2417                         retval = -ENOMEM;
2418                         goto err_dma;
2419                 }
2420                 dma_ctrlr->parent = tsi148_bridge;
2421                 mutex_init(&(dma_ctrlr->mtx));
2422                 dma_ctrlr->locked = 0;
2423                 dma_ctrlr->number = i;
2424                 INIT_LIST_HEAD(&(dma_ctrlr->pending));
2425                 INIT_LIST_HEAD(&(dma_ctrlr->running));
2426                 list_add_tail(&(dma_ctrlr->list),
2427                         &(tsi148_bridge->dma_resources));
2428         }
2429
2430         /* Add location monitor to list */
2431         INIT_LIST_HEAD(&(tsi148_bridge->lm_resources));
2432         lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
2433         if (lm == NULL) {
2434                 dev_err(&pdev->dev, "Failed to allocate memory for "
2435                 "location monitor resource structure\n");
2436                 retval = -ENOMEM;
2437                 goto err_lm;
2438         }
2439         lm->parent = tsi148_bridge;
2440         mutex_init(&(lm->mtx));
2441         lm->locked = 0;
2442         lm->number = 1;
2443         lm->monitors = 4;
2444         list_add_tail(&(lm->list), &(tsi148_bridge->lm_resources));
2445
2446         tsi148_bridge->slave_get = tsi148_slave_get;
2447         tsi148_bridge->slave_set = tsi148_slave_set;
2448         tsi148_bridge->master_get = tsi148_master_get;
2449         tsi148_bridge->master_set = tsi148_master_set;
2450         tsi148_bridge->master_read = tsi148_master_read;
2451         tsi148_bridge->master_write = tsi148_master_write;
2452         tsi148_bridge->master_rmw = tsi148_master_rmw;
2453         tsi148_bridge->dma_list_add = tsi148_dma_list_add;
2454         tsi148_bridge->dma_list_exec = tsi148_dma_list_exec;
2455         tsi148_bridge->dma_list_empty = tsi148_dma_list_empty;
2456         tsi148_bridge->irq_set = tsi148_irq_set;
2457         tsi148_bridge->irq_generate = tsi148_irq_generate;
2458         tsi148_bridge->lm_set = tsi148_lm_set;
2459         tsi148_bridge->lm_get = tsi148_lm_get;
2460         tsi148_bridge->lm_attach = tsi148_lm_attach;
2461         tsi148_bridge->lm_detach = tsi148_lm_detach;
2462         tsi148_bridge->slot_get = tsi148_slot_get;
2463
2464         data = ioread32be(tsi148_bridge->base + TSI148_LCSR_VSTAT);
2465         dev_info(&pdev->dev, "Board is%s the VME system controller\n",
2466                 (data & TSI148_LCSR_VSTAT_SCONS)? "" : " not");
2467         if (!geoid) {
2468                 dev_info(&pdev->dev, "VME geographical address is %d\n",
2469                         data & TSI148_LCSR_VSTAT_GA_M);
2470         } else {
2471                 dev_info(&pdev->dev, "VME geographical address is set to %d\n",
2472                         geoid);
2473         }
2474         dev_info(&pdev->dev, "VME write flush and error check is %s\n",
2475                 err_chk ? "enabled" : "disabled");
2476
2477         retval = tsi148_crcsr_init(pdev);
2478         if (retval) {
2479                 dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
2480                 goto err_crcsr;
2481         }
2482
2483         /* Need to save tsi148_bridge pointer locally in link list for use in
2484          * tsi148_remove()
2485          */
2486         retval = vme_register_bridge(tsi148_bridge);
2487         if (retval != 0) {
2488                 dev_err(&pdev->dev, "Chip Registration failed.\n");
2489                 goto err_reg;
2490         }
2491
2492         /* Clear VME bus "board fail", and "power-up reset" lines */
2493         data = ioread32be(tsi148_bridge->base + TSI148_LCSR_VSTAT);
2494         data &= ~TSI148_LCSR_VSTAT_BRDFL;
2495         data |= TSI148_LCSR_VSTAT_CPURST;
2496         iowrite32be(data, tsi148_bridge->base + TSI148_LCSR_VSTAT);
2497
2498         return 0;
2499
2500         vme_unregister_bridge(tsi148_bridge);
2501 err_reg:
2502         tsi148_crcsr_exit(pdev);
2503 err_crcsr:
2504 err_lm:
2505         /* resources are stored in link list */
2506         list_for_each(pos, &(tsi148_bridge->lm_resources)) {
2507                 lm = list_entry(pos, struct vme_lm_resource, list);
2508                 list_del(pos);
2509                 kfree(lm);
2510         }
2511 err_dma:
2512         /* resources are stored in link list */
2513         list_for_each(pos, &(tsi148_bridge->dma_resources)) {
2514                 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
2515                 list_del(pos);
2516                 kfree(dma_ctrlr);
2517         }
2518 err_slave:
2519         /* resources are stored in link list */
2520         list_for_each(pos, &(tsi148_bridge->slave_resources)) {
2521                 slave_image = list_entry(pos, struct vme_slave_resource, list);
2522                 list_del(pos);
2523                 kfree(slave_image);
2524         }
2525 err_master:
2526         /* resources are stored in link list */
2527         list_for_each(pos, &(tsi148_bridge->master_resources)) {
2528                 master_image = list_entry(pos, struct vme_master_resource, list);
2529                 list_del(pos);
2530                 kfree(master_image);
2531         }
2532
2533         tsi148_irq_exit(pdev);
2534 err_irq:
2535 err_test:
2536         iounmap(tsi148_bridge->base);
2537 err_remap:
2538         pci_release_regions(pdev);
2539 err_resource:
2540         pci_disable_device(pdev);
2541 err_enable:
2542         kfree(tsi148_bridge);
2543 err_struct:
2544         return retval;
2545
2546 }
2547
2548 static void tsi148_remove(struct pci_dev *pdev)
2549 {
2550         struct list_head *pos = NULL;
2551         struct vme_master_resource *master_image;
2552         struct vme_slave_resource *slave_image;
2553         struct vme_dma_resource *dma_ctrlr;
2554         int i;
2555
2556         dev_dbg(&pdev->dev, "Driver is being unloaded.\n");
2557
2558         /* XXX We need to find the pdev->dev in the list of vme_bridge->dev's */
2559
2560         /*
2561          *  Shutdown all inbound and outbound windows.
2562          */
2563         for (i = 0; i < 8; i++) {
2564                 iowrite32be(0, tsi148_bridge->base + TSI148_LCSR_IT[i] +
2565                         TSI148_LCSR_OFFSET_ITAT);
2566                 iowrite32be(0, tsi148_bridge->base + TSI148_LCSR_OT[i] +
2567                         TSI148_LCSR_OFFSET_OTAT);
2568         }
2569
2570         /*
2571          *  Shutdown Location monitor.
2572          */
2573         iowrite32be(0, tsi148_bridge->base + TSI148_LCSR_LMAT);
2574
2575         /*
2576          *  Shutdown CRG map.
2577          */
2578         iowrite32be(0, tsi148_bridge->base + TSI148_LCSR_CSRAT);
2579
2580         /*
2581          *  Clear error status.
2582          */
2583         iowrite32be(0xFFFFFFFF, tsi148_bridge->base + TSI148_LCSR_EDPAT);
2584         iowrite32be(0xFFFFFFFF, tsi148_bridge->base + TSI148_LCSR_VEAT);
2585         iowrite32be(0x07000700, tsi148_bridge->base + TSI148_LCSR_PSTAT);
2586
2587         /*
2588          *  Remove VIRQ interrupt (if any)
2589          */
2590         if (ioread32be(tsi148_bridge->base + TSI148_LCSR_VICR) & 0x800) {
2591                 iowrite32be(0x8000, tsi148_bridge->base + TSI148_LCSR_VICR);
2592         }
2593
2594         /*
2595          *  Map all Interrupts to PCI INTA
2596          */
2597         iowrite32be(0x0, tsi148_bridge->base + TSI148_LCSR_INTM1);
2598         iowrite32be(0x0, tsi148_bridge->base + TSI148_LCSR_INTM2);
2599
2600         tsi148_irq_exit(pdev);
2601
2602         vme_unregister_bridge(tsi148_bridge);
2603
2604         tsi148_crcsr_exit(pdev);
2605
2606         /* resources are stored in link list */
2607         list_for_each(pos, &(tsi148_bridge->dma_resources)) {
2608                 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
2609                 list_del(pos);
2610                 kfree(dma_ctrlr);
2611         }
2612
2613         /* resources are stored in link list */
2614         list_for_each(pos, &(tsi148_bridge->slave_resources)) {
2615                 slave_image = list_entry(pos, struct vme_slave_resource, list);
2616                 list_del(pos);
2617                 kfree(slave_image);
2618         }
2619
2620         /* resources are stored in link list */
2621         list_for_each(pos, &(tsi148_bridge->master_resources)) {
2622                 master_image = list_entry(pos, struct vme_master_resource,
2623                         list);
2624                 list_del(pos);
2625                 kfree(master_image);
2626         }
2627
2630         iounmap(tsi148_bridge->base);
2631
2632         pci_release_regions(pdev);
2633
2634         pci_disable_device(pdev);
2635
2636         kfree(tsi148_bridge);
2637 }
2638
2639 static void __exit tsi148_exit(void)
2640 {
2641         pci_unregister_driver(&tsi148_driver);
2642
2643         printk(KERN_DEBUG "Driver removed.\n");
2644 }
2645
2646 MODULE_PARM_DESC(err_chk, "Check for VME errors on reads and writes");
2647 module_param(err_chk, bool, 0);
2648
2649 MODULE_PARM_DESC(geoid, "Override geographical addressing");
2650 module_param(geoid, int, 0);
2651
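/*
 * For example (assuming the module is loaded as vme_tsi148):
 *	modprobe vme_tsi148 err_chk=1 geoid=5
 */
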
2652 MODULE_DESCRIPTION("VME driver for the Tundra Tempe VME bridge");
2653 MODULE_LICENSE("GPL");
2654
2655 module_init(tsi148_init);
2656 module_exit(tsi148_exit);
2657
2658 /*----------------------------------------------------------------------------
2659  * STAGING
2660  *--------------------------------------------------------------------------*/
2661
2662 #if 0
2663 /*
2664  * Direct Mode DMA transfer
2665  *
2666  * XXX Not looking at direct mode for now, we can always use link list mode
2667  *     with a single entry.
2668  */
2669 int tsi148_dma_run(struct vme_dma_resource *resource, struct vme_dma_attr src,
2670         struct vme_dma_attr dest, size_t count)
2671 {
2672         u32 dctlreg = 0;
2673         unsigned int tmp;
2674         int val;
2675         int channel, x;
2676         struct vmeDmaPacket *cur_dma;
2677         struct tsi148_dma_descriptor *dmaLL;
2678
2679         /* direct mode */
2680         dctlreg = 0x800000;
2681
2682         for (x = 0; x < 8; x++) {       /* vme block size */
2683                 if ((32 << x) >= vmeDma->maxVmeBlockSize) {
2684                         break;
2685                 }
2686         }
2687         if (x == 8)
2688                 x = 7;
2689         dctlreg |= (x << 12);
2690
2691         for (x = 0; x < 8; x++) {       /* pci block size */
2692                 if ((32 << x) >= vmeDma->maxPciBlockSize) {
2693                         break;
2694                 }
2695         }
2696         if (x == 8)
2697                 x = 7;
2698         dctlreg |= (x << 4);
2699
2700         if (vmeDma->vmeBackOffTimer) {
2701                 for (x = 1; x < 8; x++) {       /* vme timer */
2702                         if ((1 << (x - 1)) >= vmeDma->vmeBackOffTimer) {
2703                                 break;
2704                         }
2705                 }
2706                 if (x == 8)
2707                         x = 7;
2708                 dctlreg |= (x << 8);
2709         }
2710
2711         if (vmeDma->pciBackOffTimer) {
2712                 for (x = 1; x < 8; x++) {       /* pci timer */
2713                         if ((1 << (x - 1)) >= vmeDma->pciBackOffTimer) {
2714                                 break;
2715                         }
2716                 }
2717                 if (x == 8)
2718                         x = 7;
2719                 dctlreg |= (x << 0);
2720         }
2721
2722         /* Program registers for DMA transfer */
2723         iowrite32be(dmaLL->dsau, tsi148_bridge->base +
2724                 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DSAU);
2725         iowrite32be(dmaLL->dsal, tsi148_bridge->base +
2726                 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DSAL);
2727         iowrite32be(dmaLL->ddau, tsi148_bridge->base +
2728                 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DDAU);
2729         iowrite32be(dmaLL->ddal, tsi148_bridge->base +
2730                 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DDAL);
2731         iowrite32be(dmaLL->dsat, tsi148_bridge->base +
2732                 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DSAT);
2733         iowrite32be(dmaLL->ddat, tsi148_bridge->base +
2734                 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DDAT);
2735         iowrite32be(dmaLL->dcnt, tsi148_bridge->base +
2736                 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCNT);
2737         iowrite32be(dmaLL->ddbs, tsi148_bridge->base +
2738                 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DDBS);
2739
2740         /* Start the operation */
2741         iowrite32be(dctlreg | 0x2000000, tsi148_bridge->base +
2742                 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);
2743
2744         tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_DMA[channel] +
2745                 TSI148_LCSR_OFFSET_DSTA);
2746         wait_event_interruptible(dma_queue[channel], (tmp & 0x1000000) == 0);
2747
2748         /*
2749          * Read status register, we should probably do this in some error
2750          * handler rather than here so that we can be sure we haven't kicked off
2751          * another DMA transfer.
2752          */
2753         val = ioread32be(tsi148_bridge->base + TSI148_LCSR_DMA[channel] +
2754                 TSI148_LCSR_OFFSET_DSTA);
2755
2756         vmeDma->vmeDmaStatus = 0;
2757         if (val & 0x10000000) {
2758                 printk(KERN_ERR
2759                         "DMA Error in DMA_tempe_irqhandler DSTA=%08X\n",
2760                         val);
2761                 vmeDma->vmeDmaStatus = val;
2762
2763         }
2764         return (0);
2765 }
2766 #endif
2767
2768 #if 0
2769
2770 /* Global VME controller information */
2771 struct pci_dev *vme_pci_dev;
2772
2773 /*
2774  * Set the VME bus arbiter with the requested attributes
2775  */
2776 int tempe_set_arbiter(vmeArbiterCfg_t * vmeArb)
2777 {
2778         int temp_ctl = 0;
2779         int gto = 0;
2780
2781         temp_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_VCTRL);
2782         temp_ctl &= 0xFFEFFF00;
2783
2784         if (vmeArb->globalTimeoutTimer == 0xFFFFFFFF) {
2785                 gto = 8;
2786         } else if (vmeArb->globalTimeoutTimer > 2048) {
2787                 return (-EINVAL);
2788         } else if (vmeArb->globalTimeoutTimer == 0) {
2789                 gto = 0;
2790         } else {
2791                 gto = 1;
2792                 while ((16 * (1 << (gto - 1))) < vmeArb->globalTimeoutTimer) {
2793                         gto += 1;
2794                 }
2795         }
2796         temp_ctl |= gto;
2797
2798         if (vmeArb->arbiterMode != VME_PRIORITY_MODE) {
2799                 temp_ctl |= 1 << 6;
2800         }
2801
2802         if (vmeArb->arbiterTimeoutFlag) {
2803                 temp_ctl |= 1 << 7;
2804         }
2805
2806         if (vmeArb->noEarlyReleaseFlag) {
2807                 temp_ctl |= 1 << 20;
2808         }
2809         iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_VCTRL);
2810
2811         return (0);
2812 }
2813
2814 /*
2815  * Return the attributes of the VME bus arbiter.
2816  */
2817 int tempe_get_arbiter(vmeArbiterCfg_t * vmeArb)
2818 {
2819         int temp_ctl = 0;
2820         int gto = 0;
2821
2822
2823         temp_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_VCTRL);
2824
2825         gto = temp_ctl & 0xF;
2826         if (gto != 0) {
2827                 vmeArb->globalTimeoutTimer = (16 * (1 << (gto - 1)));
2828         }
2829
2830         if (temp_ctl & (1 << 6)) {
2831                 vmeArb->arbiterMode = VME_R_ROBIN_MODE;
2832         } else {
2833                 vmeArb->arbiterMode = VME_PRIORITY_MODE;
2834         }
2835
2836         if (temp_ctl & (1 << 7)) {
2837                 vmeArb->arbiterTimeoutFlag = 1;
2838         }
2839
2840         if (temp_ctl & (1 << 20)) {
2841                 vmeArb->noEarlyReleaseFlag = 1;
2842         }
2843
2844         return (0);
2845 }
2846
2847 /*
2848  * Set the VME bus requestor with the requested attributes
2849  */
2850 int tempe_set_requestor(vmeRequesterCfg_t * vmeReq)
2851 {
2852         int temp_ctl = 0;
2853
2854         temp_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_VMCTRL);
2855         temp_ctl &= 0xFFFF0000;
2856
2857         if (vmeReq->releaseMode == 1) {
2858                 temp_ctl |= (1 << 3);
2859         }
2860
2861         if (vmeReq->fairMode == 1) {
2862                 temp_ctl |= (1 << 2);
2863         }
2864
2865         temp_ctl |= (vmeReq->timeonTimeoutTimer & 7) << 8;
2866         temp_ctl |= (vmeReq->timeoffTimeoutTimer & 7) << 12;
2867         temp_ctl |= vmeReq->requestLevel;
2868
2869         iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_VMCTRL);
2870         return (0);
2871 }
2872
2873 /*
2874  * Return the attributes of the VME bus requestor
2875  */
2876 int tempe_get_requestor(vmeRequesterCfg_t * vmeReq)
2877 {
2878         int temp_ctl = 0;
2879
2880         temp_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_VMCTRL);
2881
2882         if (temp_ctl & 0x18) {
2883                 vmeReq->releaseMode = 1;
2884         }
2885
2886         if (temp_ctl & (1 << 2)) {
2887                 vmeReq->fairMode = 1;
2888         }
2889
2890         vmeReq->requestLevel = temp_ctl & 3;
2891         vmeReq->timeonTimeoutTimer = (temp_ctl >> 8) & 7;
2892         vmeReq->timeoffTimeoutTimer = (temp_ctl >> 12) & 7;
2893
2894         return (0);
2895 }
2896
2897
2898 #endif