The canonical patch subject line is:
- Subject: [PATCH 001/123] [<area>:] <explanation>
+ Subject: [PATCH 001/123] subsystem: summary phrase
The canonical patch message body contains the following:
support that - since the sequence number is zero-padded,
the numerical and alphabetic sort is the same.
-See further details on how to phrase the "<explanation>" in the
-"Subject:" line in Andrew Morton's "The perfect patch", referenced
-below.
+The "subsystem" in the email's Subject should identify which
+area or subsystem of the kernel is being patched.
+
+The "summary phrase" in the email's Subject should concisely
+describe the patch which that email contains. The "summary
+phrase" should not be a filename. Do not use the same "summary
+phrase" for every patch in a whole patch series.
+
+Bear in mind that the "summary phrase" of your email becomes
+a globally-unique identifier for that patch. It propagates
+all the way into the git changelog. The "summary phrase" may
+later be used in developer discussions which refer to the patch.
+People will want to google for the "summary phrase" to read
+discussion regarding that patch.
+
+A couple of example Subjects:
+
+ Subject: [patch 2/5] ext2: improve scalability of bitmap searching
+ Subject: [PATCHv2 001/207] x86: fix eflags tracking
The "from" line must be the very first line in the message body,
and has the form:
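
	From: Original Author <author@example.com>

(The name and address above are placeholders for illustration; this
line names the author to be credited in the changelog.)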
Default: 0
icmp_echo_ignore_all - BOOLEAN
+ If set non-zero, then the kernel will ignore all ICMP ECHO
+ requests sent to it.
+ Default: 0
+
icmp_echo_ignore_broadcasts - BOOLEAN
- If either is set to true, then the kernel will ignore either all
- ICMP ECHO requests sent to it or just those to broadcast/multicast
- addresses, respectively.
+ If set non-zero, then the kernel will ignore all ICMP ECHO and
+ TIMESTAMP requests sent to it via broadcast/multicast.
+ Default: 1
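
As a usage sketch (a hypothetical helper, not part of the patch;
writing the procfs file is equivalent to
"sysctl -w net.ipv4.icmp_echo_ignore_all=1"):

	#include <stdio.h>

	/* Sketch: toggle icmp_echo_ignore_all at runtime by writing
	 * the procfs file documented above. */
	static int set_icmp_echo_ignore_all(int val)
	{
		FILE *f = fopen("/proc/sys/net/ipv4/icmp_echo_ignore_all", "w");

		if (!f)
			return -1;
		fprintf(f, "%d\n", val);
		return fclose(f);
	}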
icmp_ratelimit - INTEGER
Limit the maximal rates for sending ICMP packets whose type matches
"Ir" (THREAD_START_SP - sizeof(regs)),
"r" (®s),
"Ir" (sizeof(regs))
- : "r0", "r1", "r2", "r3", "ip", "memory");
+ : "r0", "r1", "r2", "r3", "ip", "lr", "memory");
out:
return ret;
bad_access:
spin_unlock(&mm->page_table_lock);
- /* simulate a read access fault */
+ /* simulate a write access fault */
do_DataAbort(addr, 15 + (1 << 11), regs);
return -1;
}
#include <linux/module.h>
#include <asm/arch/imxfb.h>
#include <asm/hardware.h>
+#include <asm/arch/imx-regs.h>
#include <asm/mach/map.h>
void imx_gpio_mode(int gpio_mode)
{
unsigned int pin = gpio_mode & GPIO_PIN_MASK;
- unsigned int port = (gpio_mode & GPIO_PORT_MASK) >> 5;
- unsigned int ocr = (gpio_mode & GPIO_OCR_MASK) >> 10;
+ unsigned int port = (gpio_mode & GPIO_PORT_MASK) >> GPIO_PORT_SHIFT;
+ unsigned int ocr = (gpio_mode & GPIO_OCR_MASK) >> GPIO_OCR_SHIFT;
unsigned int tmp;
/* Pullup enable */
GPR(port) &= ~(1<<pin);
/* use as gpio? */
- if( ocr == 3 )
+ if(gpio_mode & GPIO_GIUS)
GIUS(port) |= (1<<pin);
else
GIUS(port) &= ~(1<<pin);
tmp |= (ocr << (pin*2));
OCR1(port) = tmp;
- if( gpio_mode & GPIO_AOUT )
- ICONFA1(port) &= ~( 3<<(pin*2));
- if( gpio_mode & GPIO_BOUT )
- ICONFB1(port) &= ~( 3<<(pin*2));
+ ICONFA1(port) &= ~( 3<<(pin*2));
+ ICONFA1(port) |= ((gpio_mode >> GPIO_AOUT_SHIFT) & 3) << (pin * 2);
+ ICONFB1(port) &= ~( 3<<(pin*2));
+ ICONFB1(port) |= ((gpio_mode >> GPIO_BOUT_SHIFT) & 3) << (pin * 2);
} else {
tmp = OCR2(port);
tmp &= ~( 3<<((pin-16)*2));
tmp |= (ocr << ((pin-16)*2));
OCR2(port) = tmp;
- if( gpio_mode & GPIO_AOUT )
- ICONFA2(port) &= ~( 3<<((pin-16)*2));
- if( gpio_mode & GPIO_BOUT )
- ICONFB2(port) &= ~( 3<<((pin-16)*2));
+ ICONFA2(port) &= ~( 3<<((pin-16)*2));
+ ICONFA2(port) |= ((gpio_mode >> GPIO_AOUT_SHIFT) & 3) << ((pin-16) * 2);
+ ICONFB2(port) &= ~( 3<<((pin-16)*2));
+ ICONFB2(port) |= ((gpio_mode >> GPIO_BOUT_SHIFT) & 3) << ((pin-16) * 2);
}
}
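
For reference, a sketch of the mode-word fields the function above
decodes (mask/shift names from <asm/arch/imx-regs.h>; nothing beyond
what the code shows is implied):

	/* gpio_mode field layout, per the accessors above:
	 *   gpio_mode & GPIO_PIN_MASK                        pin number
	 *   (gpio_mode & GPIO_PORT_MASK) >> GPIO_PORT_SHIFT  port index
	 *   (gpio_mode & GPIO_OCR_MASK) >> GPIO_OCR_SHIFT    OCR value
	 *   gpio_mode & GPIO_GIUS                            use pin as GPIO
	 *   (gpio_mode >> GPIO_AOUT_SHIFT) & 3               ICONFA setting
	 *   (gpio_mode >> GPIO_BOUT_SHIFT) & 3               ICONFB setting
	 */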
mx1ads_init(void)
{
#ifdef CONFIG_LEDS
- imx_gpio_mode(GPIO_PORTA | GPIO_OUT | GPIO_GPIO | 2);
+ imx_gpio_mode(GPIO_PORTA | GPIO_OUT | 2);
#endif
platform_add_devices(devices, ARRAY_SIZE(devices));
}
config CPU_ICACHE_DISABLE
bool "Disable I-Cache"
- depends on CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020
+ depends on CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020 || CPU_V6
help
Say Y here to disable the processor instruction cache. Unless
you have a reason not to or are unsure, say N.
config CPU_DCACHE_DISABLE
bool "Disable D-Cache"
- depends on CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020
+ depends on CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020 || CPU_V6
help
Say Y here to disable the processor data cache. Unless
you have a reason not to or are unsure, say N.
config CPU_DCACHE_WRITETHROUGH
bool "Force write through D-cache"
- depends on (CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020) && !CPU_DCACHE_DISABLE
+ depends on (CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020 || CPU_V6) && !CPU_DCACHE_DISABLE
default y if CPU_ARM925T
help
Say Y here to use the data cache in writethrough mode. Unless you
config CPU_BPREDICT_DISABLE
bool "Disable branch prediction"
- depends on CPU_ARM1020
+ depends on CPU_ARM1020 || CPU_V6
help
Say Y here to disable branch prediction. If unsure, say N.
cmc_polling_enabled = 1;
spin_unlock(&cmc_history_lock);
+ /* If we're being hit with CMC interrupts, we won't
+ * ever execute the schedule_work() below. Need to
+ * disable CMC interrupts on this processor now.
+ */
+ ia64_mca_cmc_vector_disable(NULL);
schedule_work(&cmc_disable_work);
/*
;
dend = get_dec();
- tb_ticks_per_jiffy = (dstart - dend) / (6 * (HZ/100));
+ tb_ticks_per_jiffy = (dstart - dend) / ((6 * HZ)/100);
tb_to_us = mulhwu_scale_factor(dstart - dend, 60000);
printk(KERN_INFO "via_calibrate_decr: ticks per jiffy = %u (%u ticks)\n",
menu "General machine setup"
-config VT
- bool
- select INPUT
- default y
- ---help---
- If you say Y here, you will get support for terminal devices with
- display and keyboard devices. These are called "virtual" because you
- can run several virtual terminals (also called virtual consoles) on
- one physical terminal. This is rather useful, for example one
- virtual terminal can collect system messages and warnings, another
- one can be used for a text-mode user session, and a third could run
- an X session, all in parallel. Switching between virtual terminals
- is done with certain key combinations, usually Alt-<function key>.
-
- The setterm command ("man setterm") can be used to change the
- properties (such as colors or beeping) of a virtual terminal. The
- man page console_codes(4) ("man console_codes") contains the special
- character sequences that can be used to change those properties
- directly. The fonts used on virtual terminals can be changed with
- the setfont ("man setfont") command and the key bindings are defined
- with the loadkeys ("man loadkeys") command.
-
- You need at least one virtual terminal device in order to make use
- of your keyboard and monitor. Therefore, only people configuring an
- embedded system would want to say N here in order to save some
- memory; the only way to log into such a system is then via a serial
- or network connection.
-
- If unsure, say Y, or else you won't be able to do much with your new
- shiny Linux system :-)
-
-config VT_CONSOLE
- bool
- default y
- ---help---
- The system console is the device which receives all kernel messages
- and warnings and which allows logins in single user mode. If you
- answer Y here, a virtual terminal (the device used to interact with
- a physical terminal) can be used as system console. This is the most
- common mode of operations, so you should say Y here unless you want
- the kernel messages be output only to a serial port (in which case
- you should say Y to "Console on serial port", below).
-
- If you do say Y here, by default the currently visible virtual
- terminal (/dev/tty0) will be used as system console. You can change
- that with a kernel command line option such as "console=tty3" which
- would use the third virtual terminal as system console. (Try "man
- bootparam" or see the documentation of your boot loader (lilo or
- loadlin) about how to pass options to the kernel at boot time.)
-
- If unsure, say Y.
-
-config HW_CONSOLE
- bool
- default y
-
config SMP
bool "Symmetric multi-processing support (does not work on sun4/sun4c)"
depends on BROKEN
sbus_time_init();
}
-extern __inline__ unsigned long do_gettimeoffset(void)
+static inline unsigned long do_gettimeoffset(void)
{
return (*master_l10_counter >> 10) & 0x1fffff;
}
{ return __pte((pte_val(pte) & SRMMU_CHG_MASK) | pgprot_val(newprot)); }
/* to find an entry in a top-level page table... */
-extern inline pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address)
+static inline pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address)
{ return mm->pgd + (address >> SRMMU_PGDIR_SHIFT); }
/* Find an entry in the second-level page table.. */
faddd %f0, %f2, %f4
fmuld %f0, %f2, %f6
ldxa [%g3] ASI_DMMU, %g5
-cplus_fptrap_insn_1:
- sethi %hi(0), %g2
+ sethi %hi(sparc64_kern_sec_context), %g2
+ ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2
stxa %g2, [%g3] ASI_DMMU
membar #Sync
add %g6, TI_FPREGS + 0xc0, %g2
fzero %f34
ldxa [%g3] ASI_DMMU, %g5
add %g6, TI_FPREGS, %g1
-cplus_fptrap_insn_2:
- sethi %hi(0), %g2
+ sethi %hi(sparc64_kern_sec_context), %g2
+ ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2
stxa %g2, [%g3] ASI_DMMU
membar #Sync
add %g6, TI_FPREGS + 0x40, %g2
3: mov SECONDARY_CONTEXT, %g3
add %g6, TI_FPREGS, %g1
ldxa [%g3] ASI_DMMU, %g5
-cplus_fptrap_insn_3:
- sethi %hi(0), %g2
+ sethi %hi(sparc64_kern_sec_context), %g2
+ ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2
stxa %g2, [%g3] ASI_DMMU
membar #Sync
mov 0x40, %g2
stx %g3, [%g6 + TI_GSR]
mov SECONDARY_CONTEXT, %g3
ldxa [%g3] ASI_DMMU, %g5
-cplus_fptrap_insn_4:
- sethi %hi(0), %g2
+ sethi %hi(sparc64_kern_sec_context), %g2
+ ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2
stxa %g2, [%g3] ASI_DMMU
membar #Sync
add %g6, TI_FPREGS, %g2
ba,pt %xcc, etrap
wr %g0, 0, %fprs
-cplus_fptrap_1:
- sethi %hi(CTX_CHEETAH_PLUS_CTX0), %g2
-
- .globl cheetah_plus_patch_fpdis
-cheetah_plus_patch_fpdis:
- /* We configure the dTLB512_0 for 4MB pages and the
- * dTLB512_1 for 8K pages when in context zero.
- */
- sethi %hi(cplus_fptrap_1), %o0
- lduw [%o0 + %lo(cplus_fptrap_1)], %o1
-
- set cplus_fptrap_insn_1, %o2
- stw %o1, [%o2]
- flush %o2
- set cplus_fptrap_insn_2, %o2
- stw %o1, [%o2]
- flush %o2
- set cplus_fptrap_insn_3, %o2
- stw %o1, [%o2]
- flush %o2
- set cplus_fptrap_insn_4, %o2
- stw %o1, [%o2]
- flush %o2
-
- retl
- nop
-
/* The registers for cross calls will be:
*
* DATA 0: [low 32-bits] Address of function to call, jmp to this
wrpr %g3, 0, %otherwin
wrpr %g2, 0, %wstate
-cplus_etrap_insn_1:
- sethi %hi(0), %g3
- sllx %g3, 32, %g3
-cplus_etrap_insn_2:
- sethi %hi(0), %g2
- or %g3, %g2, %g3
+ sethi %hi(sparc64_kern_pri_context), %g2
+ ldx [%g2 + %lo(sparc64_kern_pri_context)], %g3
stxa %g3, [%l4] ASI_DMMU
flush %l6
wr %g0, ASI_AIUS, %asi
mov PRIMARY_CONTEXT, %l4
wrpr %g3, 0, %otherwin
wrpr %g2, 0, %wstate
-cplus_etrap_insn_3:
- sethi %hi(0), %g3
- sllx %g3, 32, %g3
-cplus_etrap_insn_4:
- sethi %hi(0), %g2
- or %g3, %g2, %g3
+ sethi %hi(sparc64_kern_pri_context), %g2
+ ldx [%g2 + %lo(sparc64_kern_pri_context)], %g3
stxa %g3, [%l4] ASI_DMMU
flush %l6
#undef TASK_REGOFF
#undef ETRAP_PSTATE1
-
-cplus_einsn_1:
- sethi %uhi(CTX_CHEETAH_PLUS_NUC), %g3
-cplus_einsn_2:
- sethi %hi(CTX_CHEETAH_PLUS_CTX0), %g2
-
- .globl cheetah_plus_patch_etrap
-cheetah_plus_patch_etrap:
- /* We configure the dTLB512_0 for 4MB pages and the
- * dTLB512_1 for 8K pages when in context zero.
- */
- sethi %hi(cplus_einsn_1), %o0
- sethi %hi(cplus_etrap_insn_1), %o2
- lduw [%o0 + %lo(cplus_einsn_1)], %o1
- or %o2, %lo(cplus_etrap_insn_1), %o2
- stw %o1, [%o2]
- flush %o2
- sethi %hi(cplus_etrap_insn_3), %o2
- or %o2, %lo(cplus_etrap_insn_3), %o2
- stw %o1, [%o2]
- flush %o2
-
- sethi %hi(cplus_einsn_2), %o0
- sethi %hi(cplus_etrap_insn_2), %o2
- lduw [%o0 + %lo(cplus_einsn_2)], %o1
- or %o2, %lo(cplus_etrap_insn_2), %o2
- stw %o1, [%o2]
- flush %o2
- sethi %hi(cplus_etrap_insn_4), %o2
- or %o2, %lo(cplus_etrap_insn_4), %o2
- stw %o1, [%o2]
- flush %o2
-
- retl
- nop
1: sethi %hi(tlb_type), %g1
stw %g2, [%g1 + %lo(tlb_type)]
- BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,1f)
- ba,pt %xcc, 2f
- nop
-
-1: /* Patch context register writes to support nucleus page
- * size correctly.
- */
- call cheetah_plus_patch_etrap
- nop
- call cheetah_plus_patch_rtrap
- nop
- call cheetah_plus_patch_fpdis
- nop
- call cheetah_plus_patch_winfixup
- nop
-
-2: /* Patch copy/page operations to cheetah optimized versions. */
+ /* Patch copy/page operations to cheetah optimized versions. */
call cheetah_patch_copyops
nop
call cheetah_patch_copy_page
call prom_set_trap_table
sethi %hi(sparc64_ttable_tl0), %o0
- BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g2,g3,1f)
- ba,pt %xcc, 2f
- nop
-
-1: /* Start using proper page size encodings in ctx register. */
- sethi %uhi(CTX_CHEETAH_PLUS_NUC), %g3
+ /* Start using proper page size encodings in ctx register. */
+ sethi %hi(sparc64_kern_pri_context), %g3
+ ldx [%g3 + %lo(sparc64_kern_pri_context)], %g2
mov PRIMARY_CONTEXT, %g1
- sllx %g3, 32, %g3
- sethi %hi(CTX_CHEETAH_PLUS_CTX0), %g2
- or %g3, %g2, %g3
- stxa %g3, [%g1] ASI_DMMU
+ stxa %g2, [%g1] ASI_DMMU
membar #Sync
-2:
rdpr %pstate, %o1
or %o1, PSTATE_IE, %o1
wrpr %o1, 0, %pstate
brnz,pn %l3, kern_rtt
mov PRIMARY_CONTEXT, %l7
ldxa [%l7 + %l7] ASI_DMMU, %l0
-cplus_rtrap_insn_1:
- sethi %hi(0), %l1
- sllx %l1, 32, %l1
+ sethi %hi(sparc64_kern_pri_nuc_bits), %l1
+ ldx [%l1 + %lo(sparc64_kern_pri_nuc_bits)], %l1
or %l0, %l1, %l0
stxa %l0, [%l7] ASI_DMMU
flush %g6
wr %g0, FPRS_DU, %fprs
ba,pt %xcc, rt_continue
stb %l5, [%g6 + TI_FPDEPTH]
-
-cplus_rinsn_1:
- sethi %uhi(CTX_CHEETAH_PLUS_NUC), %l1
-
- .globl cheetah_plus_patch_rtrap
-cheetah_plus_patch_rtrap:
- /* We configure the dTLB512_0 for 4MB pages and the
- * dTLB512_1 for 8K pages when in context zero.
- */
- sethi %hi(cplus_rinsn_1), %o0
- sethi %hi(cplus_rtrap_insn_1), %o2
- lduw [%o0 + %lo(cplus_rinsn_1)], %o1
- or %o2, %lo(cplus_rtrap_insn_1), %o2
- stw %o1, [%o2]
- flush %o2
-
- retl
- nop
}
if ((va >= KERNBASE) && (va < (KERNBASE + (4 * 1024 * 1024)))) {
- unsigned long kernel_pctx = 0;
-
- if (tlb_type == cheetah_plus)
- kernel_pctx |= (CTX_CHEETAH_PLUS_NUC |
- CTX_CHEETAH_PLUS_CTX0);
+ extern unsigned long sparc64_kern_pri_context;
/* Spitfire Errata #32 workaround */
__asm__ __volatile__("stxa %0, [%1] %2\n\t"
"flush %%g6"
: /* No outputs */
- : "r" (kernel_pctx),
+ : "r" (sparc64_kern_pri_context),
"r" (PRIMARY_CONTEXT),
"i" (ASI_DMMU));
call init_irqwork_curcpu
nop
- BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g2,g3,1f)
- ba,pt %xcc, 2f
- nop
-
-1: /* Start using proper page size encodings in ctx register. */
- sethi %uhi(CTX_CHEETAH_PLUS_NUC), %g3
+ /* Start using proper page size encodings in ctx register. */
+ sethi %hi(sparc64_kern_pri_context), %g3
+ ldx [%g3 + %lo(sparc64_kern_pri_context)], %g2
mov PRIMARY_CONTEXT, %g1
- sllx %g3, 32, %g3
- sethi %hi(CTX_CHEETAH_PLUS_CTX0), %g2
- or %g3, %g2, %g3
- stxa %g3, [%g1] ASI_DMMU
+ stxa %g2, [%g1] ASI_DMMU
membar #Sync
-2:
rdpr %pstate, %o1
or %o1, PSTATE_IE, %o1
wrpr %o1, 0, %pstate
.text
set_pcontext:
-cplus_winfixup_insn_1:
- sethi %hi(0), %l1
+ sethi %hi(sparc64_kern_pri_context), %l1
+ ldx [%l1 + %lo(sparc64_kern_pri_context)], %l1
mov PRIMARY_CONTEXT, %g1
- sllx %l1, 32, %l1
-cplus_winfixup_insn_2:
- sethi %hi(0), %g2
- or %l1, %g2, %l1
stxa %l1, [%g1] ASI_DMMU
flush %g6
retl
nop
-cplus_wfinsn_1:
- sethi %uhi(CTX_CHEETAH_PLUS_NUC), %l1
-cplus_wfinsn_2:
- sethi %hi(CTX_CHEETAH_PLUS_CTX0), %g2
-
.align 32
/* Here are the rules, pay attention.
add %sp, PTREGS_OFF, %o0
ba,pt %xcc, rtrap
clr %l6
-
-
- .globl cheetah_plus_patch_winfixup
-cheetah_plus_patch_winfixup:
- sethi %hi(cplus_wfinsn_1), %o0
- sethi %hi(cplus_winfixup_insn_1), %o2
- lduw [%o0 + %lo(cplus_wfinsn_1)], %o1
- or %o2, %lo(cplus_winfixup_insn_1), %o2
- stw %o1, [%o2]
- flush %o2
-
- sethi %hi(cplus_wfinsn_2), %o0
- sethi %hi(cplus_winfixup_insn_2), %o2
- lduw [%o0 + %lo(cplus_wfinsn_2)], %o1
- or %o2, %lo(cplus_winfixup_insn_2), %o2
- stw %o1, [%o2]
- flush %o2
-
- retl
- nop
struct page *mem_map_zero __read_mostly;
+unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;
+
+unsigned long sparc64_kern_pri_context __read_mostly;
+unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
+unsigned long sparc64_kern_sec_context __read_mostly;
+
int bigkernel = 0;
/* XXX Tune this... */
unsigned long data;
};
static struct linux_prom_translation prom_trans[512] __initdata;
+static unsigned int prom_trans_ents __initdata;
extern unsigned long prom_boot_page;
extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle);
unsigned long prom_pmd_phys __read_mostly;
unsigned int swapper_pgd_zero __read_mostly;
-/* Allocate power-of-2 aligned chunks from the end of the
- * kernel image. Return physical address.
- */
-static inline unsigned long early_alloc_phys(unsigned long size)
-{
- unsigned long base;
-
- BUILD_BUG_ON(size & (size - 1));
-
- kern_size = (kern_size + (size - 1)) & ~(size - 1);
- base = kern_base + kern_size;
- kern_size += size;
-
- return base;
-}
-
-static inline unsigned long load_phys32(unsigned long pa)
-{
- unsigned long val;
-
- __asm__ __volatile__("lduwa [%1] %2, %0"
- : "=&r" (val)
- : "r" (pa), "i" (ASI_PHYS_USE_EC));
-
- return val;
-}
-
-static inline unsigned long load_phys64(unsigned long pa)
-{
- unsigned long val;
-
- __asm__ __volatile__("ldxa [%1] %2, %0"
- : "=&r" (val)
- : "r" (pa), "i" (ASI_PHYS_USE_EC));
-
- return val;
-}
-
-static inline void store_phys32(unsigned long pa, unsigned long val)
-{
- __asm__ __volatile__("stwa %0, [%1] %2"
- : /* no outputs */
- : "r" (val), "r" (pa), "i" (ASI_PHYS_USE_EC));
-}
-
-static inline void store_phys64(unsigned long pa, unsigned long val)
-{
- __asm__ __volatile__("stxa %0, [%1] %2"
- : /* no outputs */
- : "r" (val), "r" (pa), "i" (ASI_PHYS_USE_EC));
-}
+static pmd_t *prompmd __read_mostly;
#define BASE_PAGE_SIZE 8192
*/
unsigned long prom_virt_to_phys(unsigned long promva, int *error)
{
- unsigned long pmd_phys = (prom_pmd_phys +
- ((promva >> 23) & 0x7ff) * sizeof(pmd_t));
- unsigned long pte_phys;
- pmd_t pmd_ent;
- pte_t pte_ent;
+ pmd_t *pmdp = prompmd + ((promva >> 23) & 0x7ff);
+ pte_t *ptep;
unsigned long base;
- pmd_val(pmd_ent) = load_phys32(pmd_phys);
- if (pmd_none(pmd_ent)) {
+ if (pmd_none(*pmdp)) {
if (error)
*error = 1;
return 0;
}
-
- pte_phys = (unsigned long)pmd_val(pmd_ent) << 11UL;
- pte_phys += ((promva >> 13) & 0x3ff) * sizeof(pte_t);
- pte_val(pte_ent) = load_phys64(pte_phys);
- if (!pte_present(pte_ent)) {
+ ptep = (pte_t *)__pmd_page(*pmdp) + ((promva >> 13) & 0x3ff);
+ if (!pte_present(*ptep)) {
if (error)
*error = 1;
return 0;
}
if (error) {
*error = 0;
- return pte_val(pte_ent);
+ return pte_val(*ptep);
}
- base = pte_val(pte_ent) & _PAGE_PADDR;
- return (base + (promva & (BASE_PAGE_SIZE - 1)));
+ base = pte_val(*ptep) & _PAGE_PADDR;
+
+ return base + (promva & (BASE_PAGE_SIZE - 1));
}
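
A hypothetical caller sketch for the rewritten lookup (the
halt-on-failure policy here is an assumption for illustration, not
taken from the patch):

	int err;
	unsigned long pa;

	pa = prom_virt_to_phys(promva, &err);
	if (err)
		prom_halt();	/* no pmd/pte mapping for promva */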
/* The obp translations are saved based on 8k pagesize, since obp can
unsigned long vaddr;
for (vaddr = start; vaddr < end; vaddr += BASE_PAGE_SIZE) {
- unsigned long val, pte_phys, pmd_phys;
- pmd_t pmd_ent;
- int i;
-
- pmd_phys = (prom_pmd_phys +
- (((vaddr >> 23) & 0x7ff) * sizeof(pmd_t)));
- pmd_val(pmd_ent) = load_phys32(pmd_phys);
- if (pmd_none(pmd_ent)) {
- pte_phys = early_alloc_phys(BASE_PAGE_SIZE);
-
- for (i = 0; i < BASE_PAGE_SIZE / sizeof(pte_t); i++)
- store_phys64(pte_phys+i*sizeof(pte_t),0);
+ unsigned long val;
+ pmd_t *pmd;
+ pte_t *pte;
- pmd_val(pmd_ent) = pte_phys >> 11UL;
- store_phys32(pmd_phys, pmd_val(pmd_ent));
+ pmd = prompmd + ((vaddr >> 23) & 0x7ff);
+ if (pmd_none(*pmd)) {
+ pte = __alloc_bootmem(BASE_PAGE_SIZE, BASE_PAGE_SIZE,
+ PAGE_SIZE);
+ if (!pte)
+ prom_halt();
+ memset(pte, 0, BASE_PAGE_SIZE);
+ pmd_set(pmd, pte);
}
-
- pte_phys = (unsigned long)pmd_val(pmd_ent) << 11UL;
- pte_phys += (((vaddr >> 13) & 0x3ff) * sizeof(pte_t));
+ pte = (pte_t *) __pmd_page(*pmd) + ((vaddr >> 13) & 0x3ff);
val = data;
if (tlb_type == spitfire)
val &= ~0x0003fe0000000000UL;
- store_phys64(pte_phys, val | _PAGE_MODIFIED);
+ set_pte_at(&init_mm, vaddr, pte,
+ __pte(val | _PAGE_MODIFIED));
data += BASE_PAGE_SIZE;
}
}
#define OBP_PMD_SIZE 2048
-static void __init build_obp_pgtable(int prom_trans_ents)
+static void __init build_obp_pgtable(void)
{
unsigned long i;
- prom_pmd_phys = early_alloc_phys(OBP_PMD_SIZE);
- for (i = 0; i < OBP_PMD_SIZE; i += 4)
- store_phys32(prom_pmd_phys + i, 0);
+ prompmd = __alloc_bootmem(OBP_PMD_SIZE, OBP_PMD_SIZE, PAGE_SIZE);
+ if (!prompmd)
+ prom_halt();
+
+ memset(prompmd, 0, OBP_PMD_SIZE);
+
+ prom_pmd_phys = __pa(prompmd);
for (i = 0; i < prom_trans_ents; i++) {
unsigned long start, end;
/* Read OBP translations property into 'prom_trans[]'.
* Return the number of entries.
*/
-static int __init read_obp_translations(void)
+static void __init read_obp_translations(void)
{
int n, node;
prom_printf("prom_mappings: Couldn't get property.\n");
prom_halt();
}
+
n = n / sizeof(struct linux_prom_translation);
- return n;
+
+ prom_trans_ents = n;
}
static void __init remap_kernel(void)
prom_dtlb_load(tlb_ent, tte_data, tte_vaddr);
prom_itlb_load(tlb_ent, tte_data, tte_vaddr);
if (bigkernel) {
- prom_dtlb_load(tlb_ent - 1,
+ tlb_ent -= 1;
+ prom_dtlb_load(tlb_ent,
tte_data + 0x400000,
tte_vaddr + 0x400000);
- prom_itlb_load(tlb_ent - 1,
+ prom_itlb_load(tlb_ent,
tte_data + 0x400000,
tte_vaddr + 0x400000);
}
+ sparc64_highest_unlocked_tlb_ent = tlb_ent - 1;
+ if (tlb_type == cheetah_plus) {
+ sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
+ CTX_CHEETAH_PLUS_NUC);
+ sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
+ sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
+ }
}
-static void __init inherit_prom_mappings(void)
-{
- int n;
- n = read_obp_translations();
- build_obp_pgtable(n);
+static void __init inherit_prom_mappings_pre(void)
+{
+ read_obp_translations();
/* Now fixup OBP's idea about where we really are mapped. */
prom_printf("Remapping the kernel... ");
remap_kernel();
prom_printf("done.\n");
+}
+static void __init inherit_prom_mappings_post(void)
+{
+ build_obp_pgtable();
register_prom_callbacks();
}
}
}
if (tlb_type == spitfire) {
- int high = SPITFIRE_HIGHEST_LOCKED_TLBENT - bigkernel;
- for (i = 0; i < high; i++) {
+ int high = sparc64_highest_unlocked_tlb_ent;
+ for (i = 0; i <= high; i++) {
unsigned long data;
/* Spitfire Errata #32 workaround */
}
}
} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
- int high = CHEETAH_HIGHEST_LOCKED_TLBENT - bigkernel;
+ int high = sparc64_highest_unlocked_tlb_ent;
- for (i = 0; i < high; i++) {
+ for (i = 0; i <= high; i++) {
unsigned long data;
data = cheetah_get_ldtlb_data(i);
swapper_pgd_zero = pgd_val(swapper_pg_dir[0]);
- /* Inherit non-locked OBP mappings. */
- inherit_prom_mappings();
+ inherit_prom_mappings_pre();
/* Ok, we can use our TLB miss and window trap handlers safely.
* We need to do a quick peek here to see if we are on StarFire
extern void setup_tba(int);
setup_tba(this_is_starfire);
}
-
- inherit_locked_prom_mappings(1);
-
__flush_tlb_all();
+ /* Everything from this point forward, until we are done with
+ * inherit_prom_mappings_post(), must complete successfully
+ * without calling into the firmware. The firmware page tables
+ * have not been built, but we are running on the Linux kernel's
+ * trap table.
+ */
+
/* Setup bootmem... */
pages_avail = 0;
last_valid_pfn = end_pfn = bootmem_init(&pages_avail);
+ inherit_prom_mappings_post();
+
+ inherit_locked_prom_mappings(1);
+
#ifdef CONFIG_DEBUG_PAGEALLOC
kernel_physical_mapping_init();
#endif
extern void restore_registers(int pid, union uml_pt_regs *regs);
extern void init_registers(int pid);
extern void get_safe_registers(unsigned long * regs);
+extern void get_thread_regs(union uml_pt_regs *uml_regs, void *buffer);
#endif
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
case RBP: UPT_RBP(regs) = __upt_val; break; \
case ORIG_RAX: UPT_ORIG_RAX(regs) = __upt_val; break; \
case CS: UPT_CS(regs) = __upt_val; break; \
- case DS: UPT_DS(regs) = __upt_val; break; \
- case ES: UPT_ES(regs) = __upt_val; break; \
- case FS: UPT_FS(regs) = __upt_val; break; \
- case GS: UPT_GS(regs) = __upt_val; break; \
case EFLAGS: UPT_EFLAGS(regs) = __upt_val; break; \
default : \
panic("Bad register in UPT_SET : %d\n", reg); \
if (esp == NULL) {
if (task != current && task != NULL) {
- /* XXX: Isn't this bogus? I.e. isn't this the
- * *userspace* stack of this task? If not so, use this
- * even when task == current (as in i386).
- */
esp = (unsigned long *) KSTK_ESP(task);
- /* Which one? No actual difference - just coding style.*/
- //esp = (unsigned long *) PT_REGS_IP(&task->thread.regs);
} else {
esp = (unsigned long *) &esp;
}
}
printk("Call Trace: \n");
- show_trace(current, esp);
+ show_trace(task, esp);
}
#include <errno.h>
#include <string.h>
+#include <setjmp.h>
#include "sysdep/ptrace_user.h"
#include "sysdep/ptrace.h"
#include "uml-config.h"
memcpy(regs, exec_regs, HOST_FRAME_SIZE * sizeof(unsigned long));
}
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
+void get_thread_regs(union uml_pt_regs *uml_regs, void *buffer)
+{
+ struct __jmp_buf_tag *jmpbuf = buffer;
+
+ UPT_SET(uml_regs, EIP, jmpbuf->__jmpbuf[JB_PC]);
+ UPT_SET(uml_regs, UESP, jmpbuf->__jmpbuf[JB_SP]);
+ UPT_SET(uml_regs, EBP, jmpbuf->__jmpbuf[JB_BP]);
+}
#include <errno.h>
#include <string.h>
+#include <setjmp.h>
#include "ptrace_user.h"
#include "uml-config.h"
#include "skas_ptregs.h"
memcpy(regs, exec_regs, HOST_FRAME_SIZE * sizeof(unsigned long));
}
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
+void get_thread_regs(union uml_pt_regs *uml_regs, void *buffer)
+{
+ struct __jmp_buf_tag *jmpbuf = buffer;
+
+ UPT_SET(uml_regs, RIP, jmpbuf->__jmpbuf[JB_PC]);
+ UPT_SET(uml_regs, RSP, jmpbuf->__jmpbuf[JB_RSP]);
+ UPT_SET(uml_regs, RBP, jmpbuf->__jmpbuf[JB_RBP]);
+}
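
A hypothetical usage sketch for the new helper (like the
implementation above, it depends on glibc's internal
struct __jmp_buf_tag layout and the <setjmp.h> include added in this
patch):

	union uml_pt_regs uml_regs;
	jmp_buf buf;

	/* Snapshot the current thread, then decode PC/SP/BP from the
	 * jmp_buf into uml_regs. */
	if (setjmp(buf) == 0)
		get_thread_regs(&uml_regs, buf);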
task = current;
if (task != current) {
- //ebp = (unsigned long) KSTK_EBP(task);
- /* Which one? No actual difference - just coding style.*/
- ebp = (unsigned long) PT_REGS_EBP(&task->thread.regs);
+ ebp = (unsigned long) KSTK_EBP(task);
} else {
asm ("movl %%ebp, %0" : "=r" (ebp) : );
}
((unsigned long)stack & (~(THREAD_SIZE - 1)));
print_context_stack(context, stack, ebp);
- /*while (((long) stack & (THREAD_SIZE-1)) != 0) {
- addr = *stack;
- if (__kernel_text_address(addr)) {
- printk("%08lx: [<%08lx>]", (unsigned long) stack, addr);
- print_symbol(" %s", addr);
- printk("\n");
- }
- stack++;
- }*/
printk("\n");
}
OFFSET(HOST_SC_FP_ST, _fpstate, _st);
OFFSET(HOST_SC_FXSR_ENV, _fpstate, _fxsr_env);
- DEFINE_LONGS(HOST_FRAME_SIZE, FRAME_SIZE);
+ DEFINE(HOST_FRAME_SIZE, FRAME_SIZE);
DEFINE_LONGS(HOST_FP_SIZE, sizeof(struct user_i387_struct));
DEFINE_LONGS(HOST_XFP_SIZE, sizeof(struct user_fxsr_struct));
.org 0x4000
ENTRY(level2_ident_pgt)
/* 40MB for bootup. */
- .quad 0x0000000000000183
- .quad 0x0000000000200183
- .quad 0x0000000000400183
- .quad 0x0000000000600183
- .quad 0x0000000000800183
- .quad 0x0000000000A00183
- .quad 0x0000000000C00183
- .quad 0x0000000000E00183
- .quad 0x0000000001000183
- .quad 0x0000000001200183
- .quad 0x0000000001400183
- .quad 0x0000000001600183
- .quad 0x0000000001800183
- .quad 0x0000000001A00183
- .quad 0x0000000001C00183
- .quad 0x0000000001E00183
- .quad 0x0000000002000183
- .quad 0x0000000002200183
- .quad 0x0000000002400183
- .quad 0x0000000002600183
+ .quad 0x0000000000000083
+ .quad 0x0000000000200083
+ .quad 0x0000000000400083
+ .quad 0x0000000000600083
+ .quad 0x0000000000800083
+ .quad 0x0000000000A00083
+ .quad 0x0000000000C00083
+ .quad 0x0000000000E00083
+ .quad 0x0000000001000083
+ .quad 0x0000000001200083
+ .quad 0x0000000001400083
+ .quad 0x0000000001600083
+ .quad 0x0000000001800083
+ .quad 0x0000000001A00083
+ .quad 0x0000000001C00083
+ .quad 0x0000000001E00083
+ .quad 0x0000000002000083
+ .quad 0x0000000002200083
+ .quad 0x0000000002400083
+ .quad 0x0000000002600083
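
For reference, each entry above is a 2MB identity-map PDE: 0x183
decodes as PRESENT (0x1) | RW (0x2) | PSE (0x80) | GLOBAL (0x100), so
changing 0x183 to 0x083 keeps the mapping and permissions but clears
the GLOBAL bit.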
/* Temporary mappings for the super early allocator in arch/x86_64/mm/init.c */
.globl temp_boot_pmds
temp_boot_pmds:
static void srat_detect_node(void)
{
#ifdef CONFIG_NUMA
- unsigned apicid, node;
+ unsigned node;
int cpu = smp_processor_id();
/* Don't do the funky fallback heuristics the AMD version employs
for now. */
- apicid = phys_proc_id[cpu];
- node = apicid_to_node[apicid];
+ node = apicid_to_node[hard_smp_processor_id()];
if (node == NUMA_NO_NODE)
node = 0;
cpu_to_node[cpu] = node;
static void*
-fore200e_kmalloc(int size, int flags)
+fore200e_kmalloc(int size, unsigned int __nocast flags)
{
- void* chunk = kmalloc(size, flags);
+ void *chunk = kzalloc(size, flags);
- if (chunk)
- memset(chunk, 0x00, size);
- else
- printk(FORE200E "kmalloc() failed, requested size = %d, flags = 0x%x\n", size, flags);
+ if (!chunk)
+ printk(FORE200E "kmalloc() failed, requested size = %d, flags = 0x%x\n", size, flags);
return chunk;
}
MODULE_PARM_DESC(debug, "Enable debug output");
module_param_named(cards_limit, drm_cards_limit, int, 0444);
-module_param_named(debug, drm_debug, int, 0666);
+module_param_named(debug, drm_debug, int, 0600);
drm_head_t **drm_heads;
struct drm_sysfs_class *drm_class;
* a new message.
*
*/
-int cn_netlink_send(struct cn_msg *msg, u32 __group, int gfp_mask)
+int cn_netlink_send(struct cn_msg *msg, u32 __group,
+ unsigned int __nocast gfp_mask)
{
struct cn_callback_entry *__cbq;
unsigned int size;
return err;
}
+static void mthca_free_icms(struct mthca_dev *mdev)
+{
+ u8 status;
+
+ mthca_free_icm_table(mdev, mdev->mcg_table.table);
+ if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
+ mthca_free_icm_table(mdev, mdev->srq_table.table);
+ mthca_free_icm_table(mdev, mdev->cq_table.table);
+ mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
+ mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
+ mthca_free_icm_table(mdev, mdev->qp_table.qp_table);
+ mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);
+ mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);
+ mthca_unmap_eq_icm(mdev);
+
+ mthca_UNMAP_ICM_AUX(mdev, &status);
+ mthca_free_icm(mdev, mdev->fw.arbel.aux_icm);
+}
+
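The helper gives the error path in mthca_init_arbel() and the teardown
path below a single copy of the ICM unwind sequence, so the free order
is maintained in one place.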
static int __devinit mthca_init_arbel(struct mthca_dev *mdev)
{
struct mthca_dev_lim dev_lim;
return 0;
err_free_icm:
- if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
- mthca_free_icm_table(mdev, mdev->srq_table.table);
- mthca_free_icm_table(mdev, mdev->cq_table.table);
- mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
- mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
- mthca_free_icm_table(mdev, mdev->qp_table.qp_table);
- mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);
- mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);
- mthca_unmap_eq_icm(mdev);
-
- mthca_UNMAP_ICM_AUX(mdev, &status);
- mthca_free_icm(mdev, mdev->fw.arbel.aux_icm);
+ mthca_free_icms(mdev);
err_stop_fw:
mthca_UNMAP_FA(mdev, &status);
mthca_CLOSE_HCA(mdev, 0, &status);
if (mthca_is_memfree(mdev)) {
- if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
- mthca_free_icm_table(mdev, mdev->srq_table.table);
- mthca_free_icm_table(mdev, mdev->cq_table.table);
- mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
- mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
- mthca_free_icm_table(mdev, mdev->qp_table.qp_table);
- mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);
- mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);
- mthca_unmap_eq_icm(mdev);
-
- mthca_UNMAP_ICM_AUX(mdev, &status);
- mthca_free_icm(mdev, mdev->fw.arbel.aux_icm);
+ mthca_free_icms(mdev);
mthca_UNMAP_FA(mdev, &status);
mthca_free_icm(mdev, mdev->fw.arbel.fw_icm);
spin_unlock(&priv->lock);
}
-static void path_lookup(struct sk_buff *skb, struct net_device *dev)
+static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(skb->dev);
if (skb->dst && skb->dst->neighbour) {
if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) {
- path_lookup(skb, dev);
+ ipoib_path_lookup(skb, dev);
goto out;
}
module_init(ucb1x00_init);
module_exit(ucb1x00_exit);
-EXPORT_SYMBOL(ucb1x00_class);
-
EXPORT_SYMBOL(ucb1x00_io_set_dir);
EXPORT_SYMBOL(ucb1x00_io_write);
EXPORT_SYMBOL(ucb1x00_io_read);
void (*fn)(int, void *);
};
-extern struct class ucb1x00_class;
-
struct ucb1x00 {
spinlock_t lock;
struct mcp *mcp;
config NET_POCKET
bool "Pocket and portable adapters"
- depends on NET_ETHERNET && ISA
+ depends on NET_ETHERNET && PARPORT
---help---
Cute little network (Ethernet) devices which attach to the parallel
port ("pocket adapters"), commonly used with laptops. If you have
config ATP
tristate "AT-LAN-TEC/RealTek pocket adapter support"
- depends on NET_POCKET && ISA && X86
+ depends on NET_POCKET && PARPORT && X86
select CRC32
---help---
This is a network (Ethernet) device which attaches to your parallel
config DE600
tristate "D-Link DE600 pocket adapter support"
- depends on NET_POCKET && ISA
+ depends on NET_POCKET && PARPORT
---help---
This is a network (Ethernet) device which attaches to your parallel
port. Read <file:Documentation/networking/DLINK.txt> as well as the
config DE620
tristate "D-Link DE620 pocket adapter support"
- depends on NET_POCKET && ISA
+ depends on NET_POCKET && PARPORT
---help---
This is a network (Ethernet) device which attaches to your parallel
port. Read <file:Documentation/networking/DLINK.txt> as well as the
* * Added xmit_hash_policy_layer34()
* - Modified by Jay Vosburgh <fubar@us.ibm.com> to also support mode 4.
* Set version to 2.6.3.
+ * 2005/09/26 - Jay Vosburgh <fubar@us.ibm.com>
+ * - Removed backwards compatibility for old ifenslaves. Version 2.6.4.
*/
//#define BONDING_DEBUG 1
static int bond_mode = BOND_MODE_ROUNDROBIN;
static int xmit_hashtype= BOND_XMIT_POLICY_LAYER2;
static int lacp_fast = 0;
-static int app_abi_ver = 0;
-static int orig_app_abi_ver = -1; /* This is used to save the first ABI version
- * we receive from the application. Once set,
- * it won't be changed, and the module will
- * refuse to enslave/release interfaces if the
- * command comes from an application using
- * another ABI version.
- */
+
struct bond_parm_tbl {
char *modename;
int mode;
/*
* Copy all the Multicast addresses from src to the bonding device dst
*/
-static int bond_mc_list_copy(struct dev_mc_list *mc_list, struct bonding *bond, int gpf_flag)
+static int bond_mc_list_copy(struct dev_mc_list *mc_list, struct bonding *bond,
+ unsigned int __nocast gfp_flag)
{
struct dev_mc_list *dmi, *new_dmi;
for (dmi = mc_list; dmi; dmi = dmi->next) {
- new_dmi = kmalloc(sizeof(struct dev_mc_list), gpf_flag);
+ new_dmi = kmalloc(sizeof(struct dev_mc_list), gfp_flag);
if (!new_dmi) {
/* FIXME: Potential memory leak !!! */
}
}
- if (app_abi_ver >= 1) {
- /* The application is using an ABI, which requires the
- * slave interface to be closed.
- */
- if ((slave_dev->flags & IFF_UP)) {
- printk(KERN_ERR DRV_NAME
- ": Error: %s is up\n",
- slave_dev->name);
- res = -EPERM;
- goto err_undo_flags;
- }
-
- if (slave_dev->set_mac_address == NULL) {
- printk(KERN_ERR DRV_NAME
- ": Error: The slave device you specified does "
- "not support setting the MAC address.\n");
- printk(KERN_ERR
- "Your kernel likely does not support slave "
- "devices.\n");
+ /*
+ * Old ifenslave binaries are no longer supported. These can
+ * be identified with moderate accuracy by the state of the slave:
+ * the current ifenslave will set the interface down prior to
+ * enslaving it; the old ifenslave will not.
+ */
+ if ((slave_dev->flags & IFF_UP)) {
+ printk(KERN_ERR DRV_NAME ": %s is up. "
+ "This may be due to an out of date ifenslave.\n",
+ slave_dev->name);
+ res = -EPERM;
+ goto err_undo_flags;
+ }
- res = -EOPNOTSUPP;
- goto err_undo_flags;
- }
- } else {
- /* The application is not using an ABI, which requires the
- * slave interface to be open.
- */
- if (!(slave_dev->flags & IFF_UP)) {
- printk(KERN_ERR DRV_NAME
- ": Error: %s is not running\n",
- slave_dev->name);
- res = -EINVAL;
- goto err_undo_flags;
- }
+ if (slave_dev->set_mac_address == NULL) {
+ printk(KERN_ERR DRV_NAME
+ ": Error: The slave device you specified does "
+ "not support setting the MAC address.\n");
+ printk(KERN_ERR
+ "Your kernel likely does not support slave devices.\n");
- if ((bond->params.mode == BOND_MODE_8023AD) ||
- (bond->params.mode == BOND_MODE_TLB) ||
- (bond->params.mode == BOND_MODE_ALB)) {
- printk(KERN_ERR DRV_NAME
- ": Error: to use %s mode, you must upgrade "
- "ifenslave.\n",
- bond_mode_name(bond->params.mode));
- res = -EOPNOTSUPP;
- goto err_undo_flags;
- }
+ res = -EOPNOTSUPP;
+ goto err_undo_flags;
}
new_slave = kmalloc(sizeof(struct slave), GFP_KERNEL);
*/
new_slave->original_flags = slave_dev->flags;
- if (app_abi_ver >= 1) {
- /* save slave's original ("permanent") mac address for
- * modes that needs it, and for restoring it upon release,
- * and then set it to the master's address
- */
- memcpy(new_slave->perm_hwaddr, slave_dev->dev_addr, ETH_ALEN);
+ /*
+ * Save slave's original ("permanent") mac address for modes
+ * that need it, and for restoring it upon release, and then
+ * set it to the master's address
+ */
+ memcpy(new_slave->perm_hwaddr, slave_dev->dev_addr, ETH_ALEN);
- /* set slave to master's mac address
- * The application already set the master's
- * mac address to that of the first slave
- */
- memcpy(addr.sa_data, bond_dev->dev_addr, bond_dev->addr_len);
- addr.sa_family = slave_dev->type;
- res = dev_set_mac_address(slave_dev, &addr);
- if (res) {
- dprintk("Error %d calling set_mac_address\n", res);
- goto err_free;
- }
+ /*
+ * Set slave to master's mac address. The application already
+ * set the master's mac address to that of the first slave
+ */
+ memcpy(addr.sa_data, bond_dev->dev_addr, bond_dev->addr_len);
+ addr.sa_family = slave_dev->type;
+ res = dev_set_mac_address(slave_dev, &addr);
+ if (res) {
+ dprintk("Error %d calling set_mac_address\n", res);
+ goto err_free;
+ }
- /* open the slave since the application closed it */
- res = dev_open(slave_dev);
- if (res) {
- dprintk("Openning slave %s failed\n", slave_dev->name);
- goto err_restore_mac;
- }
+ /* open the slave since the application closed it */
+ res = dev_open(slave_dev);
+ if (res) {
+ dprintk("Openning slave %s failed\n", slave_dev->name);
+ goto err_restore_mac;
}
res = netdev_set_master(slave_dev, bond_dev);
if (res) {
dprintk("Error %d calling netdev_set_master\n", res);
- if (app_abi_ver < 1) {
- goto err_free;
- } else {
- goto err_close;
- }
+ goto err_close;
}
new_slave->dev = slave_dev;
write_unlock_bh(&bond->lock);
- if (app_abi_ver < 1) {
- /*
- * !!! This is to support old versions of ifenslave.
- * We can remove this in 2.5 because our ifenslave takes
- * care of this for us.
- * We check to see if the master has a mac address yet.
- * If not, we'll give it the mac address of our slave device.
- */
- int ndx = 0;
-
- for (ndx = 0; ndx < bond_dev->addr_len; ndx++) {
- dprintk("Checking ndx=%d of bond_dev->dev_addr\n",
- ndx);
- if (bond_dev->dev_addr[ndx] != 0) {
- dprintk("Found non-zero byte at ndx=%d\n",
- ndx);
- break;
- }
- }
-
- if (ndx == bond_dev->addr_len) {
- /*
- * We got all the way through the address and it was
- * all 0's.
- */
- dprintk("%s doesn't have a MAC address yet. \n",
- bond_dev->name);
- dprintk("Going to give assign it from %s.\n",
- slave_dev->name);
- bond_sethwaddr(bond_dev, slave_dev);
- }
- }
-
printk(KERN_INFO DRV_NAME
": %s: enslaving %s as a%s interface with a%s link.\n",
bond_dev->name, slave_dev->name,
/* close slave before restoring its mac address */
dev_close(slave_dev);
- if (app_abi_ver >= 1) {
- /* restore original ("permanent") mac address */
- memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN);
- addr.sa_family = slave_dev->type;
- dev_set_mac_address(slave_dev, &addr);
- }
+ /* restore original ("permanent") mac address */
+ memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN);
+ addr.sa_family = slave_dev->type;
+ dev_set_mac_address(slave_dev, &addr);
/* restore the original state of the
* IFF_NOARP flag that might have been
/* close slave before restoring its mac address */
dev_close(slave_dev);
- if (app_abi_ver >= 1) {
- /* restore original ("permanent") mac address*/
- memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN);
- addr.sa_family = slave_dev->type;
- dev_set_mac_address(slave_dev, &addr);
- }
+ /* restore original ("permanent") mac address*/
+ memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN);
+ addr.sa_family = slave_dev->type;
+ dev_set_mac_address(slave_dev, &addr);
/* restore the original state of the IFF_NOARP flag that might have
* been set by bond_set_slave_inactive_flags()
return res;
}
-static int bond_ethtool_ioctl(struct net_device *bond_dev, struct ifreq *ifr)
-{
- struct ethtool_drvinfo info;
- void __user *addr = ifr->ifr_data;
- uint32_t cmd;
-
- if (get_user(cmd, (uint32_t __user *)addr)) {
- return -EFAULT;
- }
-
- switch (cmd) {
- case ETHTOOL_GDRVINFO:
- if (copy_from_user(&info, addr, sizeof(info))) {
- return -EFAULT;
- }
-
- if (strcmp(info.driver, "ifenslave") == 0) {
- int new_abi_ver;
- char *endptr;
-
- new_abi_ver = simple_strtoul(info.fw_version,
- &endptr, 0);
- if (*endptr) {
- printk(KERN_ERR DRV_NAME
- ": Error: got invalid ABI "
- "version from application\n");
-
- return -EINVAL;
- }
-
- if (orig_app_abi_ver == -1) {
- orig_app_abi_ver = new_abi_ver;
- }
-
- app_abi_ver = new_abi_ver;
- }
-
- strncpy(info.driver, DRV_NAME, 32);
- strncpy(info.version, DRV_VERSION, 32);
- snprintf(info.fw_version, 32, "%d", BOND_ABI_VERSION);
-
- if (copy_to_user(addr, &info, sizeof(info))) {
- return -EFAULT;
- }
-
- return 0;
- default:
- return -EOPNOTSUPP;
- }
-}
-
static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
{
struct bonding *bond = bond_dev->priv;
return 0;
rcu_read_lock();
- idev = __in_dev_get(dev);
+ idev = __in_dev_get_rcu(dev);
if (!idev)
goto out;
seq_printf(seq, "Link Failure Count: %d\n",
slave->link_failure_count);
- if (app_abi_ver >= 1) {
- seq_printf(seq,
- "Permanent HW addr: %02x:%02x:%02x:%02x:%02x:%02x\n",
- slave->perm_hwaddr[0],
- slave->perm_hwaddr[1],
- slave->perm_hwaddr[2],
- slave->perm_hwaddr[3],
- slave->perm_hwaddr[4],
- slave->perm_hwaddr[5]);
- }
+ seq_printf(seq,
+ "Permanent HW addr: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ slave->perm_hwaddr[0], slave->perm_hwaddr[1],
+ slave->perm_hwaddr[2], slave->perm_hwaddr[3],
+ slave->perm_hwaddr[4], slave->perm_hwaddr[5]);
if (bond->params.mode == BOND_MODE_8023AD) {
const struct aggregator *agg
struct ifslave k_sinfo;
struct ifslave __user *u_sinfo = NULL;
struct mii_ioctl_data *mii = NULL;
- int prev_abi_ver = orig_app_abi_ver;
int res = 0;
dprintk("bond_ioctl: master=%s, cmd=%d\n",
bond_dev->name, cmd);
switch (cmd) {
- case SIOCETHTOOL:
- return bond_ethtool_ioctl(bond_dev, ifr);
case SIOCGMIIPHY:
mii = if_mii(ifr);
if (!mii) {
return -EPERM;
}
- if (orig_app_abi_ver == -1) {
- /* no orig_app_abi_ver was provided yet, so we'll use the
- * current one from now on, even if it's 0
- */
- orig_app_abi_ver = app_abi_ver;
-
- } else if (orig_app_abi_ver != app_abi_ver) {
- printk(KERN_ERR DRV_NAME
- ": Error: already using ifenslave ABI version %d; to "
- "upgrade ifenslave to version %d, you must first "
- "reload bonding.\n",
- orig_app_abi_ver, app_abi_ver);
- return -EINVAL;
- }
-
slave_dev = dev_get_by_name(ifr->ifr_slave);
dprintk("slave_dev=%p: \n", slave_dev);
dev_put(slave_dev);
}
- if (res < 0) {
- /* The ioctl failed, so there's no point in changing the
- * orig_app_abi_ver. We'll restore it's value just in case
- * we've changed it earlier in this function.
- */
- orig_app_abi_ver = prev_abi_ver;
- }
-
return res;
}
}
}
+static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ strncpy(drvinfo->driver, DRV_NAME, 32);
+ strncpy(drvinfo->version, DRV_VERSION, 32);
+ snprintf(drvinfo->fw_version, 32, "%d", BOND_ABI_VERSION);
+}
+
static struct ethtool_ops bond_ethtool_ops = {
.get_tx_csum = ethtool_op_get_tx_csum,
.get_sg = ethtool_op_get_sg,
+ .get_drvinfo = bond_ethtool_get_drvinfo,
};
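
With the ops table in place, a standard ethtool -i bond0 query
(ETHTOOL_GDRVINFO) reports the driver name, version, and the bonding
ABI version in the firmware-version field, replacing the private
SIOCETHTOOL handler removed above.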
/*
#include "bond_3ad.h"
#include "bond_alb.h"
-#define DRV_VERSION "2.6.3"
-#define DRV_RELDATE "June 8, 2005"
+#define DRV_VERSION "2.6.4"
+#define DRV_RELDATE "September 26, 2005"
#define DRV_NAME "bonding"
#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver"
#define CAS_REG_LEN (sizeof(ethtool_register_table)/sizeof(int))
#define CAS_MAX_REGS (sizeof (u32)*CAS_REG_LEN)
-static u8 *cas_get_regs(struct cas *cp)
+static void cas_read_regs(struct cas *cp, u8 *ptr, int len)
{
- u8 *ptr = kmalloc(CAS_MAX_REGS, GFP_KERNEL);
u8 *p;
int i;
unsigned long flags;
- if (!ptr)
- return NULL;
-
spin_lock_irqsave(&cp->lock, flags);
- for (i = 0, p = ptr; i < CAS_REG_LEN ; i ++, p += sizeof(u32)) {
+ for (i = 0, p = ptr; i < len ; i ++, p += sizeof(u32)) {
u16 hval;
u32 val;
if (ethtool_register_table[i].offsets < 0) {
memcpy(p, (u8 *)&val, sizeof(u32));
}
spin_unlock_irqrestore(&cp->lock, flags);
-
- return ptr;
}
static struct net_device_stats *cas_get_stats(struct net_device *dev)
spin_unlock_irqrestore(&cp->lock, flags);
}
-/* Eventually add support for changing the advertisement
- * on autoneg.
- */
-static int cas_ethtool_ioctl(struct net_device *dev, void __user *ep_user)
+static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct cas *cp = netdev_priv(dev);
+ strncpy(info->driver, DRV_MODULE_NAME, ETHTOOL_BUSINFO_LEN);
+ strncpy(info->version, DRV_MODULE_VERSION, ETHTOOL_BUSINFO_LEN);
+ info->fw_version[0] = '\0';
+ strncpy(info->bus_info, pci_name(cp->pdev), ETHTOOL_BUSINFO_LEN);
+ info->regdump_len = cp->casreg_len < CAS_MAX_REGS ?
+ cp->casreg_len : CAS_MAX_REGS;
+ info->n_stats = CAS_NUM_STAT_KEYS;
+}
+
+static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct cas *cp = netdev_priv(dev);
u16 bmcr;
int full_duplex, speed, pause;
- struct ethtool_cmd ecmd;
unsigned long flags;
enum link_state linkstate = link_up;
- if (copy_from_user(&ecmd, ep_user, sizeof(ecmd)))
- return -EFAULT;
-
- switch(ecmd.cmd) {
- case ETHTOOL_GDRVINFO: {
- struct ethtool_drvinfo info = { .cmd = ETHTOOL_GDRVINFO };
-
- strncpy(info.driver, DRV_MODULE_NAME,
- ETHTOOL_BUSINFO_LEN);
- strncpy(info.version, DRV_MODULE_VERSION,
- ETHTOOL_BUSINFO_LEN);
- info.fw_version[0] = '\0';
- strncpy(info.bus_info, pci_name(cp->pdev),
- ETHTOOL_BUSINFO_LEN);
- info.regdump_len = cp->casreg_len < CAS_MAX_REGS ?
- cp->casreg_len : CAS_MAX_REGS;
- info.n_stats = CAS_NUM_STAT_KEYS;
- if (copy_to_user(ep_user, &info, sizeof(info)))
- return -EFAULT;
-
- return 0;
+ cmd->advertising = 0;
+ cmd->supported = SUPPORTED_Autoneg;
+ if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
+ cmd->supported |= SUPPORTED_1000baseT_Full;
+ cmd->advertising |= ADVERTISED_1000baseT_Full;
}
- case ETHTOOL_GSET:
- ecmd.advertising = 0;
- ecmd.supported = SUPPORTED_Autoneg;
- if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
- ecmd.supported |= SUPPORTED_1000baseT_Full;
- ecmd.advertising |= ADVERTISED_1000baseT_Full;
+ /* Record PHY settings if HW is on. */
+ spin_lock_irqsave(&cp->lock, flags);
+ bmcr = 0;
+ linkstate = cp->lstate;
+ if (CAS_PHY_MII(cp->phy_type)) {
+ cmd->port = PORT_MII;
+ cmd->transceiver = (cp->cas_flags & CAS_FLAG_SATURN) ?
+ XCVR_INTERNAL : XCVR_EXTERNAL;
+ cmd->phy_address = cp->phy_addr;
+ cmd->advertising |= ADVERTISED_TP | ADVERTISED_MII |
+ ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full;
+
+ cmd->supported |=
+ (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_TP | SUPPORTED_MII);
+
+ if (cp->hw_running) {
+ cas_mif_poll(cp, 0);
+ bmcr = cas_phy_read(cp, MII_BMCR);
+ cas_read_mii_link_mode(cp, &full_duplex,
+ &speed, &pause);
+ cas_mif_poll(cp, 1);
}
- /* Record PHY settings if HW is on. */
- spin_lock_irqsave(&cp->lock, flags);
- bmcr = 0;
- linkstate = cp->lstate;
- if (CAS_PHY_MII(cp->phy_type)) {
- ecmd.port = PORT_MII;
- ecmd.transceiver = (cp->cas_flags & CAS_FLAG_SATURN) ?
- XCVR_INTERNAL : XCVR_EXTERNAL;
- ecmd.phy_address = cp->phy_addr;
- ecmd.advertising |= ADVERTISED_TP | ADVERTISED_MII |
- ADVERTISED_10baseT_Half |
- ADVERTISED_10baseT_Full |
- ADVERTISED_100baseT_Half |
- ADVERTISED_100baseT_Full;
-
- ecmd.supported |=
- (SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_TP | SUPPORTED_MII);
-
- if (cp->hw_running) {
- cas_mif_poll(cp, 0);
- bmcr = cas_phy_read(cp, MII_BMCR);
- cas_read_mii_link_mode(cp, &full_duplex,
- &speed, &pause);
- cas_mif_poll(cp, 1);
- }
-
- } else {
- ecmd.port = PORT_FIBRE;
- ecmd.transceiver = XCVR_INTERNAL;
- ecmd.phy_address = 0;
- ecmd.supported |= SUPPORTED_FIBRE;
- ecmd.advertising |= ADVERTISED_FIBRE;
-
- if (cp->hw_running) {
- /* pcs uses the same bits as mii */
- bmcr = readl(cp->regs + REG_PCS_MII_CTRL);
- cas_read_pcs_link_mode(cp, &full_duplex,
- &speed, &pause);
- }
+ } else {
+ cmd->port = PORT_FIBRE;
+ cmd->transceiver = XCVR_INTERNAL;
+ cmd->phy_address = 0;
+ cmd->supported |= SUPPORTED_FIBRE;
+ cmd->advertising |= ADVERTISED_FIBRE;
+
+ if (cp->hw_running) {
+ /* pcs uses the same bits as mii */
+ bmcr = readl(cp->regs + REG_PCS_MII_CTRL);
+ cas_read_pcs_link_mode(cp, &full_duplex,
+ &speed, &pause);
}
- spin_unlock_irqrestore(&cp->lock, flags);
+ }
+ spin_unlock_irqrestore(&cp->lock, flags);
- if (bmcr & BMCR_ANENABLE) {
- ecmd.advertising |= ADVERTISED_Autoneg;
- ecmd.autoneg = AUTONEG_ENABLE;
- ecmd.speed = ((speed == 10) ?
- SPEED_10 :
- ((speed == 1000) ?
- SPEED_1000 : SPEED_100));
- ecmd.duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
+ if (bmcr & BMCR_ANENABLE) {
+ cmd->advertising |= ADVERTISED_Autoneg;
+ cmd->autoneg = AUTONEG_ENABLE;
+ cmd->speed = ((speed == 10) ?
+ SPEED_10 :
+ ((speed == 1000) ?
+ SPEED_1000 : SPEED_100));
+ cmd->duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
+ } else {
+ cmd->autoneg = AUTONEG_DISABLE;
+ cmd->speed =
+ (bmcr & CAS_BMCR_SPEED1000) ?
+ SPEED_1000 :
+ ((bmcr & BMCR_SPEED100) ? SPEED_100:
+ SPEED_10);
+ cmd->duplex =
+ (bmcr & BMCR_FULLDPLX) ?
+ DUPLEX_FULL : DUPLEX_HALF;
+ }
+ if (linkstate != link_up) {
+ /* Force these to "unknown" if the link is not up and
+ * autonegotiation is enabled. We can set the link
+ * speed to 0, but not cmd->duplex,
+ * because its legal values are 0 and 1. Ethtool will
+ * print the value reported in parentheses after the
+ * word "Unknown" for unrecognized values.
+ *
+ * If in forced mode, we report the speed and duplex
+ * settings that we configured.
+ */
+ if (cp->link_cntl & BMCR_ANENABLE) {
+ cmd->speed = 0;
+ cmd->duplex = 0xff;
} else {
- ecmd.autoneg = AUTONEG_DISABLE;
- ecmd.speed =
- (bmcr & CAS_BMCR_SPEED1000) ?
- SPEED_1000 :
- ((bmcr & BMCR_SPEED100) ? SPEED_100:
- SPEED_10);
- ecmd.duplex =
- (bmcr & BMCR_FULLDPLX) ?
- DUPLEX_FULL : DUPLEX_HALF;
- }
- if (linkstate != link_up) {
- /* Force these to "unknown" if the link is not up and
- * autonogotiation in enabled. We can set the link
- * speed to 0, but not ecmd.duplex,
- * because its legal values are 0 and 1. Ethtool will
- * print the value reported in parentheses after the
- * word "Unknown" for unrecognized values.
- *
- * If in forced mode, we report the speed and duplex
- * settings that we configured.
- */
- if (cp->link_cntl & BMCR_ANENABLE) {
- ecmd.speed = 0;
- ecmd.duplex = 0xff;
- } else {
- ecmd.speed = SPEED_10;
- if (cp->link_cntl & BMCR_SPEED100) {
- ecmd.speed = SPEED_100;
- } else if (cp->link_cntl & CAS_BMCR_SPEED1000) {
- ecmd.speed = SPEED_1000;
- }
- ecmd.duplex = (cp->link_cntl & BMCR_FULLDPLX)?
- DUPLEX_FULL : DUPLEX_HALF;
+ cmd->speed = SPEED_10;
+ if (cp->link_cntl & BMCR_SPEED100) {
+ cmd->speed = SPEED_100;
+ } else if (cp->link_cntl & CAS_BMCR_SPEED1000) {
+ cmd->speed = SPEED_1000;
}
+ cmd->duplex = (cp->link_cntl & BMCR_FULLDPLX)?
+ DUPLEX_FULL : DUPLEX_HALF;
}
- if (copy_to_user(ep_user, &ecmd, sizeof(ecmd)))
- return -EFAULT;
- return 0;
+ }
+ return 0;
+}
- case ETHTOOL_SSET:
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
+static int cas_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct cas *cp = netdev_priv(dev);
+ unsigned long flags;
- /* Verify the settings we care about. */
- if (ecmd.autoneg != AUTONEG_ENABLE &&
- ecmd.autoneg != AUTONEG_DISABLE)
- return -EINVAL;
+ /* Verify the settings we care about. */
+ if (cmd->autoneg != AUTONEG_ENABLE &&
+ cmd->autoneg != AUTONEG_DISABLE)
+ return -EINVAL;
- if (ecmd.autoneg == AUTONEG_DISABLE &&
- ((ecmd.speed != SPEED_1000 &&
- ecmd.speed != SPEED_100 &&
- ecmd.speed != SPEED_10) ||
- (ecmd.duplex != DUPLEX_HALF &&
- ecmd.duplex != DUPLEX_FULL)))
- return -EINVAL;
+ if (cmd->autoneg == AUTONEG_DISABLE &&
+ ((cmd->speed != SPEED_1000 &&
+ cmd->speed != SPEED_100 &&
+ cmd->speed != SPEED_10) ||
+ (cmd->duplex != DUPLEX_HALF &&
+ cmd->duplex != DUPLEX_FULL)))
+ return -EINVAL;
- /* Apply settings and restart link process. */
- spin_lock_irqsave(&cp->lock, flags);
- cas_begin_auto_negotiation(cp, &ecmd);
- spin_unlock_irqrestore(&cp->lock, flags);
- return 0;
+ /* Apply settings and restart link process. */
+ spin_lock_irqsave(&cp->lock, flags);
+ cas_begin_auto_negotiation(cp, cmd);
+ spin_unlock_irqrestore(&cp->lock, flags);
+ return 0;
+}
- case ETHTOOL_NWAY_RST:
- if ((cp->link_cntl & BMCR_ANENABLE) == 0)
- return -EINVAL;
+static int cas_nway_reset(struct net_device *dev)
+{
+ struct cas *cp = netdev_priv(dev);
+ unsigned long flags;
- /* Restart link process. */
- spin_lock_irqsave(&cp->lock, flags);
- cas_begin_auto_negotiation(cp, NULL);
- spin_unlock_irqrestore(&cp->lock, flags);
+ if ((cp->link_cntl & BMCR_ANENABLE) == 0)
+ return -EINVAL;
- return 0;
+ /* Restart link process. */
+ spin_lock_irqsave(&cp->lock, flags);
+ cas_begin_auto_negotiation(cp, NULL);
+ spin_unlock_irqrestore(&cp->lock, flags);
- case ETHTOOL_GWOL:
- case ETHTOOL_SWOL:
- break; /* doesn't exist */
+ return 0;
+}
- /* get link status */
- case ETHTOOL_GLINK: {
- struct ethtool_value edata = { .cmd = ETHTOOL_GLINK };
+static u32 cas_get_link(struct net_device *dev)
+{
+ struct cas *cp = netdev_priv(dev);
+ return cp->lstate == link_up;
+}
- edata.data = (cp->lstate == link_up);
- if (copy_to_user(ep_user, &edata, sizeof(edata)))
- return -EFAULT;
- return 0;
- }
+static u32 cas_get_msglevel(struct net_device *dev)
+{
+ struct cas *cp = netdev_priv(dev);
+ return cp->msg_enable;
+}
- /* get message-level */
- case ETHTOOL_GMSGLVL: {
- struct ethtool_value edata = { .cmd = ETHTOOL_GMSGLVL };
+static void cas_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct cas *cp = netdev_priv(dev);
+ cp->msg_enable = value;
+}
- edata.data = cp->msg_enable;
- if (copy_to_user(ep_user, &edata, sizeof(edata)))
- return -EFAULT;
- return 0;
- }
+static int cas_get_regs_len(struct net_device *dev)
+{
+ struct cas *cp = netdev_priv(dev);
+ return cp->casreg_len < CAS_MAX_REGS ? cp->casreg_len: CAS_MAX_REGS;
+}
- /* set message-level */
- case ETHTOOL_SMSGLVL: {
- struct ethtool_value edata;
+static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *p)
+{
+ struct cas *cp = netdev_priv(dev);
+ regs->version = 0;
+ /* cas_read_regs handles locks (cp->lock). */
+ cas_read_regs(cp, p, regs->len / sizeof(u32));
+}
- if (!capable(CAP_NET_ADMIN)) {
- return (-EPERM);
- }
- if (copy_from_user(&edata, ep_user, sizeof(edata)))
- return -EFAULT;
- cp->msg_enable = edata.data;
- return 0;
- }
+static int cas_get_stats_count(struct net_device *dev)
+{
+ return CAS_NUM_STAT_KEYS;
+}
- case ETHTOOL_GREGS: {
- struct ethtool_regs edata;
- u8 *ptr;
- int len = cp->casreg_len < CAS_MAX_REGS ?
- cp->casreg_len: CAS_MAX_REGS;
-
- if (copy_from_user(&edata, ep_user, sizeof (edata)))
- return -EFAULT;
-
- if (edata.len > len)
- edata.len = len;
- edata.version = 0;
- if (copy_to_user (ep_user, &edata, sizeof(edata)))
- return -EFAULT;
-
- /* cas_get_regs handles locks (cp->lock). */
- ptr = cas_get_regs(cp);
- if (ptr == NULL)
- return -ENOMEM;
- if (copy_to_user(ep_user + sizeof (edata), ptr, edata.len))
- return -EFAULT;
-
- kfree(ptr);
- return (0);
- }
- case ETHTOOL_GSTRINGS: {
- struct ethtool_gstrings edata;
- int len;
-
- if (copy_from_user(&edata, ep_user, sizeof(edata)))
- return -EFAULT;
-
- len = edata.len;
- switch(edata.string_set) {
- case ETH_SS_STATS:
- edata.len = (len < CAS_NUM_STAT_KEYS) ?
- len : CAS_NUM_STAT_KEYS;
- if (copy_to_user(ep_user, &edata, sizeof(edata)))
- return -EFAULT;
-
- if (copy_to_user(ep_user + sizeof(edata),
-				 &ethtool_cassini_statnames,
- (edata.len * ETH_GSTRING_LEN)))
- return -EFAULT;
- return 0;
- default:
- return -EINVAL;
- }
- }
- case ETHTOOL_GSTATS: {
- int i = 0;
- u64 *tmp;
- struct ethtool_stats edata;
- struct net_device_stats *stats;
- int len;
-
- if (copy_from_user(&edata, ep_user, sizeof(edata)))
- return -EFAULT;
-
- len = edata.n_stats;
- stats = cas_get_stats(cp->dev);
- edata.cmd = ETHTOOL_GSTATS;
- edata.n_stats = (len < CAS_NUM_STAT_KEYS) ?
- len : CAS_NUM_STAT_KEYS;
- if (copy_to_user(ep_user, &edata, sizeof (edata)))
- return -EFAULT;
-
- tmp = kmalloc(sizeof(u64)*CAS_NUM_STAT_KEYS, GFP_KERNEL);
- if (tmp) {
- tmp[i++] = stats->collisions;
- tmp[i++] = stats->rx_bytes;
- tmp[i++] = stats->rx_crc_errors;
- tmp[i++] = stats->rx_dropped;
- tmp[i++] = stats->rx_errors;
- tmp[i++] = stats->rx_fifo_errors;
- tmp[i++] = stats->rx_frame_errors;
- tmp[i++] = stats->rx_length_errors;
- tmp[i++] = stats->rx_over_errors;
- tmp[i++] = stats->rx_packets;
- tmp[i++] = stats->tx_aborted_errors;
- tmp[i++] = stats->tx_bytes;
- tmp[i++] = stats->tx_dropped;
- tmp[i++] = stats->tx_errors;
- tmp[i++] = stats->tx_fifo_errors;
- tmp[i++] = stats->tx_packets;
- BUG_ON(i != CAS_NUM_STAT_KEYS);
-
- i = copy_to_user(ep_user + sizeof(edata),
- tmp, sizeof(u64)*edata.n_stats);
- kfree(tmp);
- } else {
- return -ENOMEM;
- }
- if (i)
- return -EFAULT;
- return 0;
- }
- }
+static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+	 memcpy(data, &ethtool_cassini_statnames,
+ CAS_NUM_STAT_KEYS * ETH_GSTRING_LEN);
+}
- return -EOPNOTSUPP;
+static void cas_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *estats, u64 *data)
+{
+ struct cas *cp = netdev_priv(dev);
+ struct net_device_stats *stats = cas_get_stats(cp->dev);
+ int i = 0;
+ data[i++] = stats->collisions;
+ data[i++] = stats->rx_bytes;
+ data[i++] = stats->rx_crc_errors;
+ data[i++] = stats->rx_dropped;
+ data[i++] = stats->rx_errors;
+ data[i++] = stats->rx_fifo_errors;
+ data[i++] = stats->rx_frame_errors;
+ data[i++] = stats->rx_length_errors;
+ data[i++] = stats->rx_over_errors;
+ data[i++] = stats->rx_packets;
+ data[i++] = stats->tx_aborted_errors;
+ data[i++] = stats->tx_bytes;
+ data[i++] = stats->tx_dropped;
+ data[i++] = stats->tx_errors;
+ data[i++] = stats->tx_fifo_errors;
+ data[i++] = stats->tx_packets;
+ BUG_ON(i != CAS_NUM_STAT_KEYS);
}
+static struct ethtool_ops cas_ethtool_ops = {
+ .get_drvinfo = cas_get_drvinfo,
+ .get_settings = cas_get_settings,
+ .set_settings = cas_set_settings,
+ .nway_reset = cas_nway_reset,
+ .get_link = cas_get_link,
+ .get_msglevel = cas_get_msglevel,
+ .set_msglevel = cas_set_msglevel,
+ .get_regs_len = cas_get_regs_len,
+ .get_regs = cas_get_regs,
+ .get_stats_count = cas_get_stats_count,
+ .get_strings = cas_get_strings,
+ .get_ethtool_stats = cas_get_ethtool_stats,
+};
+
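
With the driver converted to ethtool_ops, the user copies move into the
ethtool core: once dev->ethtool_ops is set (see the registration hunk
below), the core validates the ioctl and calls the per-operation hook.
A minimal sketch of that dispatch, using a simplified hypothetical
helper name (the real code lives in net/core/ethtool.c):

    static int ethtool_get_link_sketch(struct net_device *dev,
                                       void __user *useraddr)
    {
            struct ethtool_value edata = { .cmd = ETHTOOL_GLINK };

            if (!dev->ethtool_ops || !dev->ethtool_ops->get_link)
                    return -EOPNOTSUPP;
            /* the hook only computes the answer ... */
            edata.data = dev->ethtool_ops->get_link(dev);
            /* ... the core does the copy_to_user() the driver used to do */
            if (copy_to_user(useraddr, &edata, sizeof(edata)))
                    return -EFAULT;
            return 0;
    }

This is why each cas_get_*() above shrinks to the computation alone.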
static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct cas *cp = netdev_priv(dev);
*/
down(&cp->pm_sem);
switch (cmd) {
- case SIOCETHTOOL:
- rc = cas_ethtool_ioctl(dev, ifr->ifr_data);
- break;
-
case SIOCGMIIPHY: /* Get address of MII PHY in use. */
data->phy_id = cp->phy_addr;
/* Fallthrough... */
dev->get_stats = cas_get_stats;
dev->set_multicast_list = cas_set_multicast;
dev->do_ioctl = cas_ioctl;
+ dev->ethtool_ops = &cas_ethtool_ops;
dev->tx_timeout = cas_tx_timeout;
dev->watchdog_timeo = CAS_TX_TIMEOUT;
dev->change_mtu = cas_change_mtu;
rc = -ENODEV;
goto bail;
}
+
+ /* Disable any PHY features not supported by the platform */
+ ep->phy_mii.def->features &= ~emacdata->phy_feat_exc;
/* Setup initial PHY config & startup aneg */
if (ep->phy_mii.def->ops->init)
netif_carrier_off(ndev);
if (ep->phy_mii.def->features & SUPPORTED_Autoneg)
ep->want_autoneg = 1;
+ else {
+ ep->want_autoneg = 0;
+
+ /* Select highest supported speed/duplex */
+ if (ep->phy_mii.def->features & SUPPORTED_1000baseT_Full) {
+ ep->phy_mii.speed = SPEED_1000;
+ ep->phy_mii.duplex = DUPLEX_FULL;
+ } else if (ep->phy_mii.def->features &
+ SUPPORTED_1000baseT_Half) {
+ ep->phy_mii.speed = SPEED_1000;
+ ep->phy_mii.duplex = DUPLEX_HALF;
+ } else if (ep->phy_mii.def->features &
+ SUPPORTED_100baseT_Full) {
+ ep->phy_mii.speed = SPEED_100;
+ ep->phy_mii.duplex = DUPLEX_FULL;
+ } else if (ep->phy_mii.def->features &
+ SUPPORTED_100baseT_Half) {
+ ep->phy_mii.speed = SPEED_100;
+ ep->phy_mii.duplex = DUPLEX_HALF;
+ } else if (ep->phy_mii.def->features &
+ SUPPORTED_10baseT_Full) {
+ ep->phy_mii.speed = SPEED_10;
+ ep->phy_mii.duplex = DUPLEX_FULL;
+ } else {
+ ep->phy_mii.speed = SPEED_10;
+ ep->phy_mii.duplex = DUPLEX_HALF;
+ }
+ }
emac_start_link(ep, NULL);
/* read the MAC Address */
return 0;
}
-static inline int rx_refill(struct net_device *ndev, int gfp)
+static inline int rx_refill(struct net_device *ndev, unsigned int __nocast gfp)
{
struct ns83820 *dev = PRIV(ndev);
unsigned i;
{
struct dev_mc_list *mc_addr;
- for (mc_addr = addrs; mc_addr && --count > 0; mc_addr = mc_addr->next) {
+ for (mc_addr = addrs; mc_addr && count-- > 0; mc_addr = mc_addr->next) {
u_int position = ether_crc(6, mc_addr->dmi_addr);
#ifndef final_version /* Verify multicast address. */
if ((mc_addr->dmi_addr[0] & 1) == 0)
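
The one-character change above swaps a pre-decrement for a
post-decrement: with `--count > 0` the body runs at most count - 1
times, so the last multicast address in the list was never hashed. A
standalone illustration, assuming a list of two addresses:

    int count = 2;
    while (--count > 0)     /* tests 1 > 0, then 0 > 0: one iteration  */
            ;               /* only the first address is visited       */

    count = 2;
    while (count-- > 0)     /* tests 2 > 0, then 1 > 0: two iterations */
            ;               /* both addresses are visited              */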
static int skge_set_mac_address(struct net_device *dev, void *p)
{
struct skge_port *skge = netdev_priv(dev);
- struct sockaddr *addr = p;
- int err = 0;
+ struct skge_hw *hw = skge->hw;
+ unsigned port = skge->port;
+ const struct sockaddr *addr = p;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- skge_down(dev);
+ spin_lock_bh(&hw->phy_lock);
memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
- memcpy_toio(skge->hw->regs + B2_MAC_1 + skge->port*8,
+ memcpy_toio(hw->regs + B2_MAC_1 + port*8,
dev->dev_addr, ETH_ALEN);
- memcpy_toio(skge->hw->regs + B2_MAC_2 + skge->port*8,
+ memcpy_toio(hw->regs + B2_MAC_2 + port*8,
dev->dev_addr, ETH_ALEN);
- if (dev->flags & IFF_UP)
- err = skge_up(dev);
- return err;
+
+ if (hw->chip_id == CHIP_ID_GENESIS)
+ xm_outaddr(hw, port, XM_SA, dev->dev_addr);
+ else {
+ gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr);
+ gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr);
+ }
+ spin_unlock_bh(&hw->phy_lock);
+
+ return 0;
}
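
The rewrite programs the new MAC address live, under phy_lock, instead
of bouncing the interface with skge_down()/skge_up(). The function
still follows the usual set_mac_address contract, condensed here as a
sketch with the skge-specific register writes elided:

    static int set_mac_sketch(struct net_device *dev, void *p)
    {
            const struct sockaddr *addr = p;

            if (!is_valid_ether_addr(addr->sa_data))
                    return -EADDRNOTAVAIL;  /* rejects multicast and all-zero */
            memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
            /* ... write dev->dev_addr into the MAC/GMAC registers ... */
            return 0;
    }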
static const struct {
- finally added firmware (GPL'ed by Adaptec)
- removed compatibility code for 2.2.x
+ LK1.4.2.1 (Ion Badulescu)
+ - fixed 32/64 bit issues on i386 + CONFIG_HIGHMEM
+ - added 32-bit padding to outgoing skb's, removed previous workaround
+
TODO: - fix forced speed/duplexing code (broken a long time ago, when
somebody converted the driver to use the generic MII code)
- fix VLAN support
*/
#define DRV_NAME "starfire"
-#define DRV_VERSION "1.03+LK1.4.2"
-#define DRV_RELDATE "January 19, 2005"
+#define DRV_VERSION "1.03+LK1.4.2.1"
+#define DRV_RELDATE "October 3, 2005"
#include <linux/config.h>
#include <linux/version.h>
* of length 1. If and when this is fixed, the #define below can be removed.
*/
#define HAS_BROKEN_FIRMWARE
+
+/*
+ * If using the broken firmware, data must be padded to the next 32-bit boundary.
+ */
+#ifdef HAS_BROKEN_FIRMWARE
+#define PADDING_MASK 3
+#endif
+
/*
* Define this if using the driver with the zero-copy patch
*/
* This SUCKS.
* We need a much better method to determine if dma_addr_t is 64-bit.
*/
-#if (defined(__i386__) && defined(CONFIG_HIGHMEM) && (LINUX_VERSION_CODE > 0x20500 || defined(CONFIG_HIGHMEM64G))) || defined(__x86_64__) || defined (__ia64__) || defined(__mips64__) || (defined(__mips__) && defined(CONFIG_HIGHMEM) && defined(CONFIG_64BIT_PHYS_ADDR))
+#if (defined(__i386__) && defined(CONFIG_HIGHMEM64G)) || defined(__x86_64__) || defined (__ia64__) || defined(__mips64__) || (defined(__mips__) && defined(CONFIG_HIGHMEM) && defined(CONFIG_64BIT_PHYS_ADDR))
/* 64-bit dma_addr_t */
#define ADDR_64BITS /* This chip uses 64 bit addresses. */
+#define netdrv_addr_t u64
#define cpu_to_dma(x) cpu_to_le64(x)
#define dma_to_cpu(x) le64_to_cpu(x)
#define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
#define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
#define RX_DESC_ADDR_SIZE RxDescAddr64bit
#else /* 32-bit dma_addr_t */
+#define netdrv_addr_t u32
#define cpu_to_dma(x) cpu_to_le32(x)
#define dma_to_cpu(x) le32_to_cpu(x)
#define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
}
#if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
- {
- int has_bad_length = 0;
-
- if (skb_first_frag_len(skb) == 1)
- has_bad_length = 1;
- else {
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
- if (skb_shinfo(skb)->frags[i].size == 1) {
- has_bad_length = 1;
- break;
- }
- }
-
- if (has_bad_length)
- skb_checksum_help(skb, 0);
+ if (skb->ip_summed == CHECKSUM_HW) {
+ skb = skb_padto(skb, (skb->len + PADDING_MASK) & ~PADDING_MASK);
+ if (skb == NULL)
+ return NETDEV_TX_OK;
}
#endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */
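
PADDING_MASK is used with the standard round-up-to-power-of-two idiom:
(len + 3) & ~3 rounds 61 up to 64 and leaves 64 unchanged. Note also
the error path: in kernels of this era skb_padto() frees the skb when
it cannot allocate the padded copy, which is why the code returns
NETDEV_TX_OK without touching skb when NULL comes back. A reduced
sketch of the pattern:

    /* round len up to a 32-bit boundary; works for any power-of-two mask */
    unsigned int padded = (skb->len + PADDING_MASK) & ~PADDING_MASK;

    skb = skb_padto(skb, padded);
    if (skb == NULL)            /* skb_padto already freed the original */
            return NETDEV_TX_OK;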
#endif
#endif
-#ifndef ADDR_64BITS
/* we can do this test only at run-time... sigh */
- if (sizeof(dma_addr_t) == sizeof(u64)) {
- printk("This driver has not been ported to this 64-bit architecture yet\n");
+ if (sizeof(dma_addr_t) != sizeof(netdrv_addr_t)) {
+ printk("This driver has dma_addr_t issues, please send email to maintainer\n");
return -ENODEV;
}
-#endif /* not ADDR_64BITS */
+
return pci_module_init (&starfire_driver);
}
#define ALIGNED_RX_SKB_ADDR(addr) \
((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr))
-static __inline__ struct sk_buff *gem_alloc_skb(int size, int gfp_flags)
+static __inline__ struct sk_buff *gem_alloc_skb(int size,
+ unsigned int __nocast gfp_flags)
{
struct sk_buff *skb = alloc_skb(size + 64, gfp_flags);
#define DRV_MODULE_NAME "tg3"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "3.41"
-#define DRV_MODULE_RELDATE "September 27, 2005"
+#define DRV_MODULE_VERSION "3.42"
+#define DRV_MODULE_RELDATE "Oct 3, 2005"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
static struct pci_device_id write_reorder_chipsets[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD,
PCI_DEVICE_ID_AMD_FE_GATE_700C) },
- { PCI_DEVICE(PCI_VENDOR_ID_AMD,
- PCI_DEVICE_ID_AMD_K8_NB) },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA,
+ PCI_DEVICE_ID_VIA_8385_0) },
{ },
};
u32 misc_ctrl_reg;
tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
#endif
- /* If we have an AMD 762 or K8 chipset, write
- * reordering to the mailbox registers done by the host
- * controller can cause major troubles. We read back from
- * every mailbox register write to force the writes to be
- * posted to the chip in order.
- */
- if (pci_dev_present(write_reorder_chipsets))
- tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
-
/* Force memory write invalidate off. If we leave it on,
* then on 5700_BX chips we have to enable a workaround.
* The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
+ /* If we have an AMD 762 or VIA K8T800 chipset, write
+ * reordering to the mailbox registers done by the host
+ * controller can cause major troubles. We read back from
+ * every mailbox register write to force the writes to be
+ * posted to the chip in order.
+ */
+ if (pci_dev_present(write_reorder_chipsets) &&
+ !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
+ tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
+
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
tp->pci_lat_timer < 64) {
tp->pci_lat_timer = 64;
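
The workaround that TG3_FLAG_MBOX_WRITE_REORDER enables is the usual
posted-write flush: reading back from the mailbox register forces the
preceding write (and anything queued before it) to reach the chip in
order. The idiom, reduced to a hypothetical helper:

    static inline void mbox_write_flush(void __iomem *mbox, u32 val)
    {
            writel(val, mbox);
            readl(mbox);    /* read back: flushes the posted write */
    }

The new placement also skips the workaround on PCI Express devices,
where the host bridges in question are not in the path.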
if (!time_after(jiffies, timeout)) continue;
DPRINTK( "Hardware timeout during initialization.\n");
iounmap(t_mmio);
- kfree(ti);
return -ENODEV;
}
ti->sram_phys =
DPRINTK("Unknown shared ram paging info %01X\n",
ti->shared_ram_paging);
iounmap(t_mmio);
- kfree(ti);
return -ENODEV;
break;
} /*end switch shared_ram_paging */
"driver limit (%05x), adapter not started.\n",
chk_base, ibmtr_mem_base + IBMTR_SHARED_RAM_SIZE);
iounmap(t_mmio);
- kfree(ti);
return -ENODEV;
} else { /* seems cool, record what we have figured out */
ti->sram_base = new_base >> 12;
DPRINTK("Could not grab irq %d. Halting Token Ring driver.\n",
irq);
iounmap(t_mmio);
- kfree(ti);
return -ENODEV;
}
/*?? Now, allocate some of the PIO PORTs for this driver.. */
DPRINTK("Could not grab PIO range. Halting driver.\n");
free_irq(dev->irq, dev);
iounmap(t_mmio);
- kfree(ti);
return -EBUSY;
}
int i;
for (i = 0; i < tp->mtable->leafcount; i++)
if (tp->mtable->mleaf[i].media == dev->if_port) {
- int startup = ! ((tp->chip_id == DC21143 && tp->revision == 65));
+ int startup = ! ((tp->chip_id == DC21143 && (tp->revision == 48 || tp->revision == 65)));
tp->cur_index = i;
tulip_select_media(dev, startup);
setup_done = 1;
#include <linux/ioport.h> /* request_region(), release_region() */
#include <linux/wanrouter.h> /* WAN router definitions */
#include <linux/wanpipe.h> /* WANPIPE common user API definitions */
+#include <linux/rcupdate.h>
#include <linux/in.h>
#include <asm/io.h> /* phys_to_virt() */
struct in_ifaddr *ifaddr;
struct in_device *in_dev;
+ unsigned long addr = 0;
- if ((in_dev = __in_dev_get(dev)) == NULL){
- return 0;
+ rcu_read_lock();
+ if ((in_dev = __in_dev_get_rcu(dev)) == NULL){
+ goto out;
}
if ((ifaddr = in_dev->ifa_list)== NULL ){
- return 0;
+ goto out;
}
switch (option){
case WAN_LOCAL_IP:
- return ifaddr->ifa_local;
+ addr = ifaddr->ifa_local;
break;
case WAN_POINTOPOINT_IP:
- return ifaddr->ifa_address;
+ addr = ifaddr->ifa_address;
break;
case WAN_NETMASK_IP:
- return ifaddr->ifa_mask;
+ addr = ifaddr->ifa_mask;
break;
case WAN_BROADCAST_IP:
- return ifaddr->ifa_broadcast;
+ addr = ifaddr->ifa_broadcast;
break;
default:
- return 0;
+ break;
}
- return 0;
+out:
+ rcu_read_unlock();
+ return addr;
}
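
The conversion funnels every case through a single exit so the new
rcu_read_unlock() cannot be skipped, and the value read under the lock
is copied into a local before the unlock. The shape of the read-side
pattern, stripped to its essentials:

    unsigned long addr = 0;
    struct in_device *in_dev;

    rcu_read_lock();
    in_dev = __in_dev_get_rcu(dev);
    if (in_dev && in_dev->ifa_list)
            addr = in_dev->ifa_list->ifa_local;  /* dereference only under the lock */
    rcu_read_unlock();
    return addr;                                 /* the copy outlives the lock */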
void add_gateway(sdla_t *card, struct net_device *dev)
u32 addr = 0, mask = ~0; /* FIXME: is the mask correct? */
#ifdef CONFIG_INET
rcu_read_lock();
- if ((in_dev = __in_dev_get(dev)) != NULL)
+ if ((in_dev = __in_dev_get_rcu(dev)) != NULL)
{
for (ifa=in_dev->ifa_list; ifa != NULL;
ifa=ifa->ifa_next) {
return 0;
}
- /* Length of the packet body */
- /* FIXME: what if the skb is smaller than this? */
- len = max_t(int,skb->len - ETH_HLEN, ETH_ZLEN - ETH_HLEN);
+ /* Check packet length, pad short packets, round up odd length */
+ len = max_t(int, ALIGN(skb->len, 2), ETH_ZLEN);
+ if (skb->len < len) {
+ skb = skb_padto(skb, len);
+ if (skb == NULL)
+ goto fail;
+ }
+ len -= ETH_HLEN;
eh = (struct ethhdr *)skb->data;
p = skb->data;
}
- /* Round up for odd length packets */
- err = hermes_bap_pwrite(hw, USER_BAP, p, ALIGN(data_len, 2),
+ err = hermes_bap_pwrite(hw, USER_BAP, p, data_len,
txfid, data_off);
if (err) {
printk(KERN_ERR "%s: Error %d writing packet to BAP\n",
struct in_device *in_dev;
rcu_read_lock();
- in_dev = __in_dev_get(strip_info->dev);
+ in_dev = __in_dev_get_rcu(strip_info->dev);
if (in_dev == NULL) {
rcu_read_unlock();
return NULL;
brd = addr = 0;
rcu_read_lock();
- in_dev = __in_dev_get(strip_info->dev);
+ in_dev = __in_dev_get_rcu(strip_info->dev);
if (in_dev) {
if (in_dev->ifa_list) {
brd = in_dev->ifa_list->ifa_broadcast;
#include <linux/proc_fs.h>
#include <linux/ctype.h>
#include <linux/blkdev.h>
+#include <linux/rcupdate.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/hardware.h>
/* we are running as tasklet, so locking dev_base
* for reading should be OK */
read_lock(&dev_base_lock);
+ rcu_read_lock();
for (dev = dev_base; dev; dev = dev->next) {
struct net_device_stats *stats;
- struct in_device *in_dev = __in_dev_get(dev);
+ struct in_device *in_dev = __in_dev_get_rcu(dev);
if (!in_dev || !in_dev->ifa_list)
continue;
if (LOOPBACK(in_dev->ifa_list->ifa_local))
rx_total += stats->rx_packets;
tx_total += stats->tx_packets;
}
+ rcu_read_unlock();
read_unlock(&dev_base_lock);
retval = 0;
__u32 pdu_hdr;
__u32 pdu_hdr_ack;
__u16 ipa;
+ __u32 pkt_seqno;
};
struct qeth_reply {
"on interface %s", QETH_CARD_IFNAME(card));
return -ENOMEM;
}
+ kfree_skb(*skb);
*skb = new_skb;
}
return 0;
__qeth_set_offline(struct ccwgroup_device *cgdev, int recovery_mode)
{
struct qeth_card *card = (struct qeth_card *) cgdev->dev.driver_data;
- int rc = 0;
+ int rc = 0, rc2 = 0, rc3 = 0;
enum qeth_card_states recover_flag;
QETH_DBF_TEXT(setup, 3, "setoffl");
CARD_BUS_ID(card));
return -ERESTARTSYS;
}
- if ((rc = ccw_device_set_offline(CARD_DDEV(card))) ||
- (rc = ccw_device_set_offline(CARD_WDEV(card))) ||
- (rc = ccw_device_set_offline(CARD_RDEV(card)))) {
+ rc = ccw_device_set_offline(CARD_DDEV(card));
+ rc2 = ccw_device_set_offline(CARD_WDEV(card));
+ rc3 = ccw_device_set_offline(CARD_RDEV(card));
+ if (!rc)
+ rc = (rc2) ? rc2 : rc3;
+ if (rc)
QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
- }
if (recover_flag == CARD_STATE_UP)
card->state = CARD_STATE_RECOVER;
qeth_notify_processes();
spin_lock_init(&card->vlanlock);
card->vlangrp = NULL;
#endif
+ spin_lock_init(&card->lock);
spin_lock_init(&card->ip_lock);
spin_lock_init(&card->thread_mask_lock);
card->thread_start_mask = 0;
spin_unlock_irqrestore(&reply->card->lock, flags);
}
-static void
-qeth_reset_ip_addresses(struct qeth_card *card)
-{
- QETH_DBF_TEXT(trace, 2, "rstipadd");
-
- qeth_clear_ip_list(card, 0, 1);
- /* this function will also schedule the SET_IP_THREAD */
- qeth_set_multicast_list(card->dev);
-}
-
static struct qeth_ipa_cmd *
qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob)
{
"IP address reset.\n",
QETH_CARD_IFNAME(card),
card->info.chpid);
- card->lan_online = 1;
netif_carrier_on(card->dev);
- qeth_reset_ip_addresses(card);
+ qeth_schedule_recovery(card);
return NULL;
case IPA_CMD_REGISTER_LOCAL_ADDR:
QETH_DBF_TEXT(trace,3, "irla");
skb_pull(skb, VLAN_HLEN);
}
#endif
+ *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno;
return vlan_id;
}
return -ENOMEM;
}
for(j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j){
- ptr = (void *) __get_free_page(GFP_KERNEL);
+ ptr = (void *) __get_free_page(GFP_KERNEL|GFP_DMA);
if (!ptr) {
while (j > 0)
free_page((unsigned long)
if (card->qdio.state == QETH_QDIO_ALLOCATED)
return 0;
- card->qdio.in_q = kmalloc(sizeof(struct qeth_qdio_q), GFP_KERNEL);
+ card->qdio.in_q = kmalloc(sizeof(struct qeth_qdio_q),
+ GFP_KERNEL|GFP_DMA);
if (!card->qdio.in_q)
return - ENOMEM;
QETH_DBF_TEXT(setup, 2, "inq");
}
for (i = 0; i < card->qdio.no_out_queues; ++i){
card->qdio.out_qs[i] = kmalloc(sizeof(struct qeth_qdio_out_q),
- GFP_KERNEL);
+ GFP_KERNEL|GFP_DMA);
if (!card->qdio.out_qs[i]){
while (i > 0)
kfree(card->qdio.out_qs[--i]);
if (!card->vlangrp)
return;
rcu_read_lock();
- in_dev = __in_dev_get(card->vlangrp->vlan_devices[vid]);
+ in_dev = __in_dev_get_rcu(card->vlangrp->vlan_devices[vid]);
if (!in_dev)
goto out;
for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
+ /* Disable IPV6 support hard coded for Hipersockets */
+ if(card->info.type == QETH_CARD_TYPE_IQD)
+ card->options.ipa4.supported_funcs &= ~IPA_IPV6;
} else {
#ifdef CONFIG_QETH_IPV6
card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
goto out;
rcu_read_lock();
- in_dev = rcu_dereference(__in_dev_get(dev));
+ in_dev = __in_dev_get_rcu(dev);
if (in_dev == NULL) {
rcu_read_unlock();
return -EINVAL;
Remove un-needed eh_abort handler.
Add support for embedded firmware error strings.
2.26.02.003 - Correctly handle single sgl's with use_sg=1.
+ 2.26.02.004 - Add support for 9550SX controllers.
*/
#include <linux/module.h>
#include "3w-9xxx.h"
/* Globals */
-#define TW_DRIVER_VERSION "2.26.02.003"
+#define TW_DRIVER_VERSION "2.26.02.004"
static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
static unsigned int twa_device_extension_count;
static int twa_major = -1;
writel(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
}
- if (status_reg_value & TW_STATUS_SBUF_WRITE_ERROR) {
- TW_PRINTK(tw_dev->host, TW_DRIVER, 0xf, "SBUF Write Error: clearing");
- writel(TW_CONTROL_CLEAR_SBUF_WRITE_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
- }
-
if (status_reg_value & TW_STATUS_MICROCONTROLLER_ERROR) {
if (tw_dev->reset_print == 0) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Microcontroller Error: clearing");
return retval;
} /* End twa_empty_response_queue() */
+/* This function will clear the pchip/response queue on 9550SX */
+static int twa_empty_response_queue_large(TW_Device_Extension *tw_dev)
+{
+ u32 status_reg_value, response_que_value;
+ int count = 0, retval = 1;
+
+ if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9550SX) {
+ status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
+
+ while (((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) && (count < TW_MAX_RESPONSE_DRAIN)) {
+ response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR_LARGE(tw_dev));
+ if ((response_que_value & TW_9550SX_DRAIN_COMPLETED) == TW_9550SX_DRAIN_COMPLETED) {
+ /* P-chip settle time */
+ msleep(500);
+ retval = 0;
+ goto out;
+ }
+ status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
+ count++;
+ }
+ if (count == TW_MAX_RESPONSE_DRAIN)
+ goto out;
+
+ retval = 0;
+ } else
+ retval = 0;
+out:
+ return retval;
+} /* End twa_empty_response_queue_large() */
+
/* This function passes sense keys from firmware to scsi layer */
static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host)
{
int tries = 0, retval = 1, flashed = 0, do_soft_reset = soft_reset;
while (tries < TW_MAX_RESET_TRIES) {
- if (do_soft_reset)
+ if (do_soft_reset) {
TW_SOFT_RESET(tw_dev);
+ /* Clear pchip/response queue on 9550SX */
+ if (twa_empty_response_queue_large(tw_dev)) {
+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x36, "Response queue (large) empty failed during reset sequence");
+ do_soft_reset = 1;
+ tries++;
+ continue;
+ }
+ }
/* Make sure controller is in a good state */
if (twa_poll_status(tw_dev, TW_STATUS_MICROCONTROLLER_READY | (do_soft_reset == 1 ? TW_STATUS_ATTENTION_INTERRUPT : 0), 60)) {
goto out_free_device_extension;
}
- mem_addr = pci_resource_start(pdev, 1);
+ if (pdev->device == PCI_DEVICE_ID_3WARE_9000)
+ mem_addr = pci_resource_start(pdev, 1);
+ else
+ mem_addr = pci_resource_start(pdev, 2);
/* Save base address */
tw_dev->base_addr = ioremap(mem_addr, PAGE_SIZE);
static struct pci_device_id twa_pci_tbl[] __devinitdata = {
{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9550SX,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ }
};
MODULE_DEVICE_TABLE(pci, twa_pci_tbl);
#define TW_CONTROL_CLEAR_PARITY_ERROR 0x00800000
#define TW_CONTROL_CLEAR_QUEUE_ERROR 0x00400000
#define TW_CONTROL_CLEAR_PCI_ABORT 0x00100000
-#define TW_CONTROL_CLEAR_SBUF_WRITE_ERROR 0x00000008
/* Status register bit definitions */
#define TW_STATUS_MAJOR_VERSION_MASK 0xF0000000
#define TW_STATUS_MICROCONTROLLER_READY 0x00002000
#define TW_STATUS_COMMAND_QUEUE_EMPTY 0x00001000
#define TW_STATUS_EXPECTED_BITS 0x00002000
-#define TW_STATUS_UNEXPECTED_BITS 0x00F00008
-#define TW_STATUS_SBUF_WRITE_ERROR 0x00000008
-#define TW_STATUS_VALID_INTERRUPT 0x00DF0008
+#define TW_STATUS_UNEXPECTED_BITS 0x00F00000
+#define TW_STATUS_VALID_INTERRUPT 0x00DF0000
/* RESPONSE QUEUE BIT DEFINITIONS */
#define TW_RESPONSE_ID_MASK 0x00000FF0
/* Compatibility defines */
#define TW_9000_ARCH_ID 0x5
-#define TW_CURRENT_DRIVER_SRL 28
-#define TW_CURRENT_DRIVER_BUILD 9
-#define TW_CURRENT_DRIVER_BRANCH 4
+#define TW_CURRENT_DRIVER_SRL 30
+#define TW_CURRENT_DRIVER_BUILD 80
+#define TW_CURRENT_DRIVER_BRANCH 0
/* Phase defines */
#define TW_PHASE_INITIAL 0
#define TW_PHASE_SGLIST 2
/* Misc defines */
+#define TW_9550SX_DRAIN_COMPLETED 0xFFFF
#define TW_SECTOR_SIZE 512
#define TW_ALIGNMENT_9000 4 /* 4 bytes */
#define TW_ALIGNMENT_9000_SGL 0x3
#ifndef PCI_DEVICE_ID_3WARE_9000
#define PCI_DEVICE_ID_3WARE_9000 0x1002
#endif
+#ifndef PCI_DEVICE_ID_3WARE_9550SX
+#define PCI_DEVICE_ID_3WARE_9550SX 0x1003
+#endif
/* Bitmask macros to eliminate bitfields */
#define TW_STATUS_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + 0x4)
#define TW_COMMAND_QUEUE_REG_ADDR(x) (sizeof(dma_addr_t) > 4 ? ((unsigned char __iomem *)x->base_addr + 0x20) : ((unsigned char __iomem *)x->base_addr + 0x8))
#define TW_RESPONSE_QUEUE_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + 0xC)
+#define TW_RESPONSE_QUEUE_REG_ADDR_LARGE(x) ((unsigned char __iomem *)x->base_addr + 0x30)
#define TW_CLEAR_ALL_INTERRUPTS(x) (writel(TW_STATUS_VALID_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
#define TW_CLEAR_ATTENTION_INTERRUPT(x) (writel(TW_CONTROL_CLEAR_ATTENTION_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
#define TW_CLEAR_HOST_INTERRUPT(x) (writel(TW_CONTROL_CLEAR_HOST_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
obj-$(CONFIG_SCSI_DC390T) += tmscsim.o
obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o
obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/
+obj-$(CONFIG_MEGARAID_SAS) += megaraid/
obj-$(CONFIG_SCSI_ACARD) += atp870u.o
obj-$(CONFIG_SCSI_SUNESP) += esp.o
obj-$(CONFIG_SCSI_GDTH) += gdth.o
}
dresp = (struct aac_mount *)fib_data(fibptr);
+ if ((le32_to_cpu(dresp->status) == ST_OK) &&
+ (le32_to_cpu(dresp->mnt[0].vol) == CT_NONE)) {
+ dinfo->command = cpu_to_le32(VM_NameServe64);
+ dinfo->count = cpu_to_le32(index);
+ dinfo->type = cpu_to_le32(FT_FILESYS);
+
+ if (fib_send(ContainerCommand,
+ fibptr,
+ sizeof(struct aac_query_mount),
+ FsaNormal,
+ 1, 1,
+ NULL, NULL) < 0)
+ continue;
+ } else
+ dresp->mnt[0].capacityhigh = 0;
+
dprintk ((KERN_DEBUG
- "VM_NameServe cid=%d status=%d vol=%d state=%d cap=%u\n",
+ "VM_NameServe cid=%d status=%d vol=%d state=%d cap=%llu\n",
(int)index, (int)le32_to_cpu(dresp->status),
(int)le32_to_cpu(dresp->mnt[0].vol),
(int)le32_to_cpu(dresp->mnt[0].state),
- (unsigned)le32_to_cpu(dresp->mnt[0].capacity)));
+ ((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
+ (((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32)));
if ((le32_to_cpu(dresp->status) == ST_OK) &&
(le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
(le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
fsa_dev_ptr[index].valid = 1;
fsa_dev_ptr[index].type = le32_to_cpu(dresp->mnt[0].vol);
- fsa_dev_ptr[index].size = le32_to_cpu(dresp->mnt[0].capacity);
+ fsa_dev_ptr[index].size
+ = ((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
+ (((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32);
if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
fsa_dev_ptr[index].ro = 1;
}
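
Both here and in probe_container() below, the 64-bit capacity is
assembled from two little-endian 32-bit fields; the casts before the
shift are what keep the high half from being truncated to int. Since
the two halves occupy disjoint bits, `|` and the `+` used by the patch
are interchangeable:

    u64 size = ((u64)le32_to_cpu(dresp->mnt[0].capacity)) |
               (((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32);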
* is updated in the struct fsa_dev_info structure rather than returned.
*/
-static int probe_container(struct aac_dev *dev, int cid)
+int probe_container(struct aac_dev *dev, int cid)
{
struct fsa_dev_info *fsa_dev_ptr;
int status;
dresp = (struct aac_mount *) fib_data(fibptr);
+ if ((le32_to_cpu(dresp->status) == ST_OK) &&
+ (le32_to_cpu(dresp->mnt[0].vol) == CT_NONE)) {
+ dinfo->command = cpu_to_le32(VM_NameServe64);
+ dinfo->count = cpu_to_le32(cid);
+ dinfo->type = cpu_to_le32(FT_FILESYS);
+
+ if (fib_send(ContainerCommand,
+ fibptr,
+ sizeof(struct aac_query_mount),
+ FsaNormal,
+ 1, 1,
+ NULL, NULL) < 0)
+ goto error;
+ } else
+ dresp->mnt[0].capacityhigh = 0;
+
if ((le32_to_cpu(dresp->status) == ST_OK) &&
(le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
(le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
fsa_dev_ptr[cid].valid = 1;
fsa_dev_ptr[cid].type = le32_to_cpu(dresp->mnt[0].vol);
- fsa_dev_ptr[cid].size = le32_to_cpu(dresp->mnt[0].capacity);
+ fsa_dev_ptr[cid].size
+ = ((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
+ (((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32);
if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
fsa_dev_ptr[cid].ro = 1;
}
fibptr,
sizeof(*info),
FsaNormal,
- 1, 1,
+ -1, 1, /* First `interrupt' command uses special wait */
NULL,
NULL);
if (!(dev->raw_io_interface)) {
dev->scsi_host_ptr->sg_tablesize = (dev->max_fib_size -
sizeof(struct aac_fibhdr) -
- sizeof(struct aac_write) + sizeof(struct sgmap)) /
- sizeof(struct sgmap);
+ sizeof(struct aac_write) + sizeof(struct sgentry)) /
+ sizeof(struct sgentry);
if (dev->dac_support) {
/*
* 38 scatter gather elements
(dev->max_fib_size -
sizeof(struct aac_fibhdr) -
sizeof(struct aac_write64) +
- sizeof(struct sgmap64)) /
- sizeof(struct sgmap64);
+ sizeof(struct sgentry64)) /
+ sizeof(struct sgentry64);
}
dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT;
if(!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) {
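
struct aac_write ends in a struct sgmap, which already embeds one
struct sgentry; subtracting sizeof(struct aac_write) and adding back
one sgentry therefore yields the space available for entries, and the
fix divides by sizeof(struct sgentry) rather than the larger
sizeof(struct sgmap), which also includes the count field. With
made-up sizes, for illustration only:

    enum { FIB = 512, FIBHDR = 32, WRITE = 64, SGENTRY = 8 };  /* hypothetical */
    int sg_tablesize = (FIB - FIBHDR - WRITE + SGENTRY) / SGENTRY;  /* = 53 */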
dev = (struct aac_dev *)scsicmd->device->host->hostdata;
cid = ID_LUN_TO_CONTAINER(scsicmd->device->id, scsicmd->device->lun);
- dprintk((KERN_DEBUG "io_callback[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3], jiffies));
+ if (nblank(dprintk(x))) {
+ u64 lba;
+ switch (scsicmd->cmnd[0]) {
+ case WRITE_6:
+ case READ_6:
+ lba = ((scsicmd->cmnd[1] & 0x1F) << 16) |
+ (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
+ break;
+ case WRITE_16:
+ case READ_16:
+ lba = ((u64)scsicmd->cmnd[2] << 56) |
+ ((u64)scsicmd->cmnd[3] << 48) |
+ ((u64)scsicmd->cmnd[4] << 40) |
+ ((u64)scsicmd->cmnd[5] << 32) |
+ ((u64)scsicmd->cmnd[6] << 24) |
+ (scsicmd->cmnd[7] << 16) |
+ (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
+ break;
+ case WRITE_12:
+ case READ_12:
+ lba = ((u64)scsicmd->cmnd[2] << 24) |
+ (scsicmd->cmnd[3] << 16) |
+ (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
+ break;
+ default:
+ lba = ((u64)scsicmd->cmnd[2] << 24) |
+ (scsicmd->cmnd[3] << 16) |
+ (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
+ break;
+ }
+ printk(KERN_DEBUG
+ "io_callback[cpu %d]: lba = %llu, t = %ld.\n",
+ smp_processor_id(), (unsigned long long)lba, jiffies);
+ }
if (fibptr == NULL)
BUG();
static int aac_read(struct scsi_cmnd * scsicmd, int cid)
{
- u32 lba;
+ u64 lba;
u32 count;
int status;
/*
* Get block address and transfer length
*/
- if (scsicmd->cmnd[0] == READ_6) /* 6 byte command */
- {
+ switch (scsicmd->cmnd[0]) {
+ case READ_6:
dprintk((KERN_DEBUG "aachba: received a read(6) command on id %d.\n", cid));
- lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
+ lba = ((scsicmd->cmnd[1] & 0x1F) << 16) |
+ (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
count = scsicmd->cmnd[4];
if (count == 0)
count = 256;
- } else {
+ break;
+ case READ_16:
+ dprintk((KERN_DEBUG "aachba: received a read(16) command on id %d.\n", cid));
+
+ lba = ((u64)scsicmd->cmnd[2] << 56) |
+ ((u64)scsicmd->cmnd[3] << 48) |
+ ((u64)scsicmd->cmnd[4] << 40) |
+ ((u64)scsicmd->cmnd[5] << 32) |
+ ((u64)scsicmd->cmnd[6] << 24) |
+ (scsicmd->cmnd[7] << 16) |
+ (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
+ count = (scsicmd->cmnd[10] << 24) |
+ (scsicmd->cmnd[11] << 16) |
+ (scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13];
+ break;
+ case READ_12:
+ dprintk((KERN_DEBUG "aachba: received a read(12) command on id %d.\n", cid));
+
+ lba = ((u64)scsicmd->cmnd[2] << 24) |
+ (scsicmd->cmnd[3] << 16) |
+ (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
+ count = (scsicmd->cmnd[6] << 24) |
+ (scsicmd->cmnd[7] << 16) |
+ (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
+ break;
+ default:
dprintk((KERN_DEBUG "aachba: received a read(10) command on id %d.\n", cid));
- lba = (scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
+ lba = ((u64)scsicmd->cmnd[2] << 24) |
+ (scsicmd->cmnd[3] << 16) |
+ (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
+ break;
}
- dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %u, t = %ld.\n",
+ dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %llu, t = %ld.\n",
smp_processor_id(), (unsigned long long)lba, jiffies));
+ if ((!(dev->raw_io_interface) || !(dev->raw_io_64)) &&
+ (lba & 0xffffffff00000000LL)) {
+ dprintk((KERN_DEBUG "aac_read: Illegal lba\n"));
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_CHECK_CONDITION;
+ set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
+ HARDWARE_ERROR,
+ SENCODE_INTERNAL_TARGET_FAILURE,
+ ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
+ 0, 0);
+ memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
+ (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
+ ? sizeof(scsicmd->sense_buffer)
+ : sizeof(dev->fsa_dev[cid].sense_data));
+ scsicmd->scsi_done(scsicmd);
+ return 0;
+ }
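
READ(16) carries the LBA as eight big-endian bytes, and each byte must
be widened to u64 before shifting: `cmnd[2] << 56` would be evaluated
in plain int and overflow. The open-coded assembly above is equivalent
to this hypothetical helper:

    static inline u64 cdb_be64(const u8 *p)
    {
            return ((u64)p[0] << 56) | ((u64)p[1] << 48) |
                   ((u64)p[2] << 40) | ((u64)p[3] << 32) |
                   ((u64)p[4] << 24) | ((u64)p[5] << 16) |
                   ((u64)p[6] <<  8) |  (u64)p[7];
    }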
/*
	 * Allocate and initialize a Fib
*/
if (dev->raw_io_interface) {
struct aac_raw_io *readcmd;
readcmd = (struct aac_raw_io *) fib_data(cmd_fibcontext);
- readcmd->block[0] = cpu_to_le32(lba);
- readcmd->block[1] = 0;
+ readcmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
+ readcmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
readcmd->count = cpu_to_le32(count<<9);
readcmd->cid = cpu_to_le16(cid);
readcmd->flags = cpu_to_le16(1);
readcmd->command = cpu_to_le32(VM_CtHostRead64);
readcmd->cid = cpu_to_le16(cid);
readcmd->sector_count = cpu_to_le16(count);
- readcmd->block = cpu_to_le32(lba);
+ readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
readcmd->pad = 0;
readcmd->flags = 0;
readcmd = (struct aac_read *) fib_data(cmd_fibcontext);
readcmd->command = cpu_to_le32(VM_CtBlockRead);
readcmd->cid = cpu_to_le32(cid);
- readcmd->block = cpu_to_le32(lba);
+ readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
readcmd->count = cpu_to_le32(count * 512);
aac_build_sg(scsicmd, &readcmd->sg);
static int aac_write(struct scsi_cmnd * scsicmd, int cid)
{
- u32 lba;
+ u64 lba;
u32 count;
int status;
u16 fibsize;
count = scsicmd->cmnd[4];
if (count == 0)
count = 256;
+ } else if (scsicmd->cmnd[0] == WRITE_16) { /* 16 byte command */
+ dprintk((KERN_DEBUG "aachba: received a write(16) command on id %d.\n", cid));
+
+ lba = ((u64)scsicmd->cmnd[2] << 56) |
+ ((u64)scsicmd->cmnd[3] << 48) |
+ ((u64)scsicmd->cmnd[4] << 40) |
+ ((u64)scsicmd->cmnd[5] << 32) |
+ ((u64)scsicmd->cmnd[6] << 24) |
+ (scsicmd->cmnd[7] << 16) |
+ (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
+ count = (scsicmd->cmnd[10] << 24) | (scsicmd->cmnd[11] << 16) |
+ (scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13];
+ } else if (scsicmd->cmnd[0] == WRITE_12) { /* 12 byte command */
+ dprintk((KERN_DEBUG "aachba: received a write(12) command on id %d.\n", cid));
+
+ lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16)
+ | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
+ count = (scsicmd->cmnd[6] << 24) | (scsicmd->cmnd[7] << 16)
+ | (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
} else {
dprintk((KERN_DEBUG "aachba: received a write(10) command on id %d.\n", cid));
- lba = (scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
+ lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
}
- dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %u, t = %ld.\n",
+ dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n",
smp_processor_id(), (unsigned long long)lba, jiffies));
+ if ((!(dev->raw_io_interface) || !(dev->raw_io_64))
+ && (lba & 0xffffffff00000000LL)) {
+ dprintk((KERN_DEBUG "aac_write: Illegal lba\n"));
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
+ set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
+ HARDWARE_ERROR,
+ SENCODE_INTERNAL_TARGET_FAILURE,
+ ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
+ 0, 0);
+ memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
+ (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
+ ? sizeof(scsicmd->sense_buffer)
+ : sizeof(dev->fsa_dev[cid].sense_data));
+ scsicmd->scsi_done(scsicmd);
+ return 0;
+ }
/*
* Allocate and initialize a Fib then setup a BlockWrite command
*/
if (dev->raw_io_interface) {
struct aac_raw_io *writecmd;
writecmd = (struct aac_raw_io *) fib_data(cmd_fibcontext);
- writecmd->block[0] = cpu_to_le32(lba);
- writecmd->block[1] = 0;
+ writecmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
+ writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
writecmd->count = cpu_to_le32(count<<9);
writecmd->cid = cpu_to_le16(cid);
writecmd->flags = 0;
writecmd->command = cpu_to_le32(VM_CtHostWrite64);
writecmd->cid = cpu_to_le16(cid);
writecmd->sector_count = cpu_to_le16(count);
- writecmd->block = cpu_to_le32(lba);
+ writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
writecmd->pad = 0;
writecmd->flags = 0;
writecmd = (struct aac_write *) fib_data(cmd_fibcontext);
writecmd->command = cpu_to_le32(VM_CtBlockWrite);
writecmd->cid = cpu_to_le32(cid);
- writecmd->block = cpu_to_le32(lba);
+ writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
writecmd->count = cpu_to_le32(count * 512);
writecmd->sg.count = cpu_to_le32(1);
/* ->stable is not used - it did mean which type of write */
*/
if ((fsa_dev_ptr[cid].valid & 1) == 0) {
switch (scsicmd->cmnd[0]) {
+ case SERVICE_ACTION_IN:
+ if (!(dev->raw_io_interface) ||
+ !(dev->raw_io_64) ||
+ ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
+ break;
case INQUIRY:
case READ_CAPACITY:
case TEST_UNIT_READY:
spin_unlock_irq(host->host_lock);
probe_container(dev, cid);
+ if ((fsa_dev_ptr[cid].valid & 1) == 0)
+ fsa_dev_ptr[cid].valid = 0;
spin_lock_irq(host->host_lock);
if (fsa_dev_ptr[cid].valid == 0) {
scsicmd->result = DID_NO_CONNECT << 16;
memset(&inq_data, 0, sizeof (struct inquiry_data));
inq_data.inqd_ver = 2; /* claim compliance to SCSI-2 */
- inq_data.inqd_dtq = 0x80; /* set RMB bit to one indicating that the medium is removable */
inq_data.inqd_rdf = 2; /* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */
inq_data.inqd_len = 31;
/*Format for "pad2" is RelAdr | WBus32 | WBus16 | Sync | Linked |Reserved| CmdQue | SftRe */
aac_internal_transfer(scsicmd, &inq_data, 0, sizeof(inq_data));
return aac_get_container_name(scsicmd, cid);
}
+ case SERVICE_ACTION_IN:
+ if (!(dev->raw_io_interface) ||
+ !(dev->raw_io_64) ||
+ ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
+ break;
+ {
+ u64 capacity;
+ char cp[12];
+ unsigned int offset = 0;
+
+ dprintk((KERN_DEBUG "READ CAPACITY_16 command.\n"));
+ capacity = fsa_dev_ptr[cid].size - 1;
+ if (scsicmd->cmnd[13] > 12) {
+ offset = scsicmd->cmnd[13] - 12;
+ if (offset > sizeof(cp))
+ break;
+ memset(cp, 0, offset);
+ aac_internal_transfer(scsicmd, cp, 0, offset);
+ }
+ cp[0] = (capacity >> 56) & 0xff;
+ cp[1] = (capacity >> 48) & 0xff;
+ cp[2] = (capacity >> 40) & 0xff;
+ cp[3] = (capacity >> 32) & 0xff;
+ cp[4] = (capacity >> 24) & 0xff;
+ cp[5] = (capacity >> 16) & 0xff;
+ cp[6] = (capacity >> 8) & 0xff;
+ cp[7] = (capacity >> 0) & 0xff;
+ cp[8] = 0;
+ cp[9] = 0;
+ cp[10] = 2;
+ cp[11] = 0;
+ aac_internal_transfer(scsicmd, cp, offset, sizeof(cp));
+
+ /* Do not cache partition table for arrays */
+ scsicmd->device->removable = 1;
+
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+ scsicmd->scsi_done(scsicmd);
+
+ return 0;
+ }
+
case READ_CAPACITY:
{
u32 capacity;
char cp[8];
dprintk((KERN_DEBUG "READ CAPACITY command.\n"));
- if (fsa_dev_ptr[cid].size <= 0x100000000LL)
+ if (fsa_dev_ptr[cid].size <= 0x100000000ULL)
capacity = fsa_dev_ptr[cid].size - 1;
else
capacity = (u32)-1;
cp[6] = 2;
cp[7] = 0;
aac_internal_transfer(scsicmd, cp, 0, sizeof(cp));
+ /* Do not cache partition table for arrays */
+ scsicmd->device->removable = 1;
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
scsicmd->scsi_done(scsicmd);
{
case READ_6:
case READ_10:
+ case READ_12:
+ case READ_16:
/*
* Hack to keep track of ordinal number of the device that
* corresponds to a container. Needed to convert
*/
spin_unlock_irq(host->host_lock);
- if (scsicmd->request->rq_disk)
- memcpy(fsa_dev_ptr[cid].devname,
- scsicmd->request->rq_disk->disk_name,
- 8);
-
+ if (scsicmd->request->rq_disk)
+ strlcpy(fsa_dev_ptr[cid].devname,
+ scsicmd->request->rq_disk->disk_name,
+ min(sizeof(fsa_dev_ptr[cid].devname),
+ sizeof(scsicmd->request->rq_disk->disk_name) + 1));
ret = aac_read(scsicmd, cid);
spin_lock_irq(host->host_lock);
return ret;
case WRITE_6:
case WRITE_10:
+ case WRITE_12:
+ case WRITE_16:
spin_unlock_irq(host->host_lock);
ret = aac_write(scsicmd, cid);
spin_lock_irq(host->host_lock);
case WRITE_10:
case READ_12:
case WRITE_12:
+ case READ_16:
+ case WRITE_16:
if(le32_to_cpu(srbreply->data_xfer_length) < scsicmd->underflow ) {
printk(KERN_WARNING"aacraid: SCSI CMD underflow\n");
} else {
sizeof(scsicmd->sense_buffer) :
le32_to_cpu(srbreply->sense_data_size);
#ifdef AAC_DETAILED_STATUS_INFO
- dprintk((KERN_WARNING "aac_srb_callback: check condition, status = %d len=%d\n",
- le32_to_cpu(srbreply->status), len));
+ printk(KERN_WARNING "aac_srb_callback: check condition, status = %d len=%d\n",
+ le32_to_cpu(srbreply->status), len);
#endif
memcpy(scsicmd->sense_buffer, srbreply->sense_data, len);
#if (!defined(dprintk))
# define dprintk(x)
#endif
+/* eg: if (nblank(dprintk(x))) */
+#define _nblank(x) #x
+#define nblank(x) _nblank(x)[0]
+
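
nblank() turns "does this macro expand to anything?" into a
compile-time constant. Because the argument is macro-expanded before
it reaches the inner #x (the two-level indirection exists exactly for
that), a disabled dprintk() stringifies to "" whose first character is
'\0' (false), while any non-empty expansion yields a non-zero first
character (true). The compiler then discards the dead branch entirely:

    if (nblank(dprintk(x))) {
            /* expensive debug-only work: compiled out when
             * dprintk() is defined away to nothing */
    }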
/*------------------------------------------------------------------------------
* D E F I N E S
*/
#define FsaNormal 1
-#define FsaHigh 2
/*
* Define the FIB. The FIB is the where all the requested data and
/* This is only valid for adapter to host command queues. */
 spinlock_t *lock; /* Spinlock for this queue; must be held before accessing the queue */
spinlock_t lockdata; /* Actual lock (used only on one side of the lock) */
- unsigned long SavedIrql; /* Previous IRQL when the spin lock is taken */
- u32 padding; /* Padding - FIXME - can remove I believe */
 struct list_head cmdq; /* A queue of FIBs which need to be processed by the FS thread. This is */
/* only valid for command queues which receive entries from the adapter. */
struct list_head pendingq; /* A queue of outstanding fib's to the adapter. */
u64 last;
u64 size;
u32 type;
+ u32 config_waiting_on;
u16 queue_depth;
+ u8 config_needed;
u8 valid;
u8 ro;
u8 locked;
/* macro side-effects BEWARE */
# define raw_io_interface \
init->InitStructRevision==cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4)
+ u8 raw_io_64;
u8 printf_enabled;
};
#define VM_CtBlockVerify64 18
#define VM_CtHostRead64 19
#define VM_CtHostWrite64 20
+#define VM_DrvErrTblLog 21
+#define VM_NameServe64 22
-#define MAX_VMCOMMAND_NUM 21 /* used for sizing stats array - leave last */
+#define MAX_VMCOMMAND_NUM 23 /* used for sizing stats array - leave last */
/*
* Descriptive information (eg, vital stats)
manager (eg, filesystem) */
__le32 altoid; /* != oid <==> snapshot or
broken mirror exists */
+ __le32 capacityhigh;
};
#define FSCS_NOTCLEAN 0x0001 /* fsck is necessary before mounting */
#define AifCmdJobProgress 2 /* Progress report */
#define AifJobCtrZero 101 /* Array Zero progress */
#define AifJobStsSuccess 1 /* Job completes */
+#define AifJobStsRunning 102 /* Job running */
#define AifCmdAPIReport 3 /* Report from other user of API */
#define AifCmdDriverNotify 4 /* Notify host driver of event */
#define AifDenMorphComplete 200 /* A morph operation completed */
struct aac_driver_ident* aac_get_driver_ident(int devtype);
int aac_get_adapter_info(struct aac_dev* dev);
int aac_send_shutdown(struct aac_dev *dev);
+int probe_container(struct aac_dev *dev, int cid);
extern int numacb;
extern int acbsize;
extern char aac_driver_version[];
fibctx,
sizeof(struct aac_close),
FsaNormal,
- 1, 1,
+ -2 /* Timeout silently */, 1,
NULL, NULL);
if (status == 0)
dev->max_fib_size = sizeof(struct hw_fib);
dev->sg_tablesize = host->sg_tablesize = (dev->max_fib_size
- sizeof(struct aac_fibhdr)
- - sizeof(struct aac_write) + sizeof(struct sgmap))
- / sizeof(struct sgmap);
+ - sizeof(struct aac_write) + sizeof(struct sgentry))
+ / sizeof(struct sgentry);
+ dev->raw_io_64 = 0;
+ if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES,
+ 0, 0, 0, 0, 0, 0, status+0, status+1, status+2, NULL, NULL)) &&
+ (status[0] == 0x00000001)) {
+ if (status[1] & AAC_OPT_NEW_COMM_64)
+ dev->raw_io_64 = 1;
+ }
if ((!aac_adapter_sync_cmd(dev, GET_COMM_PREFERRED_SETTINGS,
0, 0, 0, 0, 0, 0,
status+0, status+1, status+2, status+3, status+4))
dev->max_fib_size = 512;
dev->sg_tablesize = host->sg_tablesize
= (512 - sizeof(struct aac_fibhdr)
- - sizeof(struct aac_write) + sizeof(struct sgmap))
- / sizeof(struct sgmap);
+ - sizeof(struct aac_write) + sizeof(struct sgentry))
+ / sizeof(struct sgentry);
host->can_queue = AAC_NUM_IO_FIB;
} else if (acbsize == 2048) {
host->max_sectors = 512;
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
#include <asm/semaphore.h>
+#include <asm/delay.h>
#include "aacraid.h"
/* Interrupt Moderation, only interrupt for first two entries */
if (idx != le32_to_cpu(*(q->headers.consumer))) {
if (--idx == 0) {
- if (qid == AdapHighCmdQueue)
- idx = ADAP_HIGH_CMD_ENTRIES;
- else if (qid == AdapNormCmdQueue)
+ if (qid == AdapNormCmdQueue)
idx = ADAP_NORM_CMD_ENTRIES;
- else if (qid == AdapHighRespQueue)
- idx = ADAP_HIGH_RESP_ENTRIES;
- else if (qid == AdapNormRespQueue)
+ else
idx = ADAP_NORM_RESP_ENTRIES;
}
if (idx != le32_to_cpu(*(q->headers.consumer)))
*nonotify = 1;
}
- if (qid == AdapHighCmdQueue) {
- if (*index >= ADAP_HIGH_CMD_ENTRIES)
- *index = 0;
- } else if (qid == AdapNormCmdQueue) {
+ if (qid == AdapNormCmdQueue) {
if (*index >= ADAP_NORM_CMD_ENTRIES)
*index = 0; /* Wrap to front of the Producer Queue. */
- }
- else if (qid == AdapHighRespQueue)
- {
- if (*index >= ADAP_HIGH_RESP_ENTRIES)
- *index = 0;
- }
- else if (qid == AdapNormRespQueue)
- {
+ } else {
if (*index >= ADAP_NORM_RESP_ENTRIES)
*index = 0; /* Wrap to front of the Producer Queue. */
}
- else {
- printk("aacraid: invalid qid\n");
- BUG();
- }
if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) { /* Queue is full */
printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
{
struct aac_entry * entry = NULL;
int map = 0;
- struct aac_queue * q = &dev->queues->queue[qid];
-
- spin_lock_irqsave(q->lock, q->SavedIrql);
- if (qid == AdapHighCmdQueue || qid == AdapNormCmdQueue)
- {
+ if (qid == AdapNormCmdQueue) {
/* if no entries wait for some if caller wants to */
while (!aac_get_entry(dev, qid, &entry, index, nonotify))
{
*/
entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
map = 1;
- }
- else if (qid == AdapHighRespQueue || qid == AdapNormRespQueue)
- {
+ } else {
while(!aac_get_entry(dev, qid, &entry, index, nonotify))
{
/* if no entries wait for some if caller wants to */
return 0;
}
-
-/**
- * aac_insert_entry - insert a queue entry
- * @dev: Adapter
- * @index: Index of entry to insert
- * @qid: Queue number
- * @nonotify: Suppress adapter notification
- *
- * Gets the next free QE off the requested priorty adapter command
- * queue and associates the Fib with the QE. The QE represented by
- * index is ready to insert on the queue when this routine returns
- * success.
- */
-
-static int aac_insert_entry(struct aac_dev * dev, u32 index, u32 qid, unsigned long nonotify)
-{
- struct aac_queue * q = &dev->queues->queue[qid];
-
- if(q == NULL)
- BUG();
- *(q->headers.producer) = cpu_to_le32(index + 1);
- spin_unlock_irqrestore(q->lock, q->SavedIrql);
-
- if (qid == AdapHighCmdQueue ||
- qid == AdapNormCmdQueue ||
- qid == AdapHighRespQueue ||
- qid == AdapNormRespQueue)
- {
- if (!nonotify)
- aac_adapter_notify(dev, qid);
- }
- else
- printk("Suprise insert!\n");
- return 0;
-}
-
/*
* Define the highest level of host to adapter communication routines.
* These routines will support host to adapter FS commuication. These
int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority, int wait, int reply, fib_callback callback, void * callback_data)
{
u32 index;
- u32 qid;
struct aac_dev * dev = fibptr->dev;
unsigned long nointr = 0;
struct hw_fib * hw_fib = fibptr->hw_fib;
struct aac_queue * q;
unsigned long flags = 0;
+ unsigned long qflags;
+
if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
return -EBUSY;
/*
 * Get a queue entry, connect the FIB to it and notify
 * the adapter that a command is ready.
*/
- if (priority == FsaHigh) {
- hw_fib->header.XferState |= cpu_to_le32(HighPriority);
- qid = AdapHighCmdQueue;
- } else {
- hw_fib->header.XferState |= cpu_to_le32(NormalPriority);
- qid = AdapNormCmdQueue;
- }
- q = &dev->queues->queue[qid];
+ hw_fib->header.XferState |= cpu_to_le32(NormalPriority);
- if(wait)
- spin_lock_irqsave(&fibptr->event_lock, flags);
- if(aac_queue_get( dev, &index, qid, hw_fib, 1, fibptr, &nointr)<0)
- return -EWOULDBLOCK;
- dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n",index));
- dprintk((KERN_DEBUG "Fib contents:.\n"));
- dprintk((KERN_DEBUG " Command = %d.\n", hw_fib->header.Command));
- dprintk((KERN_DEBUG " XferState = %x.\n", hw_fib->header.XferState));
- dprintk((KERN_DEBUG " hw_fib va being sent=%p\n",fibptr->hw_fib));
- dprintk((KERN_DEBUG " hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
- dprintk((KERN_DEBUG " fib being sent=%p\n",fibptr));
/*
* Fill in the Callback and CallbackContext if we are not
* going to wait.
fibptr->callback = callback;
fibptr->callback_data = callback_data;
}
- FIB_COUNTER_INCREMENT(aac_config.FibsSent);
- list_add_tail(&fibptr->queue, &q->pendingq);
- q->numpending++;
fibptr->done = 0;
fibptr->flags = 0;
- if(aac_insert_entry(dev, index, qid, (nointr & aac_config.irq_mod)) < 0)
- return -EWOULDBLOCK;
+ FIB_COUNTER_INCREMENT(aac_config.FibsSent);
+
+ dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n",index));
+ dprintk((KERN_DEBUG "Fib contents:.\n"));
+ dprintk((KERN_DEBUG " Command = %d.\n", hw_fib->header.Command));
+ dprintk((KERN_DEBUG " XferState = %x.\n", hw_fib->header.XferState));
+ dprintk((KERN_DEBUG " hw_fib va being sent=%p\n",fibptr->hw_fib));
+ dprintk((KERN_DEBUG " hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
+ dprintk((KERN_DEBUG " fib being sent=%p\n",fibptr));
+
+ q = &dev->queues->queue[AdapNormCmdQueue];
+
+ if(wait)
+ spin_lock_irqsave(&fibptr->event_lock, flags);
+ spin_lock_irqsave(q->lock, qflags);
+ aac_queue_get( dev, &index, AdapNormCmdQueue, hw_fib, 1, fibptr, &nointr);
+
+ list_add_tail(&fibptr->queue, &q->pendingq);
+ q->numpending++;
+ *(q->headers.producer) = cpu_to_le32(index + 1);
+ spin_unlock_irqrestore(q->lock, qflags);
+ if (!(nointr & aac_config.irq_mod))
+ aac_adapter_notify(dev, AdapNormCmdQueue);
/*
* If the caller wanted us to wait for response wait now.
*/
if (wait) {
spin_unlock_irqrestore(&fibptr->event_lock, flags);
- down(&fibptr->event_wait);
+	 /* Only set for first known interruptible command */
+ if (wait < 0) {
+ /*
+ * *VERY* Dangerous to time out a command, the
+ * assumption is made that we have no hope of
+ * functioning because an interrupt routing or other
+ * hardware failure has occurred.
+ */
+ unsigned long count = 36000000L; /* 3 minutes */
+ unsigned long qflags;
+ while (down_trylock(&fibptr->event_wait)) {
+ if (--count == 0) {
+ spin_lock_irqsave(q->lock, qflags);
+ q->numpending--;
+ list_del(&fibptr->queue);
+ spin_unlock_irqrestore(q->lock, qflags);
+ if (wait == -1) {
+ printk(KERN_ERR "aacraid: fib_send: first asynchronous command timed out.\n"
+ "Usually a result of a PCI interrupt routing problem;\n"
+ "update mother board BIOS or consider utilizing one of\n"
+ "the SAFE mode kernel options (acpi, apic etc)\n");
+ }
+ return -ETIMEDOUT;
+ }
+ udelay(5);
+ }
+ } else
+ down(&fibptr->event_wait);
if(fibptr->done == 0)
BUG();
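
The wait < 0 path may not block forever: if the very first
interrupt-driven command never completes, the assumption is that
interrupt delivery itself is broken, so the semaphore is polled with a
bounded busy-wait (36,000,000 iterations at 5 us each is the 3 minutes
in the comment) and the fib is unhooked from the pending queue before
giving up. The skeleton of that trylock-with-timeout pattern:

    unsigned long count = 36000000L;              /* 5 us per spin ~ 3 minutes */

    while (down_trylock(&fibptr->event_wait)) {   /* 0 means acquired */
            if (--count == 0) {
                    /* undo the queued state under q->lock, then: */
                    return -ETIMEDOUT;
            }
            udelay(5);
    }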
case HostNormCmdQueue:
notify = HostNormCmdNotFull;
break;
- case HostHighCmdQueue:
- notify = HostHighCmdNotFull;
- break;
case HostNormRespQueue:
notify = HostNormRespNotFull;
break;
- case HostHighRespQueue:
- notify = HostHighRespNotFull;
- break;
default:
BUG();
return;
{
struct hw_fib * hw_fib = fibptr->hw_fib;
struct aac_dev * dev = fibptr->dev;
+ struct aac_queue * q;
unsigned long nointr = 0;
- if (hw_fib->header.XferState == 0)
+ unsigned long qflags;
+
+ if (hw_fib->header.XferState == 0) {
return 0;
+ }
/*
* If we plan to do anything check the structure type first.
*/
* send the completed cdb to the adapter.
*/
if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
+ u32 index;
hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
- if (hw_fib->header.XferState & cpu_to_le32(HighPriority)) {
- u32 index;
- if (size)
- {
- size += sizeof(struct aac_fibhdr);
- if (size > le16_to_cpu(hw_fib->header.SenderSize))
- return -EMSGSIZE;
- hw_fib->header.Size = cpu_to_le16(size);
- }
- if(aac_queue_get(dev, &index, AdapHighRespQueue, hw_fib, 1, NULL, &nointr) < 0) {
- return -EWOULDBLOCK;
- }
- if (aac_insert_entry(dev, index, AdapHighRespQueue, (nointr & (int)aac_config.irq_mod)) != 0) {
- }
- } else if (hw_fib->header.XferState &
- cpu_to_le32(NormalPriority)) {
- u32 index;
-
- if (size) {
- size += sizeof(struct aac_fibhdr);
- if (size > le16_to_cpu(hw_fib->header.SenderSize))
- return -EMSGSIZE;
- hw_fib->header.Size = cpu_to_le16(size);
- }
- if (aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr) < 0)
- return -EWOULDBLOCK;
- if (aac_insert_entry(dev, index, AdapNormRespQueue, (nointr & (int)aac_config.irq_mod)) != 0)
- {
- }
+ if (size) {
+ size += sizeof(struct aac_fibhdr);
+ if (size > le16_to_cpu(hw_fib->header.SenderSize))
+ return -EMSGSIZE;
+ hw_fib->header.Size = cpu_to_le16(size);
}
+ q = &dev->queues->queue[AdapNormRespQueue];
+ spin_lock_irqsave(q->lock, qflags);
+ aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr);
+ *(q->headers.producer) = cpu_to_le32(index + 1);
+ spin_unlock_irqrestore(q->lock, qflags);
+ if (!(nointr & (int)aac_config.irq_mod))
+ aac_adapter_notify(dev, AdapNormRespQueue);
}
else
{
memset(cp, 0, 256);
}
+
+/**
+ * aac_handle_aif - Handle a message from the firmware
+ * @dev: Which adapter this fib is from
+ * @fibptr: Pointer to fibptr from adapter
+ *
+ * This routine handles a driver notify fib from the adapter and
+ * dispatches it to the appropriate routine for handling.
+ */
+
+static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
+{
+ struct hw_fib * hw_fib = fibptr->hw_fib;
+ struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data;
+ int busy;
+ u32 container;
+ struct scsi_device *device;
+ enum {
+ NOTHING,
+ DELETE,
+ ADD,
+ CHANGE
+ } device_config_needed;
+
+ /* Sniff for container changes */
+
+ if (!dev)
+ return;
+ container = (u32)-1;
+
+ /*
+ * We have set this up to try to minimize the number of
+ * re-configures that take place. As a result, when certain
+ * AIFs come in we set a flag and wait for another type of
+ * AIF before setting the re-config flag.
+ */
+ switch (le32_to_cpu(aifcmd->command)) {
+ case AifCmdDriverNotify:
+ switch (le32_to_cpu(((u32 *)aifcmd->data)[0])) {
+ /*
+ * Morph or Expand complete
+ */
+ case AifDenMorphComplete:
+ case AifDenVolumeExtendComplete:
+ container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
+ if (container >= dev->maximum_num_containers)
+ break;
+
+ /*
+ * Find the Scsi_Device associated with the SCSI
+ * address. Make sure we have the right array, and if
+ * so set the flag to initiate a new re-config once we
+ * see an AifEnConfigChange AIF come through.
+ */
+
+ if ((dev != NULL) && (dev->scsi_host_ptr != NULL)) {
+ device = scsi_device_lookup(dev->scsi_host_ptr,
+ CONTAINER_TO_CHANNEL(container),
+ CONTAINER_TO_ID(container),
+ CONTAINER_TO_LUN(container));
+ if (device) {
+ dev->fsa_dev[container].config_needed = CHANGE;
+ dev->fsa_dev[container].config_waiting_on = AifEnConfigChange;
+ scsi_device_put(device);
+ }
+ }
+ }
+
+ /*
+ * If we are waiting on something and this happens to be
+ * that thing then set the re-configure flag.
+ */
+ if (container != (u32)-1) {
+ if (container >= dev->maximum_num_containers)
+ break;
+ if (dev->fsa_dev[container].config_waiting_on ==
+ le32_to_cpu(*(u32 *)aifcmd->data))
+ dev->fsa_dev[container].config_waiting_on = 0;
+ } else for (container = 0;
+ container < dev->maximum_num_containers; ++container) {
+ if (dev->fsa_dev[container].config_waiting_on ==
+ le32_to_cpu(*(u32 *)aifcmd->data))
+ dev->fsa_dev[container].config_waiting_on = 0;
+ }
+ break;
+
+ case AifCmdEventNotify:
+ switch (le32_to_cpu(((u32 *)aifcmd->data)[0])) {
+ /*
+ * Add an Array.
+ */
+ case AifEnAddContainer:
+ container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
+ if (container >= dev->maximum_num_containers)
+ break;
+ dev->fsa_dev[container].config_needed = ADD;
+ dev->fsa_dev[container].config_waiting_on =
+ AifEnConfigChange;
+ break;
+
+ /*
+ * Delete an Array.
+ */
+ case AifEnDeleteContainer:
+ container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
+ if (container >= dev->maximum_num_containers)
+ break;
+ dev->fsa_dev[container].config_needed = DELETE;
+ dev->fsa_dev[container].config_waiting_on =
+ AifEnConfigChange;
+ break;
+
+ /*
+ * Container change detected. If we currently are not
+ * waiting on something else, setup to wait on a Config Change.
+ */
+ case AifEnContainerChange:
+ container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
+ if (container >= dev->maximum_num_containers)
+ break;
+ if (dev->fsa_dev[container].config_waiting_on)
+ break;
+ dev->fsa_dev[container].config_needed = CHANGE;
+ dev->fsa_dev[container].config_waiting_on =
+ AifEnConfigChange;
+ break;
+
+ case AifEnConfigChange:
+ break;
+
+ }
+
+ /*
+ * If we are waiting on something and this happens to be
+ * that thing then set the re-configure flag.
+ */
+ if (container != (u32)-1) {
+ if (container >= dev->maximum_num_containers)
+ break;
+ if (dev->fsa_dev[container].config_waiting_on ==
+ le32_to_cpu(*(u32 *)aifcmd->data))
+ dev->fsa_dev[container].config_waiting_on = 0;
+ } else for (container = 0;
+ container < dev->maximum_num_containers; ++container) {
+ if (dev->fsa_dev[container].config_waiting_on ==
+ le32_to_cpu(*(u32 *)aifcmd->data))
+ dev->fsa_dev[container].config_waiting_on = 0;
+ }
+ break;
+
+ case AifCmdJobProgress:
+		/*
+		 * These are job progress AIFs. When a Clear is being
+		 * done on a container it is initially created and then
+		 * hidden from the OS. When the clear completes we do not
+		 * get a config change, so we watch for the job status to
+		 * report a completed clear and then wait for a container
+		 * change.
+		 */
+
+ if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero))
+ && ((((u32 *)aifcmd->data)[6] == ((u32 *)aifcmd->data)[5])
+ || (((u32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess)))) {
+ for (container = 0;
+ container < dev->maximum_num_containers;
+ ++container) {
+ /*
+ * Stomp on all config sequencing for all
+ * containers?
+ */
+ dev->fsa_dev[container].config_waiting_on =
+ AifEnContainerChange;
+ dev->fsa_dev[container].config_needed = ADD;
+ }
+ }
+ if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero))
+ && (((u32 *)aifcmd->data)[6] == 0)
+ && (((u32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning))) {
+ for (container = 0;
+ container < dev->maximum_num_containers;
+ ++container) {
+ /*
+ * Stomp on all config sequencing for all
+ * containers?
+ */
+ dev->fsa_dev[container].config_waiting_on =
+ AifEnContainerChange;
+ dev->fsa_dev[container].config_needed = DELETE;
+ }
+ }
+ break;
+ }
+
+ device_config_needed = NOTHING;
+ for (container = 0; container < dev->maximum_num_containers;
+ ++container) {
+ if ((dev->fsa_dev[container].config_waiting_on == 0)
+ && (dev->fsa_dev[container].config_needed != NOTHING)) {
+ device_config_needed =
+ dev->fsa_dev[container].config_needed;
+ dev->fsa_dev[container].config_needed = NOTHING;
+ break;
+ }
+ }
+ if (device_config_needed == NOTHING)
+ return;
+
+	/*
+	 * If we decided that a re-configuration needs to be done,
+	 * schedule it here on the way out the door; please close the
+	 * door behind you.
+	 */
+
+ busy = 0;
+
+ /*
+ * Find the Scsi_Device associated with the SCSI address,
+ * and mark it as changed, invalidating the cache. This deals
+ * with changes to existing device IDs.
+ */
+
+ if (!dev || !dev->scsi_host_ptr)
+ return;
+ /*
+ * force reload of disk info via probe_container
+ */
+ if ((device_config_needed == CHANGE)
+ && (dev->fsa_dev[container].valid == 1))
+ dev->fsa_dev[container].valid = 2;
+ if ((device_config_needed == CHANGE) ||
+ (device_config_needed == ADD))
+ probe_container(dev, container);
+ device = scsi_device_lookup(dev->scsi_host_ptr,
+ CONTAINER_TO_CHANNEL(container),
+ CONTAINER_TO_ID(container),
+ CONTAINER_TO_LUN(container));
+ if (device) {
+ switch (device_config_needed) {
+ case DELETE:
+ scsi_remove_device(device);
+ break;
+ case CHANGE:
+ if (!dev->fsa_dev[container].valid) {
+ scsi_remove_device(device);
+ break;
+ }
+ scsi_rescan_device(&device->sdev_gendev);
+
+ default:
+ break;
+ }
+ scsi_device_put(device);
+ }
+ if (device_config_needed == ADD) {
+ scsi_add_device(dev->scsi_host_ptr,
+ CONTAINER_TO_CHANNEL(container),
+ CONTAINER_TO_ID(container),
+ CONTAINER_TO_LUN(container));
+ }
+
+}
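
aac_handle_aif() above is a small two-stage state machine per container: the first AIF records which change is needed (config_needed) and which follow-up AIF to wait for (config_waiting_on); when that follow-up arrives the wait is cleared, and the final scan picks the first container that is ready for re-configuration. A compact user-space model of that flow, with illustrative names and event values:

	#include <stdio.h>

	enum change { NOTHING, DELETE, ADD, CHANGE };

	#define EV_CONFIG_CHANGE 1
	#define NCONT 4

	static struct {
		unsigned waiting_on;	/* follow-up event id, 0 = none */
		enum change needed;
	} fsa[NCONT];

	static void aif_event(unsigned ev, unsigned cont, enum change needed)
	{
		unsigned c;

		if (needed != NOTHING && !fsa[cont].waiting_on) {
			fsa[cont].needed = needed;
			fsa[cont].waiting_on = EV_CONFIG_CHANGE;
		}
		/* stage two: the awaited event clears the wait */
		for (c = 0; c < NCONT; c++)
			if (ev && fsa[c].waiting_on == ev)
				fsa[c].waiting_on = 0;
	}

	static enum change next_reconfig(unsigned *cont)
	{
		unsigned c;

		for (c = 0; c < NCONT; c++)
			if (!fsa[c].waiting_on && fsa[c].needed != NOTHING) {
				enum change n = fsa[c].needed;

				fsa[c].needed = NOTHING;
				*cont = c;
				return n;
			}
		return NOTHING;
	}

	int main(void)
	{
		unsigned c;

		aif_event(0, 2, ADD);			/* like AifEnAddContainer */
		printf("%d\n", next_reconfig(&c));	/* 0: still waiting */
		aif_event(EV_CONFIG_CHANGE, 2, NOTHING);/* like AifEnConfigChange */
		printf("%d on %u\n", next_reconfig(&c), c); /* 2 on 2, i.e. ADD */
		return 0;
	}

Expected output is "0" (the add is still pending its config change) followed by "2 on 2" (ADD on container 2).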
+
/**
* aac_command_thread - command processing thread
* @dev: Adapter to monitor
{
struct hw_fib *hw_fib, *hw_newfib;
struct fib *fib, *newfib;
- struct aac_queue_block *queues = dev->queues;
struct aac_fib_context *fibctx;
unsigned long flags;
DECLARE_WAITQUEUE(wait, current);
* Let the DPC know it has a place to send the AIF's to.
*/
dev->aif_thread = 1;
- add_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait);
+ add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
set_current_state(TASK_INTERRUPTIBLE);
+ dprintk ((KERN_INFO "aac_command_thread start\n"));
while(1)
{
- spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
- while(!list_empty(&(queues->queue[HostNormCmdQueue].cmdq))) {
+ spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
+ while(!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
struct list_head *entry;
struct aac_aifcmd * aifcmd;
set_current_state(TASK_RUNNING);
-
- entry = queues->queue[HostNormCmdQueue].cmdq.next;
+
+ entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
list_del(entry);
-
- spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags);
+
+ spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
fib = list_entry(entry, struct fib, fiblink);
/*
* We will process the FIB here or pass it to a
aifcmd = (struct aac_aifcmd *) hw_fib->data;
if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
/* Handle Driver Notify Events */
+ aac_handle_aif(dev, fib);
*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
fib_adapter_complete(fib, (u16)sizeof(u32));
} else {
u32 time_now, time_last;
unsigned long flagv;
-
+ unsigned num;
+ struct hw_fib ** hw_fib_pool, ** hw_fib_p;
+ struct fib ** fib_pool, ** fib_p;
+
+ /* Sniff events */
+ if ((aifcmd->command ==
+ cpu_to_le32(AifCmdEventNotify)) ||
+ (aifcmd->command ==
+ cpu_to_le32(AifCmdJobProgress))) {
+ aac_handle_aif(dev, fib);
+ }
+
time_now = jiffies/HZ;
+ /*
+ * Warning: no sleep allowed while
+ * holding spinlock. We take the estimate
+ * and pre-allocate a set of fibs outside the
+ * lock.
+ */
+ num = le32_to_cpu(dev->init->AdapterFibsSize)
+ / sizeof(struct hw_fib); /* some extra */
+ spin_lock_irqsave(&dev->fib_lock, flagv);
+ entry = dev->fib_list.next;
+ while (entry != &dev->fib_list) {
+ entry = entry->next;
+ ++num;
+ }
+ spin_unlock_irqrestore(&dev->fib_lock, flagv);
+ hw_fib_pool = NULL;
+ fib_pool = NULL;
+ if (num
+ && ((hw_fib_pool = kmalloc(sizeof(struct hw_fib *) * num, GFP_KERNEL)))
+ && ((fib_pool = kmalloc(sizeof(struct fib *) * num, GFP_KERNEL)))) {
+ hw_fib_p = hw_fib_pool;
+ fib_p = fib_pool;
+ while (hw_fib_p < &hw_fib_pool[num]) {
+ if (!(*(hw_fib_p++) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL))) {
+ --hw_fib_p;
+ break;
+ }
+ if (!(*(fib_p++) = kmalloc(sizeof(struct fib), GFP_KERNEL))) {
+ kfree(*(--hw_fib_p));
+ break;
+ }
+ }
+ if ((num = hw_fib_p - hw_fib_pool) == 0) {
+ kfree(fib_pool);
+ fib_pool = NULL;
+ kfree(hw_fib_pool);
+ hw_fib_pool = NULL;
+ }
+ } else if (hw_fib_pool) {
+ kfree(hw_fib_pool);
+ hw_fib_pool = NULL;
+ }
spin_lock_irqsave(&dev->fib_lock, flagv);
entry = dev->fib_list.next;
/*
* fib, and then set the event to wake up the
* thread that is waiting for it.
*/
+ hw_fib_p = hw_fib_pool;
+ fib_p = fib_pool;
while (entry != &dev->fib_list) {
/*
* Extract the fibctx
* Warning: no sleep allowed while
* holding spinlock
*/
- hw_newfib = kmalloc(sizeof(struct hw_fib), GFP_ATOMIC);
- newfib = kmalloc(sizeof(struct fib), GFP_ATOMIC);
- if (newfib && hw_newfib) {
+ if (hw_fib_p < &hw_fib_pool[num]) {
+ hw_newfib = *hw_fib_p;
+ *(hw_fib_p++) = NULL;
+ newfib = *fib_p;
+ *(fib_p++) = NULL;
/*
* Make the copy of the FIB
*/
fibctx->count++;
/*
* Set the event to wake up the
- * thread that will waiting.
+ * thread that is waiting.
*/
up(&fibctx->wait_sem);
} else {
printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
- if(newfib)
- kfree(newfib);
- if(hw_newfib)
- kfree(hw_newfib);
}
entry = entry->next;
}
*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
fib_adapter_complete(fib, sizeof(u32));
spin_unlock_irqrestore(&dev->fib_lock, flagv);
+ /* Free up the remaining resources */
+ hw_fib_p = hw_fib_pool;
+ fib_p = fib_pool;
+ while (hw_fib_p < &hw_fib_pool[num]) {
+ if (*hw_fib_p)
+ kfree(*hw_fib_p);
+ if (*fib_p)
+ kfree(*fib_p);
+ ++fib_p;
+ ++hw_fib_p;
+ }
+ if (hw_fib_pool)
+ kfree(hw_fib_pool);
+ if (fib_pool)
+ kfree(fib_pool);
}
- spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
kfree(fib);
+ spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
}
/*
* There are no more AIF's
*/
- spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags);
+ spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
schedule();
if(signal_pending(current))
break;
set_current_state(TASK_INTERRUPTIBLE);
}
- remove_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait);
+ if (dev->queues)
+ remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
dev->aif_thread = 0;
complete_and_exit(&dev->aif_completion, 0);
+ return 0;
}
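
The allocation dance in the thread above is worth isolating: the thread may not sleep while holding dev->fib_lock, so it sizes the consumer list under the lock, pre-allocates a pool of fib copies with GFP_KERNEL outside it, hands them out under the lock, and frees whatever is left over. A user-space sketch of the same pattern (names are illustrative):

	#include <pthread.h>
	#include <stdlib.h>
	#include <string.h>

	struct msg { char payload[64]; };

	static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
	static int nconsumers;			/* protected by list_lock */

	static int broadcast(const struct msg *src)
	{
		struct msg **pool;
		int i, n, handed = 0;

		pthread_mutex_lock(&list_lock);
		n = nconsumers;			/* size the pool while locked */
		pthread_mutex_unlock(&list_lock);

		pool = calloc(n ? n : 1, sizeof(*pool));
		if (!pool)
			return -1;
		for (i = 0; i < n; i++)		/* may sleep: no lock held */
			if (!(pool[i] = malloc(sizeof(struct msg))))
				break;
		n = i;				/* keep only what we got */

		pthread_mutex_lock(&list_lock);
		for (i = 0; i < nconsumers && handed < n; i++, handed++)
			memcpy(pool[handed], src, sizeof(*src));
		pthread_mutex_unlock(&list_lock);

		/* copies handed out now belong to the consumers; free the rest */
		for (i = handed; i < n; i++)
			free(pool[i]);
		free(pool);
		return handed;
	}

	int main(void)
	{
		struct msg m = { "hello" };

		nconsumers = 3;
		return broadcast(&m) == 3 ? 0 : 1;
	}

If the list grew between the estimate and the hand-out, some consumers simply miss this round, exactly as in the driver, where the "didn't allocate NewFib" warning fires.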
unique_id++;
}
- if (pci_enable_device(pdev))
+ error = pci_enable_device(pdev);
+ if (error)
goto out;
if (pci_set_dma_mask(pdev, 0xFFFFFFFFULL) ||
shost->irq = pdev->irq;
shost->base = pci_resource_start(pdev, 0);
shost->unique_id = unique_id;
+ shost->max_cmd_len = 16;
aac = (struct aac_dev *)shost->hostdata;
aac->scsi_host_ptr = shost;
goto out_free_fibs;
aac->maximum_num_channels = aac_drivers[index].channels;
- aac_get_adapter_info(aac);
+ error = aac_get_adapter_info(aac);
+ if (error < 0)
+ goto out_deinit;
/*
* Lets override negotiations and drop the maximum SG limit to 34
printk(KERN_INFO "Adaptec %s driver (%s)\n",
AAC_DRIVERNAME, aac_driver_version);
- error = pci_module_init(&aac_pci_driver);
- if (error)
+ error = pci_register_driver(&aac_pci_driver);
+ if (error < 0)
return error;
aac_cfg_major = register_chrdev( 0, "aac", &aac_cfg_fops);
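
The init hunk replaces pci_module_init() with pci_register_driver() and checks its negative return explicitly; anything registered after it must be unwound if a later step fails. A hedged sketch of that bring-up ordering, using placeholder my_* symbols rather than the aacraid ones (2.6-era APIs):

	#include <linux/module.h>
	#include <linux/pci.h>
	#include <linux/fs.h>

	/* Placeholders only: a real driver supplies id_table, probe, fops. */
	static struct pci_driver my_pci_driver = {
		.name = "mydrv",
	};
	static struct file_operations my_fops;
	static int my_major;

	static int __init my_init(void)
	{
		int error;

		error = pci_register_driver(&my_pci_driver);
		if (error < 0)
			return error;

		/* major 0 asks the kernel to allocate a free major number */
		my_major = register_chrdev(0, "mydrv_cfg", &my_fops);
		if (my_major < 0) {
			pci_unregister_driver(&my_pci_driver);	/* unwind */
			return my_major;
		}
		return 0;
	}

	static void __exit my_exit(void)
	{
		unregister_chrdev(my_major, "mydrv_cfg");
		pci_unregister_driver(&my_pci_driver);
	}

	module_init(my_init);
	module_exit(my_exit);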
struct ahc_softc *ahc = dev_get_drvdata(dev);
u_long s;
+ if (ahc->platform_data && ahc->platform_data->host)
+ scsi_remove_host(ahc->platform_data->host);
+
ahc_lock(ahc, &s);
ahc_intr_enable(ahc, FALSE);
ahc_unlock(ahc, &s);
int i, j;
if (ahd->platform_data != NULL) {
- if (ahd->platform_data->host != NULL) {
- scsi_remove_host(ahd->platform_data->host);
- scsi_host_put(ahd->platform_data->host);
- }
-
/* destroy all of the device and target objects */
for (i = 0; i < AHD_NUM_TARGETS; i++) {
starget = ahd->platform_data->starget[i];
release_mem_region(ahd->platform_data->mem_busaddr,
0x1000);
}
+ if (ahd->platform_data->host)
+ scsi_host_put(ahd->platform_data->host);
+
free(ahd->platform_data, M_DEVBUF);
}
}
struct ahd_softc *ahd = pci_get_drvdata(pdev);
u_long s;
+ if (ahd->platform_data && ahd->platform_data->host)
+ scsi_remove_host(ahd->platform_data->host);
+
ahd_lock(ahd, &s);
ahd_intr_enable(ahd, FALSE);
ahd_unlock(ahd, &s);
int i, j;
if (ahc->platform_data != NULL) {
- if (ahc->platform_data->host != NULL) {
- scsi_remove_host(ahc->platform_data->host);
- scsi_host_put(ahc->platform_data->host);
- }
-
/* destroy all of the device and target objects */
for (i = 0; i < AHC_NUM_TARGETS; i++) {
starget = ahc->platform_data->starget[i];
0x1000);
}
+ if (ahc->platform_data->host)
+ scsi_host_put(ahc->platform_data->host);
+
free(ahc->platform_data, M_DEVBUF);
}
}
struct ahc_softc *ahc = pci_get_drvdata(pdev);
u_long s;
+ if (ahc->platform_data && ahc->platform_data->host)
+ scsi_remove_host(ahc->platform_data->host);
+
ahc_lock(ahc, &s);
ahc_intr_enable(ahc, FALSE);
ahc_unlock(ahc, &s);
transport_unregister_device(&shost->shost_gendev);
class_device_unregister(&shost->shost_classdev);
device_del(&shost->shost_gendev);
+ scsi_proc_hostdir_rm(shost->hostt);
}
EXPORT_SYMBOL(scsi_remove_host);
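
The aic7xxx and SCSI-core hunks above all enforce one teardown rule: unpublish the host first (scsi_remove_host, which now also removes the /proc directory), quiesce and free driver state next, and drop the possibly-final reference (scsi_host_put) only at the very end. A user-space model of that remove-then-put ordering; the types are illustrative, not the midlayer's:

	#include <stdio.h>
	#include <stdlib.h>

	struct host {
		int refs;
		int published;		/* visible to new users? */
	};

	static struct host *host_get(struct host *h) { h->refs++; return h; }

	static void host_put(struct host *h)
	{
		if (--h->refs == 0) {
			puts("freeing host");
			free(h);
		}
	}

	static void host_remove(struct host *h)
	{
		h->published = 0;	/* no new lookups can find it */
		puts("host unpublished");
	}

	int main(void)
	{
		struct host *h = calloc(1, sizeof(*h));

		h->refs = 1;		/* midlayer's reference */
		h->published = 1;

		host_get(h);		/* driver's own reference */
		host_remove(h);		/* 1: unpublish before teardown */
		/* ... quiesce hardware, free driver structures ... */
		host_put(h);		/* 2: drop the driver reference */
		host_put(h);		/* 3: the final put frees */
		return 0;
	}

Putting the reference before the remove, as the old ahc/ahd free paths did, risks freeing a host that lookups can still reach.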
if (shost->work_q)
destroy_workqueue(shost->work_q);
- scsi_proc_hostdir_rm(shost->hostt);
scsi_destroy_command_freelist(shost);
kfree(shost->shost_data);
if ((phba->fc_flag & FC_FABRIC) ||
((phba->fc_topology == TOPOLOGY_LOOP) &&
(phba->fc_flag & FC_PUBLIC_LOOP)))
- node_name = wwn_to_u64(phba->fc_fabparam.nodeName.wwn);
+ node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn);
else
/* fabric is local port if there is no F/FL_Port */
- node_name = wwn_to_u64(phba->fc_nodename.wwn);
+ node_name = wwn_to_u64(phba->fc_nodename.u.wwn);
spin_unlock_irq(shost->host_lock);
/* Search the mapped list for this target ID */
list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
if (starget->id == ndlp->nlp_sid) {
- node_name = wwn_to_u64(ndlp->nlp_nodename.wwn);
+ node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
break;
}
}
/* Search the mapped list for this target ID */
list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
if (starget->id == ndlp->nlp_sid) {
- port_name = wwn_to_u64(ndlp->nlp_portname.wwn);
+ port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
break;
}
}
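
wwn_to_u64(), used throughout these lpfc hunks, packs the eight big-endian WWN bytes into a single u64 so the FC transport can compare and export names as integers. A stand-alone model of that conversion, for illustration:

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t wwn_to_u64_model(const uint8_t wwn[8])
	{
		uint64_t v = 0;
		int i;

		for (i = 0; i < 8; i++)
			v = (v << 8) | wwn[i];	/* byte 0 ends up most significant */
		return v;
	}

	int main(void)
	{
		const uint8_t wwn[8] = { 0x20, 0, 0, 0x90, 0x8f, 0x12, 0x34, 0x56 };

		printf("0x%016llx\n", (unsigned long long)wwn_to_u64_model(wwn));
		return 0;
	}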
struct fc_rport_identifiers rport_ids;
/* Remote port has reappeared. Re-register w/ FC transport */
- rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.wwn);
- rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.wwn);
+ rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
+ rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
rport_ids.port_id = ndlp->nlp_DID;
rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
if (ndlp->nlp_type & NLP_FCP_TARGET)
#define NAME_CCITT_GR_TYPE 0xE
uint8_t IEEEextLsb; /* FC Word 0, bit 16:23, IEEE extended Lsb */
uint8_t IEEE[6]; /* FC IEEE address */
- };
+ } s;
uint8_t wwn[8];
- };
+ } u;
};
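
The hunk above compiles because the previously anonymous struct and union inside the name_type definition now carry the member names s and u; pre-C11 C has no anonymous struct/union members, so every access must spell out the full path (u.s.IEEE, u.wwn). A compilable illustration of the same layout (it mirrors the idea, not the exact lpfc definition):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	struct name_type {
		union {
			struct {
				uint8_t hi;
				uint8_t ieee[7];
			} s;			/* named: accessed as n.u.s.ieee */
			uint8_t wwn[8];		/* named union: n.u.wwn */
		} u;
	};

	int main(void)
	{
		struct name_type n;

		memset(&n, 0, sizeof(n));
		n.u.s.hi = 0x10;
		printf("%02x\n", n.u.wwn[0]);	/* same storage, two views */
		return 0;
	}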
struct csp {
if (phba->SerialNumber[0] == 0) {
uint8_t *outptr;
- outptr = (uint8_t *) & phba->fc_nodename.IEEE[0];
+ outptr = &phba->fc_nodename.u.s.IEEE[0];
for (i = 0; i < 12; i++) {
status = *outptr++;
j = ((status & 0xf0) >> 4);
* Must done after lpfc_sli_hba_setup()
*/
- fc_host_node_name(host) = wwn_to_u64(phba->fc_nodename.wwn);
- fc_host_port_name(host) = wwn_to_u64(phba->fc_portname.wwn);
+ fc_host_node_name(host) = wwn_to_u64(phba->fc_nodename.u.wwn);
+ fc_host_port_name(host) = wwn_to_u64(phba->fc_portname.u.wwn);
fc_host_supported_classes(host) = FC_COS_CLASS3;
memset(fc_host_supported_fc4s(host), 0,
if(islogical) {
switch (cmd->cmnd[0]) {
case TEST_UNIT_READY:
- memset(cmd->request_buffer, 0, cmd->request_bufflen);
-
#if MEGA_HAVE_CLUSTERING
/*
* Do we support clustering and is the support enabled
return NULL;
#endif
- case MODE_SENSE:
+ case MODE_SENSE: {
+ char *buf;
+
+ if (cmd->use_sg) {
+ struct scatterlist *sg;
+
+ sg = (struct scatterlist *)cmd->request_buffer;
+ buf = kmap_atomic(sg->page, KM_IRQ0) +
+ sg->offset;
+ } else
+ buf = cmd->request_buffer;
-			memset(cmd->request_buffer, 0, cmd->cmnd[4]);
+			memset(buf, 0, cmd->cmnd[4]);
+ if (cmd->use_sg) {
+ struct scatterlist *sg;
+
+ sg = (struct scatterlist *)cmd->request_buffer;
+ kunmap_atomic(buf - sg->offset, KM_IRQ0);
+ }
cmd->result = (DID_OK << 16);
cmd->scsi_done(cmd);
return NULL;
+ }
case READ_CAPACITY:
case INQUIRY:
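
The MODE_SENSE hunk above cannot assume request_buffer is directly addressable: with use_sg set it is a scatterlist whose page may live in highmem, so it must be mapped with kmap_atomic (KM_IRQ0, since this can run from the interrupt path) before the memset and unmapped afterwards. A hedged kernel-style sketch of that access pattern, with a hypothetical helper name and the 2.6-era APIs used in the hunk:

	#include <linux/highmem.h>
	#include <linux/string.h>
	#include <scsi/scsi_cmnd.h>

	/* zero_cmd_buffer is a hypothetical helper, not a megaraid symbol */
	static void zero_cmd_buffer(struct scsi_cmnd *cmd, unsigned int len)
	{
		char *buf;

		if (cmd->use_sg) {
			struct scatterlist *sg = cmd->request_buffer;

			/* KM_IRQ0: we may be running in the interrupt path */
			buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
			memset(buf, 0, len);
			kunmap_atomic(buf - sg->offset, KM_IRQ0);
		} else {
			memset(cmd->request_buffer, 0, len);
		}
	}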
static void
mega_free_scb(adapter_t *adapter, scb_t *scb)
{
+ unsigned long length;
+
switch( scb->dma_type ) {
case MEGA_DMA_TYPE_NONE:
break;
case MEGA_BULK_DATA:
+ if (scb->cmd->use_sg == 0)
+ length = scb->cmd->request_bufflen;
+ else {
+ struct scatterlist *sgl =
+ (struct scatterlist *)scb->cmd->request_buffer;
+ length = sgl->length;
+ }
pci_unmap_page(adapter->dev, scb->dma_h_bulkdata,
- scb->cmd->request_bufflen, scb->dma_direction);
+ length, scb->dma_direction);
break;
case MEGA_SGLIST:
struct scatterlist *sgl;
struct page *page;
unsigned long offset;
+ unsigned int length;
Scsi_Cmnd *cmd;
int sgcnt;
int idx;
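
The mega_free_scb() hunk fixes an unmap-length mismatch: pci_unmap_page() must be passed the length that was actually mapped, which for a one-entry scatterlist is sgl->length, not request_bufflen. A hedged sketch of the symmetric map/unmap pair (hypothetical helpers, 2.6-era DMA API):

	#include <linux/pci.h>
	#include <scsi/scsi_cmnd.h>

	/* map_bulk/unmap_bulk are illustrative, not megaraid functions */
	static dma_addr_t map_bulk(struct pci_dev *pdev, struct scsi_cmnd *cmd,
				   unsigned long *len, int dir)
	{
		struct scatterlist *sgl = cmd->request_buffer;

		*len = sgl->length;		/* remember what we mapped */
		return pci_map_page(pdev, sgl->page, sgl->offset, *len, dir);
	}

	static void unmap_bulk(struct pci_dev *pdev, dma_addr_t handle,
			       unsigned long len, int dir)
	{
		pci_unmap_page(pdev, handle, len, dir);	/* same length as map */
	}

Keeping the mapped length with the handle, rather than re-deriving it at unmap time, is what the new length variable in the hunk accomplishes.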