If you have problems with this, please do ask on the mailing list.
--
-Authors: Richard Walker, Jamie Honan, Michael Hunold, Manu Abraham
+Authors: Richard Walker,
+ Jamie Honan,
+ Michael Hunold,
+ Manu Abraham,
+ Michael Krufky
timesource is not available, it defaults to PIT.
Format: { pit | tsc | cyclone | pmtmr }
+ disable_8254_timer
+ enable_8254_timer
+ [IA32/X86_64] Disable/Enable interrupt 0 timer routing
+ over the 8254 in addition to over the IO-APIC. The
+ kernel tries to set a sensible default.
+
hpet= [IA-32,HPET] option to disable HPET and use PIT.
Format: disable
New name for the ramdisk parameter.
See Documentation/ramdisk.txt.
+ rcu.blimit= [KNL,BOOT] Set maximum number of finished
+ RCU callbacks to process in one batch.
+
+ rcu.qhimark= [KNL,BOOT] Set threshold of queued
+ RCU callbacks over which batch limiting is disabled.
+
+ rcu.qlowmark= [KNL,BOOT] Set threshold of queued
+ RCU callbacks below which batch limiting is re-enabled.
+
+ rcu.rsinterval= [KNL,BOOT,SMP] Set the number of additional
+ RCU callbacks to queued before forcing reschedule
+ on all cpus.
+
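As an example, all four parameters can be given together on the boot
command line (the values here are illustrative only, not recommended
defaults):

	rcu.blimit=10 rcu.qhimark=10000 rcu.qlowmark=100 rcu.rsinterval=1000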
rdinit= [KNL]
Format: <full_path>
Run specified binary instead of /init from the ramdisk,
12 -> Medion 7134 [16be:0003]
13 -> Typhoon TV+Radio 90031
14 -> ELSA EX-VISION 300TV [1048:226b]
- 15 -> ELSA EX-VISION 500TV [1048:226b]
+ 15 -> ELSA EX-VISION 500TV [1048:226a]
16 -> ASUS TV-FM 7134 [1043:4842,1043:4830,1043:4840]
17 -> AOPEN VA1000 POWER [1131:7133]
18 -> BMK MPEX No Tuner
74 -> LifeView FlyTV Platinum Mini2 [14c0:1212]
75 -> AVerMedia AVerTVHD MCE A180 [1461:1044]
76 -> SKNet MonsterTV Mobile [1131:4ee9]
- 77 -> Pinnacle PCTV 110i (saa7133) [11bd:002e]
+ 77 -> Pinnacle PCTV 40i/50i/110i (saa7133) [11bd:002e]
78 -> ASUSTeK P7131 Dual [1043:4862]
79 -> Sedna/MuchTV PC TV Cardbus TV/Radio (ITO25 Rev:2B)
80 -> ASUS Digimatrix TV [1043:0210]
DVB SUBSYSTEM AND DRIVERS
P: LinuxTV.org Project
-M: mchehab@infradead.org
M: v4l-dvb-maintainer@linuxtv.org
L: linux-dvb@linuxtv.org (subscription required)
W: http://linuxtv.org/
choice
prompt "ARM system type"
- default ARCH_RPC
+ default ARCH_VERSATILE
config ARCH_CLPS7500
bool "Cirrus-CL-PS7500FE"
void timer_dyn_reprogram(void)
{
struct dyn_tick_timer *dyn_tick = system_timer->dyn_tick;
+ unsigned long next, seq;
- if (dyn_tick) {
- write_seqlock(&xtime_lock);
- if (dyn_tick->state & DYN_TICK_ENABLED)
+ if (dyn_tick && (dyn_tick->state & DYN_TICK_ENABLED)) {
+ next = next_timer_interrupt();
+ do {
+ seq = read_seqbegin(&xtime_lock);
-		dyn_tick->reprogram(next_timer_interrupt() - jiffies);
+			dyn_tick->reprogram(next - jiffies);
- write_sequnlock(&xtime_lock);
+ } while (read_seqretry(&xtime_lock, seq));
}
}
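The rewrite above turns a seqlock writer into a lockless reader. The
general reader pattern it follows, in sketch form:

	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);
		/* read (or act on) the protected values */
	} while (read_seqretry(&xtime_lock, seq));

The loop body is retried whenever a writer touched xtime_lock in the
meantime, so it must be safe to repeat; reprogramming a one-shot timer
qualifies.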
static void __exit nas100d_power_exit(void)
{
+	if (!machine_is_nas100d())
+ return;
+
free_irq(NAS100D_RB_IRQ, NULL);
}
add r0, r0, #PAGE_SZ
cmp r0, r1
blo 1b
+ mcr p15, 0, r2, c7, c10, 4 @ data synchronization barrier
mov pc, lr
.section ".text.init", #alloc, #execinstr
#include <asm/setup.h>
#include <asm/pgtable.h>
+void (*pm_power_off)(void) = NULL;
+EXPORT_SYMBOL(pm_power_off);
+
asmlinkage void ret_from_fork(void);
/*
obj-y += cpu/
obj-y += timers/
-obj-$(CONFIG_ACPI) += acpi/
+obj-y += acpi/
obj-$(CONFIG_X86_BIOS_REBOOT) += reboot.o
obj-$(CONFIG_MCA) += mca.o
obj-$(CONFIG_X86_MSR) += msr.o
-obj-y := boot.o
+obj-$(CONFIG_ACPI) += boot.o
obj-$(CONFIG_X86_IO_APIC) += earlyquirk.o
obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup.o
disable_acpi();
return error;
}
-#ifdef __i386__
- check_acpi_pci();
-#endif
acpi_table_parse(ACPI_BOOT, acpi_parse_sbf);
#include <linux/pci.h>
#include <asm/pci-direct.h>
#include <asm/acpi.h>
+#include <asm/apic.h>
static int __init check_bridge(int vendor, int device)
{
+#ifdef CONFIG_ACPI
/* According to Nvidia all timer overrides are bogus. Just ignore
them all. */
if (vendor == PCI_VENDOR_ID_NVIDIA) {
acpi_skip_timer_override = 1;
}
+#endif
+ if (vendor == PCI_VENDOR_ID_ATI && timer_over_8254 == 1) {
+ timer_over_8254 = 0;
+ printk(KERN_INFO "ATI board detected. Disabling timer routing "
+ "over 8254.\n");
+ }
return 0;
}
c->x86_capability[4] = excap;
c->x86 = (tfms >> 8) & 15;
c->x86_model = (tfms >> 4) & 15;
- if (c->x86 == 0xf) {
+ if (c->x86 == 0xf)
c->x86 += (tfms >> 20) & 0xff;
+ if (c->x86 >= 0x6)
c->x86_model += ((tfms >> 16) & 0xF) << 4;
- }
c->x86_mask = tfms & 15;
} else {
/* Have CPUID level 0 only - unheard of */
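The decode above follows the CPUID leaf 1 EAX layout; as a standalone
sketch (the names are descriptive, not kernel identifiers):

	unsigned int eax = tfms;	/* CPUID leaf 1, EAX */
	unsigned int family = (eax >> 8) & 0xf;
	unsigned int model  = (eax >> 4) & 0xf;

	if (family == 0xf)		/* add extended family bits */
		family += (eax >> 20) & 0xff;
	if (family >= 0x6)		/* merge extended model bits */
		model += ((eax >> 16) & 0xf) << 4;

The point of the fix is that the extended model bits apply from family 6
upward, not only to family 0xf as the old bracing assumed.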
{
unsigned long cr4;
unsigned long temp;
+ struct Xgt_desc_struct *cpu_gdt_descr;
spin_lock(&efi_rt_lock);
local_irq_save(efi_rt_eflags);
+ cpu_gdt_descr = &per_cpu(cpu_gdt_descr, 0);
+
/*
* If I don't have PSE, I should just duplicate two entries in page
* directory. If I have PSE, I just need to duplicate one entry in
*/
local_flush_tlb();
- per_cpu(cpu_gdt_descr, 0).address =
- __pa(per_cpu(cpu_gdt_descr, 0).address);
- load_gdt((struct Xgt_desc_struct *)__pa(&per_cpu(cpu_gdt_descr, 0)));
+ cpu_gdt_descr->address = __pa(cpu_gdt_descr->address);
+ load_gdt(cpu_gdt_descr);
}
static void efi_call_phys_epilog(void)
{
unsigned long cr4;
+ struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, 0);
- per_cpu(cpu_gdt_descr, 0).address =
- (unsigned long)__va(per_cpu(cpu_gdt_descr, 0).address);
- load_gdt((struct Xgt_desc_struct *)__va(&per_cpu(cpu_gdt_descr, 0)));
+ cpu_gdt_descr->address = __va(cpu_gdt_descr->address);
+ load_gdt(cpu_gdt_descr);
cr4 = read_cr4();
static DEFINE_SPINLOCK(ioapic_lock);
+int timer_over_8254 __initdata = 1;
+
/*
* Is the SiS APIC rmw bug present ?
* -1 = don't know, 0 = no, 1 = yes
apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
init_8259A(1);
timer_ack = 1;
- enable_8259A_irq(0);
+ if (timer_over_8254 > 0)
+ enable_8259A_irq(0);
pin1 = find_isa_irq_pin(0, mp_INT);
apic1 = find_isa_irq_apic(0, mp_INT);
print_IO_APIC();
}
+static int __init setup_disable_8254_timer(char *s)
+{
+ timer_over_8254 = -1;
+ return 1;
+}
+static int __init setup_enable_8254_timer(char *s)
+{
+ timer_over_8254 = 2;
+ return 1;
+}
+
+__setup("disable_8254_timer", setup_disable_8254_timer);
+__setup("enable_8254_timer", setup_enable_8254_timer);
+
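__setup() registers a handler that runs during early command-line
parsing; returning 1 marks the option as consumed. A minimal sketch of
the pattern (the flag name is hypothetical):

	static int myflag;

	static int __init setup_myflag(char *str)
	{
		myflag = 1;
		return 1;	/* option handled */
	}
	__setup("myflag", setup_myflag);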
/*
 * Called after all the initialization is done. If we didn't find any
* APIC bugs then we can allow the modify fast path
__asm__ __volatile__ (
"\tljmp $"STR(__KERNEL_CS)",$1f\n"
"\t1:\n"
- "\tmovl $"STR(__KERNEL_DS)",%eax\n"
- "\tmovl %eax,%ds\n"
- "\tmovl %eax,%es\n"
- "\tmovl %eax,%fs\n"
- "\tmovl %eax,%gs\n"
- "\tmovl %eax,%ss\n"
- );
+ "\tmovl $"STR(__KERNEL_DS)",%%eax\n"
+ "\tmovl %%eax,%%ds\n"
+ "\tmovl %%eax,%%es\n"
+ "\tmovl %%eax,%%fs\n"
+ "\tmovl %%eax,%%gs\n"
+ "\tmovl %%eax,%%ss\n"
+ ::: "eax", "memory");
#undef STR
#undef __STR
}
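The underlying rule: once an asm() statement has operand/clobber
sections, '%' becomes an escape character, so literal register names
must be written '%%'. Compare:

	asm("movl %eax, %ebx");			/* basic asm: single '%' */

	asm volatile("xorl %%eax, %%eax"	/* extended asm: doubled */
		     ::: "eax", "memory");

The added "eax" and "memory" clobbers also tell the compiler what the
original, unannotated block was silently destroying.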
wrmsr(base+i, 0, 0);
}
-static inline void write_watchdog_counter(const char *descr)
+static void write_watchdog_counter(const char *descr)
{
u64 count = (u64)cpu_khz * 1000;
* die_nmi will return ONLY if NOTIFY_STOP happens..
*/
die_nmi(regs, "NMI Watchdog detected LOCKUP");
-
+ } else {
last_irq_sums[cpu] = sum;
alert_counter[cpu] = 0;
}
if (efi_enabled)
efi_map_memmap();
+#ifdef CONFIG_X86_IO_APIC
+ check_acpi_pci(); /* Checks more than just ACPI actually */
+#endif
+
#ifdef CONFIG_ACPI
/*
* Parse the ACPI tables for possible boot-time SMP configuration.
write_seqlock_irqsave(&xtime_lock, flags);
xtime.tv_sec = sec;
xtime.tv_nsec = 0;
- write_sequnlock_irqrestore(&xtime_lock, flags);
- jiffies += sleep_length;
+ jiffies_64 += sleep_length;
wall_jiffies += sleep_length;
+ write_sequnlock_irqrestore(&xtime_lock, flags);
if (last_timer->resume)
last_timer->resume();
cur_timer = last_timer;
#
# Automatically generated make config: don't edit
# Linux kernel version: 2.6.16-rc5
-# Mon Feb 27 16:15:43 2006
+# Thu Mar 2 16:39:10 2006
#
#
#
# Plug and Play support
#
-# CONFIG_PNP is not set
+CONFIG_PNP=y
+# CONFIG_PNP_DEBUG is not set
+
+#
+# Protocols
+#
+CONFIG_PNPACPI=y
#
# Block devices
# IDE chipset support/bugfixes
#
CONFIG_IDE_GENERIC=y
+# CONFIG_BLK_DEV_IDEPNP is not set
CONFIG_BLK_DEV_IDEPCI=y
# CONFIG_IDEPCI_SHARE_IRQ is not set
# CONFIG_BLK_DEV_OFFBOARD is not set
# CONFIG_BONDING is not set
# CONFIG_EQUALIZER is not set
# CONFIG_TUN is not set
+# CONFIG_NET_SB1000 is not set
#
# ARCnet devices
u32* volatile cyclone_timer; /* Cyclone MPMC0 register */
if (!use_cyclone)
- return -ENODEV;
+ return 0;
printk(KERN_INFO "Summit chipset: Starting Cyclone Counter.\n");
void
mca_handler_bh(unsigned long paddr)
{
- printk(KERN_DEBUG "OS_MCA: process [pid: %d](%s) encounters MCA.\n",
- current->pid, current->comm);
+ printk(KERN_ERR
+ "OS_MCA: process [pid: %d](%s) encounters MCA (paddr=%lx)\n",
+ current->pid, current->comm, paddr);
spin_lock(&mca_bh_lock);
switch (mca_page_isolate(paddr)) {
printk(KERN_DEBUG "Page isolation: ( %lx ) success.\n", paddr);
break;
case ISOLATE_NG:
- printk(KERN_DEBUG "Page isolation: ( %lx ) failure.\n", paddr);
+ printk(KERN_CRIT "Page isolation: ( %lx ) failure.\n", paddr);
break;
default:
break;
return 0;
/*
- * If there is no bus error, record is weird but we need not to recover.
+ * The cache check and bus check bits have four possible states
+ * cc bc
+ * 0 0 Weird record, not recovered
+ * 1 0 Cache error, not recovered
+ * 0 1 I/O error, attempt recovery
+ * 1 1 Memory error, attempt recovery
*/
if (psp->bc == 0 || pbci == NULL)
- return 1;
+ return 0;
/*
* Sorry, we cannot handle so many.
static int __init sn2_ptc_init(void)
{
if (!ia64_platform_is("sn2"))
- return -ENOSYS;
+ return 0;
if (!(proc_sn2_ptc = create_proc_entry(PTC_BASENAME, 0444, NULL))) {
printk(KERN_ERR "unable to create %s proc entry", PTC_BASENAME);
int found_tiocx_device = 0;
if (!ia64_platform_is("sn2"))
- return -ENODEV;
+ return 0;
bus_register(&tiocx_bus_type);
mr r28,r6
mr r27,r7
+ /* Align the stack to 16-byte boundary for broken yaboot */
+ rldicr r1,r1,0,59
+
/* Make sure we are running in 64 bits mode */
bl .enable_64b_mode
0: srst %r2,%r1
jo 0b
sacf 0
- jh 1f # \0 found in string ?
ahi %r2,1 # strnlen_user result includes the \0
-1: slr %r2,%r3
+ # or return count+1 if \0 not found
+ slr %r2,%r3
br %r14
2: sacf 0
- lhi %r2,-EFAULT
+ slr %r2,%r2 # return 0 on exception
br %r14
.section __ex_table,"a"
.long 0b,2b
0: srst %r2,%r1
jo 0b
sacf 0
- jh 1f # \0 found in string ?
aghi %r2,1 # strnlen_user result includes the \0
-1: slgr %r2,%r3
+ # or return count+1 if \0 not found
+ slgr %r2,%r3
br %r14
2: sacf 0
- lghi %r2,-EFAULT
+ slgr %r2,%r2 # return 0 on exception
br %r14
.section __ex_table,"a"
.quad 0b,2b
nop
nop
- .section __ex_table
+ .section __ex_table,"a"
.align 4
.word 1b, __retl_efault, 2b, __retl_efault
.word 3b, __retl_efault, 4b, __retl_efault
mov 0, %o0
.size __do_int_store, .-__do_int_store
- .section __ex_table
+ .section __ex_table,"a"
.word 4b, __retl_efault
.word 5b, __retl_efault
.word 6b, __retl_efault
mov 0, %o0
.size __do_int_load, .-__do_int_load
- .section __ex_table
+ .section __ex_table,"a"
.word 4b, __retl_efault
.word 5b, __retl_efault
.word 6b, __retl_efault
.align 4; \
99: retl; \
mov 1, %o0; \
- .section __ex_table; \
+ .section __ex_table,"a";\
.align 4; \
.word 98b, 99b; \
.text; \
.align 4; \
99: retl; \
mov 1, %o0; \
- .section __ex_table; \
+ .section __ex_table,"a";\
.align 4; \
.word 98b, 99b; \
.text; \
.align 4; \
99: retl; \
mov 1, %o0; \
- .section __ex_table; \
+ .section __ex_table,"a";\
.align 4; \
.word 98b, 99b; \
.text; \
.align 4; \
99: retl; \
mov 1, %o0; \
- .section __ex_table; \
+ .section __ex_table,"a";\
.align 4; \
.word 98b, 99b; \
.text; \
.align 4; \
99: retl; \
mov %o1, %o0; \
- .section __ex_table; \
+ .section __ex_table,"a";\
.align 4; \
.word 98b, 99b; \
.text; \
.align 4; \
99: retl; \
mov 1, %o0; \
- .section __ex_table; \
+ .section __ex_table,"a";\
.align 4; \
.word 98b, 99b; \
.text; \
.align 4; \
99: retl; \
mov -1, %o0; \
- .section __ex_table; \
+ .section __ex_table,"a";\
.align 4; \
.word 98b, 99b; \
.text; \
.align 4; \
99: retl; \
mov -1, %o0; \
- .section __ex_table; \
+ .section __ex_table,"a";\
.align 4; \
.word 98b, 99b; \
.text; \
retl
clr %o0
- .section __ex_table,#alloc
+ .section __ex_table,"a"
.align 4
.word 10b, 30b
add %o2, %o3, %o0
.size __strncpy_from_user, .-__strncpy_from_user
- .section __ex_table,#alloc
+ .section __ex_table,"a"
.align 4
.word 60b, __retl_efault
.word 61b, __retl_efault
ba,pt %xcc, ret_from_solaris
nop
- .section __ex_table,#alloc
+ .section __ex_table,"a"
.align 4
.word exen, exenf
#include <asm/system.h>
#include <asm/pgtable.h>
+void (*pm_power_off)(void) = NULL;
+EXPORT_SYMBOL(pm_power_off);
+
extern void ret_from_fork (void);
"\tmovl %0,%%ss\n"
"\tmovl %0,%%fs\n"
"\tmovl %0,%%gs\n"
- : : "a" (__KERNEL_DS)
+ : : "a" (__KERNEL_DS) : "memory"
);
}
struct task_struct *current_set[NR_CPUS] = {&init_task, };
+void (*pm_power_off)(void) = NULL;
+EXPORT_SYMBOL(pm_power_off);
+
#if XCHAL_CP_NUM > 0
* Different hardware can have different requirements as to what pages
* it can do I/O directly to. A low level driver can call
* blk_queue_bounce_limit to have lower memory pages allocated as bounce
- * buffers for doing I/O to pages residing above @page. By default
- * the block layer sets this to the highest numbered "low" memory page.
+ * buffers for doing I/O to pages residing above @page.
**/
void blk_queue_bounce_limit(request_queue_t *q, u64 dma_addr)
{
unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT;
-
- /*
- * set appropriate bounce gfp mask -- unfortunately we don't have a
- * full 4GB zone, so we have to resort to low memory for any bounces.
- * ISA has its own < 16MB zone.
- */
- if (bounce_pfn < blk_max_low_pfn) {
- BUG_ON(dma_addr < BLK_BOUNCE_ISA);
+ int dma = 0;
+
+ q->bounce_gfp = GFP_NOIO;
+#if BITS_PER_LONG == 64
+ /* Assume anything <= 4GB can be handled by IOMMU.
+ Actually some IOMMUs can handle everything, but I don't
+ know of a way to test this here. */
+ if (bounce_pfn < (0xffffffff>>PAGE_SHIFT))
+ dma = 1;
+ q->bounce_pfn = max_low_pfn;
+#else
+ if (bounce_pfn < blk_max_low_pfn)
+ dma = 1;
+ q->bounce_pfn = bounce_pfn;
+#endif
+ if (dma) {
init_emergency_isa_pool();
q->bounce_gfp = GFP_NOIO | GFP_DMA;
- } else
- q->bounce_gfp = GFP_NOIO;
-
- q->bounce_pfn = bounce_pfn;
+ q->bounce_pfn = bounce_pfn;
+ }
}
EXPORT_SYMBOL(blk_queue_bounce_limit);
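A driver calls this from its queue setup path to cap the highest
address it can DMA to, for example with the standard constants from
<linux/blkdev.h>:

	/* bounce any page above low memory */
	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);

	/* ISA-style device: bounce anything above 16MB */
	blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);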
}
-static int __init
+static int __devinit
fore200e_pca_map(struct fore200e* fore200e)
{
DPRINTK(2, "device %s being mapped in memory\n", fore200e->name);
}
-static int __init
+static int __devinit
fore200e_pca_configure(struct fore200e* fore200e)
{
struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev;
}
-static int __init
+static int __devinit
fore200e_irq_request(struct fore200e* fore200e)
{
if (request_irq(fore200e->irq, fore200e_interrupt, SA_SHIRQ, fore200e->name, fore200e->atm_dev) < 0) {
}
-static int __init
+static int __devinit
fore200e_get_esi(struct fore200e* fore200e)
{
struct prom_data* prom = fore200e_kmalloc(sizeof(struct prom_data), GFP_KERNEL | GFP_DMA);
}
-static int __init
+static int __devinit
fore200e_alloc_rx_buf(struct fore200e* fore200e)
{
int scheme, magn, nbr, size, i;
}
-static int __init
+static int __devinit
fore200e_init_bs_queue(struct fore200e* fore200e)
{
int scheme, magn, i;
}
-static int __init
+static int __devinit
fore200e_init_rx_queue(struct fore200e* fore200e)
{
struct host_rxq* rxq = &fore200e->host_rxq;
}
-static int __init
+static int __devinit
fore200e_init_tx_queue(struct fore200e* fore200e)
{
struct host_txq* txq = &fore200e->host_txq;
}
-static int __init
+static int __devinit
fore200e_init_cmd_queue(struct fore200e* fore200e)
{
struct host_cmdq* cmdq = &fore200e->host_cmdq;
}
-static int __init
+static int __devinit
fore200e_initialize(struct fore200e* fore200e)
{
struct cp_queues __iomem * cpq;
}
-static void __init
+static void __devinit
fore200e_monitor_putc(struct fore200e* fore200e, char c)
{
struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
}
-static int __init
+static int __devinit
fore200e_monitor_getc(struct fore200e* fore200e)
{
struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
}
-static void __init
+static void __devinit
fore200e_monitor_puts(struct fore200e* fore200e, char* str)
{
while (*str) {
}
-static int __init
+static int __devinit
fore200e_start_fw(struct fore200e* fore200e)
{
int ok;
}
-static int __init
+static int __devinit
fore200e_load_fw(struct fore200e* fore200e)
{
u32* fw_data = (u32*) fore200e->bus->fw_data;
}
-static int __init
+static int __devinit
fore200e_register(struct fore200e* fore200e)
{
struct atm_dev* atm_dev;
}
-static int __init
+static int __devinit
fore200e_init(struct fore200e* fore200e)
{
if (fore200e_register(fore200e) < 0)
return -EBUSY;
fore200e_supply(fore200e);
-
+
/* all done, board initialization is now complete */
fore200e->state = FORE200E_STATE_COMPLETE;
return 0;
#include <linux/timer.h>
#include <linux/pci.h>
#include <linux/init.h>
+#include <linux/random.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include "DAC960.h"
Command->SegmentCount, Command->DmaDirection);
if (!end_that_request_first(Request, UpToDate, Command->BlockCount)) {
-
+ add_disk_randomness(Request->rq_disk);
end_that_request_last(Request, UpToDate);
if (Command->Completion) {
rng_hw_none,
rng_hw_intel,
rng_hw_amd,
+#ifdef __i386__
rng_hw_via,
+#endif
rng_hw_geode,
};
cnodeid_t node, maxn = -1;
if (!ia64_platform_is("sn2"))
- return -1;
+ return 0;
/*
* Sanity check the cycles/sec variable
spin_lock_irq(target->scsi_host->host_lock);
+ if (target->state == SRP_TARGET_DEAD ||
+ target->state == SRP_TARGET_REMOVED) {
+ scmnd->result = DID_BAD_TARGET << 16;
+ goto out;
+ }
+
if (scmnd->host_scribble == (void *) -1L)
goto out;
{PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00B, PCI_ANY_ID, PCI_ANY_ID},
{PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00C, PCI_ANY_ID, PCI_ANY_ID},
{PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B100, PCI_ANY_ID, PCI_ANY_ID},
+ {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B700, PCI_ANY_ID, PCI_ANY_ID},
+ {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B701, PCI_ANY_ID, PCI_ANY_ID},
{PCI_VENDOR_ID_ABOCOM, PCI_DEVICE_ID_ABOCOM_2BD1, PCI_ANY_ID, PCI_ANY_ID},
{PCI_VENDOR_ID_ASUSTEK, PCI_DEVICE_ID_ASUSTEK_0675, PCI_ANY_ID, PCI_ANY_ID},
{PCI_VENDOR_ID_BERKOM, PCI_DEVICE_ID_BERKOM_T_CONCEPT, PCI_ANY_ID, PCI_ANY_ID},
{PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00B, "Billion", "B00B"},
{PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00C, "Billion", "B00C"},
{PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B100, "Seyeon", "B100"},
+ {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B700, "Primux II S0", "B700"},
+ {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B701, "Primux II S0 NT", "B701"},
{PCI_VENDOR_ID_ABOCOM, PCI_DEVICE_ID_ABOCOM_2BD1, "Abocom/Magitek", "2BD1"},
{PCI_VENDOR_ID_ASUSTEK, PCI_DEVICE_ID_ASUSTEK_0675, "Asuscom/Askey", "675"},
{PCI_VENDOR_ID_BERKOM, PCI_DEVICE_ID_BERKOM_T_CONCEPT, "German telekom", "T-Concept"},
/*
* hfc_usb.c
*
- * $Id: hfc_usb.c,v 4.36 2005/04/08 09:55:13 martinb1 Exp $
+ * $Id: hfc_usb.c,v 2.3.2.13 2006/02/17 17:17:22 mbachem Exp $
*
* modular HiSax ISDN driver for Colognechip HFC-S USB chip
*
#include "hfc_usb.h"
static const char *hfcusb_revision =
- "$Revision: 4.36 $ $Date: 2005/04/08 09:55:13 $ ";
+ "$Revision: 2.3.2.13 $ $Date: 2006/02/17 17:17:22 $ ";
/* Hisax debug support
* use "modprobe debug=x" where x is bitfield of USB_DBG & ISDN_DBG
for (i = 0; list[i].name != NULL; i++)
if (list[i].num == num)
return (list[i].name);
- return "<unkown ERROR>";
+ return "<unknown ERROR>";
}
hfc->ctrl_urb->transfer_buffer = NULL;
hfc->ctrl_urb->transfer_buffer_length = 0;
hfc->ctrl_write.wIndex =
- hfc->ctrl_buff[hfc->ctrl_out_idx].hfc_reg;
+ cpu_to_le16(hfc->ctrl_buff[hfc->ctrl_out_idx].hfc_reg);
hfc->ctrl_write.wValue =
- hfc->ctrl_buff[hfc->ctrl_out_idx].reg_val;
+ cpu_to_le16(hfc->ctrl_buff[hfc->ctrl_out_idx].reg_val);
usb_submit_urb(hfc->ctrl_urb, GFP_ATOMIC); /* start transfer */
}
/* init the background machinery for control requests */
hfc->ctrl_read.bRequestType = 0xc0;
hfc->ctrl_read.bRequest = 1;
- hfc->ctrl_read.wLength = 1;
+ hfc->ctrl_read.wLength = cpu_to_le16(1);
hfc->ctrl_write.bRequestType = 0x40;
hfc->ctrl_write.bRequest = 0;
hfc->ctrl_write.wLength = 0;
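The fields of a USB setup packet are little-endian on the wire, which
is why every multi-byte assignment above gains a cpu_to_le16(). A
correctly filled request looks like this (reg_addr and reg_val are
placeholders):

	struct usb_ctrlrequest req = {
		.bRequestType = 0x40,			/* vendor, host to device */
		.bRequest     = 0,
		.wValue       = cpu_to_le16(reg_val),
		.wIndex       = cpu_to_le16(reg_addr),
		.wLength      = cpu_to_le16(0),
	};

On little-endian hosts cpu_to_le16() compiles away, so the missing
conversions only misbehave on big-endian machines.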
vend_idx = 0xffff;
for (i = 0; hfcusb_idtab[i].idVendor; i++) {
- if (dev->descriptor.idVendor == hfcusb_idtab[i].idVendor
- && dev->descriptor.idProduct ==
- hfcusb_idtab[i].idProduct) {
+ if ((le16_to_cpu(dev->descriptor.idVendor) == hfcusb_idtab[i].idVendor)
+ && (le16_to_cpu(dev->descriptor.idProduct) == hfcusb_idtab[i].idProduct)) {
vend_idx = i;
continue;
}
usb_transfer_mode
= USB_INT;
packet_size =
- ep->desc.
- wMaxPacketSize;
+ le16_to_cpu(ep->desc.wMaxPacketSize);
break;
case USB_ENDPOINT_XFER_BULK:
if (ep_addr & 0x80)
usb_transfer_mode
= USB_BULK;
packet_size =
- ep->desc.
- wMaxPacketSize;
+ le16_to_cpu(ep->desc.wMaxPacketSize);
break;
case USB_ENDPOINT_XFER_ISOC:
if (ep_addr & 0x80)
usb_transfer_mode
= USB_ISOC;
iso_packet_size =
- ep->desc.
- wMaxPacketSize;
+ le16_to_cpu(ep->desc.wMaxPacketSize);
break;
default:
context->
fifonum = cidx;
context->fifos[cidx].hfc =
context;
- context->fifos[cidx].
- usb_packet_maxlen =
- ep->desc.
- wMaxPacketSize;
+ context->fifos[cidx].usb_packet_maxlen =
+ le16_to_cpu(ep->desc.wMaxPacketSize);
context->fifos[cidx].
intervall =
ep->desc.bInterval;
#ifdef ISDN_DEBUG_MODEM_OPEN
printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
#endif
+ module_put(info->owner);
return;
}
info->flags |= ISDN_ASYNC_CLOSING;
return st->target;
}
EXPORT_SYMBOL_GPL(wf_cpu_pid_run);
+
+MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
+MODULE_DESCRIPTION("PID algorithm for PowerMacs thermal control");
+MODULE_LICENSE("GPL");
info("found the stv0297 at i2c address: 0x%02x",alps_tdee4_stv0297_config.demod_address);
} else
/* try the sky v2.3 (vp310/Samsung tbdu18132(tsa5059)) */
- if ((fc->fe = vp310_attach(&skystar23_samsung_tbdu18132_config, &fc->i2c_adap)) != NULL) {
+ if ((fc->fe = vp310_mt312_attach(&skystar23_samsung_tbdu18132_config, &fc->i2c_adap)) != NULL) {
ops = fc->fe->ops;
ops->diseqc_send_master_cmd = flexcop_diseqc_send_master_cmd;
EXPORT_SYMBOL(bt878_device_control);
-struct cards card_list[] __devinitdata = {
+static struct cards card_list[] __devinitdata = {
{ 0x01010071, BTTV_BOARD_NEBULA_DIGITV, "Nebula Electronics DigiTV" },
{ 0x07611461, BTTV_BOARD_AVDVBT_761, "AverMedia AverTV DVB-T 761" },
*/
-struct dst_types dst_tlist[] = {
+static struct dst_types dst_tlist[] = {
{
.device_id = "200103A",
.offset = 0,
/*--------------------------------------------------------------------------*/
/*
- * Flags OR'ed in the capabilites field of struct dmx_demux.
+ * Flags OR'ed in the capabilities field of struct dmx_demux.
*/
#define DMX_TS_FILTERING 1
.pll_set = dvb_usb_pll_set_i2c,
};
-static struct lgdt330x_config cxusb_lgdt330x_config = {
+static struct lgdt330x_config cxusb_lgdt3303_config = {
.demod_address = 0x0e,
.demod_chip = LGDT3303,
.pll_set = dvb_usb_pll_set_i2c,
return -EIO;
}
-static int cxusb_lgdt330x_frontend_attach(struct dvb_usb_device *d)
+static int cxusb_lgdt3303_frontend_attach(struct dvb_usb_device *d)
{
if (usb_set_interface(d->udev,0,7) < 0)
err("set interface failed");
cxusb_ctrl_msg(d,CMD_DIGITAL, NULL, 0, NULL, 0);
- if ((d->fe = lgdt330x_attach(&cxusb_lgdt330x_config, &d->i2c_adap)) != NULL)
+ if ((d->fe = lgdt330x_attach(&cxusb_lgdt3303_config, &d->i2c_adap)) != NULL)
return 0;
return -EIO;
.streaming_ctrl = cxusb_streaming_ctrl,
.power_ctrl = cxusb_power_ctrl,
- .frontend_attach = cxusb_lgdt330x_frontend_attach,
+ .frontend_attach = cxusb_lgdt3303_frontend_attach,
.tuner_attach = cxusb_lgh064f_tuner_attach,
.i2c_algo = &cxusb_i2c_algo,
d->state = DVB_USB_STATE_INIT;
-/* check the capabilites and set appropriate variables */
+/* check the capabilities and set appropriate variables */
/* speed - when running at FULL speed we need a HW PID filter */
if (d->udev->speed == USB_SPEED_FULL && !(d->props.caps & DVB_USB_HAS_PID_FILTER)) {
/**
* struct dvb_usb_properties - properties of a dvb-usb-device
- * @caps: capabilites of the DVB USB device.
+ * @caps: capabilities of the DVB USB device.
* @pid_filter_count: number of PID filter position in the optional hardware
* PID-filter.
*
A DVB-S tuner module. Say Y when you want to support this frontend.
config DVB_MT312
- tristate "Zarlink MT312 based"
+ tristate "Zarlink VP310/MT312 based"
depends on DVB_CORE
help
A DVB-S tuner module. Say Y when you want to support this frontend.
kfree(state);
}
-static struct dvb_frontend_ops vp310_mt312_ops;
-
-struct dvb_frontend* vp310_attach(const struct mt312_config* config,
- struct i2c_adapter* i2c)
-{
- struct mt312_state* state = NULL;
-
- /* allocate memory for the internal state */
- state = kmalloc(sizeof(struct mt312_state), GFP_KERNEL);
- if (state == NULL)
- goto error;
-
- /* setup the state */
- state->config = config;
- state->i2c = i2c;
- memcpy(&state->ops, &vp310_mt312_ops, sizeof(struct dvb_frontend_ops));
- strcpy(state->ops.info.name, "Zarlink VP310 DVB-S");
-
- /* check if the demod is there */
- if (mt312_readreg(state, ID, &state->id) < 0)
- goto error;
- if (state->id != ID_VP310) {
- goto error;
- }
-
- /* create dvb_frontend */
- state->frequency = 90;
- state->frontend.ops = &state->ops;
- state->frontend.demodulator_priv = state;
- return &state->frontend;
-
-error:
- kfree(state);
- return NULL;
-}
-
-struct dvb_frontend* mt312_attach(const struct mt312_config* config,
- struct i2c_adapter* i2c)
-{
- struct mt312_state* state = NULL;
-
- /* allocate memory for the internal state */
- state = kmalloc(sizeof(struct mt312_state), GFP_KERNEL);
- if (state == NULL)
- goto error;
-
- /* setup the state */
- state->config = config;
- state->i2c = i2c;
- memcpy(&state->ops, &vp310_mt312_ops, sizeof(struct dvb_frontend_ops));
- strcpy(state->ops.info.name, "Zarlink MT312 DVB-S");
-
- /* check if the demod is there */
- if (mt312_readreg(state, ID, &state->id) < 0)
- goto error;
- if (state->id != ID_MT312) {
- goto error;
- }
-
- /* create dvb_frontend */
- state->frequency = 60;
- state->frontend.ops = &state->ops;
- state->frontend.demodulator_priv = state;
- return &state->frontend;
-
-error:
- kfree(state);
- return NULL;
-}
-
static struct dvb_frontend_ops vp310_mt312_ops = {
.info = {
.set_voltage = mt312_set_voltage,
};
+struct dvb_frontend* vp310_mt312_attach(const struct mt312_config* config,
+ struct i2c_adapter* i2c)
+{
+ struct mt312_state* state = NULL;
+
+ /* allocate memory for the internal state */
+ state = kmalloc(sizeof(struct mt312_state), GFP_KERNEL);
+ if (state == NULL)
+ goto error;
+
+ /* setup the state */
+ state->config = config;
+ state->i2c = i2c;
+ memcpy(&state->ops, &vp310_mt312_ops, sizeof(struct dvb_frontend_ops));
+
+ /* check if the demod is there */
+ if (mt312_readreg(state, ID, &state->id) < 0)
+ goto error;
+
+ switch (state->id) {
+ case ID_VP310:
+ strcpy(state->ops.info.name, "Zarlink VP310 DVB-S");
+ state->frequency = 90;
+ break;
+ case ID_MT312:
+ strcpy(state->ops.info.name, "Zarlink MT312 DVB-S");
+ state->frequency = 60;
+ break;
+ default:
+ printk (KERN_WARNING "Only Zarlink VP310/MT312 are supported chips.\n");
+ goto error;
+ }
+
+ /* create dvb_frontend */
+ state->frontend.ops = &state->ops;
+ state->frontend.demodulator_priv = state;
+ return &state->frontend;
+
+error:
+ kfree(state);
+ return NULL;
+}
+
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
MODULE_AUTHOR("Andreas Oberritter <obi@linuxtv.org>");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(mt312_attach);
-EXPORT_SYMBOL(vp310_attach);
+EXPORT_SYMBOL(vp310_mt312_attach);
int (*pll_set)(struct dvb_frontend* fe, struct dvb_frontend_parameters* params);
};
-extern struct dvb_frontend* mt312_attach(const struct mt312_config* config,
- struct i2c_adapter* i2c);
+struct dvb_frontend* vp310_mt312_attach(const struct mt312_config* config,
+ struct i2c_adapter* i2c);
-extern struct dvb_frontend* vp310_attach(const struct mt312_config* config,
- struct i2c_adapter* i2c);
#endif // MT312_H
break;
case QAM_128:
- delay = 150;
- sweeprate = 1000;
- break;
-
case QAM_256:
delay = 200;
sweeprate = 500;
len = ntohl(*(u32*) ptr);
ptr += 4;
if (len >= 512) {
- printk("dvb-ttpci: dpram file is way to big.\n");
+ printk("dvb-ttpci: dpram file is way too big.\n");
return -EINVAL;
}
if (crc != crc32_le(0, ptr, len)) {
* The same behaviour of missing VSYNC can be duplicated on budget
 * cards, by setting DD1_INIT trigger mode 7 in 3rd nibble.
*/
-static int av7110_attach(struct saa7146_dev* dev, struct saa7146_pci_extension_data *pci_ext)
+static int __devinit av7110_attach(struct saa7146_dev* dev,
+ struct saa7146_pci_extension_data *pci_ext)
{
const int length = TS_WIDTH * TS_HEIGHT;
struct pci_dev *pdev = dev->pci;
goto out;
}
-static int av7110_detach(struct saa7146_dev* saa)
+static int __devexit av7110_detach(struct saa7146_dev* saa)
{
struct av7110 *av7110 = saa->ext_priv;
dprintk(4, "%p\n", av7110);
.module = THIS_MODULE,
.pci_tbl = &pci_tbl[0],
.attach = av7110_attach,
- .detach = av7110_detach,
+ .detach = __devexit_p(av7110_detach),
.irq_mask = MASK_19 | MASK_03 | MASK_10,
.irq_func = av7110_irq,
/* test DEBI */
iwdebi(av7110, DEBISWAP, DPRAM_BASE, 0x76543210, 4);
+ /* FIXME: Why does Nexus CA require 2x iwdebi for first init? */
+ iwdebi(av7110, DEBISWAP, DPRAM_BASE, 0x76543210, 4);
+
if ((ret=irdebi(av7110, DEBINOSWAP, DPRAM_BASE, 0, 4)) != 0x10325476) {
printk(KERN_ERR "dvb-ttpci: debi test in av7110_bootarm() failed: "
"%08x != %08x (check your BIOS 'Plug&Play OS' settings)\n",
}
-int __init av7110_ir_init(struct av7110 *av7110)
+int __devinit av7110_ir_init(struct av7110 *av7110)
{
static struct proc_dir_entry *e;
}
-void __exit av7110_ir_exit(struct av7110 *av7110)
+void __devexit av7110_ir_exit(struct av7110 *av7110)
{
int i;
//DBG("cpia_ioctl: %u\n", ioctlnr);
switch (ioctlnr) {
- /* query capabilites */
+ /* query capabilities */
case VIDIOCGCAP:
{
struct video_capability *b = arg;
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Contrast",
.minimum = 0,
- .maximum = 255,
+ .maximum = 127,
.step = 1,
.default_value = 64,
.flags = 0,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Saturation",
.minimum = 0,
- .maximum = 255,
+ .maximum = 127,
.step = 1,
.default_value = 64,
.flags = 0,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Contrast",
.minimum = 0,
- .maximum = 255,
+ .maximum = 127,
.step = 1,
.default_value = 64,
.flags = 0,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Saturation",
.minimum = 0,
- .maximum = 255,
+ .maximum = 127,
.step = 1,
.default_value = 64,
.flags = 0,
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-static int enable[SNDRV_CARDS] = {1, [1 ... (SNDRV_CARDS - 1)] = 0};
+static int enable[SNDRV_CARDS] = {1, [1 ... (SNDRV_CARDS - 1)] = 1};
module_param_array(index, int, NULL, 0444);
+module_param_array(enable, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for SAA7134 capture interface(s).");
+MODULE_PARM_DESC(enable, "Enable (or not) the SAA7134 capture interface(s).");
#define dprintk(fmt, arg...) if (debug) \
printk(KERN_DEBUG "%s/alsa: " fmt, dev->name , ##arg)
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .tda9887_conf = TDA9887_PRESENT | TDA9887_INTERCARRIER | TDA9887_PORT2_ACTIVE,
+ .tda9887_conf = TDA9887_PRESENT | TDA9887_INTERCARRIER | TDA9887_PORT2_INACTIVE,
.inputs = {{
.name = name_tv,
.vmux = 3,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .tda9887_conf = TDA9887_PRESENT | TDA9887_INTERCARRIER | TDA9887_PORT2_ACTIVE,
+ .tda9887_conf = TDA9887_PRESENT | TDA9887_INTERCARRIER | TDA9887_PORT2_INACTIVE,
.mpeg = SAA7134_MPEG_DVB,
.inputs = {{
.name = name_tv,
.radio_type = UNSET,
.tuner_addr = 0x61,
.radio_addr = ADDR_UNSET,
- .tda9887_conf = TDA9887_PRESENT,
+ .tda9887_conf = TDA9887_PRESENT | TDA9887_PORT1_ACTIVE,
.mpeg = SAA7134_MPEG_DVB,
.inputs = {{
.name = name_tv,
.radio_type = UNSET,
.tuner_addr = 0x61,
.radio_addr = ADDR_UNSET,
- .tda9887_conf = TDA9887_PRESENT,
+ .tda9887_conf = TDA9887_PRESENT | TDA9887_PORT1_ACTIVE,
.mpeg = SAA7134_MPEG_DVB,
.inputs = {{
.name = name_tv,
}},
},
[SAA7134_BOARD_PINNACLE_PCTV_110i] = {
- .name = "Pinnacle PCTV 110i (saa7133)",
+ .name = "Pinnacle PCTV 40i/50i/110i (saa7133)",
.audio_clock = 0x00187de7,
.tuner_type = TUNER_PHILIPS_TDA8290,
.radio_type = UNSET,
},{
.name = name_comp1,
.vmux = 1,
+ .amux = LINE2,
+ },{
+ .name = name_comp2,
+ .vmux = 0,
.amux = LINE2,
},{
.name = name_svideo,
.vendor = PCI_VENDOR_ID_PHILIPS,
.device = PCI_DEVICE_ID_PHILIPS_SAA7130,
.subvendor = 0x1048,
- .subdevice = 0x226b,
+ .subdevice = 0x226a,
.driver_data = SAA7134_BOARD_ELSA_500TV,
},{
.vendor = PCI_VENDOR_ID_PHILIPS,
/* power-up tuner chip */
saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, 0x00040000, 0x00040000);
saa_andorl(SAA7134_GPIO_GPSTATUS0 >> 2, 0x00040000, 0x00000000);
+ case SAA7134_BOARD_PINNACLE_300I_DVBT_PAL:
+ /* this turns the remote control chip off to work around a bug in it */
+ saa_writeb(SAA7134_GPIO_GPMODE1, 0x80);
+ saa_writeb(SAA7134_GPIO_GPSTATUS1, 0x80);
+ break;
case SAA7134_BOARD_MONSTERTV_MOBILE:
/* power-up tuner chip */
saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, 0x00040000, 0x00040000);
mt352_write(fe, fsm_ctl_cfg, sizeof(fsm_ctl_cfg));
mt352_write(fe, scan_ctl_cfg, sizeof(scan_ctl_cfg));
mt352_write(fe, irq_cfg, sizeof(irq_cfg));
+
return 0;
}
struct dvb_frontend_parameters* params,
u8* pllbuf)
{
- static int on = TDA9887_PRESENT | TDA9887_PORT2_INACTIVE;
- static int off = TDA9887_PRESENT | TDA9887_PORT2_ACTIVE;
+ u8 off[] = { 0x00, 0xf1};
+ u8 on[] = { 0x00, 0x71};
+ struct i2c_msg msg = {.addr=0x43, .flags=0, .buf=off, .len = sizeof(off)};
+
struct saa7134_dev *dev = fe->dvb->priv;
struct v4l2_frequency f;
f.tuner = 0;
f.type = V4L2_TUNER_DIGITAL_TV;
f.frequency = params->frequency / 1000 * 16 / 1000;
- saa7134_i2c_call_clients(dev,TDA9887_SET_CONFIG,&on);
+ i2c_transfer(&dev->i2c_adap, &msg, 1);
saa7134_i2c_call_clients(dev,VIDIOC_S_FREQUENCY,&f);
- saa7134_i2c_call_clients(dev,TDA9887_SET_CONFIG,&off);
+ msg.buf = on;
+ i2c_transfer(&dev->i2c_adap, &msg, 1);
pinnacle_antenna_pwr(dev, antenna_pwr);
int tda8290_probe(struct i2c_client *c)
{
- unsigned char soft_reset[] = { 0x00, 0x00 };
- unsigned char easy_mode_b[] = { 0x01, 0x02 };
- unsigned char easy_mode_g[] = { 0x01, 0x04 };
+ unsigned char soft_reset[] = { 0x00, 0x00 };
+ unsigned char easy_mode_b[] = { 0x01, 0x02 };
+ unsigned char easy_mode_g[] = { 0x01, 0x04 };
+ unsigned char restore_9886[] = { 0x00, 0xd6, 0x30 };
unsigned char addr_dto_lsb = 0x07;
unsigned char data;
return 0;
}
}
+ i2c_master_send(c, restore_9886, 3);
return -1;
}
struct v4l2_frequency *f = arg;
switch_v4l2();
- if (V4L2_TUNER_RADIO == f->type &&
- V4L2_TUNER_RADIO != t->mode) {
+ if ((V4L2_TUNER_RADIO == f->type && V4L2_TUNER_RADIO != t->mode)
+ || (V4L2_TUNER_DIGITAL_TV == f->type
+ && V4L2_TUNER_DIGITAL_TV != t->mode)) {
if (set_mode (client, t, f->type, "VIDIOC_S_FREQUENCY")
== EINVAL)
return 0;
the slave is bound to it). Otherwise it doesn't need these functions and
therefore they may not be initialized.
- The other fuctions are just for convenience, as they are for shure used by
+ The other functions are just for convenience, as they are for sure used by
most/all of the codecs. The last ones may be omitted, too.
See the structure declaration below for more information and which data has
while (!(zr36050_read_status1(ptr) & 0x4)) {
udelay(1);
- if (i++ > 200000) { // 200ms, there is for shure something wrong!!!
+ if (i++ > 200000) { // 200ms, there is for sure something wrong!!!
		dprintk(1,
			"%s: timeout at wait_end (last status: 0x%02x)\n",
ptr->name, ptr->status1);
while (zr36060_read_status(ptr) & ZR060_CFSR_Busy) {
udelay(1);
- if (i++ > 200000) { // 200ms, there is for shure something wrong!!!
+ if (i++ > 200000) { // 200ms, there is for sure something wrong!!!
		dprintk(1,
			"%s: timeout at wait_end (last status: 0x%02x)\n",
ptr->name, ptr->status);
case I2C_DRIVERID_VIDEODECODER:
DEBUG(printk(CARD_INFO "decoder attached\n",CARD));
- /* fetch the capabilites of the decoder */
+ /* fetch the capabilities of the decoder */
rv = i2c_control_device(&ztv->i2c, I2C_DRIVERID_VIDEODECODER, DECODER_GET_CAPABILITIES, &dc);
if (rv) {
DEBUG(printk(CARD_DEBUG "decoder is not V4L aware!\n",CARD));
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
-#include <linux/device.h>
+#include <linux/platform_device.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
u32 mmccmd = (cmd->opcode << SD_CMD_CI_SHIFT);
- switch (mmc_rsp_type(cmd->flags)) {
+ switch (mmc_resp_type(cmd)) {
case MMC_RSP_R1:
mmccmd |= SD_CMD_RT_1;
break;
static void au1xmmc_dma_callback(int irq, void *dev_id, struct pt_regs *regs)
{
struct au1xmmc_host *host = (struct au1xmmc_host *) dev_id;
- u32 status;
/* Avoid spurious interrupts */
.set_ios = au1xmmc_set_ios,
};
-static int au1xmmc_probe(struct device *dev)
+static int __devinit au1xmmc_probe(struct platform_device *pdev)
{
int i, ret = 0;
disable_irq(AU1100_SD_IRQ);
for(i = 0; i < AU1XMMC_CONTROLLER_COUNT; i++) {
- struct mmc_host *mmc = mmc_alloc_host(sizeof(struct au1xmmc_host), dev);
+ struct mmc_host *mmc = mmc_alloc_host(sizeof(struct au1xmmc_host), &pdev->dev);
struct au1xmmc_host *host = 0;
if (!mmc) {
return 0;
}
-static int au1xmmc_remove(struct device *dev)
+static int __devexit au1xmmc_remove(struct platform_device *pdev)
{
int i;
return 0;
}
-static struct device_driver au1xmmc_driver = {
- .name = DRIVER_NAME,
- .bus = &platform_bus_type,
+static struct platform_driver au1xmmc_driver = {
.probe = au1xmmc_probe,
.remove = au1xmmc_remove,
.suspend = NULL,
- .resume = NULL
+ .resume = NULL,
+ .driver = {
+ .name = DRIVER_NAME,
+ },
};
static int __init au1xmmc_init(void)
{
- return driver_register(&au1xmmc_driver);
+ return platform_driver_register(&au1xmmc_driver);
}
static void __exit au1xmmc_exit(void)
{
- driver_unregister(&au1xmmc_driver);
+ platform_driver_unregister(&au1xmmc_driver);
}
module_init(au1xmmc_init);
struct peespi *t1_espi_create(adapter_t *adapter)
{
- struct peespi *espi = kmalloc(sizeof(*espi), GFP_KERNEL);
-
- memset(espi, 0, sizeof(*espi));
+ struct peespi *espi = kzalloc(sizeof(*espi), GFP_KERNEL);
if (espi)
espi->adapter = adapter;
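Besides being shorter, kzalloc() fixes an ordering bug in the code it
replaces, which memset() the buffer before checking the allocation for
NULL. The safe open-coded equivalent is:

	struct peespi *espi = kmalloc(sizeof(*espi), GFP_KERNEL);

	if (espi)
		memset(espi, 0, sizeof(*espi));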
i++, mclist = mclist->next) {
memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
ETH_ALEN);
+ mac_addr = 0;
for (j = 0; j < ETH_ALEN; j++) {
mac_addr |= mclist->dmi_addr[j];
mac_addr <<= 8;
(base + len + 8 < base));
}
+/* Test for DMA addresses > 40-bit */
+static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
+ int len)
+{
+#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
+ if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
+ return (((u64) mapping + len) > DMA_40BIT_MASK);
+ return 0;
+#else
+ return 0;
+#endif
+}
+
static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
-static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
+/* Workaround 4GB and 40-bit hardware DMA bugs. */
+static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
u32 last_plus_one, u32 *start,
u32 base_flags, u32 mss)
{
if (tg3_4g_overflow_test(mapping, len))
would_hit_hwbug = 1;
+ if (tg3_40bit_overflow_test(tp, mapping, len))
+ would_hit_hwbug = 1;
+
if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
tg3_set_txd(tp, entry, mapping, len,
base_flags, (i == last)|(mss << 1));
/* If the workaround fails due to memory/mapping
* failure, silently drop this packet.
*/
- if (tigon3_4gb_hwbug_workaround(tp, skb, last_plus_one,
+ if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
&start, base_flags, mss))
goto out_unlock;
unsigned long tg3reg_base, tg3reg_len;
struct net_device *dev;
struct tg3 *tp;
- int i, err, pci_using_dac, pm_cap;
+ int i, err, pm_cap;
char str[40];
+ u64 dma_mask, persist_dma_mask;
if (tg3_version_printed++ == 0)
printk(KERN_INFO "%s", version);
goto err_out_free_res;
}
- /* Configure DMA attributes. */
- err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
- if (!err) {
- pci_using_dac = 1;
- err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
- if (err < 0) {
- printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
- "for consistent allocations\n");
- goto err_out_free_res;
- }
- } else {
- err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
- if (err) {
- printk(KERN_ERR PFX "No usable DMA configuration, "
- "aborting.\n");
- goto err_out_free_res;
- }
- pci_using_dac = 0;
- }
-
tg3reg_base = pci_resource_start(pdev, 0);
tg3reg_len = pci_resource_len(pdev, 0);
SET_MODULE_OWNER(dev);
SET_NETDEV_DEV(dev, &pdev->dev);
- if (pci_using_dac)
- dev->features |= NETIF_F_HIGHDMA;
dev->features |= NETIF_F_LLTX;
#if TG3_VLAN_TAG_USED
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
goto err_out_iounmap;
}
+ /* 5714, 5715 and 5780 cannot support DMA addresses > 40-bit.
+ * On 64-bit systems with IOMMU, use 40-bit dma_mask.
+ * On 64-bit systems without IOMMU, use 64-bit dma_mask and
+ * do DMA address check in tg3_start_xmit().
+ */
+ if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
+ persist_dma_mask = dma_mask = DMA_40BIT_MASK;
+#ifdef CONFIG_HIGHMEM
+ dma_mask = DMA_64BIT_MASK;
+#endif
+ } else if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
+ persist_dma_mask = dma_mask = DMA_32BIT_MASK;
+ else
+ persist_dma_mask = dma_mask = DMA_64BIT_MASK;
+
+ /* Configure DMA attributes. */
+ if (dma_mask > DMA_32BIT_MASK) {
+ err = pci_set_dma_mask(pdev, dma_mask);
+ if (!err) {
+ dev->features |= NETIF_F_HIGHDMA;
+ err = pci_set_consistent_dma_mask(pdev,
+ persist_dma_mask);
+ if (err < 0) {
+ printk(KERN_ERR PFX "Unable to obtain 64 bit "
+ "DMA for consistent allocations\n");
+ goto err_out_iounmap;
+ }
+ }
+ }
+ if (err || dma_mask == DMA_32BIT_MASK) {
+ err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+ if (err) {
+ printk(KERN_ERR PFX "No usable DMA configuration, "
+ "aborting.\n");
+ goto err_out_iounmap;
+ }
+ }
+
tg3_init_bufmgr_config(tp);
#if TG3_TSO_SUPPORT != 0
} else
tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
- if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
- dev->features &= ~NETIF_F_HIGHDMA;
-
/* flow control autonegotiation is default behavior */
tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
{
struct parport_pc_pci *card;
struct parport_serial_private *priv = pci_get_drvdata (dev);
- int i = id->driver_data, n;
- int success = 0;
+ int n, success = 0;
priv->par = cards[id->driver_data];
card = &priv->par;
"hi" as an offset (see SYBA
def.) */
/* TODO: test if sharing interrupts works */
- printk (KERN_DEBUG "PCI parallel port detected: %04x:%04x, "
- "I/O at %#lx(%#lx)\n",
- parport_serial_pci_tbl[i].vendor,
- parport_serial_pci_tbl[i].device, io_lo, io_hi);
+ dev_dbg(&dev->dev, "PCI parallel port detected: I/O at "
+ "%#lx(%#lx)\n", io_lo, io_hi);
port = parport_pc_probe_port (io_lo, io_hi, PARPORT_IRQ_NONE,
PARPORT_DMA_NONE, dev);
if (port) {
if (card->postinit_hook)
card->postinit_hook (dev, card, !success);
- return success ? 0 : 1;
+ return 0;
}
static int __devinit parport_serial_pci_probe (struct pci_dev *dev,
* interrupt for this detection ccw uses the kernel event daemon to
* trigger the call to dasd_change_state. All this is done in the
* discipline code, see dasd_eckd.c.
- * After the analysis ccw is done (do_analysis returned 0 or error)
- * the block device is setup. Either a fake disk is added to allow
- * formatting or a proper device request queue is created.
+ * After the analysis ccw is done (do_analysis returned 0) the block
+ * device is setup.
+ * In case the analysis returns an error, the device setup is stopped
+ * (a fake disk was already added to allow formatting).
*/
static inline int
dasd_state_basic_to_ready(struct dasd_device * device)
rc = 0;
if (device->discipline->do_analysis != NULL)
rc = device->discipline->do_analysis(device);
- if (rc)
+ if (rc) {
+ if (rc != -EAGAIN)
+ device->state = DASD_STATE_UNFMT;
return rc;
+ }
+ /* make disk known with correct capacity */
dasd_setup_queue(device);
+ set_capacity(device->gdp, device->blocks << device->s2b_shift);
device->state = DASD_STATE_READY;
- if (dasd_scan_partitions(device) != 0)
+ rc = dasd_scan_partitions(device);
+ if (rc)
device->state = DASD_STATE_BASIC;
- return 0;
+ return rc;
}
/*
device->state = DASD_STATE_BASIC;
}
+/*
+ * Back to basic.
+ */
+static inline void
+dasd_state_unfmt_to_basic(struct dasd_device * device)
+{
+ device->state = DASD_STATE_BASIC;
+}
+
/*
* Make the device online and schedule the bottom half to start
* the requeueing of requests from the linux request queue to the
if (device->state == DASD_STATE_READY &&
device->target <= DASD_STATE_BASIC)
dasd_state_ready_to_basic(device);
-
- if (device->state == DASD_STATE_BASIC &&
+
+ if (device->state == DASD_STATE_UNFMT &&
+ device->target <= DASD_STATE_BASIC)
+ dasd_state_unfmt_to_basic(device);
+
+ if (device->state == DASD_STATE_BASIC &&
device->target <= DASD_STATE_KNOWN)
dasd_state_basic_to_known(device);
goto out;
}
- if (device->state < DASD_STATE_BASIC) {
+ if (device->state <= DASD_STATE_BASIC) {
DBF_DEV_EVENT(DBF_ERR, device, " %s",
" Cannot open unrecognized device");
rc = -ENODEV;
{
struct block_device *bdev;
- /* Make the disk known. */
- set_capacity(device->gdp, device->blocks << device->s2b_shift);
bdev = bdget_disk(device->gdp, 0);
if (!bdev || blkdev_get(bdev, FMODE_READ, 1) < 0)
return -ENODEV;
* new: the dasd_device structure is allocated.
* known: the discipline for the device is identified.
* basic: the device can do basic i/o.
- * accept: the device is analysed (format is known).
+ * unfmt: the device could not be analyzed (format is unknown).
 * ready: partition detection is done and the device can do block io.
* online: the device accepts requests from the block device queue.
*
#define DASD_STATE_NEW 0
#define DASD_STATE_KNOWN 1
#define DASD_STATE_BASIC 2
-#define DASD_STATE_READY 3
-#define DASD_STATE_ONLINE 4
+#define DASD_STATE_UNFMT 3
+#define DASD_STATE_READY 4
+#define DASD_STATE_ONLINE 5
#include <linux/module.h>
#include <linux/wait.h>
case DASD_STATE_BASIC:
seq_printf(m, "basic");
break;
+ case DASD_STATE_UNFMT:
+		seq_printf(m, "unformatted");
+ break;
case DASD_STATE_READY:
case DASD_STATE_ONLINE:
seq_printf(m, "active ");
goto out;
}
switch (sda_area->response.code) {
+ case 0x0001: /* everything ok */
+ ret = 0;
+ break;
case 0x0003: /* invalid request block */
case 0x0007:
ret = -EINVAL;
case 0x0101: /* facility not provided */
ret = -EOPNOTSUPP;
break;
+ default: /* something went wrong */
+ ret = -EIO;
}
out:
free_page((unsigned long)sda_area);
driver_unregister(&smsg_driver);
return -EIO; /* better errno ? */
}
- rc = iucv_connect (&smsg_pathid, 1, 0, "*MSG ", 0, 0, 0, 0,
+ rc = iucv_connect (&smsg_pathid, 255, 0, "*MSG ", 0, 0, 0, 0,
smsg_handle, 0);
if (rc) {
printk(KERN_ERR "SMSGIUCV: failed to connect to *MSG");
SDev = cd->device;
if (!sense) {
- sense = kmalloc(sizeof(*sense), GFP_KERNEL);
+ sense = kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
if (!sense) {
err = -ENOMEM;
goto out;
if (up->port.info == NULL)
goto ack_tx_int;
xmit = &up->port.info->xmit;
- if (uart_circ_empty(xmit)) {
- uart_write_wakeup(&up->port);
+ if (uart_circ_empty(xmit))
goto ack_tx_int;
- }
if (uart_tx_stopped(&up->port))
goto ack_tx_int;
void uart_write_wakeup(struct uart_port *port)
{
struct uart_info *info = port->info;
+ /*
+ * This means you called this function _after_ the port was
+ * closed. No cookie for you.
+ */
+ BUG_ON(!info);
tasklet_schedule(&info->tlet);
}
}
static int
-uart_write(struct tty_struct *tty, const unsigned char * buf, int count)
+uart_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
struct uart_state *state = tty->driver_data;
- struct uart_port *port = state->port;
- struct circ_buf *circ = &state->info->xmit;
+ struct uart_port *port;
+ struct circ_buf *circ;
unsigned long flags;
int c, ret = 0;
+ /*
+ * This means you called this function _after_ the port was
+ * closed. No cookie for you.
+ */
+ if (!state || !state->info) {
+ WARN_ON(1);
+ return -EL3HLT;
+ }
+
+ port = state->port;
+ circ = &state->info->xmit;
+
if (!circ->buf)
return 0;
struct uart_port *port = state->port;
unsigned long flags;
+ /*
+ * This means you called this function _after_ the port was
+ * closed. No cookie for you.
+ */
+ if (!state || !state->info) {
+ WARN_ON(1);
+ return;
+ }
+
DPRINTK("uart_flush_buffer(%d) called\n", tty->index);
spin_lock_irqsave(&port->lock, flags);
int retval;
if (!ia64_platform_is("sn2"))
- return -ENODEV;
+ return 0;
printk(KERN_INFO "sn_console: Console driver init\n");
down(&port->sem);
- if (port->open_count == 0)
- goto out;
+ if (port->open_count == 0) {
+ up(&port->sem);
+ return;
+ }
--port->open_count;
if (port->open_count == 0) {
module_put(port->serial->type->driver.owner);
}
- kref_put(&port->serial->kref, destroy_serial);
-
-out:
up(&port->sem);
+ kref_put(&port->serial->kref, destroy_serial);
}
static int serial_write (struct tty_struct * tty, const unsigned char *buf, int count)
if (!rc)
return;
- dprintk(DEBUG_9P, "tcall id %d rcall id %d\n", tc->id, rc->id);
v9ses = a;
if (rc->id == RCLUNK)
v9fs_put_idpool(fid, &v9ses->fidpool);
if (!trans || trans->status != Connected || !ts)
return -EIO;
+ oldfs = get_fs();
set_fs(get_ds());
/* The cast to a user pointer is valid due to the set_fs() */
ret = vfs_write(ts->out_file, (void __user *)v, len, &ts->out_file->f_pos);
fid = v9fs_get_idpool(&v9ses->fidpool);
if (fid < 0) {
eprintk(KERN_WARNING, "no free fids available\n");
- err = -ENOSPC;
- goto error;
+ return -ENOSPC;
}
err = v9fs_t_walk(v9ses, pfid, fid, NULL, &fcall);
nfid = v9fs_get_idpool(&v9ses->fidpool);
if (nfid < 0) {
eprintk(KERN_WARNING, "no free fids available\n");
- err = -ENOSPC;
- goto error;
+ return ERR_PTR(-ENOSPC);
}
err = v9fs_t_walk(v9ses, fid, nfid, (char *) dentry->d_name.name,
int result = 0;
dprintk(DEBUG_VFS, "dir: %p dentry: (%s) %p nameidata: %p\n",
- dir, dentry->d_iname, dentry, nameidata);
+ dir, dentry->d_name.name, dentry, nameidata);
sb = dir->i_sb;
v9ses = v9fs_inode2v9ses(dir);
stat_result = v9fs_t_stat(v9ses, newfid, &fcall);
if (stat_result < 0) {
dprintk(DEBUG_ERROR, "stat error\n");
- kfree(fcall);
v9fs_t_clunk(v9ses, newfid);
} else {
/* Setup the Root Inode */
int * /* type of buf returned */ , const int long_op);
extern int checkSMBhdr(struct smb_hdr *smb, __u16 mid);
extern int checkSMB(struct smb_hdr *smb, __u16 mid, int length);
-extern int is_valid_oplock_break(struct smb_hdr *smb);
+extern int is_valid_oplock_break(struct smb_hdr *smb, struct TCP_Server_Info *);
extern int is_size_safe_to_change(struct cifsInodeInfo *);
extern struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *);
extern unsigned int smbCalcSize(struct smb_hdr *ptr);
smallbuf = NULL;
}
wake_up_process(task_to_wake);
- } else if ((is_valid_oplock_break(smb_buffer) == FALSE)
+ } else if ((is_valid_oplock_break(smb_buffer, server) == FALSE)
&& (isMultiRsp == FALSE)) {
cERROR(1, ("No task to wake, unknown frame rcvd!"));
cifs_dump_mem("Received Data is: ",(char *)smb_buffer,
return 0;
}
int
-is_valid_oplock_break(struct smb_hdr *buf)
+is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
{
struct smb_com_lock_req * pSMB = (struct smb_com_lock_req *)buf;
struct list_head *tmp;
read_lock(&GlobalSMBSeslock);
list_for_each(tmp, &GlobalTreeConnectionList) {
tcon = list_entry(tmp, struct cifsTconInfo, cifsConnectionList);
- if (tcon->tid == buf->Tid) {
+ if ((tcon->tid == buf->Tid) && (srv == tcon->ses->server)) {
cifs_stats_inc(&tcon->num_oplock_brks);
list_for_each(tmp1,&tcon->openFileList){
netfile = list_entry(tmp1,struct cifsFileInfo,
ifr = ifc.ifc_req;
ifr32 = compat_ptr(ifc32.ifcbuf);
for (i = 0, j = 0;
- i + sizeof (struct ifreq32) < ifc32.ifc_len && j < ifc.ifc_len;
+ i + sizeof (struct ifreq32) <= ifc32.ifc_len && j < ifc.ifc_len;
i += sizeof (struct ifreq32), j += sizeof (struct ifreq)) {
if (copy_in_user(ifr32, ifr, sizeof (struct ifreq32)))
return -EFAULT;
/* These two macros may change in future, to provide better st_ino
semantics. */
-#define CRAMINO(x) ((x)->offset?(x)->offset<<2:1)
+#define CRAMINO(x) (((x)->offset && (x)->size)?(x)->offset<<2:1)
#define OFFSET(x) ((x)->i_ino)
static int cramfs_iget5_set(struct inode *inode, void *opaque)
{
+ static struct timespec zerotime;
struct cramfs_inode *cramfs_inode = opaque;
+ inode->i_mode = cramfs_inode->mode;
+ inode->i_uid = cramfs_inode->uid;
+ inode->i_size = cramfs_inode->size;
+ inode->i_blocks = (cramfs_inode->size - 1) / 512 + 1;
+ inode->i_blksize = PAGE_CACHE_SIZE;
+ inode->i_gid = cramfs_inode->gid;
+ /* Struct copy intentional */
+ inode->i_mtime = inode->i_atime = inode->i_ctime = zerotime;
inode->i_ino = CRAMINO(cramfs_inode);
+ /* inode->i_nlink is left 1 - arguably wrong for directories,
+ but it's the best we can do without reading the directory
+ contents. 1 yields the right result in GNU find, even
+ without -noleaf option. */
+ if (S_ISREG(inode->i_mode)) {
+ inode->i_fop = &generic_ro_fops;
+ inode->i_data.a_ops = &cramfs_aops;
+ } else if (S_ISDIR(inode->i_mode)) {
+ inode->i_op = &cramfs_dir_inode_operations;
+ inode->i_fop = &cramfs_directory_operations;
+ } else if (S_ISLNK(inode->i_mode)) {
+ inode->i_op = &page_symlink_inode_operations;
+ inode->i_data.a_ops = &cramfs_aops;
+ } else {
+ inode->i_size = 0;
+ inode->i_blocks = 0;
+ init_special_inode(inode, inode->i_mode,
+ old_decode_dev(cramfs_inode->size));
+ }
return 0;
}
struct inode *inode = iget5_locked(sb, CRAMINO(cramfs_inode),
cramfs_iget5_test, cramfs_iget5_set,
cramfs_inode);
- static struct timespec zerotime;
-
if (inode && (inode->i_state & I_NEW)) {
- inode->i_mode = cramfs_inode->mode;
- inode->i_uid = cramfs_inode->uid;
- inode->i_size = cramfs_inode->size;
- inode->i_blocks = (cramfs_inode->size - 1) / 512 + 1;
- inode->i_blksize = PAGE_CACHE_SIZE;
- inode->i_gid = cramfs_inode->gid;
- /* Struct copy intentional */
- inode->i_mtime = inode->i_atime = inode->i_ctime = zerotime;
- inode->i_ino = CRAMINO(cramfs_inode);
- /* inode->i_nlink is left 1 - arguably wrong for directories,
- but it's the best we can do without reading the directory
- contents. 1 yields the right result in GNU find, even
- without -noleaf option. */
- if (S_ISREG(inode->i_mode)) {
- inode->i_fop = &generic_ro_fops;
- inode->i_data.a_ops = &cramfs_aops;
- } else if (S_ISDIR(inode->i_mode)) {
- inode->i_op = &cramfs_dir_inode_operations;
- inode->i_fop = &cramfs_directory_operations;
- } else if (S_ISLNK(inode->i_mode)) {
- inode->i_op = &page_symlink_inode_operations;
- inode->i_data.a_ops = &cramfs_aops;
- } else {
- inode->i_size = 0;
- inode->i_blocks = 0;
- init_special_inode(inode, inode->i_mode,
- old_decode_dev(cramfs_inode->size));
- }
unlock_new_inode(inode);
}
return inode;
SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC, filp_ctor, filp_dtor);
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
dcache_init(mempages);
inode_init(mempages);
{
int ret;
- ret = -ERESTARTSYS;
- if (mutex_lock_interruptible(PIPE_MUTEX(*inode)))
- goto err_nolock_nocleanup;
-
+ mutex_lock(PIPE_MUTEX(*inode));
if (!inode->i_pipe) {
ret = -ENOMEM;
if(!pipe_new(inode))
err_nocleanup:
mutex_unlock(PIPE_MUTEX(*inode));
-
-err_nolock_nocleanup:
return ret;
}
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
*/
+#include <linux/config.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/capability.h>
#include <linux/cdev.h>
#include <linux/fsnotify.h>
+#include <linux/sysctl.h>
+#include <linux/percpu_counter.h>
+
+#include <asm/atomic.h>
/* sysctl tunables... */
struct files_stat_struct files_stat = {
.max_files = NR_FILE
};
-EXPORT_SYMBOL(files_stat); /* Needed by unix.o */
-
/* public. Not pretty! */
- __cacheline_aligned_in_smp DEFINE_SPINLOCK(files_lock);
+__cacheline_aligned_in_smp DEFINE_SPINLOCK(files_lock);
-static DEFINE_SPINLOCK(filp_count_lock);
+static struct percpu_counter nr_files __cacheline_aligned_in_smp;
-/* slab constructors and destructors are called from arbitrary
- * context and must be fully threaded - use a local spinlock
- * to protect files_stat.nr_files
- */
-void filp_ctor(void *objp, struct kmem_cache *cachep, unsigned long cflags)
+static inline void file_free_rcu(struct rcu_head *head)
{
- if ((cflags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR) {
- unsigned long flags;
- spin_lock_irqsave(&filp_count_lock, flags);
- files_stat.nr_files++;
- spin_unlock_irqrestore(&filp_count_lock, flags);
- }
+ struct file *f = container_of(head, struct file, f_u.fu_rcuhead);
+ kmem_cache_free(filp_cachep, f);
}
-void filp_dtor(void *objp, struct kmem_cache *cachep, unsigned long dflags)
+static inline void file_free(struct file *f)
{
- unsigned long flags;
- spin_lock_irqsave(&filp_count_lock, flags);
- files_stat.nr_files--;
- spin_unlock_irqrestore(&filp_count_lock, flags);
+ percpu_counter_dec(&nr_files);
+ call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
}
-static inline void file_free_rcu(struct rcu_head *head)
+/*
+ * Return the total number of open files in the system
+ */
+static int get_nr_files(void)
{
- struct file *f = container_of(head, struct file, f_u.fu_rcuhead);
- kmem_cache_free(filp_cachep, f);
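+	/* fast but approximate read; percpu_counter_sum() gives the exact count */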
+ return percpu_counter_read_positive(&nr_files);
}
-static inline void file_free(struct file *f)
+/*
+ * Return the maximum number of open files in the system
+ */
+int get_max_files(void)
{
- call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
+ return files_stat.max_files;
}
+EXPORT_SYMBOL_GPL(get_max_files);
+
+/*
+ * Handle nr_files sysctl
+ */
+#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
+int proc_nr_files(ctl_table *table, int write, struct file *filp,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
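+	/* refresh files_stat.nr_files from the percpu counter before the generic handler reads it */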
+ files_stat.nr_files = get_nr_files();
+ return proc_dointvec(table, write, filp, buffer, lenp, ppos);
+}
+#else
+int proc_nr_files(ctl_table *table, int write, struct file *filp,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ return -ENOSYS;
+}
+#endif
/* Find an unused file structure and return a pointer to it.
* Returns NULL, if there are no more free file structures or
/*
* Privileged users can go above max_files
*/
- if (files_stat.nr_files >= files_stat.max_files &&
- !capable(CAP_SYS_ADMIN))
- goto over;
+ if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
+ /*
+ * percpu_counters are inaccurate. Do an expensive check before
+ * we go and fail.
+ */
+ if (percpu_counter_sum(&nr_files) >= files_stat.max_files)
+ goto over;
+ }
f = kmem_cache_alloc(filp_cachep, GFP_KERNEL);
if (f == NULL)
goto fail;
+ percpu_counter_inc(&nr_files);
memset(f, 0, sizeof(*f));
if (security_file_alloc(f))
goto fail_sec;
over:
/* Ran out of filps - report that */
- if (files_stat.nr_files > old_max) {
+ if (get_nr_files() > old_max) {
printk(KERN_INFO "VFS: file-max limit %d reached\n",
- files_stat.max_files);
- old_max = files_stat.nr_files;
+ get_max_files());
+ old_max = get_nr_files();
}
goto fail;
if (files_stat.max_files < NR_FILE)
files_stat.max_files = NR_FILE;
files_defer_init();
+ percpu_counter_init(&nr_files);
}
c->nextblock->dirty_size = 0;
}
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
- if (!jffs2_can_mark_obsolete(c) && c->nextblock && (c->nextblock->free_size % c->wbuf_pagesize)) {
+ if (!jffs2_can_mark_obsolete(c) && c->wbuf_pagesize && c->nextblock && (c->nextblock->free_size % c->wbuf_pagesize)) {
/* If we're going to start writing into a block which already
contains data, and the end of the data isn't page-aligned,
skip a little and align it. */
ibm_partition(struct parsed_partitions *state, struct block_device *bdev)
{
int blocksize, offset, size;
+ loff_t i_size;
dasd_information_t *info;
struct hd_geometry *geo;
char type[5] = {0,};
unsigned char *data;
Sector sect;
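+	/* validate sector size and capacity up front, before any allocations or ioctls */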
+ blocksize = bdev_hardsect_size(bdev);
+ if (blocksize <= 0)
+ return 0;
+ i_size = i_size_read(bdev->bd_inode);
+ if (i_size == 0)
+ return 0;
+
if ((info = kmalloc(sizeof(dasd_information_t), GFP_KERNEL)) == NULL)
goto out_noinfo;
if ((geo = kmalloc(sizeof(struct hd_geometry), GFP_KERNEL)) == NULL)
if (ioctl_by_bdev(bdev, BIODASDINFO, (unsigned long)info) != 0 ||
ioctl_by_bdev(bdev, HDIO_GETGEO, (unsigned long)geo) != 0)
goto out_noioctl;
-
- if ((blocksize = bdev_hardsect_size(bdev)) <= 0)
- goto out_badsect;
/*
* Get volume label, extract name and type.
} else {
printk("CMS1/%8s:", name);
offset = (info->label_block + 1);
- size = bdev->bd_inode->i_size >> 9;
+ size = i_size >> 9;
}
put_partition(state, 1, offset*(blocksize >> 9),
size-offset*(blocksize >> 9));
else
printk("(nonl)/%8s:", name);
offset = (info->label_block + 1);
- size = (bdev->bd_inode->i_size >> 9);
+ size = i_size >> 9;
put_partition(state, 1, offset*(blocksize >> 9),
size-offset*(blocksize >> 9));
}
return 1;
out_readerr:
-out_badsect:
out_noioctl:
kfree(label);
out_nolab:
.fasync = pipe_rdwr_fasync,
};
-struct file_operations read_pipe_fops = {
+static struct file_operations read_pipe_fops = {
.llseek = no_llseek,
.read = pipe_read,
.readv = pipe_readv,
.fasync = pipe_read_fasync,
};
-struct file_operations write_pipe_fops = {
+static struct file_operations write_pipe_fops = {
.llseek = no_llseek,
.read = bad_pipe_r,
.write = pipe_write,
.fasync = pipe_write_fasync,
};
-struct file_operations rdwr_pipe_fops = {
+static struct file_operations rdwr_pipe_fops = {
.llseek = no_llseek,
.read = pipe_read,
.readv = pipe_readv,
{
pte_t *pte, ptent;
spinlock_t *ptl;
- unsigned long pfn;
struct page *page;
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
continue;
mss->resident += PAGE_SIZE;
- pfn = pte_pfn(ptent);
- if (!pfn_valid(pfn))
+
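+	/* vm_normal_page() returns NULL for special mappings with no backing struct page */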
+ page = vm_normal_page(vma, addr, ptent);
+ if (!page)
continue;
- page = pfn_to_page(pfn);
- if (page_count(page) >= 2) {
+ if (page_mapcount(page) >= 2) {
if (pte_dirty(ptent))
mss->shared_dirty += PAGE_SIZE;
else
struct mem_size_stats mss;
memset(&mss, 0, sizeof mss);
- if (vma->vm_mm)
+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
smaps_pgd_range(vma, vma->vm_start, vma->vm_end, &mss);
return show_map_internal(m, v, &mss);
}
inode->i_gid = dir->i_gid;
d_instantiate(dentry, inode);
dget(dentry);
+ dir->i_mtime = dir->i_ctime = CURRENT_TIME;
} else
iput(inode);
}
/* mark, that this generation number is used */
if (de->de_gen_number_bit_string)
set_bit(GET_GENERATION_NUMBER(deh_offset(deh)),
- (unsigned long *)de->de_gen_number_bit_string);
+ de->de_gen_number_bit_string);
// calculate pointer to name and namelen
de->de_entry_num = i;
struct reiserfs_de_head *deh;
INITIALIZE_PATH(path);
struct reiserfs_dir_entry de;
- int bit_string[MAX_GENERATION_NUMBER / (sizeof(int) * 8) + 1];
+ DECLARE_BITMAP(bit_string, MAX_GENERATION_NUMBER + 1);
int gen_number;
char small_buf[32 + DEH_SIZE]; /* 48 bytes now and we avoid kmalloc
if we create file with short name */
/* find the proper place for the new entry */
memset(bit_string, 0, sizeof(bit_string));
- de.de_gen_number_bit_string = (char *)bit_string;
+ de.de_gen_number_bit_string = bit_string;
retval = reiserfs_find_entry(dir, name, namelen, &path, &de);
if (retval != NAME_NOT_FOUND) {
if (buffer != small_buf)
}
gen_number =
- find_first_zero_bit((unsigned long *)bit_string,
+ find_first_zero_bit(bit_string,
MAX_GENERATION_NUMBER + 1);
if (gen_number > MAX_GENERATION_NUMBER) {
/* there is no free generation number */
}
inode->i_uid = le32_to_cpu(fe->uid);
- if ( inode->i_uid == -1 ) inode->i_uid = UDF_SB(inode->i_sb)->s_uid;
+ if (inode->i_uid == -1 || UDF_QUERY_FLAG(inode->i_sb,
+ UDF_FLAG_UID_IGNORE))
+ inode->i_uid = UDF_SB(inode->i_sb)->s_uid;
inode->i_gid = le32_to_cpu(fe->gid);
- if ( inode->i_gid == -1 ) inode->i_gid = UDF_SB(inode->i_sb)->s_gid;
+ if (inode->i_gid == -1 || UDF_QUERY_FLAG(inode->i_sb,
+ UDF_FLAG_GID_IGNORE))
+ inode->i_gid = UDF_SB(inode->i_sb)->s_gid;
inode->i_nlink = le16_to_cpu(fe->fileLinkCount);
if (!inode->i_nlink)
return err;
}
- if (inode->i_uid != UDF_SB(inode->i_sb)->s_uid)
+ if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET))
+ fe->uid = cpu_to_le32(-1);
+ else if (inode->i_uid != UDF_SB(inode->i_sb)->s_uid)
fe->uid = cpu_to_le32(inode->i_uid);
- if (inode->i_gid != UDF_SB(inode->i_sb)->s_gid)
+ if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_FORGET))
+ fe->gid = cpu_to_le32(-1);
+ else if (inode->i_gid != UDF_SB(inode->i_sb)->s_gid)
fe->gid = cpu_to_le32(inode->i_gid);
udfperms = ((inode->i_mode & S_IRWXO) ) |
Opt_gid, Opt_uid, Opt_umask, Opt_session, Opt_lastblock,
Opt_anchor, Opt_volume, Opt_partition, Opt_fileset,
Opt_rootdir, Opt_utf8, Opt_iocharset,
- Opt_err
+ Opt_err, Opt_uforget, Opt_uignore, Opt_gforget, Opt_gignore
};
static match_table_t tokens = {
{Opt_adinicb, "adinicb"},
{Opt_shortad, "shortad"},
{Opt_longad, "longad"},
+ {Opt_uforget, "uid=forget"},
+ {Opt_uignore, "uid=ignore"},
+ {Opt_gforget, "gid=forget"},
+ {Opt_gignore, "gid=ignore"},
{Opt_gid, "gid=%u"},
{Opt_uid, "uid=%u"},
{Opt_umask, "umask=%o"},
uopt->flags |= (1 << UDF_FLAG_NLS_MAP);
break;
#endif
+ case Opt_uignore:
+ uopt->flags |= (1 << UDF_FLAG_UID_IGNORE);
+ break;
+ case Opt_uforget:
+ uopt->flags |= (1 << UDF_FLAG_UID_FORGET);
+ break;
+ case Opt_gignore:
+ uopt->flags |= (1 << UDF_FLAG_GID_IGNORE);
+ break;
+ case Opt_gforget:
+ uopt->flags |= (1 << UDF_FLAG_GID_FORGET);
+ break;
default:
printk(KERN_ERR "udf: bad mount option \"%s\" "
"or missing value\n", p);
#define UDF_FLAG_VARCONV 8
#define UDF_FLAG_NLS_MAP 9
#define UDF_FLAG_UTF8 10
+#define UDF_FLAG_UID_FORGET	11    /* write the uid to disk as -1 */
+#define UDF_FLAG_UID_IGNORE	12    /* use the sb uid instead of the on-disk uid */
+#define UDF_FLAG_GID_FORGET 13
+#define UDF_FLAG_GID_IGNORE 14
#define UDF_PART_FLAG_UNALLOC_BITMAP 0x0001
#define UDF_PART_FLAG_UNALLOC_TABLE 0x0002
asm("mcr%? p15, 0, %0, c8, c6, 1" : : "r" (kaddr));
if (tlb_flag(TLB_V6_I_PAGE))
asm("mcr%? p15, 0, %0, c8, c5, 1" : : "r" (kaddr));
+
+	/*
+	 * The ARM ARM states that the completion of a TLB maintenance
+	 * operation is only guaranteed by a DSB instruction.
+	 */
+ if (tlb_flag(TLB_V6_U_PAGE | TLB_V6_D_PAGE | TLB_V6_I_PAGE))
+ asm("mcr%? p15, 0, %0, c7, c10, 4" : : "r" (zero));
}
/*
#define __get_unaligned(ptr, size) ({ \
const void *__gu_p = ptr; \
- __typeof__(*(ptr)) val; \
+ __u64 val; \
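+	/* wide scratch type: a const-qualified *(ptr) would make the old typeof local unassignable; cast back below */ \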
switch (size) { \
case 1: \
val = *(const __u8 *)__gu_p; \
default: \
bad_unaligned_access_length(); \
}; \
- val; \
+ (__typeof__(*(ptr)))val; \
})
#define __put_unaligned(val, ptr, size) \
void switch_ipi_to_APIC_timer(void *cpumask);
#define ARCH_APICTIMER_STOPS_ON_C3 1
+extern int timer_over_8254;
+
#else /* !CONFIG_X86_LOCAL_APIC */
static inline void lapic_shutdown(void) { }
static inline void eeh_add_device_early(struct device_node *dn) { }
+static inline void eeh_add_device_late(struct pci_dev *dev) { }
+
static inline void eeh_remove_device(struct pci_dev *dev) { }
static inline void eeh_add_device_tree_early(struct device_node *dn) { }
"mfxer %0\n"
"std %0, 296(%2)\n"
: "=&r" (tmp1), "=&r" (tmp2)
- : "b" (newregs));
+ : "b" (newregs)
+ : "memory");
}
}
#else
extern void account_vtime(struct task_struct *);
extern void account_tick_vtime(struct task_struct *);
extern void account_system_vtime(struct task_struct *);
+#else
+#define account_vtime(x) do { /* empty */ } while (0)
#endif
#define finish_arch_switch(prev) do { \
"4: ba 3b\n" \
" mov %5, %0\n" \
" .previous\n" \
- " .section __ex_table,#alloc\n" \
+ " .section __ex_table,\"a\"\n" \
" .align 4\n" \
" .word 1b, 4b\n" \
" .word 2b, 4b\n" \
"b 2b\n\t" \
" mov %3, %0\n\n\t" \
".previous\n\t" \
- ".section __ex_table,#alloc\n\t" \
+ ".section __ex_table,\"a\"\n\t" \
".align 4\n\t" \
".word 1b, 3b\n\t" \
".previous\n\n\t" \
__asm__ __volatile__( \
"/* Put user asm ret, inline. */\n" \
"1:\t" "st"#size "a %1, [%2] %%asi\n\n\t" \
- ".section __ex_table,#alloc\n\t" \
+ ".section __ex_table,\"a\"\n\t" \
".align 4\n\t" \
".word 1b, __ret_efault\n\n\t" \
".previous\n\n\t" \
"ret\n\t" \
" restore %%g0, %3, %%o0\n\n\t" \
".previous\n\t" \
- ".section __ex_table,#alloc\n\t" \
+ ".section __ex_table,\"a\"\n\t" \
".align 4\n\t" \
".word 1b, 3b\n\n\t" \
".previous\n\n\t" \
"b 2b\n\t" \
" mov %3, %0\n\n\t" \
".previous\n\t" \
- ".section __ex_table,#alloc\n\t" \
+ ".section __ex_table,\"a\"\n\t" \
".align 4\n\t" \
".word 1b, 3b\n\n\t" \
".previous\n\t" \
__asm__ __volatile__( \
"/* Get user asm ret, inline. */\n" \
"1:\t" "ld"#size "a [%1] %%asi, %0\n\n\t" \
- ".section __ex_table,#alloc\n\t" \
+ ".section __ex_table,\"a\"\n\t" \
".align 4\n\t" \
".word 1b,__ret_efault\n\n\t" \
".previous\n\t" \
"ret\n\t" \
" restore %%g0, %2, %%o0\n\n\t" \
".previous\n\t" \
- ".section __ex_table,#alloc\n\t" \
+ ".section __ex_table,\"a\"\n\t" \
".align 4\n\t" \
".word 1b, 3b\n\n\t" \
".previous\n\t" \
extern int get_unused_fd(void);
extern void FASTCALL(put_unused_fd(unsigned int fd));
struct kmem_cache;
-extern void filp_ctor(void * objp, struct kmem_cache *cachep, unsigned long cflags);
-extern void filp_dtor(void * objp, struct kmem_cache *cachep, unsigned long dflags);
extern struct file ** alloc_fd_array(int);
extern void free_fd_array(struct file **, int);
int max_files; /* tunable */
};
extern struct files_stat_struct files_stat;
+extern int get_max_files(void);
struct inodes_stat_t {
int nr_inodes;
extern struct file_operations read_fifo_fops;
extern struct file_operations write_fifo_fops;
extern struct file_operations rdwr_fifo_fops;
-extern struct file_operations read_pipe_fops;
-extern struct file_operations write_pipe_fops;
-extern struct file_operations rdwr_pipe_fops;
extern int fs_may_remount_ro(struct super_block *);
extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer);
extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp);
+#ifdef CONFIG_NO_IDLE_HZ
+extern ktime_t hrtimer_get_next_event(void);
+#endif
+
static inline int hrtimer_active(const struct hrtimer *timer)
{
return timer->state == HRTIMER_PENDING;
CACHE(32768)
CACHE(65536)
CACHE(131072)
-#ifndef CONFIG_MMU
+#if (NR_CPUS > 512) || (MAX_NUMNODES > 256) || !defined(CONFIG_MMU)
CACHE(262144)
+#endif
+#ifndef CONFIG_MMU
CACHE(524288)
CACHE(1048576)
#ifdef CONFIG_LARGE_ALLOCS
#include <linux/mmzone.h>
#include <linux/notifier.h>
+struct page;
+struct zone;
+struct pglist_data;
+
#ifdef CONFIG_MEMORY_HOTPLUG
/*
* pgdat resizing functions
#define PCI_DEVICE_ID_CCD_B00B 0xb00b
#define PCI_DEVICE_ID_CCD_B00C 0xb00c
#define PCI_DEVICE_ID_CCD_B100 0xb100
+#define PCI_DEVICE_ID_CCD_B700 0xb700
+#define PCI_DEVICE_ID_CCD_B701 0xb701
#define PCI_VENDOR_ID_EXAR 0x13a8
#define PCI_DEVICE_ID_EXAR_XR17C152 0x0152
}
void percpu_counter_mod(struct percpu_counter *fbc, long amount);
+long percpu_counter_sum(struct percpu_counter *fbc);
static inline long percpu_counter_read(struct percpu_counter *fbc)
{
return fbc->count;
}
+static inline long percpu_counter_sum(struct percpu_counter *fbc)
+{
+ return percpu_counter_read_positive(fbc);
+}
+
#endif /* CONFIG_SMP */
static inline void percpu_counter_inc(struct percpu_counter *fbc)
long batch; /* Batch # for current RCU batch */
struct rcu_head *nxtlist;
struct rcu_head **nxttail;
- long count; /* # of queued items */
+ long qlen; /* # of queued callbacks */
struct rcu_head *curlist;
struct rcu_head **curtail;
struct rcu_head *donelist;
struct rcu_head **donetail;
+ long blimit; /* Upper limit on a processed batch */
int cpu;
struct rcu_head barrier;
+#ifdef CONFIG_SMP
+ long last_rs_qlen; /* qlen during the last resched */
+#endif
};
DECLARE_PER_CPU(struct rcu_data, rcu_data);
int de_entrylen;
int de_namelen;
char *de_name;
- char *de_gen_number_bit_string;
+ unsigned long *de_gen_number_bit_string;
__u32 de_dir_id;
__u32 de_objectid;
return rem;
}
+#ifdef CONFIG_NO_IDLE_HZ
+/**
+ * hrtimer_get_next_event - get the time until next expiry event
+ *
+ * Returns the delta to the next expiry event or KTIME_MAX if no timer
+ * is pending.
+ */
+ktime_t hrtimer_get_next_event(void)
+{
+ struct hrtimer_base *base = __get_cpu_var(hrtimer_bases);
+ ktime_t delta, mindelta = { .tv64 = KTIME_MAX };
+ unsigned long flags;
+ int i;
+
+ for (i = 0; i < MAX_HRTIMER_BASES; i++, base++) {
+ struct hrtimer *timer;
+
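+		/* base->first caches the leftmost, i.e. soonest-expiring, timer in this base's rbtree */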
+ spin_lock_irqsave(&base->lock, flags);
+ if (!base->first) {
+ spin_unlock_irqrestore(&base->lock, flags);
+ continue;
+ }
+ timer = rb_entry(base->first, struct hrtimer, node);
+ delta.tv64 = timer->expires.tv64;
+ spin_unlock_irqrestore(&base->lock, flags);
+ delta = ktime_sub(delta, base->get_time());
+ if (delta.tv64 < mindelta.tv64)
+ mindelta.tv64 = delta.tv64;
+ }
+ if (mindelta.tv64 < 0)
+ mindelta.tv64 = 0;
+ return mindelta;
+}
+#endif
+
/**
* hrtimer_init - initialize a timer to the given clock
*
/* Fake initialization required by compiler */
static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet) = {NULL};
-static int maxbatch = 10000;
+static int blimit = 10;
+static int qhimark = 10000;
+static int qlowmark = 100;
+#ifdef CONFIG_SMP
+static int rsinterval = 1000;
+#endif
+
+static atomic_t rcu_barrier_cpu_count;
+static struct semaphore rcu_barrier_sema;
+static struct completion rcu_barrier_completion;
+
+#ifdef CONFIG_SMP
+static void force_quiescent_state(struct rcu_data *rdp,
+ struct rcu_ctrlblk *rcp)
+{
+ int cpu;
+ cpumask_t cpumask;
+ set_need_resched();
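+	/* always prod the local CPU; IPI the others at most once per rsinterval queued callbacks */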
+ if (unlikely(rdp->qlen - rdp->last_rs_qlen > rsinterval)) {
+ rdp->last_rs_qlen = rdp->qlen;
+		/*
+		 * Don't send an IPI to the current CPU. With irqs
+		 * disabled, rdp->cpu is the CPU we are running on.
+		 */
+ cpumask = rcp->cpumask;
+ cpu_clear(rdp->cpu, cpumask);
+ for_each_cpu_mask(cpu, cpumask)
+ smp_send_reschedule(cpu);
+ }
+}
+#else
+static inline void force_quiescent_state(struct rcu_data *rdp,
+ struct rcu_ctrlblk *rcp)
+{
+ set_need_resched();
+}
+#endif
/**
* call_rcu - Queue an RCU callback for invocation after a grace period.
rdp = &__get_cpu_var(rcu_data);
*rdp->nxttail = head;
rdp->nxttail = &head->next;
-
- if (unlikely(++rdp->count > 10000))
- set_need_resched();
-
+ if (unlikely(++rdp->qlen > qhimark)) {
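+		/* queue is over qhimark: lift the batch limit and press for a grace period */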
+ rdp->blimit = INT_MAX;
+ force_quiescent_state(rdp, &rcu_ctrlblk);
+ }
local_irq_restore(flags);
}
-static atomic_t rcu_barrier_cpu_count;
-static struct semaphore rcu_barrier_sema;
-static struct completion rcu_barrier_completion;
-
/**
 * call_rcu_bh - Queue an RCU callback for invocation after a quicker grace period.
* @head: structure to be used for queueing the RCU updates.
rdp = &__get_cpu_var(rcu_bh_data);
*rdp->nxttail = head;
rdp->nxttail = &head->next;
- rdp->count++;
-/*
- * Should we directly call rcu_do_batch() here ?
- * if (unlikely(rdp->count > 10000))
- * rcu_do_batch(rdp);
- */
+
+ if (unlikely(++rdp->qlen > qhimark)) {
+ rdp->blimit = INT_MAX;
+ force_quiescent_state(rdp, &rcu_bh_ctrlblk);
+ }
+
local_irq_restore(flags);
}
next = rdp->donelist = list->next;
list->func(list);
list = next;
- rdp->count--;
- if (++count >= maxbatch)
+ rdp->qlen--;
+ if (++count >= rdp->blimit)
break;
}
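+	/* queue has drained below qlowmark: restore the normal batch limit */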
+ if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark)
+ rdp->blimit = blimit;
if (!rdp->donelist)
rdp->donetail = &rdp->donelist;
else
rdp->quiescbatch = rcp->completed;
rdp->qs_pending = 0;
rdp->cpu = cpu;
+ rdp->blimit = blimit;
}
static void __devinit rcu_online_cpu(int cpu)
synchronize_rcu();
}
-module_param(maxbatch, int, 0);
+module_param(blimit, int, 0);
+module_param(qhimark, int, 0);
+module_param(qlowmark, int, 0);
+#ifdef CONFIG_SMP
+module_param(rsinterval, int, 0);
+#endif
EXPORT_SYMBOL_GPL(rcu_batches_completed);
EXPORT_SYMBOL(call_rcu); /* WARNING: GPL-only in April 2006. */
EXPORT_SYMBOL(call_rcu_bh); /* WARNING: GPL-only in April 2006. */
*/
if (unlikely(preempt_count()))
return;
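+	/* likewise, don't preempt while the system is still booting or shutting down */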
+ if (unlikely(system_state != SYSTEM_RUNNING))
+ return;
do {
add_preempt_count(PREEMPT_ACTIVE);
schedule();
runqueue_t *rq = cpu_rq(cpu);
unsigned long flags;
+ idle->timestamp = sched_clock();
idle->sleep_avg = 0;
idle->array = NULL;
idle->prio = MAX_PRIO;
#include <asm/uaccess.h>
#include <asm/processor.h>
+extern int proc_nr_files(ctl_table *table, int write, struct file *filp,
+ void __user *buffer, size_t *lenp, loff_t *ppos);
+
#if defined(CONFIG_SYSCTL)
/* External variables not in a header file. */
.data = &files_stat,
.maxlen = 3*sizeof(int),
.mode = 0444,
- .proc_handler = &proc_dointvec,
+ .proc_handler = &proc_nr_files,
},
{
.ctl_name = FS_MAXFILE,
struct list_head *list;
struct timer_list *nte;
unsigned long expires;
+ unsigned long hr_expires = MAX_JIFFY_OFFSET;
+ ktime_t hr_delta;
tvec_t *varray[4];
int i, j;
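+	/* fold the earliest hrtimer expiry into the estimate; if it is only a couple of jiffies away, return it without scanning the timer wheel */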
+ hr_delta = hrtimer_get_next_event();
+ if (hr_delta.tv64 != KTIME_MAX) {
+ struct timespec tsdelta;
+ tsdelta = ktime_to_timespec(hr_delta);
+ hr_expires = timespec_to_jiffies(&tsdelta);
+ if (hr_expires < 3)
+ return hr_expires + jiffies;
+ }
+ hr_expires += jiffies;
+
base = &__get_cpu_var(tvec_bases);
spin_lock(&base->t_base.lock);
expires = base->timer_jiffies + (LONG_MAX >> 1);
}
}
spin_unlock(&base->t_base.lock);
+
+ if (time_before(hr_expires, expires))
+ return hr_expires;
+
return expires;
}
#endif
void do_timer(struct pt_regs *regs)
{
jiffies_64++;
+ /* prevent loading jiffies before storing new jiffies_64 value. */
+ barrier();
update_times();
softlockup_tick(regs);
}
return policy;
}
-static void gather_stats(struct page *, void *);
+static void gather_stats(struct page *, void *, int pte_dirty);
static void migrate_page_add(struct page *page, struct list_head *pagelist,
unsigned long flags);
continue;
if (flags & MPOL_MF_STATS)
- gather_stats(page, private);
+ gather_stats(page, private, pte_dirty(*pte));
else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
migrate_page_add(page, private, flags);
else
struct numa_maps {
unsigned long pages;
unsigned long anon;
- unsigned long mapped;
+ unsigned long active;
+ unsigned long writeback;
unsigned long mapcount_max;
+ unsigned long dirty;
+ unsigned long swapcache;
unsigned long node[MAX_NUMNODES];
};
-static void gather_stats(struct page *page, void *private)
+static void gather_stats(struct page *page, void *private, int pte_dirty)
{
struct numa_maps *md = private;
int count = page_mapcount(page);
- if (count)
- md->mapped++;
+ md->pages++;
+ if (pte_dirty || PageDirty(page))
+ md->dirty++;
- if (count > md->mapcount_max)
- md->mapcount_max = count;
+ if (PageSwapCache(page))
+ md->swapcache++;
- md->pages++;
+ if (PageActive(page))
+ md->active++;
+
+ if (PageWriteback(page))
+ md->writeback++;
if (PageAnon(page))
md->anon++;
+ if (count > md->mapcount_max)
+ md->mapcount_max = count;
+
md->node[page_to_nid(page)]++;
cond_resched();
}
+#ifdef CONFIG_HUGETLB_PAGE
+static void check_huge_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end,
+ struct numa_maps *md)
+{
+ unsigned long addr;
+ struct page *page;
+
+ for (addr = start; addr < end; addr += HPAGE_SIZE) {
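+		/* hugetlb vmas are mapped by one pte per HPAGE_SIZE chunk */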
+ pte_t *ptep = huge_pte_offset(vma->vm_mm, addr & HPAGE_MASK);
+ pte_t pte;
+
+ if (!ptep)
+ continue;
+
+ pte = *ptep;
+ if (pte_none(pte))
+ continue;
+
+ page = pte_page(pte);
+ if (!page)
+ continue;
+
+ gather_stats(page, md, pte_dirty(*ptep));
+ }
+}
+#else
+static inline void check_huge_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end,
+ struct numa_maps *md)
+{
+}
+#endif
+
int show_numa_map(struct seq_file *m, void *v)
{
struct task_struct *task = m->private;
struct vm_area_struct *vma = v;
struct numa_maps *md;
+ struct file *file = vma->vm_file;
+ struct mm_struct *mm = vma->vm_mm;
int n;
char buffer[50];
- if (!vma->vm_mm)
+ if (!mm)
return 0;
md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
if (!md)
return 0;
- if (!is_vm_hugetlb_page(vma))
+ mpol_to_str(buffer, sizeof(buffer),
+ get_vma_policy(task, vma, vma->vm_start));
+
+ seq_printf(m, "%08lx %s", vma->vm_start, buffer);
+
+ if (file) {
+ seq_printf(m, " file=");
+ seq_path(m, file->f_vfsmnt, file->f_dentry, "\n\t= ");
+ } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
+ seq_printf(m, " heap");
+ } else if (vma->vm_start <= mm->start_stack &&
+ vma->vm_end >= mm->start_stack) {
+ seq_printf(m, " stack");
+ }
+
+ if (is_vm_hugetlb_page(vma)) {
+ check_huge_range(vma, vma->vm_start, vma->vm_end, md);
+ seq_printf(m, " huge");
+ } else {
check_pgd_range(vma, vma->vm_start, vma->vm_end,
- &node_online_map, MPOL_MF_STATS, md);
+ &node_online_map, MPOL_MF_STATS, md);
+ }
- if (md->pages) {
- mpol_to_str(buffer, sizeof(buffer),
- get_vma_policy(task, vma, vma->vm_start));
+ if (!md->pages)
+ goto out;
- seq_printf(m, "%08lx %s pages=%lu mapped=%lu maxref=%lu",
- vma->vm_start, buffer, md->pages,
- md->mapped, md->mapcount_max);
+ if (md->anon)
+ seq_printf(m," anon=%lu",md->anon);
- if (md->anon)
- seq_printf(m," anon=%lu",md->anon);
+ if (md->dirty)
+ seq_printf(m," dirty=%lu",md->dirty);
- for_each_online_node(n)
- if (md->node[n])
- seq_printf(m, " N%d=%lu", n, md->node[n]);
+ if (md->pages != md->anon && md->pages != md->dirty)
+ seq_printf(m, " mapped=%lu", md->pages);
- seq_putc(m, '\n');
- }
+ if (md->mapcount_max > 1)
+ seq_printf(m, " mapmax=%lu", md->mapcount_max);
+
+ if (md->swapcache)
+ seq_printf(m," swapcache=%lu", md->swapcache);
+
+ if (md->active < md->pages && !is_vm_hugetlb_page(vma))
+ seq_printf(m," active=%lu", md->active);
+
+ if (md->writeback)
+ seq_printf(m," writeback=%lu", md->writeback);
+
+ for_each_online_node(n)
+ if (md->node[n])
+ seq_printf(m, " N%d=%lu", n, md->node[n]);
+out:
+ seq_putc(m, '\n');
kfree(md);
if (m->count < m->size)
struct cache_sizes *sizes;
struct cache_names *names;
int i;
+ int order;
for (i = 0; i < NUM_INIT_LISTS; i++) {
kmem_list3_init(&initkmem_list3[i]);
cache_cache.buffer_size = ALIGN(cache_cache.buffer_size, cache_line_size());
- cache_estimate(0, cache_cache.buffer_size, cache_line_size(), 0,
- &left_over, &cache_cache.num);
+ for (order = 0; order < MAX_ORDER; order++) {
+ cache_estimate(order, cache_cache.buffer_size,
+ cache_line_size(), 0, &left_over, &cache_cache.num);
+ if (cache_cache.num)
+ break;
+ }
if (!cache_cache.num)
BUG();
-
+ cache_cache.gfporder = order;
cache_cache.colour = left_over / cache_cache.colour_off;
cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
sizeof(struct slab), cache_line_size());
size_t size, size_t align, unsigned long flags)
{
size_t left_over = 0;
+ int gfporder;
- for (;; cachep->gfporder++) {
+ for (gfporder = 0 ; gfporder <= MAX_GFP_ORDER; gfporder++) {
unsigned int num;
size_t remainder;
- if (cachep->gfporder > MAX_GFP_ORDER) {
- cachep->num = 0;
- break;
- }
-
- cache_estimate(cachep->gfporder, size, align, flags,
- &remainder, &num);
+ cache_estimate(gfporder, size, align, flags, &remainder, &num);
if (!num)
continue;
+
/* More than offslab_limit objects will cause problems */
- if (flags & CFLGS_OFF_SLAB && cachep->num > offslab_limit)
+ if ((flags & CFLGS_OFF_SLAB) && num > offslab_limit)
break;
+ /* Found something acceptable - save it away */
cachep->num = num;
+ cachep->gfporder = gfporder;
left_over = remainder;
+ /*
+ * A VFS-reclaimable slab tends to have most allocations
+ * as GFP_NOFS and we really don't want to have to be allocating
+ * higher-order pages when we are unable to shrink dcache.
+ */
+ if (flags & SLAB_RECLAIM_ACCOUNT)
+ break;
+
/*
* Large number of objects is good, but very large slabs are
* currently bad for the gfp()s.
*/
- if (cachep->gfporder >= slab_break_gfp_order)
+ if (gfporder >= slab_break_gfp_order)
break;
- if ((left_over * 8) <= (PAGE_SIZE << cachep->gfporder))
- /* Acceptable internal fragmentation */
+ /*
+ * Acceptable internal fragmentation?
+ */
+ if ((left_over * 8) <= (PAGE_SIZE << gfporder))
break;
}
return left_over;
size = ALIGN(size, align);
- if ((flags & SLAB_RECLAIM_ACCOUNT) && size <= PAGE_SIZE) {
- /*
- * A VFS-reclaimable slab tends to have most allocations
- * as GFP_NOFS and we really don't want to have to be allocating
- * higher-order pages when we are unable to shrink dcache.
- */
- cachep->gfporder = 0;
- cache_estimate(cachep->gfporder, size, align, flags,
- &left_over, &cachep->num);
- } else
- left_over = calculate_slab_order(cachep, size, align, flags);
+ left_over = calculate_slab_order(cachep, size, align, flags);
if (!cachep->num) {
printk("kmem_cache_create: couldn't create cache %s.\n", name);
"slab: Internal list corruption detected in cache '%s'(%d), slabp %p(%d). Hexdump:\n",
cachep->name, cachep->num, slabp, slabp->inuse);
for (i = 0;
- i < sizeof(slabp) + cachep->num * sizeof(kmem_bufctl_t);
+ i < sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t);
i++) {
if ((i % 16) == 0)
printk("\n%03x:", i);
if (count >= FBC_BATCH || count <= -FBC_BATCH) {
spin_lock(&fbc->lock);
fbc->count += count;
+ *pcount = 0;
spin_unlock(&fbc->lock);
- count = 0;
+ } else {
+ *pcount = count;
}
- *pcount = count;
put_cpu();
}
EXPORT_SYMBOL(percpu_counter_mod);
+
+/*
+ * Add up all the per-cpu counts and return the result. This is a more
+ * accurate but much slower version of percpu_counter_read_positive().
+ */
+long percpu_counter_sum(struct percpu_counter *fbc)
+{
+ long ret;
+ int cpu;
+
+ spin_lock(&fbc->lock);
+ ret = fbc->count;
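+	/* fold in every CPU's not-yet-flushed delta */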
+ for_each_cpu(cpu) {
+ long *pcount = per_cpu_ptr(fbc->counters, cpu);
+ ret += *pcount;
+ }
+ spin_unlock(&fbc->lock);
+ return ret < 0 ? 0 : ret;
+}
+EXPORT_SYMBOL(percpu_counter_sum);
#endif
/*
static void sigd_put_skb(struct sk_buff *skb)
{
#ifdef WAIT_FOR_DEMON
- static unsigned long silence;
DECLARE_WAITQUEUE(wait,current);
add_wait_queue(&sigd_sleep,&wait);
while (!sigd) {
set_current_state(TASK_UNINTERRUPTIBLE);
- if (time_after(jiffies, silence) || silence == 0) {
- printk(KERN_INFO "atmsvc: waiting for signaling demon "
- "...\n");
- silence = (jiffies+30*HZ)|1;
- }
+ DPRINTK("atmsvc: waiting for signaling demon...\n");
schedule();
}
current->state = TASK_RUNNING;
remove_wait_queue(&sigd_sleep,&wait);
#else
if (!sigd) {
- if (net_ratelimit())
- printk(KERN_WARNING "atmsvc: no signaling demon\n");
+ DPRINTK("atmsvc: no signaling demon\n");
kfree_skb(skb);
return;
}
{
struct net_device *dev = arg;
struct net_bridge_port *p;
+ struct net_bridge *br;
rtnl_lock();
p = dev->br_port;
if (!p)
goto done;
-
- if (netif_carrier_ok(p->dev)) {
- u32 cost = port_cost(p->dev);
-
- spin_lock_bh(&p->br->lock);
- if (p->state == BR_STATE_DISABLED) {
- p->path_cost = cost;
- br_stp_enable_port(p);
+ br = p->br;
+
+ if (netif_carrier_ok(dev))
+ p->path_cost = port_cost(dev);
+
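+	/* only touch STP port state while the bridge device itself is up */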
+ if (br->dev->flags & IFF_UP) {
+ spin_lock_bh(&br->lock);
+ if (netif_carrier_ok(dev)) {
+ if (p->state == BR_STATE_DISABLED)
+ br_stp_enable_port(p);
+ } else {
+ if (p->state != BR_STATE_DISABLED)
+ br_stp_disable_port(p);
}
- spin_unlock_bh(&p->br->lock);
- } else {
- spin_lock_bh(&p->br->lock);
- if (p->state != BR_STATE_DISABLED)
- br_stp_disable_port(p);
- spin_unlock_bh(&p->br->lock);
+ spin_unlock_bh(&br->lock);
}
done:
rtnl_unlock();
rcu_assign_pointer(dev->br_port, NULL);
+ kobject_uevent(&p->kobj, KOBJ_REMOVE);
kobject_del(&p->kobj);
call_rcu(&p->rcu, destroy_nbp_rcu);
br_init_port(p);
p->state = BR_STATE_DISABLED;
INIT_WORK(&p->carrier_check, port_carrier_check, dev);
- kobject_init(&p->kobj);
+ br_stp_port_timer_init(p);
+ kobject_init(&p->kobj);
kobject_set_name(&p->kobj, SYSFS_BRIDGE_PORT_ATTR);
p->kobj.ktype = &brport_ktype;
p->kobj.parent = &(dev->class_dev.kobj);
p->state = BR_STATE_BLOCKING;
p->topology_change_ack = 0;
p->config_pending = 0;
-
- br_stp_port_timer_init(p);
}
/* called under bridge lock */
* net/dccp/ccids/ccid3.c
*
* Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand.
- * Copyright (c) 2005 Ian McDonald <iam4@cs.waikato.ac.nz>
+ * Copyright (c) 2005-6 Ian McDonald <imcdnzl@gmail.com>
*
* An implementation of the DCCP protocol
*
p_prev = hcrx->ccid3hcrx_p;
/* Calculate loss event rate */
- if (!list_empty(&hcrx->ccid3hcrx_li_hist))
+ if (!list_empty(&hcrx->ccid3hcrx_li_hist)) {
+ u32 i_mean = dccp_li_hist_calc_i_mean(&hcrx->ccid3hcrx_li_hist);
+
/* Scaling up by 1000000 as fixed decimal */
- hcrx->ccid3hcrx_p = 1000000 / dccp_li_hist_calc_i_mean(&hcrx->ccid3hcrx_li_hist);
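+		/* i_mean can be zero before enough loss intervals accumulate; guard the division */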
+ if (i_mean != 0)
+ hcrx->ccid3hcrx_p = 1000000 / i_mean;
+ }
if (hcrx->ccid3hcrx_p > p_prev) {
ccid3_hc_rx_send_feedback(sk);
write_unlock_bh(&queue_lock);
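+	/* size the copy from the netlink message length, not the raw skb length */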
status = ipq_receive_peer(NLMSG_DATA(nlh), type,
- skblen - NLMSG_LENGTH(0));
+ nlmsglen - NLMSG_LENGTH(0));
if (status < 0)
RCV_SKB_FAIL(status);
write_unlock_bh(&queue_lock);
status = ipq_receive_peer(NLMSG_DATA(nlh), type,
- skblen - NLMSG_LENGTH(0));
+ nlmsglen - NLMSG_LENGTH(0));
if (status < 0)
RCV_SKB_FAIL(status);
struct sock *sk = NULL;
struct unix_sock *u;
- if (atomic_read(&unix_nr_socks) >= 2*files_stat.max_files)
+ if (atomic_read(&unix_nr_socks) >= 2*get_max_files())
goto out;
sk = sk_alloc(PF_UNIX, GFP_KERNEL, &unix_proto, 1);
id->cu_model);
ADD(alias, "dt", id->match_flags&CCW_DEVICE_ID_MATCH_DEVICE_TYPE,
id->dev_type);
- ADD(alias, "dm", id->match_flags&CCW_DEVICE_ID_MATCH_DEVICE_TYPE,
+ ADD(alias, "dm", id->match_flags&CCW_DEVICE_ID_MATCH_DEVICE_MODEL,
id->dev_model);
return 1;
}
kctl.private_free = snd_ctl_elem_user_free;
_kctl = snd_ctl_new(&kctl, access);
if (_kctl == NULL) {
- kfree(_kctl->private_data);
+ kfree(ue);
return -ENOMEM;
}
_kctl->private_data = ue;
for (idx = 0; idx < _kctl->count; idx++)
_kctl->vd[idx].owner = file;
err = snd_ctl_add(card, _kctl);
- if (err < 0) {
- snd_ctl_free_one(_kctl);
+ if (err < 0)
return err;
- }
down_write(&card->controls_rwsem);
card->user_ctl_count++;