Add some cpus:
# /bin/echo 0-7 > cpus
+Add some mems:
+# /bin/echo 0-7 > mems
+
Now attach your shell to this cpuset:
# /bin/echo $$ > tasks
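The same attach step can be driven from a program by writing a pid into the
cpuset's "tasks" file. A minimal C sketch, assuming the cpuset filesystem is
mounted at /dev/cpuset as in the example above:

	#include <stdio.h>
	#include <unistd.h>

	/* Move the calling process into the cpuset rooted at cpuset_dir,
	 * mirroring the "/bin/echo $$ > tasks" step shown above. */
	int attach_self_to_cpuset(const char *cpuset_dir)
	{
		char path[256];
		FILE *f;

		snprintf(path, sizeof(path), "%s/tasks", cpuset_dir);
		f = fopen(path, "w");
		if (!f)
			return -1;
		fprintf(f, "%d\n", getpid());
		return fclose(f);
	}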
See also Documentation/pm.txt, pci=noacpi
+ acpi_apic_instance= [ACPI, IOAPIC]
+ Format: <int>
+ 2: use 2nd APIC table, if available
+ 1,0: use 1st APIC table
+ default: 0
+
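As a sketch of how such an option is typically wired up (the early_param
pattern also appears in the apic.c hunks later in this series; the variable
and function names here are illustrative, not the actual ACPI table code):

	static int acpi_apic_instance __initdata;

	static int __init parse_acpi_apic_instance(char *str)
	{
		if (!str)
			return -EINVAL;
		get_option(&str, &acpi_apic_instance);
		return 0;
	}
	early_param("acpi_apic_instance", parse_acpi_apic_instance);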
acpi_sleep= [HW,ACPI] Sleep options
Format: { s3_bios, s3_mode }
See Documentation/power/video.txt
lapic [IA-32,APIC] Enable the local APIC even if BIOS
disabled it.
+ lapic_timer_c2_ok [IA-32,x86-64,APIC] trust the local apic timer in
+ C2 power state.
+
lasi= [HW,SCSI] PARISC LASI driver for the 53c700 chip
Format: addr:<io>,irq:<irq>
nolapic [IA-32,APIC] Do not enable or use the local APIC.
+ nolapic_timer [IA-32,APIC] Do not use the local APIC timer.
+
noltlbs [PPC] Do not use large page/tlb entries for kernel
lowmem mapping on PPC40x.
To use the amateur radio protocols within Linux you will need to get a
-suitable copy of the AX.25 Utilities. More detailed information about these
-and associated programs can be found on http://zone.pspt.fi/~jsn/.
-
-For more information about the AX.25, NET/ROM and ROSE protocol stacks, see
-the AX25-HOWTO written by Terry Dawson <terry@perf.no.itg.telstra.com.au>
-who is also the AX.25 Utilities maintainer.
+suitable copy of the AX.25 Utilities. More detailed information about
+AX.25, NET/ROM and ROSE, associated programs and utilities can be
+found on http://www.linux-ax25.org.
There is an active mailing list for discussing Linux amateur radio matters
-called linux-hams. To subscribe to it, send a message to
+called linux-hams@vger.kernel.org. To subscribe to it, send a message to
majordomo@vger.kernel.org with the words "subscribe linux-hams" in the body
-of the message, the subject field is ignored.
-
-Jonathan G4KLX
-
-g4klx@g4klx.demon.co.uk
+of the message; the subject field is ignored. You don't need to be
+subscribed to post, but of course that means you might miss an answer.
--------------
Usage:
- pci_save_state(dev, buffer);
+ pci_save_state(struct pci_dev *dev);
Description:
- Save first 64 bytes of PCI config space. Buffer must be allocated by
- caller.
+ Save first 64 bytes of PCI config space, along with any additional
+ PCI-Express or PCI-X information.
pci_restore_state
-----------------
Usage:
- pci_restore_state(dev, buffer);
+ pci_restore_state(struct pci_dev *dev);
Description:
- Restore previously saved config space. (First 64 bytes only);
-
- If buffer is NULL, then restore what information we know about the
- device from bootup: BARs and interrupt line.
+ Restore previously saved config space.
pci_set_power_state
-------------------
Usage:
- pci_set_power_state(dev, state);
+ pci_set_power_state(struct pci_dev *dev, pci_power_t state);
Description:
Transition device to low power state using PCI PM Capabilities
---------------
Usage:
- pci_enable_wake(dev, state, enable);
+ pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable);
Description:
Enable device to generate PME# during low power state using PCI PM
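Taken together, a driver would typically use these calls in its suspend and
resume methods. A minimal sketch under the updated prototypes above (the
foo_* names and the choice of D3hot are illustrative only):

	#include <linux/pci.h>

	static int foo_suspend(struct pci_dev *dev, pm_message_t state)
	{
		pci_save_state(dev);	/* config space + PCI-X/PCIe extras */
		pci_enable_wake(dev, PCI_D3hot, 1);	/* allow PME# */
		pci_set_power_state(dev, PCI_D3hot);
		return 0;
	}

	static int foo_resume(struct pci_dev *dev)
	{
		pci_set_power_state(dev, PCI_D0);
		pci_restore_state(dev);
		pci_enable_wake(dev, PCI_D0, 0);	/* wakeup no longer needed */
		return 0;
	}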
'p' - Will dump the current registers and flags to your console.
+'q' - Will dump a list of all running timers.
+
'r' - Turns off keyboard raw mode and sets it to XLATE.
's' - Will attempt to sync all mounted filesystems.
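Any of these keys can also be injected without the keyboard by writing the
command character to /proc/sysrq-trigger (assuming CONFIG_MAGIC_SYSRQ is
enabled). A small C sketch:

	#include <stdio.h>

	/* Write a sysrq command character, e.g. 'q' to dump the list of
	 * running timers as described above. */
	int trigger_sysrq(char key)
	{
		FILE *f = fopen("/proc/sysrq-trigger", "w");

		if (!f)
			return -1;
		fputc(key, f);
		return fclose(f);
	}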
W: http://www.stud.uni-karlsruhe.de/~uh1b/
S: Maintained
+IPS SCSI RAID DRIVER
+P: Adaptec OEM Raid Solutions
+M: aacraid@adaptec.com
+L: linux-scsi@vger.kernel.org
+W: http://www.adaptec.com/
+S: Maintained
+
+DPT_I2O SCSI RAID DRIVER
+P: Adaptec OEM Raid Solutions
+M: aacraid@adaptec.com
+L: linux-scsi@vger.kernel.org
+W: http://www.adaptec.com/
+S: Maintained
+
AACRAID SCSI RAID DRIVER
P: Adaptec OEM Raid Solutions
+M: aacraid@adaptec.com
L: linux-scsi@vger.kernel.org
-W: http://linux.dell.com/storage.shtml
+W: http://www.adaptec.com/
S: Supported
ACPI
T: quilt http://khali.linux-fr.org/devel/linux-2.6/jdelvare-i2c/
S: Maintained
-I2O
-P: Markus Lidel
-M: markus.lidel@shadowconnect.com
-W: http://i2o.shadowconnect.com/
-S: Maintained
-
i386 BOOT CODE
P: Riley H. Williams
M: Riley@Williams.Name
S: Maintained
SCTP PROTOCOL
+P: Vlad Yasevich
+M: vladislav.yasevich@hp.com
P: Sridhar Samudrala
M: sri@us.ibm.com
L: lksctp-developers@lists.sourceforge.net
+W: http://lksctp.sourceforge.net
S: Supported
SCx200 CPU SUPPORT
S: Maintained
SONY VAIO CONTROL DEVICE DRIVER
-P: Stelian Pop
-M: stelian@popies.net
P: Mattia Dongili
M: malattia@linux.it
-W: http://popies.net/sonypi/
+L: linux-acpi@vger.kernel.org
+W: http://www.linux.it/~malattia/wiki/index.php/Sony_drivers
S: Maintained
SOUND
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 21
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc6
NAME = Nocturnal Monster Puppy
# *DOCUMENTATION*
{
return dma_chan[channel].active;
}
+EXPORT_SYMBOL(dma_channel_active);
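
/*
 * Editor's note, illustrative only (not part of this patch): with
 * dma_channel_active() now exported, a module could poll a channel
 * before reprogramming it, e.g.
 *
 *	while (dma_channel_active(channel))
 *		cpu_relax();
 */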
void set_dma_page(dmach_t channel, char pagenr)
{
at91_sys_write(AT91_SMC_SETUP(3), AT91_SMC_NWESETUP_(0) | AT91_SMC_NCS_WRSETUP_(0)
| AT91_SMC_NRDSETUP_(0) | AT91_SMC_NCS_RDSETUP_(0));
- at91_sys_write(AT91_SMC_PULSE(3), AT91_SMC_NWEPULSE_(2) | AT91_SMC_NCS_WRPULSE_(5)
- | AT91_SMC_NRDPULSE_(2) | AT91_SMC_NCS_RDPULSE_(5));
+ at91_sys_write(AT91_SMC_PULSE(3), AT91_SMC_NWEPULSE_(3) | AT91_SMC_NCS_WRPULSE_(3)
+ | AT91_SMC_NRDPULSE_(3) | AT91_SMC_NCS_RDPULSE_(3));
- at91_sys_write(AT91_SMC_CYCLE(3), AT91_SMC_NWECYCLE_(7) | AT91_SMC_NRDCYCLE_(7));
+ at91_sys_write(AT91_SMC_CYCLE(3), AT91_SMC_NWECYCLE_(5) | AT91_SMC_NRDCYCLE_(5));
if (data->bus_width_16)
mode = AT91_SMC_DBW_16;
else
mode = AT91_SMC_DBW_8;
- at91_sys_write(AT91_SMC_MODE(3), mode | AT91_SMC_READMODE | AT91_SMC_WRITEMODE | AT91_SMC_EXNWMODE_DISABLE | AT91_SMC_TDF_(1));
+ at91_sys_write(AT91_SMC_MODE(3), mode | AT91_SMC_READMODE | AT91_SMC_WRITEMODE | AT91_SMC_EXNWMODE_DISABLE | AT91_SMC_TDF_(2));
/* enable pin */
if (data->enable_pin)
comment "IOP32x Platform Types"
+config MACH_EP80219
+ bool
+
config MACH_GLANTANK
bool "Enable support for the IO-Data GLAN Tank"
help
config ARCH_IQ31244
bool "Enable support for EP80219/IQ31244"
+ select MACH_EP80219
help
Say Y here if you want to run your kernel on the Intel EP80219
evaluation kit for the Intel 80219 processor (a IOP321 variant)
#include <asm/arch/time.h>
/*
- * The EP80219 and IQ31244 use the same machine ID. To find out
- * which of the two we're running on, we look at the processor ID.
+ * Until March of 2007 iq31244 platforms and ep80219 platforms shared the
+ * same machine id, and the processor type was used to select board type.
+ * However this assumption breaks for an iq80219 board which is an iop219
+ * processor on an iq31244 board. The force_ep80219 flag has been added
+ * for old boot loaders using the iq31244 machine id for an ep80219 platform.
*/
+static int force_ep80219;
+
static int is_80219(void)
{
extern int processor_id;
return !!((processor_id & 0xffffffe0) == 0x69052e20);
}
+static int is_ep80219(void)
+{
+ if (machine_is_ep80219() || force_ep80219)
+ return 1;
+ else
+ return 0;
+}
+
/*
* EP80219/IQ31244 timer tick configuration.
*/
static void __init iq31244_timer_init(void)
{
- if (is_80219()) {
+ if (is_ep80219()) {
/* 33.333 MHz crystal. */
iop_init_time(200000000);
} else {
static int __init iq31244_pci_init(void)
{
- if (machine_is_iq31244()) {
+ if (is_ep80219())
+ pci_common_init(&ep80219_pci);
+ else if (machine_is_iq31244()) {
if (is_80219()) {
- pci_common_init(&ep80219_pci);
- } else {
- pci_common_init(&iq31244_pci);
+ printk("note: iq31244 board type has been selected\n");
+ printk("note: to select ep80219 operation:\n");
+ printk("\t1/ specify \"force_ep80219\" on the kernel"
+ " command line\n");
+ printk("\t2/ update boot loader to pass"
+ " the ep80219 id: %d\n", MACH_TYPE_EP80219);
}
+ pci_common_init(&iq31244_pci);
}
return 0;
platform_device_register(&iq31244_flash_device);
platform_device_register(&iq31244_serial_device);
- if (is_80219())
+ if (is_ep80219())
pm_power_off = ep80219_power_off;
}
+static int __init force_ep80219_setup(char *str)
+{
+ force_ep80219 = 1;
+ return 1;
+}
+
+__setup("force_ep80219", force_ep80219_setup);
+
MACHINE_START(IQ31244, "Intel IQ31244")
/* Maintainer: Intel Corp. */
.phys_io = IQ31244_UART,
.timer = &iq31244_timer,
.init_machine = iq31244_init_machine,
MACHINE_END
+
+/* There should have been an ep80219 machine identifier from the beginning.
+ * Boot roms older than March 2007 do not know the ep80219 machine id. Pass
+ * "force_ep80219" on the kernel command line, otherwise iq31244 operation
+ * will be selected.
+ */
+MACHINE_START(EP80219, "Intel EP80219")
+ /* Maintainer: Intel Corp. */
+ .phys_io = IQ31244_UART,
+ .io_pg_offst = ((IQ31244_UART) >> 18) & 0xfffc,
+ .boot_params = 0xa0000100,
+ .map_io = iq31244_map_io,
+ .init_irq = iop32x_init_irq,
+ .timer = &iq31244_timer,
+ .init_machine = iq31244_init_machine,
+MACHINE_END
/* setup PM */
+#ifdef CONFIG_PM_H1940
memcpy(phys_to_virt(H1940_SUSPEND_RESUMEAT), h1940_pm_return, 1024);
+#endif
s3c2410_pm_init();
}
static void __init rx3715_init_machine(void)
{
+#ifdef CONFIG_PM_H1940
memcpy(phys_to_virt(H1940_SUSPEND_RESUMEAT), h1940_pm_return, 1024);
+#endif
s3c2410_pm_init();
s3c24xx_fb_set_platdata(&rx3715_lcdcfg);
static void s3c2443_irq_demux_dma(unsigned int irq, struct irq_desc *desc)
{
- s3c2443_irq_demux(IRQ_S3C2443_DMA1, 6);
+ s3c2443_irq_demux(IRQ_S3C2443_DMA0, 6);
}
#define INTMSK_DMA (1UL << (IRQ_S3C2443_DMA - IRQ_EINT0))
if ((err = pcibios_enable_resources(dev, mask)) < 0)
return err;
- return pcibios_enable_irq(dev);
+ if (!dev->msi_enabled)
+ pcibios_enable_irq(dev);
+ return 0;
}
int pcibios_assign_resources(void)
if ((err = pcibios_enable_resources(dev, mask)) < 0)
return err;
- pcibios_enable_irq(dev);
+ if (!dev->msi_enabled)
+ pcibios_enable_irq(dev);
return 0;
}
jmp _m_s
check_vesa:
+#ifdef CONFIG_FIRMWARE_EDID
+ leaw modelist+1024, %di
+ movw $0x4f00, %ax
+ int $0x10
+ cmpw $0x004f, %ax
+ jnz setbad
+
+ movw 4(%di), %ax
+ movw %ax, vbe_version
+#endif
leaw modelist+1024, %di
subb $VIDEO_FIRST_VESA>>8, %bh
movw %bx, %cx # Get mode information structure
rep
stosl
+ cmpw $0x0200, vbe_version # only do EDID on >= VBE2.0
+ jl no_edid
+
pushw %es # save ES
xorw %di, %di # Report Capability
pushw %di
svga_prefix: .byte VIDEO_FIRST_BIOS>>8 # Default prefix for BIOS modes
graphic_mode: .byte 0 # Graphic mode with a linear frame buffer
dac_size: .byte 6 # DAC bit depth
+vbe_version: .word 0 # VBE bios version
# Status messages
keymsg: .ascii "Press <RETURN> to see video modes available, "
#include <linux/clockchips.h>
#include <linux/acpi_pmtmr.h>
#include <linux/module.h>
+#include <linux/dmi.h>
#include <asm/atomic.h>
#include <asm/smp.h>
/* Local APIC timer verification ok */
static int local_apic_timer_verify_ok;
+/* Disable local APIC timer from the kernel commandline or via dmi quirk */
+static int local_apic_timer_disabled;
+/* Local APIC timer works in C2 */
+int local_apic_timer_c2_ok;
+EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
/*
* Debug level, exported for io_apic.c
long delta, deltapm;
int pm_referenced = 0;
+ if (boot_cpu_has(X86_FEATURE_LAPIC_TIMER_BROKEN))
+ local_apic_timer_disabled = 1;
+
+ /*
+ * The local apic timer can be disabled via the kernel
+ * commandline or from the test above. Register the lapic
+ * timer as a dummy clock event source on SMP systems, so the
+ * broadcast mechanism is used. On UP systems simply ignore it.
+ */
+ if (local_apic_timer_disabled) {
+ /* No broadcast on UP ! */
+ if (num_possible_cpus() > 1)
+ setup_APIC_timer();
+ return;
+ }
+
apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"
"calibrating APIC timer ...\n");
apic_printk(APIC_VERBOSE, "... jiffies result ok\n");
else
local_apic_timer_verify_ok = 0;
- }
+ } else
+ local_irq_enable();
if (!local_apic_timer_verify_ok) {
printk(KERN_WARNING
}
early_param("nolapic", parse_nolapic);
+static int __init parse_disable_lapic_timer(char *arg)
+{
+ local_apic_timer_disabled = 1;
+ return 0;
+}
+early_param("nolapic_timer", parse_disable_lapic_timer);
+
+static int __init parse_lapic_timer_c2_ok(char *arg)
+{
+ local_apic_timer_c2_ok = 1;
+ return 0;
+}
+early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);
+
static int __init apic_set_verbosity(char *str)
{
if (strcmp("debug", str) == 0)
extern void vide(void);
__asm__(".align 4\nvide: ret");
+#define ENABLE_C1E_MASK 0x18000000
+#define CPUID_PROCESSOR_SIGNATURE 1
+#define CPUID_XFAM 0x0ff00000
+#define CPUID_XFAM_K8 0x00000000
+#define CPUID_XFAM_10H 0x00100000
+#define CPUID_XFAM_11H 0x00200000
+#define CPUID_XMOD 0x000f0000
+#define CPUID_XMOD_REV_F 0x00040000
+
+/* AMD systems with C1E don't have a working lAPIC timer. Check for that. */
+static __cpuinit int amd_apic_timer_broken(void)
+{
+ u32 lo, hi;
+ u32 eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
+ switch (eax & CPUID_XFAM) {
+ case CPUID_XFAM_K8:
+ if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F)
+ break;
+ case CPUID_XFAM_10H:
+ case CPUID_XFAM_11H:
+ rdmsr(MSR_K8_ENABLE_C1E, lo, hi);
+ if (lo & ENABLE_C1E_MASK)
+ return 1;
+ break;
+ default:
+ /* err on the side of caution */
+ return 1;
+ }
+ return 0;
+}
+
static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
u32 l, h;
if (cpuid_eax(0x80000000) >= 0x80000006)
num_cache_leaves = 3;
+
+ if (amd_apic_timer_broken())
+ set_bit(X86_FEATURE_LAPIC_TIMER_BROKEN, c->x86_capability);
}
static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 * c, unsigned int size)
#include <linux/errno.h>
#include <linux/hpet.h>
#include <linux/init.h>
+#include <linux/sysdev.h>
+#include <linux/pm.h>
#include <asm/hpet.h>
#include <asm/io.h>
cnt += delta;
hpet_writel(cnt, HPET_T0_CMP);
- return ((long)(hpet_readl(HPET_COUNTER) - cnt ) > 0);
+ return ((long)(hpet_readl(HPET_COUNTER) - cnt ) > 0) ? -ETIME : 0;
}
/*
out_nohpet:
iounmap(hpet_virt_address);
hpet_virt_address = NULL;
+ boot_hpet_disable = 1;
return 0;
}
return IRQ_HANDLED;
}
#endif
+
+
+/*
+ * Suspend/resume part
+ */
+
+#ifdef CONFIG_PM
+
+static int hpet_suspend(struct sys_device *sys_device, pm_message_t state)
+{
+ unsigned long cfg = hpet_readl(HPET_CFG);
+
+ cfg &= ~(HPET_CFG_ENABLE|HPET_CFG_LEGACY);
+ hpet_writel(cfg, HPET_CFG);
+
+ return 0;
+}
+
+static int hpet_resume(struct sys_device *sys_device)
+{
+ unsigned int id;
+
+ hpet_start_counter();
+
+ id = hpet_readl(HPET_ID);
+
+ if (id & HPET_ID_LEGSUP)
+ hpet_enable_int();
+
+ return 0;
+}
+
+static struct sysdev_class hpet_class = {
+ set_kset_name("hpet"),
+ .suspend = hpet_suspend,
+ .resume = hpet_resume,
+};
+
+static struct sys_device hpet_device = {
+ .id = 0,
+ .cls = &hpet_class,
+};
+
+
+static __init int hpet_register_sysfs(void)
+{
+ int err;
+
+ if (!is_hpet_capable())
+ return 0;
+
+ err = sysdev_class_register(&hpet_class);
+
+ if (!err) {
+ err = sysdev_register(&hpet_device);
+ if (err)
+ sysdev_class_unregister(&hpet_class);
+ }
+
+ return err;
+}
+
+device_initcall(hpet_register_sysfs);
+
+#endif
outb(LATCH >> 8 , PIT_CH0); /* MSB */
break;
- case CLOCK_EVT_MODE_ONESHOT:
+ /*
+ * Avoid unnecessary state transitions, as it confuses
+ * Geode / Cyrix based boxen.
+ */
case CLOCK_EVT_MODE_SHUTDOWN:
+ if (evt->mode == CLOCK_EVT_MODE_UNUSED)
+ break;
case CLOCK_EVT_MODE_UNUSED:
+ if (evt->mode == CLOCK_EVT_MODE_SHUTDOWN)
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
/* One shot setup */
outb_p(0x38, PIT_MODE);
udelay(10);
return 0;
}
-int __init irqbalance_disable(char *str)
+int __devinit irqbalance_disable(char *str)
{
irqbalance_disabled = 1;
return 1;
return error;
}
+static int apply_microcode_on_cpu(int cpu)
+{
+ struct cpuinfo_x86 *c = cpu_data + cpu;
+ struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
+ cpumask_t old;
+ unsigned int val[2];
+ int err = 0;
+
+ if (!uci->mc)
+ return -EINVAL;
+
+ old = current->cpus_allowed;
+ set_cpus_allowed(current, cpumask_of_cpu(cpu));
+
+ /* Check if the microcode we have in memory matches the CPU */
+ if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
+ cpu_has(c, X86_FEATURE_IA64) || uci->sig != cpuid_eax(0x00000001))
+ err = -EINVAL;
+
+ if (!err && ((c->x86_model >= 5) || (c->x86 > 6))) {
+ /* get processor flags from MSR 0x17 */
+ rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
+ if (uci->pf != (1 << ((val[1] >> 18) & 7)))
+ err = -EINVAL;
+ }
+
+ if (!err) {
+ wrmsr(MSR_IA32_UCODE_REV, 0, 0);
+ /* see notes above for revision 1.07. Apparent chip bug */
+ sync_core();
+ /* get the current revision from MSR 0x8B */
+ rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
+ if (uci->rev != val[1])
+ err = -EINVAL;
+ }
+
+ if (!err)
+ apply_microcode(cpu);
+ else
+ printk(KERN_ERR "microcode: Could not apply microcode to CPU%d:"
+ " sig=0x%x, pf=0x%x, rev=0x%x\n",
+ cpu, uci->sig, uci->pf, uci->rev);
+
+ set_cpus_allowed(current, old);
+ return err;
+}
+
static void microcode_init_cpu(int cpu)
{
cpumask_t old;
set_cpus_allowed(current, cpumask_of_cpu(cpu));
	mutex_lock(&microcode_mutex);
collect_cpu_info(cpu);
- if (uci->valid && system_state == SYSTEM_RUNNING)
+ if (uci->valid && system_state == SYSTEM_RUNNING &&
+ !suspend_cpu_hotplug)
cpu_request_microcode(cpu);
	mutex_unlock(&microcode_mutex);
set_cpus_allowed(current, old);
return 0;
pr_debug("Microcode:CPU %d added\n", cpu);
- memset(uci, 0, sizeof(*uci));
+ /* If suspend_cpu_hotplug is set, the system is resuming and we should
+ * use the data from before the suspend.
+ */
+ if (suspend_cpu_hotplug) {
+ err = apply_microcode_on_cpu(cpu);
+ if (err)
+ microcode_fini_cpu(cpu);
+ }
+ if (!uci->valid)
+ memset(uci, 0, sizeof(*uci));
err = sysfs_create_group(&sys_dev->kobj, &mc_attr_group);
if (err)
return err;
- microcode_init_cpu(cpu);
+ if (!uci->valid)
+ microcode_init_cpu(cpu);
+
return 0;
}
if (!cpu_online(cpu))
return 0;
pr_debug("Microcode:CPU %d removed\n", cpu);
- microcode_fini_cpu(cpu);
+ /* If suspend_cpu_hotplug is set, the system is suspending and we should
+ * keep the microcode in memory for the resume.
+ */
+ if (!suspend_cpu_hotplug)
+ microcode_fini_cpu(cpu);
sysfs_remove_group(&sys_dev->kobj, &mc_attr_group);
return 0;
}
/* checks for a bit availability (hack for oprofile) */
int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
{
+ int cpu;
BUG_ON(counter > NMI_MAX_COUNTER_BITS);
-
- return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
+ for_each_possible_cpu (cpu) {
+ if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
+ return 0;
+ }
+ return 1;
}
/* checks the an msr for availability */
int avail_to_resrv_perfctr_nmi(unsigned int msr)
{
unsigned int counter;
+ int cpu;
counter = nmi_perfctr_msr_to_bit(msr);
BUG_ON(counter > NMI_MAX_COUNTER_BITS);
- return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
+ for_each_possible_cpu (cpu) {
+ if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
+ return 0;
+ }
+ return 1;
}
-int reserve_perfctr_nmi(unsigned int msr)
+static int __reserve_perfctr_nmi(int cpu, unsigned int msr)
{
unsigned int counter;
+ if (cpu < 0)
+ cpu = smp_processor_id();
counter = nmi_perfctr_msr_to_bit(msr);
BUG_ON(counter > NMI_MAX_COUNTER_BITS);
- if (!test_and_set_bit(counter, &__get_cpu_var(perfctr_nmi_owner)))
+ if (!test_and_set_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
return 1;
return 0;
}
-void release_perfctr_nmi(unsigned int msr)
+static void __release_perfctr_nmi(int cpu, unsigned int msr)
{
unsigned int counter;
+ if (cpu < 0)
+ cpu = smp_processor_id();
counter = nmi_perfctr_msr_to_bit(msr);
BUG_ON(counter > NMI_MAX_COUNTER_BITS);
- clear_bit(counter, &__get_cpu_var(perfctr_nmi_owner));
+ clear_bit(counter, &per_cpu(perfctr_nmi_owner, cpu));
}
-int reserve_evntsel_nmi(unsigned int msr)
+int reserve_perfctr_nmi(unsigned int msr)
+{
+ int cpu, i;
+ for_each_possible_cpu (cpu) {
+ if (!__reserve_perfctr_nmi(cpu, msr)) {
+ for_each_possible_cpu (i) {
+ if (i >= cpu)
+ break;
+ __release_perfctr_nmi(i, msr);
+ }
+ return 0;
+ }
+ }
+ return 1;
+}
+
+void release_perfctr_nmi(unsigned int msr)
+{
+ int cpu;
+ for_each_possible_cpu (cpu) {
+ __release_perfctr_nmi(cpu, msr);
+ }
+}
+
+static int __reserve_evntsel_nmi(int cpu, unsigned int msr)
{
unsigned int counter;
+ if (cpu < 0)
+ cpu = smp_processor_id();
counter = nmi_evntsel_msr_to_bit(msr);
BUG_ON(counter > NMI_MAX_COUNTER_BITS);
- if (!test_and_set_bit(counter, &__get_cpu_var(evntsel_nmi_owner)[0]))
+ if (!test_and_set_bit(counter, &per_cpu(evntsel_nmi_owner, cpu)[0]))
return 1;
return 0;
}
-void release_evntsel_nmi(unsigned int msr)
+static void __release_evntsel_nmi(int cpu, unsigned int msr)
{
unsigned int counter;
+ if (cpu < 0)
+ cpu = smp_processor_id();
counter = nmi_evntsel_msr_to_bit(msr);
BUG_ON(counter > NMI_MAX_COUNTER_BITS);
- clear_bit(counter, &__get_cpu_var(evntsel_nmi_owner)[0]);
+ clear_bit(counter, &per_cpu(evntsel_nmi_owner, cpu)[0]);
+}
+
+int reserve_evntsel_nmi(unsigned int msr)
+{
+ int cpu, i;
+ for_each_possible_cpu (cpu) {
+ if (!__reserve_evntsel_nmi(cpu, msr)) {
+ for_each_possible_cpu (i) {
+ if (i >= cpu)
+ break;
+ __release_evntsel_nmi(i, msr);
+ }
+ return 0;
+ }
+ }
+ return 1;
+}
+
+void release_evntsel_nmi(unsigned int msr)
+{
+ int cpu;
+ for_each_possible_cpu (cpu) {
+ __release_evntsel_nmi(cpu, msr);
+ }
}
static __cpuinit inline int nmi_known_cpu(void)
for_each_possible_cpu(cpu)
prev_nmi_count[cpu] = per_cpu(irq_stat, cpu).__nmi_count;
local_irq_enable();
- mdelay((10*1000)/nmi_hz); // wait 10 ticks
+ mdelay((20*1000)/nmi_hz); // wait 20 ticks
for_each_possible_cpu(cpu) {
#ifdef CONFIG_SMP
perfctr_msr = MSR_K7_PERFCTR0;
evntsel_msr = MSR_K7_EVNTSEL0;
- if (!reserve_perfctr_nmi(perfctr_msr))
+ if (!__reserve_perfctr_nmi(-1, perfctr_msr))
goto fail;
- if (!reserve_evntsel_nmi(evntsel_msr))
+ if (!__reserve_evntsel_nmi(-1, evntsel_msr))
goto fail1;
wrmsrl(perfctr_msr, 0UL);
wd->check_bit = 1ULL<<63;
return 1;
fail1:
- release_perfctr_nmi(perfctr_msr);
+ __release_perfctr_nmi(-1, perfctr_msr);
fail:
return 0;
}
wrmsr(wd->evntsel_msr, 0, 0);
- release_evntsel_nmi(wd->evntsel_msr);
- release_perfctr_nmi(wd->perfctr_msr);
+ __release_evntsel_nmi(-1, wd->evntsel_msr);
+ __release_perfctr_nmi(-1, wd->perfctr_msr);
}
#define P6_EVNTSEL0_ENABLE (1 << 22)
perfctr_msr = MSR_P6_PERFCTR0;
evntsel_msr = MSR_P6_EVNTSEL0;
- if (!reserve_perfctr_nmi(perfctr_msr))
+ if (!__reserve_perfctr_nmi(-1, perfctr_msr))
goto fail;
- if (!reserve_evntsel_nmi(evntsel_msr))
+ if (!__reserve_evntsel_nmi(-1, evntsel_msr))
goto fail1;
wrmsrl(perfctr_msr, 0UL);
wd->check_bit = 1ULL<<39;
return 1;
fail1:
- release_perfctr_nmi(perfctr_msr);
+ __release_perfctr_nmi(-1, perfctr_msr);
fail:
return 0;
}
wrmsr(wd->evntsel_msr, 0, 0);
- release_evntsel_nmi(wd->evntsel_msr);
- release_perfctr_nmi(wd->perfctr_msr);
+ __release_evntsel_nmi(-1, wd->evntsel_msr);
+ __release_perfctr_nmi(-1, wd->perfctr_msr);
}
/* Note that these events don't tick when the CPU idles. This means
cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4);
}
- if (!reserve_perfctr_nmi(perfctr_msr))
+ if (!__reserve_perfctr_nmi(-1, perfctr_msr))
goto fail;
- if (!reserve_evntsel_nmi(evntsel_msr))
+ if (!__reserve_evntsel_nmi(-1, evntsel_msr))
goto fail1;
evntsel = P4_ESCR_EVENT_SELECT(0x3F)
wd->check_bit = 1ULL<<39;
return 1;
fail1:
- release_perfctr_nmi(perfctr_msr);
+ __release_perfctr_nmi(-1, perfctr_msr);
fail:
return 0;
}
wrmsr(wd->cccr_msr, 0, 0);
wrmsr(wd->evntsel_msr, 0, 0);
- release_evntsel_nmi(wd->evntsel_msr);
- release_perfctr_nmi(wd->perfctr_msr);
+ __release_evntsel_nmi(-1, wd->evntsel_msr);
+ __release_perfctr_nmi(-1, wd->perfctr_msr);
}
#define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;
evntsel_msr = MSR_ARCH_PERFMON_EVENTSEL0;
- if (!reserve_perfctr_nmi(perfctr_msr))
+ if (!__reserve_perfctr_nmi(-1, perfctr_msr))
goto fail;
- if (!reserve_evntsel_nmi(evntsel_msr))
+ if (!__reserve_evntsel_nmi(-1, evntsel_msr))
goto fail1;
wrmsrl(perfctr_msr, 0UL);
wd->check_bit = 1ULL << (eax.split.bit_width - 1);
return 1;
fail1:
- release_perfctr_nmi(perfctr_msr);
+ __release_perfctr_nmi(-1, perfctr_msr);
fail:
return 0;
}
return;
wrmsr(wd->evntsel_msr, 0, 0);
- release_evntsel_nmi(wd->evntsel_msr);
- release_perfctr_nmi(wd->perfctr_msr);
+ __release_evntsel_nmi(-1, wd->evntsel_msr);
+ __release_perfctr_nmi(-1, wd->perfctr_msr);
}
void setup_apic_nmi_watchdog (void *unused)
#include "mach_timer.h"
+static int tsc_enabled;
+
/*
* On some systems the TSC frequency does not
* change with the cpu frequency. So we need
/*
* Fall back to jiffies if there's no TSC available:
*/
- if (tsc_unstable || unlikely(tsc_disable))
+ if (unlikely(!tsc_enabled))
/* No locking but a rare wrong value is not a big deal: */
return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
{
if (!tsc_unstable) {
tsc_unstable = 1;
+ tsc_enabled = 0;
/* Can be called before registration */
if (clocksource_tsc.mult)
clocksource_change_rating(&clocksource_tsc, 0);
if (check_tsc_unstable()) {
clocksource_tsc.rating = 0;
clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
- }
+ } else
+ tsc_enabled = 1;
+
clocksource_register(&clocksource_tsc);
return;
void (*flush_tlb)(int);
void (*set_initial_ap_state)(int, int);
void (*halt)(void);
+ void (*set_lazy_mode)(int mode);
} vmi_ops;
/* XXX move this to alternative.h */
}
#endif
+static void vmi_set_lazy_mode(int mode)
+{
+ static DEFINE_PER_CPU(int, lazy_mode);
+
+ if (!vmi_ops.set_lazy_mode)
+ return;
+
+ /* Modes should never nest or overlap */
+ BUG_ON(__get_cpu_var(lazy_mode) && !(mode == PARAVIRT_LAZY_NONE ||
+ mode == PARAVIRT_LAZY_FLUSH));
+
+ if (mode == PARAVIRT_LAZY_FLUSH) {
+ vmi_ops.set_lazy_mode(0);
+ vmi_ops.set_lazy_mode(__get_cpu_var(lazy_mode));
+ } else {
+ vmi_ops.set_lazy_mode(mode);
+ __get_cpu_var(lazy_mode) = mode;
+ }
+}
+
static inline int __init check_vmi_rom(struct vrom_header *rom)
{
struct pci_header *pci;
para_wrap(load_esp0, vmi_load_esp0, set_kernel_stack, UpdateKernelStack);
para_fill(set_iopl_mask, SetIOPLMask);
para_fill(io_delay, IODelay);
- para_fill(set_lazy_mode, SetLazyMode);
+ para_wrap(set_lazy_mode, vmi_set_lazy_mode, set_lazy_mode, SetLazyMode);
/* user and kernel flush are just handled with different flags to FlushTLB */
para_wrap(flush_tlb_user, vmi_flush_tlb_user, flush_tlb, FlushTLB);
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
+#include <linux/interrupt.h>
#include <asm/uaccess.h>
#include <asm/mmx.h>
#ifndef CONFIG_X86_WP_WORKS_OK
if (unlikely(boot_cpu_data.wp_works_ok == 0) &&
((unsigned long )to) < TASK_SIZE) {
+ /*
+ * When we are in an atomic section (see
+ * mm/filemap.c:file_read_actor), return the full
+ * length to take the slow path.
+ */
+ if (in_atomic())
+ return n;
+
/*
* CPU does not honor the WP bit when writing
* from supervisory mode, and due to preemption or SMP,
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
+ arch_flush_lazy_mmu_mode();
return (void*) vaddr;
}
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
+ arch_flush_lazy_mmu_mode();
return (void*) vaddr;
}
DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2950"),
},
},
+ {
+ .callback = set_bf_sort,
+ .ident = "Dell PowerEdge R900",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R900"),
+ },
+ },
{
.callback = set_bf_sort,
.ident = "HP ProLiant BL20p G3",
if ((err = pcibios_enable_resources(dev, mask)) < 0)
return err;
- return pcibios_enable_irq(dev);
+ if (!dev->msi_enabled)
+ return pcibios_enable_irq(dev);
+ return 0;
}
void pcibios_disable_device (struct pci_dev *dev)
{
- if (pcibios_disable_irq)
+ if (!dev->msi_enabled && pcibios_disable_irq)
pcibios_disable_irq(dev);
}
bool
select PCI if (!IA64_HP_SIM)
select ACPI if (!IA64_HP_SIM)
+ select PM if (!IA64_HP_SIM)
default y
help
The Itanium Processor Family is Intel's 64-bit successor to
{
struct msi_msg msg;
unsigned long dest_phys_id;
- unsigned int irq, vector;
+ int irq, vector;
irq = create_irq();
if (irq < 0)
smp_callin (void)
{
int cpuid, phys_id, itc_master;
+ struct cpuinfo_ia64 *last_cpuinfo, *this_cpuinfo;
extern void ia64_init_itm(void);
extern volatile int time_keeper_id;
* Get our bogomips.
*/
ia64_init_itm();
- calibrate_delay();
+
+ /*
+ * Delay calibration can be skipped if new processor is identical to the
+ * previous processor.
+ */
+ last_cpuinfo = cpu_data(cpuid - 1);
+ this_cpuinfo = local_cpu_data;
+ if (last_cpuinfo->itc_freq != this_cpuinfo->itc_freq ||
+ last_cpuinfo->proc_freq != this_cpuinfo->proc_freq ||
+ last_cpuinfo->features != this_cpuinfo->features ||
+ last_cpuinfo->revision != this_cpuinfo->revision ||
+ last_cpuinfo->family != this_cpuinfo->family ||
+ last_cpuinfo->archrev != this_cpuinfo->archrev ||
+ last_cpuinfo->model != this_cpuinfo->model)
+ calibrate_delay();
local_cpu_data->loops_per_jiffy = loops_per_jiffy;
#ifdef CONFIG_IA32_SUPPORT
if (stack_size > MAX_USER_STACK_SIZE)
stack_size = MAX_USER_STACK_SIZE;
- current->thread.rbs_bot = STACK_TOP - stack_size;
+ current->thread.rbs_bot = PAGE_ALIGN(current->mm->start_stack - stack_size);
}
/*
if (ret < 0)
return ret;
- return acpi_pci_irq_enable(dev);
+ if (!dev->msi_enabled)
+ return acpi_pci_irq_enable(dev);
+ return 0;
}
void
pcibios_disable_device (struct pci_dev *dev)
{
BUG_ON(atomic_read(&dev->enable_cnt));
- acpi_pci_irq_disable(dev);
+ if (!dev->msi_enabled)
+ acpi_pci_irq_disable(dev);
}
void
config MIPS_MT_SMTC_INSTANT_REPLAY
bool "Low-latency Dispatch of Deferred SMTC IPIs"
- depends on MIPS_MT_SMTC
+ depends on MIPS_MT_SMTC && !PREEMPT
default y
help
SMTC pseudo-interrupts between TCs are deferred and queued
char **arg = (char **) fw_arg1;
char **env = (char **) fw_arg2;
struct callvectors *cv = (struct callvectors *) fw_arg3;
- uint32_t tmp;
int i;
/* save the PROM vectors for debugging use */
static void __init setup_l3cache(unsigned long size);
/* setup code for a handoff from a version 1 PMON 2000 PROM */
-void PMON_v1_setup()
+static void PMON_v1_setup(void)
{
/* A wired TLB entry for the GT64120A and the serial port. The
GT64120A is going to be hit on every IRQ anyway - there's
* aligned and should be uncached to avoid cache flushing after every
* update.
*/
- vdma_pagetable_start = alloc_bootmem_low_pages(VDMA_PGTBL_SIZE);
+ vdma_pagetable_start =
+ (unsigned long) alloc_bootmem_low_pages(VDMA_PGTBL_SIZE);
if (!vdma_pagetable_start)
BUG();
dma_cache_wback_inv(vdma_pagetable_start, VDMA_PGTBL_SIZE);
SAVE_AT
SAVE_TEMP
LONG_L v0, PT_STATUS(sp)
- and v0, 1
+#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
+ and v0, ST0_IEP
+#else
+ and v0, ST0_IE
+#endif
beqz v0, 1f
jal trace_hardirqs_on
b 2f
.align 5
NESTED(handle_int, PT_SIZE, sp)
+#ifdef CONFIG_TRACE_IRQFLAGS
+ /*
+ * Check to see if the interrupted code has just disabled
+ * interrupts and ignore this interrupt for now if so.
+ *
+ * local_irq_disable() disables interrupts and then calls
+ * trace_hardirqs_off() to track the state. If an interrupt is taken
+ * after interrupts are disabled but before the state is updated
+ * it will appear to restore_all that it is incorrectly returning with
+ * interrupts disabled
+ */
+ .set push
+ .set noat
+ mfc0 k0, CP0_STATUS
+#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
+ and k0, ST0_IEP
+ bnez k0, 1f
+
+ mfc0 k0, CP0_EPC
+ .set noreorder
+ j k0
+ rfe
+#else
+ and k0, ST0_IE
+ bnez k0, 1f
+
+ eret
+#endif
+1:
+ .set pop
+#endif
SAVE_ALL
CLI
TRACE_IRQS_OFF
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <asm/cpu.h>
#include <asm/hazards.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
+#include <asm/mips-boards/maltaint.h>
#include <asm/mipsregs.h>
#include <asm/cacheflush.h>
#include <asm/time.h>
void ipi_decode(struct smtc_ipi *);
static void post_direct_ipi(int cpu, struct smtc_ipi *pipi);
-static void setup_cross_vpe_interrupts(void);
+static void setup_cross_vpe_interrupts(unsigned int nvpe);
void init_smtc_stats(void);
/* Global SMTC Status */
int imstuckcount[2][8];
/* vpemask represents IM/IE bits of per-VPE Status registers, low-to-high */
-int vpemask[2][8] = {{0,1,1,0,0,0,0,1},{0,1,0,0,0,0,0,1}};
+int vpemask[2][8] = {
+ {0, 0, 1, 0, 0, 0, 0, 1},
+ {0, 0, 0, 0, 0, 0, 0, 1}
+};
int tcnoprog[NR_CPUS];
static atomic_t idle_hook_initialized = {0};
static int clock_hang_reported[NR_CPUS];
/* If we have multiple VPEs running, set up the cross-VPE interrupt */
- if (nvpe > 1)
- setup_cross_vpe_interrupts();
+ setup_cross_vpe_interrupts(nvpe);
/* Set up queue of free IPI "messages". */
nipi = NR_CPUS * IPIBUF_PER_CPU;
int setup_irq_smtc(unsigned int irq, struct irqaction * new,
unsigned long hwmask)
{
+ unsigned int vpe = current_cpu_data.vpe_id;
+
irq_hwmask[irq] = hwmask;
+#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
+ vpemask[vpe][irq - MIPSCPU_INT_BASE] = 1;
+#endif
return setup_irq(irq, new);
}
smtc_ipi_nq(&freeIPIq, pipi);
switch (type_copy) {
case SMTC_CLOCK_TICK:
+ irq_enter();
+ kstat_this_cpu.irqs[MIPSCPU_INT_BASE + MIPSCPU_INT_CPUCTR]++;
/* Invoke Clock "Interrupt" */
ipi_timer_latch[dest_copy] = 0;
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
clock_hang_reported[dest_copy] = 0;
#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
local_timer_interrupt(0, NULL);
+ irq_exit();
break;
case LINUX_SMP_IPI:
switch ((int)arg_copy) {
static struct irqaction irq_ipi;
-static void setup_cross_vpe_interrupts(void)
+static void setup_cross_vpe_interrupts(unsigned int nvpe)
{
+ if (nvpe < 1)
+ return;
+
if (!cpu_has_vint)
panic("SMTC Kernel requires Vectored Interupt support");
/*
* SMTC-specific hacks invoked from elsewhere in the kernel.
+ *
+ * smtc_ipi_replay is called from raw_local_irq_restore which is only ever
+ * called with interrupts disabled. We do rely on interrupts being disabled
+ * here because using spin_lock_irqsave()/spin_unlock_irqrestore() would
+ * result in a recursive call to raw_local_irq_restore().
*/
-void smtc_ipi_replay(void)
+static void __smtc_ipi_replay(void)
{
+ unsigned int cpu = smp_processor_id();
+
/*
* To the extent that we've ever turned interrupts off,
* we may have accumulated deferred IPIs. This is subtle.
* is clear, and we'll handle it as a real pseudo-interrupt
* and not a pseudo-pseudo interrupt.
*/
- if (IPIQ[smp_processor_id()].depth > 0) {
- struct smtc_ipi *pipi;
- extern void self_ipi(struct smtc_ipi *);
+ if (IPIQ[cpu].depth > 0) {
+ while (1) {
+ struct smtc_ipi_q *q = &IPIQ[cpu];
+ struct smtc_ipi *pipi;
+ extern void self_ipi(struct smtc_ipi *);
+
+ spin_lock(&q->lock);
+ pipi = __smtc_ipi_dq(q);
+ spin_unlock(&q->lock);
+ if (!pipi)
+ break;
- while ((pipi = smtc_ipi_dq(&IPIQ[smp_processor_id()]))) {
self_ipi(pipi);
- smtc_cpu_stats[smp_processor_id()].selfipis++;
+ smtc_cpu_stats[cpu].selfipis++;
}
}
}
+void smtc_ipi_replay(void)
+{
+ raw_local_irq_disable();
+ __smtc_ipi_replay();
+}
+
EXPORT_SYMBOL(smtc_ipi_replay);
void smtc_idle_loop_hook(void)
* is in use, there should never be any.
*/
#ifndef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
- smtc_ipi_replay();
+ {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __smtc_ipi_replay();
+ local_irq_restore(flags);
+ }
#endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */
}
{
}
-static void local_r3k_flush_data_cache_page(unsigned long addr)
+static void local_r3k_flush_data_cache_page(void *addr)
{
}
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1994 - 2003 by Ralf Baechle
+ * Copyright (C) 1994 - 2003, 07 by Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2007 MIPS Technologies, Inc.
*/
#include <linux/init.h>
#include <linux/kernel.h>
EXPORT_SYMBOL(__flush_dcache_page);
+void __flush_anon_page(struct page *page, unsigned long vmaddr)
+{
+ if (pages_do_alias((unsigned long)page_address(page), vmaddr)) {
+ void *kaddr;
+
+ kaddr = kmap_coherent(page, vmaddr);
+ flush_data_cache_page((unsigned long)kaddr);
+ kunmap_coherent(kaddr);
+ }
+}
+
+EXPORT_SYMBOL(__flush_anon_page);
+
void __update_cache(struct vm_area_struct *vma, unsigned long address,
pte_t pte)
{
asmlinkage void sb1_cache_error(void)
{
- uint64_t cerr_dpa;
uint32_t errctl, cerr_i, cerr_d, dpalo, dpahi, eepc, res;
+ unsigned long long cerr_dpa;
#ifdef CONFIG_SIBYTE_BW_TRACE
/* Freeze the trace buffer now */
{
unsigned short way;
int valid;
- uint64_t taglo, va, tlo_tmp;
uint32_t taghi, taglolo, taglohi;
+ unsigned long long taglo, va;
+ uint64_t tlo_tmp;
uint8_t lru;
int res = 0;
{
int valid, way;
unsigned char state;
- uint64_t taglo, pa;
uint32_t taghi, taglolo, taglohi;
+ unsigned long long taglo, pa;
uint8_t ecc, lru;
int res = 0;
}
if (data) {
- uint64_t datalo;
uint32_t datalohi, datalolo, datahi;
+ unsigned long long datalo;
int offset;
char bad_ecc = 0;
#include <dma-coherence.h>
+static inline unsigned long dma_addr_to_virt(dma_addr_t dma_addr)
+{
+ unsigned long addr = plat_dma_addr_to_phys(dma_addr);
+
+ return (unsigned long)phys_to_virt(addr);
+}
+
/*
* Warning on the terminology - Linux calls an uncached area coherent;
* MIPS terminology calls memory areas with hardware maintained coherency
enum dma_data_direction direction)
{
if (cpu_is_noncoherent_r10000(dev))
- __dma_sync(plat_dma_addr_to_phys(dma_addr) + PAGE_OFFSET, size,
+ __dma_sync(dma_addr_to_virt(dma_addr), size,
direction);
plat_unmap_dma_mem(dma_addr);
if (cpu_is_noncoherent_r10000(dev)) {
unsigned long addr;
- addr = PAGE_OFFSET + plat_dma_addr_to_phys(dma_handle);
+ addr = dma_addr_to_virt(dma_handle);
__dma_sync(addr, size, direction);
}
}
if (!plat_device_is_coherent(dev)) {
unsigned long addr;
- addr = PAGE_OFFSET + plat_dma_addr_to_phys(dma_handle);
+ addr = dma_addr_to_virt(dma_handle);
__dma_sync(addr, size, direction);
}
}
if (cpu_is_noncoherent_r10000(dev)) {
unsigned long addr;
- addr = PAGE_OFFSET + plat_dma_addr_to_phys(dma_handle);
+ addr = dma_addr_to_virt(dma_handle);
__dma_sync(addr + offset, size, direction);
}
}
if (!plat_device_is_coherent(dev)) {
unsigned long addr;
- addr = PAGE_OFFSET + plat_dma_addr_to_phys(dma_handle);
+ addr = dma_addr_to_virt(dma_handle);
__dma_sync(addr + offset, size, direction);
}
}
siginfo_t info;
#if 0
- printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", smp_processor_id(),
+ printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", raw_smp_processor_id(),
current->comm, current->pid, field, address, write,
field, regs->cp0_epc);
#endif
printk(KERN_ALERT "CPU %d Unable to handle kernel paging request at "
"virtual address %0*lx, epc == %0*lx, ra == %0*lx\n",
- smp_processor_id(), field, address, field, regs->cp0_epc,
+ raw_smp_processor_id(), field, address, field, regs->cp0_epc,
field, regs->regs[31]);
die("Oops", regs);
pmd_t *pmd, *pmd_k;
pte_t *pte_k;
- pgd = (pgd_t *) pgd_current[smp_processor_id()] + offset;
+ pgd = (pgd_t *) pgd_current[raw_smp_processor_id()] + offset;
pgd_k = init_mm.pgd + offset;
if (!pgd_present(*pgd_k))
static inline void kmap_coherent_init(void) {}
#endif
-static inline void *kmap_coherent(struct page *page, unsigned long addr)
+void *kmap_coherent(struct page *page, unsigned long addr)
{
enum fixed_addresses idx;
unsigned long vaddr, flags, entrylo;
#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
-static inline void kunmap_coherent(struct page *page)
+void kunmap_coherent(struct page *page)
{
#ifndef CONFIG_MIPS_MT_SMTC
unsigned int wired;
#ifdef CONFIG_FLATMEM
free_area_init(zones_size);
#else
- pfn = 0;
+ pfn = min_low_pfn;
for (i = 0; i < MAX_NR_ZONES; i++)
for (j = 0; j < zones_size[i]; j++, pfn++)
if (!page_is_ram(pfn))
for (i = 0; i < DM_NUM_CHANNELS; i++) {
const u64 base_val = CPHYSADDR(&page_descr[i]) |
V_DM_DSCR_BASE_RINGSZ(1);
- volatile void *base_reg =
- IOADDR(A_DM_REGISTER(i, R_DM_DSCR_BASE));
+ void *base_reg = IOADDR(A_DM_REGISTER(i, R_DM_DSCR_BASE));
__raw_writeq(base_val, base_reg);
__raw_writeq(base_val | M_DM_DSCR_BASE_RESET, base_reg);
/*
* See if the PCI bus has been configured by the firmware.
*/
- reg = *((volatile uint64_t *) IOADDR(A_SCD_SYSTEM_CFG));
+ reg = __raw_readq(IOADDR(A_SCD_SYSTEM_CFG));
if (!(reg & M_BCM1480_SYS_PCI_HOST)) {
bcm1480_bus_status |= PCI_DEVICE_MODE;
} else {
#include <linux/pci.h>
+#include <asm/irq.h>
int __init pcibios_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
/*
* See if the PCI bus has been configured by the firmware.
*/
- reg = *((volatile uint64_t *) IOADDR(A_SCD_SYSTEM_CFG));
+ reg = __raw_readq(IOADDR(A_SCD_SYSTEM_CFG));
if (!(reg & M_SYS_PCI_HOST)) {
sb1250_bus_status |= PCI_DEVICE_MODE;
} else {
unsigned long flags;
unsigned int irq_dirty;
- i = first_cpu(mask);
- if (next_cpu(i, mask) <= NR_CPUS) {
+ if (cpus_weight(mask) != 1) {
printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq);
return;
}
+ i = first_cpu(mask);
/* Convert logical CPU to physical CPU */
cpu = cpu_logical_map(i);
* independent of board/firmware
*/
-static volatile void *mailbox_0_set_regs[] = {
+static void *mailbox_0_set_regs[] = {
IOADDR(A_BCM1480_IMR_CPU0_BASE + R_BCM1480_IMR_MAILBOX_0_SET_CPU),
IOADDR(A_BCM1480_IMR_CPU1_BASE + R_BCM1480_IMR_MAILBOX_0_SET_CPU),
IOADDR(A_BCM1480_IMR_CPU2_BASE + R_BCM1480_IMR_MAILBOX_0_SET_CPU),
IOADDR(A_BCM1480_IMR_CPU3_BASE + R_BCM1480_IMR_MAILBOX_0_SET_CPU),
};
-static volatile void *mailbox_0_clear_regs[] = {
+static void *mailbox_0_clear_regs[] = {
IOADDR(A_BCM1480_IMR_CPU0_BASE + R_BCM1480_IMR_MAILBOX_0_CLR_CPU),
IOADDR(A_BCM1480_IMR_CPU1_BASE + R_BCM1480_IMR_MAILBOX_0_CLR_CPU),
IOADDR(A_BCM1480_IMR_CPU2_BASE + R_BCM1480_IMR_MAILBOX_0_CLR_CPU),
IOADDR(A_BCM1480_IMR_CPU3_BASE + R_BCM1480_IMR_MAILBOX_0_CLR_CPU),
};
-static volatile void *mailbox_0_regs[] = {
+static void *mailbox_0_regs[] = {
IOADDR(A_BCM1480_IMR_CPU0_BASE + R_BCM1480_IMR_MAILBOX_0_CPU),
IOADDR(A_BCM1480_IMR_CPU1_BASE + R_BCM1480_IMR_MAILBOX_0_CPU),
IOADDR(A_BCM1480_IMR_CPU2_BASE + R_BCM1480_IMR_MAILBOX_0_CPU),
#define LEDS_PHYS MLEDS_PHYS
#endif
-#define setled(index, c) \
- ((unsigned char *)(IOADDR(LEDS_PHYS)+0x20))[(3-(index))<<3] = (c)
void setleds(char *str)
{
+ void *reg;
int i;
+
for (i = 0; i < 4; i++) {
- if (!str[i]) {
- setled(i, ' ');
- } else {
- setled(i, str[i]);
- }
+ reg = IOADDR(LEDS_PHYS) + 0x20 + ((3 - i) << 3);
+
+ if (!str[i])
+ writeb(' ', reg);
+ else
+ writeb(str[i], reg);
}
}
-#endif
+
+#endif /* LEDS_PHYS */
#ifdef CONFIG_PPC64
struct thread_info *t = current_thread_info();
- if (t->flags & _TIF_ABI_PENDING)
- t->flags ^= (_TIF_ABI_PENDING | _TIF_32BIT);
+ if (test_ti_thread_flag(t, TIF_ABI_PENDING)) {
+ clear_ti_thread_flag(t, TIF_ABI_PENDING);
+ if (test_ti_thread_flag(t, TIF_32BIT))
+ clear_ti_thread_flag(t, TIF_32BIT);
+ else
+ set_ti_thread_flag(t, TIF_32BIT);
+ }
#endif
discard_lazy_cpu_state();
/*
* postcall is performed immediately before function return which
- * allows liberal use of volatile registers.
+ * allows liberal use of volatile registers. We branch around this
+ * in early init (eg when populating the MMU hashtable) by using an
+ * unconditional cpu feature.
*/
#define HCALL_INST_POSTCALL \
+BEGIN_FTR_SECTION; \
+ b 1f; \
+END_FTR_SECTION(0, 1); \
ld r4,STK_PARM(r3)(r1); /* validate opcode */ \
cmpldi cr7,r4,MAX_HCALL_OPCODE; \
bgt- cr7,1f; \
blr /* return r3 = status */
+/*
+ * plpar_hcall_raw can be called in real mode. kexec/kdump need some
+ * hypervisor calls to be executed in real mode. So plpar_hcall_raw
+ * does not access the per cpu hypervisor call statistics variables,
+ * since these variables may not be present in the RMO region.
+ */
+_GLOBAL(plpar_hcall_raw)
+ HMT_MEDIUM
+
+ mfcr r0
+ stw r0,8(r1)
+
+ std r4,STK_PARM(r4)(r1) /* Save ret buffer */
+
+ mr r4,r5
+ mr r5,r6
+ mr r6,r7
+ mr r7,r8
+ mr r8,r9
+ mr r9,r10
+
+ HVSC /* invoke the hypervisor */
+
+ ld r12,STK_PARM(r4)(r1)
+ std r4, 0(r12)
+ std r5, 8(r12)
+ std r6, 16(r12)
+ std r7, 24(r12)
+
+ lwz r0,8(r1)
+ mtcrf 0xff,r0
+
+ blr /* return r3 = status */
+
_GLOBAL(plpar_hcall9)
HMT_MEDIUM
/* TODO: Use bulk call */
for (i = 0; i < hpte_count; i++)
- plpar_pte_remove(0, i, 0, &dummy1, &dummy2);
+ plpar_pte_remove_raw(0, i, 0, &dummy1, &dummy2);
}
/*
return rc;
}
+/* plpar_pte_remove_raw can be called in real mode. It calls plpar_hcall_raw */
+static inline long plpar_pte_remove_raw(unsigned long flags, unsigned long ptex,
+ unsigned long avpn, unsigned long *old_pteh_ret,
+ unsigned long *old_ptel_ret)
+{
+ long rc;
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+ rc = plpar_hcall_raw(H_REMOVE, retbuf, flags, ptex, avpn);
+
+ *old_pteh_ret = retbuf[0];
+ *old_ptel_ret = retbuf[1];
+
+ return rc;
+}
+
static inline long plpar_pte_read(unsigned long flags, unsigned long ptex,
unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
{
if (h.token == NULL)
return;
- h.token -= dcr_n * h.stride;
+ h.token += dcr_n * h.stride;
iounmap(h.token);
h.token = NULL;
}
/* allocate 2 internal temporary buffers (512 bytes size each) for
* the SDMA */
- sdma_buf_offset = qe_muram_alloc(512 * 2, 64);
+ sdma_buf_offset = qe_muram_alloc(512 * 2, 4096);
if (IS_MURAM_ERR(sdma_buf_offset))
return -ENOMEM;
out_be32(&sdma->sdebcr, sdma_buf_offset & QE_SDEBCR_BA_MASK);
- out_be32(&sdma->sdmr, (QE_SDMR_GLB_1_MSK | (0x1 >>
- QE_SDMR_CEN_SHIFT)));
+ out_be32(&sdma->sdmr, (QE_SDMR_GLB_1_MSK |
+ (0x1 << QE_SDMR_CEN_SHIFT)));
return 0;
}
* shall not cross any page boundaries (vmalloc area!) when writing
* the new instruction.
*/
- addr = (u32 *)ALIGN((unsigned long)args->ptr, 4);
+ addr = (u32 *)((unsigned long)args->ptr & -4UL);
if ((unsigned long)args->ptr & 2)
instr = ((*addr) & 0xffff0000) | args->new;
else
DECLARE_EXPORT(__movmem_i4_even);
DECLARE_EXPORT(__movmem_i4_odd);
DECLARE_EXPORT(__movmemSI12_i4);
-DECLARE_EXPORT(__sdivsi3_i4i);
-DECLARE_EXPORT(__udiv_qrnnd_16);
-DECLARE_EXPORT(__udivsi3_i4i);
#else /* GCC 3.x */
DECLARE_EXPORT(__movstr_i4_even);
DECLARE_EXPORT(__movstr_i4_odd);
spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
return ret;
}
+EXPORT_SYMBOL(atomic_cmpxchg);
int atomic_add_unless(atomic_t *v, int a, int u)
{
spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
return ret != u;
}
+EXPORT_SYMBOL(atomic_add_unless);
/* Atomic operations are already serializing */
void atomic_set(atomic_t *v, int i)
struct chan *chan;
LIST_HEAD(list);
struct list_head *ele;
+ unsigned long flags;
- spin_lock_irq(&irqs_to_free_lock);
+ spin_lock_irqsave(&irqs_to_free_lock, flags);
list_splice_init(&irqs_to_free, &list);
- INIT_LIST_HEAD(&irqs_to_free);
- spin_unlock_irq(&irqs_to_free_lock);
+ spin_unlock_irqrestore(&irqs_to_free_lock, flags);
list_for_each(ele, &list){
chan = list_entry(ele, struct chan, free_list);
static void close_one_chan(struct chan *chan, int delay_free_irq)
{
+ unsigned long flags;
+
if(!chan->opened)
return;
if(delay_free_irq){
- spin_lock_irq(&irqs_to_free_lock);
+ spin_lock_irqsave(&irqs_to_free_lock, flags);
list_add(&chan->free_list, &irqs_to_free);
- spin_unlock_irq(&irqs_to_free_lock);
+ spin_unlock_irqrestore(&irqs_to_free_lock, flags);
}
else {
if(chan->input)
err_msg = NULL;
err = (*dev->remove)(n, &err_msg);
switch(err){
+ case 0:
+ err_msg = "";
+ break;
case -ENODEV:
if(err_msg == NULL)
err_msg = "Device doesn't exist";
static DEFINE_MUTEX(ubd_lock);
-/* XXX - this made sense in 2.4 days, now it's only used as a boolean, and
- * probably it doesn't make sense even for that. */
-static int do_ubd;
-
static int ubd_open(struct inode * inode, struct file * filp);
static int ubd_release(struct inode * inode, struct file * file);
static int ubd_ioctl(struct inode * inode, struct file * file,
struct platform_device pdev;
struct request_queue *queue;
spinlock_t lock;
+ int active;
};
#define DEFAULT_COW { \
.shared = 0, \
.cow = DEFAULT_COW, \
.lock = SPIN_LOCK_UNLOCKED, \
+ .active = 0, \
}
/* Protected by ubd_lock */
struct ubd *dev;
int n;
- do_ubd = 0;
n = os_read_file(thread_fd, &req, sizeof(req));
if(n != sizeof(req)){
printk(KERN_ERR "Pid %d - spurious interrupt in ubd_handler, "
rq = req.req;
dev = rq->rq_disk->private_data;
+ dev->active = 0;
ubd_finish(rq, req.error);
reactivate_fd(thread_fd, UBD_IRQ);
}
}
else {
- if(do_ubd || (req = elv_next_request(q)) == NULL)
+ struct ubd *dev = q->queuedata;
+ if(dev->active || (req = elv_next_request(q)) == NULL)
return;
err = prepare_request(req, &io_req);
if(!err){
- do_ubd = 1;
+ dev->active = 1;
n = os_write_file(thread_fd, (char *) &io_req,
sizeof(io_req));
if(n != sizeof(io_req))
#define u32 uint32_t
#endif
+#include "sysdep/ptrace.h"
+
#define MCONSOLE_MAGIC (0xcafebabe)
#define MCONSOLE_MAX_DATA (512)
#define MCONSOLE_VERSION 2
#endif
#ifdef UML_CONFIG_MODE_SKAS
struct skas_regs {
- /* x86_64 ptrace uses sizeof(user_regs_struct) as its register
- * file size, while i386 uses FRAME_SIZE. Therefore, we need
- * to use UM_FRAME_SIZE here instead of HOST_FRAME_SIZE.
- */
unsigned long regs[MAX_REG_NR];
unsigned long fp[HOST_FP_SIZE];
struct faultinfo faultinfo;
void mem_init(void)
{
- max_low_pfn = (high_physmem - uml_physmem) >> PAGE_SHIFT;
-
/* clear the zero-page */
memset((void *) empty_zero_page, 0, PAGE_SIZE);
/* this will put all low memory onto the freelists */
totalram_pages = free_all_bootmem();
+ max_low_pfn = totalram_pages;
#ifdef CONFIG_HIGHMEM
totalhigh_pages = highmem >> PAGE_SHIFT;
totalram_pages += totalhigh_pages;
static inline long do_syscall_stub(struct mm_id * mm_idp, void **addr)
{
unsigned long regs[MAX_REG_NR];
- int n;
+ int n, i;
long ret, offset;
unsigned long * data;
unsigned long * syscall;
(unsigned long) &__syscall_stub_start);
n = ptrace_setregs(pid, regs);
- if(n < 0)
+ if(n < 0){
+ printk("Registers - \n");
+ for(i = 0; i < MAX_REG_NR; i++)
+ printk("\t%d\t0x%lx\n", i, regs[i]);
panic("do_syscall_stub : PTRACE_SETREGS failed, errno = %d\n",
- n);
+ -n);
+ }
wait_stub_done(pid, 0, "do_syscall_stub");
if((n < 0) || !WIFSTOPPED(status) ||
(WSTOPSIG(status) != SIGUSR1 && WSTOPSIG(status) != SIGTRAP)){
- unsigned long regs[HOST_FRAME_SIZE];
+ unsigned long regs[MAX_REG_NR];
if(ptrace(PTRACE_GETREGS, pid, 0, regs) < 0)
printk("Failed to get registers from stub, "
int i;
printk("Stub registers -\n");
- for(i = 0; i < HOST_FRAME_SIZE; i++)
+ for(i = 0; i < ARRAY_SIZE(regs); i++)
printk("\t%d - %lx\n", i, regs[i]);
}
panic("%s : failed to wait for SIGUSR1/SIGTRAP, "
int copy_context_skas0(unsigned long new_stack, int pid)
{
int err;
- unsigned long regs[HOST_FRAME_SIZE];
+ unsigned long regs[MAX_REG_NR];
unsigned long fp_regs[HOST_FP_SIZE];
unsigned long current_stack = current_stub_stack();
struct stub_data *data = (struct stub_data *) current_stack;
/* These are set once at boot time and not changed thereafter */
-static unsigned long exec_regs[HOST_FRAME_SIZE];
+static unsigned long exec_regs[MAX_REG_NR];
static unsigned long exec_fp_regs[HOST_FP_SIZE];
static unsigned long exec_fpx_regs[HOST_XFP_SIZE];
static int have_fpx_regs = 1;
{
int err;
+ memset(exec_regs, 0, sizeof(exec_regs));
err = ptrace(PTRACE_GETREGS, pid, 0, exec_regs);
if(err)
panic("check_ptrace : PTRACE_GETREGS failed, errno = %d",
void get_safe_registers(unsigned long *regs, unsigned long *fp_regs)
{
- memcpy(regs, exec_regs, HOST_FRAME_SIZE * sizeof(unsigned long));
+ memcpy(regs, exec_regs, sizeof(exec_regs));
if(fp_regs != NULL)
memcpy(fp_regs, exec_fp_regs,
HOST_FP_SIZE * sizeof(unsigned long));
/* These are set once at boot time and not changed thereafter */
-static unsigned long exec_regs[HOST_FRAME_SIZE];
+static unsigned long exec_regs[MAX_REG_NR];
static unsigned long exec_fp_regs[HOST_FP_SIZE];
void init_thread_registers(union uml_pt_regs *to)
void get_safe_registers(unsigned long *regs, unsigned long *fp_regs)
{
- memcpy(regs, exec_regs, HOST_FRAME_SIZE * sizeof(unsigned long));
+ memcpy(regs, exec_regs, sizeof(exec_regs));
if(fp_regs != NULL)
memcpy(fp_regs, exec_fp_regs,
HOST_FP_SIZE * sizeof(unsigned long));
}
EXPORT_SYMBOL(__udelay);
-
-void __const_udelay(unsigned long usecs)
-{
- int i, n;
-
- n = (loops_per_jiffy * HZ * usecs) / MILLION;
- for(i=0;i<n;i++)
- cpu_relax();
-}
-
-EXPORT_SYMBOL(__const_udelay);
static void ldt_get_host_info(void)
{
long ret;
- struct ldt_entry * ldt, *tmp;
+ struct ldt_entry * ldt;
+ short *tmp;
int i, size, k, order;
spin_lock(&host_ldt_lock);
}
EXPORT_SYMBOL(__udelay);
-
-void __const_udelay(unsigned long usecs)
-{
- unsigned long i, n;
-
- n = (loops_per_jiffy * HZ * usecs) / MILLION;
- for(i=0;i<n;i++)
- cpu_relax();
-}
-
-EXPORT_SYMBOL(__const_udelay);
jmp _m_s
check_vesa:
+#ifdef CONFIG_FIRMWARE_EDID
+ leaw modelist+1024, %di
+ movw $0x4f00, %ax
+ int $0x10
+ cmpw $0x004f, %ax
+ jnz setbad
+
+ movw 4(%di), %ax
+ movw %ax, vbe_version
+#endif
leaw modelist+1024, %di
subb $VIDEO_FIRST_VESA>>8, %bh
movw %bx, %cx # Get mode information structure
rep
stosl
+ cmpw $0x0200, vbe_version # only do EDID on >= VBE2.0
+ jl no_edid
+
pushw %es # save ES
xorw %di, %di # Report Capability
pushw %di
svga_prefix: .byte VIDEO_FIRST_BIOS>>8 # Default prefix for BIOS modes
graphic_mode: .byte 0 # Graphic mode with a linear frame buffer
dac_size: .byte 6 # DAC bit depth
+vbe_version: .word 0 # VBE bios version
# Status messages
keymsg: .ascii "Press <RETURN> to see video modes available, "
{
pgd_t *slot0 = pgd_offset(current->mm, 0UL);
low_ptr = *slot0;
+ /* FIXME: We're playing with the current task's page tables here, which
+ * is potentially dangerous on SMP systems.
+ */
set_pgd(slot0, *pgd_offset(current->mm, PAGE_OFFSET));
- WARN_ON(num_online_cpus() != 1);
local_flush_tlb();
}
int disable_apic_timer __initdata;
+/* Local APIC timer works in C2? */
+int local_apic_timer_c2_ok;
+EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
+
static struct resource *ioapic_resources;
static struct resource lapic_resource = {
.name = "Local APIC",
void smp_send_timer_broadcast_ipi(void)
{
+ int cpu = smp_processor_id();
cpumask_t mask;
cpus_and(mask, cpu_online_map, timer_interrupt_broadcast_ipi_mask);
+
+ if (cpu_isset(cpu, mask)) {
+ cpu_clear(cpu, mask);
+ add_pda(apic_timer_irqs, 1);
+ smp_local_timer_interrupt();
+ }
+
if (!cpus_empty(mask)) {
send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
}
}
early_param("nolapic", setup_nolapic);
+static int __init parse_lapic_timer_c2_ok(char *arg)
+{
+ local_apic_timer_c2_ok = 1;
+ return 0;
+}
+early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);
+
static __init int setup_noapictimer(char *str)
{
if (str[0] != ' ' && str[0] != 0)
void (*f)(void);
};
-static struct __initdata chipset early_qrk[] = {
+static struct chipset early_qrk[] __initdata = {
{ PCI_VENDOR_ID_NVIDIA, nvidia_bugs },
{ PCI_VENDOR_ID_VIA, via_bugs },
{ PCI_VENDOR_ID_ATI, ati_bugs },
/*
* ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts:
- * (these are usually mapped to vectors 0x20-0x2f)
+ * (these are usually mapped to vectors 0x30-0x3f)
*/
/*
* outb_p - this has to work on a wide range of PC hardware.
*/
outb_p(0x11, 0x20); /* ICW1: select 8259A-1 init */
- outb_p(IRQ0_VECTOR, 0x21); /* ICW2: 8259A-1 IR0-7 mapped to 0x20-0x27 */
+ outb_p(IRQ0_VECTOR, 0x21); /* ICW2: 8259A-1 IR0-7 mapped to 0x30-0x37 */
outb_p(0x04, 0x21); /* 8259A-1 (the master) has a slave on IR2 */
if (auto_eoi)
outb_p(0x03, 0x21); /* master does Auto EOI */
outb_p(0x01, 0x21); /* master expects normal EOI */
outb_p(0x11, 0xA0); /* ICW1: select 8259A-2 init */
- outb_p(IRQ8_VECTOR, 0xA1); /* ICW2: 8259A-2 IR0-7 mapped to 0x28-0x2f */
+ outb_p(IRQ8_VECTOR, 0xA1); /* ICW2: 8259A-2 IR0-7 mapped to 0x38-0x3f */
outb_p(0x02, 0xA1); /* 8259A-2 is a slave on master's IR2 */
outb_p(0x01, 0xA1); /* (slave's support for AEOI in flat mode
is to be investigated) */
/* checks for a bit availability (hack for oprofile) */
int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
{
+ int cpu;
BUG_ON(counter > NMI_MAX_COUNTER_BITS);
-
- return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
+ for_each_possible_cpu (cpu) {
+ if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
+ return 0;
+ }
+ return 1;
}
/* checks the an msr for availability */
int avail_to_resrv_perfctr_nmi(unsigned int msr)
{
unsigned int counter;
+ int cpu;
counter = nmi_perfctr_msr_to_bit(msr);
BUG_ON(counter > NMI_MAX_COUNTER_BITS);
- return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
+ for_each_possible_cpu (cpu) {
+ if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
+ return 0;
+ }
+ return 1;
}
-int reserve_perfctr_nmi(unsigned int msr)
+static int __reserve_perfctr_nmi(int cpu, unsigned int msr)
{
unsigned int counter;
+ if (cpu < 0)
+ cpu = smp_processor_id();
counter = nmi_perfctr_msr_to_bit(msr);
BUG_ON(counter > NMI_MAX_COUNTER_BITS);
- if (!test_and_set_bit(counter, &__get_cpu_var(perfctr_nmi_owner)))
+ if (!test_and_set_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
return 1;
return 0;
}
-void release_perfctr_nmi(unsigned int msr)
+static void __release_perfctr_nmi(int cpu, unsigned int msr)
{
unsigned int counter;
+ if (cpu < 0)
+ cpu = smp_processor_id();
counter = nmi_perfctr_msr_to_bit(msr);
BUG_ON(counter > NMI_MAX_COUNTER_BITS);
- clear_bit(counter, &__get_cpu_var(perfctr_nmi_owner));
+ clear_bit(counter, &per_cpu(perfctr_nmi_owner, cpu));
}
-int reserve_evntsel_nmi(unsigned int msr)
+int reserve_perfctr_nmi(unsigned int msr)
+{
+ int cpu, i;
+ for_each_possible_cpu (cpu) {
+ if (!__reserve_perfctr_nmi(cpu, msr)) {
+ for_each_possible_cpu (i) {
+ if (i >= cpu)
+ break;
+ __release_perfctr_nmi(i, msr);
+ }
+ return 0;
+ }
+ }
+ return 1;
+}
+
+void release_perfctr_nmi(unsigned int msr)
+{
+ int cpu;
+ for_each_possible_cpu (cpu)
+ __release_perfctr_nmi(cpu, msr);
+}
+
+static int __reserve_evntsel_nmi(int cpu, unsigned int msr)
{
unsigned int counter;
+ if (cpu < 0)
+ cpu = smp_processor_id();
counter = nmi_evntsel_msr_to_bit(msr);
BUG_ON(counter > NMI_MAX_COUNTER_BITS);
- if (!test_and_set_bit(counter, &__get_cpu_var(evntsel_nmi_owner)))
+ if (!test_and_set_bit(counter, &per_cpu(evntsel_nmi_owner, cpu)[0]))
return 1;
return 0;
}
-void release_evntsel_nmi(unsigned int msr)
+static void __release_evntsel_nmi(int cpu, unsigned int msr)
{
unsigned int counter;
+ if (cpu < 0)
+ cpu = smp_processor_id();
counter = nmi_evntsel_msr_to_bit(msr);
BUG_ON(counter > NMI_MAX_COUNTER_BITS);
- clear_bit(counter, &__get_cpu_var(evntsel_nmi_owner));
+ clear_bit(counter, &per_cpu(evntsel_nmi_owner, cpu)[0]);
+}
+
+int reserve_evntsel_nmi(unsigned int msr)
+{
+ int cpu, i;
+ for_each_possible_cpu (cpu) {
+ if (!__reserve_evntsel_nmi(cpu, msr)) {
+ for_each_possible_cpu (i) {
+ if (i >= cpu)
+ break;
+ __release_evntsel_nmi(i, msr);
+ }
+ return 0;
+ }
+ }
+ return 1;
+}
+
+void release_evntsel_nmi(unsigned int msr)
+{
+ int cpu;
+ for_each_possible_cpu (cpu) {
+ __release_evntsel_nmi(cpu, msr);
+ }
}
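
The reworked API above also changes semantics: reserve_perfctr_nmi() and
reserve_evntsel_nmi() now claim the counter bit on every possible CPU,
rolling back the CPUs already claimed if any one of them conflicts, while
the new double-underscore helpers act on a single CPU (-1 meaning "the
current one"). A minimal sketch of the claim-all-with-rollback pattern,
using a hypothetical per-cpu bitmap rsrc_owner:

static DEFINE_PER_CPU(unsigned long, rsrc_owner);

static int reserve_bit_on_all_cpus(unsigned int nr)
{
	int cpu, i;

	for_each_possible_cpu(cpu) {
		if (test_and_set_bit(nr, &per_cpu(rsrc_owner, cpu))) {
			/* conflict: release the CPUs claimed so far */
			for_each_possible_cpu(i) {
				if (i >= cpu)
					break;
				clear_bit(nr, &per_cpu(rsrc_owner, i));
			}
			return 0;
		}
	}
	return 1;	/* bit now owned on every CPU */
}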
static __cpuinit inline int nmi_known_cpu(void)
for (cpu = 0; cpu < NR_CPUS; cpu++)
counts[cpu] = cpu_pda(cpu)->__nmi_count;
local_irq_enable();
- mdelay((10*1000)/nmi_hz); // wait 10 ticks
+ mdelay((20*1000)/nmi_hz); // wait 20 ticks
for_each_online_cpu(cpu) {
if (!per_cpu(nmi_watchdog_ctlblk, cpu).enabled)
perfctr_msr = MSR_K7_PERFCTR0;
evntsel_msr = MSR_K7_EVNTSEL0;
- if (!reserve_perfctr_nmi(perfctr_msr))
+ if (!__reserve_perfctr_nmi(-1, perfctr_msr))
goto fail;
- if (!reserve_evntsel_nmi(evntsel_msr))
+ if (!__reserve_evntsel_nmi(-1, evntsel_msr))
goto fail1;
/* Simulator may not support it */
wd->check_bit = 1ULL<<63;
return 1;
fail2:
- release_evntsel_nmi(evntsel_msr);
+ __release_evntsel_nmi(-1, evntsel_msr);
fail1:
- release_perfctr_nmi(perfctr_msr);
+ __release_perfctr_nmi(-1, perfctr_msr);
fail:
return 0;
}
wrmsr(wd->evntsel_msr, 0, 0);
- release_evntsel_nmi(wd->evntsel_msr);
- release_perfctr_nmi(wd->perfctr_msr);
+ __release_evntsel_nmi(-1, wd->evntsel_msr);
+ __release_perfctr_nmi(-1, wd->perfctr_msr);
}
/* Note that these events don't tick when the CPU idles. This means
cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4);
}
- if (!reserve_perfctr_nmi(perfctr_msr))
+ if (!__reserve_perfctr_nmi(-1, perfctr_msr))
goto fail;
- if (!reserve_evntsel_nmi(evntsel_msr))
+ if (!__reserve_evntsel_nmi(-1, evntsel_msr))
goto fail1;
evntsel = P4_ESCR_EVENT_SELECT(0x3F)
wd->check_bit = 1ULL<<39;
return 1;
fail1:
- release_perfctr_nmi(perfctr_msr);
+ __release_perfctr_nmi(-1, perfctr_msr);
fail:
return 0;
}
wrmsr(wd->cccr_msr, 0, 0);
wrmsr(wd->evntsel_msr, 0, 0);
- release_evntsel_nmi(wd->evntsel_msr);
- release_perfctr_nmi(wd->perfctr_msr);
+ __release_evntsel_nmi(-1, wd->evntsel_msr);
+ __release_perfctr_nmi(-1, wd->perfctr_msr);
}
#define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;
evntsel_msr = MSR_ARCH_PERFMON_EVENTSEL0;
- if (!reserve_perfctr_nmi(perfctr_msr))
+ if (!__reserve_perfctr_nmi(-1, perfctr_msr))
goto fail;
- if (!reserve_evntsel_nmi(evntsel_msr))
+ if (!__reserve_evntsel_nmi(-1, evntsel_msr))
goto fail1;
wrmsrl(perfctr_msr, 0UL);
wd->check_bit = 1ULL << (eax.split.bit_width - 1);
return 1;
fail1:
- release_perfctr_nmi(perfctr_msr);
+ __release_perfctr_nmi(-1, perfctr_msr);
fail:
return 0;
}
wrmsr(wd->evntsel_msr, 0, 0);
- release_evntsel_nmi(wd->evntsel_msr);
- release_perfctr_nmi(wd->perfctr_msr);
+ __release_evntsel_nmi(-1, wd->evntsel_msr);
+ __release_perfctr_nmi(-1, wd->perfctr_msr);
}
void setup_apic_nmi_watchdog(void *unused)
int elv_register(struct elevator_type *e)
{
+ char *def = "";
spin_lock_irq(&elv_list_lock);
BUG_ON(elevator_find(e->elevator_name));
list_add_tail(&e->list, &elv_list);
spin_unlock_irq(&elv_list_lock);
- printk(KERN_INFO "io scheduler %s registered", e->elevator_name);
if (!strcmp(e->elevator_name, chosen_elevator) ||
(!*chosen_elevator &&
!strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
- printk(" (default)");
- printk("\n");
+ def = " (default)";
+
+ printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name, def);
return 0;
}
EXPORT_SYMBOL_GPL(elv_register);
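
The registration message is now assembled in one printk() call. With the
old three-call sequence, output from another CPU could land between
"registered" and " (default)", and the continuation fragments carried no
log level. The general pattern, sketched with hypothetical is_default and
name variables:

	const char *suffix = is_default ? " (default)" : "";

	/* one complete line per printk: it cannot be split mid-sentence */
	printk(KERN_INFO "io scheduler %s registered%s\n", name, suffix);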
/* temporary */
if (major == 0) {
for (index = ARRAY_SIZE(major_names)-1; index > 0; index--) {
- if (is_lanana_major(index))
- continue;
if (major_names[index] == NULL)
break;
}
* considered part of another segment, since that might
* change with the bounce page.
*/
- high = page_to_pfn(bv->bv_page) >= q->bounce_pfn;
+ high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
if (high || highprv)
goto new_hw_segment;
if (cluster) {
open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
register_hotcpu_notifier(&blk_cpu_notifier);
- blk_max_low_pfn = max_low_pfn;
- blk_max_pfn = max_pfn;
+ blk_max_low_pfn = max_low_pfn - 1;
+ blk_max_pfn = max_pfn - 1;
return 0;
}
static void scatterwalk_pagedone(struct scatter_walk *walk, int out,
unsigned int more)
{
- if (out)
- flush_dcache_page(scatterwalk_page(walk));
+ if (out) {
+ struct page *page;
+
+ page = walk->sg->page + ((walk->offset - 1) >> PAGE_SHIFT);
+ flush_dcache_page(page);
+ }
if (more) {
walk->offset += PAGE_SIZE - 1;
memcpy_dir(buf, vaddr, len_this_page, out);
scatterwalk_unmap(vaddr, out);
- scatterwalk_advance(walk, nbytes);
+ scatterwalk_advance(walk, len_this_page);
if (nbytes == len_this_page)
break;
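
Two related fixes here: the walk now advances by len_this_page per pass
rather than the whole nbytes, and the flush targets the page holding the
last byte actually written. The latter matters because walk->offset may
sit exactly on a page boundary after copying, where scatterwalk_page(walk)
would name the next, still-untouched page. The indexing, restated as a
hypothetical helper:

/* the page containing byte (offset - 1), i.e. the page just completed */
static struct page *just_finished_page(struct scatterlist *sg,
				       unsigned int offset)
{
	return sg->page + ((offset - 1) >> PAGE_SHIFT);
}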
notify_info->notify.value = (u16) notify_value;
notify_info->notify.handler_obj = handler_obj;
- acpi_ex_relinquish_interpreter();
+ acpi_ex_exit_interpreter();
acpi_ev_notify_dispatch(notify_info);
- acpi_ex_reacquire_interpreter();
+ status = acpi_ex_enter_interpreter();
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
}
if (!handler_obj) {
u32 bit_width, acpi_integer * value)
{
acpi_status status;
+ acpi_status status2;
acpi_adr_space_handler handler;
acpi_adr_space_setup region_setup;
union acpi_operand_object *handler_desc;
* setup will potentially execute control methods
* (e.g., _REG method for this region)
*/
- acpi_ex_relinquish_interpreter();
+ acpi_ex_exit_interpreter();
status = region_setup(region_obj, ACPI_REGION_ACTIVATE,
handler_desc->address_space.context,
/* Re-enter the interpreter */
- acpi_ex_reacquire_interpreter();
+ status2 = acpi_ex_enter_interpreter();
+ if (ACPI_FAILURE(status2)) {
+ return_ACPI_STATUS(status2);
+ }
/* Check for failure of the Region Setup */
* exit the interpreter because the handler *might* block -- we don't
 * know what it will do, so we can't hold the lock on the interpreter.
*/
- acpi_ex_relinquish_interpreter();
+ acpi_ex_exit_interpreter();
}
/* Call the handler */
* We just returned from a non-default handler, we must re-enter the
* interpreter
*/
- acpi_ex_reacquire_interpreter();
+ status2 = acpi_ex_enter_interpreter();
+ if (ACPI_FAILURE(status2)) {
+ return_ACPI_STATUS(status2);
+ }
}
return_ACPI_STATUS(status);
return (AE_BAD_PARAMETER);
}
- /* Must lock interpreter to prevent race conditions */
+ status = acpi_ex_enter_interpreter();
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
- acpi_ex_enter_interpreter();
status = acpi_ev_acquire_global_lock(timeout);
acpi_ex_exit_interpreter();
* Get the sync_level. If method is serialized, a mutex will be
* created for this method when it is parsed.
*/
- if (method_flags & AML_METHOD_SERIALIZED) {
+ if (acpi_gbl_all_methods_serialized) {
+ obj_desc->method.sync_level = 0;
+ obj_desc->method.method_flags |= AML_METHOD_SERIALIZED;
+ } else if (method_flags & AML_METHOD_SERIALIZED) {
/*
* ACPI 1.0: sync_level = 0
* ACPI 2.0: sync_level = sync_level in method declaration
acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
{
acpi_status status;
+ acpi_status status2;
ACPI_FUNCTION_TRACE(ex_system_wait_semaphore);
/* We must wait, so unlock the interpreter */
- acpi_ex_relinquish_interpreter();
+ acpi_ex_exit_interpreter();
status = acpi_os_wait_semaphore(semaphore, 1, timeout);
/* Reacquire the interpreter */
- acpi_ex_reacquire_interpreter();
+ status2 = acpi_ex_enter_interpreter();
+ if (ACPI_FAILURE(status2)) {
+
+ /* Report fatal error, could not acquire interpreter */
+
+ return_ACPI_STATUS(status2);
+ }
}
return_ACPI_STATUS(status);
acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout)
{
acpi_status status;
+ acpi_status status2;
ACPI_FUNCTION_TRACE(ex_system_wait_mutex);
/* We must wait, so unlock the interpreter */
- acpi_ex_relinquish_interpreter();
+ acpi_ex_exit_interpreter();
status = acpi_os_acquire_mutex(mutex, timeout);
/* Reacquire the interpreter */
- acpi_ex_reacquire_interpreter();
+ status2 = acpi_ex_enter_interpreter();
+ if (ACPI_FAILURE(status2)) {
+
+ /* Report fatal error, could not acquire interpreter */
+
+ return_ACPI_STATUS(status2);
+ }
}
return_ACPI_STATUS(status);
acpi_status acpi_ex_system_do_suspend(acpi_integer how_long)
{
+ acpi_status status;
+
ACPI_FUNCTION_ENTRY();
/* Since this thread will sleep, we must release the interpreter */
- acpi_ex_relinquish_interpreter();
+ acpi_ex_exit_interpreter();
acpi_os_sleep(how_long);
/* And now we must get the interpreter again */
- acpi_ex_reacquire_interpreter();
- return (AE_OK);
+ status = acpi_ex_enter_interpreter();
+ return (status);
}
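
All of the hunks above follow one pattern, needed now that
acpi_ex_enter_interpreter() can fail: drop the interpreter mutex before
anything that may block, reacquire it afterwards, and propagate a failed
reacquire as the method's status. Condensed into a sketch
(example_blocking_section is hypothetical):

static acpi_status example_blocking_section(acpi_integer msec)
{
	acpi_status status;

	acpi_ex_exit_interpreter();	/* about to block: drop the lock */
	acpi_os_sleep(msec);		/* any blocking operation */

	status = acpi_ex_enter_interpreter();
	return (status);	/* AE_OK, or the fatal acquire failure */
}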
/*******************************************************************************
*
* PARAMETERS: None
*
- * RETURN: None
+ * RETURN: Status
*
- * DESCRIPTION: Enter the interpreter execution region. Failure to enter
- * the interpreter region is a fatal system error. Used in
- * conjunction with exit_interpreter.
+ * DESCRIPTION: Enter the interpreter execution region. Failure to enter
+ * the interpreter region is a fatal system error
*
******************************************************************************/
-void acpi_ex_enter_interpreter(void)
+acpi_status acpi_ex_enter_interpreter(void)
{
acpi_status status;
status = acpi_ut_acquire_mutex(ACPI_MTX_INTERPRETER);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR((AE_INFO,
- "Could not acquire AML Interpreter mutex"));
+ ACPI_ERROR((AE_INFO, "Could not acquire interpreter mutex"));
}
- return_VOID;
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ex_reacquire_interpreter
- *
- * PARAMETERS: None
- *
- * RETURN: None
- *
- * DESCRIPTION: Reacquire the interpreter execution region from within the
- * interpreter code. Failure to enter the interpreter region is a
- * fatal system error. Used in conjuction with
- * relinquish_interpreter
- *
- ******************************************************************************/
-
-void acpi_ex_reacquire_interpreter(void)
-{
- ACPI_FUNCTION_TRACE(ex_reacquire_interpreter);
-
- /*
- * If the global serialized flag is set, do not release the interpreter,
- * since it was not actually released by acpi_ex_relinquish_interpreter.
- * This forces the interpreter to be single threaded.
- */
- if (!acpi_gbl_all_methods_serialized) {
- acpi_ex_enter_interpreter();
- }
-
- return_VOID;
+ return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* RETURN: None
*
- * DESCRIPTION: Exit the interpreter execution region. This is the top level
- * routine used to exit the interpreter when all processing has
- * been completed.
+ * DESCRIPTION: Exit the interpreter execution region
+ *
+ * Cases where the interpreter is unlocked:
+ * 1) Completion of the execution of a control method
+ * 2) Method blocked on a Sleep() AML opcode
+ * 3) Method blocked on an Acquire() AML opcode
+ * 4) Method blocked on a Wait() AML opcode
+ * 5) Method blocked to acquire the global lock
+ * 6) Method blocked to execute a serialized control method that is
+ * already executing
+ * 7) About to invoke a user-installed opregion handler
*
******************************************************************************/
status = acpi_ut_release_mutex(ACPI_MTX_INTERPRETER);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR((AE_INFO,
- "Could not release AML Interpreter mutex"));
- }
-
- return_VOID;
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ex_relinquish_interpreter
- *
- * PARAMETERS: None
- *
- * RETURN: None
- *
- * DESCRIPTION: Exit the interpreter execution region, from within the
- * interpreter - before attempting an operation that will possibly
- * block the running thread.
- *
- * Cases where the interpreter is unlocked internally
- * 1) Method to be blocked on a Sleep() AML opcode
- * 2) Method to be blocked on an Acquire() AML opcode
- * 3) Method to be blocked on a Wait() AML opcode
- * 4) Method to be blocked to acquire the global lock
- * 5) Method to be blocked waiting to execute a serialized control method
- * that is currently executing
- * 6) About to invoke a user-installed opregion handler
- *
- ******************************************************************************/
-
-void acpi_ex_relinquish_interpreter(void)
-{
- ACPI_FUNCTION_TRACE(ex_relinquish_interpreter);
-
- /*
- * If the global serialized flag is set, do not release the interpreter.
- * This forces the interpreter to be single threaded.
- */
- if (!acpi_gbl_all_methods_serialized) {
- acpi_ex_exit_interpreter();
+ ACPI_ERROR((AE_INFO, "Could not release interpreter mutex"));
}
return_VOID;
*
* RETURN: none
*
- * DESCRIPTION: Truncate an ACPI Integer to 32 bits if the execution mode is
- * 32-bit, as determined by the revision of the DSDT.
+ * DESCRIPTION: Truncate a number to 32-bits if the currently executing method
+ * belongs to a 32-bit ACPI table.
*
******************************************************************************/
/*
* 2) Enable all wakeup GPEs
*/
+ status = acpi_hw_disable_all_gpes();
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
acpi_gbl_system_awake_and_running = FALSE;
status = acpi_hw_enable_all_wakeup_gpes();
ret = acpi_bus_get_device(*ibm->handle, &ibm->device);
if (ret < 0) {
printk(IBM_ERR "%s device not present\n", ibm->name);
- return 0;
+ return -ENODEV;
}
acpi_driver_data(ibm->device) = ibm;
status = acpi_install_notify_handler(*ibm->handle, ibm->type,
dispatch_notify, ibm);
if (ACPI_FAILURE(status)) {
- printk(IBM_ERR "acpi_install_notify_handler(%s) failed: %d\n",
- ibm->name, status);
+ if (status == AE_ALREADY_EXISTS) {
+ printk(IBM_NOTICE "another device driver is already handling %s events\n",
+ ibm->name);
+ } else {
+ printk(IBM_ERR "acpi_install_notify_handler(%s) failed: %d\n",
+ ibm->name, status);
+ }
return -ENODEV;
}
ibm->notify_installed = 1;
return ret;
}
+static void ibm_exit(struct ibm_struct *ibm);
+
static int __init ibm_init(struct ibm_struct *ibm)
{
int ret;
if (ibm->notify) {
ret = setup_notify(ibm);
+ if (ret == -ENODEV) {
+ printk(IBM_NOTICE "disabling subdriver %s\n",
+ ibm->name);
+ ibm_exit(ibm);
+ return 0;
+ }
if (ret < 0)
return ret;
}
* Execute the method via the interpreter. The interpreter is locked
* here before calling into the AML parser
*/
- acpi_ex_enter_interpreter();
+ status = acpi_ex_enter_interpreter();
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
status = acpi_ps_execute_method(info);
acpi_ex_exit_interpreter();
} else {
* resolution, we must lock it because we could access an opregion.
* The opregion access code assumes that the interpreter is locked.
*/
- acpi_ex_enter_interpreter();
+ status = acpi_ex_enter_interpreter();
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
/* Function has a strange interface */
u32 level, void *context, void **return_value)
{
acpi_object_type type;
- acpi_status status = AE_OK;
+ acpi_status status;
struct acpi_init_walk_info *info =
(struct acpi_init_walk_info *)context;
struct acpi_namespace_node *node =
/*
* Must lock the interpreter before executing AML code
*/
- acpi_ex_enter_interpreter();
+ status = acpi_ex_enter_interpreter();
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
/*
* Each of these types can contain executable AML code within the
struct acpi_buffer *return_buffer)
{
acpi_status status;
+ acpi_status status2;
struct acpi_evaluate_info *info;
acpi_size buffer_space_needed;
u32 i;
* Delete the internal return object. NOTE: Interpreter must be
* locked to avoid race condition.
*/
- acpi_ex_enter_interpreter();
+ status2 = acpi_ex_enter_interpreter();
+ if (ACPI_SUCCESS(status2)) {
- /* Remove one reference on the return object (should delete it) */
+ /* Remove one reference on the return object (should delete it) */
- acpi_ut_remove_reference(info->return_object);
- acpi_ex_exit_interpreter();
+ acpi_ut_remove_reference(info->return_object);
+ acpi_ex_exit_interpreter();
+ }
}
cleanup:
struct acpi_processor_cx *cx)
{
struct acpi_processor_power *pwr = &pr->power;
+ u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;
/*
* Check, if one of the previous states already marked the lapic
if (pwr->timer_broadcast_on_state < state)
return;
- if (cx->type >= ACPI_STATE_C2)
+ if (cx->type >= type)
pr->power.timer_broadcast_on_state = state;
}
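
Reading the ternary above: with local_apic_timer_c2_ok set, the LAPIC
timer is trusted to keep ticking in C2, so broadcast handling is only
needed from C3 on; otherwise C2 already requires it. Equivalently
(mark_broadcast is a hypothetical stand-in for the assignment above):

	u8 threshold = local_apic_timer_c2_ok ? ACPI_STATE_C3
					      : ACPI_STATE_C2;

	/* engage the workaround only for states deep enough to stop
	 * the local APIC timer */
	if (cx->type >= threshold)
		mark_broadcast(pr, state);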
static struct acpi_table_desc initial_tables[ACPI_MAX_TABLES] __initdata;
-void acpi_table_print_madt_entry(struct acpi_subtable_header * header)
+static int acpi_apic_instance __initdata;
+
+void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
{
if (!header)
return;
if (!handler)
return -EINVAL;
- /* Locate the table (if exists). There should only be one. */
- acpi_get_table(id, 0, &table_header);
+ if (strncmp(id, ACPI_SIG_MADT, 4) == 0)
+ acpi_get_table(id, acpi_apic_instance, &table_header);
+ else
+ acpi_get_table(id, 0, &table_header);
if (!table_header) {
printk(KERN_WARNING PREFIX "%4.4s not present\n", id);
int __init acpi_table_parse(char *id, acpi_table_handler handler)
{
struct acpi_table_header *table = NULL;
+
if (!handler)
return -EINVAL;
- acpi_get_table(id, 0, &table);
+ if (strncmp(id, ACPI_SIG_MADT, 4) == 0)
+ acpi_get_table(id, acpi_apic_instance, &table);
+ else
+ acpi_get_table(id, 0, &table);
+
if (table) {
handler(table);
return 0;
return 1;
}
+/*
+ * The BIOS is supposed to supply a single APIC/MADT,
+ * but some report two. Provide a knob to use either.
+ * (don't you wish instance 0 and 1 were not the same?)
+ */
+static void __init check_multiple_madt(void)
+{
+ struct acpi_table_header *table = NULL;
+
+ acpi_get_table(ACPI_SIG_MADT, 2, &table);
+ if (table) {
+ printk(KERN_WARNING PREFIX
+ "BIOS bug: multiple APIC/MADT found,"
+ " using %d\n", acpi_apic_instance);
+ printk(KERN_WARNING PREFIX
+ "If \"acpi_apic_instance=%d\" works better, "
+ "notify linux-acpi@vger.kernel.org\n",
+ acpi_apic_instance ? 0 : 2);
+
+ } else
+ acpi_apic_instance = 0;
+
+ return;
+}
+
/*
* acpi_table_init()
*
* result: sdt_entry[] is initialized
*/
-
int __init acpi_table_init(void)
{
acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0);
+ check_multiple_madt();
+ return 0;
+}
+
+static int __init acpi_parse_apic_instance(char *str)
+{
+
+ acpi_apic_instance = simple_strtoul(str, NULL, 0);
+
+ printk(KERN_NOTICE PREFIX "Shall use APIC/MADT table %d\n",
+ acpi_apic_instance);
+
return 0;
}
+
+early_param("acpi_apic_instance", acpi_parse_apic_instance);
board_ahci_pi = 1,
board_ahci_vt8251 = 2,
board_ahci_ign_iferr = 3,
+ board_ahci_sb600 = 4,
/* global controller registers */
HOST_CAP = 0x00, /* host capabilities */
AHCI_FLAG_NO_NCQ = (1 << 24),
AHCI_FLAG_IGN_IRQ_IF_ERR = (1 << 25), /* ignore IRQ_IF_ERR */
AHCI_FLAG_HONOR_PI = (1 << 26), /* honor PORTS_IMPL */
+ AHCI_FLAG_IGN_SERR_INTERNAL = (1 << 27), /* ignore SERR_INTERNAL */
};
struct ahci_cmd_hdr {
.udma_mask = 0x7f, /* udma0-6 ; FIXME */
.port_ops = &ahci_ops,
},
+ /* board_ahci_sb600 */
+ {
+ .sht = &ahci_sht,
+ .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+ ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
+ ATA_FLAG_SKIP_D2H_BSY |
+ AHCI_FLAG_IGN_SERR_INTERNAL,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .udma_mask = 0x7f, /* udma0-6 ; FIXME */
+ .port_ops = &ahci_ops,
+ },
+
};
static const struct pci_device_id ahci_pci_tbl[] = {
PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr },
/* ATI */
- { PCI_VDEVICE(ATI, 0x4380), board_ahci }, /* ATI SB600 non-raid */
+ { PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 non-raid */
{ PCI_VDEVICE(ATI, 0x4381), board_ahci }, /* ATI SB600 raid */
/* VIA */
if (ap->flags & AHCI_FLAG_IGN_IRQ_IF_ERR)
irq_stat &= ~PORT_IRQ_IF_ERR;
- if (irq_stat & PORT_IRQ_TF_ERR)
+ if (irq_stat & PORT_IRQ_TF_ERR) {
err_mask |= AC_ERR_DEV;
+ if (ap->flags & AHCI_FLAG_IGN_SERR_INTERNAL)
+ serror &= ~SERR_INTERNAL;
+ }
if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
err_mask |= AC_ERR_HOST_BUS;
*gtf_address = 0UL;
*obj_loc = 0UL;
- if (noacpi)
+ if (libata_noacpi)
return 0;
if (ata_msg_probe(ap))
ata_dev_printk(atadev, KERN_DEBUG, "%s: ENTER: port#: %d\n",
__FUNCTION__, ap->port_no);
- if (noacpi || !(ap->cbl == ATA_CBL_SATA))
+ if (libata_noacpi || !(ap->cbl == ATA_CBL_SATA))
return 0;
if (!ata_dev_enabled(atadev) || (ap->flags & ATA_FLAG_DISABLED))
unsigned long gtf_address;
unsigned long obj_loc;
- if (noacpi)
+ if (libata_noacpi)
return 0;
/*
* TBD - implement PATA support. For now,
struct acpi_object_list input;
union acpi_object in_params[1];
- if (noacpi)
+ if (libata_noacpi)
return 0;
if (ata_msg_probe(ap))
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
-int noacpi;
-module_param(noacpi, int, 0444);
+int libata_noacpi = 1;
+module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");
MODULE_AUTHOR("Jeff Garzik");
dev->max_sectors = ATA_MAX_SECTORS;
}
+ if (ata_device_blacklisted(dev) & ATA_HORKAGE_MAX_SEC_128)
+ dev->max_sectors = min(ATA_MAX_SECTORS_128, dev->max_sectors);
+
+ /* limit ATAPI DMA to R/W commands only */
+ if (ata_device_blacklisted(dev) & ATA_HORKAGE_DMA_RW_ONLY)
+ dev->horkage |= ATA_HORKAGE_DMA_RW_ONLY;
+
if (ap->ops->dev_config)
ap->ops->dev_config(ap, dev);
{ "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
{ "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA },
+ /* Weird ATAPI devices */
+ { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 |
+ ATA_HORKAGE_DMA_RW_ONLY },
+
/* Devices we expect to fail diagnostics */
/* Devices where NCQ should be avoided */
{ "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
/* http://thread.gmane.org/gmane.linux.ide/14907 */
{ "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
+ /* NCQ is broken */
+ { "Maxtor 6L250S0", "BANC1G10", ATA_HORKAGE_NONCQ },
+ /* NCQ hard hangs device under heavier load, needs hard power cycle */
+ { "Maxtor 6B250S0", "BANC1B70", ATA_HORKAGE_NONCQ },
+ /* Blacklist entries taken from Silicon Image 3124/3132
+ Windows driver .inf file - also several Linux problem reports */
+ { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
+ { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
+ { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
/* Devices with NCQ limits */
struct ata_port *ap = qc->ap;
int rc = 0; /* Assume ATAPI DMA is OK by default */
+ /* some drives can only do ATAPI DMA on read/write */
+ if (unlikely(qc->dev->horkage & ATA_HORKAGE_DMA_RW_ONLY)) {
+ struct scsi_cmnd *cmd = qc->scsicmd;
+ u8 *scsicmd = cmd->cmnd;
+
+ switch (scsicmd[0]) {
+ case READ_10:
+ case WRITE_10:
+ case READ_12:
+ case WRITE_12:
+ case READ_6:
+ case WRITE_6:
+ /* atapi dma may be ok */
+ break;
+ default:
+ /* turn off atapi dma */
+ return 1;
+ }
+ }
+
if (ap->ops->check_atapi_dma)
rc = ap->ops->check_atapi_dma(qc);
{
struct ata_port *ap = qc->ap;
- ap->ops->tf_read(ap, &qc->result_tf);
qc->result_tf.flags = qc->tf.flags;
+ ap->ops->tf_read(ap, &qc->result_tf);
}
/**
* RETURNS:
* 0 on success, AC_ERR_* mask on failure
*/
-static unsigned int atapi_eh_request_sense(struct ata_device *dev,
- unsigned char *sense_buf)
+static unsigned int atapi_eh_request_sense(struct ata_queued_cmd *qc)
{
+ struct ata_device *dev = qc->dev;
+ unsigned char *sense_buf = qc->scsicmd->sense_buffer;
struct ata_port *ap = dev->ap;
struct ata_taskfile tf;
u8 cdb[ATAPI_CDB_LEN];
DPRINTK("ATAPI request sense\n");
- ata_tf_init(dev, &tf);
-
/* FIXME: is this needed? */
memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
- /* XXX: why tf_read here? */
- ap->ops->tf_read(ap, &tf);
-
- /* fill these in, for the case where they are -not- overwritten */
+ /* initialize sense_buf with the error register,
+ * for the case where they are -not- overwritten
+ */
sense_buf[0] = 0x70;
- sense_buf[2] = tf.feature >> 4;
+ sense_buf[2] = qc->result_tf.feature >> 4;
+
+ /* some devices time out if garbage left in tf */
+ ata_tf_init(dev, &tf);
memset(cdb, 0, ATAPI_CDB_LEN);
cdb[0] = REQUEST_SENSE;
case ATA_DEV_ATAPI:
if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
- tmp = atapi_eh_request_sense(qc->dev,
- qc->scsicmd->sense_buffer);
+ tmp = atapi_eh_request_sense(qc);
if (!tmp) {
/* ATA_QCFLAG_SENSE_VALID is used to
* tell atapi_qc_complete() that sense
{
struct ata_eh_context *ehc = &ap->eh_context;
struct ata_device *dev;
+ unsigned int new_mask = 0;
unsigned long flags;
int i, rc = 0;
DPRINTK("ENTER\n");
- for (i = 0; i < ATA_MAX_DEVICES; i++) {
+ /* For PATA drive side cable detection to work, IDENTIFY must
+ * be done backwards such that PDIAG- is released by the slave
+ * device before the master device is identified.
+ */
+ for (i = ATA_MAX_DEVICES - 1; i >= 0; i--) {
unsigned int action, readid_flags = 0;
dev = &ap->device[i];
if (action & ATA_EH_REVALIDATE && ata_dev_ready(dev)) {
if (ata_port_offline(ap)) {
rc = -EIO;
- break;
+ goto err;
}
ata_eh_about_to_do(ap, dev, ATA_EH_REVALIDATE);
rc = ata_dev_revalidate(dev, readid_flags);
if (rc)
- break;
+ goto err;
ata_eh_done(ap, dev, ATA_EH_REVALIDATE);
rc = ata_dev_read_id(dev, &dev->class, readid_flags,
dev->id);
- if (rc == 0) {
- ehc->i.flags |= ATA_EHI_PRINTINFO;
- rc = ata_dev_configure(dev);
- ehc->i.flags &= ~ATA_EHI_PRINTINFO;
- } else if (rc == -ENOENT) {
+ switch (rc) {
+ case 0:
+ new_mask |= 1 << i;
+ break;
+ case -ENOENT:
/* IDENTIFY was issued to non-existent
* device. No need to reset. Just
* thaw and kill the device.
*/
ata_eh_thaw_port(ap);
- dev->class = ATA_DEV_UNKNOWN;
- rc = 0;
- }
-
- if (rc) {
dev->class = ATA_DEV_UNKNOWN;
break;
+ default:
+ dev->class = ATA_DEV_UNKNOWN;
+ goto err;
}
+ }
+ }
- if (ata_dev_enabled(dev)) {
- spin_lock_irqsave(ap->lock, flags);
- ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
- spin_unlock_irqrestore(ap->lock, flags);
+ /* Configure new devices forward such that user doesn't see
+ * device detection messages backwards.
+ */
+ for (i = 0; i < ATA_MAX_DEVICES; i++) {
+ dev = &ap->device[i];
- /* new device discovered, configure xfermode */
- ehc->i.flags |= ATA_EHI_SETMODE;
- }
- }
+ if (!(new_mask & (1 << i)))
+ continue;
+
+ ehc->i.flags |= ATA_EHI_PRINTINFO;
+ rc = ata_dev_configure(dev);
+ ehc->i.flags &= ~ATA_EHI_PRINTINFO;
+ if (rc)
+ goto err;
+
+ spin_lock_irqsave(ap->lock, flags);
+ ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ /* new device discovered, configure xfermode */
+ ehc->i.flags |= ATA_EHI_SETMODE;
}
- if (rc)
- *r_failed_dev = dev;
+ return 0;
- DPRINTK("EXIT\n");
+ err:
+ *r_failed_dev = dev;
+ DPRINTK("EXIT rc=%d\n", rc);
return rc;
}
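
The restructure splits recovery into two passes: IDENTIFY walks the
devices in reverse so the slave releases PDIAG- before the master is
probed (which PATA cable detection depends on), and configuration then
walks forward so detection messages appear in the usual master-first
order. The control-flow skeleton, with identify()/configure() as
hypothetical stand-ins for the ata_dev_* calls:

	/* pass 1: identify backwards, releasing PDIAG- slave-first */
	for (i = ATA_MAX_DEVICES - 1; i >= 0; i--)
		if (identify(&ap->device[i]) == 0)
			new_mask |= 1 << i;

	/* pass 2: configure forwards, printing master-first */
	for (i = 0; i < ATA_MAX_DEVICES; i++)
		if (new_mask & (1 << i))
			configure(&ap->device[i]);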
scsi_cmd[8] = args[3];
scsi_cmd[10] = args[4];
scsi_cmd[12] = args[5];
- scsi_cmd[13] = args[6] & 0x0f;
+ scsi_cmd[13] = args[6] & 0x4f;
scsi_cmd[14] = args[0];
/* Good values for timeout and retries? Values below
extern int atapi_enabled;
extern int atapi_dmadir;
extern int libata_fua;
-extern int noacpi;
+extern int libata_noacpi;
extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev);
extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
u64 block, u32 n_block, unsigned int tf_flags,
/* Cases the state machine will not complete correctly without help */
if ((tf->flags & ATA_TFLAG_LBA48) || tf->protocol == ATA_PROT_ATAPI_DMA)
{
- len = qc->nbytes;
+ len = qc->nbytes / 2;
if (tf->flags & ATA_TFLAG_WRITE)
len |= 0x06000000;
int (*platform_notify)(struct device * dev) = NULL;
int (*platform_notify_remove)(struct device * dev) = NULL;
-/*
- * Detect the LANANA-assigned LOCAL/EXPERIMENTAL majors
- */
-bool is_lanana_major(unsigned int major)
-{
- if (major >= 60 && major <= 63)
- return 1;
- if (major >= 120 && major <= 127)
- return 1;
- if (major >= 240 && major <= 254)
- return 1;
- return 0;
-}
-
/*
* sysfs bindings for devices.
*/
void driver_unregister(struct device_driver * drv)
{
bus_remove_driver(drv);
- wait_for_completion(&drv->unloaded);
+ /*
+ * If the driver is a module, we are probably in
+ * the module unload path, and we want to wait
+ * for everything to unload before we can actually
+ * finish the unload.
+ */
+ if (drv->owner)
+ wait_for_completion(&drv->unloaded);
}
/**
"does not support reading geometry\n");
drv->heads = 255;
drv->sectors = 32; // Sectors per track
+ drv->cylinders = total_size + 1;
drv->raid_level = RAID_UNKNOWN;
} else {
drv->heads = inq_buff->data_byte[6];
If you have an Alchemy AU1000 processor (MIPS based) and you want
to use a console on a serial port, say Y. Otherwise, say N.
+config SERIAL_DEC
+ bool "DECstation serial support"
+ depends on MACH_DECSTATION
+ default y
+ help
+ This selects whether you want to be asked about drivers for
+ DECstation serial ports.
+
+ Note that the answer to this question won't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about DECstation serial ports.
+
+config SERIAL_DEC_CONSOLE
+ bool "Support for console on a DECstation serial port"
+ depends on SERIAL_DEC
+ default y
+ help
+ If you say Y here, it will be possible to use a serial port as the
+ system console (the system console is the device which receives all
+ kernel messages and warnings and which allows logins in single user
+ mode). Note that the firmware uses ttyS0 as the serial console on
+ the Maxine and ttyS2 on the others.
+
+ If unsure, say Y.
+
+config ZS
+ bool "Z85C30 Serial Support"
+ depends on SERIAL_DEC
+ default y
+ help
+ Documentation on the Zilog Z85C30 serial communications controller
+ is downloadable at <http://www.zilog.com/pdfs/serial/z85c30.pdf>
+
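
For reference, a DECstation build that simply accepts the defaults above
ends up with a .config fragment like this (a sketch; it assumes
MACH_DECSTATION is selected):

CONFIG_SERIAL_DEC=y
CONFIG_SERIAL_DEC_CONSOLE=y
CONFIG_ZS=y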
config A2232
tristate "Commodore A2232 serial support (EXPERIMENTAL)"
depends on EXPERIMENTAL && ZORRO && BROKEN_ON_SMP
i830-objs := i830_drv.o i830_dma.o i830_irq.o
i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o
radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o
-ffb-objs := ffb_drv.o ffb_context.o
sis-objs := sis_drv.o sis_mm.o
savage-objs := savage_drv.o savage_bci.o savage_state.o
via-objs := via_irq.o via_drv.o via_map.o via_mm.o via_dma.o via_verifier.o via_video.o via_dmablit.o
obj-$(CONFIG_DRM_I810) += i810.o
obj-$(CONFIG_DRM_I830) += i830.o
obj-$(CONFIG_DRM_I915) += i915.o
-obj-$(CONFIG_DRM_FFB) += ffb.o
obj-$(CONFIG_DRM_SIS) += sis.o
obj-$(CONFIG_DRM_SAVAGE)+= savage.o
obj-$(CONFIG_DRM_VIA) +=via.o
+++ /dev/null
-/* $Id: ffb_context.c,v 1.5 2001/08/09 17:47:51 davem Exp $
- * ffb_context.c: Creator/Creator3D DRI/DRM context switching.
- *
- * Copyright (C) 2000 David S. Miller (davem@redhat.com)
- *
- * Almost entirely stolen from tdfx_context.c, see there
- * for authors.
- */
-
-#include <asm/upa.h>
-
-#include "ffb.h"
-#include "drmP.h"
-
-#include "ffb_drv.h"
-
-static int DRM(alloc_queue) (drm_device_t * dev, int is_2d_only) {
- ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private;
- int i;
-
- for (i = 0; i < FFB_MAX_CTXS; i++) {
- if (fpriv->hw_state[i] == NULL)
- break;
- }
- if (i == FFB_MAX_CTXS)
- return -1;
-
- fpriv->hw_state[i] = kmalloc(sizeof(struct ffb_hw_context), GFP_KERNEL);
- if (fpriv->hw_state[i] == NULL)
- return -1;
-
- fpriv->hw_state[i]->is_2d_only = is_2d_only;
-
- /* Plus one because 0 is the special DRM_KERNEL_CONTEXT. */
- return i + 1;
-}
-
-static void ffb_save_context(ffb_dev_priv_t * fpriv, int idx)
-{
- ffb_fbcPtr ffb = fpriv->regs;
- struct ffb_hw_context *ctx;
- int i;
-
- ctx = fpriv->hw_state[idx - 1];
- if (idx == 0 || ctx == NULL)
- return;
-
- if (ctx->is_2d_only) {
- /* 2D applications only care about certain pieces
- * of state.
- */
- ctx->drawop = upa_readl(&ffb->drawop);
- ctx->ppc = upa_readl(&ffb->ppc);
- ctx->wid = upa_readl(&ffb->wid);
- ctx->fg = upa_readl(&ffb->fg);
- ctx->bg = upa_readl(&ffb->bg);
- ctx->xclip = upa_readl(&ffb->xclip);
- ctx->fbc = upa_readl(&ffb->fbc);
- ctx->rop = upa_readl(&ffb->rop);
- ctx->cmp = upa_readl(&ffb->cmp);
- ctx->matchab = upa_readl(&ffb->matchab);
- ctx->magnab = upa_readl(&ffb->magnab);
- ctx->pmask = upa_readl(&ffb->pmask);
- ctx->xpmask = upa_readl(&ffb->xpmask);
- ctx->lpat = upa_readl(&ffb->lpat);
- ctx->fontxy = upa_readl(&ffb->fontxy);
- ctx->fontw = upa_readl(&ffb->fontw);
- ctx->fontinc = upa_readl(&ffb->fontinc);
-
- /* stencil/stencilctl only exists on FFB2+ and later
- * due to the introduction of 3DRAM-III.
- */
- if (fpriv->ffb_type == ffb2_vertical_plus ||
- fpriv->ffb_type == ffb2_horizontal_plus) {
- ctx->stencil = upa_readl(&ffb->stencil);
- ctx->stencilctl = upa_readl(&ffb->stencilctl);
- }
-
- for (i = 0; i < 32; i++)
- ctx->area_pattern[i] = upa_readl(&ffb->pattern[i]);
- ctx->ucsr = upa_readl(&ffb->ucsr);
- return;
- }
-
- /* Fetch drawop. */
- ctx->drawop = upa_readl(&ffb->drawop);
-
- /* If we were saving the vertex registers, this is where
- * we would do it. We would save 32 32-bit words starting
- * at ffb->suvtx.
- */
-
- /* Capture rendering attributes. */
-
- ctx->ppc = upa_readl(&ffb->ppc); /* Pixel Processor Control */
- ctx->wid = upa_readl(&ffb->wid); /* Current WID */
- ctx->fg = upa_readl(&ffb->fg); /* Constant FG color */
- ctx->bg = upa_readl(&ffb->bg); /* Constant BG color */
- ctx->consty = upa_readl(&ffb->consty); /* Constant Y */
- ctx->constz = upa_readl(&ffb->constz); /* Constant Z */
- ctx->xclip = upa_readl(&ffb->xclip); /* X plane clip */
- ctx->dcss = upa_readl(&ffb->dcss); /* Depth Cue Scale Slope */
- ctx->vclipmin = upa_readl(&ffb->vclipmin); /* Primary XY clip, minimum */
- ctx->vclipmax = upa_readl(&ffb->vclipmax); /* Primary XY clip, maximum */
- ctx->vclipzmin = upa_readl(&ffb->vclipzmin); /* Primary Z clip, minimum */
- ctx->vclipzmax = upa_readl(&ffb->vclipzmax); /* Primary Z clip, maximum */
- ctx->dcsf = upa_readl(&ffb->dcsf); /* Depth Cue Scale Front Bound */
- ctx->dcsb = upa_readl(&ffb->dcsb); /* Depth Cue Scale Back Bound */
- ctx->dczf = upa_readl(&ffb->dczf); /* Depth Cue Scale Z Front */
- ctx->dczb = upa_readl(&ffb->dczb); /* Depth Cue Scale Z Back */
- ctx->blendc = upa_readl(&ffb->blendc); /* Alpha Blend Control */
- ctx->blendc1 = upa_readl(&ffb->blendc1); /* Alpha Blend Color 1 */
- ctx->blendc2 = upa_readl(&ffb->blendc2); /* Alpha Blend Color 2 */
- ctx->fbc = upa_readl(&ffb->fbc); /* Frame Buffer Control */
- ctx->rop = upa_readl(&ffb->rop); /* Raster Operation */
- ctx->cmp = upa_readl(&ffb->cmp); /* Compare Controls */
- ctx->matchab = upa_readl(&ffb->matchab); /* Buffer A/B Match Ops */
- ctx->matchc = upa_readl(&ffb->matchc); /* Buffer C Match Ops */
- ctx->magnab = upa_readl(&ffb->magnab); /* Buffer A/B Magnitude Ops */
- ctx->magnc = upa_readl(&ffb->magnc); /* Buffer C Magnitude Ops */
- ctx->pmask = upa_readl(&ffb->pmask); /* RGB Plane Mask */
- ctx->xpmask = upa_readl(&ffb->xpmask); /* X Plane Mask */
- ctx->ypmask = upa_readl(&ffb->ypmask); /* Y Plane Mask */
- ctx->zpmask = upa_readl(&ffb->zpmask); /* Z Plane Mask */
-
- /* Auxiliary Clips. */
- ctx->auxclip0min = upa_readl(&ffb->auxclip[0].min);
- ctx->auxclip0max = upa_readl(&ffb->auxclip[0].max);
- ctx->auxclip1min = upa_readl(&ffb->auxclip[1].min);
- ctx->auxclip1max = upa_readl(&ffb->auxclip[1].max);
- ctx->auxclip2min = upa_readl(&ffb->auxclip[2].min);
- ctx->auxclip2max = upa_readl(&ffb->auxclip[2].max);
- ctx->auxclip3min = upa_readl(&ffb->auxclip[3].min);
- ctx->auxclip3max = upa_readl(&ffb->auxclip[3].max);
-
- ctx->lpat = upa_readl(&ffb->lpat); /* Line Pattern */
- ctx->fontxy = upa_readl(&ffb->fontxy); /* XY Font Coordinate */
- ctx->fontw = upa_readl(&ffb->fontw); /* Font Width */
- ctx->fontinc = upa_readl(&ffb->fontinc); /* Font X/Y Increment */
-
- /* These registers/features only exist on FFB2 and later chips. */
- if (fpriv->ffb_type >= ffb2_prototype) {
- ctx->dcss1 = upa_readl(&ffb->dcss1); /* Depth Cue Scale Slope 1 */
- ctx->dcss2 = upa_readl(&ffb->dcss2); /* Depth Cue Scale Slope 2 */
- ctx->dcss2 = upa_readl(&ffb->dcss3); /* Depth Cue Scale Slope 3 */
- ctx->dcs2 = upa_readl(&ffb->dcs2); /* Depth Cue Scale 2 */
- ctx->dcs3 = upa_readl(&ffb->dcs3); /* Depth Cue Scale 3 */
- ctx->dcs4 = upa_readl(&ffb->dcs4); /* Depth Cue Scale 4 */
- ctx->dcd2 = upa_readl(&ffb->dcd2); /* Depth Cue Depth 2 */
- ctx->dcd3 = upa_readl(&ffb->dcd3); /* Depth Cue Depth 3 */
- ctx->dcd4 = upa_readl(&ffb->dcd4); /* Depth Cue Depth 4 */
-
- /* And stencil/stencilctl only exists on FFB2+ and later
- * due to the introduction of 3DRAM-III.
- */
- if (fpriv->ffb_type == ffb2_vertical_plus ||
- fpriv->ffb_type == ffb2_horizontal_plus) {
- ctx->stencil = upa_readl(&ffb->stencil);
- ctx->stencilctl = upa_readl(&ffb->stencilctl);
- }
- }
-
- /* Save the 32x32 area pattern. */
- for (i = 0; i < 32; i++)
- ctx->area_pattern[i] = upa_readl(&ffb->pattern[i]);
-
- /* Finally, stash away the User Constol/Status Register. */
- ctx->ucsr = upa_readl(&ffb->ucsr);
-}
-
-static void ffb_restore_context(ffb_dev_priv_t * fpriv, int old, int idx)
-{
- ffb_fbcPtr ffb = fpriv->regs;
- struct ffb_hw_context *ctx;
- int i;
-
- ctx = fpriv->hw_state[idx - 1];
- if (idx == 0 || ctx == NULL)
- return;
-
- if (ctx->is_2d_only) {
- /* 2D applications only care about certain pieces
- * of state.
- */
- upa_writel(ctx->drawop, &ffb->drawop);
-
- /* If we were restoring the vertex registers, this is where
- * we would do it. We would restore 32 32-bit words starting
- * at ffb->suvtx.
- */
-
- upa_writel(ctx->ppc, &ffb->ppc);
- upa_writel(ctx->wid, &ffb->wid);
- upa_writel(ctx->fg, &ffb->fg);
- upa_writel(ctx->bg, &ffb->bg);
- upa_writel(ctx->xclip, &ffb->xclip);
- upa_writel(ctx->fbc, &ffb->fbc);
- upa_writel(ctx->rop, &ffb->rop);
- upa_writel(ctx->cmp, &ffb->cmp);
- upa_writel(ctx->matchab, &ffb->matchab);
- upa_writel(ctx->magnab, &ffb->magnab);
- upa_writel(ctx->pmask, &ffb->pmask);
- upa_writel(ctx->xpmask, &ffb->xpmask);
- upa_writel(ctx->lpat, &ffb->lpat);
- upa_writel(ctx->fontxy, &ffb->fontxy);
- upa_writel(ctx->fontw, &ffb->fontw);
- upa_writel(ctx->fontinc, &ffb->fontinc);
-
- /* stencil/stencilctl only exists on FFB2+ and later
- * due to the introduction of 3DRAM-III.
- */
- if (fpriv->ffb_type == ffb2_vertical_plus ||
- fpriv->ffb_type == ffb2_horizontal_plus) {
- upa_writel(ctx->stencil, &ffb->stencil);
- upa_writel(ctx->stencilctl, &ffb->stencilctl);
- upa_writel(0x80000000, &ffb->fbc);
- upa_writel((ctx->stencilctl | 0x80000),
- &ffb->rawstencilctl);
- upa_writel(ctx->fbc, &ffb->fbc);
- }
-
- for (i = 0; i < 32; i++)
- upa_writel(ctx->area_pattern[i], &ffb->pattern[i]);
- upa_writel((ctx->ucsr & 0xf0000), &ffb->ucsr);
- return;
- }
-
- /* Restore drawop. */
- upa_writel(ctx->drawop, &ffb->drawop);
-
- /* If we were restoring the vertex registers, this is where
- * we would do it. We would restore 32 32-bit words starting
- * at ffb->suvtx.
- */
-
- /* Restore rendering attributes. */
-
- upa_writel(ctx->ppc, &ffb->ppc); /* Pixel Processor Control */
- upa_writel(ctx->wid, &ffb->wid); /* Current WID */
- upa_writel(ctx->fg, &ffb->fg); /* Constant FG color */
- upa_writel(ctx->bg, &ffb->bg); /* Constant BG color */
- upa_writel(ctx->consty, &ffb->consty); /* Constant Y */
- upa_writel(ctx->constz, &ffb->constz); /* Constant Z */
- upa_writel(ctx->xclip, &ffb->xclip); /* X plane clip */
- upa_writel(ctx->dcss, &ffb->dcss); /* Depth Cue Scale Slope */
- upa_writel(ctx->vclipmin, &ffb->vclipmin); /* Primary XY clip, minimum */
- upa_writel(ctx->vclipmax, &ffb->vclipmax); /* Primary XY clip, maximum */
- upa_writel(ctx->vclipzmin, &ffb->vclipzmin); /* Primary Z clip, minimum */
- upa_writel(ctx->vclipzmax, &ffb->vclipzmax); /* Primary Z clip, maximum */
- upa_writel(ctx->dcsf, &ffb->dcsf); /* Depth Cue Scale Front Bound */
- upa_writel(ctx->dcsb, &ffb->dcsb); /* Depth Cue Scale Back Bound */
- upa_writel(ctx->dczf, &ffb->dczf); /* Depth Cue Scale Z Front */
- upa_writel(ctx->dczb, &ffb->dczb); /* Depth Cue Scale Z Back */
- upa_writel(ctx->blendc, &ffb->blendc); /* Alpha Blend Control */
- upa_writel(ctx->blendc1, &ffb->blendc1); /* Alpha Blend Color 1 */
- upa_writel(ctx->blendc2, &ffb->blendc2); /* Alpha Blend Color 2 */
- upa_writel(ctx->fbc, &ffb->fbc); /* Frame Buffer Control */
- upa_writel(ctx->rop, &ffb->rop); /* Raster Operation */
- upa_writel(ctx->cmp, &ffb->cmp); /* Compare Controls */
- upa_writel(ctx->matchab, &ffb->matchab); /* Buffer A/B Match Ops */
- upa_writel(ctx->matchc, &ffb->matchc); /* Buffer C Match Ops */
- upa_writel(ctx->magnab, &ffb->magnab); /* Buffer A/B Magnitude Ops */
- upa_writel(ctx->magnc, &ffb->magnc); /* Buffer C Magnitude Ops */
- upa_writel(ctx->pmask, &ffb->pmask); /* RGB Plane Mask */
- upa_writel(ctx->xpmask, &ffb->xpmask); /* X Plane Mask */
- upa_writel(ctx->ypmask, &ffb->ypmask); /* Y Plane Mask */
- upa_writel(ctx->zpmask, &ffb->zpmask); /* Z Plane Mask */
-
- /* Auxiliary Clips. */
- upa_writel(ctx->auxclip0min, &ffb->auxclip[0].min);
- upa_writel(ctx->auxclip0max, &ffb->auxclip[0].max);
- upa_writel(ctx->auxclip1min, &ffb->auxclip[1].min);
- upa_writel(ctx->auxclip1max, &ffb->auxclip[1].max);
- upa_writel(ctx->auxclip2min, &ffb->auxclip[2].min);
- upa_writel(ctx->auxclip2max, &ffb->auxclip[2].max);
- upa_writel(ctx->auxclip3min, &ffb->auxclip[3].min);
- upa_writel(ctx->auxclip3max, &ffb->auxclip[3].max);
-
- upa_writel(ctx->lpat, &ffb->lpat); /* Line Pattern */
- upa_writel(ctx->fontxy, &ffb->fontxy); /* XY Font Coordinate */
- upa_writel(ctx->fontw, &ffb->fontw); /* Font Width */
- upa_writel(ctx->fontinc, &ffb->fontinc); /* Font X/Y Increment */
-
- /* These registers/features only exist on FFB2 and later chips. */
- if (fpriv->ffb_type >= ffb2_prototype) {
- upa_writel(ctx->dcss1, &ffb->dcss1); /* Depth Cue Scale Slope 1 */
- upa_writel(ctx->dcss2, &ffb->dcss2); /* Depth Cue Scale Slope 2 */
- upa_writel(ctx->dcss3, &ffb->dcss2); /* Depth Cue Scale Slope 3 */
- upa_writel(ctx->dcs2, &ffb->dcs2); /* Depth Cue Scale 2 */
- upa_writel(ctx->dcs3, &ffb->dcs3); /* Depth Cue Scale 3 */
- upa_writel(ctx->dcs4, &ffb->dcs4); /* Depth Cue Scale 4 */
- upa_writel(ctx->dcd2, &ffb->dcd2); /* Depth Cue Depth 2 */
- upa_writel(ctx->dcd3, &ffb->dcd3); /* Depth Cue Depth 3 */
- upa_writel(ctx->dcd4, &ffb->dcd4); /* Depth Cue Depth 4 */
-
- /* And stencil/stencilctl only exists on FFB2+ and later
- * due to the introduction of 3DRAM-III.
- */
- if (fpriv->ffb_type == ffb2_vertical_plus ||
- fpriv->ffb_type == ffb2_horizontal_plus) {
- /* Unfortunately, there is a hardware bug on
- * the FFB2+ chips which prevents a normal write
- * to the stencil control register from working
- * as it should.
- *
- * The state controlled by the FFB stencilctl register
- * really gets transferred to the per-buffer instances
- * of the stencilctl register in the 3DRAM chips.
- *
- * The bug is that FFB does not update buffer C correctly,
- * so we have to do it by hand for them.
- */
-
- /* This will update buffers A and B. */
- upa_writel(ctx->stencil, &ffb->stencil);
- upa_writel(ctx->stencilctl, &ffb->stencilctl);
-
- /* Force FFB to use buffer C 3dram regs. */
- upa_writel(0x80000000, &ffb->fbc);
- upa_writel((ctx->stencilctl | 0x80000),
- &ffb->rawstencilctl);
-
- /* Now restore the correct FBC controls. */
- upa_writel(ctx->fbc, &ffb->fbc);
- }
- }
-
- /* Restore the 32x32 area pattern. */
- for (i = 0; i < 32; i++)
- upa_writel(ctx->area_pattern[i], &ffb->pattern[i]);
-
- /* Finally, stash away the User Constol/Status Register.
- * The only state we really preserve here is the picking
- * control.
- */
- upa_writel((ctx->ucsr & 0xf0000), &ffb->ucsr);
-}
-
-#define FFB_UCSR_FB_BUSY 0x01000000
-#define FFB_UCSR_RP_BUSY 0x02000000
-#define FFB_UCSR_ALL_BUSY (FFB_UCSR_RP_BUSY|FFB_UCSR_FB_BUSY)
-
-static void FFBWait(ffb_fbcPtr ffb)
-{
- int limit = 100000;
-
- do {
- u32 regval = upa_readl(&ffb->ucsr);
-
- if ((regval & FFB_UCSR_ALL_BUSY) == 0)
- break;
- } while (--limit);
-}
-
-int ffb_driver_context_switch(drm_device_t * dev, int old, int new)
-{
- ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private;
-
-#ifdef DRM_DMA_HISTOGRAM
- dev->ctx_start = get_cycles();
-#endif
-
- DRM_DEBUG("Context switch from %d to %d\n", old, new);
-
- if (new == dev->last_context || dev->last_context == 0) {
- dev->last_context = new;
- return 0;
- }
-
- FFBWait(fpriv->regs);
- ffb_save_context(fpriv, old);
- ffb_restore_context(fpriv, old, new);
- FFBWait(fpriv->regs);
-
- dev->last_context = new;
-
- return 0;
-}
-
-int ffb_driver_resctx(struct inode *inode, struct file *filp, unsigned int cmd,
- unsigned long arg)
-{
- drm_ctx_res_t res;
- drm_ctx_t ctx;
- int i;
-
- DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
- if (copy_from_user(&res, (drm_ctx_res_t __user *) arg, sizeof(res)))
- return -EFAULT;
- if (res.count >= DRM_RESERVED_CONTEXTS) {
- memset(&ctx, 0, sizeof(ctx));
- for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
- ctx.handle = i;
- if (copy_to_user(&res.contexts[i], &i, sizeof(i)))
- return -EFAULT;
- }
- }
- res.count = DRM_RESERVED_CONTEXTS;
- if (copy_to_user((drm_ctx_res_t __user *) arg, &res, sizeof(res)))
- return -EFAULT;
- return 0;
-}
-
-int ffb_driver_addctx(struct inode *inode, struct file *filp, unsigned int cmd,
- unsigned long arg)
-{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->dev;
- drm_ctx_t ctx;
- int idx;
-
- if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx)))
- return -EFAULT;
- idx = DRM(alloc_queue) (dev, (ctx.flags & _DRM_CONTEXT_2DONLY));
- if (idx < 0)
- return -ENFILE;
-
- DRM_DEBUG("%d\n", ctx.handle);
- ctx.handle = idx;
- if (copy_to_user((drm_ctx_t __user *) arg, &ctx, sizeof(ctx)))
- return -EFAULT;
- return 0;
-}
-
-int ffb_driver_modctx(struct inode *inode, struct file *filp, unsigned int cmd,
- unsigned long arg)
-{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->dev;
- ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private;
- struct ffb_hw_context *hwctx;
- drm_ctx_t ctx;
- int idx;
-
- if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx)))
- return -EFAULT;
-
- idx = ctx.handle;
- if (idx <= 0 || idx >= FFB_MAX_CTXS)
- return -EINVAL;
-
- hwctx = fpriv->hw_state[idx - 1];
- if (hwctx == NULL)
- return -EINVAL;
-
- if ((ctx.flags & _DRM_CONTEXT_2DONLY) == 0)
- hwctx->is_2d_only = 0;
- else
- hwctx->is_2d_only = 1;
-
- return 0;
-}
-
-int ffb_driver_getctx(struct inode *inode, struct file *filp, unsigned int cmd,
- unsigned long arg)
-{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->dev;
- ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private;
- struct ffb_hw_context *hwctx;
- drm_ctx_t ctx;
- int idx;
-
- if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx)))
- return -EFAULT;
-
- idx = ctx.handle;
- if (idx <= 0 || idx >= FFB_MAX_CTXS)
- return -EINVAL;
-
- hwctx = fpriv->hw_state[idx - 1];
- if (hwctx == NULL)
- return -EINVAL;
-
- if (hwctx->is_2d_only != 0)
- ctx.flags = _DRM_CONTEXT_2DONLY;
- else
- ctx.flags = 0;
-
- if (copy_to_user((drm_ctx_t __user *) arg, &ctx, sizeof(ctx)))
- return -EFAULT;
-
- return 0;
-}
-
-int ffb_driver_switchctx(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
-{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->dev;
- drm_ctx_t ctx;
-
- if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx)))
- return -EFAULT;
- DRM_DEBUG("%d\n", ctx.handle);
- return ffb_driver_context_switch(dev, dev->last_context, ctx.handle);
-}
-
-int ffb_driver_newctx(struct inode *inode, struct file *filp, unsigned int cmd,
- unsigned long arg)
-{
- drm_ctx_t ctx;
-
- if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx)))
- return -EFAULT;
- DRM_DEBUG("%d\n", ctx.handle);
-
- return 0;
-}
-
-int ffb_driver_rmctx(struct inode *inode, struct file *filp, unsigned int cmd,
- unsigned long arg)
-{
- drm_ctx_t ctx;
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->dev;
- ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private;
- int idx;
-
- if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx)))
- return -EFAULT;
- DRM_DEBUG("%d\n", ctx.handle);
-
- idx = ctx.handle - 1;
- if (idx < 0 || idx >= FFB_MAX_CTXS)
- return -EINVAL;
-
- kfree(fpriv->hw_state[idx]);
- fpriv->hw_state[idx] = NULL;
- return 0;
-}
-
-void ffb_set_context_ioctls(void)
-{
- DRM(ioctls)[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)].func = ffb_driver_addctx;
- DRM(ioctls)[DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)].func = ffb_driver_rmctx;
- DRM(ioctls)[DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)].func = ffb_driver_modctx;
- DRM(ioctls)[DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)].func = ffb_driver_getctx;
- DRM(ioctls)[DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)].func =
- ffb_driver_switchctx;
- DRM(ioctls)[DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)].func = ffb_driver_newctx;
- DRM(ioctls)[DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)].func = ffb_driver_resctx;
-
-}
+++ /dev/null
-/* $Id: ffb_drv.c,v 1.16 2001/10/18 16:00:24 davem Exp $
- * ffb_drv.c: Creator/Creator3D direct rendering driver.
- *
- * Copyright (C) 2000 David S. Miller (davem@redhat.com)
- */
-
-#include "ffb.h"
-#include "drmP.h"
-
-#include "ffb_drv.h"
-
-#include <linux/smp_lock.h>
-#include <asm/shmparam.h>
-#include <asm/oplib.h>
-#include <asm/upa.h>
-
-#define DRIVER_AUTHOR "David S. Miller"
-
-#define DRIVER_NAME "ffb"
-#define DRIVER_DESC "Creator/Creator3D"
-#define DRIVER_DATE "20000517"
-
-#define DRIVER_MAJOR 0
-#define DRIVER_MINOR 0
-#define DRIVER_PATCHLEVEL 1
-
-typedef struct _ffb_position_t {
- int node;
- int root;
-} ffb_position_t;
-
-static ffb_position_t *ffb_position;
-
-static void get_ffb_type(ffb_dev_priv_t * ffb_priv, int instance)
-{
- volatile unsigned char *strap_bits;
- unsigned char val;
-
- strap_bits = (volatile unsigned char *)
- (ffb_priv->card_phys_base + 0x00200000UL);
-
- /* Don't ask, you have to read the value twice for whatever
- * reason to get correct contents.
- */
- val = upa_readb(strap_bits);
- val = upa_readb(strap_bits);
- switch (val & 0x78) {
- case (0x0 << 5) | (0x0 << 3):
- ffb_priv->ffb_type = ffb1_prototype;
- printk("ffb%d: Detected FFB1 pre-FCS prototype\n", instance);
- break;
- case (0x0 << 5) | (0x1 << 3):
- ffb_priv->ffb_type = ffb1_standard;
- printk("ffb%d: Detected FFB1\n", instance);
- break;
- case (0x0 << 5) | (0x3 << 3):
- ffb_priv->ffb_type = ffb1_speedsort;
- printk("ffb%d: Detected FFB1-SpeedSort\n", instance);
- break;
- case (0x1 << 5) | (0x0 << 3):
- ffb_priv->ffb_type = ffb2_prototype;
- printk("ffb%d: Detected FFB2/vertical pre-FCS prototype\n",
- instance);
- break;
- case (0x1 << 5) | (0x1 << 3):
- ffb_priv->ffb_type = ffb2_vertical;
- printk("ffb%d: Detected FFB2/vertical\n", instance);
- break;
- case (0x1 << 5) | (0x2 << 3):
- ffb_priv->ffb_type = ffb2_vertical_plus;
- printk("ffb%d: Detected FFB2+/vertical\n", instance);
- break;
- case (0x2 << 5) | (0x0 << 3):
- ffb_priv->ffb_type = ffb2_horizontal;
- printk("ffb%d: Detected FFB2/horizontal\n", instance);
- break;
- case (0x2 << 5) | (0x2 << 3):
- ffb_priv->ffb_type = ffb2_horizontal;
- printk("ffb%d: Detected FFB2+/horizontal\n", instance);
- break;
- default:
- ffb_priv->ffb_type = ffb2_vertical;
- printk("ffb%d: Unknown boardID[%08x], assuming FFB2\n",
- instance, val);
- break;
- };
-}
-
-static void ffb_apply_upa_parent_ranges(int parent,
- struct linux_prom64_registers *regs)
-{
- struct linux_prom64_ranges ranges[PROMREG_MAX];
- char name[128];
- int len, i;
-
- prom_getproperty(parent, "name", name, sizeof(name));
- if (strcmp(name, "upa") != 0)
- return;
-
- len =
- prom_getproperty(parent, "ranges", (void *)ranges, sizeof(ranges));
- if (len <= 0)
- return;
-
- len /= sizeof(struct linux_prom64_ranges);
- for (i = 0; i < len; i++) {
- struct linux_prom64_ranges *rng = &ranges[i];
- u64 phys_addr = regs->phys_addr;
-
- if (phys_addr >= rng->ot_child_base &&
- phys_addr < (rng->ot_child_base + rng->or_size)) {
- regs->phys_addr -= rng->ot_child_base;
- regs->phys_addr += rng->ot_parent_base;
- return;
- }
- }
-
- return;
-}
-
-static int ffb_init_one(drm_device_t * dev, int prom_node, int parent_node,
- int instance)
-{
- struct linux_prom64_registers regs[2 * PROMREG_MAX];
- ffb_dev_priv_t *ffb_priv = (ffb_dev_priv_t *) dev->dev_private;
- int i;
-
- ffb_priv->prom_node = prom_node;
- if (prom_getproperty(ffb_priv->prom_node, "reg",
- (void *)regs, sizeof(regs)) <= 0) {
- return -EINVAL;
- }
- ffb_apply_upa_parent_ranges(parent_node, &regs[0]);
- ffb_priv->card_phys_base = regs[0].phys_addr;