attached to /proc/acpi/hotkey/poll_method, which is dynamically
created. Please use the command "cat /proc/acpi/hotkey/polling_method"
to retrieve it.
+
+Note: Use the cmdline option "acpi_generic_hotkey" to override
+loading of any platform-specific driver.
acpi_fake_ecdt [HW,ACPI] Workaround failure due to BIOS lacking ECDT
+ acpi_generic_hotkey [HW,ACPI]
+ Allow the consolidated generic hotkey driver to
+ override the platform-specific driver.
+ See also Documentation/acpi-hotkey.txt.
+
ad1816= [HW,OSS]
Format: <io>,<irq>,<dma>,<dma2>
See also Documentation/sound/oss/AD1816.
in the kernel as they aren't compatible with hotplug, PCI domains, or
sane locking.
-pcibios_present() and Since ages, you don't need to test presence
-pci_present() of PCI subsystem when trying to talk to it.
- If it's not there, the list of PCI devices
- is empty and all functions for searching for
- devices just return NULL.
-pcibios_(read|write)_* Superseded by their pci_(read|write)_*
- counterparts.
-pcibios_find_* Superseded by their pci_get_* counterparts.
-pci_for_each_dev() Superseded by pci_get_device()
-pci_for_each_dev_reverse() Superseded by pci_find_device_reverse()
-pci_for_each_bus() Superseded by pci_find_next_bus()
pci_find_device() Superseded by pci_get_device()
pci_find_subsys() Superseded by pci_get_subsys()
pci_find_slot() Superseded by pci_get_slot()
-pcibios_find_class() Superseded by pci_get_class()
-pci_find_class() Superseded by pci_get_class()
-pci_(read|write)_*_nodev() Superseded by pci_bus_(read|write)_*()
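
The pci_get_*() replacements are reference counted: each call drops the
reference on the device passed in as 'from' and takes a new reference on the
device it returns, which is what makes them safe with hotplug. A minimal
sketch of the iteration idiom that supersedes pci_find_device(); MY_VENDOR_ID
and MY_DEVICE_ID are placeholder values, not anything defined in this patch:

	struct pci_dev *dev = NULL;

	while ((dev = pci_get_device(MY_VENDOR_ID, MY_DEVICE_ID, dev)) != NULL) {
		/* a reference is held on dev for the duration of this iteration */
		/* ... driver-specific work ... */
	}
	/* on normal loop exit the last reference has already been dropped;
	 * if you break out of the loop early, call pci_dev_put(dev) yourself */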
M: greg@kroah.com
S: Maintained
+PCIE HOTPLUG DRIVER
+P: Kristen Carlson Accardi
+M: kristen.c.accardi@intel.com
+L: pcihpd-discuss@lists.sourceforge.net
+S: Maintained
+
PCMCIA SUBSYSTEM
P: Linux PCMCIA Team
L: http://lists.infradead.org/mailman/listinfo/linux-pcmcia
L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
S: Maintained
+SHPC HOTPLUG DRIVER
+P: Kristen Carlson Accardi
+M: kristen.c.accardi@intel.com
+L: pcihpd-discuss@lists.sourceforge.net
+S: Maintained
+
SPARC (sparc32):
P: William L. Irwin
M: wli@holomorphy.com
#endif
/* Attach the domains */
- for_each_online_cpu(i) {
+ for_each_cpu_mask(i, *cpu_map) {
struct sched_domain *sd;
#ifdef CONFIG_SCHED_SMT
sd = &per_cpu(cpu_domains, i);
/* The HvReleaseData is the root of the information shared between
* the hypervisor and Linux.
*/
-
-/*
- * WARNING - magic here
- *
- * Ok, this is a horrid hack below, but marginally better than the
- * alternatives. What we really want is just to initialize
- * hvReleaseData in C as in the #if 0 section here. However, gcc
- * refuses to believe that (u32)&x is a constant expression, so will
- * not allow the xMsNucDataOffset field to be properly initialized.
- * So, we declare hvReleaseData in inline asm instead. We use inline
- * asm, rather than a .S file, because the assembler won't generate
- * the necessary relocation for the LparMap either, unless that symbol
- * is declared in the same source file. Finally, we put the asm in a
- * dummy, attribute-used function, instead of at file scope, because
- * file scope asms don't allow contraints. We want to use the "i"
- * constraints to put sizeof() and offsetof() expressions in there,
- * because including asm/offsets.h in C code then stringifying causes
- * all manner of warnings.
- */
-#if 0
struct HvReleaseData hvReleaseData = {
.xDesc = 0xc8a5d9c4, /* "HvRD" ebcdic */
.xSize = sizeof(struct HvReleaseData),
.xVpdAreasPtrOffset = offsetof(struct naca_struct, xItVpdAreas),
.xSlicNacaAddr = &naca, /* 64-bit Naca address */
- .xMsNucDataOffset = (u32)((unsigned long)&xLparMap - KERNELBASE),
+ .xMsNucDataOffset = LPARMAP_PHYS,
.xFlags = HVREL_TAGSINACTIVE /* tags inactive */
/* 64 bit */
/* shared processors */
0xa7, 0x40, 0xf2, 0x4b,
0xf4, 0x4b, 0xf6, 0xf4 },
};
-#endif
-
-
-extern struct HvReleaseData hvReleaseData;
-
-static void __attribute_used__ hvReleaseData_wrapper(void)
-{
- /* This doesn't appear to need any alignment (even 4 byte) */
- asm volatile (
- " lparMapPhys = xLparMap - %3\n"
- " .data\n"
- " .globl hvReleaseData\n"
- "hvReleaseData:\n"
- " .long 0xc8a5d9c4\n" /* xDesc */
- /* "HvRD" in ebcdic */
- " .short %0\n" /* xSize */
- " .short %1\n" /* xVpdAreasPtrOffset */
- " .llong naca\n" /* xSlicNacaAddr */
- " .long lparMapPhys\n" /* xMsNucDataOffset */
- " .long 0\n" /* xRsvd1 */
- " .short %2\n" /* xFlags */
- " .short 4\n" /* xVrmIndex - v5r2m0 */
- " .short 3\n" /* xMinSupportedPlicVrmIndex - v5r1m0 */
- " .short 3\n" /* xMinCompatablePlicVrmIndex - v5r1m0 */
- " .long 0xd38995a4\n" /* xVrmName */
- " .long 0xa740f24b\n" /* "Linux 2.4.64" ebcdic */
- " .long 0xf44bf6f4\n"
- " . = hvReleaseData + %0\n"
- " .previous\n"
- : : "i"(sizeof(hvReleaseData)),
- "i"(offsetof(struct naca_struct, xItVpdAreas)),
- "i"(HVREL_TAGSINACTIVE /* tags inactive, 64 bit, */
- /* shared processors, HMT allowed */
- | 6), /* TEMP: This allows non-GA drivers */
- "i"(KERNELBASE)
- );
-}
-
-struct LparMap __attribute__((aligned (16))) xLparMap = {
- .xNumberEsids = HvEsidsToMap,
- .xNumberRanges = HvRangesToMap,
- .xSegmentTableOffs = STAB0_PAGE,
-
- .xEsids = {
- { .xKernelEsid = GET_ESID(KERNELBASE),
- .xKernelVsid = KERNEL_VSID(KERNELBASE), },
- { .xKernelEsid = GET_ESID(VMALLOCBASE),
- .xKernelVsid = KERNEL_VSID(VMALLOCBASE), },
- },
-
- .xRanges = {
- { .xPages = HvPagesToMap,
- .xOffset = 0,
- .xVPN = KERNEL_VSID(KERNELBASE) << (SID_SHIFT - PAGE_SHIFT),
- },
- },
-};
extern void system_reset_iSeries(void);
extern void machine_check_iSeries(void);
obj-$(CONFIG_KPROBES) += kprobes.o
CFLAGS_ioctl32.o += -Ifs/
+
+ifeq ($(CONFIG_PPC_ISERIES),y)
+arch/ppc64/kernel/head.o: arch/ppc64/kernel/lparmap.s
+AFLAGS_head.o += -Iarch/ppc64/kernel
+endif
#include <asm/cputable.h>
#include <asm/setup.h>
#include <asm/hvcall.h>
+#include <asm/iSeries/LparMap.h>
#ifdef CONFIG_PPC_ISERIES
#define DO_SOFT_DISABLE
.globl fwnmi_data_area
fwnmi_data_area:
+#ifdef CONFIG_PPC_ISERIES
+ . = LPARMAP_PHYS
+#include "lparmap.s"
+#endif /* CONFIG_PPC_ISERIES */
+
/*
* Vectors for the FWNMI option. Share common code.
*/
--- /dev/null
+/*
+ * Copyright (C) 2005 Stephen Rothwell IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <asm/mmu.h>
+#include <asm/page.h>
+#include <asm/iSeries/LparMap.h>
+
+const struct LparMap __attribute__((__section__(".text"))) xLparMap = {
+ .xNumberEsids = HvEsidsToMap,
+ .xNumberRanges = HvRangesToMap,
+ .xSegmentTableOffs = STAB0_PAGE,
+
+ .xEsids = {
+ { .xKernelEsid = GET_ESID(KERNELBASE),
+ .xKernelVsid = KERNEL_VSID(KERNELBASE), },
+ { .xKernelEsid = GET_ESID(VMALLOCBASE),
+ .xKernelVsid = KERNEL_VSID(VMALLOCBASE), },
+ },
+
+ .xRanges = {
+ { .xPages = HvPagesToMap,
+ .xOffset = 0,
+ .xVPN = KERNEL_VSID(KERNELBASE) << (SID_SHIFT - PAGE_SHIFT),
+ },
+ },
+};
extern char line_buf[80];
#endif /*ENABLE_DEBUGGER*/
-int acpi_specific_hotkey_enabled;
+int acpi_specific_hotkey_enabled = TRUE;
EXPORT_SYMBOL(acpi_specific_hotkey_enabled);
static unsigned int acpi_irq_irq;
int __init
acpi_hotkey_setup(char *str)
{
- acpi_specific_hotkey_enabled = TRUE;
+ acpi_specific_hotkey_enabled = FALSE;
return 1;
}
-__setup("acpi_specific_hotkey", acpi_hotkey_setup);
+__setup("acpi_generic_hotkey", acpi_hotkey_setup);
/*
* max_cstate is defined in the base kernel so modules can
pr_debug("device class '%s': release.\n", cd->class_id);
+ if (cd->devt_attr) {
+ kfree(cd->devt_attr);
+ cd->devt_attr = NULL;
+ }
+
if (cls->release)
cls->release(cd);
else {
if (class_dev->dev)
sysfs_remove_link(&class_dev->kobj, "device");
- if (class_dev->devt_attr) {
+ if (class_dev->devt_attr)
class_device_remove_file(class_dev, class_dev->devt_attr);
- kfree(class_dev->devt_attr);
- class_dev->devt_attr = NULL;
- }
class_device_remove_attrs(class_dev);
kobject_hotplug(&class_dev->kobj, KOBJ_REMOVE);
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * Send feedback to <greg@kroah.com>, <dely.l.sy@intel.com>
+ * Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com>
*
*/
#ifndef _PCIEHP_H
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * Send feedback to <greg@kroah.com>, <dely.l.sy@intel.com>
+ * Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com>
*
*/
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * Send feedback to <greg@kroah.com>, <dely.l.sy@intel.com>
+ * Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com>
*
*/
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * Send feedback to <greg@kroah.com>,<dely.l.sy@intel.com>
+ * Send feedback to <greg@kroah.com>,<kristen.c.accardi@intel.com>
*
*/
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * Send feedback to <greg@kroah.com>, <dely.l.sy@intel.com>
+ * Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com>
*
*/
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * Send feedback to <greg@kroah.com>, <dely.l.sy@intel.com>
+ * Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com>
*
*/
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * Send feedback to <dely.l.sy@intel.com>
+ * Send feedback to <kristen.c.accardi@intel.com>
*
*/
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * Send feedback to <greg@kroah.com>, <dely.l.sy@intel.com>
+ * Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com>
*
*/
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * Send feedback to <greg@kroah.com>, <dely.l.sy@intel.com>
+ * Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com>
*
*/
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * Send feedback to <greg@kroah.com>,<dely.l.sy@intel.com>
+ * Send feedback to <greg@kroah.com>,<kristen.c.accardi@intel.com>
*
*/
#ifndef _SHPCHP_H
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * Send feedback to <greg@kroah.com>, <dely.l.sy@intel.com>
+ * Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com>
*
*/
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * Send feedback to <greg@kroah.com>, <dely.l.sy@intel.com>
+ * Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com>
*
*/
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * Send feedback to <greg@kroah.com>,<dely.l.sy@intel.com>
+ * Send feedback to <greg@kroah.com>,<kristen.c.accardi@intel.com>
*
*/
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * Send feedback to <greg@kroah.com>, <dely.l.sy@intel.com>
+ * Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com>
*
*/
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * Send feedback to <greg@kroah.com>, <dely.l.sy@intel.com>
+ * Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com>
*
*/
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * Send feedback to <dely.l.sy@intel.com>
+ * Send feedback to <kristen.c.accardi@intel.com>
*
*/
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * Send feedback to <greg@kroah.com>,<dely.l.sy@intel.com>
+ * Send feedback to <greg@kroah.com>,<kristen.c.accardi@intel.com>
*
*/
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * Send feedback to <greg@kroah.com>, <dely.l.sy@intel.com>
+ * Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com>
*
*/
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * Send feedback to <greg@kroah.com>, <dely.l.sy@intel.com>
+ * Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com>
*
*/
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * Send feedback to <greg@kroah.com>, <dely.l.sy@intel.com>
+ * Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com>
*
*/
}
}
-static void disable_msi_mode(struct pci_dev *dev, int pos, int type)
+void disable_msi_mode(struct pci_dev *dev, int pos, int type)
{
u16 control;
if (!pci_msi_enable || !dev)
return status;
+ if (dev->no_msi)
+ return status;
+
temp = dev->irq;
if ((status = msi_init()) < 0)
#define pci_msi_quirk 0
#endif
+#ifdef CONFIG_PCI_MSI
+void disable_msi_mode(struct pci_dev *dev, int pos, int type);
+#else
+static inline void disable_msi_mode(struct pci_dev *dev, int pos, int type) { }
+#endif
+
extern int pcie_mch_quirk;
extern struct device_attribute pci_dev_attrs[];
extern struct class_device_attribute class_device_attr_cpuaffinity;
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_pcie_mch );
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_pcie_mch );
+
+/*
+ * It's possible for the MSI to get corrupted if shpc and acpi
+ * are used together on certain PXH-based systems.
+ */
+static void __devinit quirk_pcie_pxh(struct pci_dev *dev)
+{
+ disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI),
+ PCI_CAP_ID_MSI);
+ dev->no_msi = 1;
+
+ printk(KERN_WARNING "PCI: PXH quirk detected, "
+ "disabling MSI for SHPC device\n");
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHD_0, quirk_pcie_pxh);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHD_1, quirk_pcie_pxh);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_pcie_pxh);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_pcie_pxh);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_pcie_pxh);
+
+
static void __devinit quirk_netmos(struct pci_dev *dev)
{
unsigned int num_parallel = (dev->subsystem_device & 0xf0) >> 4;
goto exit;
}
- x = le16_to_cpu(*(__le16 *) &data[2]);
- y = le16_to_cpu(*(__le16 *) &data[4]);
-
input_regs(dev, regs);
if (data[1] & 0x10) { /* in prox */
}
}
- if (data[1] & 0x80) {
+ if (data[1] & 0x90) {
+ x = le16_to_cpu(*(__le16 *) &data[2]);
+ y = le16_to_cpu(*(__le16 *) &data[4]);
input_report_abs(dev, ABS_X, x);
input_report_abs(dev, ABS_Y, y);
- }
- if (wacom->tool[0] != BTN_TOOL_MOUSE) {
- input_report_abs(dev, ABS_PRESSURE, le16_to_cpu(*(__le16 *) &data[6]));
- input_report_key(dev, BTN_TOUCH, data[1] & 0x01);
- input_report_key(dev, BTN_STYLUS, data[1] & 0x02);
- input_report_key(dev, BTN_STYLUS2, data[1] & 0x04);
+ if (wacom->tool[0] != BTN_TOOL_MOUSE) {
+ input_report_abs(dev, ABS_PRESSURE, le16_to_cpu(*(__le16 *) &data[6]));
+ input_report_key(dev, BTN_TOUCH, data[1] & 0x01);
+ input_report_key(dev, BTN_STYLUS, data[1] & 0x02);
+ input_report_key(dev, BTN_STYLUS2, data[1] & 0x04);
+ }
}
input_report_key(dev, wacom->tool[0], data[1] & 0x10);
/* Cintiq doesn't send data when RDY bit isn't set */
if ((wacom->features->type == CINTIQ) && !(data[1] & 0x40))
- return;
+ goto exit;
if (wacom->features->type >= INTUOS3) {
input_report_abs(dev, ABS_X, (data[2] << 9) | (data[3] << 1) | ((data[9] >> 1) & 1));
* The USB Monitor, inspired by Dave Harding's USBMon.
*
* mon_main.c: Main file, module initiation and exit, registrations, etc.
+ *
+ * Copyright (C) 2005 Pete Zaitcev (zaitcev@redhat.com)
*/
#include <linux/kernel.h>
mondir = debugfs_create_dir("usbmon", NULL);
if (IS_ERR(mondir)) {
- printk(KERN_NOTICE TAG ": debugs is not available\n");
+ printk(KERN_NOTICE TAG ": debugfs is not available\n");
return -ENODEV;
}
if (mondir == NULL) {
/*
* The USB Monitor, inspired by Dave Harding's USBMon.
+ *
+ * Copyright (C) 2005 Pete Zaitcev (zaitcev@redhat.com)
*/
#ifndef __USB_MON_H
int
fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
{
- int err;
+ int err, flags = info->flags;
if (var->activate & FB_ACTIVATE_INV_MODE) {
struct fb_videomode mode1, mode2;
!list_empty(&info->modelist))
err = fb_add_videomode(&mode, &info->modelist);
- if (!err && info->flags & FBINFO_MISC_USEREVENT) {
+ if (!err && (flags & FBINFO_MISC_USEREVENT)) {
struct fb_event event;
info->flags &= ~FBINFO_MISC_USEREVENT;
return -ENODEV;
}
- /* Map the fb and MMIO regions */
- dinfo->aperture.virtual = (u8 __iomem *)ioremap_nocache
- (dinfo->aperture.physical, dinfo->aperture.size);
- if (!dinfo->aperture.virtual) {
- ERR_MSG("Cannot remap FB region.\n");
- cleanup(dinfo);
- return -ENODEV;
- }
- dinfo->mmio_base =
- (u8 __iomem *)ioremap_nocache(dinfo->mmio_base_phys,
- INTEL_REG_SIZE);
- if (!dinfo->mmio_base) {
- ERR_MSG("Cannot remap MMIO region.\n");
- cleanup(dinfo);
- return -ENODEV;
- }
-
/* Get the chipset info. */
dinfo->pci_chipset = pdev->device;
dinfo->accel = 0;
}
+ if (MB(voffset) < stolen_size)
+ offset = (stolen_size >> 12);
+ else
+ offset = ROUND_UP_TO_PAGE(MB(voffset))/GTT_PAGE_SIZE;
+
/* Framebuffer parameters - Use all the stolen memory if >= vram */
- if (ROUND_UP_TO_PAGE(stolen_size) >= MB(vram)) {
+ if (ROUND_UP_TO_PAGE(stolen_size) >= ((offset << 12) + MB(vram))) {
dinfo->fb.size = ROUND_UP_TO_PAGE(stolen_size);
+ dinfo->fb.offset = 0;
dinfo->fbmem_gart = 0;
} else {
dinfo->fb.size = MB(vram);
return -ENODEV;
}
- if (MB(voffset) < stolen_size)
- offset = (stolen_size >> 12);
- else
- offset = ROUND_UP_TO_PAGE(MB(voffset))/GTT_PAGE_SIZE;
-
/* set the mem offsets - set them after the already used pages */
if (dinfo->accel) {
dinfo->ring.offset = offset + gtt_info.current_memory;
+ (dinfo->cursor.size >> 12);
}
+ /* Map the fb and MMIO regions */
+ /* ioremap only up to the end of used aperture */
+ dinfo->aperture.virtual = (u8 __iomem *)ioremap_nocache
+ (dinfo->aperture.physical, (dinfo->fb.offset << 12)
+ + dinfo->fb.size);
+ if (!dinfo->aperture.virtual) {
+ ERR_MSG("Cannot remap FB region.\n");
+ cleanup(dinfo);
+ return -ENODEV;
+ }
+
+ dinfo->mmio_base =
+ (u8 __iomem *)ioremap_nocache(dinfo->mmio_base_phys,
+ INTEL_REG_SIZE);
+ if (!dinfo->mmio_base) {
+ ERR_MSG("Cannot remap MMIO region.\n");
+ cleanup(dinfo);
+ return -ENODEV;
+ }
+
/* Allocate memories (which aren't stolen) */
if (dinfo->accel) {
if (!(dinfo->gtt_ring_mem =
return ERR_PTR(ret);
}
- dev->last_wd = ret;
+ dev->last_wd = watch->wd;
watch->mask = mask;
atomic_set(&watch->count, 0);
INIT_LIST_HEAD(&watch->d_list);
{
jfs_info("In jfs_delete_inode, inode = 0x%p", inode);
+ if (is_bad_inode(inode) ||
+ (JFS_IP(inode)->fileset != cpu_to_le32(FILESYSTEM_I)))
+ return;
+
if (test_cflag(COMMIT_Freewmap, inode))
jfs_free_zero_link(inode);
static bio_end_io_t lbmIODone;
static void lbmStartIO(struct lbuf * bp);
static void lmGCwrite(struct jfs_log * log, int cant_block);
-static int lmLogSync(struct jfs_log * log, int nosyncwait);
+static int lmLogSync(struct jfs_log * log, int hard_sync);
* if new sync address is available
* (normally the case if sync() is executed by background
* process).
- * if not, explicitly run jfs_blogsync() to initiate
- * getting of new sync address.
* calculate new value of i_nextsync which determines when
* this code is called again.
*
* PARAMETERS: log - log structure
- * nosyncwait - 1 if called asynchronously
+ * hard_sync - 1 to force all metadata to be written
*
* RETURN: 0
*
* serialization: LOG_LOCK() held on entry/exit
*/
-static int lmLogSync(struct jfs_log * log, int nosyncwait)
+static int lmLogSync(struct jfs_log * log, int hard_sync)
{
int logsize;
int written; /* written since last syncpt */
unsigned long flags;
/* push dirty metapages out to disk */
- list_for_each_entry(sbi, &log->sb_list, log_list) {
- filemap_flush(sbi->ipbmap->i_mapping);
- filemap_flush(sbi->ipimap->i_mapping);
- filemap_flush(sbi->direct_inode->i_mapping);
- }
+ if (hard_sync)
+ list_for_each_entry(sbi, &log->sb_list, log_list) {
+ filemap_fdatawrite(sbi->ipbmap->i_mapping);
+ filemap_fdatawrite(sbi->ipimap->i_mapping);
+ filemap_fdatawrite(sbi->direct_inode->i_mapping);
+ }
+ else
+ list_for_each_entry(sbi, &log->sb_list, log_list) {
+ filemap_flush(sbi->ipbmap->i_mapping);
+ filemap_flush(sbi->ipimap->i_mapping);
+ filemap_flush(sbi->direct_inode->i_mapping);
+ }
/*
* forward syncpt
/* next syncpt trigger = written + more */
log->nextsync = written + more;
- /* return if lmLogSync() from outside of transaction, e.g., sync() */
- if (nosyncwait)
- return lsn;
-
/* if number of bytes written from last sync point is more
* than 1/4 of the log size, stop new transactions from
* starting until all current transactions are completed
*
* FUNCTION: write log SYNCPT record for specified log
*
- * PARAMETERS: log - log structure
+ * PARAMETERS: log - log structure
+ * hard_sync - set to 1 to force metadata to be written
*/
-void jfs_syncpt(struct jfs_log *log)
+void jfs_syncpt(struct jfs_log *log, int hard_sync)
{
	LOG_LOCK(log);
- lmLogSync(log, 1);
+ lmLogSync(log, hard_sync);
LOG_UNLOCK(log);
}
extern int lmGroupCommit(struct jfs_log *, struct tblock *);
extern int jfsIOWait(void *);
extern void jfs_flush_journal(struct jfs_log * log, int wait);
-extern void jfs_syncpt(struct jfs_log *log);
+extern void jfs_syncpt(struct jfs_log *log, int hard_sync);
#endif /* _H_JFS_LOGMGR */
* synchronize with logsync barrier
*/
if (test_bit(log_SYNCBARRIER, &log->flag)) {
+ TXN_UNLOCK();
+
+ /* write dirty metadata & forward log syncpt */
+ jfs_syncpt(log, 1);
+
jfs_info("log barrier off: 0x%x", log->lsn);
/* enable new transactions start */
/* wakeup all waitors for logsync barrier */
TXN_WAKEUP(&log->syncwait);
- TXN_UNLOCK();
-
- /* forward log syncpt */
- jfs_syncpt(log);
-
goto wakeup;
}
}
/* only anonymous txn.
* Remove from anon_list
*/
+ TXN_LOCK();
list_del_init(&jfs_ip->anon_inode_list);
+ TXN_UNLOCK();
}
jfs_ip->atlhead = tlck->next;
} else {
{
struct jfs_inode_info *ji = JFS_IP(inode);
+ BUG_ON(!list_empty(&ji->anon_inode_list));
+
spin_lock_irq(&ji->ag_lock);
if (ji->active_ag != -1) {
struct bmap *bmap = JFS_SBI(inode->i_sb)->bmap;
/* log == NULL indicates read-only mount */
if (log) {
jfs_flush_journal(log, wait);
- jfs_syncpt(log);
+ jfs_syncpt(log, 0);
}
return 0;
error = vfs_rename_other(old_dir,old_dentry,new_dir,new_dentry);
if (!error) {
const char *new_name = old_dentry->d_name.name;
- fsnotify_move(old_dir, new_dir, old_name, new_name, is_dir, new_dentry->d_inode);
+ fsnotify_move(old_dir, new_dir, old_name, new_name, is_dir,
+ new_dentry->d_inode, old_dentry->d_inode);
}
fsnotify_oldname_free(old_name);
nfs_wb_all(inode);
}
error = NFS_PROTO(inode)->setattr(dentry, &fattr, attr);
- if (error == 0) {
+ if (error == 0)
nfs_refresh_inode(inode, &fattr);
+ nfs_end_data_update(inode);
+ unlock_kernel();
+ return error;
+}
+
+/**
+ * nfs_setattr_update_inode - Update inode metadata after a setattr call.
+ * @inode: pointer to struct inode
+ * @attr: pointer to struct iattr
+ *
+ * Note: we do this in the *proc.c in order to ensure that
+ * it works for things like exclusive creates too.
+ */
+void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr)
+{
+ if ((attr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0) {
if ((attr->ia_valid & ATTR_MODE) != 0) {
- int mode;
- mode = inode->i_mode & ~S_IALLUGO;
- mode |= attr->ia_mode & S_IALLUGO;
+ int mode = attr->ia_mode & S_IALLUGO;
+ mode |= inode->i_mode & ~S_IALLUGO;
inode->i_mode = mode;
}
if ((attr->ia_valid & ATTR_UID) != 0)
inode->i_uid = attr->ia_uid;
if ((attr->ia_valid & ATTR_GID) != 0)
inode->i_gid = attr->ia_gid;
- if ((attr->ia_valid & ATTR_SIZE) != 0) {
- inode->i_size = attr->ia_size;
- vmtruncate(inode, attr->ia_size);
- }
- }
- if ((attr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0)
NFS_FLAGS(inode) |= NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
- nfs_end_data_update(inode);
- unlock_kernel();
- return error;
+ }
+ if ((attr->ia_valid & ATTR_SIZE) != 0) {
+ inode->i_size = attr->ia_size;
+ vmtruncate(inode, attr->ia_size);
+ }
}
/*
dprintk("NFS call setattr\n");
fattr->valid = 0;
status = rpc_call(NFS_CLIENT(inode), NFS3PROC_SETATTR, &arg, fattr, 0);
+ if (status == 0)
+ nfs_setattr_update_inode(inode, sattr);
dprintk("NFS reply setattr: %d\n", status);
return status;
}
* not sure this buys us anything (and I'd have
* to revamp the NFSv3 XDR code) */
status = nfs3_proc_setattr(dentry, &fattr, sattr);
+ if (status == 0)
+ nfs_setattr_update_inode(dentry->d_inode, sattr);
nfs_refresh_inode(dentry->d_inode, &fattr);
dprintk("NFS reply setattr (post-create): %d\n", status);
}
.rpc_argp = &arg,
.rpc_resp = &res,
};
+ int status;
fattr->valid = 0;
} else
memcpy(&arg.stateid, &zero_stateid, sizeof(arg.stateid));
- return rpc_call_sync(server->client, &msg, 0);
+ status = rpc_call_sync(server->client, &msg, 0);
+ return status;
}
static int nfs4_do_setattr(struct nfs_server *server, struct nfs_fattr *fattr,
status = nfs4_do_setattr(NFS_SERVER(inode), fattr,
NFS_FH(inode), sattr, state);
+ if (status == 0)
+ nfs_setattr_update_inode(inode, sattr);
if (state != NULL)
nfs4_close_state(state, FMODE_WRITE);
put_rpccred(cred);
struct nfs_fattr fattr;
status = nfs4_do_setattr(NFS_SERVER(dir), &fattr,
NFS_FH(state->inode), sattr, state);
- if (status == 0)
+ if (status == 0) {
+ nfs_setattr_update_inode(state->inode, sattr);
goto out;
+ }
} else if (flags != 0)
goto out;
nfs4_close_state(state, flags);
dprintk("NFS call setattr\n");
fattr->valid = 0;
status = rpc_call(NFS_CLIENT(inode), NFSPROC_SETATTR, &arg, fattr, 0);
+ if (status == 0)
+ nfs_setattr_update_inode(inode, sattr);
dprintk("NFS reply setattr: %d\n", status);
return status;
}
if (xdr_decode_word(buf, base, &entries) ||
entries > NFS_ACL_MAX_ENTRIES)
return -EINVAL;
+ nfsacl_desc.desc.array_maxlen = entries;
err = xdr_decode_array2(buf, base + 4, &nfsacl_desc.desc);
if (err)
return err;
svc_exit_thread(rqstp);
/* Release module */
+ unlock_kernel();
module_put_and_exit(0);
}
fact that the vfs and ntfs inodes are one struct in memory to find
the ntfs inode in memory if present. Also, the ntfs inode has its
own locking so it does not matter if the vfs inode is locked.
+ - Fix bug in mft record writing where we forgot to set the device in
+ the buffers when mapping them after the VM had discarded them.
+ Thanks to Martin MOKREJŠ for the bug report.
2.1.22 - Many bug and race fixes and error handling improvements.
LCN lcn;
unsigned int vcn_ofs;
+ bh->b_bdev = vol->sb->s_bdev;
/* Obtain the vcn and offset of the current block. */
vcn = ((VCN)mft_no << vol->mft_record_size_bits) +
(block_start - m_start);
LCN lcn;
unsigned int vcn_ofs;
+ bh->b_bdev = vol->sb->s_bdev;
/* Obtain the vcn and offset of the current block. */
vcn = ((VCN)ni->mft_no << vol->mft_record_size_bits) +
(block_start - m_start);
};
#define desc_empty(desc) \
- (!((desc)->a + (desc)->b))
+ (!((desc)->a | (desc)->b))
#define desc_equal(desc1, desc2) \
(((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
#ifndef _LPARMAP_H
#define _LPARMAP_H
+#ifndef __ASSEMBLY__
+
#include <asm/types.h>
/*
} xRanges[HvRangesToMap];
};
-extern struct LparMap xLparMap;
+extern const struct LparMap xLparMap;
+
+#endif /* __ASSEMBLY__ */
+
+/* the fixed address where the LparMap exists */
+#define LPARMAP_PHYS 0x7000
#endif /* _LPARMAP_H */
* casting is the right thing, but 32-bit UML can't have 64-bit virtual
* addresses
*/
-#define __pa(virt) to_phys((void *) (unsigned long) virt)
-#define __va(phys) to_virt((unsigned long) phys)
+#define __pa(virt) to_phys((void *) (unsigned long) (virt))
+#define __va(phys) to_virt((unsigned long) (phys))
#define page_to_pfn(page) ((page) - mem_map)
#define pfn_to_page(pfn) (mem_map + (pfn))
#define ID_MASK 0x00200000
#define desc_empty(desc) \
- (!((desc)->a + (desc)->b))
+ (!((desc)->a | (desc)->b))
#define desc_equal(desc1, desc2) \
(((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
*/
static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
const char *old_name, const char *new_name,
- int isdir, struct inode *target)
+ int isdir, struct inode *target, struct inode *source)
{
u32 cookie = inotify_get_cookie();
inotify_inode_queue_event(target, IN_DELETE_SELF, 0, NULL);
inotify_inode_is_dead(target);
}
+
+ if (source) {
+ inotify_inode_queue_event(source, IN_MOVE_SELF, 0, NULL);
+ }
}
/*
#define IN_CREATE 0x00000100 /* Subfile was created */
#define IN_DELETE 0x00000200 /* Subfile was deleted */
#define IN_DELETE_SELF 0x00000400 /* Self was deleted */
+#define IN_MOVE_SELF 0x00000800 /* Self was moved */
/* the following are legal events. they are sent as needed to any watch */
#define IN_UNMOUNT 0x00002000 /* Backing fs was unmounted */
*/
#define IN_ALL_EVENTS (IN_ACCESS | IN_MODIFY | IN_ATTRIB | IN_CLOSE_WRITE | \
IN_CLOSE_NOWRITE | IN_OPEN | IN_MOVED_FROM | \
- IN_MOVED_TO | IN_DELETE | IN_CREATE | IN_DELETE_SELF)
+ IN_MOVED_TO | IN_DELETE | IN_CREATE | IN_DELETE_SELF | \
+ IN_MOVE_SELF)
#ifdef __KERNEL__
extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *);
extern void nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping);
extern int nfs_setattr(struct dentry *, struct iattr *);
+extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr);
extern void nfs_begin_attr_update(struct inode *);
extern void nfs_end_attr_update(struct inode *);
extern void nfs_begin_data_update(struct inode *);
/* keep track of device state */
unsigned int is_enabled:1; /* pci_enable_device has been called */
unsigned int is_busmaster:1; /* device is busmaster */
-
+ unsigned int no_msi:1; /* device may not use msi */
+
u32 saved_config_space[16]; /* config space saved at suspend time */
struct bin_attribute *rom_attr; /* attribute descriptor for sysfs ROM entry */
int rom_attr_enabled; /* has display of the rom attribute been enabled? */
#define PCI_VENDOR_ID_INTEL 0x8086
#define PCI_DEVICE_ID_INTEL_EESSC 0x0008
#define PCI_DEVICE_ID_INTEL_21145 0x0039
+#define PCI_DEVICE_ID_INTEL_PXHD_0 0x0320
+#define PCI_DEVICE_ID_INTEL_PXHD_1 0x0321
+#define PCI_DEVICE_ID_INTEL_PXH_0 0x0329
+#define PCI_DEVICE_ID_INTEL_PXH_1 0x032A
+#define PCI_DEVICE_ID_INTEL_PXHV 0x032C
#define PCI_DEVICE_ID_INTEL_82375 0x0482
#define PCI_DEVICE_ID_INTEL_82424 0x0483
#define PCI_DEVICE_ID_INTEL_82378 0x0484
struct xdr_array2_desc {
unsigned int elem_size;
unsigned int array_len;
+ unsigned int array_maxlen;
xdr_xcode_elem_t xcode;
};
{
struct task_struct *t;
- if (p->flags & SIGNAL_GROUP_EXIT)
+ if (p->signal->flags & SIGNAL_GROUP_EXIT)
/*
* The process is in the middle of dying already.
*/
u16 flags;
/* All of a TSO frame must be composed of paged data. */
- BUG_ON(skb->len != skb->data_len);
+ if (skb->len != skb->data_len)
+ return tcp_fragment(sk, skb, len, mss_now);
buff = sk_stream_alloc_pskb(sk, 0, 0, GFP_ATOMIC);
if (unlikely(buff == NULL))
sent_pkts = 0;
while ((skb = sk->sk_send_head)) {
+ unsigned int limit;
+
tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
BUG_ON(!tso_segs);
break;
}
+ limit = mss_now;
if (tso_segs > 1) {
- u32 limit = tcp_window_allows(tp, skb,
- mss_now, cwnd_quota);
+ limit = tcp_window_allows(tp, skb,
+ mss_now, cwnd_quota);
if (skb->len < limit) {
unsigned int trim = skb->len % mss_now;
if (trim)
limit = skb->len - trim;
}
- if (skb->len > limit) {
- if (tso_fragment(sk, skb, limit, mss_now))
- break;
- }
- } else if (unlikely(skb->len > mss_now)) {
- if (unlikely(tcp_fragment(sk, skb, mss_now, mss_now)))
- break;
}
+ if (skb->len > limit &&
+ unlikely(tso_fragment(sk, skb, limit, mss_now)))
+ break;
+
TCP_SKB_CB(skb)->when = tcp_time_stamp;
if (unlikely(tcp_transmit_skb(sk, skb_clone(skb, GFP_ATOMIC))))
cwnd_quota = tcp_snd_test(sk, skb, mss_now, TCP_NAGLE_PUSH);
if (likely(cwnd_quota)) {
+ unsigned int limit;
+
BUG_ON(!tso_segs);
+ limit = mss_now;
if (tso_segs > 1) {
- u32 limit = tcp_window_allows(tp, skb,
- mss_now, cwnd_quota);
+ limit = tcp_window_allows(tp, skb,
+ mss_now, cwnd_quota);
if (skb->len < limit) {
unsigned int trim = skb->len % mss_now;
if (trim)
limit = skb->len - trim;
}
- if (skb->len > limit) {
- if (unlikely(tso_fragment(sk, skb, limit, mss_now)))
- return;
- }
- } else if (unlikely(skb->len > mss_now)) {
- if (unlikely(tcp_fragment(sk, skb, mss_now, mss_now)))
- return;
}
+ if (skb->len > limit &&
+ unlikely(tso_fragment(sk, skb, limit, mss_now)))
+ return;
+
/* Send it out now. */
TCP_SKB_CB(skb)->when = tcp_time_stamp;
if (!raw_sk) {
if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
IP6_INC_STATS_BH(IPSTATS_MIB_INUNKNOWNPROTOS);
- icmpv6_param_prob(skb, ICMPV6_UNK_NEXTHDR, nhoff);
+ icmpv6_send(skb, ICMPV6_PARAMPROB,
+ ICMPV6_UNK_NEXTHDR, nhoff,
+ skb->dev);
}
- } else {
+ } else
IP6_INC_STATS_BH(IPSTATS_MIB_INDELIVERS);
- kfree_skb(skb);
- }
+ kfree_skb(skb);
}
rcu_read_unlock();
return 0;
if (skb->ip_summed != CHECKSUM_UNNECESSARY) {
if (skb->ip_summed == CHECKSUM_HW) {
+ skb_postpull_rcsum(skb, skb->nh.raw,
+ skb->h.raw - skb->nh.raw);
skb->ip_summed = CHECKSUM_UNNECESSARY;
if (csum_ipv6_magic(&skb->nh.ipv6h->saddr,
&skb->nh.ipv6h->daddr,
return -EINVAL;
} else {
if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
+ desc->array_len > desc->array_maxlen ||
(unsigned long) base + 4 + desc->array_len *
desc->elem_size > buf->len)
return -EINVAL;