F: net/ieee802154/
F: drivers/ieee802154/
+IIO SUBSYSTEM AND DRIVERS
+M: Jonathan Cameron <jic23@cam.ac.uk>
+L: linux-iio@vger.kernel.org
+S: Maintained
+F: drivers/staging/iio/
+
IKANOS/ADI EAGLE ADSL USB DRIVER
M: Matthieu Castet <castet.matthieu@free.fr>
M: Stanislaw Gruszka <stf_xl@wp.pl>
{
struct thread_info *thread = current_thread_info();
int ret;
+ enum bug_trap_type bug_type = BUG_TRAP_TYPE_NONE;
oops_enter();
console_verbose();
bust_spinlocks(1);
if (!user_mode(regs))
- report_bug(regs->ARM_pc, regs);
+ bug_type = report_bug(regs->ARM_pc, regs);
+ if (bug_type != BUG_TRAP_TYPE_NONE)
+ str = "Oops - BUG";
ret = __die(str, err, thread, regs);
if (regs && kexec_should_crash(thread->task))
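As background for the hunk above: report_bug() returns BUG_TRAP_TYPE_NONE when the faulting PC has no entry in the kernel's bug table, and BUG_TRAP_TYPE_WARN or BUG_TRAP_TYPE_BUG otherwise, so the new check upgrades the oops banner to "Oops - BUG" only for genuine BUG()/WARN() traps rather than for every undefined instruction.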
#include <asm/page.h>
#define PROC_INFO \
+ . = ALIGN(4); \
VMLINUX_SYMBOL(__proc_info_begin) = .; \
*(.proc.info.init) \
VMLINUX_SYMBOL(__proc_info_end) = .;
.default_device = &sdp4430_lcd_device,
};
-static void omap_4430sdp_display_init(void)
+static void __init omap_4430sdp_display_init(void)
{
int r;
#define board_mux NULL
#endif
-static void omap4_sdp4430_wifi_mux_init(void)
+static void __init omap4_sdp4430_wifi_mux_init(void)
{
omap_mux_init_gpio(GPIO_WIFI_IRQ, OMAP_PIN_INPUT |
OMAP_PIN_OFF_WAKEUPENABLE);
.board_tcxo_clock = WL12XX_TCXOCLOCK_26,
};
-static void omap4_sdp4430_wifi_init(void)
+static void __init omap4_sdp4430_wifi_init(void)
{
+ int ret;
+
omap4_sdp4430_wifi_mux_init();
- if (wl12xx_set_platform_data(&omap4_sdp4430_wlan_data))
- pr_err("Error setting wl12xx data\n");
- platform_device_register(&omap_vwlan_device);
+ ret = wl12xx_set_platform_data(&omap4_sdp4430_wlan_data);
+ if (ret)
+ pr_err("Error setting wl12xx data: %d\n", ret);
+ ret = platform_device_register(&omap_vwlan_device);
+ if (ret)
+ pr_err("Error registering wl12xx device: %d\n", ret);
}
static void __init omap_4430sdp_init(void)
{
	{ OMAP3_EVM_EHCI_SELECT, GPIOF_OUT_INIT_LOW, "select EHCI port" },
};
+static void __init omap3_evm_wl12xx_init(void)
+{
+#ifdef CONFIG_WL12XX_PLATFORM_DATA
+ int ret;
+
+ /* WL12xx WLAN Init */
+ ret = wl12xx_set_platform_data(&omap3evm_wlan_data);
+ if (ret)
+ pr_err("error setting wl12xx data: %d\n", ret);
+ ret = platform_device_register(&omap3evm_wlan_regulator);
+ if (ret)
+ pr_err("error registering wl12xx device: %d\n", ret);
+#endif
+}
+
static void __init omap3_evm_init(void)
{
omap3_evm_get_revision();
omap_ads7846_init(1, OMAP3_EVM_TS_GPIO, 310, NULL);
omap3evm_init_smsc911x();
omap3_evm_display_init();
-
-#ifdef CONFIG_WL12XX_PLATFORM_DATA
- /* WL12xx WLAN Init */
- if (wl12xx_set_platform_data(&omap3evm_wlan_data))
- pr_err("error setting wl12xx data\n");
- platform_device_register(&omap3evm_wlan_regulator);
-#endif
+ omap3_evm_wl12xx_init();
}
MACHINE_START(OMAP3EVM, "OMAP3 EVM")
static void __init omap4_panda_init(void)
{
int package = OMAP_PACKAGE_CBS;
+ int ret;
if (omap_rev() == OMAP4430_REV_ES1_0)
package = OMAP_PACKAGE_CBL;
omap4_mux_init(board_mux, NULL, package);
- if (wl12xx_set_platform_data(&omap_panda_wlan_data))
- pr_err("error setting wl12xx data\n");
+ ret = wl12xx_set_platform_data(&omap_panda_wlan_data);
+ if (ret)
+ pr_err("error setting wl12xx data: %d\n", ret);
omap4_panda_i2c_init();
platform_add_devices(panda_devices, ARRAY_SIZE(panda_devices));
void __init zoom_peripherals_init(void)
{
- if (wl12xx_set_platform_data(&omap_zoom_wlan_data))
- pr_err("error setting wl12xx data\n");
+ int ret = wl12xx_set_platform_data(&omap_zoom_wlan_data);
+
+ if (ret)
+ pr_err("error setting wl12xx data: %d\n", ret);
omap_i2c_init();
platform_device_register(&omap_vwlan_device);
}
}
-static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
- struct omap_mmc_platform_data *mmc)
+static int omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
+ struct omap_mmc_platform_data *mmc)
{
char *hc_name;
#define MAX_OMAP_MMC_HWMOD_NAME_LEN 16
-void __init omap_init_hsmmc(struct omap2_hsmmc_info *hsmmcinfo, int ctrl_nr)
+void omap_init_hsmmc(struct omap2_hsmmc_info *hsmmcinfo, int ctrl_nr)
{
struct omap_hwmod *oh;
struct platform_device *pdev;
kfree(mmc_data);
}
-void __init omap2_hsmmc_init(struct omap2_hsmmc_info *controllers)
+void omap2_hsmmc_init(struct omap2_hsmmc_info *controllers)
{
u32 reg;
static char *omap_mux_options;
-static int __init _omap_mux_init_gpio(struct omap_mux_partition *partition,
- int gpio, int val)
+static int _omap_mux_init_gpio(struct omap_mux_partition *partition,
+ int gpio, int val)
{
struct omap_mux_entry *e;
struct omap_mux *gpio_mux = NULL;
return 0;
}
-int __init omap_mux_init_gpio(int gpio, int val)
+int omap_mux_init_gpio(int gpio, int val)
{
struct omap_mux_partition *partition;
int ret;
return -ENODEV;
}
-static int __init _omap_mux_get_by_name(struct omap_mux_partition *partition,
- const char *muxname,
- struct omap_mux **found_mux)
+static int _omap_mux_get_by_name(struct omap_mux_partition *partition,
+ const char *muxname,
+ struct omap_mux **found_mux)
{
struct omap_mux *mux = NULL;
struct omap_mux_entry *e;
return -ENODEV;
}
-int __init omap_mux_init_signal(const char *muxname, int val)
+int omap_mux_init_signal(const char *muxname, int val)
{
struct omap_mux_partition *partition = NULL;
struct omap_mux *mux = NULL;
omap_mux_package_init_balls(package_balls, superset);
}
-static void omap_mux_init_signals(struct omap_mux_partition *partition,
- struct omap_board_mux *board_mux)
+static void __init omap_mux_init_signals(struct omap_mux_partition *partition,
+ struct omap_board_mux *board_mux)
{
omap_mux_set_cmdline_signals();
omap_mux_write_array(partition, board_mux);
{
}
-static void omap_mux_init_signals(struct omap_mux_partition *partition,
- struct omap_board_mux *board_mux)
+static void __init omap_mux_init_signals(struct omap_mux_partition *partition,
+ struct omap_board_mux *board_mux)
{
}
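The section annotations in the OMAP hunks above follow one rule: hooks called only from machine init gain __init so their text can be discarded after boot, while helpers such as omap_mux_init_gpio(), omap_mux_init_signal() and the hsmmc init functions lose __init, presumably because they are now reachable from non-__init paths. A caller like this hypothetical runtime path is the section-mismatch bug the change avoids (sketch):

	/* Not __init: may run long after init memory is freed. */
	void some_runtime_path(void)
	{
		/* would reference discarded text if omap_mux_init_gpio() were still __init */
		omap_mux_init_gpio(GPIO_WIFI_IRQ, OMAP_PIN_INPUT);
	}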
#include <linux/linkage.h>
#include <linux/init.h>
+ __CPUINIT
/*
* OMAP4 specific entry point for secondary CPU to jump from ROM
* code. This routine also provides a holding flag into which
if (oh->_state != _HWMOD_STATE_INITIALIZED &&
oh->_state != _HWMOD_STATE_IDLE &&
oh->_state != _HWMOD_STATE_DISABLED) {
- WARN(1, "omap_hwmod: %s: enabled state can only be entered "
- "from initialized, idle, or disabled state\n", oh->name);
+ WARN(1, "omap_hwmod: %s: enabled state can only be entered from initialized, idle, or disabled state\n",
+ oh->name);
return -EINVAL;
}
pr_debug("omap_hwmod: %s: idling\n", oh->name);
if (oh->_state != _HWMOD_STATE_ENABLED) {
- WARN(1, "omap_hwmod: %s: idle state can only be entered from "
- "enabled state\n", oh->name);
+ WARN(1, "omap_hwmod: %s: idle state can only be entered from enabled state\n",
+ oh->name);
return -EINVAL;
}
if (oh->_state != _HWMOD_STATE_IDLE &&
oh->_state != _HWMOD_STATE_ENABLED) {
- WARN(1, "omap_hwmod: %s: disabled state can only be entered "
- "from idle, or enabled state\n", oh->name);
+ WARN(1, "omap_hwmod: %s: disabled state can only be entered from idle, or enabled state\n",
+ oh->name);
return -EINVAL;
}
BUG_ON(!oh);
if (!oh->class->sysc || !oh->class->sysc->sysc_flags) {
- WARN(1, "omap_device: %s: OCP barrier impossible due to "
- "device configuration\n", oh->name);
+ WARN(1, "omap_device: %s: OCP barrier impossible due to device configuration\n",
+ oh->name);
return;
}
#include "common.h"
#include <plat/cpu.h>
+#include <plat/irqs.h>
#include <plat/prcm.h>
#include "vp.h"
omap_hwmod_set_slave_idlemode(od->hwmods[0], HWMOD_IDLEMODE_NO);
}
-static void omap_uart_set_forceidle(struct platform_device *pdev)
+static void omap_uart_set_smartidle(struct platform_device *pdev)
{
struct omap_device *od = to_omap_device(pdev);
- omap_hwmod_set_slave_idlemode(od->hwmods[0], HWMOD_IDLEMODE_FORCE);
+ omap_hwmod_set_slave_idlemode(od->hwmods[0], HWMOD_IDLEMODE_SMART);
}
#else
static void omap_uart_enable_wakeup(struct platform_device *pdev, bool enable)
{}
static void omap_uart_set_noidle(struct platform_device *pdev) {}
-static void omap_uart_set_forceidle(struct platform_device *pdev) {}
+static void omap_uart_set_smartidle(struct platform_device *pdev) {}
#endif /* CONFIG_PM */
#ifdef CONFIG_OMAP_MUX
omap_up.uartclk = OMAP24XX_BASE_BAUD * 16;
omap_up.flags = UPF_BOOT_AUTOCONF;
omap_up.get_context_loss_count = omap_pm_get_dev_context_loss_count;
- omap_up.set_forceidle = omap_uart_set_forceidle;
+ omap_up.set_forceidle = omap_uart_set_smartidle;
omap_up.set_noidle = omap_uart_set_noidle;
omap_up.enable_wakeup = omap_uart_enable_wakeup;
omap_up.dma_rx_buf_size = info->dma_rx_buf_size;
* omap_vc_i2c_init - initialize I2C interface to PMIC
* @voltdm: voltage domain containing VC data
*
- * Use PMIC supplied seetings for I2C high-speed mode and
+ * Use PMIC supplied settings for I2C high-speed mode and
* master code (if set) and program the VC I2C configuration
* register.
*
if (initialized) {
if (voltdm->pmic->i2c_high_speed != i2c_high_speed)
- pr_warn("%s: I2C config for all channels must match.",
- __func__);
+ pr_warn("%s: I2C config for vdd_%s does not match other channels (%u).",
+ __func__, voltdm->name, i2c_high_speed);
return;
}
u32 val;
if (!voltdm->pmic || !voltdm->pmic->uv_to_vsel) {
- pr_err("%s: PMIC info requried to configure vc for"
- "vdd_%s not populated.Hence cannot initialize vc\n",
- __func__, voltdm->name);
+ pr_err("%s: No PMIC info for vdd_%s\n", __func__, voltdm->name);
return;
}
u32 val, sys_clk_rate, timeout, waittime;
u32 vddmin, vddmax, vstepmin, vstepmax;
+ if (!voltdm->pmic || !voltdm->pmic->uv_to_vsel) {
+ pr_err("%s: No PMIC info for vdd_%s\n", __func__, voltdm->name);
+ return;
+ }
+
if (!voltdm->read || !voltdm->write) {
pr_err("%s: No read/write API for accessing vdd_%s regs\n",
__func__, voltdm->name);
and r1, r1, #7 @ mask of the bits for current cache only
cmp r1, #2 @ see what cache we have at this level
blt skip @ skip if no cache, or just i-cache
+#ifdef CONFIG_PREEMPT
+ save_and_disable_irqs r9 @ make cssr&csidr read atomic
+#endif
mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
	isb				@ isb to sync the new cssr&csidr
mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
+#ifdef CONFIG_PREEMPT
+ restore_irqs_notrace r9
+#endif
and r2, r1, #7 @ extract the length of the cache lines
add r2, r2, #4 @ add 4 (line length offset)
ldr r4, =0x3ff
return pdev ? pci_name(pdev) : "<null>";
}
+static inline const char *eeh_driver_name(struct pci_dev *pdev)
+{
+ return (pdev && pdev->driver) ? pdev->driver->name : "<null>";
+}
+
#endif /* CONFIG_EEH */
#else /* CONFIG_PCI */
#ifndef __ASSEMBLY__
-#define instruction_pointer(regs) ((regs)->nip)
-#define user_stack_pointer(regs) ((regs)->gpr[1])
+#define GET_IP(regs) ((regs)->nip)
+#define GET_USP(regs) ((regs)->gpr[1])
+#define GET_FP(regs) (0)
+#define SET_FP(regs, val)
+
+#ifdef CONFIG_SMP
+extern unsigned long profile_pc(struct pt_regs *regs);
+#define profile_pc profile_pc
+#endif
+
+#include <asm-generic/ptrace.h>
+
#define kernel_stack_pointer(regs) ((regs)->gpr[1])
static inline int is_syscall_success(struct pt_regs *regs)
{
return -regs->gpr[3];
}
-#ifdef CONFIG_SMP
-extern unsigned long profile_pc(struct pt_regs *regs);
-#else
-#define profile_pc(regs) instruction_pointer(regs)
-#endif
-
#ifdef __powerpc64__
#define user_mode(regs) ((((regs)->msr) >> MSR_PR_LG) & 0x1)
#else
static inline notrace void decrementer_check_overflow(void)
{
u64 now = get_tb_or_rtc();
- u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
+ u64 *next_tb;
+
+ preempt_disable();
+ next_tb = &__get_cpu_var(decrementers_next_tb);
if (now >= *next_tb)
set_dec(1);
+ preempt_enable();
}
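The fix follows the general rule that a this-CPU address obtained via __get_cpu_var() is only stable while preemption is disabled. The equivalent idiom using the get_cpu_var()/put_cpu_var() helpers, which bundle the same preempt_disable()/preempt_enable() pair, would be (sketch):

	u64 *next_tb = &get_cpu_var(decrementers_next_tb);	/* preempt off */
	if (get_tb_or_rtc() >= *next_tb)
		set_dec(1);
	put_cpu_var(decrementers_next_tb);			/* preempt back on */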
notrace void arch_local_irq_restore(unsigned long en)
int cpu;
slb_set_size(SLB_MIN_SIZE);
- stop_topology_update();
printk(KERN_DEBUG "calling ibm,suspend-me on cpu %i\n", smp_processor_id());
while (rc == H_MULTI_THREADS_ACTIVE && !atomic_read(&data->done) &&
rc = atomic_read(&data->error);
atomic_set(&data->error, rc);
- start_topology_update();
pSeries_coalesce_init();
if (wake_when_done) {
atomic_set(&data.error, 0);
data.token = rtas_token("ibm,suspend-me");
data.complete = &done;
+ stop_topology_update();
/* Call function on all CPUs. One of us will make the
* rtas call
if (atomic_read(&data.error) != 0)
printk(KERN_ERR "Error doing global join\n");
+ start_topology_update();
+
return atomic_read(&data.error);
}
#else /* CONFIG_PPC_PSERIES */
static unsigned int pnv_get_one_msi(struct pnv_phb *phb)
{
- unsigned int id;
+ unsigned long flags;
+ unsigned int id, rc;
+
-	spin_lock(&phb->lock);
+	spin_lock_irqsave(&phb->lock, flags);
id = find_next_zero_bit(phb->msi_map, phb->msi_count, phb->msi_next);
if (id >= phb->msi_count && phb->msi_next)
id = find_next_zero_bit(phb->msi_map, phb->msi_count, 0);
if (id >= phb->msi_count) {
- spin_unlock(&phb->lock);
- return 0;
+ rc = 0;
+ goto out;
}
__set_bit(id, phb->msi_map);
- spin_unlock(&phb->lock);
- return id + phb->msi_base;
+ rc = id + phb->msi_base;
+out:
+ spin_unlock_irqrestore(&phb->lock, flags);
+ return rc;
}
static void pnv_put_msi(struct pnv_phb *phb, unsigned int hwirq)
{
+ unsigned long flags;
unsigned int id;
if (WARN_ON(hwirq < phb->msi_base ||
hwirq >= (phb->msi_base + phb->msi_count)))
return;
id = hwirq - phb->msi_base;
- spin_lock(&phb->lock);
+
+ spin_lock_irqsave(&phb->lock, flags);
__clear_bit(id, phb->msi_map);
- spin_unlock(&phb->lock);
+ spin_unlock_irqrestore(&phb->lock, flags);
}
static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
printk (KERN_ERR "EEH: %d reads ignored for recovering device at "
"location=%s driver=%s pci addr=%s\n",
pdn->eeh_check_count, location,
- dev->driver->name, eeh_pci_name(dev));
+ eeh_driver_name(dev), eeh_pci_name(dev));
printk (KERN_ERR "EEH: Might be infinite loop in %s driver\n",
- dev->driver->name);
+ eeh_driver_name(dev));
dump_stack();
}
goto dn_unlock;
#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/rtas.h>
+#include <asm/topology.h>
static u64 stream_id;
static struct device suspend_dev;
ssleep(1);
} while (rc == -EAGAIN);
- if (!rc)
+ if (!rc) {
+ stop_topology_update();
rc = pm_suspend(PM_SUSPEND_MEM);
+ start_topology_update();
+ }
stream_id = 0;
* For the moment only implement delivery to all cpus or one cpu.
* Get current irq_server for the given irq
*/
- ret = cache_hwirq_map(ics, d->irq, cpumask);
+ ret = cache_hwirq_map(ics, hw_irq, cpumask);
if (ret == -1) {
char cpulist[128];
cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask);
#define DUMP_REG(x) \
pr_debug("%-30s : 0x%016llx\n", #x, in_be64(hose->cfg_data + x))
-#ifdef CONFIG_WSP_DD1_WORKAROUND_BAD_PCIE_CLASS
- /* WSP DD1 has a bogus class code by default in the PCI-E
- * root complex's built-in P2P bridge */
+ /*
+	 * Some WSP variants have a bogus class code by default in the PCI-E
+ * root complex's built-in P2P bridge
+ */
val = in_be64(hose->cfg_data + PCIE_REG_SYS_CFG1);
pr_debug("PCI-E SYS_CFG1 : 0x%llx\n", val);
out_be64(hose->cfg_data + PCIE_REG_SYS_CFG1,
(val & ~PCIE_REG_SYS_CFG1_CLASS_CODE) | (PCI_CLASS_BRIDGE_PCI << 8));
pr_debug("PCI-E SYS_CFG1 : 0x%llx\n", in_be64(hose->cfg_data + PCIE_REG_SYS_CFG1));
-#endif /* CONFIG_WSP_DD1_WORKAROUND_BAD_PCIE_CLASS */
#ifdef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS
/* XXX Disable TCE caching, it doesn't work on DD1 */
extern void fpu_init(void);
extern void mxcsr_feature_mask_init(void);
extern int init_fpu(struct task_struct *child);
-extern asmlinkage void math_state_restore(void);
+extern void math_state_restore(void);
extern void __math_state_restore(void);
extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
}
}
+/*
+ * Were we in an interrupt that interrupted kernel mode?
+ *
+ * We can do a kernel_fpu_begin/end() pair *ONLY* if that
+ * pair does nothing at all: TS_USEDFPU must be clear (so
+ * that we don't try to save the FPU state), and TS must
+ * be set (so that the clts/stts pair does nothing that is
+ * visible in the interrupted kernel thread).
+ */
+static inline bool interrupted_kernel_fpu_idle(void)
+{
+ return !(current_thread_info()->status & TS_USEDFPU) &&
+ (read_cr0() & X86_CR0_TS);
+}
+
+/*
+ * Were we in user mode (or vm86 mode) when we were
+ * interrupted?
+ *
+ * Doing kernel_fpu_begin/end() is ok if we are running
+ * in an interrupt context from user mode - we'll just
+ * save the FPU state as required.
+ */
+static inline bool interrupted_user_mode(void)
+{
+ struct pt_regs *regs = get_irq_regs();
+ return regs && user_mode_vm(regs);
+}
+
+/*
+ * Can we use the FPU in kernel mode with the
+ * whole "kernel_fpu_begin/end()" sequence?
+ *
+ * It's always ok in process context (ie "not interrupt")
+ * but it is sometimes ok even from an irq.
+ */
+static inline bool irq_fpu_usable(void)
+{
+ return !in_interrupt() ||
+ interrupted_user_mode() ||
+ interrupted_kernel_fpu_idle();
+}
+
static inline void kernel_fpu_begin(void)
{
struct thread_info *me = current_thread_info();
+
+ WARN_ON_ONCE(!irq_fpu_usable());
preempt_disable();
if (me->status & TS_USEDFPU)
__save_init_fpu(me->task);
preempt_enable();
}
-static inline bool irq_fpu_usable(void)
-{
- struct pt_regs *regs;
-
- return !in_interrupt() || !(regs = get_irq_regs()) || \
- user_mode(regs) || (read_cr0() & X86_CR0_TS);
-}
-
/*
* Some instructions like VIA's padlock instructions generate a spurious
* DNA fault but don't modify SSE registers. And these instructions
*/
static inline void save_init_fpu(struct task_struct *tsk)
{
+ WARN_ON_ONCE(task_thread_info(tsk)->status & TS_USEDFPU);
preempt_disable();
__save_init_fpu(tsk);
stts();
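irq_fpu_usable() is the gate a kernel-mode FPU user is expected to check before a kernel_fpu_begin()/kernel_fpu_end() section; the WARN_ON_ONCE() added above catches callers that skip it. A minimal sketch of the intended calling pattern (the copy helper is hypothetical, only the three i387 calls are the real interface):

	static void copy_block_sse(void *dst, const void *src, size_t len)
	{
		if (!irq_fpu_usable()) {
			memcpy(dst, src, len);	/* fallback when FPU is off-limits */
			return;
		}
		kernel_fpu_begin();	/* saves task FPU state if TS_USEDFPU is set */
		/* ... SSE-accelerated copy ... */
		kernel_fpu_end();
	}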
hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;
cpuc->pebs_enabled |= 1ULL << hwc->idx;
- WARN_ON_ONCE(cpuc->enabled);
if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
intel_pmu_lbr_enable(event);
if (!x86_pmu.lbr_nr)
return;
- WARN_ON_ONCE(cpuc->enabled);
-
/*
* Reset the LBR stack if we changed task context to
* avoid data leaks.
* Careful.. There are problems with IBM-designed IRQ13 behaviour.
* Don't touch unless you *really* know how it works.
*
- * Must be called with kernel preemption disabled (in this case,
- * local interrupts are disabled at the call-site in entry.S).
+ * Must be called with kernel preemption disabled (e.g. with local
+ * interrupts disabled, as in the case of do_device_not_available).
*/
-asmlinkage void math_state_restore(void)
+void math_state_restore(void)
{
struct thread_info *thread = current_thread_info();
struct task_struct *tsk = thread->task;
dotraplinkage void __kprobes
do_device_not_available(struct pt_regs *regs, long error_code)
{
+ WARN_ON_ONCE(!user_mode_vm(regs));
#ifdef CONFIG_MATH_EMULATION
if (read_cr0() & X86_CR0_EM) {
struct math_emu_info info = { };
int __init pci_xen_hvm_init(void)
{
- if (!xen_feature(XENFEAT_hvm_pirqs))
+ if (!xen_have_vector_callback || !xen_feature(XENFEAT_hvm_pirqs))
return 0;
#ifdef CONFIG_ACPI
play_dead_common();
HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
cpu_bringup();
+ /*
+	 * Balance out the preempt calls - as we are running in the cpu_idle
+	 * loop which has been called at bootup from cpu_bringup_and_idle.
+	 * cpu_bringup_and_idle called cpu_bringup, which made a
+	 * preempt_disable(). So this preempt_enable() will balance it out.
+ */
+ preempt_enable();
}
#else /* !CONFIG_HOTPLUG_CPU */
ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
if (ioc) {
ioc_cgroup_changed(ioc);
- put_io_context(ioc, NULL);
+ put_io_context(ioc);
}
}
}
if (rq->cmd_flags & REQ_ELVPRIV) {
elv_put_request(q, rq);
if (rq->elv.icq)
- put_io_context(rq->elv.icq->ioc, q);
+ put_io_context(rq->elv.icq->ioc);
}
mempool_free(rq, q->rq.rq_pool);
spin_unlock_irq(q->queue_lock);
/* create icq if missing */
- if (unlikely(et->icq_cache && !icq))
+ if ((rw_flags & REQ_ELVPRIV) && unlikely(et->icq_cache && !icq)) {
icq = ioc_create_icq(q, gfp_mask);
+ if (!icq)
+ goto fail_icq;
+ }
- /* rqs are guaranteed to have icq on elv_set_request() if requested */
- if (likely(!et->icq_cache || icq))
- rq = blk_alloc_request(q, icq, rw_flags, gfp_mask);
+ rq = blk_alloc_request(q, icq, rw_flags, gfp_mask);
+fail_icq:
if (unlikely(!rq)) {
/*
* Allocation failed presumably due to memory. Undo anything
req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
drive_stat_acct(req, 0);
- elv_bio_merged(q, req, bio);
return true;
}
req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
drive_stat_acct(req, 0);
- elv_bio_merged(q, req, bio);
return true;
}
* on %current's plugged list. Returns %true if merge was successful,
* otherwise %false.
*
- * This function is called without @q->queue_lock; however, elevator is
- * accessed iff there already are requests on the plugged list which in
- * turn guarantees validity of the elevator.
- *
- * Note that, on successful merge, elevator operation
- * elevator_bio_merged_fn() will be called without queue lock. Elevator
- * must be ready for this.
+ * Plugging coalesces IOs from the same issuer for the same purpose without
+ * going through @q->queue_lock. As such it's more of an issuing mechanism
+ * than scheduling, and the request, while it may have elvpriv data, is not
+ * added to the elevator at this point. In addition, we don't have
+ * reliable access to the elevator outside queue lock. Only check basic
+ * merging parameters without querying the elevator.
*/
static bool attempt_plug_merge(struct request_queue *q, struct bio *bio,
unsigned int *request_count)
(*request_count)++;
- if (rq->q != q)
+ if (rq->q != q || !blk_rq_merge_ok(rq, bio))
continue;
- el_ret = elv_try_merge(rq, bio);
+ el_ret = blk_try_merge(rq, bio);
if (el_ret == ELEVATOR_BACK_MERGE) {
ret = bio_attempt_back_merge(q, rq, bio);
if (ret)
el_ret = elv_merge(q, &req, bio);
if (el_ret == ELEVATOR_BACK_MERGE) {
if (bio_attempt_back_merge(q, req, bio)) {
+ elv_bio_merged(q, req, bio);
if (!attempt_back_merge(q, req))
elv_merged_request(q, req, el_ret);
goto out_unlock;
}
} else if (el_ret == ELEVATOR_FRONT_MERGE) {
if (bio_attempt_front_merge(q, req, bio)) {
+ elv_bio_merged(q, req, bio);
if (!attempt_front_merge(q, req))
elv_merged_request(q, req, el_ret);
goto out_unlock;
}
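For background on the plug-merge changes: plugging batches requests on current->plug without taking @q->queue_lock, which is exactly why attempt_plug_merge() may no longer consult the elevator. A submitter opts in like this (sketch; the submission loop is hypothetical, the plug API is real):

	struct blk_plug plug;

	blk_start_plug(&plug);	/* bios now collect on current->plug */
	/* ... a batch of submit_bio() calls ... */
	blk_finish_plug(&plug);	/* flush the batch into the queue */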
EXPORT_SYMBOL(get_io_context);
-/*
- * Releasing ioc may nest into another put_io_context() leading to nested
- * fast path release. As the ioc's can't be the same, this is okay but
- * makes lockdep whine. Keep track of nesting and use it as subclass.
- */
-#ifdef CONFIG_LOCKDEP
-#define ioc_release_depth(q) ((q) ? (q)->ioc_release_depth : 0)
-#define ioc_release_depth_inc(q) (q)->ioc_release_depth++
-#define ioc_release_depth_dec(q) (q)->ioc_release_depth--
-#else
-#define ioc_release_depth(q) 0
-#define ioc_release_depth_inc(q) do { } while (0)
-#define ioc_release_depth_dec(q) do { } while (0)
-#endif
-
static void icq_free_icq_rcu(struct rcu_head *head)
{
struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);
if (rcu_dereference_raw(ioc->icq_hint) == icq)
rcu_assign_pointer(ioc->icq_hint, NULL);
- if (et->ops.elevator_exit_icq_fn) {
- ioc_release_depth_inc(q);
+ if (et->ops.elevator_exit_icq_fn)
et->ops.elevator_exit_icq_fn(icq);
- ioc_release_depth_dec(q);
- }
/*
* @icq->q might have gone away by the time RCU callback runs
struct io_context *ioc = container_of(work, struct io_context,
release_work);
struct request_queue *last_q = NULL;
+ unsigned long flags;
- spin_lock_irq(&ioc->lock);
+ /*
+	 * Exiting an icq may call into put_io_context() through the elevator,
+	 * which will trigger a lockdep warning. The ioc's are guaranteed to
+ * be different, use a different locking subclass here. Use
+ * irqsave variant as there's no spin_lock_irq_nested().
+ */
+ spin_lock_irqsave_nested(&ioc->lock, flags, 1);
while (!hlist_empty(&ioc->icq_list)) {
struct io_cq *icq = hlist_entry(ioc->icq_list.first,
*/
if (last_q) {
spin_unlock(last_q->queue_lock);
- spin_unlock_irq(&ioc->lock);
+ spin_unlock_irqrestore(&ioc->lock, flags);
blk_put_queue(last_q);
} else {
- spin_unlock_irq(&ioc->lock);
+ spin_unlock_irqrestore(&ioc->lock, flags);
}
last_q = this_q;
- spin_lock_irq(this_q->queue_lock);
- spin_lock(&ioc->lock);
+ spin_lock_irqsave(this_q->queue_lock, flags);
+ spin_lock_nested(&ioc->lock, 1);
continue;
}
ioc_exit_icq(icq);
if (last_q) {
spin_unlock(last_q->queue_lock);
- spin_unlock_irq(&ioc->lock);
+ spin_unlock_irqrestore(&ioc->lock, flags);
blk_put_queue(last_q);
} else {
- spin_unlock_irq(&ioc->lock);
+ spin_unlock_irqrestore(&ioc->lock, flags);
}
kmem_cache_free(iocontext_cachep, ioc);
/**
* put_io_context - put a reference of io_context
* @ioc: io_context to put
- * @locked_q: request_queue the caller is holding queue_lock of (hint)
*
* Decrement reference count of @ioc and release it if the count reaches
- * zero. If the caller is holding queue_lock of a queue, it can indicate
- * that with @locked_q. This is an optimization hint and the caller is
- * allowed to pass in %NULL even when it's holding a queue_lock.
+ * zero.
*/
-void put_io_context(struct io_context *ioc, struct request_queue *locked_q)
+void put_io_context(struct io_context *ioc)
{
- struct request_queue *last_q = locked_q;
unsigned long flags;
if (ioc == NULL)
return;
BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
- if (locked_q)
- lockdep_assert_held(locked_q->queue_lock);
-
- if (!atomic_long_dec_and_test(&ioc->refcount))
- return;
/*
- * Destroy @ioc. This is a bit messy because icq's are chained
- * from both ioc and queue, and ioc->lock nests inside queue_lock.
- * The inner ioc->lock should be held to walk our icq_list and then
- * for each icq the outer matching queue_lock should be grabbed.
- * ie. We need to do reverse-order double lock dancing.
- *
- * Another twist is that we are often called with one of the
- * matching queue_locks held as indicated by @locked_q, which
- * prevents performing double-lock dance for other queues.
- *
- * So, we do it in two stages. The fast path uses the queue_lock
- * the caller is holding and, if other queues need to be accessed,
- * uses trylock to avoid introducing locking dependency. This can
- * handle most cases, especially if @ioc was performing IO on only
- * single device.
- *
- * If trylock doesn't cut it, we defer to @ioc->release_work which
- * can do all the double-locking dancing.
+ * Releasing ioc requires reverse order double locking and we may
+ * already be holding a queue_lock. Do it asynchronously from wq.
*/
- spin_lock_irqsave_nested(&ioc->lock, flags,
- ioc_release_depth(locked_q));
-
- while (!hlist_empty(&ioc->icq_list)) {
- struct io_cq *icq = hlist_entry(ioc->icq_list.first,
- struct io_cq, ioc_node);
- struct request_queue *this_q = icq->q;
-
- if (this_q != last_q) {
- if (last_q && last_q != locked_q)
- spin_unlock(last_q->queue_lock);
- last_q = NULL;
-
- if (!spin_trylock(this_q->queue_lock))
- break;
- last_q = this_q;
- continue;
- }
- ioc_exit_icq(icq);
+ if (atomic_long_dec_and_test(&ioc->refcount)) {
+ spin_lock_irqsave(&ioc->lock, flags);
+ if (!hlist_empty(&ioc->icq_list))
+ schedule_work(&ioc->release_work);
+ spin_unlock_irqrestore(&ioc->lock, flags);
}
-
- if (last_q && last_q != locked_q)
- spin_unlock(last_q->queue_lock);
-
- spin_unlock_irqrestore(&ioc->lock, flags);
-
- /* if no icq is left, we're done; otherwise, kick release_work */
- if (hlist_empty(&ioc->icq_list))
- kmem_cache_free(iocontext_cachep, ioc);
- else
- schedule_work(&ioc->release_work);
}
EXPORT_SYMBOL(put_io_context);
task_unlock(task);
atomic_dec(&ioc->nr_tasks);
- put_io_context(ioc, NULL);
+ put_io_context(ioc);
}
/**
{
return attempt_merge(q, rq, next);
}
+
+bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
+{
+ if (!rq_mergeable(rq))
+ return false;
+
+ /* don't merge file system requests and discard requests */
+ if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
+ return false;
+
+ /* don't merge discard requests and secure discard requests */
+ if ((bio->bi_rw & REQ_SECURE) != (rq->bio->bi_rw & REQ_SECURE))
+ return false;
+
+ /* different data direction or already started, don't merge */
+ if (bio_data_dir(bio) != rq_data_dir(rq))
+ return false;
+
+ /* must be same device and not a special request */
+ if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
+ return false;
+
+ /* only merge integrity protected bio into ditto rq */
+ if (bio_integrity(bio) != blk_integrity_rq(rq))
+ return false;
+
+ return true;
+}
+
+int blk_try_merge(struct request *rq, struct bio *bio)
+{
+ if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_sector)
+ return ELEVATOR_BACK_MERGE;
+ else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_sector)
+ return ELEVATOR_FRONT_MERGE;
+ return ELEVATOR_NO_MERGE;
+}
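To make the sector arithmetic in blk_try_merge() concrete: for a request at blk_rq_pos(rq) == 100 with blk_rq_sectors(rq) == 8, a bio with bi_sector == 108 takes the back-merge branch (100 + 8 == 108), a 4-sector bio with bi_sector == 96 takes the front-merge branch (100 - 4 == 96), and anything else falls through to ELEVATOR_NO_MERGE.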
struct request *next);
void blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
+bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
+int blk_try_merge(struct request *rq, struct bio *bio);
void blk_queue_congestion_threshold(struct request_queue *q);
mutex_lock(&bsg_mutex);
idr_remove(&bsg_minor_idr, bcd->minor);
- sysfs_remove_link(&q->kobj, "bsg");
+ if (q->kobj.sd)
+ sysfs_remove_link(&q->kobj, "bsg");
device_unregister(bcd->class_dev);
bcd->class_dev = NULL;
kref_put(&bcd->ref, bsg_kref_release_function);
/*
* Lookup the cfqq that this bio will be queued with and allow
- * merge only if rq is queued there. This function can be called
- * from plug merge without queue_lock. In such cases, ioc of @rq
- * and %current are guaranteed to be equal. Avoid lookup which
- * requires queue_lock by using @rq's cic.
+ * merge only if rq is queued there.
*/
- if (current->io_context == RQ_CIC(rq)->icq.ioc) {
- cic = RQ_CIC(rq);
- } else {
- cic = cfq_cic_lookup(cfqd, current->io_context);
- if (!cic)
- return false;
- }
+ cic = cfq_cic_lookup(cfqd, current->io_context);
+ if (!cic)
+ return false;
cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
return cfqq == RQ_CFQQ(rq);
cfqd->active_queue = NULL;
if (cfqd->active_cic) {
- put_io_context(cfqd->active_cic->icq.ioc, cfqd->queue);
+ put_io_context(cfqd->active_cic->icq.ioc);
cfqd->active_cic = NULL;
}
}
*/
static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
+ enum wl_type_t old_type = cfqq_type(cfqd->active_queue);
+
cfq_log_cfqq(cfqd, cfqq, "preempt");
+ cfq_slice_expired(cfqd, 1);
/*
* workload type is changed, don't save slice, otherwise preempt
* doesn't happen
*/
- if (cfqq_type(cfqd->active_queue) != cfqq_type(cfqq))
+ if (old_type != cfqq_type(cfqq))
cfqq->cfqg->saved_workload_slice = 0;
- cfq_slice_expired(cfqd, 1);
-
/*
	 * Put the new queue at the front of the current list,
* so we know that it will be selected next.
/*
* can we safely merge with this request?
*/
-int elv_rq_merge_ok(struct request *rq, struct bio *bio)
+bool elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
- if (!rq_mergeable(rq))
- return 0;
-
- /*
- * Don't merge file system requests and discard requests
- */
- if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
- return 0;
-
- /*
- * Don't merge discard requests and secure discard requests
- */
- if ((bio->bi_rw & REQ_SECURE) != (rq->bio->bi_rw & REQ_SECURE))
- return 0;
-
- /*
- * different data direction or already started, don't merge
- */
- if (bio_data_dir(bio) != rq_data_dir(rq))
- return 0;
-
- /*
- * must be same device and not a special request
- */
- if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
- return 0;
-
- /*
- * only merge integrity protected bio into ditto rq
- */
- if (bio_integrity(bio) != blk_integrity_rq(rq))
+ if (!blk_rq_merge_ok(rq, bio))
return 0;
if (!elv_iosched_allow_merge(rq, bio))
}
EXPORT_SYMBOL(elv_rq_merge_ok);
-int elv_try_merge(struct request *__rq, struct bio *bio)
-{
- int ret = ELEVATOR_NO_MERGE;
-
- /*
- * we can merge and sequence is ok, check if it's possible
- */
- if (elv_rq_merge_ok(__rq, bio)) {
- if (blk_rq_pos(__rq) + blk_rq_sectors(__rq) == bio->bi_sector)
- ret = ELEVATOR_BACK_MERGE;
- else if (blk_rq_pos(__rq) - bio_sectors(bio) == bio->bi_sector)
- ret = ELEVATOR_FRONT_MERGE;
- }
-
- return ret;
-}
-
static struct elevator_type *elevator_find(const char *name)
{
struct elevator_type *e;
/*
* First try one-hit cache.
*/
- if (q->last_merge) {
- ret = elv_try_merge(q->last_merge, bio);
+ if (q->last_merge && elv_rq_merge_ok(q->last_merge, bio)) {
+ ret = blk_try_merge(q->last_merge, bio);
if (ret != ELEVATOR_NO_MERGE) {
*req = q->last_merge;
return ret;
static inline void BLEND_OP(int I, u64 *W)
{
- W[I % 16] += s1(W[(I-2) % 16]) + W[(I-7) % 16] + s0(W[(I-15) % 16]);
+ W[I & 15] += s1(W[(I-2) & 15]) + W[(I-7) & 15] + s0(W[(I-15) & 15]);
}
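The I % 16 -> I & 15 rewrite relies on the identity n % 16 == n & 15 for non-negative n, 16 being a power of two: e.g. 23 % 16 == 7 and 23 & 15 == 0b10111 & 0b01111 == 0b00111 == 7. The bitwise form also avoids the sign fix-up code the compiler must emit for % on a signed int, which matters in this hot inner loop.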
static void
int i;
u64 W[16];
- /* load the input */
- for (i = 0; i < 16; i++)
- LOAD_OP(i, W, input);
-
/* load the state into our registers */
a=state[0]; b=state[1]; c=state[2]; d=state[3];
e=state[4]; f=state[5]; g=state[6]; h=state[7];
-#define SHA512_0_15(i, a, b, c, d, e, f, g, h) \
- t1 = h + e1(e) + Ch(e, f, g) + sha512_K[i] + W[i]; \
- t2 = e0(a) + Maj(a, b, c); \
- d += t1; \
- h = t1 + t2
-
-#define SHA512_16_79(i, a, b, c, d, e, f, g, h) \
- BLEND_OP(i, W); \
- t1 = h + e1(e) + Ch(e, f, g) + sha512_K[i] + W[(i)%16]; \
- t2 = e0(a) + Maj(a, b, c); \
- d += t1; \
- h = t1 + t2
-
- for (i = 0; i < 16; i += 8) {
- SHA512_0_15(i, a, b, c, d, e, f, g, h);
- SHA512_0_15(i + 1, h, a, b, c, d, e, f, g);
- SHA512_0_15(i + 2, g, h, a, b, c, d, e, f);
- SHA512_0_15(i + 3, f, g, h, a, b, c, d, e);
- SHA512_0_15(i + 4, e, f, g, h, a, b, c, d);
- SHA512_0_15(i + 5, d, e, f, g, h, a, b, c);
- SHA512_0_15(i + 6, c, d, e, f, g, h, a, b);
- SHA512_0_15(i + 7, b, c, d, e, f, g, h, a);
- }
- for (i = 16; i < 80; i += 8) {
- SHA512_16_79(i, a, b, c, d, e, f, g, h);
- SHA512_16_79(i + 1, h, a, b, c, d, e, f, g);
- SHA512_16_79(i + 2, g, h, a, b, c, d, e, f);
- SHA512_16_79(i + 3, f, g, h, a, b, c, d, e);
- SHA512_16_79(i + 4, e, f, g, h, a, b, c, d);
- SHA512_16_79(i + 5, d, e, f, g, h, a, b, c);
- SHA512_16_79(i + 6, c, d, e, f, g, h, a, b);
- SHA512_16_79(i + 7, b, c, d, e, f, g, h, a);
+ /* now iterate */
+ for (i=0; i<80; i+=8) {
+ if (!(i & 8)) {
+ int j;
+
+ if (i < 16) {
+ /* load the input */
+ for (j = 0; j < 16; j++)
+ LOAD_OP(i + j, W, input);
+ } else {
+ for (j = 0; j < 16; j++) {
+ BLEND_OP(i + j, W);
+ }
+ }
+ }
+
+ t1 = h + e1(e) + Ch(e,f,g) + sha512_K[i ] + W[(i & 15)];
+ t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2;
+ t1 = g + e1(d) + Ch(d,e,f) + sha512_K[i+1] + W[(i & 15) + 1];
+ t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2;
+ t1 = f + e1(c) + Ch(c,d,e) + sha512_K[i+2] + W[(i & 15) + 2];
+ t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2;
+ t1 = e + e1(b) + Ch(b,c,d) + sha512_K[i+3] + W[(i & 15) + 3];
+ t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2;
+ t1 = d + e1(a) + Ch(a,b,c) + sha512_K[i+4] + W[(i & 15) + 4];
+ t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2;
+ t1 = c + e1(h) + Ch(h,a,b) + sha512_K[i+5] + W[(i & 15) + 5];
+ t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2;
+ t1 = b + e1(g) + Ch(g,h,a) + sha512_K[i+6] + W[(i & 15) + 6];
+ t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2;
+ t1 = a + e1(f) + Ch(f,g,h) + sha512_K[i+7] + W[(i & 15) + 7];
+ t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2;
}
state[0] += a; state[1] += b; state[2] += c; state[3] += d;
}
static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL);
+static void cpu_device_release(struct device *dev)
+{
+ /*
+ * This is an empty function to prevent the driver core from spitting a
+ * warning at us. Yes, I know this is directly opposite of what the
+	 * documentation for the driver core and kobjects says, and the author
+	 * of this code has already been publicly ridiculed for doing
+ * something as foolish as this. However, at this point in time, it is
+ * the only way to handle the issue of statically allocated cpu
+ * devices. The different architectures will have their cpu device
+ * code reworked to properly handle this in the near future, so this
+ * function will then be changed to correctly free up the memory held
+ * by the cpu device.
+ *
+ * Never copy this way of doing things, or you too will be made fun of
+	 * on the linux-kernel list; you have been warned.
+ */
+}
+
/*
* register_cpu - Setup a sysfs device for a CPU.
* @cpu - cpu->hotpluggable field set to 1 will generate a control file in
int error;
cpu->node_id = cpu_to_node(num);
+ memset(&cpu->dev, 0x00, sizeof(struct device));
cpu->dev.id = num;
cpu->dev.bus = &cpu_subsys;
+ cpu->dev.release = cpu_device_release;
error = device_register(&cpu->dev);
if (!error && cpu->hotpluggable)
register_cpu_control(cpu);
}
static int add_memory_section(int nid, struct mem_section *section,
+ struct memory_block **mem_p,
unsigned long state, enum mem_add_context context)
{
- struct memory_block *mem;
+ struct memory_block *mem = NULL;
+ int scn_nr = __section_nr(section);
int ret = 0;
mutex_lock(&mem_sysfs_mutex);
- mem = find_memory_block(section);
+ if (context == BOOT) {
+ /* same memory block ? */
+ if (mem_p && *mem_p)
+ if (scn_nr >= (*mem_p)->start_section_nr &&
+ scn_nr <= (*mem_p)->end_section_nr) {
+ mem = *mem_p;
+ kobject_get(&mem->dev.kobj);
+ }
+ } else
+ mem = find_memory_block(section);
+
if (mem) {
mem->section_count++;
kobject_put(&mem->dev.kobj);
- } else
+ } else {
ret = init_memory_block(&mem, section, state);
+ /* store memory_block pointer for next loop */
+ if (!ret && context == BOOT)
+ if (mem_p)
+ *mem_p = mem;
+ }
if (!ret) {
if (context == HOTPLUG &&
*/
int register_new_memory(int nid, struct mem_section *section)
{
- return add_memory_section(nid, section, MEM_OFFLINE, HOTPLUG);
+ return add_memory_section(nid, section, NULL, MEM_OFFLINE, HOTPLUG);
}
int unregister_memory_section(struct mem_section *section)
int ret;
int err;
unsigned long block_sz;
+ struct memory_block *mem = NULL;
ret = subsys_system_register(&memory_subsys, NULL);
if (ret)
for (i = 0; i < NR_MEM_SECTIONS; i++) {
if (!present_section_nr(i))
continue;
- err = add_memory_section(0, __nr_to_section(i), MEM_ONLINE,
+ /* don't need to reuse memory_block if only one per block */
+ err = add_memory_section(0, __nr_to_section(i),
+ (sections_per_block == 1) ? NULL : &mem,
+ MEM_ONLINE,
BOOT);
if (!ret)
ret = err;
if (!present_section_nr(section_nr))
continue;
mem_sect = __nr_to_section(section_nr);
+
+ /* same memblock ? */
+ if (mem_blk)
+ if ((section_nr >= mem_blk->start_section_nr) &&
+ (section_nr <= mem_blk->end_section_nr))
+ continue;
+
mem_blk = find_memory_block_hinted(mem_sect, mem_blk);
+
ret = register_mem_sect_under_node(mem_blk, nid);
if (!err)
err = ret;
err = bcma_sprom_get(bus);
if (err == -ENOENT) {
pr_err("No SPROM available\n");
- } else if (err) {
+ } else if (err)
pr_err("Failed to get SPROM: %d\n", err);
- return -ENOENT;
- }
/* Register found cores */
bcma_register_cores(bus);
core->bus = bus;
err = bcma_get_next_core(bus, &eromptr, NULL, core_num, core);
- if (err == -ENODEV) {
- core_num++;
- continue;
- } else if (err == -ENXIO)
- continue;
- else if (err == -ESPIPE)
- break;
- else if (err < 0)
+ if (err < 0) {
+ kfree(core);
+ if (err == -ENODEV) {
+ core_num++;
+ continue;
+ } else if (err == -ENXIO) {
+ continue;
+ } else if (err == -ESPIPE) {
+ break;
+ }
return err;
+ }
core->core_index = core_num++;
bus->nr_cores++;
out_put_disk:
while (dr--) {
del_timer_sync(&motor_off_timer[dr]);
- if (disks[dr]->queue)
+ if (disks[dr]->queue) {
blk_cleanup_queue(disks[dr]->queue);
+ /*
+ * put_disk() is not paired with add_disk() and
+			 * will put the queue reference down one extra time. Fix it.
+ */
+ disks[dr]->queue = NULL;
+ }
put_disk(disks[dr]);
}
return err;
platform_device_unregister(&floppy_device[drive]);
}
blk_cleanup_queue(disks[drive]->queue);
+
+ /*
+ * These disks have not called add_disk(). Don't put down
+ * queue reference in put_disk().
+ */
+ if (!(allowed_drive_mask & (1 << drive)) ||
+ fdc_state[FDC(drive)].version == FDC_NONE)
+ disks[drive]->queue = NULL;
+
put_disk(disks[drive]);
}
return __splice_from_pipe(pipe, sd, lo_splice_actor);
}
-static int
+static ssize_t
do_lo_receive(struct loop_device *lo,
struct bio_vec *bvec, int bsize, loff_t pos)
{
struct lo_read_data cookie;
struct splice_desc sd;
struct file *file;
- long retval;
+ ssize_t retval;
cookie.lo = lo;
cookie.page = bvec->bv_page;
file = lo->lo_backing_file;
retval = splice_direct_to_actor(file, &sd, lo_direct_splice_actor);
- if (retval < 0)
- return retval;
- if (retval != bvec->bv_len)
- return -EIO;
- return 0;
+ return retval;
}
static int
lo_receive(struct loop_device *lo, struct bio *bio, int bsize, loff_t pos)
{
struct bio_vec *bvec;
- int i, ret = 0;
+ ssize_t s;
+ int i;
bio_for_each_segment(bvec, bio, i) {
- ret = do_lo_receive(lo, bvec, bsize, pos);
- if (ret < 0)
+ s = do_lo_receive(lo, bvec, bsize, pos);
+ if (s < 0)
+ return s;
+
+ if (s != bvec->bv_len) {
+ zero_fill_bio(bio);
break;
+ }
pos += bvec->bv_len;
}
- return ret;
+ return 0;
}
static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
* when the read completes.
* @data Callback data passed to the callback function
* when the read completes.
- * @barrier If non-zero, this command must be completed before
- * issuing any other commands.
* @dir Direction (read or write)
*
* return value
*/
static void mtip_hw_submit_io(struct driver_data *dd, sector_t start,
int nsect, int nents, int tag, void *callback,
- void *data, int barrier, int dir)
+ void *data, int dir)
{
struct host_to_dev_fis *fis;
struct mtip_port *port = dd->port;
*((unsigned int *) &fis->lba_low) = (start & 0xFFFFFF);
*((unsigned int *) &fis->lba_low_ex) = ((start >> 24) & 0xFFFFFF);
fis->device = 1 << 6;
- if (barrier)
- fis->device |= FUA_BIT;
fis->features = nsect & 0xFF;
fis->features_ex = (nsect >> 8) & 0xFF;
fis->sect_count = ((tag << 3) | (tag >> 5));
tag,
bio_endio,
bio,
- bio->bi_rw & REQ_FUA,
bio_data_dir(bio));
} else
bio_io_error(bio);
blk_queue_max_segments(dd->queue, MTIP_MAX_SG);
blk_queue_physical_block_size(dd->queue, 4096);
blk_queue_io_min(dd->queue, 4096);
+ /*
+	 * Write-back cache is not supported by the device. FUA depends on
+	 * write-back cache support, hence setting flush support to zero.
+ */
blk_queue_flush(dd->queue, 0);
/* Set the capacity of the device in 512 byte sectors. */
/* BAR number used to access the HBA registers. */
#define MTIP_ABAR 5
-/* Forced Unit Access Bit */
-#define FUA_BIT 0x80
-
#ifdef DEBUG
#define dbg_printk(format, arg...) \
printk(pr_fmt(format), ##arg);
atomic_t resumeflag; /* Atomic variable to track suspend/resume */
- atomic_t eh_active; /* Flag for error handling tracking */
-
struct task_struct *mtip_svc_handler; /* task_struct of svc thd */
};
/* used to tell the module to turn on full debugging messages */
static bool debug;
-/* used to keep tray locked at all times */
-static int keeplocked;
/* default compatibility mode */
static bool autoclose=1;
static bool autoeject;
cdinfo(CD_CLOSE, "Use count for \"/dev/%s\" now zero\n", cdi->name);
cdrom_dvd_rw_close_write(cdi);
- if ((cdo->capability & CDC_LOCK) && !keeplocked) {
+ if ((cdo->capability & CDC_LOCK) && !cdi->keeplocked) {
cdinfo(CD_CLOSE, "Unlocking door!\n");
cdo->lock_door(cdi, 0);
}
curslot = info->hdr.curslot;
kfree(info);
- if (cdi->use_count > 1 || keeplocked) {
+ if (cdi->use_count > 1 || cdi->keeplocked) {
if (slot == CDSL_CURRENT) {
return curslot;
} else {
if (!nr)
return -ENOMEM;
- if (!access_ok(VERIFY_WRITE, ubuf, nframes * CD_FRAMESIZE_RAW)) {
- ret = -EFAULT;
- goto out;
- }
-
cgc.data_direction = CGC_DATA_READ;
while (nframes > 0) {
if (nr > nframes)
ret = cdrom_read_block(cdi, &cgc, lba, nr, 1, CD_FRAMESIZE_RAW);
if (ret)
break;
- if (__copy_to_user(ubuf, cgc.buffer, CD_FRAMESIZE_RAW * nr)) {
+ if (copy_to_user(ubuf, cgc.buffer, CD_FRAMESIZE_RAW * nr)) {
ret = -EFAULT;
break;
}
nframes -= nr;
lba += nr;
}
-out:
kfree(cgc.buffer);
return ret;
}
if (!CDROM_CAN(CDC_OPEN_TRAY))
return -ENOSYS;
- if (cdi->use_count != 1 || keeplocked)
+ if (cdi->use_count != 1 || cdi->keeplocked)
return -EBUSY;
if (CDROM_CAN(CDC_LOCK)) {
int ret = cdi->ops->lock_door(cdi, 0);
if (!CDROM_CAN(CDC_OPEN_TRAY))
return -ENOSYS;
- if (keeplocked)
+ if (cdi->keeplocked)
return -EBUSY;
cdi->options &= ~(CDO_AUTO_CLOSE | CDO_AUTO_EJECT);
if (!CDROM_CAN(CDC_LOCK))
return -EDRIVE_CANT_DO_THIS;
- keeplocked = arg ? 1 : 0;
+ cdi->keeplocked = arg ? 1 : 0;
/*
* Don't unlock the door on multiple opens by default, but allow
if (err)
return err;
- if (__get_user(c32.auth, &client->auth)
+ if (__get_user(c32.idx, &client->idx)
+ || __get_user(c32.auth, &client->auth)
|| __get_user(c32.pid, &client->pid)
|| __get_user(c32.uid, &client->uid)
|| __get_user(c32.magic, &client->magic)
if (enable_fbc < 0) {
DRM_DEBUG_KMS("fbc set to per-chip default\n");
enable_fbc = 1;
- if (INTEL_INFO(dev)->gen <= 5)
+ if (INTEL_INFO(dev)->gen <= 6)
enable_fbc = 0;
}
if (!enable_fbc) {
}
}
+ pipeconf &= ~PIPECONF_INTERLACE_MASK;
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
/* the chip adds 2 halflines automatically */
adjusted_mode->crtc_vsync_end -= 1;
adjusted_mode->crtc_vsync_start -= 1;
} else
- pipeconf &= ~PIPECONF_INTERLACE_MASK; /* progressive */
+ pipeconf |= PIPECONF_PROGRESSIVE;
I915_WRITE(HTOTAL(pipe),
(adjusted_mode->crtc_hdisplay - 1) |
}
}
+ pipeconf &= ~PIPECONF_INTERLACE_MASK;
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
/* the chip adds 2 halflines automatically */
adjusted_mode->crtc_vsync_end -= 1;
adjusted_mode->crtc_vsync_start -= 1;
} else
- pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */
+ pipeconf |= PIPECONF_PROGRESSIVE;
I915_WRITE(HTOTAL(pipe),
(adjusted_mode->crtc_hdisplay - 1) |
*/
static int
-intel_dp_link_required(struct intel_dp *intel_dp, int pixel_clock, int check_bpp)
+intel_dp_link_required(int pixel_clock, int bpp)
{
- struct drm_crtc *crtc = intel_dp->base.base.crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int bpp = 24;
-
- if (check_bpp)
- bpp = check_bpp;
- else if (intel_crtc)
- bpp = intel_crtc->bpp;
-
return (pixel_clock * bpp + 9) / 10;
}
return MODE_PANEL;
}
- mode_rate = intel_dp_link_required(intel_dp, mode->clock, 0);
+ mode_rate = intel_dp_link_required(mode->clock, 24);
max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
if (mode_rate > max_rate) {
- mode_rate = intel_dp_link_required(intel_dp,
- mode->clock, 18);
+ mode_rate = intel_dp_link_required(mode->clock, 18);
if (mode_rate > max_rate)
return MODE_CLOCK_HIGH;
else
int lane_count, clock;
int max_lane_count = intel_dp_max_lane_count(intel_dp);
int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
- int bpp = mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 0;
+ int bpp = mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
for (clock = 0; clock <= max_clock; clock++) {
int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
- if (intel_dp_link_required(intel_dp, mode->clock, bpp)
+ if (intel_dp_link_required(mode->clock, bpp)
<= link_avail) {
intel_dp->link_bw = bws[clock];
intel_dp->lane_count = lane_count;
},
{
.callback = intel_no_lvds_dmi_callback,
+ .ident = "AOpen i45GMx-I",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
+ DMI_MATCH(DMI_BOARD_NAME, "i45GMx-I"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
.ident = "Aopen i945GTt-VFA",
.matches = {
DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"),
if (r) {
DRM_ERROR("radeon: failed testing IB (%d).\n", r);
rdev->accel_working = false;
+ return r;
}
r = r600_audio_init(rdev);
static inline void f75375_write16(struct i2c_client *client, u8 reg,
u16 value)
{
- int err = i2c_smbus_write_byte_data(client, reg, (value << 8));
+ int err = i2c_smbus_write_byte_data(client, reg, (value >> 8));
if (err)
return;
i2c_smbus_write_byte_data(client, reg + 1, (value & 0xFF));
f75375_read16(client, F75375_REG_FAN_MIN(nr));
data->fan_target[nr] =
f75375_read16(client, F75375_REG_FAN_EXP(nr));
- data->pwm[nr] = f75375_read8(client,
- F75375_REG_FAN_PWM_DUTY(nr));
-
}
for (nr = 0; nr < 4; nr++) {
data->in_max[nr] =
if (time_after(jiffies, data->last_updated + 2 * HZ)
|| !data->valid) {
for (nr = 0; nr < 2; nr++) {
+ data->pwm[nr] = f75375_read8(client,
+ F75375_REG_FAN_PWM_DUTY(nr));
/* assign MSB, therefore shift it by 8 bits */
data->temp11[nr] =
f75375_read8(client, F75375_REG_TEMP(nr)) << 8;
fanmode |= (3 << FAN_CTRL_MODE(nr));
break;
case 2: /* AUTOMATIC*/
- fanmode |= (2 << FAN_CTRL_MODE(nr));
+ fanmode |= (1 << FAN_CTRL_MODE(nr));
break;
case 3: /* fan speed */
break;
if (data->kind == f75387) {
bool manu, duty;
- if (!(conf & (1 << F75387_FAN_CTRL_LINEAR(nr))))
+ if (!(mode & (1 << F75387_FAN_CTRL_LINEAR(nr))))
data->pwm_mode[nr] = 1;
manu = ((mode >> F75387_FAN_MANU_MODE(nr)) & 1);
for (i = 0; i < data->pwm_num; i++)
data->pwm_enable_orig[i] = data->pwm_enable[i];
- /* Read pwm data to save original values */
- w83627ehf_update_pwm_common(dev, data);
- for (i = 0; i < data->pwm_num; i++)
- data->pwm_enable_orig[i] = data->pwm_enable[i];
-
/* Register sysfs hooks */
for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays); i++) {
err = device_create_file(dev, &sda_sf3_arrays[i].dev_attr);
#include <linux/mutex.h>
#include <net/neighbour.h>
+#include <net/sch_generic.h>
#include <linux/atomic.h>
u16 reserved;
};
-struct ipoib_pseudoheader {
- u8 hwaddr[INFINIBAND_ALEN];
+struct ipoib_cb {
+ struct qdisc_skb_cb qdisc_cb;
+ u8 hwaddr[INFINIBAND_ALEN];
};
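struct ipoib_cb embeds struct qdisc_skb_cb as its first member on purpose: the qdisc layer uses the front of the 48-byte skb->cb[] area, so the stashed link-layer address must live after it to survive the trip through the queue discipline. This replaces the old trick of pushing a pseudoheader in front of the packet data and stripping it again on the transmit path, as the rest of the hunks below show.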
/* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */
}
static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
- struct ipoib_pseudoheader *phdr)
+ struct ipoib_cb *cb)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_path *path;
spin_lock_irqsave(&priv->lock, flags);
- path = __path_find(dev, phdr->hwaddr + 4);
+ path = __path_find(dev, cb->hwaddr + 4);
if (!path || !path->valid) {
int new_path = 0;
if (!path) {
- path = path_rec_create(dev, phdr->hwaddr + 4);
+ path = path_rec_create(dev, cb->hwaddr + 4);
new_path = 1;
}
if (path) {
- /* put pseudoheader back on for next time */
- skb_push(skb, sizeof *phdr);
__skb_queue_tail(&path->queue, skb);
if (!path->query && path_rec_start(dev, path)) {
be16_to_cpu(path->pathrec.dlid));
spin_unlock_irqrestore(&priv->lock, flags);
- ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr));
+ ipoib_send(dev, skb, path->ah, IPOIB_QPN(cb->hwaddr));
return;
} else if ((path->query || !path_rec_start(dev, path)) &&
skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
- /* put pseudoheader back on for next time */
- skb_push(skb, sizeof *phdr);
__skb_queue_tail(&path->queue, skb);
} else {
++dev->stats.tx_dropped;
dev_kfree_skb_any(skb);
}
} else {
- struct ipoib_pseudoheader *phdr =
- (struct ipoib_pseudoheader *) skb->data;
- skb_pull(skb, sizeof *phdr);
+ struct ipoib_cb *cb = (struct ipoib_cb *) skb->cb;
- if (phdr->hwaddr[4] == 0xff) {
+ if (cb->hwaddr[4] == 0xff) {
/* Add in the P_Key for multicast*/
- phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
- phdr->hwaddr[9] = priv->pkey & 0xff;
+ cb->hwaddr[8] = (priv->pkey >> 8) & 0xff;
+ cb->hwaddr[9] = priv->pkey & 0xff;
- ipoib_mcast_send(dev, phdr->hwaddr + 4, skb);
+ ipoib_mcast_send(dev, cb->hwaddr + 4, skb);
} else {
/* unicast GID -- should be ARP or RARP reply */
ipoib_warn(priv, "Unicast, no %s: type %04x, QPN %06x %pI6\n",
skb_dst(skb) ? "neigh" : "dst",
be16_to_cpup((__be16 *) skb->data),
- IPOIB_QPN(phdr->hwaddr),
- phdr->hwaddr + 4);
+ IPOIB_QPN(cb->hwaddr),
+ cb->hwaddr + 4);
dev_kfree_skb_any(skb);
++dev->stats.tx_dropped;
goto unlock;
}
- unicast_arp_send(skb, dev, phdr);
+ unicast_arp_send(skb, dev, cb);
}
}
unlock:
const void *daddr, const void *saddr, unsigned len)
{
struct ipoib_header *header;
- struct dst_entry *dst;
- struct neighbour *n;
header = (struct ipoib_header *) skb_push(skb, sizeof *header);
header->reserved = 0;
/*
- * If we don't have a neighbour structure, stuff the
- * destination address onto the front of the skb so we can
- * figure out where to send the packet later.
+ * If we don't have a dst_entry structure, stuff the
+ * destination address into skb->cb so we can figure out where
+ * to send the packet later.
*/
- dst = skb_dst(skb);
- n = NULL;
- if (dst)
- n = dst_get_neighbour_noref_raw(dst);
- if ((!dst || !n) && daddr) {
- struct ipoib_pseudoheader *phdr =
- (struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
- memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
+ if (!skb_dst(skb)) {
+ struct ipoib_cb *cb = (struct ipoib_cb *) skb->cb;
+ memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN);
}
return 0;
dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
- /*
- * We add in INFINIBAND_ALEN to allow for the destination
- * address "pseudoheader" for skbs without neighbour struct.
- */
- dev->hard_header_len = IPOIB_ENCAP_LEN + INFINIBAND_ALEN;
+ dev->hard_header_len = IPOIB_ENCAP_LEN;
dev->addr_len = INFINIBAND_ALEN;
dev->type = ARPHRD_INFINIBAND;
dev->tx_queue_len = ipoib_sendq_size * 2;
netif_tx_lock_bh(dev);
while (!skb_queue_empty(&mcast->pkt_queue)) {
struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);
- struct dst_entry *dst = skb_dst(skb);
- struct neighbour *n = NULL;
netif_tx_unlock_bh(dev);
skb->dev = dev;
- if (dst)
- n = dst_get_neighbour_noref_raw(dst);
- if (!dst || !n) {
- /* put pseudoheader back on for next time */
- skb_push(skb, sizeof (struct ipoib_pseudoheader));
- }
-
if (dev_queue_xmit(skb))
ipoib_warn(priv, "dev_queue_xmit failed to requeue packet\n");
+
netif_tx_lock_bh(dev);
}
netif_tx_unlock_bh(dev);
{
isdn_net_local *lp = netdev_priv(dev);
unsigned char *p;
- ushort len = 0;
+ int len = 0;
switch (lp->p_encap) {
case ISDN_NET_ENCAP_ETHER:
config TWL4030_CORE
bool "Texas Instruments TWL4030/TWL5030/TWL6030/TPS659x0 Support"
- depends on I2C=y && GENERIC_HARDIRQS && IRQ_DOMAIN
+ depends on I2C=y && GENERIC_HARDIRQS
help
Say yes here if you have TWL4030 / TWL6030 family chip on your board.
This core driver provides register access and IRQ handling
static struct twl_client twl_modules[TWL_NUM_SLAVES];
+#ifdef CONFIG_IRQ_DOMAIN
static struct irq_domain domain;
+#endif
/* mapping the module id to slave id and base address */
struct twl_mapping {
pdata->irq_base = status;
pdata->irq_end = pdata->irq_base + nr_irqs;
+#ifdef CONFIG_IRQ_DOMAIN
domain.irq_base = pdata->irq_base;
domain.nr_irq = nr_irqs;
-#ifdef CONFIG_OF_IRQ
domain.of_node = of_node_get(node);
domain.ops = &irq_domain_simple_ops;
-#endif
irq_domain_add(&domain);
+#endif
if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C) == 0) {
dev_dbg(&client->dev, "can't talk I2C?\n");
[RES_MAIN_REF] = 0x94,
};
-static int __init twl4030_write_script_byte(u8 address, u8 byte)
+static int __devinit twl4030_write_script_byte(u8 address, u8 byte)
{
int err;
return err;
}
-static int __init twl4030_write_script_ins(u8 address, u16 pmb_message,
+static int __devinit twl4030_write_script_ins(u8 address, u16 pmb_message,
u8 delay, u8 next)
{
int err;
return err;
}
-static int __init twl4030_write_script(u8 address, struct twl4030_ins *script,
+static int __devinit twl4030_write_script(u8 address, struct twl4030_ins *script,
int len)
{
int err;
return err;
}
-static int __init twl4030_config_wakeup3_sequence(u8 address)
+static int __devinit twl4030_config_wakeup3_sequence(u8 address)
{
int err;
u8 data;
return err;
}
-static int __init twl4030_config_wakeup12_sequence(u8 address)
+static int __devinit twl4030_config_wakeup12_sequence(u8 address)
{
int err = 0;
u8 data;
return err;
}
-static int __init twl4030_config_sleep_sequence(u8 address)
+static int __devinit twl4030_config_sleep_sequence(u8 address)
{
int err;
return err;
}
-static int __init twl4030_config_warmreset_sequence(u8 address)
+static int __devinit twl4030_config_warmreset_sequence(u8 address)
{
int err;
u8 rd_data;
return err;
}
-static int __init twl4030_configure_resource(struct twl4030_resconfig *rconfig)
+static int __devinit twl4030_configure_resource(struct twl4030_resconfig *rconfig)
{
int rconfig_addr;
int err;
return 0;
}
-static int __init load_twl4030_script(struct twl4030_script *tscript,
+static int __devinit load_twl4030_script(struct twl4030_script *tscript,
u8 address)
{
int err;
pr_err("TWL4030 Unable to power off\n");
}
-void __init twl4030_power_init(struct twl4030_power_data *twl4030_scripts)
+void __devinit twl4030_power_init(struct twl4030_power_data *twl4030_scripts)
{
int err = 0;
int i;
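The __init -> __devinit conversions above follow the same section rule as the OMAP hunks earlier: this code is reached from a driver probe() path, which can run long after boot (for example on a hot-plugged or re-bound device) when __init memory has already been freed, whereas __devinit text is kept whenever hotplug support is enabled.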
# Misc strange devices
#
-# This one has to live outside of the MISC_DEVICES conditional,
-# because it may be selected by drivers/platform/x86/hp_accel.
+menu "Misc devices"
+
config SENSORS_LIS3LV02D
tristate
depends on INPUT
select INPUT_POLLDEV
default n
-menuconfig MISC_DEVICES
- bool "Misc devices"
- ---help---
- Say Y here to get to see options for device drivers from various
- different categories. This option alone does not add any kernel code.
-
- If you say N, all options in this submenu will be skipped and disabled.
-
-if MISC_DEVICES
-
config AD525X_DPOT
tristate "Analog Devices Digital Potentiometers"
depends on (I2C || SPI) && SYSFS
source "drivers/misc/lis3lv02d/Kconfig"
source "drivers/misc/carma/Kconfig"
source "drivers/misc/altera-stapl/Kconfig"
-
-endif # MISC_DEVICES
+endmenu
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/ioport.h>
#include <linux/c2port.h>
#define DATA_PORT 0x325
if (err)
return err;
+ spin_lock_init(&chip->irq_lock);
chip->pdev = pdev;
chip->iobase = pcim_iomap_table(pdev)[0];
* In other cases (such as with VSAless OpenFirmware), the system firmware
* leaves timers available for us to use.
*/
-static int __init scan_timers(struct cs5535_mfgpt_chip *mfgpt)
+static int __devinit scan_timers(struct cs5535_mfgpt_chip *mfgpt)
{
struct cs5535_mfgpt_timer timer = { .chip = mfgpt };
unsigned long flags;
* fear that guest will need it. Host may reject some pages; we need to
* check the return value and maybe submit a different page.
*/
-static bool vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
+static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
unsigned int *hv_status)
{
unsigned long status, dummy;
pfn32 = (u32)pfn;
if (pfn32 != pfn)
- return false;
+ return -1;
STATS_INC(b->stats.lock);
*hv_status = status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy);
if (vmballoon_check_status(b, status))
- return true;
+ return 0;
pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
STATS_INC(b->stats.lock_fail);
- return false;
+ return 1;
}
/*
struct page *page;
gfp_t flags;
unsigned int hv_status;
- bool locked = false;
+ int locked;
flags = can_sleep ? VMW_PAGE_ALLOC_CANSLEEP : VMW_PAGE_ALLOC_NOSLEEP;
do {
/* inform monitor */
locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status);
- if (!locked) {
+ if (locked > 0) {
STATS_INC(b->stats.refused_alloc);
if (hv_status == VMW_BALLOON_ERROR_RESET ||
if (++b->n_refused_pages >= VMW_BALLOON_MAX_REFUSED)
return -EIO;
}
- } while (!locked);
+ } while (locked != 0);
/* track allocated page */
list_add(&page->lru, &b->pages);
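
The lock-page helper above was widened from bool to a tri-state; the convention inferred from these hunks, written out with hypothetical names (the driver itself uses bare -1/0/1):

enum {
	VMBALLOON_PAGE_BAD	= -1,	/* pfn needs more than 32 bits: never lockable */
	VMBALLOON_PAGE_LOCKED	=  0,	/* hypervisor accepted the page */
	VMBALLOON_PAGE_REFUSED	=  1,	/* refused this page: retry with another */
};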
md->power_ro_lock.show = power_ro_lock_show;
md->power_ro_lock.store = power_ro_lock_store;
+ sysfs_attr_init(&md->power_ro_lock.attr);
md->power_ro_lock.attr.mode = mode;
md->power_ro_lock.attr.name =
"ro_lock_until_next_power_on";
static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
bool is_first_req)
{
- if (host->ops->pre_req)
+ if (host->ops->pre_req) {
+ mmc_host_clk_hold(host);
host->ops->pre_req(host, mrq, is_first_req);
+ mmc_host_clk_release(host);
+ }
}
/**
static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
int err)
{
- if (host->ops->post_req)
+ if (host->ops->post_req) {
+ mmc_host_clk_hold(host);
host->ops->post_req(host, mrq, err);
+ mmc_host_clk_release(host);
+ }
}
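
The pre_req/post_req hunks above are the first of many in this series that wrap host ->ops callbacks in the same clock-gating idiom; a minimal sketch, with a hypothetical callback name:

/* With CONFIG_MMC_CLKGATE the host clock may be gated between requests;
 * pinning it guarantees the controller clock runs across the callback. */
if (host->ops->some_callback) {			/* hypothetical op */
	mmc_host_clk_hold(host);		/* ungate and pin the clock */
	host->ops->some_callback(host);
	mmc_host_clk_release(host);		/* allow gating again */
}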
/**
int err;
host->en_dis_recurs = 1;
+ mmc_host_clk_hold(host);
err = host->ops->enable(host);
+ mmc_host_clk_release(host);
host->en_dis_recurs = 0;
if (err) {
int err;
host->en_dis_recurs = 1;
+ mmc_host_clk_hold(host);
err = host->ops->disable(host, lazy);
+ mmc_host_clk_release(host);
host->en_dis_recurs = 0;
if (err < 0) {
* might not allow this operation
*/
voltage = regulator_get_voltage(supply);
+
+ if (mmc->caps2 & MMC_CAP2_BROKEN_VOLTAGE)
+ min_uV = max_uV = voltage;
+
if (voltage < 0)
result = voltage;
else if (voltage < min_uV || voltage > max_uV)
host->ios.signal_voltage = signal_voltage;
- if (host->ops->start_signal_voltage_switch)
+ if (host->ops->start_signal_voltage_switch) {
+ mmc_host_clk_hold(host);
err = host->ops->start_signal_voltage_switch(host, &host->ios);
+ mmc_host_clk_release(host);
+ }
return err;
}
int err = 0;
card = host->card;
+ mmc_claim_host(host);
/*
* Send power notify command only if card
/* Set the card state to no notification after the poweroff */
card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION;
}
+ mmc_release_host(host);
}
/*
void mmc_power_off(struct mmc_host *host)
{
+ int err = 0;
mmc_host_clk_hold(host);
host->ios.clock = 0;
host->ios.vdd = 0;
- mmc_poweroff_notify(host);
+ /*
+ * For an eMMC 4.5 device, send the AWAKE command before
+ * POWER_OFF_NOTIFY, because in the sleep state eMMC 4.5
+ * devices respond only to the RESET and AWAKE commands
+ */
+ if (host->card && mmc_card_is_sleep(host->card) &&
+ host->bus_ops->resume) {
+ err = host->bus_ops->resume(host);
+
+ if (!err)
+ mmc_poweroff_notify(host);
+ else
+ pr_warning("%s: error %d during resume "
+ "(continue with poweroff sequence)\n",
+ mmc_hostname(host), err);
+ }
/*
* Reset ocr mask to be the highest possible voltage supported for
*/
if (mmc_try_claim_host(host)) {
if (host->bus_ops->suspend) {
- /*
- * For eMMC 4.5 device send notify command
- * before sleep, because in sleep state eMMC 4.5
- * devices respond to only RESET and AWAKE cmd
- */
- mmc_poweroff_notify(host);
err = host->bus_ops->suspend(host);
}
mmc_do_release_host(host);
int mmc_register_host_class(void);
void mmc_unregister_host_class(void);
-
-#ifdef CONFIG_MMC_CLKGATE
-void mmc_host_clk_hold(struct mmc_host *host);
-void mmc_host_clk_release(struct mmc_host *host);
-unsigned int mmc_host_clk_rate(struct mmc_host *host);
-
-#else
-static inline void mmc_host_clk_hold(struct mmc_host *host)
-{
-}
-
-static inline void mmc_host_clk_release(struct mmc_host *host)
-{
-}
-
-static inline unsigned int mmc_host_clk_rate(struct mmc_host *host)
-{
- return host->ios.clock;
-}
-#endif
-
void mmc_host_deeper_disable(struct work_struct *work);
#endif
}
card->ext_csd.raw_hc_erase_gap_size =
- ext_csd[EXT_CSD_PARTITION_ATTRIBUTE];
+ ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
card->ext_csd.raw_sec_trim_mult =
ext_csd[EXT_CSD_SEC_TRIM_MULT];
card->ext_csd.raw_sec_erase_mult =
goto out;
/* only compare read only fields */
- err = (!(card->ext_csd.raw_partition_support ==
+ err = !((card->ext_csd.raw_partition_support ==
bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) &&
(card->ext_csd.raw_erased_mem_count ==
bw_ext_csd[EXT_CSD_ERASED_MEM_CONT]) &&
err = mmc_select_hs200(card);
else if (host->caps & MMC_CAP_MMC_HIGHSPEED)
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_HS_TIMING, 1, 0);
+ EXT_CSD_HS_TIMING, 1,
+ card->ext_csd.generic_cmd6_time);
if (err && err != -EBADMSG)
goto free_card;
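
For context on the new timeout argument (eMMC spec background, hedged, not shown in the hunk): GENERIC_CMD6_TIME is EXT_CSD byte 248, in units of 10 ms, so the core can bound the CMD6 (SWITCH) busy wait per card instead of passing the old hard-coded 0. Decoding would look like:

/* EXT_CSD[248], units of 10 ms; stored in ms for mmc_switch() timeouts */
card->ext_csd.generic_cmd6_time =
	10 * ext_csd[EXT_CSD_GENERIC_CMD6_TIME];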
* Activate wide bus and DDR (if supported).
*/
if (!mmc_card_hs200(card) &&
- (card->csd.mmca_vsn >= CSD_SPEC_VER_3) &&
+ (card->csd.mmca_vsn >= CSD_SPEC_VER_4) &&
(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) {
static unsigned ext_csd_bits[][2] = {
{ EXT_CSD_BUS_WIDTH_8, EXT_CSD_DDR_BUS_WIDTH_8 },
BUG_ON(!host->card);
mmc_claim_host(host);
- if (mmc_card_can_sleep(host))
+ if (mmc_card_can_sleep(host)) {
err = mmc_card_sleep(host);
- else if (!mmc_host_is_spi(host))
+ if (!err)
+ mmc_card_set_sleep(host->card);
+ } else if (!mmc_host_is_spi(host))
mmc_deselect_cards(host);
- host->card->state &= ~MMC_STATE_HIGHSPEED;
+ host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200);
mmc_release_host(host);
return err;
BUG_ON(!host->card);
mmc_claim_host(host);
- err = mmc_init_card(host, host->ocr, host->card);
+ if (mmc_card_is_sleep(host->card)) {
+ err = mmc_card_awake(host);
+ mmc_card_clr_sleep(host->card);
+ } else
+ err = mmc_init_card(host, host->ocr, host->card);
mmc_release_host(host);
return err;
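
The suspend/resume hunks above lean on small sleep-state accessors over card->state; a sketch of the assumed helpers (the flag's actual value lives in linux/mmc/card.h):

#define MMC_STATE_SLEEP		(1<<8)	/* assumed bit, illustrative */
#define mmc_card_is_sleep(c)	((c)->state & MMC_STATE_SLEEP)
#define mmc_card_set_sleep(c)	((c)->state |= MMC_STATE_SLEEP)
#define mmc_card_clr_sleep(c)	((c)->state &= ~MMC_STATE_SLEEP)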
{
int ret;
- host->card->state &= ~MMC_STATE_HIGHSPEED;
+ host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200);
+ mmc_card_clr_sleep(host->card);
mmc_claim_host(host);
ret = mmc_init_card(host, host->ocr, host->card);
mmc_release_host(host);
* information and let the hardware specific code
* return what is possible given the options
*/
+ mmc_host_clk_hold(card->host);
drive_strength = card->host->ops->select_drive_strength(
card->sw_caps.uhs_max_dtr,
host_drv_type, card_drv_type);
+ mmc_host_clk_release(card->host);
err = mmc_sd_switch(card, 1, 2, drive_strength, status);
if (err)
goto out;
/* SPI mode doesn't define CMD19 */
- if (!mmc_host_is_spi(card->host) && card->host->ops->execute_tuning)
+ if (!mmc_host_is_spi(card->host) && card->host->ops->execute_tuning) {
+ mmc_host_clk_hold(card->host);
err = card->host->ops->execute_tuning(card->host,
MMC_SEND_TUNING_BLOCK);
+ mmc_host_clk_release(card->host);
+ }
out:
kfree(status);
if (!reinit) {
int ro = -1;
- if (host->ops->get_ro)
+ if (host->ops->get_ro) {
+ mmc_host_clk_hold(card->host);
ro = host->ops->get_ro(host);
+ mmc_host_clk_release(card->host);
+ }
if (ro < 0) {
pr_warning("%s: host does not "
* Since initialization is now complete, enable preset
* value registers for UHS-I cards.
*/
- if (host->ops->enable_preset_value)
+ if (host->ops->enable_preset_value) {
+ mmc_host_clk_hold(card->host);
host->ops->enable_preset_value(host, true);
+ mmc_host_clk_release(card->host);
+ }
} else {
/*
* Attempt to change to high-speed (if supported)
return err;
/* Disable preset value enable if already set since last time */
- if (host->ops->enable_preset_value)
+ if (host->ops->enable_preset_value) {
+ mmc_host_clk_hold(host);
host->ops->enable_preset_value(host, false);
+ mmc_host_clk_release(host);
+ }
err = mmc_send_app_op_cond(host, 0, &ocr);
if (err)
return ret;
}
-static int sdio_read_cccr(struct mmc_card *card)
+static int sdio_read_cccr(struct mmc_card *card, u32 ocr)
{
int ret;
int cccr_vsn;
+ int uhs = ocr & R4_18V_PRESENT;
unsigned char data;
unsigned char speed;
card->scr.sda_spec3 = 0;
card->sw_caps.sd3_bus_mode = 0;
card->sw_caps.sd3_drv_type = 0;
- if (cccr_vsn >= SDIO_CCCR_REV_3_00) {
+ if (cccr_vsn >= SDIO_CCCR_REV_3_00 && uhs) {
card->scr.sda_spec3 = 1;
ret = mmc_io_rw_direct(card, 0, 0,
SDIO_CCCR_UHS, 0, &data);
/*
* Read the common registers.
*/
- err = sdio_read_cccr(card);
+ err = sdio_read_cccr(card, ocr);
if (err)
goto remove;
}
set_current_state(TASK_INTERRUPTIBLE);
- if (host->caps & MMC_CAP_SDIO_IRQ)
+ if (host->caps & MMC_CAP_SDIO_IRQ) {
+ mmc_host_clk_hold(host);
host->ops->enable_sdio_irq(host, 1);
+ mmc_host_clk_release(host);
+ }
if (!kthread_should_stop())
schedule_timeout(period);
set_current_state(TASK_RUNNING);
} while (!kthread_should_stop());
- if (host->caps & MMC_CAP_SDIO_IRQ)
+ if (host->caps & MMC_CAP_SDIO_IRQ) {
+ mmc_host_clk_hold(host);
host->ops->enable_sdio_irq(host, 0);
+ mmc_host_clk_release(host);
+ }
pr_debug("%s: IRQ thread exiting with code %d\n",
mmc_hostname(host), ret);
config MMC_CB710
tristate "ENE CB710 MMC/SD Interface support"
depends on PCI
- select MISC_DEVICES
select CB710_CORE
help
This option enables support for MMC/SD part of ENE CB710/720 Flash
host->data_status = 0;
if (host->need_reset) {
+ iflags = atmci_readl(host, ATMCI_IMR);
+ iflags &= (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB);
atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
atmci_writel(host, ATMCI_MR, host->mode_reg);
if (host->caps.has_cfg_reg)
atmci_writel(host, ATMCI_CFG, host->cfg_reg);
+ atmci_writel(host, ATMCI_IER, iflags);
host->need_reset = false;
}
atmci_writel(host, ATMCI_SDCR, slot->sdc_reg);
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
host->dir_status = DW_MCI_SEND_STATUS;
if (dw_mci_submit_data_dma(host, data)) {
+ int flags = SG_MITER_ATOMIC;
+ if (host->data->flags & MMC_DATA_READ)
+ flags |= SG_MITER_TO_SG;
+ else
+ flags |= SG_MITER_FROM_SG;
+
+ sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
host->sg = data->sg;
- host->pio_offset = 0;
host->part_buf_start = 0;
host->part_buf_count = 0;
* generates a block interrupt, hence setting
* the scatter-gather pointer to NULL.
*/
+ sg_miter_stop(&host->sg_miter);
host->sg = NULL;
ctrl = mci_readl(host, CTRL);
ctrl |= SDMMC_CTRL_FIFO_RESET;
static void dw_mci_read_data_pio(struct dw_mci *host)
{
- struct scatterlist *sg = host->sg;
- void *buf = sg_virt(sg);
- unsigned int offset = host->pio_offset;
+ struct sg_mapping_iter *sg_miter = &host->sg_miter;
+ void *buf;
+ unsigned int offset;
struct mmc_data *data = host->data;
int shift = host->data_shift;
u32 status;
unsigned int nbytes = 0, len;
+ unsigned int remain, fcnt;
do {
- len = host->part_buf_count +
- (SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift);
- if (offset + len <= sg->length) {
+ if (!sg_miter_next(sg_miter))
+ goto done;
+
+ host->sg = sg_miter->__sg;
+ buf = sg_miter->addr;
+ remain = sg_miter->length;
+ offset = 0;
+
+ do {
+ fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
+ << shift) + host->part_buf_count;
+ len = min(remain, fcnt);
+ if (!len)
+ break;
dw_mci_pull_data(host, (void *)(buf + offset), len);
-
offset += len;
nbytes += len;
-
- if (offset == sg->length) {
- flush_dcache_page(sg_page(sg));
- host->sg = sg = sg_next(sg);
- if (!sg)
- goto done;
-
- offset = 0;
- buf = sg_virt(sg);
- }
- } else {
- unsigned int remaining = sg->length - offset;
- dw_mci_pull_data(host, (void *)(buf + offset),
- remaining);
- nbytes += remaining;
-
- flush_dcache_page(sg_page(sg));
- host->sg = sg = sg_next(sg);
- if (!sg)
- goto done;
-
- offset = len - remaining;
- buf = sg_virt(sg);
- dw_mci_pull_data(host, buf, offset);
- nbytes += offset;
- }
+ remain -= len;
+ } while (remain);
+ sg_miter->consumed = offset;
status = mci_readl(host, MINTSTS);
mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
if (status & DW_MCI_DATA_ERROR_FLAGS) {
host->data_status = status;
data->bytes_xfered += nbytes;
+ sg_miter_stop(sg_miter);
+ host->sg = NULL;
smp_wmb();
set_bit(EVENT_DATA_ERROR, &host->pending_events);
return;
}
} while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/
- host->pio_offset = offset;
data->bytes_xfered += nbytes;
+
+ if (!remain) {
+ if (!sg_miter_next(sg_miter))
+ goto done;
+ sg_miter->consumed = 0;
+ }
+ sg_miter_stop(sg_miter);
return;
done:
data->bytes_xfered += nbytes;
+ sg_miter_stop(sg_miter);
+ host->sg = NULL;
smp_wmb();
set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
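
For reference, the sg_mapping_iter contract the PIO rework above relies on, as a minimal generic sketch (fifo_buf and bytes_avail are illustrative stand-ins):

struct sg_mapping_iter miter;

/* SG_MITER_TO_SG: we write into the sg list, i.e. a read from the card */
sg_miter_start(&miter, sg, sg_len, SG_MITER_ATOMIC | SG_MITER_TO_SG);
while (sg_miter_next(&miter)) {
	/* miter.addr maps up to miter.length contiguous bytes */
	size_t n = min_t(size_t, miter.length, bytes_avail);
	memcpy(miter.addr, fifo_buf, n);
	miter.consumed = n;	/* report back what was actually used */
}
sg_miter_stop(&miter);		/* unmap, flush cache if needed */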
static void dw_mci_write_data_pio(struct dw_mci *host)
{
- struct scatterlist *sg = host->sg;
- void *buf = sg_virt(sg);
- unsigned int offset = host->pio_offset;
+ struct sg_mapping_iter *sg_miter = &host->sg_miter;
+ void *buf;
+ unsigned int offset;
struct mmc_data *data = host->data;
int shift = host->data_shift;
u32 status;
unsigned int nbytes = 0, len;
+ unsigned int fifo_depth = host->fifo_depth;
+ unsigned int remain, fcnt;
do {
- len = ((host->fifo_depth -
- SDMMC_GET_FCNT(mci_readl(host, STATUS))) << shift)
- - host->part_buf_count;
- if (offset + len <= sg->length) {
+ if (!sg_miter_next(sg_miter))
+ goto done;
+
+ host->sg = sg_miter->__sg;
+ buf = sg_miter->addr;
+ remain = sg_miter->length;
+ offset = 0;
+
+ do {
+ fcnt = ((fifo_depth -
+ SDMMC_GET_FCNT(mci_readl(host, STATUS)))
+ << shift) - host->part_buf_count;
+ len = min(remain, fcnt);
+ if (!len)
+ break;
host->push_data(host, (void *)(buf + offset), len);
-
offset += len;
nbytes += len;
- if (offset == sg->length) {
- host->sg = sg = sg_next(sg);
- if (!sg)
- goto done;
-
- offset = 0;
- buf = sg_virt(sg);
- }
- } else {
- unsigned int remaining = sg->length - offset;
-
- host->push_data(host, (void *)(buf + offset),
- remaining);
- nbytes += remaining;
-
- host->sg = sg = sg_next(sg);
- if (!sg)
- goto done;
-
- offset = len - remaining;
- buf = sg_virt(sg);
- host->push_data(host, (void *)buf, offset);
- nbytes += offset;
- }
+ remain -= len;
+ } while (remain);
+ sg_miter->consumed = offset;
status = mci_readl(host, MINTSTS);
mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
if (status & DW_MCI_DATA_ERROR_FLAGS) {
host->data_status = status;
data->bytes_xfered += nbytes;
+ sg_miter_stop(sg_miter);
+ host->sg = NULL;
smp_wmb();
return;
}
} while (status & SDMMC_INT_TXDR); /* if TXDR write again */
- host->pio_offset = offset;
data->bytes_xfered += nbytes;
+
+ if (!remain) {
+ if (!sg_miter_next(sg_miter))
+ goto done;
+ sg_miter->consumed = 0;
+ }
+ sg_miter_stop(sg_miter);
return;
done:
data->bytes_xfered += nbytes;
+ sg_miter_stop(sg_miter);
+ host->sg = NULL;
smp_wmb();
set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
* block interrupt, hence setting the
* scatter-gather pointer to NULL.
*/
+ sg_miter_stop(&host->sg_miter);
host->sg = NULL;
ctrl = mci_readl(host, CTRL);
const int j = i * 2;
u32 mask;
- mask = mmc_vddrange_to_ocrmask(voltage_ranges[j],
- voltage_ranges[j + 1]);
+ mask = mmc_vddrange_to_ocrmask(be32_to_cpu(voltage_ranges[j]),
+ be32_to_cpu(voltage_ranges[j + 1]));
if (!mask) {
ret = -EINVAL;
dev_err(dev, "OF: voltage-range #%d is invalid\n", i);
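
The be32_to_cpu fix above matters because device-tree cells are stored big-endian in the blob; reading them raw only happened to work on big-endian hosts. A minimal sketch of the assumed parsing (property name as in the MMC "voltage-ranges" binding, hedged):

const __be32 *ranges;	/* raw big-endian cells */
int len;

ranges = of_get_property(np, "voltage-ranges", &len);
if (ranges && len >= 2 * (int)sizeof(__be32)) {
	u32 min_mv = be32_to_cpu(ranges[0]);	/* convert each cell */
	u32 max_mv = be32_to_cpu(ranges[1]);
	mask = mmc_vddrange_to_ocrmask(min_mv, max_mv);
}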
int base = reg & ~0x3;
int shift = (reg & 0x3) * 8;
u8 ret = (in_be32(host->ioaddr + base) >> shift) & 0xff;
+
+ /*
+ * "DMA select" locates at offset 0x28 in SD specification, but on
+ * P5020 or P3041, it locates at 0x29.
+ */
+ if (reg == SDHCI_HOST_CONTROL) {
+ u32 dma_bits;
+
+ dma_bits = in_be32(host->ioaddr + reg);
+ /* DMA select is 22,23 bits in Protocol Control Register */
+ dma_bits = (dma_bits >> 5) & SDHCI_CTRL_DMA_MASK;
+
+ /* fixup the result */
+ ret &= ~SDHCI_CTRL_DMA_MASK;
+ ret |= dma_bits;
+ }
+
return ret;
}
static void esdhc_writeb(struct sdhci_host *host, u8 val, int reg)
{
+ /*
+ * "DMA select" location is offset 0x28 in SD specification, but on
+ * P5020 or P3041, it's located at 0x29.
+ */
+ if (reg == SDHCI_HOST_CONTROL) {
+ u32 dma_bits;
+
+ /* DMA select is 22,23 bits in Protocol Control Register */
+ dma_bits = (val & SDHCI_CTRL_DMA_MASK) << 5;
+ clrsetbits_be32(host->ioaddr + reg, SDHCI_CTRL_DMA_MASK << 5,
+ dma_bits);
+ val &= ~SDHCI_CTRL_DMA_MASK;
+ val |= in_be32(host->ioaddr + reg) & SDHCI_CTRL_DMA_MASK;
+ }
+
/* Prevent SDHCI core from writing reserved bits (e.g. HISPD). */
if (reg == SDHCI_HOST_CONTROL)
val &= ~ESDHC_HOST_CONTROL_RES;
static int mfd_sdio_probe_slot(struct sdhci_pci_slot *slot)
{
- slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD;
+ slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE;
return 0;
}
* sdhci-pltfm.c Support for SDHCI platform devices
* Copyright (c) 2009 Intel Corporation
*
- * Copyright (c) 2007 Freescale Semiconductor, Inc.
+ * Copyright (c) 2007, 2011 Freescale Semiconductor, Inc.
* Copyright (c) 2009 MontaVista Software, Inc.
*
* Authors: Xiaobo Xie <X.Xie@freescale.com>
if (sdhci_of_wp_inverted(np))
host->quirks |= SDHCI_QUIRK_INVERTED_WRITE_PROTECT;
+ if (of_device_is_compatible(np, "fsl,p2020-rev1-esdhc"))
+ host->quirks |= SDHCI_QUIRK_BROKEN_DMA;
+
+ if (of_device_is_compatible(np, "fsl,p2020-esdhc") ||
+ of_device_is_compatible(np, "fsl,p1010-esdhc") ||
+ of_device_is_compatible(np, "fsl,mpc8536-esdhc"))
+ host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
+
clk = of_get_property(np, "clock-frequency", &size);
if (clk && size == sizeof(*clk) && *clk)
pltfm_host->clock = be32_to_cpup(clk);
if (ret < 0)
goto clean_up2;
- mmc_add_host(mmc);
+ INIT_DELAYED_WORK(&host->timeout_work, mmcif_timeout_work);
sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
}
ret = request_threaded_irq(irq[1], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:int", host);
if (ret) {
- free_irq(irq[0], host);
dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n");
- goto clean_up3;
+ goto clean_up4;
}
- INIT_DELAYED_WORK(&host->timeout_work, mmcif_timeout_work);
-
- mmc_detect_change(host->mmc, 0);
+ ret = mmc_add_host(mmc);
+ if (ret < 0)
+ goto clean_up5;
dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION);
dev_dbg(&pdev->dev, "chip ver H'%04x\n",
sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff);
return ret;
+clean_up5:
+ free_irq(irq[1], host);
+clean_up4:
+ free_irq(irq[0], host);
clean_up3:
- mmc_remove_host(mmc);
pm_runtime_suspend(&pdev->dev);
clean_up2:
pm_runtime_disable(&pdev->dev);
#include <linux/mmc/tmio.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
-#include <linux/spinlock.h>
#include <linux/scatterlist.h>
+#include <linux/spinlock.h>
/* Definitions for values the CTRL_SDIO_STATUS register can take. */
#define TMIO_SDIO_STAT_IOIRQ 0x0001
void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable);
void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata);
void tmio_mmc_release_dma(struct tmio_mmc_host *host);
+void tmio_mmc_abort_dma(struct tmio_mmc_host *host);
#else
static inline void tmio_mmc_start_dma(struct tmio_mmc_host *host,
struct mmc_data *data)
static inline void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
}
+
+static inline void tmio_mmc_abort_dma(struct tmio_mmc_host *host)
+{
+}
#endif
#ifdef CONFIG_PM
#endif
}
+void tmio_mmc_abort_dma(struct tmio_mmc_host *host)
+{
+ tmio_mmc_enable_dma(host, false);
+
+ if (host->chan_rx)
+ dmaengine_terminate_all(host->chan_rx);
+ if (host->chan_tx)
+ dmaengine_terminate_all(host->chan_tx);
+
+ tmio_mmc_enable_dma(host, true);
+}
+
static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
{
struct scatterlist *sg = host->sg_ptr, *sg_tmp;
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
-#include <linux/workqueue.h>
#include <linux/spinlock.h>
+#include <linux/workqueue.h>
#include "tmio_mmc.h"
/* Ready for new calls */
host->mrq = NULL;
+ tmio_mmc_abort_dma(host);
mmc_request_done(host->mmc, mrq);
}
host->mrq = NULL;
spin_unlock_irqrestore(&host->lock, flags);
+ if (mrq->cmd->error || (mrq->data && mrq->data->error))
+ tmio_mmc_abort_dma(host);
+
mmc_request_done(host->mmc, mrq);
}
for (i = 0; i < dlc; i++)
cc770_write_reg(priv, msgobj[mo].data[i], cf->data[i]);
+ /* Store echo skb before starting the transfer */
+ can_put_echo_skb(skb, dev, 0);
+
cc770_write_reg(priv, msgobj[mo].ctrl1,
RMTPND_RES | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC);
stats->tx_bytes += dlc;
- can_put_echo_skb(skb, dev, 0);
/*
* HM: We had some cases of repeated IRQs so make sure the
#define CC770_IOSIZE 0x20
#define CC770_IOSIZE_INDIRECT 0x02
+/* Spinlock for cc770_isa_port_write_reg_indirect
+ * and cc770_isa_port_read_reg_indirect
+ */
+static DEFINE_SPINLOCK(cc770_isa_port_lock);
+
static struct platform_device *cc770_isa_devs[MAXDEV];
static u8 cc770_isa_mem_read_reg(const struct cc770_priv *priv, int reg)
int reg)
{
unsigned long base = (unsigned long)priv->reg_base;
+ unsigned long flags;
+ u8 val;
+ spin_lock_irqsave(&cc770_isa_port_lock, flags);
outb(reg, base);
- return inb(base + 1);
+ val = inb(base + 1);
+ spin_unlock_irqrestore(&cc770_isa_port_lock, flags);
+
+ return val;
}
static void cc770_isa_port_write_reg_indirect(const struct cc770_priv *priv,
int reg, u8 val)
{
unsigned long base = (unsigned long)priv->reg_base;
+ unsigned long flags;
+ spin_lock_irqsave(&cc770_isa_port_lock, flags);
outb(reg, base);
outb(val, base + 1);
+ spin_unlock_irqrestore(&cc770_isa_port_lock, flags);
}
static int __devinit cc770_isa_probe(struct platform_device *pdev)
(FLEXCAN_ESR_TWRN_INT | FLEXCAN_ESR_RWRN_INT | FLEXCAN_ESR_BOFF_INT)
#define FLEXCAN_ESR_ERR_ALL \
(FLEXCAN_ESR_ERR_BUS | FLEXCAN_ESR_ERR_STATE)
+#define FLEXCAN_ESR_ALL_INT \
+ (FLEXCAN_ESR_TWRN_INT | FLEXCAN_ESR_RWRN_INT | \
+ FLEXCAN_ESR_BOFF_INT | FLEXCAN_ESR_ERR_INT)
/* FLEXCAN interrupt flag register (IFLAG) bits */
#define FLEXCAN_TX_BUF_ID 8
reg_iflag1 = flexcan_read(&regs->iflag1);
reg_esr = flexcan_read(&regs->esr);
- flexcan_write(FLEXCAN_ESR_ERR_INT, &regs->esr); /* ACK err IRQ */
+ /* ACK all bus error and state change IRQ sources */
+ if (reg_esr & FLEXCAN_ESR_ALL_INT)
+ flexcan_write(reg_esr & FLEXCAN_ESR_ALL_INT, &regs->esr);
/*
* schedule NAPI in case of:
#define PCH_IF_CREQ_BUSY BIT(15)
#define PCH_STATUS_INT 0x8000
+#define PCH_RP 0x00008000
#define PCH_REC 0x00007f00
#define PCH_TEC 0x000000ff
priv->can.can_stats.error_passive++;
state = CAN_STATE_ERROR_PASSIVE;
cf->can_id |= CAN_ERR_CRTL;
- if (((errc & PCH_REC) >> 8) > 127)
+ if (errc & PCH_RP)
cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
if ((errc & PCH_TEC) > 127)
cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
#define DRV_NAME "peak_pci"
struct peak_pci_chan {
- void __iomem *cfg_base; /* Common for all channels */
- struct net_device *next_dev; /* Chain of network devices */
- u16 icr_mask; /* Interrupt mask for fast ack */
+ void __iomem *cfg_base; /* Common for all channels */
+ struct net_device *prev_dev; /* Chain of network devices */
+ u16 icr_mask; /* Interrupt mask for fast ack */
};
#define PEAK_PCI_CAN_CLOCK (16000000 / 2)
{
struct sja1000_priv *priv;
struct peak_pci_chan *chan;
- struct net_device *dev, *dev0 = NULL;
+ struct net_device *dev;
void __iomem *cfg_base, *reg_base;
u16 sub_sys_id, icr;
int i, err, channels;
}
/* Create chain of SJA1000 devices */
- if (i == 0)
- dev0 = dev;
- else
- chan->next_dev = dev;
+ chan->prev_dev = pci_get_drvdata(pdev);
+ pci_set_drvdata(pdev, dev);
dev_info(&pdev->dev,
"%s at reg_base=0x%p cfg_base=0x%p irq=%d\n",
dev->name, priv->reg_base, chan->cfg_base, dev->irq);
}
- pci_set_drvdata(pdev, dev0);
-
/* Enable interrupts */
writew(icr, cfg_base + PITA_ICR + 2);
/* Disable interrupts */
writew(0x0, cfg_base + PITA_ICR + 2);
- for (dev = dev0; dev; dev = chan->next_dev) {
+ for (dev = pci_get_drvdata(pdev); dev; dev = chan->prev_dev) {
unregister_sja1000dev(dev);
free_sja1000dev(dev);
priv = netdev_priv(dev);
chan = priv->priv;
- dev = chan->next_dev;
}
pci_iounmap(pdev, reg_base);
static void __devexit peak_pci_remove(struct pci_dev *pdev)
{
- struct net_device *dev = pci_get_drvdata(pdev); /* First device */
+ struct net_device *dev = pci_get_drvdata(pdev); /* Last device */
struct sja1000_priv *priv = netdev_priv(dev);
struct peak_pci_chan *chan = priv->priv;
void __iomem *cfg_base = chan->cfg_base;
dev_info(&pdev->dev, "removing device %s\n", dev->name);
unregister_sja1000dev(dev);
free_sja1000dev(dev);
- dev = chan->next_dev;
+ dev = chan->prev_dev;
if (!dev)
break;
priv = netdev_priv(dev);
}
}
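
Net effect of replacing next_dev with prev_dev above, sketched for a board probed with channels 0..3 (illustrative, not from the patch):

/*
 * pci_get_drvdata(pdev) -> dev3 --prev_dev--> dev2 --> dev1 --> dev0 -> NULL
 *
 * Probe prepends each new device at the head, so the error path and
 * peak_pci_remove() walk and free channels in reverse creation order
 * without any dev0 bookkeeping.
 */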
- netif_receive_skb(skb);
+ netif_rx(skb);
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+
return 0;
}
err = usb_submit_urb(urb, GFP_KERNEL);
if (err) {
- if (err == -ENODEV)
- netif_device_detach(dev->netdev);
-
usb_unanchor_urb(urb);
usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf,
urb->transfer_dma);
err = usb_submit_urb(dev->intr_urb, GFP_KERNEL);
if (err) {
- if (err == -ENODEV)
- netif_device_detach(dev->netdev);
-
dev_warn(netdev->dev.parent, "intr URB submit failed: %d\n",
err);
return 0;
failed:
- if (err == -ENODEV)
- netif_device_detach(dev->netdev);
-
dev_warn(netdev->dev.parent, "couldn't submit control: %d\n", err);
return err;
skb = build_skb(data);
if (likely(skb)) {
-
#ifdef BNX2X_STOP_ON_ERROR
if (pad + len > fp->rx_buf_size) {
BNX2X_ERR("skb_put is about to fail... "
return;
}
-
+ kfree(new_data);
drop:
/* drop the packet and keep the buffer in the bin */
DP(NETIF_MSG_RX_STATUS,
flash_attr = kzalloc(sizeof(struct bfa_flash_attr), GFP_KERNEL);
if (!flash_attr)
- return -ENOMEM;
+ return 0;
fcomp.bnad = bnad;
fcomp.comp_status = 0;
if (ret != BFA_STATUS_OK) {
spin_unlock_irqrestore(&bnad->bna_lock, flags);
kfree(flash_attr);
- goto out_err;
+ return 0;
}
spin_unlock_irqrestore(&bnad->bna_lock, flags);
wait_for_completion(&fcomp.comp);
}
kfree(flash_attr);
return flash_part;
-out_err:
- return -EINVAL;
}
static int
/* Query the flash partition based on the offset */
flash_part = bnad_get_flash_partition_by_offset(bnad,
eeprom->offset, &base_offset);
- if (flash_part <= 0)
+ if (flash_part == 0)
return -EFAULT;
fcomp.bnad = bnad;
/* Query the flash partition based on the offset */
flash_part = bnad_get_flash_partition_by_offset(bnad,
eeprom->offset, &base_offset);
- if (flash_part <= 0)
+ if (flash_part == 0)
return -EFAULT;
fcomp.bnad = bnad;
be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
{
struct be_adapter *adapter = netdev_priv(netdev);
- char file_name[ETHTOOL_FLASH_MAX_FILENAME];
- file_name[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0;
- strcpy(file_name, efl->data);
-
- return be_load_fw(adapter, file_name);
+ return be_load_fw(adapter, efl->data);
}
static int
phy_id = 0;
}
- snprintf(phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id);
+ snprintf(phy_name, sizeof(phy_name), PHY_ID_FMT, mdio_bus_id, phy_id);
phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link, 0,
fep->phy_interface);
if (IS_ERR(phy_dev)) {
e1000_setup_rctl(adapter);
e1000_set_rx_mode(netdev);
+ rctl = er32(RCTL);
+
/* turn on all-multi mode if wake on multicast is enabled */
- if (wufc & E1000_WUFC_MC) {
- rctl = er32(RCTL);
+ if (wufc & E1000_WUFC_MC)
rctl |= E1000_RCTL_MPE;
- ew32(RCTL, rctl);
- }
+
+ /* enable receives in the hardware */
+ ew32(RCTL, rctl | E1000_RCTL_EN);
if (hw->mac_type >= e1000_82540) {
ctrl = er32(CTRL);
vf_devfn = pdev->devfn + 0x80;
pvfdev = pci_get_device(hw->vendor_id, device_id, NULL);
while (pvfdev) {
- if (pvfdev->devfn == vf_devfn)
+ if (pvfdev->devfn == vf_devfn &&
+ (pvfdev->bus->number >= pdev->bus->number))
vfs_found++;
vf_devfn += vf_stride;
pvfdev = pci_get_device(hw->vendor_id,
################################################################################
#
# Intel(R) 82576 Virtual Function Linux driver
-# Copyright(c) 2009 - 2010 Intel Corporation.
+# Copyright(c) 2009 - 2012 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 - 2010 Intel Corporation.
+ Copyright(c) 2009 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 - 2010 Intel Corporation.
+ Copyright(c) 2009 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 - 2010 Intel Corporation.
+ Copyright(c) 2009 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 - 2010 Intel Corporation.
+ Copyright(c) 2009 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
static const char igbvf_driver_string[] =
"Intel(R) Gigabit Virtual Function Network Driver";
static const char igbvf_copyright[] =
- "Copyright (c) 2009 - 2011 Intel Corporation.";
+ "Copyright (c) 2009 - 2012 Intel Corporation.";
static int igbvf_poll(struct napi_struct *napi, int budget);
static void igbvf_reset(struct igbvf_adapter *);
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 - 2010 Intel Corporation.
+ Copyright(c) 2009 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 - 2010 Intel Corporation.
+ Copyright(c) 2009 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 - 2010 Intel Corporation.
+ Copyright(c) 2009 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
################################################################################
#
# Intel 10 Gigabit PCI Express Linux driver
-# Copyright(c) 1999 - 2010 Intel Corporation.
+# Copyright(c) 1999 - 2012 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
-s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packtetbuf_num);
+s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num);
s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw);
s32 ixgbe_validate_mac_addr(u8 *mac_addr);
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
{
u8 err = 0;
+ u8 prio_tc[MAX_USER_PRIORITY] = {0};
+ int i;
struct ixgbe_adapter *adapter = netdev_priv(netdev);
/* Fail command if not in CEE mode */
if (!!state != !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
return err;
- if (state > 0)
+ if (state > 0) {
err = ixgbe_setup_tc(netdev, adapter->dcb_cfg.num_tcs.pg_tcs);
- else
+ ixgbe_dcb_unpack_map(&adapter->dcb_cfg, DCB_TX_CONFIG, prio_tc);
+ } else {
err = ixgbe_setup_tc(netdev, 0);
+ }
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ netdev_set_prio_tc_map(netdev, i, prio_tc[i]);
return err;
}
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
sizeof(((struct rtnl_link_stats64 *)0)->m), \
offsetof(struct rtnl_link_stats64, m)
-static struct ixgbe_stats ixgbe_gstrings_stats[] = {
+static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
{"rx_packets", IXGBE_NETDEV_STAT(rx_packets)},
{"tx_packets", IXGBE_NETDEV_STAT(tx_packets)},
{"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)},
#endif /* IXGBE_FCOE */
};
-#define IXGBE_QUEUE_STATS_LEN \
- ((((struct ixgbe_adapter *)netdev_priv(netdev))->num_tx_queues + \
- ((struct ixgbe_adapter *)netdev_priv(netdev))->num_rx_queues) * \
+/* ixgbe allocates num_tx_queues and num_rx_queues symmetrically, so
+ * IXGBE_NUM_RX_QUEUES is defined to evaluate to num_tx_queues. This
+ * is needed because there is no good way to get the maximum number
+ * of rx queues with CONFIG_RPS disabled.
+ */
+#define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues
+
+#define IXGBE_QUEUE_STATS_LEN ( \
+ (netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \
(sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
#define IXGBE_PB_STATS_LEN ( \
- (((struct ixgbe_adapter *)netdev_priv(netdev))->flags & \
- IXGBE_FLAG_DCB_ENABLED) ? \
- (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
- sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
- sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
- sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
- / sizeof(u64) : 0)
+ (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
+ sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
+ sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
+ sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
+ / sizeof(u64))
#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
IXGBE_PB_STATS_LEN + \
IXGBE_QUEUE_STATS_LEN)
data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
- for (j = 0; j < adapter->num_tx_queues; j++) {
+ for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
ring = adapter->tx_ring[j];
+ if (!ring) {
+ data[i] = 0;
+ data[i+1] = 0;
+ i += 2;
+ continue;
+ }
+
do {
start = u64_stats_fetch_begin_bh(&ring->syncp);
data[i] = ring->stats.packets;
} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
i += 2;
}
- for (j = 0; j < adapter->num_rx_queues; j++) {
+ for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
ring = adapter->rx_ring[j];
+ if (!ring) {
+ data[i] = 0;
+ data[i+1] = 0;
+ i += 2;
+ continue;
+ }
+
do {
start = u64_stats_fetch_begin_bh(&ring->syncp);
data[i] = ring->stats.packets;
} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
i += 2;
}
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
- for (j = 0; j < MAX_TX_PACKET_BUFFERS; j++) {
- data[i++] = adapter->stats.pxontxc[j];
- data[i++] = adapter->stats.pxofftxc[j];
- }
- for (j = 0; j < MAX_RX_PACKET_BUFFERS; j++) {
- data[i++] = adapter->stats.pxonrxc[j];
- data[i++] = adapter->stats.pxoffrxc[j];
- }
+
+ for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
+ data[i++] = adapter->stats.pxontxc[j];
+ data[i++] = adapter->stats.pxofftxc[j];
+ }
+ for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
+ data[i++] = adapter->stats.pxonrxc[j];
+ data[i++] = adapter->stats.pxoffrxc[j];
}
}
static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
u8 *data)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
char *p = (char *)data;
int i;
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
- for (i = 0; i < adapter->num_tx_queues; i++) {
+ for (i = 0; i < netdev->num_tx_queues; i++) {
sprintf(p, "tx_queue_%u_packets", i);
p += ETH_GSTRING_LEN;
sprintf(p, "tx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
}
- for (i = 0; i < adapter->num_rx_queues; i++) {
+ for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
sprintf(p, "rx_queue_%u_packets", i);
p += ETH_GSTRING_LEN;
sprintf(p, "rx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
}
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
- for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
- sprintf(p, "tx_pb_%u_pxon", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_pb_%u_pxoff", i);
- p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < MAX_RX_PACKET_BUFFERS; i++) {
- sprintf(p, "rx_pb_%u_pxon", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_pb_%u_pxoff", i);
- p += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
+ sprintf(p, "tx_pb_%u_pxon", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_pb_%u_pxoff", i);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
+ sprintf(p, "rx_pb_%u_pxon", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_pb_%u_pxoff", i);
+ p += ETH_GSTRING_LEN;
}
/* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
break;
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
__stringify(BUILD) "-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
- "Copyright (c) 1999-2011 Intel Corporation.";
+ "Copyright (c) 1999-2012 Intel Corporation.";
static const struct ixgbe_info *ixgbe_info_tbl[] = {
[board_82598] = &ixgbe_82598_info,
/*
* we must limit the number of descriptors so that the
* total size of max desc * buf_len is not greater
- * than 65535
+ * than 65536
*/
if (ring_is_ps_enabled(ring)) {
-#if (MAX_SKB_FRAGS > 16)
+#if (PAGE_SIZE < 8192)
rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
-#elif (MAX_SKB_FRAGS > 8)
+#elif (PAGE_SIZE < 16384)
rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
-#elif (MAX_SKB_FRAGS > 4)
+#elif (PAGE_SIZE < 32768)
rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
#else
rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
#endif
} else {
- if (rx_buf_len < IXGBE_RXBUFFER_4K)
+ if (rx_buf_len <= IXGBE_RXBUFFER_4K)
rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
- else if (rx_buf_len < IXGBE_RXBUFFER_8K)
+ else if (rx_buf_len <= IXGBE_RXBUFFER_8K)
rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
else
rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
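
A worked check of the 65536-byte bound cited in the comment above, across the branches just shown:

/*
 * packet-split: PAGE_SIZE  4096 -> MAXDESC_16: 16 * 4096  = 65536
 *               PAGE_SIZE  8192 -> MAXDESC_8 :  8 * 8192  = 65536
 *               PAGE_SIZE 16384 -> MAXDESC_4 :  4 * 16384 = 65536
 * non-ps:       buf_len <= 4096 -> MAXDESC_16: 16 * 4096  = 65536
 */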
IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);
vf_shift = adapter->num_vfs % 32;
- reg_offset = (adapter->num_vfs > 32) ? 1 : 0;
+ reg_offset = (adapter->num_vfs >= 32) ? 1 : 0;
/* Enable only the PF's pool for Tx/Rx */
IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
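
The boundary case behind the ">= 32" fix above, worked out (background reasoning, not patch text):

/*
 * num_vfs == 32: vf_shift = 32 % 32 = 0, and the PF's pool is pool 32,
 * i.e. bit 0 of the *second* enable register. ">= 32" selects
 * VFRE(1)/VFTE(1); the old "> 32" picked VFRE(0) bit 0, which is VF 0's.
 */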
adapter->num_tx_queues = 1;
done:
+ if ((adapter->netdev->reg_state == NETREG_UNREGISTERED) ||
+ (adapter->netdev->reg_state == NETREG_UNREGISTERING))
+ return 0;
+
/* Notify the stack of the (possibly) reduced queue counts. */
netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
return netif_set_real_num_rx_queues(adapter->netdev,