crashkernel=<range1>:<size1>[,<range2>:<size2>,...][@offset]
range=start-[end]
+ 'start' is inclusive and 'end' is exclusive.
+
For example:
crashkernel=512M-2G:64M,2G-:128M
1) if the RAM is smaller than 512M, then don't reserve anything
(this is the "rescue" case)
- 2) if the RAM size is between 512M and 2G, then reserve 64M
+ 2) if the RAM size is between 512M and 2G (exclusive), then reserve 64M
3) if the RAM size is larger than 2G, then reserve 128M
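To make the selection rule concrete, the logic can be sketched roughly as
follows (a minimal illustration in C, not the kernel's actual crashkernel=
parser; the struct and function names here are hypothetical):

    struct crash_range {
            unsigned long long start;  /* inclusive lower bound */
            unsigned long long end;    /* exclusive upper bound; 0 = open-ended */
            unsigned long long size;   /* bytes to reserve for this range */
    };

    /* Return the reservation size for the first range containing 'ram'. */
    static unsigned long long pick_crash_size(unsigned long long ram,
                                              const struct crash_range *r,
                                              int n)
    {
            int i;

            for (i = 0; i < n; i++) {
                    if (ram >= r[i].start &&
                        (r[i].end == 0 || ram < r[i].end))
                            return r[i].size;   /* e.g. 512M-2G:64M -> 64M */
            }
            return 0;   /* RAM below every range: reserve nothing ("rescue" case) */
    }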
+
Boot into System Kernel
=======================
CPUSETS
P: Paul Jackson
-P: Simon Derr
+P: Paul Menage
M: pj@sgi.com
-M: simon.derr@bull.net
+M: menage@google.com
L: linux-kernel@vger.kernel.org
W: http://www.bullopensource.org/cpuset/
S: Supported
L: general@lists.openfabrics.org
S: Supported
+EMBEDDED LINUX
+P: Paul Gortmaker
+M: paul.gortmaker@windriver.com
+P: David Woodhouse
+M: dwmw2@infradead.org
+L: linux-embedded@vger.kernel.org
+S: Maintained
+
EMULEX LPFC FC SCSI DRIVER
P: James Smart
M: james.smart@emulex.com
obj-$(CONFIG_MMU) += \
pgalloc.o highmem.o fault.o extable.o cache-page.o tlb-flush.o tlb-miss.o \
- mmu-context.o dma-alloc.o unaligned.o elf-fdpic.o
+ mmu-context.o dma-alloc.o elf-fdpic.o
-
/*
 * kvm_ia64.c: Basic KVM support on Itanium series processors
*
if (itc_diff < 0)
itc_diff = -itc_diff;
- expires = div64_64(itc_diff, cyc_per_usec);
+ expires = div64_u64(itc_diff, cyc_per_usec);
kt = ktime_set(0, 1000 * expires);
vcpu->arch.ht_active = 1;
hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
DEFINE(TI_TASK, offsetof(struct thread_info, task));
DEFINE(TI_EXECDOMAIN, offsetof(struct thread_info, exec_domain));
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+ DEFINE(TI_PREEMPTCOUNT, offsetof(struct thread_info, preempt_count));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
return 0;
#include <asm/segment.h>
#include <asm/asm-offsets.h>
#include <asm/entry.h>
+#include <asm/unistd.h>
.text
RESTORE_SWITCH_STACK
rts
+ENTRY(ret_from_user_signal)
+ moveq #__NR_sigreturn,%d0
+ trap #0
+
+ENTRY(ret_from_user_rt_signal)
+ move #__NR_rt_sigreturn,%d0
+ trap #0
+
printk(KERN_INFO "DragonEngine II board support by Georges Menie\n");
#endif
#ifdef CONFIG_M5235EVB
- printk(KERN_INFO "Motorola M5235EVB support (C)2005 Syn-tech Systems, Inc. (Jate Sujjavanich)");
+ printk(KERN_INFO "Motorola M5235EVB support (C)2005 Syn-tech Systems, Inc. (Jate Sujjavanich)\n");
#endif
#ifdef DEBUG
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+void ret_from_user_signal(void);
+void ret_from_user_rt_signal(void);
asmlinkage int do_signal(sigset_t *oldset, struct pt_regs *regs);
/*
return err;
}
-static inline void push_cache (unsigned long vaddr)
-{
-}
-
static inline void *
get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
{
err |= copy_to_user (&frame->sc, &context, sizeof(context));
/* Set up to return from userspace. */
- err |= __put_user(frame->retcode, &frame->pretcode);
- /* moveq #,d0; trap #0 */
- err |= __put_user(0x70004e40 + (__NR_sigreturn << 16),
- (long *)(frame->retcode));
+ err |= __put_user((void *) ret_from_user_signal, &frame->pretcode);
if (err)
goto give_sigsegv;
- push_cache ((unsigned long) &frame->retcode);
-
/* Set up registers for signal handler */
wrusp ((unsigned long) frame);
regs->pc = (unsigned long) ka->sa.sa_handler;
err |= copy_to_user (&frame->uc.uc_sigmask, set, sizeof(*set));
/* Set up to return from userspace. */
- err |= __put_user(frame->retcode, &frame->pretcode);
- /* moveq #,d0; notb d0; trap #0 */
- err |= __put_user(0x70004600 + ((__NR_rt_sigreturn ^ 0xff) << 16),
- (long *)(frame->retcode + 0));
- err |= __put_user(0x4e40, (short *)(frame->retcode + 4));
+ err |= __put_user((void *) ret_from_user_rt_signal, &frame->pretcode);
if (err)
goto give_sigsegv;
- push_cache ((unsigned long) &frame->retcode);
-
/* Set up registers for signal handler */
wrusp ((unsigned long) frame);
regs->pc = (unsigned long) ka->sa.sa_handler;
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/ptrace.h>
+#include <linux/kallsyms.h>
#include <asm/setup.h>
#include <asm/fpu.h>
force_sig(SIGSEGV, current);
}
-
int kstack_depth_to_print = 48;
-void show_stack(struct task_struct *task, unsigned long *stack)
+static void __show_stack(struct task_struct *task, unsigned long *stack)
{
unsigned long *endstack, addr;
- extern char _start, _etext;
+ unsigned long *last_stack;
int i;
- if (!stack) {
- if (task)
- stack = (unsigned long *)task->thread.ksp;
- else
- stack = (unsigned long *)&stack;
- }
+ if (!stack)
+ stack = (unsigned long *)task->thread.ksp;
addr = (unsigned long) stack;
endstack = (unsigned long *) PAGE_ALIGN(addr);
printk(KERN_EMERG "Stack from %08lx:", (unsigned long)stack);
for (i = 0; i < kstack_depth_to_print; i++) {
- if (stack + 1 > endstack)
+ if (stack + 1 + i > endstack)
break;
if (i % 8 == 0)
printk("\n" KERN_EMERG " ");
- printk(" %08lx", *stack++);
+ printk(" %08lx", *(stack + i));
}
printk("\n");
- printk(KERN_EMERG "Call Trace:");
- i = 0;
- while (stack + 1 <= endstack) {
- addr = *stack++;
- /*
- * If the address is either in the text segment of the
- * kernel, or in the region which contains vmalloc'ed
- * memory, it *may* be the address of a calling
- * routine; if so, print it so that someone tracing
- * down the cause of the crash will be able to figure
- * out the call path that was taken.
- */
- if (((addr >= (unsigned long) &_start) &&
- (addr <= (unsigned long) &_etext))) {
- if (i % 4 == 0)
- printk("\n" KERN_EMERG " ");
- printk(" [<%08lx>]", addr);
- i++;
- }
+#ifdef CONFIG_FRAME_POINTER
+ printk(KERN_EMERG "Call Trace:\n");
+
+ last_stack = stack - 1;
+ while (stack <= endstack && stack > last_stack) {
+
+ addr = *(stack + 1);
+ printk(KERN_EMERG " [%08lx] ", addr);
+ print_symbol(KERN_CONT "%s\n", addr);
+
+ last_stack = stack;
+ stack = (unsigned long *)*stack;
}
printk("\n");
+#else
+ printk(KERN_EMERG "CONFIG_FRAME_POINTER disabled, no symbolic call trace\n");
+#endif
}
void bad_super_trap(struct frame *fp)
current->thread.esp0 = ssp;
}
-
/*
* The architecture-independent backtrace generator
*/
void dump_stack(void)
{
- unsigned long stack;
+ /*
+ * We need frame pointers for this little trick, which works as follows:
+ *
+ * +------------+ 0x00
+ * | Next SP | -> 0x0c
+ * +------------+ 0x04
+ * | Caller |
+ * +------------+ 0x08
+ * | Local vars | -> our stack var
+ * +------------+ 0x0c
+ * | Next SP | -> 0x18, that is what we pass to show_stack()
+ * +------------+ 0x10
+ * | Caller |
+ * +------------+ 0x14
+ * | Local vars |
+ * +------------+ 0x18
+ * | ... |
+ * +------------+
+ */
- show_stack(current, &stack);
-}
+ unsigned long *stack;
+ stack = (unsigned long *)&stack;
+ stack++;
+ __show_stack(current, stack);
+}
EXPORT_SYMBOL(dump_stack);
+void show_stack(struct task_struct *task, unsigned long *stack)
+{
+ if (!stack && !task)
+ dump_stack();
+ else
+ __show_stack(task, stack);
+}
+
#ifdef CONFIG_M68KFPU_EMU
asmlinkage void fpemu_signal(int signal, int code, void *addr)
{
_stext = . ;
TEXT_TEXT
SCHED_TEXT
+ LOCK_TEXT
*(.text.lock)
. = ALIGN(16); /* Exception table */
*(.rodata) *(.rodata.*)
*(__vermagic) /* Kernel version magic */
+ *(__markers_strings)
*(.rodata1)
*(.rodata.str1.1)
*(COMMON)
. = ALIGN(4) ;
_ebss = . ;
+ _end = . ;
} > BSS
}
/***************************************************************************/
-static void __init m5206_uart_init_line(int line, int irq)
+static void __init m5206e_uart_init_line(int line, int irq)
{
if (line == 0) {
writel(MCFSIM_ICR_LEVEL6 | MCFSIM_ICR_PRI1, MCF_MBAR + MCFSIM_UART1ICR);
/* Copy command line from FLASH to local buffer... */
memcpy(commandp, (char *) 0xf0004000, size);
commandp[size-1] = 0;
-#elif defined(CONFIG_MTD_KeyTechnology)
- /* Copy command line from FLASH to local buffer... */
- memcpy(commandp, (char *) 0xffe06000, size);
- commandp[size-1] = 0;
#elif defined(CONFIG_CANCam)
/* Copy command line from FLASH to local buffer... */
memcpy(commandp, (char *) 0xf0010000, size);
#include <asm/mcfuart.h>
#include <asm/mcfqspi.h>
+#ifdef CONFIG_MTD_PARTITIONS
+#include <linux/mtd/partitions.h>
+#endif
+
/***************************************************************************/
void coldfire_reset(void);
+static void coldfire_qspi_cs_control(u8 cs, u8 command);
+
+/***************************************************************************/
+
+#if defined(CONFIG_SPI)
+
+#if defined(CONFIG_WILDFIRE)
+#define SPI_NUM_CHIPSELECTS 0x02
+#define SPI_PAR_VAL 0x07 /* Enable DIN, DOUT, CLK */
+#define SPI_CS_MASK 0x18
+
+#define FLASH_BLOCKSIZE (1024*64)
+#define FLASH_NUMBLOCKS 16
+#define FLASH_TYPE "m25p80"
+
+#define M25P80_CS 0
+#define MMC_CS 1
+
+#ifdef CONFIG_MTD_PARTITIONS
+static struct mtd_partition stm25p_partitions[] = {
+ /* sflash */
+ [0] = {
+ .name = "stm25p80",
+ .offset = 0x00000000,
+ .size = FLASH_BLOCKSIZE * FLASH_NUMBLOCKS,
+ .mask_flags = 0
+ }
+};
+
+#endif
+
+#elif defined(CONFIG_WILDFIREMOD)
+
+#define SPI_NUM_CHIPSELECTS 0x08
+#define SPI_PAR_VAL 0x07 /* Enable DIN, DOUT, CLK */
+#define SPI_CS_MASK 0x78
+
+#define FLASH_BLOCKSIZE (1024*64)
+#define FLASH_NUMBLOCKS 64
+#define FLASH_TYPE "m25p32"
+/* Reserve 1M for the kernel partition */
+#define FLASH_KERNEL_SIZE (1024 * 1024)
+
+#define M25P80_CS 5
+#define MMC_CS 6
+
+#ifdef CONFIG_MTD_PARTITIONS
+static struct mtd_partition stm25p_partitions[] = {
+ /* sflash */
+ [0] = {
+ .name = "kernel",
+ .offset = FLASH_BLOCKSIZE * FLASH_NUMBLOCKS - FLASH_KERNEL_SIZE,
+ .size = FLASH_KERNEL_SIZE,
+ .mask_flags = 0
+ },
+ [1] = {
+ .name = "image",
+ .offset = 0x00000000,
+ .size = FLASH_BLOCKSIZE * FLASH_NUMBLOCKS - FLASH_KERNEL_SIZE,
+ .mask_flags = 0
+ },
+ [2] = {
+ .name = "all",
+ .offset = 0x00000000,
+ .size = FLASH_BLOCKSIZE * FLASH_NUMBLOCKS,
+ .mask_flags = 0
+ }
+};
+#endif
+
+#else
+#define SPI_NUM_CHIPSELECTS 0x04
+#define SPI_PAR_VAL 0x7F /* Enable DIN, DOUT, CLK, CS0 - CS4 */
+#endif
+
+#ifdef MMC_CS
+static struct coldfire_spi_chip flash_chip_info = {
+ .mode = SPI_MODE_0,
+ .bits_per_word = 16,
+ .del_cs_to_clk = 17,
+ .del_after_trans = 1,
+ .void_write_data = 0
+};
+
+static struct coldfire_spi_chip mmc_chip_info = {
+ .mode = SPI_MODE_0,
+ .bits_per_word = 16,
+ .del_cs_to_clk = 17,
+ .del_after_trans = 1,
+ .void_write_data = 0xFFFF
+};
+#endif
+
+#ifdef M25P80_CS
+static struct flash_platform_data stm25p80_platform_data = {
+ .name = "ST M25P80 SPI Flash chip",
+#ifdef CONFIG_MTD_PARTITIONS
+ .parts = stm25p_partitions,
+ .nr_parts = sizeof(stm25p_partitions) / sizeof(*stm25p_partitions),
+#endif
+ .type = FLASH_TYPE
+};
+#endif
+
+static struct spi_board_info spi_board_info[] __initdata = {
+#ifdef M25P80_CS
+ {
+ .modalias = "m25p80",
+ .max_speed_hz = 16000000,
+ .bus_num = 1,
+ .chip_select = M25P80_CS,
+ .platform_data = &stm25p80_platform_data,
+ .controller_data = &flash_chip_info
+ },
+#endif
+#ifdef MMC_CS
+ {
+ .modalias = "mmc_spi",
+ .max_speed_hz = 16000000,
+ .bus_num = 1,
+ .chip_select = MMC_CS,
+ .controller_data = &mmc_chip_info
+ }
+#endif
+};
+
+static struct coldfire_spi_master coldfire_master_info = {
+ .bus_num = 1,
+ .num_chipselect = SPI_NUM_CHIPSELECTS,
+ .irq_source = MCF5282_QSPI_IRQ_SOURCE,
+ .irq_vector = MCF5282_QSPI_IRQ_VECTOR,
+ .irq_mask = ((0x01 << MCF5282_QSPI_IRQ_SOURCE) | 0x01),
+ .irq_lp = 0x2B, /* Level 5 and Priority 3 */
+ .par_val = SPI_PAR_VAL,
+ .cs_control = coldfire_qspi_cs_control,
+};
+
+static struct resource coldfire_spi_resources[] = {
+ [0] = {
+ .name = "qspi-par",
+ .start = MCF5282_QSPI_PAR,
+ .end = MCF5282_QSPI_PAR,
+ .flags = IORESOURCE_MEM
+ },
+
+ [1] = {
+ .name = "qspi-module",
+ .start = MCF5282_QSPI_QMR,
+ .end = MCF5282_QSPI_QMR + 0x18,
+ .flags = IORESOURCE_MEM
+ },
+
+ [2] = {
+ .name = "qspi-int-level",
+ .start = MCF5282_INTC0 + MCFINTC_ICR0 + MCF5282_QSPI_IRQ_SOURCE,
+ .end = MCF5282_INTC0 + MCFINTC_ICR0 + MCF5282_QSPI_IRQ_SOURCE,
+ .flags = IORESOURCE_MEM
+ },
+
+ [3] = {
+ .name = "qspi-int-mask",
+ .start = MCF5282_INTC0 + MCFINTC_IMRL,
+ .end = MCF5282_INTC0 + MCFINTC_IMRL,
+ .flags = IORESOURCE_MEM
+ }
+};
+
+static struct platform_device coldfire_spi = {
+ .name = "spi_coldfire",
+ .id = -1,
+ .resource = coldfire_spi_resources,
+ .num_resources = ARRAY_SIZE(coldfire_spi_resources),
+ .dev = {
+ .platform_data = &coldfire_master_info,
+ }
+};
+
+static void coldfire_qspi_cs_control(u8 cs, u8 command)
+{
+ u8 cs_bit = ((0x01 << cs) << 3) & SPI_CS_MASK;
+
+#if defined(CONFIG_WILDFIRE)
+ u8 cs_mask = ~(((0x01 << cs) << 3) & SPI_CS_MASK);
+#endif
+#if defined(CONFIG_WILDFIREMOD)
+ u8 cs_mask = (cs << 3) & SPI_CS_MASK;
+#endif
+
+ /*
+ * Don't do anything if the chip select is not
+ * one of the port qs pins.
+ */
+ if (command & QSPI_CS_INIT) {
+#if defined(CONFIG_WILDFIRE)
+ MCF5282_GPIO_DDRQS |= cs_bit;
+ MCF5282_GPIO_PQSPAR &= ~cs_bit;
+#endif
+
+#if defined(CONFIG_WILDFIREMOD)
+ MCF5282_GPIO_DDRQS |= SPI_CS_MASK;
+ MCF5282_GPIO_PQSPAR &= ~SPI_CS_MASK;
+#endif
+ }
+
+ if (command & QSPI_CS_ASSERT) {
+ MCF5282_GPIO_PORTQS &= ~SPI_CS_MASK;
+ MCF5282_GPIO_PORTQS |= cs_mask;
+ } else if (command & QSPI_CS_DROP) {
+ MCF5282_GPIO_PORTQS |= SPI_CS_MASK;
+ }
+}
+
+static int __init spi_dev_init(void)
+{
+ int retval;
+
+ retval = platform_device_register(&coldfire_spi);
+ if (retval < 0)
+ return retval;
+
+ if (ARRAY_SIZE(spi_board_info))
+ retval = spi_register_board_info(spi_board_info, ARRAY_SIZE(spi_board_info));
+
+ return retval;
+}
+
+#endif /* CONFIG_SPI */
/***************************************************************************/
/***************************************************************************/
+#ifdef CONFIG_WILDFIRE
+void wildfire_halt(void)
+{
+ writeb(0, 0x30000007);
+ writeb(0x2, 0x30000007);
+}
+#endif
+
+#ifdef CONFIG_WILDFIREMOD
+void wildfiremod_halt(void)
+{
+ printk(KERN_INFO "WildFireMod hibernating...\n");
+
+ /* Set portE.5 to Digital IO */
+ MCF5282_GPIO_PEPAR &= ~(1 << (5 * 2));
+
+ /* Make portE.5 an output */
+ MCF5282_GPIO_DDRE |= (1 << 5);
+
+ /* Now toggle portE.5 from low to high */
+ MCF5282_GPIO_PORTE &= ~(1 << 5);
+ MCF5282_GPIO_PORTE |= (1 << 5);
+
+ printk(KERN_EMERG "Failed to hibernate. Halting!\n");
+}
+#endif
+
void __init config_BSP(char *commandp, int size)
{
mcf_disableall();
- mach_reset = coldfire_reset;
+
+#ifdef CONFIG_WILDFIRE
+ mach_halt = wildfire_halt;
+#endif
+#ifdef CONFIG_WILDFIREMOD
+ mach_halt = wildfiremod_halt;
+#endif
}
/***************************************************************************/
mcf_setimr(MCFSIM_IMR_MASKALL);
#if defined(CONFIG_NETtel) || defined(CONFIG_eLIA) || \
- defined(CONFIG_DISKtel) || defined(CONFIG_SECUREEDGEMP3) || \
- defined(CONFIG_CLEOPATRA)
+ defined(CONFIG_SECUREEDGEMP3) || defined(CONFIG_CLEOPATRA)
/* Copy command line from FLASH to local buffer... */
memcpy(commandp, (char *) 0xf0004000, size);
commandp[size-1] = 0;
addql #4,%sp
ret_from_exception:
+ move #0x2700,%sr /* disable intrs */
btst #5,%sp@(PT_SR) /* check if returning to kernel */
jeq Luser_return /* if so, skip resched, signals */
+#ifdef CONFIG_PREEMPT
+ movel %sp,%d1 /* get thread_info pointer */
+ andl #-THREAD_SIZE,%d1 /* at base of kernel stack */
+ movel %d1,%a0
+ movel %a0@(TI_FLAGS),%d1 /* get thread_info->flags */
+ andl #_TIF_NEED_RESCHED,%d1
+ jeq Lkernel_return
+
+ movel %a0@(TI_PREEMPTCOUNT),%d1
+ cmpl #0,%d1
+ jne Lkernel_return
+
+ pea Lkernel_return
+ jmp preempt_schedule_irq /* preempt the kernel */
+#endif
+
Lkernel_return:
moveml %sp@,%d1-%d5/%a0-%a2
lea %sp@(32),%sp /* space for 8 regs */
Lwork_to_do:
movel %a0@(TI_FLAGS),%d1 /* get thread_info->flags */
+ move #0x2000,%sr /* enable intrs again */
btst #TIF_NEED_RESCHED,%d1
jne reschedule
#include <linux/module.h>
#include <linux/elfcore.h>
#include <linux/compat.h>
+#include <linux/math64.h>
#define elf_prstatus elf_prstatus32
struct elf_prstatus32
* one divide.
*/
u64 nsec = (u64)jiffies * TICK_NSEC;
- long rem;
- value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &rem);
+ u32 rem;
+ value->tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
value->tv_usec = rem / NSEC_PER_USEC;
}
#include <linux/module.h>
#include <linux/elfcore.h>
#include <linux/compat.h>
+#include <linux/math64.h>
#define elf_prstatus elf_prstatus32
struct elf_prstatus32
* one divide.
*/
u64 nsec = (u64)jiffies * TICK_NSEC;
- long rem;
- value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &rem);
+ u32 rem;
+ value->tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
value->tv_usec = rem / NSEC_PER_USEC;
}
u64 tb_to_xs;
unsigned tb_to_us;
-#define TICKLEN_SCALE TICK_LENGTH_SHIFT
+#define TICKLEN_SCALE NTP_SCALE_SHIFT
u64 last_tick_len; /* units are ns / 2^TICKLEN_SCALE */
u64 ticklen_to_xs; /* 0.64 fraction */
vdso_data->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
vdso_data->tb_to_xs = tb_to_xs;
- time_freq = 0;
-
write_sequnlock_irqrestore(&xtime_lock, flags);
/* Register the clocksource, if we're not running on iSeries */
#include "i8254.h"
#ifndef CONFIG_X86_64
-#define mod_64(x, y) ((x) - (y) * div64_64(x, y))
+#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
#else
#define mod_64(x, y) ((x) % (y))
#endif
rl = (u64)u.l.low * (u64)b;
rh = (u64)u.l.high * (u64)b;
rh += (rl >> 32);
- res.l.high = div64_64(rh, c);
- res.l.low = div64_64(((mod_64(rh, c) << 32) + (rl & 0xffffffff)), c);
+ res.l.high = div64_u64(rh, c);
+ res.l.low = div64_u64(((mod_64(rh, c) << 32) + (rl & 0xffffffff)), c);
return res.ll;
}
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/math64.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/page.h>
#include <asm/current.h>
#include <asm/apicdef.h>
#include <asm/atomic.h>
-#include <asm/div64.h>
#include "irq.h"
#define PRId64 "d"
} else
passed = ktime_sub(now, apic->timer.last_update);
- counter_passed = div64_64(ktime_to_ns(passed),
- (APIC_BUS_CYCLE_NS * apic->timer.divide_count));
+ counter_passed = div64_u64(ktime_to_ns(passed),
+ (APIC_BUS_CYCLE_NS * apic->timer.divide_count));
if (counter_passed > tmcct) {
if (unlikely(!apic_lvtt_period(apic))) {
{
if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) &&
prepare_flush_fn == NULL) {
- printk(KERN_ERR "%s: prepare_flush_fn required\n",
- __FUNCTION__);
+ printk(KERN_ERR "%s: prepare_flush_fn required\n", __func__);
return -EINVAL;
}
if (unlikely(nbytes > bio->bi_size)) {
printk(KERN_ERR "%s: want %u bytes done, %u left\n",
- __FUNCTION__, nbytes, bio->bi_size);
+ __func__, nbytes, bio->bi_size);
nbytes = bio->bi_size;
}
if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
blk_dump_rq_flags(req, "__end_that");
printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
- __FUNCTION__, bio->bi_idx,
- bio->bi_vcnt);
+ __func__, bio->bi_idx, bio->bi_vcnt);
break;
}
{
if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
- printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
- max_sectors);
+ printk(KERN_INFO "%s: set to minimum %d\n",
+ __func__, max_sectors);
}
if (BLK_DEF_MAX_SECTORS > max_sectors)
{
if (!max_segments) {
max_segments = 1;
- printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
- max_segments);
+ printk(KERN_INFO "%s: set to minimum %d\n",
+ __func__, max_segments);
}
q->max_phys_segments = max_segments;
{
if (!max_segments) {
max_segments = 1;
- printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
- max_segments);
+ printk(KERN_INFO "%s: set to minimum %d\n",
+ __func__, max_segments);
}
q->max_hw_segments = max_segments;
{
if (max_size < PAGE_CACHE_SIZE) {
max_size = PAGE_CACHE_SIZE;
- printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
- max_size);
+ printk(KERN_INFO "%s: set to minimum %d\n",
+ __func__, max_size);
}
q->max_segment_size = max_size;
{
if (mask < PAGE_CACHE_SIZE - 1) {
mask = PAGE_CACHE_SIZE - 1;
- printk(KERN_INFO "%s: set to minimum %lx\n", __FUNCTION__,
- mask);
+ printk(KERN_INFO "%s: set to minimum %lx\n",
+ __func__, mask);
}
q->seg_boundary_mask = mask;
if (q && depth > q->nr_requests * 2) {
depth = q->nr_requests * 2;
printk(KERN_ERR "%s: adjusted depth to %d\n",
- __FUNCTION__, depth);
+ __func__, depth);
}
tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
if (unlikely(bqt->tag_index[tag] == NULL))
printk(KERN_ERR "%s: tag %d is missing\n",
- __FUNCTION__, tag);
+ __func__, tag);
bqt->tag_index[tag] = NULL;
if (unlikely(!test_bit(tag, bqt->tag_map))) {
printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
- __FUNCTION__, tag);
+ __func__, tag);
return;
}
/*
if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
printk(KERN_ERR
"%s: request %p for device [%s] already tagged %d",
- __FUNCTION__, rq,
+ __func__, rq,
rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
BUG();
}
#undef BSG_DEBUG
#ifdef BSG_DEBUG
-#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ##args)
+#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ##args)
#else
#define dprintk(fmt, args...)
#endif
default:
printk(KERN_ERR "%s: bad insertion point %d\n",
- __FUNCTION__, where);
+ __func__, where);
BUG();
}
rq->cmd_flags |= REQ_QUIET;
end_queued_request(rq, 0);
} else {
- printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__,
- ret);
+ printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
break;
}
}
int err)
{
if (!err) {
- struct aead_givcrypt_request *greq = req->data;
+ struct aead_request *areq = req->data;
+ struct skcipher_givcrypt_request *greq = aead_request_ctx(areq);
- err = crypto_authenc_genicv(&greq->areq, greq->giv, 0);
+ err = crypto_authenc_genicv(areq, greq->giv, 0);
}
aead_request_complete(req->data, err);
int err;
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
- if (IS_ERR(inst))
+ if (!inst) {
+ inst = ERR_PTR(-ENOMEM);
goto out;
+ }
err = -ENAMETOOLONG;
if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
}
ablkcipher_request_set_crypt(subreq, reqctx->src, dst,
- req->creq.nbytes, req->creq.info);
+ req->creq.nbytes + ivsize,
+ req->creq.info);
memcpy(req->creq.info, ctx->salt, ivsize);
};
EXPORT_SYMBOL(cpu_sysdev_class);
-static struct sys_device *cpu_sys_devices[NR_CPUS];
+static DEFINE_PER_CPU(struct sys_device *, cpu_sys_devices);
#ifdef CONFIG_HOTPLUG_CPU
static ssize_t show_online(struct sys_device *dev, char *buf)
sysdev_remove_file(&cpu->sysdev, &attr_online);
sysdev_unregister(&cpu->sysdev);
- cpu_sys_devices[logical_cpu] = NULL;
+ per_cpu(cpu_sys_devices, logical_cpu) = NULL;
return;
}
#else /* ... !CONFIG_HOTPLUG_CPU */
if (!error && cpu->hotpluggable)
register_cpu_control(cpu);
if (!error)
- cpu_sys_devices[num] = &cpu->sysdev;
+ per_cpu(cpu_sys_devices, num) = &cpu->sysdev;
if (!error)
register_cpu_under_node(num, cpu_to_node(num));
struct sys_device *get_cpu_sysdev(unsigned cpu)
{
- if (cpu < NR_CPUS)
- return cpu_sys_devices[cpu];
+ if (cpu < nr_cpu_ids && cpu_possible(cpu))
+ return per_cpu(cpu_sys_devices, cpu);
else
return NULL;
}
proc_cciss = proc_mkdir("driver/cciss", NULL);
if (!proc_cciss)
return;
- pde = proc_create(hba[i]->devname, S_IWUSR | S_IRUSR | S_IRGRP |
+ pde = proc_create_data(hba[i]->devname, S_IWUSR | S_IRUSR | S_IRGRP |
S_IROTH, proc_cciss,
- &cciss_proc_fops);
- if (!pde)
- return;
-
- pde->data = hba[i];
+ &cciss_proc_fops, hba[i]);
}
#endif /* CONFIG_PROC_FS */
module_param(power_status, bool, 0600);
MODULE_PARM_DESC(power_status, "Report power status in /proc/i8k");
+static int fan_mult = I8K_FAN_MULT;
+module_param(fan_mult, int, 0);
+MODULE_PARM_DESC(fan_mult, "Factor to multiply fan speed with");
+
static int i8k_open_fs(struct inode *inode, struct file *file);
static int i8k_ioctl(struct inode *, struct file *, unsigned int,
unsigned long);
struct smm_regs regs = { .eax = I8K_SMM_GET_SPEED, };
regs.ebx = fan & 0xff;
- return i8k_smm(&regs) ? : (regs.eax & 0xffff) * I8K_FAN_MULT;
+ return i8k_smm(&regs) ? : (regs.eax & 0xffff) * fan_mult;
}
/*
#include <linux/miscdevice.h>
#include <linux/posix-timers.h>
#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/math64.h>
#include <asm/uaccess.h>
#include <asm/sn/addrs.h>
nsec = rtc_time() * sgi_clock_period
+ sgi_clock_offset.tv_nsec;
- tp->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &tp->tv_nsec)
- + sgi_clock_offset.tv_sec;
+ *tp = ns_to_timespec(nsec);
+ tp->tv_sec += sgi_clock_offset.tv_sec;
return 0;
};
{
u64 nsec;
- u64 rem;
+ u32 rem;
nsec = rtc_time() * sgi_clock_period;
- sgi_clock_offset.tv_sec = tp->tv_sec - div_long_long_rem(nsec, NSEC_PER_SEC, &rem);
+ sgi_clock_offset.tv_sec = tp->tv_sec - div_u64_rem(nsec, NSEC_PER_SEC, &rem);
if (rem <= tp->tv_nsec)
sgi_clock_offset.tv_nsec = tp->tv_sec - rem;
return 0;
}
-#define timespec_to_ns(x) ((x).tv_nsec + (x).tv_sec * NSEC_PER_SEC)
-#define ns_to_timespec(ts, nsec) (ts).tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &(ts).tv_nsec)
-
/* Assumption: it_lock is already held with irq's disabled */
static void sgi_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
{
return;
}
- ns_to_timespec(cur_setting->it_interval, timr->it.mmtimer.incr * sgi_clock_period);
- ns_to_timespec(cur_setting->it_value, (timr->it.mmtimer.expires - rtc_time())* sgi_clock_period);
- return;
+ cur_setting->it_interval = ns_to_timespec(timr->it.mmtimer.incr * sgi_clock_period);
+ cur_setting->it_value = ns_to_timespec((timr->it.mmtimer.expires - rtc_time()) * sgi_clock_period);
}
sgi_timer_get(timr, old_setting);
sgi_timer_del(timr);
- when = timespec_to_ns(new_setting->it_value);
- period = timespec_to_ns(new_setting->it_interval);
+ when = timespec_to_ns(&new_setting->it_value);
+ period = timespec_to_ns(&new_setting->it_interval);
if (when == 0)
/* Clear timer */
unsigned long now;
getnstimeofday(&n);
- now = timespec_to_ns(n);
+ now = timespec_to_ns(&n);
if (when > now)
when -= now;
else
*/
static int mgsl_put_char(struct tty_struct *tty, unsigned char ch)
{
- struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
+ struct mgsl_struct *info = tty->driver_data;
unsigned long flags;
- int ret;
+ int ret = 0;
- if ( debug_level >= DEBUG_LEVEL_INFO ) {
- printk( "%s(%d):mgsl_put_char(%d) on %s\n",
- __FILE__,__LINE__,ch,info->device_name);
+ if (debug_level >= DEBUG_LEVEL_INFO) {
+ printk(KERN_DEBUG "%s(%d):mgsl_put_char(%d) on %s\n",
+ __FILE__, __LINE__, ch, info->device_name);
}
if (mgsl_paranoia_check(info, tty->name, "mgsl_put_char"))
if (!tty || !info->xmit_buf)
return 0;
- spin_lock_irqsave(&info->irq_spinlock,flags);
+ spin_lock_irqsave(&info->irq_spinlock, flags);
- if ( (info->params.mode == MGSL_MODE_ASYNC ) || !info->tx_active ) {
+ if ((info->params.mode == MGSL_MODE_ASYNC ) || !info->tx_active) {
if (info->xmit_cnt < SERIAL_XMIT_SIZE - 1) {
info->xmit_buf[info->xmit_head++] = ch;
info->xmit_head &= SERIAL_XMIT_SIZE-1;
ret = 1;
}
}
- spin_unlock_irqrestore(&info->irq_spinlock,flags);
+ spin_unlock_irqrestore(&info->irq_spinlock, flags);
return ret;
} /* end of mgsl_put_char() */
{ "pca9537", 4, },
{ "pca9538", 8, },
{ "pca9539", 16, },
+ { "pca9555", 16, },
+ { "pca9557", 8, },
/* REVISIT several pca955x parts should work here too */
{ }
};
{
struct pca953x_platform_data *pdata;
struct pca953x_chip *chip;
- int ret, i;
+ int ret;
pdata = client->dev.platform_data;
if (pdata == NULL)
sprintf(conf_name, "%s%d", PROC_CONF_BASENAME, card->myid);
if ((card->procconf = (void *) proc_create(conf_name,
S_IFREG | S_IRUGO | S_IWUSR,
- hysdn_proc_entry)) != NULL) {
+ hysdn_proc_entry,
+ &conf_fops)) != NULL) {
hysdn_proclog_init(card); /* init the log file entry */
}
card = card->next; /* next entry */
#define FEC_MAX_PORTS 1
#endif
+#if defined(CONFIG_FADS) || defined(CONFIG_RPXCLASSIC) || defined(CONFIG_M5272)
+#define HAVE_mii_link_interrupt
+#endif
+
/*
* Define the fixed address of the FEC hardware.
*/
cbd_t *cur_rx, *cur_tx; /* The next free ring entry */
cbd_t *dirty_tx; /* The ring entries to be free()ed. */
uint tx_full;
- spinlock_t lock;
+ /* held while accessing the HW (e.g. the tx/rx ring buffers), but not the MAC */
+ spinlock_t hw_lock;
+ /* held while accessing the mii_list_t elements */
+ spinlock_t mii_lock;
uint phy_id;
uint phy_id_done;
volatile fec_t *fecp;
volatile cbd_t *bdp;
unsigned short status;
+ unsigned long flags;
fep = netdev_priv(dev);
fecp = (volatile fec_t*)dev->base_addr;
return 1;
}
+ spin_lock_irqsave(&fep->hw_lock, flags);
/* Fill in a Tx ring entry */
bdp = fep->cur_tx;
* This should not happen, since dev->tbusy should be set.
*/
printk("%s: tx queue full!.\n", dev->name);
+ spin_unlock_irqrestore(&fep->hw_lock, flags);
return 1;
}
#endif
flush_dcache_range((unsigned long)skb->data,
(unsigned long)skb->data + skb->len);
- spin_lock_irq(&fep->lock);
-
/* Send it on its way. Tell FEC it's ready, interrupt when done,
* it's the last BD of the frame, and to put the CRC on the end.
*/
fep->cur_tx = (cbd_t *)bdp;
- spin_unlock_irq(&fep->lock);
+ spin_unlock_irqrestore(&fep->hw_lock, flags);
return 0;
}
struct net_device *dev = dev_id;
volatile fec_t *fecp;
uint int_events;
- int handled = 0;
+ irqreturn_t ret = IRQ_NONE;
fecp = (volatile fec_t*)dev->base_addr;
/* Get the interrupt events that caused us to be here.
*/
- while ((int_events = fecp->fec_ievent) != 0) {
+ do {
+ int_events = fecp->fec_ievent;
fecp->fec_ievent = int_events;
/* Handle receive event in its own function.
*/
if (int_events & FEC_ENET_RXF) {
- handled = 1;
+ ret = IRQ_HANDLED;
fec_enet_rx(dev);
}
them as part of the transmit process.
*/
if (int_events & FEC_ENET_TXF) {
- handled = 1;
+ ret = IRQ_HANDLED;
fec_enet_tx(dev);
}
if (int_events & FEC_ENET_MII) {
- handled = 1;
+ ret = IRQ_HANDLED;
fec_enet_mii(dev);
}
- }
- return IRQ_RETVAL(handled);
+ } while (int_events);
+
+ return ret;
}
struct sk_buff *skb;
fep = netdev_priv(dev);
- spin_lock(&fep->lock);
+ spin_lock_irq(&fep->hw_lock);
bdp = fep->dirty_tx;
while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
}
}
fep->dirty_tx = (cbd_t *)bdp;
- spin_unlock(&fep->lock);
+ spin_unlock_irq(&fep->hw_lock);
}
fep = netdev_priv(dev);
fecp = (volatile fec_t*)dev->base_addr;
+ spin_lock_irq(&fep->hw_lock);
+
/* First, grab all of the stats for the incoming packet.
* These get messed up if we get called due to a busy condition.
*/
*/
fecp->fec_r_des_active = 0;
#endif
+
+ spin_unlock_irq(&fep->hw_lock);
}
uint mii_reg;
fep = netdev_priv(dev);
+ spin_lock_irq(&fep->mii_lock);
+
ep = fep->hwp;
mii_reg = ep->fec_mii_data;
- spin_lock(&fep->lock);
-
if ((mip = mii_head) == NULL) {
printk("MII and no head!\n");
goto unlock;
ep->fec_mii_data = mip->mii_regval;
unlock:
- spin_unlock(&fep->lock);
+ spin_unlock_irq(&fep->mii_lock);
}
static int
/* Add PHY address to register command.
*/
fep = netdev_priv(dev);
- regval |= fep->phy_addr << 23;
+ spin_lock_irqsave(&fep->mii_lock, flags);
+ regval |= fep->phy_addr << 23;
retval = 0;
- spin_lock_irqsave(&fep->lock,flags);
-
if ((mip = mii_free) != NULL) {
mii_free = mip->mii_next;
mip->mii_regval = regval;
retval = 1;
}
- spin_unlock_irqrestore(&fep->lock,flags);
-
- return(retval);
+ spin_unlock_irqrestore(&fep->mii_lock, flags);
+ return retval;
}
static void mii_do_cmd(struct net_device *dev, const phy_cmd_t *c)
};
/* ------------------------------------------------------------------------- */
-#if !defined(CONFIG_M532x)
+#ifdef HAVE_mii_link_interrupt
#ifdef CONFIG_RPXCLASSIC
static void
mii_link_interrupt(void *dev_id);
unsigned short irq;
} *idp, id[] = {
{ "fec(TXF)", 23 },
- { "fec(TXB)", 24 },
- { "fec(TXFIFO)", 25 },
- { "fec(TXCR)", 26 },
{ "fec(RXF)", 27 },
- { "fec(RXB)", 28 },
{ "fec(MII)", 29 },
- { "fec(LC)", 30 },
- { "fec(HBERR)", 31 },
- { "fec(GRA)", 32 },
- { "fec(EBERR)", 33 },
- { "fec(BABT)", 34 },
- { "fec(BABR)", 35 },
{ NULL },
};
unsigned short irq;
} *idp, id[] = {
{ "fec(TXF)", 23 },
- { "fec(TXB)", 24 },
- { "fec(TXFIFO)", 25 },
- { "fec(TXCR)", 26 },
{ "fec(RXF)", 27 },
- { "fec(RXB)", 28 },
{ "fec(MII)", 29 },
- { "fec(LC)", 30 },
- { "fec(HBERR)", 31 },
- { "fec(GRA)", 32 },
- { "fec(EBERR)", 33 },
- { "fec(BABT)", 34 },
- { "fec(BABR)", 35 },
{ NULL },
};
unsigned short irq;
} *idp, id[] = {
{ "fec(TXF)", 36 },
- { "fec(TXB)", 37 },
- { "fec(TXFIFO)", 38 },
- { "fec(TXCR)", 39 },
{ "fec(RXF)", 40 },
- { "fec(RXB)", 41 },
{ "fec(MII)", 42 },
- { "fec(LC)", 43 },
- { "fec(HBERR)", 44 },
- { "fec(GRA)", 45 },
- { "fec(EBERR)", 46 },
- { "fec(BABT)", 47 },
- { "fec(BABR)", 48 },
{ NULL },
};
/* This interrupt occurs when the PHY detects a link change.
*/
+#ifdef HAVE_mii_link_interrupt
#ifdef CONFIG_RPXCLASSIC
static void
mii_link_interrupt(void *dev_id)
return IRQ_HANDLED;
}
+#endif
static int
fec_enet_open(struct net_device *dev)
/* Catch all multicast addresses, so set the
* filter to all 1's.
*/
- ep->fec_hash_table_high = 0xffffffff;
- ep->fec_hash_table_low = 0xffffffff;
+ ep->fec_grp_hash_table_high = 0xffffffff;
+ ep->fec_grp_hash_table_low = 0xffffffff;
} else {
/* Clear filter and add the addresses in hash register.
*/
- ep->fec_hash_table_high = 0;
- ep->fec_hash_table_low = 0;
+ ep->fec_grp_hash_table_high = 0;
+ ep->fec_grp_hash_table_low = 0;
dmi = dev->mc_list;
hash = (crc >> (32 - HASH_BITS)) & 0x3f;
if (hash > 31)
- ep->fec_hash_table_high |= 1 << (hash - 32);
+ ep->fec_grp_hash_table_high |= 1 << (hash - 32);
else
- ep->fec_hash_table_low |= 1 << hash;
+ ep->fec_grp_hash_table_low |= 1 << hash;
}
}
}
return -ENOMEM;
}
+ spin_lock_init(&fep->hw_lock);
+ spin_lock_init(&fep->mii_lock);
+
/* Create an Ethernet device instance.
*/
fecp = (volatile fec_t *) fec_hw[index];
*/
fec_request_intrs(dev);
- fecp->fec_hash_table_high = 0;
- fecp->fec_hash_table_low = 0;
+ fecp->fec_grp_hash_table_high = 0;
+ fecp->fec_grp_hash_table_low = 0;
fecp->fec_r_buff_size = PKT_MAXBLR_SIZE;
fecp->fec_ecntrl = 2;
fecp->fec_r_des_active = 0;
+#ifndef CONFIG_M5272
+ fecp->fec_hash_table_high = 0;
+ fecp->fec_hash_table_low = 0;
+#endif
dev->base_addr = (unsigned long)fecp;
/* Clear and enable interrupts */
fecp->fec_ievent = 0xffc00000;
- fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_TXB |
- FEC_ENET_RXF | FEC_ENET_RXB | FEC_ENET_MII);
+ fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII);
/* Queue up command to detect the PHY and initialize the
* remainder of the interface.
/* Reset all multicast.
*/
- fecp->fec_hash_table_high = 0;
- fecp->fec_hash_table_low = 0;
+ fecp->fec_grp_hash_table_high = 0;
+ fecp->fec_grp_hash_table_low = 0;
/* Set maximum receive buffer size.
*/
/* Enable interrupts we wish to service.
*/
- fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_TXB |
- FEC_ENET_RXF | FEC_ENET_RXB | FEC_ENET_MII);
+ fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII);
}
static void
static int __init fec_enet_module_init(void)
{
struct net_device *dev;
- int i, j, err;
+ int i, err;
DECLARE_MAC_BUF(mac);
printk("FEC ENET Version 0.2\n");
unsigned long fec_reserved7[158];
unsigned long fec_addr_low; /* Low 32bits MAC address */
unsigned long fec_addr_high; /* High 16bits MAC address */
- unsigned long fec_hash_table_high; /* High 32bits hash table */
- unsigned long fec_hash_table_low; /* Low 32bits hash table */
+ unsigned long fec_grp_hash_table_high;/* High 32bits hash table */
+ unsigned long fec_grp_hash_table_low; /* Low 32bits hash table */
unsigned long fec_r_des_start; /* Receive descriptor ring */
unsigned long fec_x_des_start; /* Transmit descriptor ring */
unsigned long fec_r_buff_size; /* Maximum receive buff size */
default:
pwr |= SET_VCC_VPP(0,0,sock);
printk("%s: bad Vcc/Vpp (%d:%d)\n",
- __FUNCTION__,
+ __func__,
state->Vcc,
state->Vpp);
break;
default:
pwr |= SET_VCC_VPP(0,0,sock);
printk("%s: bad Vcc/Vpp (%d:%d)\n",
- __FUNCTION__,
+ __func__,
state->Vcc,
state->Vpp);
break;
default: /* what's this ? */
pwr |= SET_VCC_VPP(0,0,sock);
printk(KERN_ERR "%s: bad Vcc %d\n",
- __FUNCTION__, state->Vcc);
+ __func__, state->Vcc);
break;
}
#include <linux/notifier.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
+#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <asm/io.h>
u32 *pcmcia_base_vaddrs[2];
extern const unsigned long mips_io_port_base;
-DECLARE_MUTEX(pcmcia_sockets_lock);
+static DEFINE_MUTEX(pcmcia_sockets_lock);
static int (*au1x00_pcmcia_hw_init[])(struct device *dev) = {
au1x_board_init,
struct skt_dev_info *sinfo = dev_get_drvdata(dev);
int i;
- down(&pcmcia_sockets_lock);
+ mutex_lock(&pcmcia_sockets_lock);
dev_set_drvdata(dev, NULL);
for (i = 0; i < sinfo->nskt; i++) {
}
kfree(sinfo);
- up(&pcmcia_sockets_lock);
+ mutex_unlock(&pcmcia_sockets_lock);
return 0;
}
{
int i, ret = -ENODEV;
- down(&pcmcia_sockets_lock);
+ mutex_lock(&pcmcia_sockets_lock);
for (i=0; i < ARRAY_SIZE(au1x00_pcmcia_hw_init); i++) {
ret = au1x00_pcmcia_hw_init[i](dev);
if (ret == 0)
break;
}
- up(&pcmcia_sockets_lock);
+ mutex_unlock(&pcmcia_sockets_lock);
return ret;
}
pcr |= SET_VCC_VPP(VCC_HIZ,VPP_HIZ,
configure->sock);
printk("%s: bad Vcc/Vpp (%d:%d)\n",
- __FUNCTION__,
+ __func__,
configure->vcc,
configure->vpp);
break;
pcr |= SET_VCC_VPP(VCC_HIZ,VPP_HIZ,
configure->sock);
printk("%s: bad Vcc/Vpp (%d:%d)\n",
- __FUNCTION__,
+ __func__,
configure->vcc,
configure->vpp);
break;
pcr |= SET_VCC_VPP(VCC_HIZ,VPP_HIZ,
configure->sock);
printk("%s: bad Vcc/Vpp (%d:%d)\n",
- __FUNCTION__,
+ __func__,
configure->vcc,
configure->vpp);
break;
default: /* what's this ? */
pcr |= SET_VCC_VPP(VCC_HIZ,VPP_HIZ,configure->sock);
printk(KERN_ERR "%s: bad Vcc %d\n",
- __FUNCTION__, configure->vcc);
+ __func__, configure->vcc);
break;
}
default:
pcr |= SET_VCC_VPP(0,0);
printk("%s: bad Vcc/Vpp (%d:%d)\n",
- __FUNCTION__,
+ __func__,
configure->vcc,
configure->vpp);
break;
default:
pcr |= SET_VCC_VPP(0,0);
printk("%s: bad Vcc/Vpp (%d:%d)\n",
- __FUNCTION__,
+ __func__,
configure->vcc,
configure->vpp);
break;
default: /* what's this ? */
pcr |= SET_VCC_VPP(0,0);
printk(KERN_ERR "%s: bad Vcc %d\n",
- __FUNCTION__, configure->vcc);
+ __func__, configure->vcc);
break;
}
#define PCMCIA_IRQ AU1000_GPIO_4
#if 0
-#define DEBUG(x,args...) printk(__FUNCTION__ ": " x,##args)
+#define DEBUG(x, args...) printk(__func__ ": " x, ##args)
#else
#define DEBUG(x,args...)
#endif
}
}
-int cb_alloc(struct pcmcia_socket * s)
+int __ref cb_alloc(struct pcmcia_socket * s)
{
struct pci_bus *bus = s->cb_dev->subordinate;
struct pci_dev *dev;
/* the pcmcia_bus_interface is used to handle pcmcia socket devices */
-static struct class_interface pcmcia_bus_interface = {
+static struct class_interface pcmcia_bus_interface __refdata = {
.class = &pcmcia_socket_class,
.add_dev = &pcmcia_bus_add_socket,
.remove_dev = &pcmcia_bus_remove_socket,
}
#endif
-static struct pci_driver i82092aa_pci_drv = {
+static struct pci_driver i82092aa_pci_driver = {
.name = "i82092aa",
.id_table = i82092aa_pci_ids,
.probe = i82092aa_pci_probe,
static int i82092aa_module_init(void)
{
- return pci_register_driver(&i82092aa_pci_drv);
+ return pci_register_driver(&i82092aa_pci_driver);
}
static void i82092aa_module_exit(void)
{
enter("i82092aa_module_exit");
- pci_unregister_driver(&i82092aa_pci_drv);
+ pci_unregister_driver(&i82092aa_pci_driver);
if (sockets[0].io_base>0)
release_region(sockets[0].io_base, 2);
leave("i82092aa_module_exit");
static int omap_cf_ss_suspend(struct pcmcia_socket *s)
{
- pr_debug("%s: %s\n", driver_name, __FUNCTION__);
+ pr_debug("%s: %s\n", driver_name, __func__);
return omap_cf_set_socket(s, &dead_socket);
}
};
MODULE_DEVICE_TABLE(pci, pd6729_pci_ids);
-static struct pci_driver pd6729_pci_drv = {
+static struct pci_driver pd6729_pci_driver = {
.name = "pd6729",
.id_table = pd6729_pci_ids,
.probe = pd6729_pci_probe,
static int pd6729_module_init(void)
{
- return pci_register_driver(&pd6729_pci_drv);
+ return pci_register_driver(&pd6729_pci_driver);
}
static void pd6729_module_exit(void)
{
- pci_unregister_driver(&pd6729_pci_drv);
+ pci_unregister_driver(&pd6729_pci_driver);
}
module_init(pd6729_module_init);
default:
printk(KERN_ERR "%s(): unrecognized Vcc %u\n",
- __FUNCTION__, state->Vcc);
+ __func__, state->Vcc);
ret = -1;
}
pa_dwr_set |= GPIO_A0;
else {
printk(KERN_ERR "%s(): unrecognized Vpp %u\n",
- __FUNCTION__, state->Vpp);
+ __func__, state->Vpp);
ret = -1;
break;
}
default:
printk(KERN_ERR "%s(): unrecognized Vcc %u\n",
- __FUNCTION__, state->Vcc);
+ __func__, state->Vcc);
ret = -1;
break;
}
if (state->Vpp != state->Vcc && state->Vpp != 0) {
printk(KERN_ERR "%s(): CF slot cannot support Vpp %u\n",
- __FUNCTION__, state->Vpp);
+ __func__, state->Vpp);
ret = -1;
break;
}
case 50: power |= MST_PCMCIA_PWR_VCC_50; break;
default:
printk(KERN_ERR "%s(): bad Vcc %u\n",
- __FUNCTION__, state->Vcc);
+ __func__, state->Vcc);
ret = -1;
}
power |= MST_PCMCIA_PWR_VPP_VCC;
} else {
printk(KERN_ERR "%s(): bad Vpp %u\n",
- __FUNCTION__, state->Vpp);
+ __func__, state->Vpp);
ret = -1;
}
}
device_remove_file(dev, *attr);
}
-static struct class_interface pccard_rsrc_interface = {
+static struct class_interface pccard_rsrc_interface __refdata = {
.class = &pcmcia_socket_class,
.add_dev = &pccard_sysfs_add_rsrc,
.remove_dev = __devexit_p(&pccard_sysfs_remove_rsrc),
case 50:
printk(KERN_WARNING "%s(): CS asked for 5V, applying 3.3V...\n",
- __FUNCTION__);
+ __func__);
case 33: /* Can only apply 3.3V to the CF slot. */
mask = ASSABET_BCR_CF_PWR;
break;
default:
- printk(KERN_ERR "%s(): unrecognized Vcc %u\n", __FUNCTION__,
+ printk(KERN_ERR "%s(): unrecognized Vcc %u\n", __func__,
state->Vcc);
return -1;
}
case 0:
if ((state->Vcc != 0) &&
(state->Vcc != badge4_pcmvcc)) {
- complain_about_jumpering(__FUNCTION__, "pcmvcc",
+ complain_about_jumpering(__func__, "pcmvcc",
badge4_pcmvcc, state->Vcc);
// Apply power regardless of the jumpering.
// return -1;
}
if ((state->Vpp != 0) &&
(state->Vpp != badge4_pcmvpp)) {
- complain_about_jumpering(__FUNCTION__, "pcmvpp",
+ complain_about_jumpering(__func__, "pcmvpp",
badge4_pcmvpp, state->Vpp);
return -1;
}
case 1:
if ((state->Vcc != 0) &&
(state->Vcc != badge4_cfvcc)) {
- complain_about_jumpering(__FUNCTION__, "cfvcc",
+ complain_about_jumpering(__func__, "cfvcc",
badge4_cfvcc, state->Vcc);
return -1;
}
if (machine_is_badge4()) {
printk(KERN_INFO
"%s: badge4_pcmvcc=%d, badge4_pcmvpp=%d, badge4_cfvcc=%d\n",
- __FUNCTION__,
+ __func__,
badge4_pcmvcc, badge4_pcmvpp, badge4_cfvcc);
ret = sa11xx_drv_pcmcia_probe(dev, &badge4_pcmcia_ops, 0, 2);
default:
printk(KERN_ERR "%s(): unrecognized Vcc %u\n",
- __FUNCTION__, state->Vcc);
+ __func__, state->Vcc);
return -1;
}
unsigned int pa_dwr_mask, pa_dwr_set;
int ret;
-printk("%s(): config socket %d vcc %d vpp %d\n", __FUNCTION__,
+printk("%s(): config socket %d vcc %d vpp %d\n", __func__,
skt->nr, state->Vcc, state->Vpp);
switch (skt->nr) {
if (state->Vpp != state->Vcc && state->Vpp != 0) {
printk(KERN_ERR "%s(): slot cannot support VPP %u\n",
- __FUNCTION__, state->Vpp);
+ __func__, state->Vpp);
return -1;
}
ncr_set = NCR_A0VPP;
else {
printk(KERN_ERR "%s(): unrecognized VPP %u\n",
- __FUNCTION__, state->Vpp);
+ __func__, state->Vpp);
return -1;
}
break;
if (state->Vpp != state->Vcc && state->Vpp != 0) {
printk(KERN_ERR "%s(): CF slot cannot support VPP %u\n",
- __FUNCTION__, state->Vpp);
+ __func__, state->Vpp);
return -1;
}
break;
{
switch (state->Vcc) {
case 0: /* power off */
- printk(KERN_WARNING "%s(): CS asked for 0V, still applying 3.3V..\n", __FUNCTION__);
+ printk(KERN_WARNING "%s(): CS asked for 0V, still applying 3.3V..\n", __func__);
break;
case 50:
- printk(KERN_WARNING "%s(): CS asked for 5V, applying 3.3V..\n", __FUNCTION__);
+ printk(KERN_WARNING "%s(): CS asked for 5V, applying 3.3V..\n", __func__);
case 33:
break;
default:
printk(KERN_ERR "%s(): unrecognized Vcc %u\n",
- __FUNCTION__, state->Vcc);
+ __func__, state->Vcc);
return -1;
}
- printk(KERN_WARNING "%s(): Warning, Can't perform reset\n", __FUNCTION__);
+ printk(KERN_WARNING "%s(): Warning, Can't perform reset\n", __func__);
/* Silently ignore Vpp, output enable, speaker enable. */
default:
printk(KERN_ERR "%s(): unrecognized Vcc %u\n",
- __FUNCTION__, state->Vcc);
+ __func__, state->Vcc);
clear_cs3_bit(VCC_3V_EN|VCC_5V_EN|EN0|EN1);
local_irq_restore(flags);
return -1;
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/mm.h>
+#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/spinlock.h>
(map->flags&MAP_PREFETCH)?"PREFETCH ":"");
if (map->map >= MAX_IO_WIN) {
- printk(KERN_ERR "%s(): map (%d) out of range\n", __FUNCTION__,
+ printk(KERN_ERR "%s(): map (%d) out of range\n", __func__,
map->map);
return -1;
}
LIST_HEAD(soc_pcmcia_sockets);
-DECLARE_MUTEX(soc_pcmcia_sockets_lock);
+static DEFINE_MUTEX(soc_pcmcia_sockets_lock);
static const char *skt_names[] = {
"PCMCIA socket 0",
struct cpufreq_freqs *freqs = data;
int ret = 0;
- down(&soc_pcmcia_sockets_lock);
+ mutex_lock(&soc_pcmcia_sockets_lock);
list_for_each_entry(skt, &soc_pcmcia_sockets, node)
if ( skt->ops->frequency_change )
ret += skt->ops->frequency_change(skt, val, freqs);
- up(&soc_pcmcia_sockets_lock);
+ mutex_unlock(&soc_pcmcia_sockets_lock);
return ret;
}
struct soc_pcmcia_socket *skt;
int ret, i;
- down(&soc_pcmcia_sockets_lock);
+ mutex_lock(&soc_pcmcia_sockets_lock);
sinfo = kzalloc(SKT_DEV_INFO_SIZE(nr), GFP_KERNEL);
if (!sinfo) {
kfree(sinfo);
out:
- up(&soc_pcmcia_sockets_lock);
+ mutex_unlock(&soc_pcmcia_sockets_lock);
return ret;
}
dev_set_drvdata(dev, NULL);
- down(&soc_pcmcia_sockets_lock);
+ mutex_lock(&soc_pcmcia_sockets_lock);
for (i = 0; i < sinfo->nskt; i++) {
struct soc_pcmcia_socket *skt = &sinfo->skt[i];
if (list_empty(&soc_pcmcia_sockets))
soc_pcmcia_cpufreq_unregister();
- up(&soc_pcmcia_sockets_lock);
+ mutex_unlock(&soc_pcmcia_sockets_lock);
kfree(sinfo);
extern struct list_head soc_pcmcia_sockets;
-extern struct semaphore soc_pcmcia_sockets_lock;
extern int soc_common_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops, int first, int nr);
extern int soc_common_drv_pcmcia_remove(struct device *dev);
p[1] = map & 0xff;
p[2] = (map >> 8) & 0xff;
- dev_dbg(&dev->dev, " encode irq %d\n", res->start);
+ dev_dbg(&dev->dev, " encode irq %llu\n",
+ (unsigned long long)res->start);
}
static void pnpbios_encode_dma(struct pnp_dev *dev, unsigned char *p,
map = 1 << res->start;
p[1] = map & 0xff;
- dev_dbg(&dev->dev, " encode dma %d\n", res->start);
+ dev_dbg(&dev->dev, " encode dma %llu\n",
+ (unsigned long long)res->start);
}
static void pnpbios_encode_port(struct pnp_dev *dev, unsigned char *p,
dev_printk(KERN_DEBUG, &pdev->dev,
"phy[%d] Get Attached Address 0x%llX ,"
" SAS Address 0x%llX\n",
- i, phy->att_dev_sas_addr, phy->dev_sas_addr);
+ i,
+ (unsigned long long)phy->att_dev_sas_addr,
+ (unsigned long long)phy->dev_sas_addr);
dev_printk(KERN_DEBUG, &pdev->dev,
"Rate = %x , type = %d\n",
sas_phy->linkrate, phy->phy_type);
#ifdef DEBUG_WAITING_LIST
if (waiting_list) printk("%s: waiting_list=%lx processing sts=%d\n", ncr_name(np), (u_long) waiting_list, sts);
#endif
- while (wcmd = waiting_list) {
+ while ((wcmd = waiting_list) != NULL) {
waiting_list = (struct scsi_cmnd *) wcmd->next_wcmd;
wcmd->next_wcmd = NULL;
if (sts == DID_OK) {
}
if (up->port.flags & UPF_IOREMAP) {
- up->port.membase = ioremap(up->port.mapbase, size);
+ up->port.membase = ioremap_nocache(up->port.mapbase,
+ size);
if (!up->port.membase) {
release_mem_region(up->port.mapbase, size);
ret = -ENOMEM;
(void __iomem *)__fix_to_virt(FIX_EARLYCON_MEM_BASE);
port->membase += port->mapbase & ~PAGE_MASK;
#else
- port->membase = ioremap(port->mapbase, 64);
+ port->membase = ioremap_nocache(port->mapbase, 64);
if (!port->membase) {
printk(KERN_ERR "%s: Couldn't ioremap 0x%llx\n",
__func__,
len = pci_resource_len(dev, bar);
if (!priv->remapped_bar[bar])
- priv->remapped_bar[bar] = ioremap(base, len);
+ priv->remapped_bar[bar] = ioremap_nocache(base, len);
if (!priv->remapped_bar[bar])
return -ENOMEM;
/*
* enable/disable interrupts
*/
- p = ioremap(pci_resource_start(dev, 0), 0x80);
+ p = ioremap_nocache(pci_resource_start(dev, 0), 0x80);
if (p == NULL)
return -ENOMEM;
writel(irq_config, p + 0x4c);
/*
* disable interrupts
*/
- p = ioremap(pci_resource_start(dev, 0), 0x80);
+ p = ioremap_nocache(pci_resource_start(dev, 0), 0x80);
if (p != NULL) {
writel(0, p + 0x4c);
{
u8 __iomem *p;
- p = ioremap(pci_resource_start(dev, 0), pci_resource_len(dev, 0));
+ p = ioremap_nocache(pci_resource_start(dev, 0),
+ pci_resource_len(dev, 0));
if (p == NULL)
return -ENOMEM;
{
u8 __iomem *p;
- p = ioremap(pci_resource_start(dev, 0), pci_resource_len(dev, 0));
+ p = ioremap_nocache(pci_resource_start(dev, 0),
+ pci_resource_len(dev, 0));
/* FIXME: What if resource_len < OCT_REG_CR_OFF */
if (p != NULL)
writeb(0, p + OCT_REG_CR_OFF);
break;
}
- p = ioremap(pci_resource_start(dev, 0), 0x80);
+ p = ioremap_nocache(pci_resource_start(dev, 0), 0x80);
if (p == NULL)
return -ENOMEM;
#define PCI_DEVICE_NEO_2DB9PRI_PCI_NAME "Neo 2 - DB9 Universal PCI - Powered Ring Indicator"
#define PCI_DEVICE_NEO_2RJ45_PCI_NAME "Neo 2 - RJ45 Universal PCI"
#define PCI_DEVICE_NEO_2RJ45PRI_PCI_NAME "Neo 2 - RJ45 Universal PCI - Powered Ring Indicator"
+#define PCIE_DEVICE_NEO_IBM_PCI_NAME "Neo 4 - PCI Express - IBM"
/*
* Our Global Variables.
/* store the info for the board we've found */
brd->boardnum = adapter_count++;
brd->pci_dev = pdev;
- brd->maxports = 2;
+ if (pdev->device == PCIE_DEVICE_ID_NEO_4_IBM)
+ brd->maxports = 4;
+ else
+ brd->maxports = 2;
spin_lock_init(&brd->bd_lock);
spin_lock_init(&brd->bd_intr_lock);
{ PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_NEO_2DB9PRI), 0, 0, 1 },
{ PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_NEO_2RJ45), 0, 0, 2 },
{ PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_NEO_2RJ45PRI), 0, 0, 3 },
+ { PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCIE_DEVICE_ID_NEO_4_IBM), 0, 0, 4 },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, jsm_pci_tbl);
u8 width;
u16 cr, dma_width, dma_config;
u32 tranf_success = 1;
+ u8 full_duplex = 0;
/* Get current state information */
message = drv_data->cur_msg;
}
if (transfer->rx_buf != NULL) {
+ full_duplex = transfer->tx_buf != NULL;
drv_data->rx = transfer->rx_buf;
drv_data->rx_end = drv_data->rx + transfer->len;
dev_dbg(&drv_data->pdev->dev, "rx_buf is %p, rx_end is %p\n",
* successful use different way to r/w according to
* drv_data->cur_chip->enable_dma
*/
- if (drv_data->cur_chip->enable_dma && drv_data->len > 6) {
+ if (!full_duplex && drv_data->cur_chip->enable_dma
+ && drv_data->len > 6) {
disable_dma(drv_data->dma_channel);
clear_dma_irqstat(drv_data->dma_channel);
/* IO mode write then read */
dev_dbg(&drv_data->pdev->dev, "doing IO transfer\n");
- if (drv_data->tx != NULL && drv_data->rx != NULL) {
+ if (full_duplex) {
/* full duplex mode */
BUG_ON((drv_data->tx_end - drv_data->tx) !=
(drv_data->rx_end - drv_data->rx));
/* is clk = pclk / (2 * (pre+1)), or is it
* clk = (pclk * 2) / ( pre + 1) */
- div = (div / 2) - 1;
+ div /= 2;
- if (div < 0)
- div = 1;
+ if (div > 0)
+ div -= 1;
if (div > 255)
div = 255;
status = 0;
done:
DPRINTK("returning = %d", status);
- mntput(mnt);
dput(dentry);
+ mntput(mnt);
return status;
}
/* Can we expire this guy */
if (autofs4_can_expire(dentry, timeout, do_now)) {
expired = dentry;
- break;
+ goto found;
}
goto next;
}
inf->flags |= AUTOFS_INF_EXPIRING;
spin_unlock(&sbi->fs_lock);
expired = dentry;
- break;
+ goto found;
}
spin_unlock(&sbi->fs_lock);
/*
expired = autofs4_check_leaves(mnt, dentry, timeout, do_now);
if (expired) {
dput(dentry);
- break;
+ goto found;
}
}
next:
spin_lock(&dcache_lock);
next = next->next;
}
-
- if (expired) {
- DPRINTK("returning %p %.*s",
- expired, (int)expired->d_name.len, expired->d_name.name);
- spin_lock(&dcache_lock);
- list_move(&expired->d_parent->d_subdirs, &expired->d_u.d_child);
- spin_unlock(&dcache_lock);
- return expired;
- }
spin_unlock(&dcache_lock);
-
return NULL;
+
+found:
+ DPRINTK("returning %p %.*s",
+ expired, (int)expired->d_name.len, expired->d_name.name);
+ spin_lock(&dcache_lock);
+ list_move(&expired->d_parent->d_subdirs, &expired->d_u.d_child);
+ spin_unlock(&dcache_lock);
+ return expired;
}
/* Perform an expiry operation */
if (d_mountpoint(dentry)) {
struct file *fp = NULL;
- struct vfsmount *fp_mnt = mntget(mnt);
- struct dentry *fp_dentry = dget(dentry);
+ struct path fp_path = { .dentry = dentry, .mnt = mnt };
- if (!autofs4_follow_mount(&fp_mnt, &fp_dentry)) {
- dput(fp_dentry);
- mntput(fp_mnt);
+ path_get(&fp_path);
+
+ if (!autofs4_follow_mount(&fp_path.mnt, &fp_path.dentry)) {
+ path_put(&fp_path);
dcache_dir_close(inode, file);
goto out;
}
- fp = dentry_open(fp_dentry, fp_mnt, file->f_flags);
+ fp = dentry_open(fp_path.dentry, fp_path.mnt, file->f_flags);
status = PTR_ERR(fp);
if (IS_ERR(fp)) {
dcache_dir_close(inode, file);
{
struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
struct autofs_info *ino = autofs4_dentry_ino(dentry);
- int status = 0;
+ struct dentry *new;
+ int status;
/* Block on any pending expiry here; invalidate the dentry
when expiration is done to trigger mount request with a new
spin_lock(&dentry->d_lock);
dentry->d_flags &= ~DCACHE_AUTOFS_PENDING;
spin_unlock(&dentry->d_lock);
- return status;
+
+ /*
+ * The dentry that is passed in from lookup may not be the one
+ * we end up using, as mkdir can create a new one. If this
+ * happens, and another process tries the lookup at the same time,
+ * it will set the PENDING flag on this new dentry, but add itself
+ * to our waitq. Then, if after the lookup succeeds, the first
+ * process that requested the mount performs another lookup of the
+ * same directory, it will show up as still pending! So, we need
+ * to redo the lookup here and clear pending on that dentry.
+ */
+ if (d_unhashed(dentry)) {
+ new = d_lookup(dentry->d_parent, &dentry->d_name);
+ if (new) {
+ spin_lock(&new->d_lock);
+ new->d_flags &= ~DCACHE_AUTOFS_PENDING;
+ spin_unlock(&new->d_lock);
+ dput(new);
+ }
+ }
+
+ return 0;
}
/* For autofs direct mounts the follow link triggers the mount */
for (tmp = dentry ; tmp != root ; tmp = tmp->d_parent)
len += tmp->d_name.len + 1;
- if (--len > NAME_MAX) {
+ if (!len || --len > NAME_MAX) {
spin_unlock(&dcache_lock);
return 0;
}
nbytes = min(nbytes, (unsigned) FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT);
npages = (nbytes + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
- npages = min(max(npages, 1), FUSE_MAX_PAGES_PER_REQ);
+ npages = clamp(npages, 1, FUSE_MAX_PAGES_PER_REQ);
down_read(¤t->mm->mmap_sem);
npages = get_user_pages(current, current->mm, user_addr, npages, write,
0, req->pages, NULL);
#endif
+static bool nsec_special(long nsec)
+{
+ return nsec == UTIME_OMIT || nsec == UTIME_NOW;
+}
+
static bool nsec_valid(long nsec)
{
- if (nsec == UTIME_OMIT || nsec == UTIME_NOW)
+ if (nsec_special(nsec))
return true;
return nsec >= 0 && nsec <= 999999999;
newattrs.ia_mtime.tv_nsec = times[1].tv_nsec;
newattrs.ia_valid |= ATTR_MTIME_SET;
}
- } else {
+ }
+
+ /*
+ * If times is NULL or both times are either UTIME_OMIT or
+ * UTIME_NOW, then we need to check permissions, because
+ * inode_change_ok() won't do it.
+ */
+ if (!times || (nsec_special(times[0].tv_nsec) &&
+ nsec_special(times[1].tv_nsec))) {
error = -EACCES;
if (IS_IMMUTABLE(inode))
goto mnt_drop_write_and_out;
#endif
-extern uint64_t div64_64(uint64_t dividend, uint64_t divisor);
-
#endif
* 2 of the License, or (at your option) any later version.
*/
-#ifndef _ASM_FRV_UNALIGNED_H
-#define _ASM_FRV_UNALIGNED_H
+#ifndef _ASM_UNALIGNED_H
+#define _ASM_UNALIGNED_H
#include <linux/unaligned/le_byteshift.h>
#include <linux/unaligned/be_byteshift.h>
#define get_unaligned __get_unaligned_be
#define put_unaligned __put_unaligned_be
-#endif /* _ASM_FRV_UNALIGNED_H */
+#endif /* _ASM_UNALIGNED_H */
__rem; \
})
-static inline uint64_t div64_64(uint64_t dividend, uint64_t divisor)
-{
- return dividend / divisor;
-}
-
#elif BITS_PER_LONG == 32
extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);
__rem; \
})
-extern uint64_t div64_64(uint64_t dividend, uint64_t divisor);
-
#else /* BITS_PER_LONG == ?? */
# error do_div() does not yet support the C64
#include <asm/io.h>
+/* Use normal IO mappings for DMI */
+#define dmi_ioremap ioremap
+#define dmi_iounmap(x,l) iounmap(x)
+#define dmi_alloc(l) kmalloc(l, GFP_ATOMIC)
+
#endif
extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
extern void iounmap (volatile void __iomem *addr);
-/* Use normal IO mappings for DMI */
-#define dmi_ioremap ioremap
-#define dmi_iounmap(x,l) iounmap(x)
-#define dmi_alloc(l) kmalloc(l, GFP_ATOMIC)
-
/*
* String version of IO memory access ops:
*/
__rem; \
})
-extern uint64_t div64_64(uint64_t dividend, uint64_t divisor);
#endif /* _M68K_DIV64_H */
/*
* Set number of channels of DMA on ColdFire for different implementations.
*/
-#if defined(CONFIG_M5249) || defined(CONFIG_M5307) || defined(CONFIG_M5407)
+#if defined(CONFIG_M5249) || defined(CONFIG_M5307) || defined(CONFIG_M5407) || \
+ defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x)
#define MAX_M68K_DMA_CHANNELS 4
#elif defined(CONFIG_M5272)
#define MAX_M68K_DMA_CHANNELS 1
#ifndef _M68KNOMMU_PARAM_H
#define _M68KNOMMU_PARAM_H
-#define HZ CONFIG_HZ
-
#ifdef __KERNEL__
+#define HZ CONFIG_HZ
#define USER_HZ HZ
#define CLOCKS_PER_SEC (USER_HZ)
#endif
+#ifndef HZ
+#define HZ 100
+#endif
+
#define EXEC_PAGESIZE 4096
#ifndef NOGROUP
(n) = __quot; \
__mod; })
-extern uint64_t div64_64(uint64_t dividend, uint64_t divisor);
#endif /* (_MIPS_SZLONG == 32) */
#if (_MIPS_SZLONG == 64)
(n) = __quot; \
__mod; })
-static inline uint64_t div64_64(uint64_t dividend, uint64_t divisor)
-{
- return dividend / divisor;
-}
-
#endif /* (_MIPS_SZLONG == 64) */
#endif /* _ASM_DIV64_H */
return result;
}
-extern __attribute__((const))
-uint64_t div64_64(uint64_t dividend, uint64_t divisor);
-
#endif /* _ASM_DIV64 */
#include "asm/arch/div64.h"
-extern uint64_t div64_64(uint64_t dividend, uint64_t divisor);
#endif
__mod; \
})
-/*
- * (long)X = ((long long)divs) / (long)div
- * (long)rem = ((long long)divs) % (long)div
- *
- * Warning, this will do an exception if X overflows.
- */
-#define div_long_long_rem(a, b, c) div_ll_X_l_rem(a, b, c)
-
-static inline long div_ll_X_l_rem(long long divs, long div, long *rem)
+static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
- long dum2;
- asm("divl %2":"=a"(dum2), "=d"(*rem)
- : "rm"(div), "A"(divs));
-
- return dum2;
-
+ union {
+ u64 v64;
+ u32 v32[2];
+ } d = { dividend };
+ u32 upper;
+
+ upper = d.v32[1];
+ d.v32[1] = 0;
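+	/* reduce the high word first so the quotient of the divl below fits in 32 bits */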
+ if (upper >= divisor) {
+ d.v32[1] = upper / divisor;
+ upper %= divisor;
+ }
+ asm ("divl %2" : "=a" (d.v32[0]), "=d" (*remainder) :
+ "rm" (divisor), "0" (d.v32[0]), "1" (upper));
+ return d.v64;
}
-
-extern uint64_t div64_64(uint64_t dividend, uint64_t divisor);
+#define div_u64_rem div_u64_rem
#else
# include <asm-generic/div64.h>
#endif
+/* Use early IO mappings for DMI because it's initialized early */
#define dmi_ioremap early_ioremap
#define dmi_iounmap early_iounmap
extern void early_iounmap(void *addr, unsigned long size);
extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
-/* Use early IO mappings for DMI because it's initialized early */
-#define dmi_ioremap early_ioremap
-#define dmi_iounmap early_iounmap
-#define dmi_alloc alloc_bootmem
-
/*
* ISA I/O bus memory addresses are 1:1 with the physical address.
*/
struct scatterlist *sg2)
{
sg_set_page(&sg1[num - 1], (void *)sg2, 0, 0);
+ sg1[num - 1].page_link &= ~0x02;
}
static inline struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg)
{
+ if (sg_is_last(sg))
+ return NULL;
+
return (++sg)->length ? sg : (void *)sg_page(sg);
}
+++ /dev/null
-#ifndef _LINUX_CALC64_H
-#define _LINUX_CALC64_H
-
-#include <linux/types.h>
-#include <asm/div64.h>
-
-/*
- * This is a generic macro which is used when the architecture
- * specific div64.h does not provide a optimized one.
- *
- * The 64bit dividend is divided by the divisor (data type long), the
- * result is returned and the remainder stored in the variable
- * referenced by remainder (data type long *). In contrast to the
- * do_div macro the dividend is kept intact.
- */
-#ifndef div_long_long_rem
-#define div_long_long_rem(dividend, divisor, remainder) \
- do_div_llr((dividend), divisor, remainder)
-
-static inline unsigned long do_div_llr(const long long dividend,
- const long divisor, long *remainder)
-{
- u64 result = dividend;
-
- *(remainder) = do_div(result, divisor);
- return (unsigned long) result;
-}
-#endif
-
-/*
- * Sign aware variation of the above. On some architectures a
- * negative dividend leads to an divide overflow exception, which
- * is avoided by the sign check.
- */
-static inline long div_long_long_rem_signed(const long long dividend,
- const long divisor, long *remainder)
-{
- long res;
-
- if (unlikely(dividend < 0)) {
- res = -div_long_long_rem(-dividend, divisor, remainder);
- *remainder = -(*remainder);
- } else
- res = div_long_long_rem(dividend, divisor, remainder);
-
- return res;
-}
-
-#endif
#endif
};
+extern struct clocksource *clock; /* current clocksource */
+
/*
* Clock source flags bits::
*/
compat_long_t calcnt;
compat_long_t errcnt;
compat_long_t stbcnt;
+ compat_int_t tai;
compat_int_t :32; compat_int_t :32; compat_int_t :32; compat_int_t :32;
compat_int_t :32; compat_int_t :32; compat_int_t :32; compat_int_t :32;
- compat_int_t :32; compat_int_t :32; compat_int_t :32; compat_int_t :32;
+ compat_int_t :32; compat_int_t :32; compat_int_t :32;
};
#define _COMPAT_NSIG_WORDS (_COMPAT_NSIG / _COMPAT_NSIG_BPW)
#ifndef _LINUX_JIFFIES_H
#define _LINUX_JIFFIES_H
-#include <linux/calc64.h>
+#include <linux/math64.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/time.h>
--- /dev/null
+#ifndef _LINUX_MATH64_H
+#define _LINUX_MATH64_H
+
+#include <linux/types.h>
+#include <asm/div64.h>
+
+#if BITS_PER_LONG == 64
+
+/**
+ * div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder
+ *
+ * This is commonly provided by 32bit archs as an optimized 64bit
+ * divide.
+ */
+static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
+{
+ *remainder = dividend % divisor;
+ return dividend / divisor;
+}
+
+/**
+ * div_s64_rem - signed 64bit divide with 32bit divisor with remainder
+ */
+static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
+{
+ *remainder = dividend % divisor;
+ return dividend / divisor;
+}
+
+/**
+ * div64_u64 - unsigned 64bit divide with 64bit divisor
+ */
+static inline u64 div64_u64(u64 dividend, u64 divisor)
+{
+ return dividend / divisor;
+}
+
+#elif BITS_PER_LONG == 32
+
+#ifndef div_u64_rem
+static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
+{
+ *remainder = do_div(dividend, divisor);
+ return dividend;
+}
+#endif
+
+#ifndef div_s64_rem
+extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
+#endif
+
+#ifndef div64_u64
+extern u64 div64_u64(u64 dividend, u64 divisor);
+#endif
+
+#endif /* BITS_PER_LONG */
+
+/**
+ * div_u64 - unsigned 64bit divide with 32bit divisor
+ *
+ * This is the most common 64bit divide and should be used if possible,
+ * as many 32bit archs can optimize this variant better than a full 64bit
+ * divide.
+ */
+#ifndef div_u64
+static inline u64 div_u64(u64 dividend, u32 divisor)
+{
+ u32 remainder;
+ return div_u64_rem(dividend, divisor, &remainder);
+}
+#endif
+
+/**
+ * div_s64 - signed 64bit divide with 32bit divisor
+ */
+#ifndef div_s64
+static inline s64 div_s64(s64 dividend, s32 divisor)
+{
+ s32 remainder;
+ return div_s64_rem(dividend, divisor, &remainder);
+}
+#endif
+
+#endif /* _LINUX_MATH64_H */
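As a quick illustration of the new interface (not part of the patch; the values are made up), a caller can split a nanosecond count into whole seconds plus a remainder, or do a plain 64bit/32bit divide, without open-coding do_div():

	u64 ns = 3123456789ULL;
	u32 rem;
	u64 sec  = div_u64_rem(ns, 1000000000, &rem);	/* sec = 3, rem = 123456789 */
	u64 usec = div_u64(ns, 1000);			/* 3123456 */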
#define PCI_DEVICE_ID_NEO_2DB9PRI 0x00C9
#define PCI_DEVICE_ID_NEO_2RJ45 0x00CA
#define PCI_DEVICE_ID_NEO_2RJ45PRI 0x00CB
+#define PCIE_DEVICE_ID_NEO_4_IBM 0x00F4
#define PCI_VENDOR_ID_XIRCOM 0x115d
#define PCI_DEVICE_ID_XIRCOM_RBM56G 0x0101
((type) == USRQUOTA ? (sb_dqopt(sb)->flags & DQUOT_USR_SUSPENDED) : \
(sb_dqopt(sb)->flags & DQUOT_GRP_SUSPENDED))
+#define sb_any_quota_suspended(sb) (sb_has_quota_suspended(sb, USRQUOTA) | \
+ sb_has_quota_suspended(sb, GRPQUOTA))
+
int register_quota_format(struct quota_format_type *fmt);
void unregister_quota_format(struct quota_format_type *fmt);
extern char **argv_split(gfp_t gfp, const char *str, int *argcp);
extern void argv_free(char **argv);
+extern bool sysfs_streq(const char *s1, const char *s2);
+
#endif
#endif /* _LINUX_STRING_H_ */
return 0;
}
+static inline int sysfs_update_group(struct kobject *kobj,
+ const struct attribute_group *grp)
+{
+ return 0;
+}
+
static inline void sysfs_remove_group(struct kobject *kobj,
const struct attribute_group *grp)
{
#include <asm/param.h>
+#define NTP_API 4 /* NTP API version */
+
/*
* SHIFT_KG and SHIFT_KF establish the damping of the PLL and are chosen
* for a slightly underdamped convergence characteristic. SHIFT_KH
#define MAXTC 10 /* maximum time constant (shift) */
/*
- * The SHIFT_UPDATE define establishes the decimal point of the
- * time_offset variable which represents the current offset with
- * respect to standard time.
- *
* SHIFT_USEC defines the scaling (shift) of the time_freq and
* time_tolerance variables, which represent the current frequency
* offset and maximum frequency tolerance.
*/
-#define SHIFT_UPDATE (SHIFT_HZ + 1) /* time offset scale (shift) */
#define SHIFT_USEC 16 /* frequency offset scale (shift) */
-#define SHIFT_NSEC 12 /* kernel frequency offset scale */
-
-#define MAXPHASE 512000L /* max phase error (us) */
-#define MAXFREQ (512L << SHIFT_USEC) /* max frequency error (ppm) */
-#define MAXFREQ_NSEC (512000L << SHIFT_NSEC) /* max frequency error (ppb) */
+#define PPM_SCALE (NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC))
+#define PPM_SCALE_INV_SHIFT 20
+#define PPM_SCALE_INV ((1ll << (PPM_SCALE_INV_SHIFT + NTP_SCALE_SHIFT)) / \
+ PPM_SCALE + 1)
+
+#define MAXPHASE 500000000l /* max phase error (ns) */
+#define MAXFREQ 500000 /* max frequency error (ns/s) */
+#define MAXFREQ_SCALED ((s64)MAXFREQ << NTP_SCALE_SHIFT)
#define MINSEC 256 /* min interval between updates (s) */
#define MAXSEC 2048 /* max interval between updates (s) */
-#define NTP_PHASE_LIMIT (MAXPHASE << 5) /* beyond max. dispersion */
+#define NTP_PHASE_LIMIT ((MAXPHASE / NSEC_PER_USEC) << 5) /* beyond max. dispersion */
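As a worked check of the scaling above (not in the patch): a txc->freq value of 65536, i.e. 1 ppm in the API's 16bit-scaled ppm units, multiplied by PPM_SCALE = 1000 << 16 gives 1000 << 32, which is 1000 ns/s in the NTP_SCALE_SHIFT fixed-point representation, exactly what a 1 ppm frequency offset should be.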
/*
* syscall interface - used (mainly by NTP daemon)
long errcnt; /* calibration errors (ro) */
long stbcnt; /* stability limit exceeded (ro) */
+ int tai; /* TAI offset (ro) */
+
int :32; int :32; int :32; int :32;
int :32; int :32; int :32; int :32;
- int :32; int :32; int :32; int :32;
+ int :32; int :32; int :32;
};
/*
#define ADJ_ESTERROR 0x0008 /* estimated time error */
#define ADJ_STATUS 0x0010 /* clock status */
#define ADJ_TIMECONST 0x0020 /* pll time constant */
+#define ADJ_TAI 0x0080 /* set TAI offset */
+#define ADJ_MICRO 0x1000 /* select microsecond resolution */
+#define ADJ_NANO 0x2000 /* select nanosecond resolution */
#define ADJ_TICK 0x4000 /* tick value */
#define ADJ_OFFSET_SINGLESHOT 0x8001 /* old-fashioned adjtime */
#define ADJ_OFFSET_SS_READ 0xa001 /* read-only adjtime */
#define MOD_ESTERROR ADJ_ESTERROR
#define MOD_STATUS ADJ_STATUS
#define MOD_TIMECONST ADJ_TIMECONST
-#define MOD_CLKB ADJ_TICK
-#define MOD_CLKA ADJ_OFFSET_SINGLESHOT /* 0x8000 in original */
/*
#define STA_PPSERROR 0x0800 /* PPS signal calibration error (ro) */
#define STA_CLOCKERR 0x1000 /* clock hardware fault (ro) */
+#define STA_NANO 0x2000 /* resolution (0 = us, 1 = ns) (ro) */
+#define STA_MODE 0x4000 /* mode (0 = PLL, 1 = FLL) (ro) */
+#define STA_CLK 0x8000 /* clock source (0 = A, 1 = B) (ro) */
+/* read-only bits */
#define STA_RONLY (STA_PPSSIGNAL | STA_PPSJITTER | STA_PPSWANDER | \
- STA_PPSERROR | STA_CLOCKERR) /* read-only bits */
+ STA_PPSERROR | STA_CLOCKERR | STA_NANO | STA_MODE | STA_CLK)
/*
* Clock states (time_state)
extern long time_maxerror; /* maximum error */
extern long time_esterror; /* estimated error */
-extern long time_freq; /* frequency offset (scaled ppm) */
-
extern long time_adjust; /* The amount of adjtime left */
+extern void ntp_init(void);
extern void ntp_clear(void);
/**
__x < 0 ? -(-__x >> __s) : __x >> __s; \
})
-#define TICK_LENGTH_SHIFT 32
+#define NTP_SCALE_SHIFT 32
#ifdef CONFIG_NO_HZ
#define NTP_INTERVAL_FREQ (2)
#endif
#define NTP_INTERVAL_LENGTH (NSEC_PER_SEC/NTP_INTERVAL_FREQ)
-/* Returns how long ticks are at present, in ns / 2^(SHIFT_SCALE-10). */
-extern u64 current_tick_length(void);
+/* Returns how long ticks are at present, in ns / 2^NTP_SCALE_SHIFT. */
+extern u64 tick_length;
extern void second_overflow(void);
extern void update_ntp_one_tick(void);
__put_user(txc.jitcnt, &utp->jitcnt) ||
__put_user(txc.calcnt, &utp->calcnt) ||
__put_user(txc.errcnt, &utp->errcnt) ||
- __put_user(txc.stbcnt, &utp->stbcnt))
+ __put_user(txc.stbcnt, &utp->stbcnt) ||
+ __put_user(txc.tai, &utp->tai))
ret = -EFAULT;
return ret;
}
/* match ? */
- if (system_ram >= start && system_ram <= end) {
+ if (system_ram >= start && system_ram < end) {
*crash_size = size;
break;
}
#include <linux/sched.h>
#include <linux/posix-timers.h>
-#include <asm/uaccess.h>
#include <linux/errno.h>
+#include <linux/math64.h>
+#include <asm/uaccess.h>
static int check_clock(const clockid_t which_clock)
{
union cpu_time_count cpu,
struct timespec *tp)
{
- if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
- tp->tv_sec = div_long_long_rem(cpu.sched,
- NSEC_PER_SEC, &tp->tv_nsec);
- } else {
+ if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED)
+ *tp = ns_to_timespec(cpu.sched);
+ else
cputime_to_timespec(cpu.cpu, tp);
- }
}
static inline int cpu_time_before(const clockid_t which_clock,
se->my_q = cfs_rq;
se->load.weight = tg->shares;
- se->load.inv_weight = div64_64(1ULL<<32, se->load.weight);
+ se->load.inv_weight = div64_u64(1ULL<<32, se->load.weight);
se->parent = parent;
}
#endif
dequeue_entity(cfs_rq, se, 0);
se->load.weight = shares;
- se->load.inv_weight = div64_64((1ULL<<32), shares);
+ se->load.inv_weight = div64_u64((1ULL<<32), shares);
if (on_rq)
enqueue_entity(cfs_rq, se, 0);
if (runtime == RUNTIME_INF)
return 1ULL << 16;
- return div64_64(runtime << 16, period);
+ return div64_u64(runtime << 16, period);
}
#ifdef CONFIG_CGROUP_SCHED
avg_per_cpu = p->se.sum_exec_runtime;
if (p->se.nr_migrations) {
- avg_per_cpu = div64_64(avg_per_cpu,
- p->se.nr_migrations);
+ avg_per_cpu = div64_u64(avg_per_cpu,
+ p->se.nr_migrations);
} else {
avg_per_cpu = -1LL;
}
local_irq_disable();
/* Find end, append list for that CPU. */
- *__get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).head;
- __get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
- per_cpu(tasklet_vec, cpu).head = NULL;
- per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
+ if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
+ *(__get_cpu_var(tasklet_vec).tail) = per_cpu(tasklet_vec, cpu).head;
+ __get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
+ per_cpu(tasklet_vec, cpu).head = NULL;
+ per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
+ }
raise_softirq_irqoff(TASKLET_SOFTIRQ);
- *__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
- __get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
- per_cpu(tasklet_hi_vec, cpu).head = NULL;
- per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
+ if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
+ *__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
+ __get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
+ per_cpu(tasklet_hi_vec, cpu).head = NULL;
+ per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
+ }
raise_softirq_irqoff(HI_SOFTIRQ);
local_irq_enable();
#include <linux/security.h>
#include <linux/fs.h>
#include <linux/slab.h>
+#include <linux/math64.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
struct timespec ns_to_timespec(const s64 nsec)
{
struct timespec ts;
+ s32 rem;
if (!nsec)
return (struct timespec) {0, 0};
- ts.tv_sec = div_long_long_rem_signed(nsec, NSEC_PER_SEC, &ts.tv_nsec);
- if (unlikely(nsec < 0))
- set_normalized_timespec(&ts, ts.tv_sec, ts.tv_nsec);
+ ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem);
+ if (unlikely(rem < 0)) {
+ ts.tv_sec--;
+ rem += NSEC_PER_SEC;
+ }
+ ts.tv_nsec = rem;
return ts;
}
* Convert jiffies to nanoseconds and separate with
* one divide.
*/
- u64 nsec = (u64)jiffies * TICK_NSEC;
- value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &value->tv_nsec);
+ u32 rem;
+ value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC,
+ NSEC_PER_SEC, &rem);
+ value->tv_nsec = rem;
}
EXPORT_SYMBOL(jiffies_to_timespec);
* Convert jiffies to nanoseconds and separate with
* one divide.
*/
- u64 nsec = (u64)jiffies * TICK_NSEC;
- long tv_usec;
+ u32 rem;
- value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &tv_usec);
- tv_usec /= NSEC_PER_USEC;
- value->tv_usec = tv_usec;
+ value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC,
+ NSEC_PER_SEC, &rem);
+ value->tv_usec = rem / NSEC_PER_USEC;
}
EXPORT_SYMBOL(jiffies_to_timeval);
return x / (HZ / USER_HZ);
# endif
#else
- u64 tmp = (u64)x * TICK_NSEC;
- do_div(tmp, (NSEC_PER_SEC / USER_HZ));
- return (long)tmp;
+ return div_u64((u64)x * TICK_NSEC, NSEC_PER_SEC / USER_HZ);
#endif
}
EXPORT_SYMBOL(jiffies_to_clock_t);
return ~0UL;
return x * (HZ / USER_HZ);
#else
- u64 jif;
-
/* Don't worry about loss of precision here .. */
if (x >= ~0UL / HZ * USER_HZ)
return ~0UL;
/* .. but do try to contain it here */
- jif = x * (u64) HZ;
- do_div(jif, USER_HZ);
- return jif;
+ return div_u64((u64)x * HZ, USER_HZ);
#endif
}
EXPORT_SYMBOL(clock_t_to_jiffies);
{
#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
# if HZ < USER_HZ
- x *= USER_HZ;
- do_div(x, HZ);
+ x = div_u64(x * USER_HZ, HZ);
# elif HZ > USER_HZ
- do_div(x, HZ / USER_HZ);
+ x = div_u64(x, HZ / USER_HZ);
# else
/* Nothing to do */
# endif
* but even this doesn't overflow in hundreds of years
* in 64 bits, so..
*/
- x *= TICK_NSEC;
- do_div(x, (NSEC_PER_SEC / USER_HZ));
+ x = div_u64(x * TICK_NSEC, (NSEC_PER_SEC / USER_HZ));
#endif
return x;
}
u64 nsec_to_clock_t(u64 x)
{
#if (NSEC_PER_SEC % USER_HZ) == 0
- do_div(x, (NSEC_PER_SEC / USER_HZ));
+ return div_u64(x, NSEC_PER_SEC / USER_HZ);
#elif (USER_HZ % 512) == 0
- x *= USER_HZ/512;
- do_div(x, (NSEC_PER_SEC / 512));
+ return div_u64(x * USER_HZ / 512, NSEC_PER_SEC / 512);
#else
/*
* max relative error 5.7e-8 (1.8s per year) for USER_HZ <= 1024,
* overflow after 64.99 years.
* exact for HZ=60, 72, 90, 120, 144, 180, 300, 600, 900, ...
*/
- x *= 9;
- do_div(x, (unsigned long)((9ull * NSEC_PER_SEC + (USER_HZ/2)) /
- USER_HZ));
+ return div_u64(x * 9, (9ull * NSEC_PER_SEC + (USER_HZ / 2)) / USER_HZ);
#endif
- return x;
}
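As a sanity check of the fallback approximation above (made-up numbers, not part of the patch): with USER_HZ = 60 the divisor evaluates to (9 * 10^9 + 30) / 60 = 150000000, so x = 10^9 ns gives div_u64(9 * 10^9, 150000000) = 60 clock ticks, i.e. exactly one second, in line with the exactness claim in the comment.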
#if (BITS_PER_LONG < 64)
#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/capability.h>
-#include <asm/div64.h>
+#include <linux/math64.h>
+#include <linux/clocksource.h>
#include <asm/timex.h>
/*
*/
unsigned long tick_usec = TICK_USEC; /* USER_HZ period (usec) */
unsigned long tick_nsec; /* ACTHZ period (nsec) */
-static u64 tick_length, tick_length_base;
+u64 tick_length;
+static u64 tick_length_base;
+
+static struct hrtimer leap_timer;
#define MAX_TICKADJ 500 /* microsecs */
#define MAX_TICKADJ_SCALED (((u64)(MAX_TICKADJ * NSEC_PER_USEC) << \
- TICK_LENGTH_SHIFT) / NTP_INTERVAL_FREQ)
+ NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ)
/*
* phase-lock loop variables
/* TIME_ERROR prevents overwriting the CMOS clock */
static int time_state = TIME_OK; /* clock synchronization status */
int time_status = STA_UNSYNC; /* clock status bits */
-static s64 time_offset; /* time adjustment (ns) */
+static long time_tai; /* TAI offset (s) */
+static s64 time_offset; /* time adjustment (ns) */
static long time_constant = 2; /* pll time constant */
long time_maxerror = NTP_PHASE_LIMIT; /* maximum error (us) */
long time_esterror = NTP_PHASE_LIMIT; /* estimated error (us) */
-long time_freq; /* frequency offset (scaled ppm)*/
+static s64 time_freq; /* frequency offset (scaled ns/s)*/
static long time_reftime; /* time at last adjustment (s) */
long time_adjust;
static long ntp_tick_adj;
static void ntp_update_frequency(void)
{
u64 second_length = (u64)(tick_usec * NSEC_PER_USEC * USER_HZ)
- << TICK_LENGTH_SHIFT;
- second_length += (s64)ntp_tick_adj << TICK_LENGTH_SHIFT;
- second_length += (s64)time_freq << (TICK_LENGTH_SHIFT - SHIFT_NSEC);
+ << NTP_SCALE_SHIFT;
+ second_length += (s64)ntp_tick_adj << NTP_SCALE_SHIFT;
+ second_length += time_freq;
tick_length_base = second_length;
- do_div(second_length, HZ);
- tick_nsec = second_length >> TICK_LENGTH_SHIFT;
+ tick_nsec = div_u64(second_length, HZ) >> NTP_SCALE_SHIFT;
+ tick_length_base = div_u64(tick_length_base, NTP_INTERVAL_FREQ);
+}
+
+static void ntp_update_offset(long offset)
+{
+ long mtemp;
+ s64 freq_adj;
+
+ if (!(time_status & STA_PLL))
+ return;
- do_div(tick_length_base, NTP_INTERVAL_FREQ);
+ if (!(time_status & STA_NANO))
+ offset *= NSEC_PER_USEC;
+
+ /*
+ * Scale the phase adjustment and
+ * clamp to the operating range.
+ */
+ offset = min(offset, MAXPHASE);
+ offset = max(offset, -MAXPHASE);
+
+ /*
+ * Select how the frequency is to be controlled
+ * and in which mode (PLL or FLL).
+ */
+ if (time_status & STA_FREQHOLD || time_reftime == 0)
+ time_reftime = xtime.tv_sec;
+ mtemp = xtime.tv_sec - time_reftime;
+ time_reftime = xtime.tv_sec;
+
+ freq_adj = (s64)offset * mtemp;
+ freq_adj <<= NTP_SCALE_SHIFT - 2 * (SHIFT_PLL + 2 + time_constant);
+ time_status &= ~STA_MODE;
+ if (mtemp >= MINSEC && (time_status & STA_FLL || mtemp > MAXSEC)) {
+ freq_adj += div_s64((s64)offset << (NTP_SCALE_SHIFT - SHIFT_FLL),
+ mtemp);
+ time_status |= STA_MODE;
+ }
+ freq_adj += time_freq;
+ freq_adj = min(freq_adj, MAXFREQ_SCALED);
+ time_freq = max(freq_adj, -MAXFREQ_SCALED);
+
+ time_offset = div_s64((s64)offset << NTP_SCALE_SHIFT, NTP_INTERVAL_FREQ);
}
/**
}
/*
- * this routine handles the overflow of the microsecond field
- *
- * The tricky bits of code to handle the accurate clock support
- * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
- * They were originally developed for SUN and DEC kernels.
- * All the kudos should go to Dave for this stuff.
+ * Leap second processing. If in leap-insert state at the end of the
+ * day, the system clock is set back one second; if in leap-delete
+ * state, the system clock is set ahead one second.
*/
-void second_overflow(void)
+static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
{
- long time_adj;
+ enum hrtimer_restart res = HRTIMER_NORESTART;
- /* Bump the maxerror field */
- time_maxerror += MAXFREQ >> SHIFT_USEC;
- if (time_maxerror > NTP_PHASE_LIMIT) {
- time_maxerror = NTP_PHASE_LIMIT;
- time_status |= STA_UNSYNC;
- }
+ write_seqlock_irq(&xtime_lock);
- /*
- * Leap second processing. If in leap-insert state at the end of the
- * day, the system clock is set back one second; if in leap-delete
- * state, the system clock is set ahead one second. The microtime()
- * routine or external clock driver will insure that reported time is
- * always monotonic. The ugly divides should be replaced.
- */
switch (time_state) {
case TIME_OK:
- if (time_status & STA_INS)
- time_state = TIME_INS;
- else if (time_status & STA_DEL)
- time_state = TIME_DEL;
break;
case TIME_INS:
- if (xtime.tv_sec % 86400 == 0) {
- xtime.tv_sec--;
- wall_to_monotonic.tv_sec++;
- time_state = TIME_OOP;
- printk(KERN_NOTICE "Clock: inserting leap second "
- "23:59:60 UTC\n");
- }
+ xtime.tv_sec--;
+ wall_to_monotonic.tv_sec++;
+ time_state = TIME_OOP;
+ printk(KERN_NOTICE "Clock: "
+ "inserting leap second 23:59:60 UTC\n");
+ leap_timer.expires = ktime_add_ns(leap_timer.expires,
+ NSEC_PER_SEC);
+ res = HRTIMER_RESTART;
break;
case TIME_DEL:
- if ((xtime.tv_sec + 1) % 86400 == 0) {
- xtime.tv_sec++;
- wall_to_monotonic.tv_sec--;
- time_state = TIME_WAIT;
- printk(KERN_NOTICE "Clock: deleting leap second "
- "23:59:59 UTC\n");
- }
+ xtime.tv_sec++;
+ time_tai--;
+ wall_to_monotonic.tv_sec--;
+ time_state = TIME_WAIT;
+ printk(KERN_NOTICE "Clock: "
+ "deleting leap second 23:59:59 UTC\n");
break;
case TIME_OOP:
+ time_tai++;
time_state = TIME_WAIT;
- break;
+ /* fall through */
case TIME_WAIT:
if (!(time_status & (STA_INS | STA_DEL)))
- time_state = TIME_OK;
+ time_state = TIME_OK;
+ break;
+ }
+ update_vsyscall(&xtime, clock);
+
+ write_sequnlock_irq(&xtime_lock);
+
+ return res;
+}
+
+/*
+ * this routine handles the overflow of the microsecond field
+ *
+ * The tricky bits of code to handle the accurate clock support
+ * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
+ * They were originally developed for SUN and DEC kernels.
+ * All the kudos should go to Dave for this stuff.
+ */
+void second_overflow(void)
+{
+ s64 time_adj;
+
+ /* Bump the maxerror field */
+ time_maxerror += MAXFREQ / NSEC_PER_USEC;
+ if (time_maxerror > NTP_PHASE_LIMIT) {
+ time_maxerror = NTP_PHASE_LIMIT;
+ time_status |= STA_UNSYNC;
}
/*
tick_length = tick_length_base;
time_adj = shift_right(time_offset, SHIFT_PLL + time_constant);
time_offset -= time_adj;
- tick_length += (s64)time_adj << (TICK_LENGTH_SHIFT - SHIFT_UPDATE);
+ tick_length += time_adj;
if (unlikely(time_adjust)) {
if (time_adjust > MAX_TICKADJ) {
tick_length -= MAX_TICKADJ_SCALED;
} else {
tick_length += (s64)(time_adjust * NSEC_PER_USEC /
- NTP_INTERVAL_FREQ) << TICK_LENGTH_SHIFT;
+ NTP_INTERVAL_FREQ) << NTP_SCALE_SHIFT;
time_adjust = 0;
}
}
}
-/*
- * Return how long ticks are at the moment, that is, how much time
- * update_wall_time_one_tick will add to xtime next time we call it
- * (assuming no calls to do_adjtimex in the meantime).
- * The return value is in fixed-point nanoseconds shifted by the
- * specified number of bits to the right of the binary point.
- * This function has no side-effects.
- */
-u64 current_tick_length(void)
-{
- return tick_length;
-}
-
#ifdef CONFIG_GENERIC_CMOS_UPDATE
/* Disable the cmos update - used by virtualization and embedded */
*/
int do_adjtimex(struct timex *txc)
{
- long mtemp, save_adjust, rem;
- s64 freq_adj, temp64;
+ struct timespec ts;
+ long save_adjust, sec;
int result;
/* In order to modify anything, you gotta be super-user! */
/* Now we validate the data before disabling interrupts */
if ((txc->modes & ADJ_OFFSET_SINGLESHOT) == ADJ_OFFSET_SINGLESHOT) {
- /* singleshot must not be used with any other mode bits */
- if (txc->modes != ADJ_OFFSET_SINGLESHOT &&
- txc->modes != ADJ_OFFSET_SS_READ)
+ /* singleshot must not be used with any other mode bits */
+ if (txc->modes & ~ADJ_OFFSET_SS_READ)
return -EINVAL;
}
- if (txc->modes != ADJ_OFFSET_SINGLESHOT && (txc->modes & ADJ_OFFSET))
- /* adjustment Offset limited to +- .512 seconds */
- if (txc->offset <= - MAXPHASE || txc->offset >= MAXPHASE )
- return -EINVAL;
-
/* if the quartz is off by more than 10% something is VERY wrong ! */
if (txc->modes & ADJ_TICK)
if (txc->tick < 900000/USER_HZ ||
txc->tick > 1100000/USER_HZ)
return -EINVAL;
+ if (time_state != TIME_OK && txc->modes & ADJ_STATUS)
+ hrtimer_cancel(&leap_timer);
+ getnstimeofday(&ts);
+
write_seqlock_irq(&xtime_lock);
- result = time_state; /* mostly `TIME_OK' */
/* Save for later - semantics of adjtime is to return old value */
save_adjust = time_adjust;
-#if 0 /* STA_CLOCKERR is never set yet */
- time_status &= ~STA_CLOCKERR; /* reset STA_CLOCKERR */
-#endif
/* If there are input parameters, then process them */
- if (txc->modes)
- {
- if (txc->modes & ADJ_STATUS) /* only set allowed bits */
- time_status = (txc->status & ~STA_RONLY) |
- (time_status & STA_RONLY);
-
- if (txc->modes & ADJ_FREQUENCY) { /* p. 22 */
- if (txc->freq > MAXFREQ || txc->freq < -MAXFREQ) {
- result = -EINVAL;
- goto leave;
- }
- time_freq = ((s64)txc->freq * NSEC_PER_USEC)
- >> (SHIFT_USEC - SHIFT_NSEC);
- }
-
- if (txc->modes & ADJ_MAXERROR) {
- if (txc->maxerror < 0 || txc->maxerror >= NTP_PHASE_LIMIT) {
- result = -EINVAL;
- goto leave;
+ if (txc->modes) {
+ if (txc->modes & ADJ_STATUS) {
+ if ((time_status & STA_PLL) &&
+ !(txc->status & STA_PLL)) {
+ time_state = TIME_OK;
+ time_status = STA_UNSYNC;
+ }
+ /* only set allowed bits */
+ time_status &= STA_RONLY;
+ time_status |= txc->status & ~STA_RONLY;
+
+ switch (time_state) {
+ case TIME_OK:
+ start_timer:
+ sec = ts.tv_sec;
+ if (time_status & STA_INS) {
+ time_state = TIME_INS;
+ sec += 86400 - sec % 86400;
+ hrtimer_start(&leap_timer, ktime_set(sec, 0), HRTIMER_MODE_ABS);
+ } else if (time_status & STA_DEL) {
+ time_state = TIME_DEL;
+ sec += 86400 - (sec + 1) % 86400;
+ hrtimer_start(&leap_timer, ktime_set(sec, 0), HRTIMER_MODE_ABS);
+ }
+ break;
+ case TIME_INS:
+ case TIME_DEL:
+ time_state = TIME_OK;
+ goto start_timer;
+ break;
+ case TIME_WAIT:
+ if (!(time_status & (STA_INS | STA_DEL)))
+ time_state = TIME_OK;
+ break;
+ case TIME_OOP:
+ hrtimer_restart(&leap_timer);
+ break;
+ }
}
- time_maxerror = txc->maxerror;
- }
- if (txc->modes & ADJ_ESTERROR) {
- if (txc->esterror < 0 || txc->esterror >= NTP_PHASE_LIMIT) {
- result = -EINVAL;
- goto leave;
+ if (txc->modes & ADJ_NANO)
+ time_status |= STA_NANO;
+ if (txc->modes & ADJ_MICRO)
+ time_status &= ~STA_NANO;
+
+ if (txc->modes & ADJ_FREQUENCY) {
+ time_freq = (s64)txc->freq * PPM_SCALE;
+ time_freq = min(time_freq, MAXFREQ_SCALED);
+ time_freq = max(time_freq, -MAXFREQ_SCALED);
}
- time_esterror = txc->esterror;
- }
- if (txc->modes & ADJ_TIMECONST) { /* p. 24 */
- if (txc->constant < 0) { /* NTP v4 uses values > 6 */
- result = -EINVAL;
- goto leave;
+ if (txc->modes & ADJ_MAXERROR)
+ time_maxerror = txc->maxerror;
+ if (txc->modes & ADJ_ESTERROR)
+ time_esterror = txc->esterror;
+
+ if (txc->modes & ADJ_TIMECONST) {
+ time_constant = txc->constant;
+ if (!(time_status & STA_NANO))
+ time_constant += 4;
+ time_constant = min(time_constant, (long)MAXTC);
+ time_constant = max(time_constant, 0l);
}
- time_constant = min(txc->constant + 4, (long)MAXTC);
- }
- if (txc->modes & ADJ_OFFSET) { /* values checked earlier */
- if (txc->modes == ADJ_OFFSET_SINGLESHOT) {
- /* adjtime() is independent from ntp_adjtime() */
- time_adjust = txc->offset;
+ if (txc->modes & ADJ_TAI && txc->constant > 0)
+ time_tai = txc->constant;
+
+ if (txc->modes & ADJ_OFFSET) {
+ if (txc->modes == ADJ_OFFSET_SINGLESHOT)
+ /* adjtime() is independent from ntp_adjtime() */
+ time_adjust = txc->offset;
+ else
+ ntp_update_offset(txc->offset);
}
- else if (time_status & STA_PLL) {
- time_offset = txc->offset * NSEC_PER_USEC;
-
- /*
- * Scale the phase adjustment and
- * clamp to the operating range.
- */
- time_offset = min(time_offset, (s64)MAXPHASE * NSEC_PER_USEC);
- time_offset = max(time_offset, (s64)-MAXPHASE * NSEC_PER_USEC);
-
- /*
- * Select whether the frequency is to be controlled
- * and in which mode (PLL or FLL). Clamp to the operating
- * range. Ugly multiply/divide should be replaced someday.
- */
-
- if (time_status & STA_FREQHOLD || time_reftime == 0)
- time_reftime = xtime.tv_sec;
- mtemp = xtime.tv_sec - time_reftime;
- time_reftime = xtime.tv_sec;
-
- freq_adj = time_offset * mtemp;
- freq_adj = shift_right(freq_adj, time_constant * 2 +
- (SHIFT_PLL + 2) * 2 - SHIFT_NSEC);
- if (mtemp >= MINSEC && (time_status & STA_FLL || mtemp > MAXSEC)) {
- u64 utemp64;
- temp64 = time_offset << (SHIFT_NSEC - SHIFT_FLL);
- if (time_offset < 0) {
- utemp64 = -temp64;
- do_div(utemp64, mtemp);
- freq_adj -= utemp64;
- } else {
- utemp64 = temp64;
- do_div(utemp64, mtemp);
- freq_adj += utemp64;
- }
- }
- freq_adj += time_freq;
- freq_adj = min(freq_adj, (s64)MAXFREQ_NSEC);
- time_freq = max(freq_adj, (s64)-MAXFREQ_NSEC);
- time_offset = div_long_long_rem_signed(time_offset,
- NTP_INTERVAL_FREQ,
- &rem);
- time_offset <<= SHIFT_UPDATE;
- } /* STA_PLL */
- } /* txc->modes & ADJ_OFFSET */
- if (txc->modes & ADJ_TICK)
- tick_usec = txc->tick;
-
- if (txc->modes & (ADJ_TICK|ADJ_FREQUENCY|ADJ_OFFSET))
- ntp_update_frequency();
- } /* txc->modes */
-leave: if ((time_status & (STA_UNSYNC|STA_CLOCKERR)) != 0)
+ if (txc->modes & ADJ_TICK)
+ tick_usec = txc->tick;
+
+ if (txc->modes & (ADJ_TICK|ADJ_FREQUENCY|ADJ_OFFSET))
+ ntp_update_frequency();
+ }
+
+ result = time_state; /* mostly `TIME_OK' */
+ if (time_status & (STA_UNSYNC|STA_CLOCKERR))
result = TIME_ERROR;
if ((txc->modes == ADJ_OFFSET_SINGLESHOT) ||
- (txc->modes == ADJ_OFFSET_SS_READ))
+ (txc->modes == ADJ_OFFSET_SS_READ))
txc->offset = save_adjust;
- else
- txc->offset = ((long)shift_right(time_offset, SHIFT_UPDATE)) *
- NTP_INTERVAL_FREQ / 1000;
- txc->freq = (time_freq / NSEC_PER_USEC) <<
- (SHIFT_USEC - SHIFT_NSEC);
+ else {
+ txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ,
+ NTP_SCALE_SHIFT);
+ if (!(time_status & STA_NANO))
+ txc->offset /= NSEC_PER_USEC;
+ }
+ txc->freq = shift_right((s32)(time_freq >> PPM_SCALE_INV_SHIFT) *
+ (s64)PPM_SCALE_INV,
+ NTP_SCALE_SHIFT);
txc->maxerror = time_maxerror;
txc->esterror = time_esterror;
txc->status = time_status;
txc->constant = time_constant;
txc->precision = 1;
- txc->tolerance = MAXFREQ;
+ txc->tolerance = MAXFREQ_SCALED / PPM_SCALE;
txc->tick = tick_usec;
+ txc->tai = time_tai;
/* PPS is not implemented, so these are zero */
txc->ppsfreq = 0;
txc->errcnt = 0;
txc->stbcnt = 0;
write_sequnlock_irq(&xtime_lock);
- do_gettimeofday(&txc->time);
+
+ txc->time.tv_sec = ts.tv_sec;
+ txc->time.tv_usec = ts.tv_nsec;
+ if (!(time_status & STA_NANO))
+ txc->time.tv_usec /= NSEC_PER_USEC;
+
notify_cmos_timer();
- return(result);
+
+ return result;
}
static int __init ntp_tick_adj_setup(char *str)
}
__setup("ntp_tick_adj=", ntp_tick_adj_setup);
+
+void __init ntp_init(void)
+{
+ ntp_clear();
+ hrtimer_init(&leap_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
+ leap_timer.function = ntp_leap_second;
+}
timespec_add_ns(&xtime_cache, nsec);
}
-static struct clocksource *clock; /* pointer to current clocksource */
+struct clocksource *clock;
#ifdef CONFIG_GENERIC_TIME
write_seqlock_irqsave(&xtime_lock, flags);
- ntp_clear();
+ ntp_init();
clock = clocksource_get_next();
clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
* here. This is tuned so that an error of about 1 msec is adjusted
* within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
*/
- error2 = clock->error >> (TICK_LENGTH_SHIFT + 22 - 2 * SHIFT_HZ);
+ error2 = clock->error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
error2 = abs(error2);
for (look_ahead = 0; error2 > 0; look_ahead++)
error2 >>= 2;
* Now calculate the error in (1 << look_ahead) ticks, but first
* remove the single look ahead already included in the error.
*/
- tick_error = current_tick_length() >>
- (TICK_LENGTH_SHIFT - clock->shift + 1);
+ tick_error = tick_length >> (NTP_SCALE_SHIFT - clock->shift + 1);
tick_error -= clock->xtime_interval >> 1;
error = ((error - tick_error) >> look_ahead) + tick_error;
s64 error, interval = clock->cycle_interval;
int adj;
- error = clock->error >> (TICK_LENGTH_SHIFT - clock->shift - 1);
+ error = clock->error >> (NTP_SCALE_SHIFT - clock->shift - 1);
if (error > interval) {
error >>= 2;
if (likely(error <= interval))
clock->xtime_interval += interval;
clock->xtime_nsec -= offset;
clock->error -= (interval - offset) <<
- (TICK_LENGTH_SHIFT - clock->shift);
+ (NTP_SCALE_SHIFT - clock->shift);
}
/**
}
/* accumulate error between NTP and clock interval */
- clock->error += current_tick_length();
- clock->error -= clock->xtime_interval << (TICK_LENGTH_SHIFT - clock->shift);
+ clock->error += tick_length;
+ clock->error -= clock->xtime_interval << (NTP_SCALE_SHIFT - clock->shift);
}
/* correct the clock when NTP error is too big */
int queue_delayed_work(struct workqueue_struct *wq,
struct delayed_work *dwork, unsigned long delay)
{
- timer_stats_timer_set_start_info(&dwork->timer);
if (delay == 0)
return queue_work(wq, &dwork->work);
struct timer_list *timer = &dwork->timer;
struct work_struct *work = &dwork->work;
- timer_stats_timer_set_start_info(&dwork->timer);
if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
BUG_ON(timer_pending(timer));
BUG_ON(!list_empty(&work->entry));
+ timer_stats_timer_set_start_info(&dwork->timer);
+
/* This stores cwq for the moment, for the timer_fn */
set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
timer->expires = jiffies + delay;
int schedule_delayed_work(struct delayed_work *dwork,
unsigned long delay)
{
- timer_stats_timer_set_start_info(&dwork->timer);
return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);
int schedule_delayed_work_on(int cpu,
struct delayed_work *dwork, unsigned long delay)
{
- timer_stats_timer_set_start_info(&dwork->timer);
return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);
* assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S.
*/
-#include <linux/types.h>
#include <linux/module.h>
-#include <asm/div64.h>
+#include <linux/math64.h>
/* Not needed on 64bit architectures */
#if BITS_PER_LONG == 32
EXPORT_SYMBOL(__div64_32);
+#ifndef div_s64_rem
+s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
+{
+ u64 quotient;
+
+ if (dividend < 0) {
+ quotient = div_u64_rem(-dividend, abs(divisor), (u32 *)remainder);
+ *remainder = -*remainder;
+ if (divisor > 0)
+ quotient = -quotient;
+ } else {
+ quotient = div_u64_rem(dividend, abs(divisor), (u32 *)remainder);
+ if (divisor < 0)
+ quotient = -quotient;
+ }
+ return quotient;
+}
+EXPORT_SYMBOL(div_s64_rem);
+#endif
+
/* 64bit divisor, dividend and result. dynamic precision */
-uint64_t div64_64(uint64_t dividend, uint64_t divisor)
+#ifndef div64_u64
+u64 div64_u64(u64 dividend, u64 divisor)
{
- uint32_t high, d;
+ u32 high, d;
high = divisor >> 32;
if (high) {
} else
d = divisor;
- do_div(dividend, d);
-
- return dividend;
+ return div_u64(dividend, d);
}
-EXPORT_SYMBOL(div64_64);
+EXPORT_SYMBOL(div64_u64);
+#endif
#endif /* BITS_PER_LONG == 32 */
while (idp->id_free_cnt >= IDR_FREE_MAX) {
p = alloc_layer(idp);
kmem_cache_free(idr_layer_cache, p);
- return;
}
+ return;
}
EXPORT_SYMBOL(idr_remove);
EXPORT_SYMBOL(strsep);
#endif
+/**
+ * sysfs_streq - return true if strings are equal, modulo trailing newline
+ * @s1: one string
+ * @s2: another string
+ *
+ * This routine returns true iff two strings are equal, treating both
+ * NUL and newline-then-NUL as equivalent string terminations. It's
+ * geared for use with sysfs input strings, which generally terminate
+ * with newlines but are compared against values without newlines.
+ */
+bool sysfs_streq(const char *s1, const char *s2)
+{
+ while (*s1 && *s1 == *s2) {
+ s1++;
+ s2++;
+ }
+
+ if (*s1 == *s2)
+ return true;
+ if (!*s1 && *s2 == '\n' && !s2[1])
+ return true;
+ if (*s1 == '\n' && !s1[1] && !*s2)
+ return true;
+ return false;
+}
+EXPORT_SYMBOL(sysfs_streq);
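A minimal usage sketch (the attribute and the enable/disable helpers are hypothetical, not part of this patch): a sysfs store method can match the usually newline-terminated input from userspace against fixed keywords directly:

	static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
				  const char *buf, size_t count)
	{
		if (sysfs_streq(buf, "on"))		/* matches "on" and "on\n" */
			enable_feature();		/* hypothetical helper */
		else if (sysfs_streq(buf, "off"))
			disable_feature();		/* hypothetical helper */
		else
			return -EINVAL;
		return count;
	}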
+
#ifndef __HAVE_ARCH_MEMSET
/**
* memset - Fill a region of memory with the given value
*/
MEM_CGROUP_STAT_CACHE, /* # of pages charged as cache */
MEM_CGROUP_STAT_RSS, /* # of pages charged as rss */
+ MEM_CGROUP_STAT_PGPGIN_COUNT, /* # of pages paged in */
+ MEM_CGROUP_STAT_PGPGOUT_COUNT, /* # of pages paged out */
MEM_CGROUP_STAT_NSTATS,
};
__mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_CACHE, val);
else
__mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_RSS, val);
+
+ if (charge)
+ __mem_cgroup_stat_add_safe(stat,
+ MEM_CGROUP_STAT_PGPGIN_COUNT, 1);
+ else
+ __mem_cgroup_stat_add_safe(stat,
+ MEM_CGROUP_STAT_PGPGOUT_COUNT, 1);
}
static struct mem_cgroup_per_zone *
} mem_cgroup_stat_desc[] = {
[MEM_CGROUP_STAT_CACHE] = { "cache", PAGE_SIZE, },
[MEM_CGROUP_STAT_RSS] = { "rss", PAGE_SIZE, },
+ [MEM_CGROUP_STAT_PGPGIN_COUNT] = {"pgpgin", 1, },
+ [MEM_CGROUP_STAT_PGPGOUT_COUNT] = {"pgpgout", 1, },
};
static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/memory.h>
+#include <linux/math64.h>
/*
* Lock order:
len += sprintf(buf + len, "<not-available>");
if (l->sum_time != l->min_time) {
- unsigned long remainder;
-
len += sprintf(buf + len, " age=%ld/%ld/%ld",
- l->min_time,
- div_long_long_rem(l->sum_time, l->count, &remainder),
- l->max_time);
+ l->min_time,
+ (long)div_u64(l->sum_time, l->count),
+ l->max_time);
} else
len += sprintf(buf + len, " age=%ld",
l->min_time);
* @gfp_mask: flags for the page level allocator
* @prot: protection mask for the allocated pages
* @node: node to use for allocation or -1
+ * @caller: caller's return address
*
* Allocate enough pages to cover @size from the page level
* allocator with @gfp_mask flags. Map them into contiguous
#include <linux/mm.h>
#include <linux/module.h>
+#include <linux/math64.h>
#include <net/tcp.h>
-#include <asm/div64.h>
#define BICTCP_BETA_SCALE 1024 /* Scale factor beta calculation
* max_cwnd = snd_cwnd * beta
* x = ( 2 * x + a / x ) / 3
* k+1 k k
*/
- x = (2 * x + (u32)div64_64(a, (u64)x * (u64)(x - 1)));
+ x = (2 * x + (u32)div64_u64(a, (u64)x * (u64)(x - 1)));
x = ((x * 341) >> 10);
return x;
}
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/skbuff.h>
+#include <linux/math64.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_connbytes.h>
#include <net/netfilter/nf_conntrack.h>
-#include <asm/div64.h>
-
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_DESCRIPTION("Xtables: Number of packets/bytes per connection matching");
break;
}
if (pkts != 0)
- what = div64_64(bytes, pkts);
+ what = div64_u64(bytes, pkts);
break;
}