S: Beaverton, Oregon 97005
S: USA
-N: Marcelo W. Tosatti
-E: marcelo.tosatti@cyclades.com
-D: Miscellaneous kernel hacker
+N: Marcelo Tosatti
+E: marcelo@kvack.org
D: v2.4 kernel maintainer
-D: Current pc300/cyclades maintainer
-S: Cyclades Corporation
-S: Av Cristovao Colombo, 462. Floresta.
-S: Porto Alegre
S: Brazil
N: Stefan Traby
These devices support the same API as the generic SCSI
devices.
- 97 block Packet writing for CD/DVD devices
- 0 = /dev/pktcdvd0 First packet-writing module
- 1 = /dev/pktcdvd1 Second packet-writing module
- ...
-
98 char Control and Measurement Device (comedi)
0 = /dev/comedi0 First comedi device
1 = /dev/comedi1 Second comedi device
}
sub nxt2002 {
- my $sourcefile = "Broadband4PC_4_2_11.zip";
+ my $sourcefile = "Technisat_DVB-PC_4_4_COMPACT.zip";
my $url = "http://www.bbti.us/download/windows/$sourcefile";
- my $hash = "c6d2ea47a8f456d887ada0cfb718ff2a";
+ my $hash = "476befae8c7c1bb9648954060b1eec1f";
my $outfile = "dvb-fe-nxt2002.fw";
my $tmpdir = tempdir(DIR => "/tmp", CLEANUP => 1);
wgetfile($sourcefile, $url);
unzip($sourcefile, $tmpdir);
- verify("$tmpdir/SkyNETU.sys", $hash);
- extract("$tmpdir/SkyNETU.sys", 375832, 5908, $outfile);
+ verify("$tmpdir/SkyNET.sys", $hash);
+ extract("$tmpdir/SkyNET.sys", 331624, 5908, $outfile);
$outfile;
}
---------------------------
+What: sbp2: module parameter "force_inquiry_hack"
+When: July 2006
+Why: Superseded by parameter "workarounds". Both parameters are meant to be
+ used ad-hoc and for single devices only, i.e. not in modprobe.conf,
+ therefore the impact of this feature replacement should be low.
+Who: Stefan Richter <stefanr@s5r6.in-berlin.de>
+
+---------------------------
+
What: Video4Linux API 1 ioctls and video_decoder.h from Video devices.
When: July 2006
Why: V4L1 API was replaced by V4L2 API during migration from 2.4 to 2.6
on the setup, so I think that the choice on what firmware to make
persistent should be left to userspace.
- - Why register_firmware()+__init can be useful:
- - For boot devices needing firmware.
- - To make the transition easier:
- The firmware can be declared __init and register_firmware()
- called on module_init. Then the firmware is warranted to be
- there even if "firmware hotplug userspace" is not there yet or
- it doesn't yet provide the needed firmware.
- Once the firmware is widely available in userspace, it can be
- removed from the kernel. Or made optional (CONFIG_.*_FIRMWARE).
-
- In either case, if firmware hotplug support is there, it can move the
- firmware out of kernel memory into the real filesystem for later
- usage.
-
- Note: If persistence is implemented on top of initramfs,
- register_firmware() may not be appropriate.
-
*
* Sample code on how to use request_firmware() from drivers.
*
- * Note that register_firmware() is currently useless.
- *
*/
#include <linux/module.h>
#include "linux/firmware.h"
-#define WE_CAN_NEED_FIRMWARE_BEFORE_USERSPACE_IS_AVAILABLE
-#ifdef WE_CAN_NEED_FIRMWARE_BEFORE_USERSPACE_IS_AVAILABLE
-char __init inkernel_firmware[] = "let's say that this is firmware\n";
-#endif
-
static struct device ghost_device = {
.bus_id = "ghost0",
};
static int sample_init(void)
{
-#ifdef WE_CAN_NEED_FIRMWARE_BEFORE_USERSPACE_IS_AVAILABLE
- register_firmware("sample_driver_fw", inkernel_firmware,
- sizeof(inkernel_firmware));
-#endif
device_initialize(&ghost_device);
/* since there is no real hardware insertion I just call the
* sample probe functions here */
acpi_irq_isa= [HW,ACPI] If irq_balance, mark listed IRQs used by ISA
Format: <irq>,<irq>...
+ acpi_os_name= [HW,ACPI] Tell ACPI BIOS the name of the OS
+ Format: To spoof as Windows 98: ="Microsoft Windows"
+
acpi_osi= [HW,ACPI] empty param disables _OSI
acpi_serialize [HW,ACPI] force serialization of AML methods
- Control dependencies.
- SMP barrier pairing.
- Examples of memory barrier sequences.
+ - Read memory barriers vs load speculation.
(*) Explicit kernel barriers.
we may get either of:
STORE *A = X; Y = LOAD *A;
- STORE *A = Y;
+ STORE *A = Y = X;
=========================
(4) General memory barriers.
- A general memory barrier is a combination of both a read memory barrier
- and a write memory barrier. It is a partial ordering over both loads and
- stores.
+ A general memory barrier gives a guarantee that all the LOAD and STORE
+ operations specified before the barrier will appear to happen before all
+ the LOAD and STORE operations specified after the barrier with respect to
+ the other components of the system.
+
+ A general memory barrier is a partial ordering over both loads and stores.
General memory barriers imply both read and write memory barriers, and so
can substitute for either.
=============== ===============
a = 1;
<write barrier>
- b = 2; x = a;
+ b = 2; x = b;
<read barrier>
- y = b;
+ y = a;
Or:
Basically, the read barrier always has to be there, even though it can be of
the "weaker" type.
+[!] Note that the stores before the write barrier would normally be expected to
+match the loads after the read barrier or data dependency barrier, and vice
+versa:
+
+ CPU 1 CPU 2
+ =============== ===============
+ a = 1; }---- --->{ v = c
+ b = 2; } \ / { w = d
+ <write barrier> \ <read barrier>
+ c = 3; } / \ { x = a;
+ d = 4; }---- --->{ y = b;
+
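As a minimal C sketch of this pairing (not one of the document's own examples; the
variables and the two functions are invented for illustration), the writer's
smp_wmb() pairs with the reader's smp_rmb():

	static int a, b;

	void writer(void)
	{
		a = 1;
		smp_wmb();	/* pairs with the smp_rmb() in reader() */
		b = 2;
	}

	void reader(void)
	{
		int x, y;

		x = b;
		smp_rmb();	/* pairs with the smp_wmb() in writer() */
		y = a;		/* if x == 2, y is guaranteed to be 1 */
	}
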
EXAMPLES OF MEMORY BARRIER SEQUENCES
------------------------------------
| | +------+
+-------+ : :
|
- | Sequence in which stores committed to memory system
- | by CPU 1
+ | Sequence in which stores are committed to the
+ | memory system by CPU 1
V
| : : | |
| : : | CPU 2 |
| +-------+ | |
- \ | X->9 |------>| |
- \ +-------+ | |
- ----->| B->2 | | |
- +-------+ | |
- Makes sure all effects ---> ddddddddddddddddd | |
- prior to the store of C +-------+ | |
- are perceptible to | B->2 |------>| |
- successive loads +-------+ | |
+ | | X->9 |------>| |
+ | +-------+ | |
+ Makes sure all effects ---> \ ddddddddddddddddd | |
+ prior to the store of C \ +-------+ | |
+ are perceptible to ----->| B->2 |------>| |
+ subsequent loads +-------+ | |
: : +-------+
CPU 1 CPU 2
======================= =======================
+ { A = 0, B = 9 }
STORE A=1
- STORE B=2
- STORE C=3
<write barrier>
- STORE D=4
- STORE E=5
- LOAD A
+ STORE B=2
LOAD B
- LOAD C
- LOAD D
- LOAD E
+ LOAD A
Without intervention, CPU 2 may then choose to perceive the events on CPU 1 in
some effectively random order, despite the write barrier issued by CPU 1:
- +-------+ : :
- | | +------+
- | |------>| C=3 | }
- | | : +------+ }
- | | : | A=1 | }
- | | : +------+ }
- | CPU 1 | : | B=2 | }---
- | | +------+ } \
- | | wwwwwwwwwwwww} \
- | | +------+ } \ : : +-------+
- | | : | E=5 | } \ +-------+ | |
- | | : +------+ } \ { | C->3 |------>| |
- | |------>| D=4 | } \ { +-------+ : | |
- | | +------+ \ { | E->5 | : | |
- +-------+ : : \ { +-------+ : | |
- Transfer -->{ | A->1 | : | CPU 2 |
- from CPU 1 { +-------+ : | |
- to CPU 2 { | D->4 | : | |
- { +-------+ : | |
- { | B->2 |------>| |
- +-------+ | |
- : : +-------+
-
-
-If, however, a read barrier were to be placed between the load of C and the
-load of D on CPU 2, then the partial ordering imposed by CPU 1 will be
-perceived correctly by CPU 2.
+ +-------+ : : : :
+ | | +------+ +-------+
+ | |------>| A=1 |------ --->| A->0 |
+ | | +------+ \ +-------+
+ | CPU 1 | wwwwwwwwwwwwwwww \ --->| B->9 |
+ | | +------+ | +-------+
+ | |------>| B=2 |--- | : :
+ | | +------+ \ | : : +-------+
+ +-------+ : : \ | +-------+ | |
+ ---------->| B->2 |------>| |
+ | +-------+ | CPU 2 |
+ | | A->0 |------>| |
+ | +-------+ | |
+ | : : +-------+
+ \ : :
+ \ +-------+
+ ---->| A->1 |
+ +-------+
+ : :
- +-------+ : :
- | | +------+
- | |------>| C=3 | }
- | | : +------+ }
- | | : | A=1 | }---
- | | : +------+ } \
- | CPU 1 | : | B=2 | } \
- | | +------+ \
- | | wwwwwwwwwwwwwwww \
- | | +------+ \ : : +-------+
- | | : | E=5 | } \ +-------+ | |
- | | : +------+ }--- \ { | C->3 |------>| |
- | |------>| D=4 | } \ \ { +-------+ : | |
- | | +------+ \ -->{ | B->2 | : | |
- +-------+ : : \ { +-------+ : | |
- \ { | A->1 | : | CPU 2 |
- \ +-------+ | |
- At this point the read ----> \ rrrrrrrrrrrrrrrrr | |
- barrier causes all effects \ +-------+ | |
- prior to the storage of C \ { | E->5 | : | |
- to be perceptible to CPU 2 -->{ +-------+ : | |
- { | D->4 |------>| |
- +-------+ | |
- : : +-------+
+
+If, however, a read barrier were to be placed between the load of E and the
+load of A on CPU 2:
+
+ CPU 1 CPU 2
+ ======================= =======================
+ { A = 0, B = 9 }
+ STORE A=1
+ <write barrier>
+ STORE B=2
+ LOAD B
+ <read barrier>
+ LOAD A
+
+then the partial ordering imposed by CPU 1 will be perceived correctly by CPU
+2:
+
+ +-------+ : : : :
+ | | +------+ +-------+
+ | |------>| A=1 |------ --->| A->0 |
+ | | +------+ \ +-------+
+ | CPU 1 | wwwwwwwwwwwwwwww \ --->| B->9 |
+ | | +------+ | +-------+
+ | |------>| B=2 |--- | : :
+ | | +------+ \ | : : +-------+
+ +-------+ : : \ | +-------+ | |
+ ---------->| B->2 |------>| |
+ | +-------+ | CPU 2 |
+ | : : | |
+ | : : | |
+ At this point the read ----> \ rrrrrrrrrrrrrrrrr | |
+ barrier causes all effects \ +-------+ | |
+ prior to the storage of B ---->| A->1 |------>| |
+ to be perceptible to CPU 2 +-------+ | |
+ : : +-------+
+
+
+To illustrate this more completely, consider what could happen if the code
+contained a load of A either side of the read barrier:
+
+ CPU 1 CPU 2
+ ======================= =======================
+ { A = 0, B = 9 }
+ STORE A=1
+ <write barrier>
+ STORE B=2
+ LOAD B
+ LOAD A [first load of A]
+ <read barrier>
+ LOAD A [second load of A]
+
+Even though the two loads of A both occur after the load of B, they may both
+come up with different values:
+
+ +-------+ : : : :
+ | | +------+ +-------+
+ | |------>| A=1 |------ --->| A->0 |
+ | | +------+ \ +-------+
+ | CPU 1 | wwwwwwwwwwwwwwww \ --->| B->9 |
+ | | +------+ | +-------+
+ | |------>| B=2 |--- | : :
+ | | +------+ \ | : : +-------+
+ +-------+ : : \ | +-------+ | |
+ ---------->| B->2 |------>| |
+ | +-------+ | CPU 2 |
+ | : : | |
+ | : : | |
+ | +-------+ | |
+ | | A->0 |------>| 1st |
+ | +-------+ | |
+ At this point the read ----> \ rrrrrrrrrrrrrrrrr | |
+ barrier causes all effects \ +-------+ | |
+ prior to the storage of B ---->| A->1 |------>| 2nd |
+ to be perceptible to CPU 2 +-------+ | |
+ : : +-------+
+
+
+But it may be that the update to A from CPU 1 becomes perceptible to CPU 2
+before the read barrier completes anyway:
+
+ +-------+ : : : :
+ | | +------+ +-------+
+ | |------>| A=1 |------ --->| A->0 |
+ | | +------+ \ +-------+
+ | CPU 1 | wwwwwwwwwwwwwwww \ --->| B->9 |
+ | | +------+ | +-------+
+ | |------>| B=2 |--- | : :
+ | | +------+ \ | : : +-------+
+ +-------+ : : \ | +-------+ | |
+ ---------->| B->2 |------>| |
+ | +-------+ | CPU 2 |
+ | : : | |
+ \ : : | |
+ \ +-------+ | |
+ ---->| A->1 |------>| 1st |
+ +-------+ | |
+ rrrrrrrrrrrrrrrrr | |
+ +-------+ | |
+ | A->1 |------>| 2nd |
+ +-------+ | |
+ : : +-------+
+
+
+The guarantee is that the second load will always come up with A == 1 if the
+load of B came up with B == 2. No such guarantee exists for the first load of
+A; that may come up with either A == 0 or A == 1.
+
+
+READ MEMORY BARRIERS VS LOAD SPECULATION
+----------------------------------------
+
+Many CPUs speculate with loads: that is, they see that they will need to load an
+item from memory, and they find a time where they're not using the bus for any
+other loads, and so do the load in advance - even though they haven't actually
+got to that point in the instruction execution flow yet. This permits the
+actual load instruction to potentially complete immediately because the CPU
+already has the value to hand.
+
+It may turn out that the CPU didn't actually need the value - perhaps because a
+branch circumvented the load - in which case it can discard the value or just
+cache it for later use.
+
+Consider:
+
+ CPU 1 CPU 2
+ ======================= =======================
+ LOAD B
+ DIVIDE } Divide instructions generally
+ DIVIDE } take a long time to perform
+ LOAD A
+
+Which might appear as this:
+
+ : : +-------+
+ +-------+ | |
+ --->| B->2 |------>| |
+ +-------+ | CPU 2 |
+ : :DIVIDE | |
+ +-------+ | |
+ The CPU being busy doing a ---> --->| A->0 |~~~~ | |
+ division speculates on the +-------+ ~ | |
+ LOAD of A : : ~ | |
+ : :DIVIDE | |
+ : : ~ | |
+ Once the divisions are complete --> : : ~-->| |
+ the CPU can then perform the : : | |
+ LOAD with immediate effect : : +-------+
+
+
+Placing a read barrier or a data dependency barrier just before the second
+load:
+
+ CPU 1 CPU 2
+ ======================= =======================
+ LOAD B
+ DIVIDE
+ DIVIDE
+ <read barrier>
+ LOAD A
+
+will force any value speculatively obtained to be reconsidered to an extent
+dependent on the type of barrier used. If there was no change made to the
+speculated memory location, then the speculated value will just be used:
+
+ : : +-------+
+ +-------+ | |
+ --->| B->2 |------>| |
+ +-------+ | CPU 2 |
+ : :DIVIDE | |
+ +-------+ | |
+ The CPU being busy doing a ---> --->| A->0 |~~~~ | |
+ division speculates on the +-------+ ~ | |
+ LOAD of A : : ~ | |
+ : :DIVIDE | |
+ : : ~ | |
+ : : ~ | |
+ rrrrrrrrrrrrrrrr~ | |
+ : : ~ | |
+ : : ~-->| |
+ : : | |
+ : : +-------+
+
+
+but if there was an update or an invalidation from another CPU pending, then
+the speculation will be cancelled and the value reloaded:
+
+ : : +-------+
+ +-------+ | |
+ --->| B->2 |------>| |
+ +-------+ | CPU 2 |
+ : :DIVIDE | |
+ +-------+ | |
+ The CPU being busy doing a ---> --->| A->0 |~~~~ | |
+ division speculates on the +-------+ ~ | |
+ LOAD of A : : ~ | |
+ : :DIVIDE | |
+ : : ~ | |
+ : : ~ | |
+ rrrrrrrrrrrrrrrrr | |
+ +-------+ | |
+ The speculation is discarded ---> --->| A->1 |------>| |
+ and an updated value is +-------+ | |
+ retrieved : : +-------+
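

As a rough C sketch of the speculation case above (the producer/consumer functions
and the compute()/use() helpers are hypothetical stand-ins, not taken from this
document), the consumer's read barrier forces any speculated load of the data to
be reconsidered once the flag has been observed:

	static int data, ready;

	void producer(void)
	{
		data = compute();	/* hypothetical helper */
		smp_wmb();		/* order the data before the flag */
		ready = 1;
	}

	void consumer(void)
	{
		while (!ready)
			cpu_relax();
		smp_rmb();	/* discard/recheck any speculated load of data */
		use(data);	/* hypothetical helper; sees the value stored
				 * before "ready" was set */
	}
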
========================
===============================
Some of the other functions in the linux kernel imply memory barriers, amongst
-which are locking, scheduling and memory allocation functions.
+which are locking and scheduling functions.
This specification is a _minimum_ guarantee; any particular architecture may
provide more substantial guarantees, but these may not be relied upon outside
barriers is that the effects of instructions outside of a critical section may
seep into the inside of the critical section.
+A LOCK followed by an UNLOCK may not be assumed to be a full memory barrier
+because it is possible for an access preceding the LOCK to happen after the
+LOCK, and an access following the UNLOCK to happen before the UNLOCK, and the
+two accesses can themselves then cross:
+
+ *A = a;
+ LOCK
+ UNLOCK
+ *B = b;
+
+may occur as:
+
+ LOCK, STORE *B, STORE *A, UNLOCK
+
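A hedged C sketch of the same point (the variable names are illustrative only): if
the order of the two stores actually matters, an explicit barrier is needed rather
than relying on the empty LOCK/UNLOCK pair:

	static DEFINE_SPINLOCK(lock);
	static int A, B;

	void example(int a, int b)
	{
		A = a;
		smp_mb();	/* LOCK followed by UNLOCK would not, by itself,
				 * order this store against the store to B */
		spin_lock(&lock);
		spin_unlock(&lock);
		B = b;
	}
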
Locks and semaphores may not provide any guarantee of ordering on UP compiled
systems, and so cannot be counted on in such a situation to actually achieve
anything at all - especially with respect to I/O accesses - unless combined
(*) schedule() and similar imply full memory barriers.
- (*) Memory allocation and release functions imply full memory barriers.
-
=================================
INTER-CPU LOCKING BARRIER EFFECTS
LOCKS VS MEMORY ACCESSES
------------------------
-Consider the following: the system has a pair of spinlocks (N) and (Q), and
+Consider the following: the system has a pair of spinlocks (M) and (Q), and
three CPUs; then should the following sequence of events occur:
CPU 1 CPU 2
smp_wmb();
<A:modify v=2> <C:busy>
<C:queue v=2>
- p = &b; q = p;
+ p = &v; q = p;
<D:request p>
<B:modify p=&v> <D:commit p=&v>
<D:read p>
The interaction of the iflag bits is as follows (parity error
given as an example):
Parity error INPCK IGNPAR
- None n/a n/a character received
- Yes n/a 0 character discarded
- Yes 0 1 character received, marked as
+ n/a 0 n/a character received, marked as
TTY_NORMAL
- Yes 1 1 character received, marked as
+ None 1 n/a character received, marked as
+ TTY_NORMAL
+ Yes 1 0 character received, marked as
TTY_PARITY
+ Yes 1 1 character discarded
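
		A minimal sketch (not from this document) of how a driver's
		receive path might apply that table; it assumes the usual
		<linux/tty_flip.h> helpers, and the function name and the
		"parity_error" flag are invented here:

		static void sample_rx_char(struct tty_struct *tty,
					   unsigned char ch, int parity_error)
		{
			if (parity_error && I_INPCK(tty)) {
				if (I_IGNPAR(tty))
					return;	/* INPCK+IGNPAR: discard */
				/* INPCK set, IGNPAR clear: mark the error */
				tty_insert_flip_char(tty, ch, TTY_PARITY);
			} else {
				/* no error, or INPCK clear: normal character */
				tty_insert_flip_char(tty, ch, TTY_NORMAL);
			}
			tty_flip_buffer_push(tty);
		}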
Other flags may be used (eg, xon/xoff characters) if your
hardware supports hardware "soft" flow control.
--- /dev/null
+PXA2xx SPI on SSP driver HOWTO
+===================================================
+This is a mini howto on the pxa2xx_spi driver. The driver turns a PXA2xx
+synchronous serial port into a SPI master controller
+(see Documentation/spi/spi_summary). The driver has the following features:
+
+- Support for any PXA2xx SSP
+- SSP PIO and SSP DMA data transfers.
+- External and Internal (SSPFRM) chip selects.
+- Per slave device (chip) configuration.
+- Full suspend, freeze, resume support.
+
+The driver is built around a "spi_message" fifo serviced by a workqueue and a
+tasklet. The workqueue, "pump_messages", drives the message fifo and the
+tasklet (pump_transfer) is responsible for queuing SPI transactions and setting
+up and launching the dma/interrupt driven transfers.
+
+Declaring PXA2xx Master Controllers
+-----------------------------------
+Typically a SPI master is defined in the arch/.../mach-*/board-*.c as a
+"platform device". The master configuration is passed to the driver via a table
+found in include/asm-arm/arch-pxa/pxa2xx_spi.h:
+
+struct pxa2xx_spi_master {
+ enum pxa_ssp_type ssp_type;
+ u32 clock_enable;
+ u16 num_chipselect;
+ u8 enable_dma;
+};
+
+The "pxa2xx_spi_master.ssp_type" field must have a value between 1 and 3 and
+informs the driver which features a particular SSP supports.
+
+The "pxa2xx_spi_master.clock_enable" field is used to enable/disable the
+corresponding SSP peripheral block in the "Clock Enable Register" (CKEN). See
+the "PXA2xx Developer Manual" section "Clocks and Power Management".
+
+The "pxa2xx_spi_master.num_chipselect" field is used to determine the number of
+slave devices (chips) attached to this SPI master.
+
+The "pxa2xx_spi_master.enable_dma" field informs the driver that SSP DMA should
+be used. This causes the driver to acquire two DMA channels: rx_channel and
+tx_channel. The rx_channel has a higher DMA service priority than the tx_channel.
+See the "PXA2xx Developer Manual" section "DMA Controller".
+
+NSSP MASTER SAMPLE
+------------------
+Below is a sample configuration using the PXA255 NSSP.
+
+static struct resource pxa_spi_nssp_resources[] = {
+ [0] = {
+ .start = __PREG(SSCR0_P(2)), /* Start address of NSSP */
+ .end = __PREG(SSCR0_P(2)) + 0x2c, /* Range of registers */
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_NSSP, /* NSSP IRQ */
+ .end = IRQ_NSSP,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct pxa2xx_spi_master pxa_nssp_master_info = {
+ .ssp_type = PXA25x_NSSP, /* Type of SSP */
+ .clock_enable = CKEN9_NSSP, /* NSSP Peripheral clock */
+ .num_chipselect = 1, /* Matches the number of chips attached to NSSP */
+ .enable_dma = 1, /* Enables NSSP DMA */
+};
+
+static struct platform_device pxa_spi_nssp = {
+ .name = "pxa2xx-spi", /* MUST BE THIS VALUE, so device match driver */
+ .id = 2, /* Bus number, MUST MATCH SSP number 1..n */
+ .resource = pxa_spi_nssp_resources,
+ .num_resources = ARRAY_SIZE(pxa_spi_nssp_resources),
+ .dev = {
+ .platform_data = &pxa_nssp_master_info, /* Passed to driver */
+ },
+};
+
+static struct platform_device *devices[] __initdata = {
+ &pxa_spi_nssp,
+};
+
+static void __init board_init(void)
+{
+	(void)platform_add_devices(devices, ARRAY_SIZE(devices));
+}
+
+Declaring Slave Devices
+-----------------------
+Typically each SPI slave (chip) is defined in the arch/.../mach-*/board-*.c
+using the "spi_board_info" structure found in "linux/spi/spi.h". See
+"Documentation/spi/spi_summary" for additional information.
+
+Each slave device attached to the PXA must provide slave specific configuration
+information via the structure "pxa2xx_spi_chip" found in
+"include/asm-arm/arch-pxa/pxa2xx_spi.h". The pxa2xx_spi master controller driver
+will use the configuration whenever the driver communicates with the slave
+device.
+
+struct pxa2xx_spi_chip {
+ u8 tx_threshold;
+ u8 rx_threshold;
+ u8 dma_burst_size;
+ u32 timeout_microsecs;
+ u8 enable_loopback;
+ void (*cs_control)(u32 command);
+};
+
+The "pxa2xx_spi_chip.tx_threshold" and "pxa2xx_spi_chip.rx_threshold" fields are
+used to configure the SSP hardware fifo. These fields are critical to the
+performance of the pxa2xx_spi driver and misconfiguration will result in rx
+fifo overruns (especially in PIO mode transfers). Good default values are
+
+ .tx_threshold = 12,
+ .rx_threshold = 4,
+
+The "pxa2xx_spi_chip.dma_burst_size" field is used to configure PXA2xx DMA
+engine and is related the "spi_device.bits_per_word" field. Read and understand
+the PXA2xx "Developer Manual" sections on the DMA controller and SSP Controllers
+to determine the correct value. An SSP configured for byte-wide transfers would
+use a value of 8.
+
+The "pxa2xx_spi_chip.timeout_microsecs" fields is used to efficiently handle
+trailing bytes in the SSP receiver fifo. The correct value for this field is
+dependent on the SPI bus speed ("spi_board_info.max_speed_hz") and the specific
+slave device. Please note that the PXA2xx SSP 1 does not support trailing byte
+timeouts and must busy-wait any trailing bytes.
+
+The "pxa2xx_spi_chip.enable_loopback" field is used to place the SSP porting
+into internal loopback mode. In this mode the SSP controller internally
+connects the SSPTX pin the the SSPRX pin. This is useful for initial setup
+testing.
+
+The "pxa2xx_spi_chip.cs_control" field is used to point to a board specific
+function for asserting/deasserting a slave device chip select. If the field is
+NULL, the pxa2xx_spi master controller driver assumes that the SSP port is
+configured to use SSPFRM instead.
+
+NSSP SLAVE SAMPLE
+-----------------
+The pxa2xx_spi_chip structure is passed to the pxa2xx_spi driver in the
+"spi_board_info.controller_data" field. Below is a sample configuration using
+the PXA255 NSSP.
+
+/* Chip Select control for the CS8415A SPI slave device */
+static void cs8415a_cs_control(u32 command)
+{
+ if (command & PXA2XX_CS_ASSERT)
+ GPCR(2) = GPIO_bit(2);
+ else
+ GPSR(2) = GPIO_bit(2);
+}
+
+/* Chip Select control for the CS8405A SPI slave device */
+static void cs8405a_cs_control(u32 command)
+{
+ if (command & PXA2XX_CS_ASSERT)
+ GPCR(3) = GPIO_bit(3);
+ else
+ GPSR(3) = GPIO_bit(3);
+}
+
+static struct pxa2xx_spi_chip cs8415a_chip_info = {
+	.tx_threshold = 12, /* SSP hardware FIFO threshold */
+	.rx_threshold = 4, /* SSP hardware FIFO threshold */
+ .dma_burst_size = 8, /* Byte wide transfers used so 8 byte bursts */
+ .timeout_microsecs = 64, /* Wait at least 64usec to handle trailing */
+ .cs_control = cs8415a_cs_control, /* Use external chip select */
+};
+
+static struct pxa2xx_spi_chip cs8405a_chip_info = {
+	.tx_threshold = 12, /* SSP hardware FIFO threshold */
+	.rx_threshold = 4, /* SSP hardware FIFO threshold */
+ .dma_burst_size = 8, /* Byte wide transfers used so 8 byte bursts */
+ .timeout_microsecs = 64, /* Wait at least 64usec to handle trailing */
+ .cs_control = cs8405a_cs_control, /* Use external chip select */
+};
+
+static struct spi_board_info streetracer_spi_board_info[] __initdata = {
+ {
+ .modalias = "cs8415a", /* Name of spi_driver for this device */
+	.max_speed_hz = 3686400, /* Run SSP as fast as possible */
+ .bus_num = 2, /* Framework bus number */
+ .chip_select = 0, /* Framework chip select */
+	.platform_data = NULL, /* No spi_driver specific config */
+ .controller_data = &cs8415a_chip_info, /* Master chip config */
+ .irq = STREETRACER_APCI_IRQ, /* Slave device interrupt */
+ },
+ {
+ .modalias = "cs8405a", /* Name of spi_driver for this device */
+	.max_speed_hz = 3686400, /* Run SSP as fast as possible */
+ .bus_num = 2, /* Framework bus number */
+ .chip_select = 1, /* Framework chip select */
+ .controller_data = &cs8405a_chip_info, /* Master chip config */
+ .irq = STREETRACER_APCI_IRQ, /* Slave device interrupt */
+ },
+};
+
+static void __init streetracer_init(void)
+{
+ spi_register_board_info(streetracer_spi_board_info,
+ ARRAY_SIZE(streetracer_spi_board_info));
+}
+
+
+DMA and PIO I/O Support
+-----------------------
+The pxa2xx_spi driver supports both DMA and interrupt driven PIO message
+transfers. The driver defaults to PIO mode and DMA transfers must be enabled by
+setting the "enable_dma" flag in the "pxa2xx_spi_master" structure and
+ensuring that the "pxa2xx_spi_chip.dma_burst_size" field is non-zero. The DMA
+mode supports both coherent and stream based DMA mappings.
+
+The following logic is used to determine the type of I/O to be used on
+a per "spi_transfer" basis:
+
+if !enable_dma or dma_burst_size == 0 then
+ always use PIO transfers
+
+if spi_message.is_dma_mapped and rx_dma_buf != 0 and tx_dma_buf != 0 then
+ use coherent DMA mode
+
+if rx_buf and tx_buf are aligned on 8 byte boundary then
+ use streaming DMA mode
+
+otherwise
+ use PIO transfer
+
+THANKS TO
+---------
+
+David Brownell and others for mentoring the development of this driver.
+
The driver will initialize the fields of that spi_master, including the
bus number (maybe the same as the platform device ID) and three methods
used to interact with the SPI core and SPI protocol drivers. It will
-also initialize its own internal state.
+also initialize its own internal state. (See below about bus numbering
+and those methods.)
+
+After you initialize the spi_master, then use spi_register_master() to
+publish it to the rest of the system. At that time, device nodes for
+the controller and any predeclared spi devices will be made available,
+and the driver model core will take care of binding them to drivers.
+
+If you need to remove your SPI controller driver, spi_unregister_master()
+will reverse the effect of spi_register_master().
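+
+As a minimal, hypothetical probe sketch (the "my_spi_*" names and the private
+struct are invented for illustration and not part of this document), the
+allocate/initialize/register sequence looks roughly like:
+
+	static int my_spi_probe(struct platform_device *pdev)
+	{
+		struct spi_master	*master;
+		int			ret;
+
+		master = spi_alloc_master(&pdev->dev,
+					  sizeof(struct my_spi_priv));
+		if (!master)
+			return -ENOMEM;
+
+		master->bus_num = pdev->id;	/* e.g. SOC controller SPI2 => 2 */
+		master->num_chipselect = 4;	/* board-specific */
+		master->setup = my_spi_setup;
+		master->transfer = my_spi_transfer;
+		master->cleanup = my_spi_cleanup;
+
+		ret = spi_register_master(master);
+		if (ret)
+			spi_master_put(master);
+		return ret;
+	}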
+
+
+BUS NUMBERING
+
+Bus numbering is important, since that's how Linux identifies a given
+SPI bus (shared SCK, MOSI, MISO). Valid bus numbers start at zero. On
+SOC systems, the bus numbers should match the numbers defined by the chip
+manufacturer. For example, hardware controller SPI2 would be bus number 2,
+and spi_board_info for devices connected to it would use that number.
+
+If you don't have such a hardware-assigned bus number, and for some reason
+you can't just assign them, then provide a negative bus number. That will
+then be replaced by a dynamically assigned number. You'd then need to treat
+this as a non-static configuration (see above).
+
+
+SPI MASTER METHODS
master->setup(struct spi_device *spi)
This sets up the device clock rate, SPI mode, and word sizes.
state it dynamically associates with that device. If you do that,
be sure to provide the cleanup() method to free that state.
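For illustration only (the chip-state struct and function name are hypothetical),
a setup() method that allocates per-device state, kept in spi->controller_state
and later freed by cleanup(), might look like:

	static int my_spi_setup(struct spi_device *spi)
	{
		struct my_chip_state *cs = spi->controller_state;

		if (!cs) {
			cs = kzalloc(sizeof(*cs), GFP_KERNEL);
			if (!cs)
				return -ENOMEM;
			spi->controller_state = cs;
		}

		/* derive divisor/mode/word-size settings from
		 * spi->max_speed_hz, spi->mode and spi->bits_per_word
		 */
		return 0;
	}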
+
+SPI MESSAGE QUEUE
+
The bulk of the driver will be managing the I/O queue fed by transfer().
That queue could be purely conceptual. For example, a driver used only
often DMA (especially if the root filesystem is in SPI flash), and
execution contexts like IRQ handlers, tasklets, or workqueues (such
as keventd). Your driver can be as fancy, or as simple, as you need.
+Such a transfer() method would normally just add the message to a
+queue, and then start some asynchronous transfer engine (unless it's
+already running).
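+
+A rough sketch of such a transfer() method (the driver-private struct, its
+queue, lock, workqueue and work item are all hypothetical here):
+
+	static int my_spi_transfer(struct spi_device *spi,
+				   struct spi_message *msg)
+	{
+		struct my_spi_priv *priv = spi_master_get_devdata(spi->master);
+		unsigned long flags;
+
+		msg->actual_length = 0;
+		msg->status = -EINPROGRESS;
+
+		spin_lock_irqsave(&priv->queue_lock, flags);
+		list_add_tail(&msg->queue, &priv->queue);
+		queue_work(priv->workqueue, &priv->pump_messages);
+		spin_unlock_irqrestore(&priv->queue_lock, flags);
+
+		return 0;
+	}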
THANKS TO
some data to the device. So a very simple watchdog daemon would look
like this:
+#include <stdlib.h>
+#include <fcntl.h>
+
int main(int argc, const char *argv[]) {
int fd=open("/dev/watchdog",O_WRONLY);
if (fd==-1) {
PLEASE document known bugs. If it doesn't work for everything
or does something very odd once a month document it.
+ PLEASE remember that submissions must be made under the terms
+ of the OSDL certificate of contribution
+ (http://www.osdl.org/newsroom/press_releases/2004/2004_05_24_dco.html)
+ and should include a Signed-off-by: line.
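+	For instance (an illustrative name and address only), such a tag
+	at the end of the patch description would read:
+
+	    Signed-off-by: Jane Developer <jane@example.com>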
+
6. Make sure you have the right to send any changes you make. If you
do changes at work you may find your employer owns the patch
not you.
-7. Happy hacking.
+7. When sending security related changes or reports to a maintainer
+ please Cc: security@kernel.org, especially if the maintainer
+ does not respond.
+
+8. Happy hacking.
-----------------------------------
P: Arnd Bergmann
M: arnd@arndb.de
L: linuxppc-dev@ozlabs.org
-W: http://linuxppc64.org
+W: http://www.penguinppc.org/ppc64/
+S: Supported
+
+BROADCOM BNX2 GIGABIT ETHERNET DRIVER
+P: Michael Chan
+M: mchan@broadcom.com
+L: netdev@vger.kernel.org
+S: Supported
+
+BROADCOM TG3 GIGABIT ETHERNET DRIVER
+P: Michael Chan
+M: mchan@broadcom.com
+L: netdev@vger.kernel.org
S: Supported
BTTV VIDEO4LINUX DRIVER
EXT3 FILE SYSTEM
P: Stephen Tweedie, Andrew Morton
M: sct@redhat.com, akpm@osdl.org, adilger@clusterfs.com
-L: ext3-users@redhat.com
+L: ext2-devel@lists.sourceforge.net
S: Maintained
F71805F HARDWARE MONITORING DRIVER
T: git kernel.org:/pub/scm/linux/kernel/git/shaggy/jfs-2.6.git
S: Supported
+JOURNALLING LAYER FOR BLOCK DEVICES (JBD)
+P: Stephen Tweedie, Andrew Morton
+M: sct@redhat.com, akpm@osdl.org
+L: ext2-devel@lists.sourceforge.net
+S: Maintained
+
KCONFIG
P: Roman Zippel
M: zippel@linux-m68k.org
L: kbuild-devel@lists.sourceforge.net
S: Maintained
+KDUMP
+P: Vivek Goyal
+M: vgoyal@in.ibm.com
+P: Haren Myneni
+M: hbabu@us.ibm.com
+L: fastboot@lists.osdl.org
+L: linux-kernel@vger.kernel.org
+W: http://lse.sourceforge.net/kdump/
+S: Maintained
+
KERNEL AUTOMOUNTER (AUTOFS)
P: H. Peter Anvin
M: hpa@zytor.com
L: linux-scsi@vger.kernel.org
S: Maintained
+LED SUBSYSTEM
+P: Richard Purdie
+M: rpurdie@rpsys.net
+S: Maintained
+
LEGO USB Tower driver
P: Juergen Stuber
M: starblue@users.sourceforge.net
LINUX FOR POWERPC EMBEDDED PPC8XX
P: Marcelo Tosatti
-M: marcelo.tosatti@cyclades.com
+M: marcelo@kvack.org
W: http://www.penguinppc.org/
L: linuxppc-embedded@ozlabs.org
S: Maintained
P: Anton Blanchard
M: anton@samba.org
M: anton@au.ibm.com
-W: http://linuxppc64.org
+W: http://www.penguinppc.org/ppc64/
L: linuxppc-dev@ozlabs.org
S: Supported
W: http://www.atnf.csiro.au/~rgooch/linux/kernel-patches.html
S: Maintained
+MULTIMEDIA CARD SUBSYSTEM
+P: Russell King
+M: rmk+mmc@arm.linux.org.uk
+S: Maintained
+
MULTISOUND SOUND DRIVER
P: Andrew Veliath
M: andrewtv@usa.net
L: linux-scsi@vger.kernel.org
S: Maintained
+NETEM NETWORK EMULATOR
+P: Stephen Hemminger
+M: shemminger@osdl.org
+L: netem@osdl.org
+S: Maintained
+
NETFILTER/IPTABLES/IPCHAINS
P: Rusty Russell
P: Marc Boucher
L: alsa-devel@alsa-project.org
S: Maintained
+SPI SUBSYSTEM
+P: David Brownell
+M: dbrownell@users.sourceforge.net
+L: spi-devel-general@lists.sourceforge.net
+S: Maintained
+
TPM DEVICE DRIVER
P: Kylene Hall
M: kjhall@us.ibm.com
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 17
-EXTRAVERSION =-rc4
-NAME=Sliding Snow Leopard
+EXTRAVERSION =-rc6
+NAME=Crazed Snow-Weasel
# *DOCUMENTATION*
# To see a list of typical targets execute "make help"
config GENERIC_HWEIGHT
bool
- default y if !ALPHA_EV6 && !ALPHA_EV67
+ default y if !ALPHA_EV67
config ALPHA_AVANTI
bool
EXPORT_SYMBOL(smp_call_function);
EXPORT_SYMBOL(smp_call_function_on_cpu);
EXPORT_SYMBOL(_atomic_dec_and_lock);
-EXPORT_SYMBOL(cpu_present_mask);
#endif /* CONFIG_SMP */
/*
if (cpuid != boot_cpuid) {
flags |= 0x00040000UL; /* "remain halted" */
*pflags = flags;
- clear_bit(cpuid, &cpu_present_mask);
+ cpu_clear(cpuid, cpu_present_map);
halt();
}
#endif
#ifdef CONFIG_SMP
/* Wait for the secondaries to halt. */
- cpu_clear(boot_cpuid, cpu_possible_map);
- while (cpus_weight(cpu_possible_map))
+ cpu_clear(boot_cpuid, cpu_present_map);
+ while (cpus_weight(cpu_present_map))
barrier();
#endif
static int smp_secondary_alive __initdata = 0;
/* Which cpus ids came online. */
-cpumask_t cpu_present_mask;
cpumask_t cpu_online_map;
EXPORT_SYMBOL(cpu_online_map);
if ((cpu->flags & 0x1cc) == 0x1cc) {
smp_num_probed++;
/* Assume here that "whami" == index */
- cpu_set(i, cpu_present_mask);
+ cpu_set(i, cpu_present_map);
cpu->pal_revision = boot_cpu_palrev;
}
}
} else {
smp_num_probed = 1;
- cpu_set(boot_cpuid, cpu_present_mask);
}
- printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_mask = %lx\n",
- smp_num_probed, cpu_possible_map.bits[0]);
+ printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_map = %lx\n",
+ smp_num_probed, cpu_present_map.bits[0]);
}
/*
/* Nothing to do on a UP box, or when told not to. */
if (smp_num_probed == 1 || max_cpus == 0) {
- cpu_present_mask = cpumask_of_cpu(boot_cpuid);
+ cpu_present_map = cpumask_of_cpu(boot_cpuid);
printk(KERN_INFO "SMP mode deactivated.\n");
return;
}
void __devinit
smp_prepare_boot_cpu(void)
{
- /*
- * Mark the boot cpu (current cpu) as online
- */
- cpu_set(smp_processor_id(), cpu_online_map);
}
int __devinit
register int bcpu = boot_cpuid;
#ifdef CONFIG_SMP
- cpumask_t cpm = cpu_present_mask;
+ cpumask_t cpm = cpu_present_map;
volatile unsigned long *dim0, *dim1, *dim2, *dim3;
unsigned long mask0, mask1, mask2, mask3, dummy;
help
Choice for UART for kernel low-level using S3C2410 UARTS,
should be between zero and two. The port must have been
- initalised by the boot-loader before use.
+ initialised by the boot-loader before use.
The uncompressor code port configuration is now handled
by CONFIG_S3C2410_LOWLEVEL_UART_PORT.
DEFINE(MACHINFO_NAME, offsetof(struct machine_desc, name));
DEFINE(MACHINFO_PHYSIO, offsetof(struct machine_desc, phys_io));
DEFINE(MACHINFO_PGOFFIO, offsetof(struct machine_desc, io_pg_offst));
+ BLANK();
+ DEFINE(PROC_INFO_SZ, sizeof(struct proc_info_list));
DEFINE(PROCINFO_INITFUNC, offsetof(struct proc_info_list, __cpu_flush));
DEFINE(PROCINFO_MMUFLAGS, offsetof(struct proc_info_list, __cpu_mmu_flags));
return 0;
.residue = isa_get_dma_residue,
};
-static struct resource dma_resources[] = {
- { "dma1", 0x0000, 0x000f },
- { "dma low page", 0x0080, 0x008f },
- { "dma2", 0x00c0, 0x00df },
- { "dma high page", 0x0480, 0x048f }
-};
+static struct resource dma_resources[] = { {
+ .name = "dma1",
+ .start = 0x0000,
+ .end = 0x000f
+}, {
+ .name = "dma low page",
+ .start = 0x0080,
+ .end = 0x008f
+}, {
+ .name = "dma2",
+ .start = 0x00c0,
+ .end = 0x00df
+}, {
+ .name = "dma high page",
+ .start = 0x0480,
+ .end = 0x048f
+} };
void __init isa_init_dma(dma_t *dma)
{
struct thread_info_list *th = &get_cpu_var(thread_info_list);
if (th->nr < EXTRA_TASK_STRUCT) {
unsigned long *p = (unsigned long *)thread;
- p[0] = th->head;
+ p[0] = (unsigned long)th->head;
th->head = p;
th->nr += 1;
put_cpu_var(thread_info_list);
#define reg r5
#define stack r6
-.Ldumpstm: stmfd sp!, {instr, reg, stack, r7, lr}
+.Ldumpstm: stmfd sp!, {instr, reg, stack, r7, r8, lr}
mov stack, r0
mov instr, r1
mov reg, #9
adrne r0, .Lcr
blne printk
mov r0, stack
- LOADREGS(fd, sp!, {instr, reg, stack, r7, pc})
+ LOADREGS(fd, sp!, {instr, reg, stack, r7, r8, pc})
.Lfp: .asciz " r%d = %08X%c"
.Lcr: .asciz "\n"
moveq pc, lr
@ Division by 0:
- str lr, [sp, #-4]!
+ str lr, [sp, #-8]!
bl __div0
@ as wrong as it could be...
mov yl, #0
mov yh, #0
mov xh, #0
- ldr pc, [sp], #4
+ ldr pc, [sp], #8
}
}
-static unsigned char ts72xx_rtc_readb(unsigned long addr)
+static unsigned char ts72xx_rtc_readbyte(unsigned long addr)
{
__raw_writeb(addr, TS72XX_RTC_INDEX_VIRT_BASE);
return __raw_readb(TS72XX_RTC_DATA_VIRT_BASE);
}
-static void ts72xx_rtc_writeb(unsigned char value, unsigned long addr)
+static void ts72xx_rtc_writebyte(unsigned char value, unsigned long addr)
{
__raw_writeb(addr, TS72XX_RTC_INDEX_VIRT_BASE);
__raw_writeb(value, TS72XX_RTC_DATA_VIRT_BASE);
}
static struct m48t86_ops ts72xx_rtc_ops = {
- .readb = ts72xx_rtc_readb,
- .writeb = ts72xx_rtc_writeb,
+ .readbyte = ts72xx_rtc_readbyte,
+ .writebyte = ts72xx_rtc_writebyte,
};
static struct platform_device ts72xx_rtc_device = {
imx_gpio_ack_irq(unsigned int irq)
{
DEBUG_IRQ("%s: irq %d\n", __FUNCTION__, irq);
- ISR(IRQ_TO_REG(irq)) |= 1 << ((irq - IRQ_GPIOA(0)) % 32);
+ ISR(IRQ_TO_REG(irq)) = 1 << ((irq - IRQ_GPIOA(0)) % 32);
}
static void
for (i = IRQ_PIC_START; i <= IRQ_PIC_END; i++) {
if (i == 11)
i = 22;
- if (i == IRQ_CP_CPPLDINT)
- i++;
if (i == 29)
break;
set_irq_chip(i, &pic_chip);
set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
}
- set_irq_handler(IRQ_CP_CPPLDINT, sic_handle_irq);
- pic_unmask_irq(IRQ_CP_CPPLDINT);
+ set_irq_chained_handler(IRQ_CP_CPPLDINT, sic_handle_irq);
}
/*
static void ixp23xx_irq_mask(unsigned int irq)
{
- volatile unsigned long *intr_reg = IXP23XX_INTR_EN1 + (irq / 32);
+ volatile unsigned long *intr_reg;
+ if (irq >= 56)
+ irq += 8;
+
+ intr_reg = IXP23XX_INTR_EN1 + (irq / 32);
*intr_reg &= ~(1 << (irq % 32));
}
*/
static void ixp23xx_irq_level_unmask(unsigned int irq)
{
- volatile unsigned long *intr_reg = IXP23XX_INTR_EN1 + (irq / 32);
+ volatile unsigned long *intr_reg;
ixp23xx_irq_ack(irq);
+ if (irq >= 56)
+ irq += 8;
+
+ intr_reg = IXP23XX_INTR_EN1 + (irq / 32);
*intr_reg |= (1 << (irq % 32));
}
static void ixp23xx_irq_edge_unmask(unsigned int irq)
{
- volatile unsigned long *intr_reg = IXP23XX_INTR_EN1 + (irq / 32);
+ volatile unsigned long *intr_reg;
+
+ if (irq >= 56)
+ irq += 8;
+ intr_reg = IXP23XX_INTR_EN1 + (irq / 32);
*intr_reg |= (1 << (irq % 32));
}
2) If > 64MB of memory space is required, the IXP4xx can be
configured to use indirect registers to access PCI This allows
for up to 128MB (0x48000000 to 0x4fffffff) of memory on the bus.
- The disadvantadge of this is that every PCI access requires
+ The disadvantage of this is that every PCI access requires
three local register accesses plus a spinlock, but in some
cases the performance hit is acceptable. In addition, you cannot
mmap() PCI devices in this case due to the indirect nature
for(irq = MAINSTONE_IRQ(0); irq <= MAINSTONE_IRQ(15); irq++) {
set_irq_chip(irq, &mainstone_irq_chip);
set_irq_handler(irq, do_level_IRQ);
- set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ if (irq == MAINSTONE_IRQ(10) || irq == MAINSTONE_IRQ(14))
+ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE | IRQF_NOAUTOEN);
+ else
+ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
set_irq_flags(MAINSTONE_IRQ(8), 0);
set_irq_flags(MAINSTONE_IRQ(12), 0);
MACHINE_START(MAINSTONE, "Intel HCDDBBVA0 Development Platform (aka Mainstone)")
/* Maintainer: MontaVista Software Inc. */
.phys_io = 0x40000000,
+ .boot_params = 0xa0000100, /* BLOB boot parameter setting */
.io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc,
.map_io = mainstone_map_io,
.init_irq = mainstone_init_irq,
static struct pxaohci_platform_data spitz_ohci_platform_data = {
.port_mode = PMM_NPS_MODE,
.init = spitz_ohci_init,
+ .power_budget = 150,
};
static void __init gic_init_irq(void)
{
#ifdef CONFIG_REALVIEW_MPCORE
+ unsigned int pldctrl;
writel(0x0000a05f, __io_address(REALVIEW_SYS_LOCK));
- writel(0x008003c0, __io_address(REALVIEW_SYS_BASE) + 0xd8);
+ pldctrl = readl(__io_address(REALVIEW_SYS_BASE) + 0xd8);
+ pldctrl |= 0x00800000; /* New irq mode */
+ writel(pldctrl, __io_address(REALVIEW_SYS_BASE) + 0xd8);
writel(0x00000000, __io_address(REALVIEW_SYS_LOCK));
#endif
gic_dist_init(__io_address(REALVIEW_GIC_DIST_BASE));
depends on ARCH_S3C2410 && PM
help
Say Y here if you want verbose debugging from the PM Suspend and
- Resume code. See `Documentation/arm/Samsing-S3C24XX/Suspend.txt`
+ Resume code. See <file:Documentation/arm/Samsung-S3C24XX/Suspend.txt>
for more information.
config S3C2410_PM_CHECK
mrc p15, 0, r5, c13, c0, 0 @ PID
mrc p15, 0, r6, c3, c0, 0 @ Domain ID
mrc p15, 0, r7, c2, c0, 0 @ translation table base address
- mrc p15, 0, r8, c2, c0, 0 @ auxiliary control register
- mrc p15, 0, r9, c1, c0, 0 @ control register
+ mrc p15, 0, r8, c1, c0, 0 @ control register
stmia r0, { r4 - r13 }
mcr p15, 0, r5, c13, c0, 0 @ PID
mcr p15, 0, r6, c3, c0, 0 @ Domain ID
mcr p15, 0, r7, c2, c0, 0 @ translation table base
- mcr p15, 0, r8, c1, c1, 0 @ auxilliary control
#ifdef CONFIG_DEBUG_RESUME
mov r3, #'R'
#endif
ldr r2, =resume_with_mmu
- mcr p15, 0, r9, c1, c0, 0 @ turn on MMU, etc
+ mcr p15, 0, r8, c1, c0, 0 @ turn on MMU, etc
nop @ second-to-last before mmu
mov pc, r2 @ go back to virtual address
if (irr & (IRR_ETHERNET | IRR_USAR)) {
desc->chip->mask(irq);
+ /*
+ * Ack the interrupt now to prevent re-entering
+ * this neponset handler. Again, this is safe
+ * since we'll check the IRR register prior to
+ * leaving.
+ */
+ desc->chip->ack(irq);
+
if (irr & IRR_ETHERNET) {
d = irq_desc + IRQ_NEPONSET_SMC9196;
desc_handle_irq(IRQ_NEPONSET_SMC9196, d, regs);
{
unsigned int i;
- vic_init(VA_VIC_BASE, IRQ_VIC_START, ~(1 << 31));
+ vic_init(VA_VIC_BASE, IRQ_VIC_START, ~0);
- set_irq_handler(IRQ_VICSOURCE31, sic_handle_irq);
- enable_irq(IRQ_VICSOURCE31);
+ set_irq_chained_handler(IRQ_VICSOURCE31, sic_handle_irq);
/* Do second interrupt controller */
writel(~0, VA_SIC_BASE + SIC_IRQ_ENABLE_CLEAR);
return NULL;
addr = (unsigned long)area->addr;
if (remap_area_pages(addr, pfn, size, flags)) {
- vfree((void *)addr);
+ vunmap((void *)addr);
return NULL;
}
return (void __iomem *) (offset + (char *)addr);
void __iounmap(void __iomem *addr)
{
- vfree((void *) (PAGE_MASK & (unsigned long) addr));
+ vunmap((void *)(PAGE_MASK & (unsigned long)addr));
}
EXPORT_SYMBOL(__iounmap);
ecc_mask = 0;
}
- if (cpu_arch <= CPU_ARCH_ARMv5TEJ) {
+ if (cpu_arch <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale()) {
for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
if (mem_types[i].prot_l1)
mem_types[i].prot_l1 |= PMD_BIT4;
pgd = init_mm.pgd;
base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
- if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ)
+ if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
base_pmdval |= PMD_BIT4;
for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
#endif
mcr p15, 0, r0, c1, c0, 1 @ set auxiliary control reg
mrc p15, 0, r0, c1, c0, 0 @ get control register
- bic r0, r0, #0x0200 @ .... ..R. .... ....
bic r0, r0, #0x0002 @ .... .... .... ..A.
orr r0, r0, #0x0005 @ .... .... .... .C.M
#if BTB_ENABLE
+ bic r0, r0, #0x0200 @ .... ..R. .... ....
orr r0, r0, #0x3900 @ ..VI Z..S .... ....
#else
+ bic r0, r0, #0x0a00 @ .... Z.R. .... ....
orr r0, r0, #0x3100 @ ..VI ...S .... ....
#endif
#if L2_CACHE_ENABLE
bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
depends on SMP && HOTPLUG && EXPERIMENTAL && !X86_VOYAGER
---help---
- Say Y here to experiment with turning CPUs off and on. CPUs
- can be controlled through /sys/devices/system/cpu.
+ Say Y here to experiment with turning CPUs off and on, and to
+ enable suspend on SMP systems. CPUs can be controlled through
+ /sys/devices/system/cpu.
- Say N.
endmenu
{
struct acpi_table_madt *madt = NULL;
- if (!phys_addr || !size)
+ if (!phys_addr || !size || !cpu_has_apic)
return -EINVAL;
madt = (struct acpi_table_madt *)__acpi_map_table(phys_addr, size);
static int __init acpi_parse_fadt(unsigned long phys, unsigned long size)
{
- struct fadt_descriptor_rev2 *fadt = NULL;
+ struct fadt_descriptor *fadt = NULL;
- fadt = (struct fadt_descriptor_rev2 *)__acpi_map_table(phys, size);
+ fadt = (struct fadt_descriptor *)__acpi_map_table(phys, size);
if (!fadt) {
printk(KERN_WARNING PREFIX "Unable to map FADT\n");
return 0;
return -ENODEV;
}
- if (!cpu_has_apic)
+ if (!cpu_has_apic)
return -ENODEV;
/*
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pci.h>
+#include <linux/acpi.h>
+
#include <asm/pci-direct.h>
#include <asm/acpi.h>
#include <asm/apic.h>
+#ifdef CONFIG_ACPI
+
+static int nvidia_hpet_detected __initdata;
+
+static int __init nvidia_hpet_check(unsigned long phys, unsigned long size)
+{
+ nvidia_hpet_detected = 1;
+ return 0;
+}
+#endif
+
static int __init check_bridge(int vendor, int device)
{
#ifdef CONFIG_ACPI
- /* According to Nvidia all timer overrides are bogus. Just ignore
- them all. */
+ /* According to Nvidia all timer overrides are bogus unless HPET
+ is enabled. */
if (vendor == PCI_VENDOR_ID_NVIDIA) {
- acpi_skip_timer_override = 1;
+ nvidia_hpet_detected = 0;
+ acpi_table_parse(ACPI_HPET, nvidia_hpet_check);
+ if (nvidia_hpet_detected == 0) {
+ acpi_skip_timer_override = 1;
+ }
}
#endif
if (vendor == PCI_VENDOR_ID_ATI && timer_over_8254 == 1) {
buf[2] = ACPI_PDC_C_CAPABILITY_SMP;
if (cpu_has(c, X86_FEATURE_EST))
- buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
+ buf[2] |= ACPI_PDC_EST_CAPABILITY_SWSMP;
obj->type = ACPI_TYPE_BUFFER;
obj->buffer.length = 12;
connect_bsp_APIC();
+ /*
+ * Hack: In case of kdump, after a crash, kernel might be booting
+ * on a cpu with non-zero lapic id. But boot_cpu_physical_apicid
+ * might be zero if read from MP tables. Get it from LAPIC.
+ */
+#ifdef CONFIG_CRASH_DUMP
+ boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
+#endif
phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid);
setup_local_APIC();
struct cpufreq_acpi_io {
- struct acpi_processor_performance acpi_data;
+ struct acpi_processor_performance *acpi_data;
struct cpufreq_frequency_table *freq_table;
unsigned int resume;
};
static struct cpufreq_acpi_io *acpi_io_data[NR_CPUS];
+static struct acpi_processor_performance *acpi_perf_data[NR_CPUS];
static struct cpufreq_driver acpi_cpufreq_driver;
{
u16 port = 0;
u8 bit_width = 0;
+ int i = 0;
int ret = 0;
u32 value = 0;
- int i = 0;
- struct cpufreq_freqs cpufreq_freqs;
- cpumask_t saved_mask;
int retval;
+ struct acpi_processor_performance *perf;
dprintk("acpi_processor_set_performance\n");
- /*
- * TBD: Use something other than set_cpus_allowed.
- * As set_cpus_allowed is a bit racy,
- * with any other set_cpus_allowed for this process.
- */
- saved_mask = current->cpus_allowed;
- set_cpus_allowed(current, cpumask_of_cpu(cpu));
- if (smp_processor_id() != cpu) {
- return (-EAGAIN);
- }
-
- if (state == data->acpi_data.state) {
+ retval = 0;
+ perf = data->acpi_data;
+ if (state == perf->state) {
if (unlikely(data->resume)) {
dprintk("Called after resume, resetting to P%d\n", state);
data->resume = 0;
} else {
dprintk("Already at target state (P%d)\n", state);
- retval = 0;
- goto migrate_end;
+ return (retval);
}
}
- dprintk("Transitioning from P%d to P%d\n",
- data->acpi_data.state, state);
-
- /* cpufreq frequency struct */
- cpufreq_freqs.cpu = cpu;
- cpufreq_freqs.old = data->freq_table[data->acpi_data.state].frequency;
- cpufreq_freqs.new = data->freq_table[state].frequency;
-
- /* notify cpufreq */
- cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_PRECHANGE);
+ dprintk("Transitioning from P%d to P%d\n", perf->state, state);
/*
* First we write the target state's 'control' value to the
* control_register.
*/
- port = data->acpi_data.control_register.address;
- bit_width = data->acpi_data.control_register.bit_width;
- value = (u32) data->acpi_data.states[state].control;
+ port = perf->control_register.address;
+ bit_width = perf->control_register.bit_width;
+ value = (u32) perf->states[state].control;
dprintk("Writing 0x%08x to port 0x%04x\n", value, port);
ret = acpi_processor_write_port(port, bit_width, value);
if (ret) {
dprintk("Invalid port width 0x%04x\n", bit_width);
- retval = ret;
- goto migrate_end;
+ return (ret);
}
/*
* before giving up.
*/
- port = data->acpi_data.status_register.address;
- bit_width = data->acpi_data.status_register.bit_width;
+ port = perf->status_register.address;
+ bit_width = perf->status_register.bit_width;
dprintk("Looking for 0x%08x from port 0x%04x\n",
- (u32) data->acpi_data.states[state].status, port);
+ (u32) perf->states[state].status, port);
- for (i=0; i<100; i++) {
+ for (i = 0; i < 100; i++) {
ret = acpi_processor_read_port(port, bit_width, &value);
if (ret) {
dprintk("Invalid port width 0x%04x\n", bit_width);
- retval = ret;
- goto migrate_end;
+ return (ret);
}
- if (value == (u32) data->acpi_data.states[state].status)
+ if (value == (u32) perf->states[state].status)
break;
udelay(10);
}
} else {
i = 0;
- value = (u32) data->acpi_data.states[state].status;
+ value = (u32) perf->states[state].status;
}
- /* notify cpufreq */
- cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE);
-
- if (unlikely(value != (u32) data->acpi_data.states[state].status)) {
- unsigned int tmp = cpufreq_freqs.new;
- cpufreq_freqs.new = cpufreq_freqs.old;
- cpufreq_freqs.old = tmp;
- cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_PRECHANGE);
- cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE);
+ if (unlikely(value != (u32) perf->states[state].status)) {
printk(KERN_WARNING "acpi-cpufreq: Transition failed\n");
retval = -ENODEV;
- goto migrate_end;
+ return (retval);
}
dprintk("Transition successful after %d microseconds\n", i * 10);
- data->acpi_data.state = state;
-
- retval = 0;
-migrate_end:
- set_cpus_allowed(current, saved_mask);
+ perf->state = state;
return (retval);
}
unsigned int relation)
{
struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
+ struct acpi_processor_performance *perf;
+ struct cpufreq_freqs freqs;
+ cpumask_t online_policy_cpus;
+ cpumask_t saved_mask;
+ cpumask_t set_mask;
+ cpumask_t covered_cpus;
+ unsigned int cur_state = 0;
unsigned int next_state = 0;
unsigned int result = 0;
+ unsigned int j;
+ unsigned int tmp;
dprintk("acpi_cpufreq_setpolicy\n");
target_freq,
relation,
&next_state);
- if (result)
+ if (unlikely(result))
return (result);
- result = acpi_processor_set_performance (data, policy->cpu, next_state);
+ perf = data->acpi_data;
+ cur_state = perf->state;
+ freqs.old = data->freq_table[cur_state].frequency;
+ freqs.new = data->freq_table[next_state].frequency;
+
+#ifdef CONFIG_HOTPLUG_CPU
+ /* cpufreq holds the hotplug lock, so we are safe from here on */
+ cpus_and(online_policy_cpus, cpu_online_map, policy->cpus);
+#else
+ online_policy_cpus = policy->cpus;
+#endif
+
+ for_each_cpu_mask(j, online_policy_cpus) {
+ freqs.cpu = j;
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ }
+
+ /*
+ * We need to call driver->target() on all or any CPU in
+ * policy->cpus, depending on policy->shared_type.
+ */
+ saved_mask = current->cpus_allowed;
+ cpus_clear(covered_cpus);
+ for_each_cpu_mask(j, online_policy_cpus) {
+ /*
+ * Support for SMP systems.
+ * Make sure we are running on CPU that wants to change freq
+ */
+ cpus_clear(set_mask);
+ if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
+ cpus_or(set_mask, set_mask, online_policy_cpus);
+ else
+ cpu_set(j, set_mask);
+
+ set_cpus_allowed(current, set_mask);
+ if (unlikely(!cpu_isset(smp_processor_id(), set_mask))) {
+ dprintk("couldn't limit to CPUs in this domain\n");
+ result = -EAGAIN;
+ break;
+ }
+
+ result = acpi_processor_set_performance (data, j, next_state);
+ if (result) {
+ result = -EAGAIN;
+ break;
+ }
+
+ if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
+ break;
+
+ cpu_set(j, covered_cpus);
+ }
+
+ for_each_cpu_mask(j, online_policy_cpus) {
+ freqs.cpu = j;
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ }
+ if (unlikely(result)) {
+ /*
+ * We have failed halfway through the frequency change.
+ * We have sent callbacks to online_policy_cpus and
+ * acpi_processor_set_performance() has been called on
+	 * covered_cpus. Best effort undo.
+ */
+
+ if (!cpus_empty(covered_cpus)) {
+ for_each_cpu_mask(j, covered_cpus) {
+ policy->cpu = j;
+ acpi_processor_set_performance (data,
+ j,
+ cur_state);
+ }
+ }
+
+ tmp = freqs.new;
+ freqs.new = freqs.old;
+ freqs.old = tmp;
+ for_each_cpu_mask(j, online_policy_cpus) {
+ freqs.cpu = j;
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ }
+ }
+
+ set_cpus_allowed(current, saved_mask);
return (result);
}
struct cpufreq_acpi_io *data,
unsigned int cpu)
{
+ struct acpi_processor_performance *perf = data->acpi_data;
+
if (cpu_khz) {
/* search the closest match to cpu_khz */
unsigned int i;
unsigned long freq;
- unsigned long freqn = data->acpi_data.states[0].core_frequency * 1000;
+ unsigned long freqn = perf->states[0].core_frequency * 1000;
- for (i=0; i < (data->acpi_data.state_count - 1); i++) {
+ for (i = 0; i < (perf->state_count - 1); i++) {
freq = freqn;
- freqn = data->acpi_data.states[i+1].core_frequency * 1000;
+ freqn = perf->states[i+1].core_frequency * 1000;
if ((2 * cpu_khz) > (freqn + freq)) {
- data->acpi_data.state = i;
+ perf->state = i;
return (freq);
}
}
- data->acpi_data.state = data->acpi_data.state_count - 1;
+ perf->state = perf->state_count - 1;
return (freqn);
- } else
+ } else {
/* assume CPU is at P0... */
- data->acpi_data.state = 0;
- return data->acpi_data.states[0].core_frequency * 1000;
-
+ perf->state = 0;
+ return perf->states[0].core_frequency * 1000;
+ }
}
+/*
+ * acpi_cpufreq_early_init - initialize ACPI P-States library
+ *
+ * Initialize the ACPI P-States library (drivers/acpi/processor_perflib.c)
+ * in order to determine correct frequency and voltage pairings. We can
+ * do _PDC and _PSD and find out the processor dependency for the
+ * actual init that will happen later...
+ */
+static int acpi_cpufreq_early_init_acpi(void)
+{
+ struct acpi_processor_performance *data;
+ unsigned int i, j;
+
+ dprintk("acpi_cpufreq_early_init\n");
+
+ for_each_cpu(i) {
+ data = kzalloc(sizeof(struct acpi_processor_performance),
+ GFP_KERNEL);
+ if (!data) {
+ for_each_cpu(j) {
+ kfree(acpi_perf_data[j]);
+ acpi_perf_data[j] = NULL;
+ }
+ return (-ENOMEM);
+ }
+ acpi_perf_data[i] = data;
+ }
+
+ /* Do initialization in ACPI core */
+ acpi_processor_preregister_performance(acpi_perf_data);
+ return 0;
+}
+
static int
acpi_cpufreq_cpu_init (
struct cpufreq_policy *policy)
struct cpufreq_acpi_io *data;
unsigned int result = 0;
struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
+ struct acpi_processor_performance *perf;
dprintk("acpi_cpufreq_cpu_init\n");
+ if (!acpi_perf_data[cpu])
+ return (-ENODEV);
+
data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL);
if (!data)
return (-ENOMEM);
+ data->acpi_data = acpi_perf_data[cpu];
acpi_io_data[cpu] = data;
- result = acpi_processor_register_performance(&data->acpi_data, cpu);
+ result = acpi_processor_register_performance(data->acpi_data, cpu);
if (result)
goto err_free;
+ perf = data->acpi_data;
+ policy->cpus = perf->shared_cpu_map;
+ policy->shared_type = perf->shared_type;
+
if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
}
/* capability check */
- if (data->acpi_data.state_count <= 1) {
+ if (perf->state_count <= 1) {
dprintk("No P-States\n");
result = -ENODEV;
goto err_unreg;
}
- if ((data->acpi_data.control_register.space_id != ACPI_ADR_SPACE_SYSTEM_IO) ||
- (data->acpi_data.status_register.space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
+
+ if ((perf->control_register.space_id != ACPI_ADR_SPACE_SYSTEM_IO) ||
+ (perf->status_register.space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
dprintk("Unsupported address space [%d, %d]\n",
- (u32) (data->acpi_data.control_register.space_id),
- (u32) (data->acpi_data.status_register.space_id));
+ (u32) (perf->control_register.space_id),
+ (u32) (perf->status_register.space_id));
result = -ENODEV;
goto err_unreg;
}
/* alloc freq_table */
- data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) * (data->acpi_data.state_count + 1), GFP_KERNEL);
+ data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) * (perf->state_count + 1), GFP_KERNEL);
if (!data->freq_table) {
result = -ENOMEM;
goto err_unreg;
/* detect transition latency */
policy->cpuinfo.transition_latency = 0;
- for (i=0; i<data->acpi_data.state_count; i++) {
- if ((data->acpi_data.states[i].transition_latency * 1000) > policy->cpuinfo.transition_latency)
- policy->cpuinfo.transition_latency = data->acpi_data.states[i].transition_latency * 1000;
+ for (i=0; i<perf->state_count; i++) {
+ if ((perf->states[i].transition_latency * 1000) > policy->cpuinfo.transition_latency)
+ policy->cpuinfo.transition_latency = perf->states[i].transition_latency * 1000;
}
policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
/* table init */
- for (i=0; i<=data->acpi_data.state_count; i++)
+ for (i=0; i<=perf->state_count; i++)
{
data->freq_table[i].index = i;
- if (i<data->acpi_data.state_count)
- data->freq_table[i].frequency = data->acpi_data.states[i].core_frequency * 1000;
+ if (i<perf->state_count)
+ data->freq_table[i].frequency = perf->states[i].core_frequency * 1000;
else
data->freq_table[i].frequency = CPUFREQ_TABLE_END;
}
printk(KERN_INFO "acpi-cpufreq: CPU%u - ACPI performance management activated.\n",
cpu);
- for (i = 0; i < data->acpi_data.state_count; i++)
+ for (i = 0; i < perf->state_count; i++)
dprintk(" %cP%d: %d MHz, %d mW, %d uS\n",
- (i == data->acpi_data.state?'*':' '), i,
- (u32) data->acpi_data.states[i].core_frequency,
- (u32) data->acpi_data.states[i].power,
- (u32) data->acpi_data.states[i].transition_latency);
+ (i == perf->state?'*':' '), i,
+ (u32) perf->states[i].core_frequency,
+ (u32) perf->states[i].power,
+ (u32) perf->states[i].transition_latency);
cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);
err_freqfree:
kfree(data->freq_table);
err_unreg:
- acpi_processor_unregister_performance(&data->acpi_data, cpu);
+ acpi_processor_unregister_performance(perf, cpu);
err_free:
kfree(data);
acpi_io_data[cpu] = NULL;
if (data) {
cpufreq_frequency_table_put_attr(policy->cpu);
acpi_io_data[policy->cpu] = NULL;
- acpi_processor_unregister_performance(&data->acpi_data, policy->cpu);
+ acpi_processor_unregister_performance(data->acpi_data, policy->cpu);
kfree(data);
}
dprintk("acpi_cpufreq_init\n");
- result = cpufreq_register_driver(&acpi_cpufreq_driver);
+ result = acpi_cpufreq_early_init_acpi();
+
+ if (!result)
+ result = cpufreq_register_driver(&acpi_cpufreq_driver);
return (result);
}
static void __exit
acpi_cpufreq_exit (void)
{
+ unsigned int i;
dprintk("acpi_cpufreq_exit\n");
cpufreq_unregister_driver(&acpi_cpufreq_driver);
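+ /* free the per-CPU performance data allocated during early init */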
+ for_each_cpu(i) {
+ kfree(acpi_perf_data[i]);
+ acpi_perf_data[i] = NULL;
+ }
return;
}
#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
-static struct acpi_processor_performance p;
+static struct acpi_processor_performance *acpi_perf_data[NR_CPUS];
+
+/*
+ * centrino_cpu_early_init_acpi - preregister with the ACPI P-States library
+ *
+ * Before doing the actual init, we need to do the _PSD related setup when
+ * it is supported by the BIOS. This early_init routine handles that.
+ */
+static int centrino_cpu_early_init_acpi(void)
+{
+ unsigned int i, j;
+ struct acpi_processor_performance *data;
+
+ for_each_cpu(i) {
+ data = kzalloc(sizeof(struct acpi_processor_performance),
+ GFP_KERNEL);
+ if (!data) {
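+ /* allocation failed: free whatever was already allocated (kfree(NULL) is a no-op) */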
+ for_each_cpu(j) {
+ kfree(acpi_perf_data[j]);
+ acpi_perf_data[j] = NULL;
+ }
+ return (-ENOMEM);
+ }
+ acpi_perf_data[i] = data;
+ }
+
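+ /* evaluate _PSD now so shared_cpu_map and shared_type are valid before the per-CPU init runs */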
+ acpi_processor_preregister_performance(acpi_perf_data);
+ return 0;
+}
/*
* centrino_cpu_init_acpi - register with ACPI P-States library
unsigned long cur_freq;
int result = 0, i;
unsigned int cpu = policy->cpu;
+ struct acpi_processor_performance *p;
+
+ p = acpi_perf_data[cpu];
/* register with ACPI core */
- if (acpi_processor_register_performance(&p, cpu)) {
+ if (acpi_processor_register_performance(p, cpu)) {
dprintk(KERN_INFO PFX "obtaining ACPI data failed\n");
return -EIO;
}
+ policy->cpus = p->shared_cpu_map;
+ policy->shared_type = p->shared_type;
/* verify the acpi_data */
- if (p.state_count <= 1) {
+ if (p->state_count <= 1) {
dprintk("No P-States\n");
result = -ENODEV;
goto err_unreg;
}
- if ((p.control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
- (p.status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
+ if ((p->control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
+ (p->status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
dprintk("Invalid control/status registers (%x - %x)\n",
- p.control_register.space_id, p.status_register.space_id);
+ p->control_register.space_id, p->status_register.space_id);
result = -EIO;
goto err_unreg;
}
- for (i=0; i<p.state_count; i++) {
- if (p.states[i].control != p.states[i].status) {
+ for (i=0; i<p->state_count; i++) {
+ if (p->states[i].control != p->states[i].status) {
dprintk("Different control (%llu) and status values (%llu)\n",
- p.states[i].control, p.states[i].status);
+ p->states[i].control, p->states[i].status);
result = -EINVAL;
goto err_unreg;
}
- if (!p.states[i].core_frequency) {
+ if (!p->states[i].core_frequency) {
dprintk("Zero core frequency for state %u\n", i);
result = -EINVAL;
goto err_unreg;
}
- if (p.states[i].core_frequency > p.states[0].core_frequency) {
+ if (p->states[i].core_frequency > p->states[0].core_frequency) {
dprintk("P%u has larger frequency (%llu) than P0 (%llu), skipping\n", i,
- p.states[i].core_frequency, p.states[0].core_frequency);
- p.states[i].core_frequency = 0;
+ p->states[i].core_frequency, p->states[0].core_frequency);
+ p->states[i].core_frequency = 0;
continue;
}
}
}
centrino_model[cpu]->model_name=NULL;
- centrino_model[cpu]->max_freq = p.states[0].core_frequency * 1000;
+ centrino_model[cpu]->max_freq = p->states[0].core_frequency * 1000;
centrino_model[cpu]->op_points = kmalloc(sizeof(struct cpufreq_frequency_table) *
- (p.state_count + 1), GFP_KERNEL);
+ (p->state_count + 1), GFP_KERNEL);
if (!centrino_model[cpu]->op_points) {
result = -ENOMEM;
goto err_kfree;
}
- for (i=0; i<p.state_count; i++) {
- centrino_model[cpu]->op_points[i].index = p.states[i].control;
- centrino_model[cpu]->op_points[i].frequency = p.states[i].core_frequency * 1000;
+ for (i=0; i<p->state_count; i++) {
+ centrino_model[cpu]->op_points[i].index = p->states[i].control;
+ centrino_model[cpu]->op_points[i].frequency = p->states[i].core_frequency * 1000;
dprintk("adding state %i with frequency %u and control value %04x\n",
i, centrino_model[cpu]->op_points[i].frequency, centrino_model[cpu]->op_points[i].index);
}
- centrino_model[cpu]->op_points[p.state_count].frequency = CPUFREQ_TABLE_END;
+ centrino_model[cpu]->op_points[p->state_count].frequency = CPUFREQ_TABLE_END;
cur_freq = get_cur_freq(cpu);
- for (i=0; i<p.state_count; i++) {
- if (!p.states[i].core_frequency) {
+ for (i=0; i<p->state_count; i++) {
+ if (!p->states[i].core_frequency) {
dprintk("skipping state %u\n", i);
centrino_model[cpu]->op_points[i].frequency = CPUFREQ_ENTRY_INVALID;
continue;
}
if (cur_freq == centrino_model[cpu]->op_points[i].frequency)
- p.state = i;
+ p->state = i;
}
/* notify BIOS that we exist */
err_kfree:
kfree(centrino_model[cpu]);
err_unreg:
- acpi_processor_unregister_performance(&p, cpu);
+ acpi_processor_unregister_performance(p, cpu);
dprintk(KERN_INFO PFX "invalid ACPI data\n");
return (result);
}
#else
static inline int centrino_cpu_init_acpi(struct cpufreq_policy *policy) { return -ENODEV; }
+static inline int centrino_cpu_early_init_acpi(void) { return 0; }
#endif
static int centrino_cpu_init(struct cpufreq_policy *policy)
#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
if (!centrino_model[cpu]->model_name) {
- dprintk("unregistering and freeing ACPI data\n");
- acpi_processor_unregister_performance(&p, cpu);
- kfree(centrino_model[cpu]->op_points);
- kfree(centrino_model[cpu]);
+ static struct acpi_processor_performance *p;
+
+ if (acpi_perf_data[cpu]) {
+ p = acpi_perf_data[cpu];
+ dprintk("unregistering and freeing ACPI data\n");
+ acpi_processor_unregister_performance(p, cpu);
+ kfree(centrino_model[cpu]->op_points);
+ kfree(centrino_model[cpu]);
+ }
}
#endif
unsigned int relation)
{
unsigned int newstate = 0;
- unsigned int msr, oldmsr, h, cpu = policy->cpu;
+ unsigned int msr, oldmsr = 0, h = 0, cpu = policy->cpu;
struct cpufreq_freqs freqs;
+ cpumask_t online_policy_cpus;
cpumask_t saved_mask;
- int retval;
+ cpumask_t set_mask;
+ cpumask_t covered_cpus;
+ int retval = 0;
+ unsigned int j, k, first_cpu, tmp;
- if (centrino_model[cpu] == NULL)
+ if (unlikely(centrino_model[cpu] == NULL))
return -ENODEV;
- /*
- * Support for SMP systems.
- * Make sure we are running on the CPU that wants to change frequency
- */
- saved_mask = current->cpus_allowed;
- set_cpus_allowed(current, policy->cpus);
- if (!cpu_isset(smp_processor_id(), policy->cpus)) {
- dprintk("couldn't limit to CPUs in this domain\n");
- return(-EAGAIN);
+ if (unlikely(cpufreq_frequency_table_target(policy,
+ centrino_model[cpu]->op_points,
+ target_freq,
+ relation,
+ &newstate))) {
+ return -EINVAL;
}
- if (cpufreq_frequency_table_target(policy, centrino_model[cpu]->op_points, target_freq,
- relation, &newstate)) {
- retval = -EINVAL;
- goto migrate_end;
- }
+#ifdef CONFIG_HOTPLUG_CPU
+ /* cpufreq holds the hotplug lock, so we are safe from here on */
+ cpus_and(online_policy_cpus, cpu_online_map, policy->cpus);
+#else
+ online_policy_cpus = policy->cpus;
+#endif
- msr = centrino_model[cpu]->op_points[newstate].index;
- rdmsr(MSR_IA32_PERF_CTL, oldmsr, h);
+ saved_mask = current->cpus_allowed;
+ first_cpu = 1;
+ cpus_clear(covered_cpus);
+ for_each_cpu_mask(j, online_policy_cpus) {
+ /*
+ * Support for SMP systems.
+ * Make sure we are running on CPU that wants to change freq
+ */
+ cpus_clear(set_mask);
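+ /* with CPUFREQ_SHARED_TYPE_ANY any CPU in the domain may do the transition; otherwise run on CPU j itself */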
+ if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
+ cpus_or(set_mask, set_mask, online_policy_cpus);
+ else
+ cpu_set(j, set_mask);
+
+ set_cpus_allowed(current, set_mask);
+ if (unlikely(!cpu_isset(smp_processor_id(), set_mask))) {
+ dprintk("couldn't limit to CPUs in this domain\n");
+ retval = -EAGAIN;
+ if (first_cpu) {
+ /* We haven't started the transition yet. */
+ goto migrate_end;
+ }
+ break;
+ }
- if (msr == (oldmsr & 0xffff)) {
- retval = 0;
- dprintk("no change needed - msr was and needs to be %x\n", oldmsr);
- goto migrate_end;
- }
+ msr = centrino_model[cpu]->op_points[newstate].index;
+
+ if (first_cpu) {
+ rdmsr(MSR_IA32_PERF_CTL, oldmsr, h);
+ if (msr == (oldmsr & 0xffff)) {
+ dprintk("no change needed - msr was and needs "
+ "to be %x\n", oldmsr);
+ retval = 0;
+ goto migrate_end;
+ }
+
+ freqs.old = extract_clock(oldmsr, cpu, 0);
+ freqs.new = extract_clock(msr, cpu, 0);
+
+ dprintk("target=%dkHz old=%d new=%d msr=%04x\n",
+ target_freq, freqs.old, freqs.new, msr);
+
+ for_each_cpu_mask(k, online_policy_cpus) {
+ freqs.cpu = k;
+ cpufreq_notify_transition(&freqs,
+ CPUFREQ_PRECHANGE);
+ }
+
+ first_cpu = 0;
+ /* all but 16 LSB are reserved, treat them with care */
+ oldmsr &= ~0xffff;
+ msr &= 0xffff;
+ oldmsr |= msr;
+ }
- freqs.cpu = cpu;
- freqs.old = extract_clock(oldmsr, cpu, 0);
- freqs.new = extract_clock(msr, cpu, 0);
+ wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
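+ /* with SHARED_TYPE_ANY a single MSR write covers every CPU in the domain */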
+ if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
+ break;
- dprintk("target=%dkHz old=%d new=%d msr=%04x\n",
- target_freq, freqs.old, freqs.new, msr);
+ cpu_set(j, covered_cpus);
+ }
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ for_each_cpu_mask(k, online_policy_cpus) {
+ freqs.cpu = k;
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ }
- /* all but 16 LSB are "reserved", so treat them with
- care */
- oldmsr &= ~0xffff;
- msr &= 0xffff;
- oldmsr |= msr;
+ if (unlikely(retval)) {
+ /*
+ * We have failed halfway through the frequency change.
+ * We have sent callbacks to policy->cpus and
+ * MSRs have already been written on covered_cpus.
+ * Best effort undo.
+ */
- wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
+ if (!cpus_empty(covered_cpus)) {
+ for_each_cpu_mask(j, covered_cpus) {
+ set_cpus_allowed(current, cpumask_of_cpu(j));
+ wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
+ }
+ }
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ tmp = freqs.new;
+ freqs.new = freqs.old;
+ freqs.old = tmp;
+ for_each_cpu_mask(j, online_policy_cpus) {
+ freqs.cpu = j;
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ }
+ }
- retval = 0;
migrate_end:
set_cpus_allowed(current, saved_mask);
- return (retval);
+ return 0;
}
static struct freq_attr* centrino_attr[] = {
if (!cpu_has(cpu, X86_FEATURE_EST))
return -ENODEV;
+ centrino_cpu_early_init_acpi();
+
return cpufreq_register_driver(&centrino_driver);
}
static void __exit centrino_exit(void)
{
+#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
+ unsigned int j;
+#endif
+
cpufreq_unregister_driver(&centrino_driver);
+
+#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
+ for_each_cpu(j) {
+ kfree(acpi_perf_data[j]);
+ acpi_perf_data[j] = NULL;
+ }
+#endif
}
MODULE_AUTHOR ("Jeremy Fitzhardinge <jeremy@goop.org>");
probe_roms();
for (i = 0; i < e820.nr_map; i++) {
struct resource *res;
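+ /* skip e820 regions above 4GB; they cannot be represented in a 32-bit struct resource on i386 */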
+ if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL)
+ continue;
res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
switch (e820.map[i].type) {
case E820_RAM: res->name = "System RAM"; break;
if (efi_enabled)
efi_map_memmap();
-#ifdef CONFIG_X86_IO_APIC
- check_acpi_pci(); /* Checks more than just ACPI actually */
-#endif
-
#ifdef CONFIG_ACPI
/*
* Parse the ACPI tables for possible boot-time SMP configuration.
*/
acpi_boot_table_init();
+#endif
+
+#ifdef CONFIG_X86_IO_APIC
+ check_acpi_pci(); /* Checks more than just ACPI actually */
+#endif
+
+#ifdef CONFIG_ACPI
acpi_boot_init();
#if defined(CONFIG_SMP) && defined(CONFIG_X86_PC)
.long sys_splice
.long sys_sync_file_range
.long sys_tee /* 315 */
+ .long sys_vmsplice
print_symbol("%s", addr);
printed = (printed + 1) % CONFIG_STACK_BACKTRACE_COLS;
-
if (printed)
- printk(" ");
+ printk(" ");
else
printk("\n");
}
stack = esp;
- printk(log_lvl);
for(i = 0; i < kstack_depth_to_print; i++) {
if (kstack_end(stack))
break;
int i;
for (i = 0; apic_probe[i]; ++i) {
if (apic_probe[i]->mps_oem_check(mpc,oem,productid)) {
- genapic = apic_probe[i];
- printk(KERN_INFO "Switched to APIC driver `%s'.\n",
- genapic->name);
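+ /* do not override an APIC driver that was forced on the command line */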
+ if (!cmdline_apic) {
+ genapic = apic_probe[i];
+ printk(KERN_INFO "Switched to APIC driver `%s'.\n",
+ genapic->name);
+ }
return 1;
}
}
int i;
for (i = 0; apic_probe[i]; ++i) {
if (apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) {
- genapic = apic_probe[i];
- printk(KERN_INFO "Switched to APIC driver `%s'.\n",
- genapic->name);
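+ /* do not override an APIC driver that was forced on the command line */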
+ if (!cmdline_apic) {
+ genapic = apic_probe[i];
+ printk(KERN_INFO "Switched to APIC driver `%s'.\n",
+ genapic->name);
+ }
return 1;
}
}
* Specifically, in the case of x86, we will always add
* memory to the highmem for now.
*/
-#ifdef CONFIG_HOTPLUG_MEMORY
+#ifdef CONFIG_MEMORY_HOTPLUG
#ifndef CONFIG_NEED_MULTIPLE_NODES
int add_memory(u64 start, u64 size)
{
{
__u8 cpu_model = boot_cpu_data.x86_model;
- if (cpu_model > 0xd)
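+ /* model 14 is Intel Core (Yonah) */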
+ if (cpu_model == 14)
+ *cpu_type = "i386/core";
+ else if (cpu_model > 0xd)
return 0;
-
- if (cpu_model == 9) {
+ else if (cpu_model == 9) {
*cpu_type = "i386/p6_mobile";
} else if (cpu_model > 5) {
*cpu_type = "i386/piii";
write_cr4(ctxt->cr4);
write_cr3(ctxt->cr3);
write_cr2(ctxt->cr2);
- write_cr2(ctxt->cr0);
+ write_cr0(ctxt->cr0);
/*
* now restore the descriptor tables to their proper values
config IA64_GENERIC
bool "generic"
select ACPI
+ select PCI
select NUMA
select ACPI_NUMA
help
CONFIG_ARCH_SPARSEMEM_ENABLE=y
CONFIG_ARCH_DISCONTIGMEM_DEFAULT=y
CONFIG_NUMA=y
-CONFIG_NODES_SHIFT=8
+CONFIG_NODES_SHIFT=10
CONFIG_VIRTUAL_MEM_MAP=y
CONFIG_HOLES_IN_ZONE=y
CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y
# CONFIG_SCHEDSTATS is not set
# CONFIG_DEBUG_SLAB is not set
CONFIG_DEBUG_PREEMPT=y
-CONFIG_DEBUG_MUTEXES=y
+# CONFIG_DEBUG_MUTEXES is not set
# CONFIG_DEBUG_SPINLOCK is not set
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_KOBJECT is not set
if (!iovp_shift)
iovp_shift = min(PAGE_SHIFT, 16);
}
- ACPI_MEM_FREE(dev_info);
+ kfree(dev_info);
/*
* default anything not caught above or specified on cmdline to 4k
unsigned char acpi_kbd_controller_present = 1;
unsigned char acpi_legacy_devices;
-static unsigned int __initdata acpi_madt_rev;
-
unsigned int acpi_cpei_override;
unsigned int acpi_cpei_phys_cpuid;
return iosapic_init(iosapic->address, iosapic->global_irq_base);
}
+static unsigned int __initdata acpi_madt_rev;
+
static int __init
acpi_parse_plat_int_src(acpi_table_entry_header * header,
const unsigned long end)
ia64_vector vec = irq_to_vector(irq);
struct iosapic_rte_info *rte;
- move_irq(irq);
+ move_native_irq(irq);
list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list)
iosapic_eoi(rte->addr, vec);
}
{
irq_desc_t *idesc = irq_descp(irq);
- move_irq(irq);
+ move_native_irq(irq);
/*
* Once we have recorded IRQ_PENDING already, we can mask the
* interrupt for real. This prevents IRQ storms from unhandled
if (irq < NR_IRQS) {
irq_affinity[irq] = mask;
- set_irq_info(irq, mask);
irq_redir[irq] = (char) (redir & 0xff);
}
}
return ret;
}
+EXPORT_SYMBOL_GPL(add_memory);
int remove_memory(u64 start, u64 size)
{
return -EINVAL;
}
+EXPORT_SYMBOL_GPL(remove_memory);
#endif
default SGI_IP22
config MIPS_MTX1
- bool "Support for 4G Systems MTX-1 board"
+ bool "4G Systems MTX-1 board"
select DMA_NONCOHERENT
select HW_HAS_PCI
select SOC_AU1500
select SYS_SUPPORTS_LITTLE_ENDIAN
config MIPS_COBALT
- bool "Support for Cobalt Server"
+ bool "Cobalt Server"
select DMA_NONCOHERENT
select HW_HAS_PCI
select I8259
select SYS_SUPPORTS_LITTLE_ENDIAN
config MACH_DECSTATION
- bool "Support for DECstations"
+ bool "DECstations"
select BOOT_ELF32
select DMA_NONCOHERENT
select EARLY_PRINTK
otherwise choose R3000.
config MIPS_EV64120
- bool "Support for Galileo EV64120 Evaluation board (EXPERIMENTAL)"
+ bool "Galileo EV64120 Evaluation board (EXPERIMENTAL)"
depends on EXPERIMENTAL
select DMA_NONCOHERENT
select HW_HAS_PCI
kernel for this platform.
config MIPS_EV96100
- bool "Support for Galileo EV96100 Evaluation board (EXPERIMENTAL)"
+ bool "Galileo EV96100 Evaluation board (EXPERIMENTAL)"
depends on EXPERIMENTAL
select DMA_NONCOHERENT
select HW_HAS_PCI