include/linux/autoconf.h
include/linux/compile.h
include/linux/version.h
+include/linux/utsrelease.h
# stgit generated dirs
patches-*
+
+# quilt's files
+patches
+series
!Iinclude/linux/hrtimer.h
!Ekernel/hrtimer.c
</sect1>
+ <sect1><title>Workqueues and Kevents</title>
+!Ekernel/workqueue.c
+ </sect1>
<sect1><title>Internal Functions</title>
!Ikernel/exit.c
!Ikernel/signal.c
</sect1>
<sect1><title>Resources Management</title>
-!Ekernel/resource.c
+!Ikernel/resource.c
</sect1>
<sect1><title>MTRR Handling</title>
!Edrivers/pci/pci-driver.c
!Edrivers/pci/remove.c
!Edrivers/pci/pci-acpi.c
-<!-- kerneldoc does not understand __devinit
-X!Edrivers/pci/search.c
- -->
+!Edrivers/pci/search.c
!Edrivers/pci/msi.c
!Edrivers/pci/bus.c
<!-- FIXME: Removed for now since no structured comments in source
with "the system." This text is a collection of suggestions which
can greatly increase the chances of your change being accepted.
-If you are submitting a driver, also read Documentation/SubmittingDrivers.
+Read Documentation/SubmitChecklist for a list of items to check
+before submitting code. If you are submitting a driver, also read
+Documentation/SubmittingDrivers.
Quilt:
http://savannah.nongnu.org/projects/quilt
-Randy Dunlap's patch scripts:
-http://www.xenotime.net/linux/scripts/patching-scripts-002.tar.gz
-
Andrew Morton's patch scripts:
http://www.zip.com.au/~akpm/linux/patches/
Instead of these scripts, quilt is the recommended patch management
<http://www.kroah.com/log/2005/10/19/>
<http://www.kroah.com/log/2006/01/11/>
-NO!!!! No more huge patch bombs to linux-kernel@vger.kernel.org people!.
+NO!!!! No more huge patch bombs to linux-kernel@vger.kernel.org people!
<http://marc.theaimsgroup.com/?l=linux-kernel&m=112112749912944&w=2>
Kernel Documentation/CodingStyle
Linus Torvald's mail on the canonical patch format:
<http://lkml.org/lkml/2005/4/7/183>
--
-Last updated on 17 Nov 2005.
CONFIG_TASK_DELAY_ACCT=y
CONFIG_TASKSTATS=y
-Enable the accounting at boot time by adding
-the following to the kernel boot options
- delayacct
+Delay accounting is enabled by default at boot up.
+To disable, add
+ nodelayacct
+to the kernel boot options. The rest of the instructions
+below assume this has not been done.
-and after the system has booted up, use a utility
+After the system has booted up, use a utility
similar to getdelays.c to access the delays
seen by a given task or a task group (tgid).
The utility also allows a given command to be
that some governors won't load - they only
work on some specific architectures or
processors.
-scaling_min_freq and
+scaling_min_freq and
scaling_max_freq show the current "policy limits" (in
kHz). By echoing new values into these
files, you can change these limits.
+ NOTE: when setting a policy you need to
+ first set scaling_max_freq, then
+ scaling_min_freq.
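A minimal user-space sketch of that ordering (the cpu0 sysfs path is the
conventional one, but the frequencies used here are made-up example values
in kHz):

	#include <stdio.h>

	/* Write one cpufreq sysfs attribute of cpu0 (value in kHz). */
	static int write_khz(const char *attr, unsigned long khz)
	{
		char path[128];
		FILE *f;

		snprintf(path, sizeof(path),
			 "/sys/devices/system/cpu/cpu0/cpufreq/%s", attr);
		f = fopen(path, "w");
		if (!f)
			return -1;
		fprintf(f, "%lu\n", khz);
		return fclose(f);
	}

	int main(void)
	{
		/* Set the max limit first, then the min, as noted above. */
		if (write_khz("scaling_max_freq", 1600000))
			return 1;
		if (write_khz("scaling_min_freq", 800000))
			return 1;
		return 0;
	}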
If you have selected the "userspace" governor which allows you to
return NOTIFY_OK;
}
- static struct notifier_block foobar_cpu_notifer =
+	static struct notifier_block __cpuinitdata foobar_cpu_notifier =
{
.notifier_call = foobar_cpu_callback,
};
+You need to call register_cpu_notifier() from your init function.
+Init functions could be of two types:
+1. early init (init function called when only the boot processor is online).
+2. late init (init function called _after_ all the CPUs are online).
-In your init function,
+For the first case, you should add the following to your init function
register_cpu_notifier(&foobar_cpu_notifier);
+For the second case, you should add the following to your init function
+
+ register_hotcpu_notifier(&foobar_cpu_notifier);
+
You can fail PREPARE notifiers if something doesn't work to prepare resources.
This will stop the activity and send a following CANCELED event back.
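Putting the pieces above together, a late-init module could look roughly like
the sketch below; the foobar_* names follow the example above, and the
CPU_ONLINE/CPU_DEAD handling is only a placeholder for whatever per-cpu setup
a real driver would do:

	#include <linux/module.h>
	#include <linux/init.h>
	#include <linux/notifier.h>
	#include <linux/cpu.h>

	static int __cpuinit foobar_cpu_callback(struct notifier_block *nfb,
						 unsigned long action, void *hcpu)
	{
		/* (unsigned long)hcpu is the CPU number being brought up/down */
		switch (action) {
		case CPU_ONLINE:
			/* allocate/start per-cpu state here */
			break;
		case CPU_DEAD:
			/* tear down per-cpu state here */
			break;
		}
		return NOTIFY_OK;
	}

	static struct notifier_block __cpuinitdata foobar_cpu_notifier = {
		.notifier_call = foobar_cpu_callback,
	};

	static int __init foobar_init(void)
	{
		/* late init: the secondary CPUs are already online */
		register_hotcpu_notifier(&foobar_cpu_notifier);
		return 0;
	}
	module_init(foobar_init);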
243 = /dev/usb/dabusb3 Fourth dabusb device
180 block USB block devices
- 0 = /dev/uba First USB block device
- 8 = /dev/ubb Second USB block device
- 16 = /dev/ubc Thrid USB block device
- ...
+ 0 = /dev/uba First USB block device
+ 8 = /dev/ubb Second USB block device
+ 16 = /dev/ubc Third USB block device
+ ...
181 char Conrad Electronic parallel port radio clocks
0 = /dev/pcfclock0 First Conrad radio clock
--- /dev/null
+
+What is imacfb?
+===============
+
+This is a generic EFI platform driver for Intel based Apple computers.
+Imacfb is only for EFI booted Intel Macs.
+
+Supported Hardware
+==================
+
+iMac 17"/20"
+Macbook
+Macbook Pro 15"/17"
+MacMini
+
+How to use it?
+==============
+
+Imacfb does not have any kind of autodetection of your machine.
+You have to add the following kernel parameters to your elilo.conf:
+ Macbook :
+ video=imacfb:macbook
+ MacMini :
+ video=imacfb:mini
+ Macbook Pro 15", iMac 17" :
+ video=imacfb:i17
+ Macbook Pro 17", iMac 20" :
+ video=imacfb:i20
+
+--
+Edgar Hucek <gimli@dark-green.com>
Who: Jean Delvare <khali@linux-fr.org>
---------------------------
+
+What: Bridge netfilter deferred IPv4/IPv6 output hook calling
+When: January 2007
+Why: The deferred output hooks are a layering violation causing unusual
+ and broken behaviour on bridge devices. Examples of things they
+	break include QoS classification using the MARK or CLASSIFY targets,
+ the IPsec policy match and connection tracking with VLANs on a
+ bridge. Their only use is to enable bridge output port filtering
+ within iptables with the physdev match, which can also be done by
+	combining iptables and ebtables using netfilter marks. Until it is
+	removed, the hook deferral is disabled by default and is only
+	enabled when needed.
+
+Who: Patrick McHardy <kaber@trash.net>
+
+---------------------------
References
- IETF IP over InfiniBand (ipoib) Working Group
- http://ietf.org/html.charters/ipoib-charter.html
Transmission of IP over InfiniBand (IPoIB) (RFC 4391)
http://ietf.org/rfc/rfc4391.txt
IP over InfiniBand (IPoIB) Architecture (RFC 4392)
initrd is mounted as root, and the normal boot procedure is followed,
with the RAM disk still mounted as root.
+Compressed cpio images
+----------------------
+
+Recent kernels have support for populating a ramdisk from a compressed cpio
+archive. On such systems, creating a ramdisk image doesn't need to involve
+special block devices or loopbacks; you merely create a directory on disk
+with the desired initrd content, cd to that directory, and run (as an
+example):
+
+find . | cpio --quiet -c -o | gzip -9 -n > /boot/imagefile.img
+
+Examining the contents of an existing image file is just as simple:
+
+mkdir /tmp/imagefile
+cd /tmp/imagefile
+gzip -cd /boot/imagefile.img | cpio -imd --quiet
Installation
------------
The second argument is optional, and if supplied will be used
if first argument is not supported.
+ ld-option
+ ld-option is used to check if $(CC) when used to link object files
+ supports the given option. An optional second option may be
+	specified if the first option is not supported.
+
+ Example:
+ #arch/i386/kernel/Makefile
+ vsyscall-flags += $(call ld-option, -Wl$(comma)--hash-style=sysv)
+
+ In the above example vsyscall-flags will be assigned the option
+ -Wl$(comma)--hash-style=sysv if it is supported by $(CC).
+ The second argument is optional, and if supplied will be used
+	if the first argument is not supported.
+
cc-option
cc-option is used to check if $(CC) support a given option, and not
supported to use an optional second option.
Format: <area>[,<node>]
See also Documentation/networking/decnet.txt.
- delayacct [KNL] Enable per-task delay accounting
-
dhash_entries= [KNL]
Set number of hash buckets for dentry cache.
nocache [ARM]
+ nodelayacct [KNL] Disable per-task delay accounting
+
nodisconnect [HW,SCSI,M68K] Disables SCSI disconnects.
noexec [IA-64]
- default_attrs: Default attributes to be exported via sysfs when the
object is registered.Note that the last attribute has to be
initialized to NULL ! You can find a complete implementation
- in drivers/block/genhd.c
+ in block/genhd.c
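For illustration, a sketch of such a NULL-terminated default_attrs array with
the 2.6-era struct kobj_type layout (the my_* and foo names are invented, and
a real ktype would normally also supply sysfs_ops for show/store):

	#include <linux/kobject.h>
	#include <linux/sysfs.h>
	#include <linux/stat.h>

	static struct attribute foo_attr = {
		.name = "foo",
		.mode = S_IRUGO,
	};

	static struct attribute *my_default_attrs[] = {
		&foo_attr,
		NULL,			/* the last entry must be NULL */
	};

	static void my_release(struct kobject *kobj)
	{
		/* free whatever structure embeds the kobject here */
	}

	static struct kobj_type my_ktype = {
		.release	= my_release,
		.default_attrs	= my_default_attrs,
	};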
Instances of struct kobj_type are not registered; only referenced by
Default: 87380*2 bytes.
tcp_mem - vector of 3 INTEGERs: min, pressure, max
- low: below this number of pages TCP is not bothered about its
+ min: below this number of pages TCP is not bothered about its
memory appetite.
pressure: when amount of memory allocated by TCP exceeds this number
of pages, TCP moderates its memory consumption and enters memory
pressure mode, which is exited when memory consumption falls
- under "low".
+ under "min".
- high: number of pages allowed for queueing by all TCP sockets.
+ max: number of pages allowed for queueing by all TCP sockets.
Defaults are calculated at boot time from amount of available
memory.
- model : Model of the device. Can be "TSEC", "eTSEC", or "FEC"
- compatible : Should be "gianfar"
- reg : Offset and length of the register set for the device
- - address : List of bytes representing the ethernet address of
+ - mac-address : List of bytes representing the ethernet address of
this controller
- interrupts : <a b> where a is the interrupt number and b is a
field that represents an encoding of the sense and level
model = "TSEC";
compatible = "gianfar";
reg = <24000 1000>;
- address = [ 00 E0 0C 00 73 00 ];
+ mac-address = [ 00 E0 0C 00 73 00 ];
interrupts = <d 3 e 3 12 3>;
interrupt-parent = <40000>;
phy-handle = <2452000>
model = "TSEC";
compatible = "gianfar";
reg = <24000 1000>;
- address = [ 00 E0 0C 00 73 00 ];
+ mac-address = [ 00 E0 0C 00 73 00 ];
interrupts = <d 3 e 3 12 3>;
interrupt-parent = <40000>;
phy-handle = <2452000>;
model = "TSEC";
compatible = "gianfar";
reg = <25000 1000>;
- address = [ 00 E0 0C 00 73 01 ];
+ mac-address = [ 00 E0 0C 00 73 01 ];
interrupts = <13 3 14 3 18 3>;
interrupt-parent = <40000>;
phy-handle = <2452001>;
model = "FEC";
compatible = "gianfar";
reg = <26000 1000>;
- address = [ 00 E0 0C 00 73 02 ];
+ mac-address = [ 00 E0 0C 00 73 02 ];
interrupts = <19 3>;
interrupt-parent = <40000>;
phy-handle = <2452002>;
0: try to continue operation
-1: delay a few seconds (to give klogd time to record the oops output) and
- then panic. If the `panic' sysctl is also non-zero then the machine will
- be rebooted.
+1: panic immediately. If the `panic' sysctl is also non-zero then the
+ machine will be rebooted.
==============================================================
would issue more ioctls to the device to communicate to it using
control, bulk, or other kinds of USB transfers. The IOCTLs are
listed in the <linux/usbdevice_fs.h> file, and at this writing the
-source code (linux/drivers/usb/devio.c) is the primary reference
+source code (linux/drivers/usb/core/devio.c) is the primary reference
for how to access devices through those files.
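A rough user-space sketch of that kind of access (the device path, endpoint
number and buffer size are invented examples; only the ioctl names and the
usbdevfs_bulktransfer structure come from <linux/usbdevice_fs.h>):

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/usbdevice_fs.h>

	int main(void)
	{
		unsigned int interface = 0;
		unsigned char buf[64];
		struct usbdevfs_bulktransfer bulk;
		int fd = open("/proc/bus/usb/001/002", O_RDWR);	/* BBB/DDD */

		if (fd < 0)
			return 1;
		if (ioctl(fd, USBDEVFS_CLAIMINTERFACE, &interface) == 0) {
			bulk.ep = 0x81;		/* example bulk IN endpoint */
			bulk.len = sizeof(buf);
			bulk.timeout = 1000;	/* milliseconds */
			bulk.data = buf;
			ioctl(fd, USBDEVFS_BULK, &bulk);  /* returns bytes moved */
			ioctl(fd, USBDEVFS_RELEASEINTERFACE, &interface);
		}
		close(fd);
		return 0;
	}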
Note that since by default these BBB/DDD files are writable only by
Documentation/usb/*, see the following:
Linux-USB project: http://www.linux-usb.org
- mirrors at http://www.suse.cz/development/linux-usb/
- and http://usb.in.tum.de/linux-usb/
+ mirrors at http://usb.in.tum.de/linux-usb/
and http://it.linux-usb.org
Linux USB Guide: http://linux-usb.sourceforge.net
Linux-USB device overview (working devices and drivers):
pagefaulttrace Dump all page faults. Only useful for extreme debugging
and will create a lot of output.
+ call_trace=[old|both|newfallback|new]
+ old: use old inexact backtracer
+ new: use new exact dwarf2 unwinder
+ both: print entries from both
+ newfallback: use new unwinder but fall back to old if it gets
+ stuck (default)
+
Misc
noreplacement Don't replace instructions with more appropriate ones
T: git kernel.org:/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6.git
S: Maintained
+ACPI PCI HOTPLUG DRIVER
+P: Kristen Carlson Accardi
+M: kristen.c.accardi@intel.com
+L: pcihpd-discuss@lists.sourceforge.net
+S: Maintained
+
AD1816 SOUND DRIVER
P: Thorsten Knabe
M: Thorsten Knabe <linux@thorsten-knabe.de>
W: http://www.amd.com/us-en/ConnectivitySolutions/TechnicalResources/0,,50_2334_2452_11363,00.html
S: Supported
+AOA (Apple Onboard Audio) ALSA DRIVER
+P: Johannes Berg
+M: johannes@sipsolutions.net
+L: linuxppc-dev@ozlabs.org
+L: alsa-devel@alsa-project.org
+S: Maintained
+
APM DRIVER
P: Stephen Rothwell
M: sfr@canb.auug.org.au
T: git http://tali.admingilde.org/git/linux-docbook.git
S: Maintained
+DOCKING STATION DRIVER
+P: Kristen Carlson Accardi
+M: kristen.c.accardi@intel.com
+L: linux-acpi@vger.kernel.org
+S: Maintained
+
DOUBLETALK DRIVER
P: James R. Van Zandt
M: jrv@vanzandt.mv.com
M: saw@saw.sw.com.sg
S: Maintained
+EFS FILESYSTEM
+W: http://aeschi.ch.eu.org/efs/
+S: Orphan
+
EMU10K1 SOUND DRIVER
P: James Courtier-Dutton
M: James@superbug.demon.co.uk
T: git kernel.org:/pub/scm/linux/kernel/git/shaggy/jfs-2.6.git
S: Supported
-JOURNALLING LAYER FOR BLOCK DEVICS (JBD)
+JOURNALLING LAYER FOR BLOCK DEVICES (JBD)
P: Stephen Tweedie, Andrew Morton
M: sct@redhat.com, akpm@osdl.org
L: ext2-devel@lists.sourceforge.net
KERNEL JANITORS
P: Several
-L: kernel-janitors@osdl.org
+L: kernel-janitors@lists.osdl.org
W: http://www.kerneljanitors.org/
-W: http://sf.net/projects/kernel-janitor/
S: Maintained
KERNEL NFSD
W: http://megaraid.lsilogic.com
S: Maintained
+MEMORY MANAGEMENT
+L: linux-mm@kvack.org
+L: linux-kernel@vger.kernel.org
+W: http://www.linux-mm.org
+S: Maintained
+
MEMORY TECHNOLOGY DEVICES (MTD)
P: David Woodhouse
M: dwmw2@infradead.org
L: spi-devel-general@lists.sourceforge.net
S: Maintained
+STABLE BRANCH:
+P: Greg Kroah-Hartman
+M: greg@kroah.com
+P: Chris Wright
+M: chrisw@sous-sol.org
+L: stable@kernel.org
+S: Maintained
+
TPM DEVICE DRIVER
P: Kylene Hall
M: kjhall@us.ibm.com
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 18
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc4
NAME=Crazed Snow-Weasel
# *DOCUMENTATION*
CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
-fno-strict-aliasing -fno-common
-# Force gcc to behave correct even for buggy distributions
-CFLAGS += $(call cc-option, -fno-stack-protector-all \
- -fno-stack-protector)
AFLAGS := -D__ASSEMBLY__
# Read KERNELRELEASE from include/config/kernel.release (if it exists)
no-dot-config-targets := clean mrproper distclean \
cscope TAGS tags help %docs check% \
+ include/linux/version.h headers_% \
kernelrelease kernelversion
config-targets := 0
endif # KBUILD_EXTMOD
ifeq ($(dot-config),1)
-# In this section, we need .config
+# Read in config
+-include include/config/auto.conf
+ifeq ($(KBUILD_EXTMOD),)
# Read in dependencies to all Kconfig* files, make sure to run
# oldconfig if changes are detected.
-include include/config/auto.conf.cmd
--include include/config/auto.conf
# To avoid any implicit rule to kick in, define an empty command
$(KCONFIG_CONFIG) include/config/auto.conf.cmd: ;
# if auto.conf.cmd is missing then we are probably in a cleaned tree so
# we execute the config step to be sure to catch updated Kconfig files
include/config/auto.conf: $(KCONFIG_CONFIG) include/config/auto.conf.cmd
-ifeq ($(KBUILD_EXTMOD),)
$(Q)$(MAKE) -f $(srctree)/Makefile silentoldconfig
else
- $(error kernel configuration not valid - run 'make prepare' in $(srctree) to update it)
-endif
+# external modules need include/linux/autoconf.h and include/config/auto.conf
+# but do not care if they are up-to-date. Use auto.conf to trigger the test
+PHONY += include/config/auto.conf
+
+include/config/auto.conf:
+ $(Q)test -e include/linux/autoconf.h -a -e $@ || ( \
+ echo; \
+ echo " ERROR: Kernel configuration is invalid."; \
+ echo " include/linux/autoconf.h or $@ are missing."; \
+ echo " Run 'make oldconfig && make prepare' on kernel src to fix it."; \
+ echo; \
+ /bin/false)
+
+endif # KBUILD_EXTMOD
else
# Dummy target needed, because used as prerequisite
include/config/auto.conf: ;
-endif
+endif # $(dot-config)
# The all: target is the default when no target is given on the
# command line.
CFLAGS += -O2
endif
+include $(srctree)/arch/$(ARCH)/Makefile
+
ifdef CONFIG_FRAME_POINTER
CFLAGS += -fno-omit-frame-pointer $(call cc-option,-fno-optimize-sibling-calls,)
else
CFLAGS += -g
endif
-include $(srctree)/arch/$(ARCH)/Makefile
+# Force gcc to behave correct even for buggy distributions
+CFLAGS += $(call cc-option, -fno-stack-protector)
# arch Makefile may override CC so keep this after arch Makefile is included
NOSTDINC_FLAGS += -nostdinc -isystem $(shell $(CC) -print-file-name=include)
struct el_subpacket_handler ev7_pal_subpacket_handler =
SUBPACKET_HANDLER_INIT(EL_CLASS__PAL, ev7_process_pal_subpacket);
-void
+void
ev7_register_error_handlers(void)
{
int i;
- for(i = 0;
- i<sizeof(el_ev7_pal_annotations)/sizeof(el_ev7_pal_annotations[1]);
- i++) {
+ for (i = 0; i < ARRAY_SIZE(el_ev7_pal_annotations); i++)
cdl_register_subpacket_annotation(&el_ev7_pal_annotations[i]);
- }
+
cdl_register_subpacket_handler(&ev7_pal_subpacket_handler);
}
long len, err = -EINVAL;
offset = command-1;
- if (offset >= sizeof(sysinfo_table)/sizeof(char *)) {
+ if (offset >= ARRAY_SIZE(sysinfo_table)) {
/* Digital UNIX has a few unpublished interfaces here */
printk("sysinfo(%d)", command);
goto out;
}
-
+
down_read(&uts_sem);
res = sysinfo_table[offset];
len = strlen(res)+1;
int alpha_using_srm;
#endif
-#define N(a) (sizeof(a)/sizeof(a[0]))
-
static struct alpha_machine_vector *get_sysvec(unsigned long, unsigned long,
unsigned long);
static struct alpha_machine_vector *get_sysvec_byname(const char *);
standard_io_resources[0].start = RTC_PORT(0);
standard_io_resources[0].end = RTC_PORT(0) + 0x10;
- for (i = 0; i < N(standard_io_resources); ++i)
+ for (i = 0; i < ARRAY_SIZE(standard_io_resources); ++i)
request_resource(io, standard_io_resources+i);
}
/* Search the system tables first... */
vec = NULL;
- if (type < N(systype_vecs)) {
+ if (type < ARRAY_SIZE(systype_vecs)) {
vec = systype_vecs[type];
} else if ((type > ST_API_BIAS) &&
- (type - ST_API_BIAS) < N(api_vecs)) {
+ (type - ST_API_BIAS) < ARRAY_SIZE(api_vecs)) {
vec = api_vecs[type - ST_API_BIAS];
} else if ((type > ST_UNOFFICIAL_BIAS) &&
- (type - ST_UNOFFICIAL_BIAS) < N(unofficial_vecs)) {
+ (type - ST_UNOFFICIAL_BIAS) < ARRAY_SIZE(unofficial_vecs)) {
vec = unofficial_vecs[type - ST_UNOFFICIAL_BIAS];
}
switch (type) {
case ST_DEC_ALCOR:
- if (member < N(alcor_indices))
+ if (member < ARRAY_SIZE(alcor_indices))
vec = alcor_vecs[alcor_indices[member]];
break;
case ST_DEC_EB164:
- if (member < N(eb164_indices))
+ if (member < ARRAY_SIZE(eb164_indices))
vec = eb164_vecs[eb164_indices[member]];
/* PC164 may show as EB164 variation with EV56 CPU,
but, since no true EB164 had anything but EV5... */
vec = &pc164_mv;
break;
case ST_DEC_EB64P:
- if (member < N(eb64p_indices))
+ if (member < ARRAY_SIZE(eb64p_indices))
vec = eb64p_vecs[eb64p_indices[member]];
break;
case ST_DEC_EB66:
- if (member < N(eb66_indices))
+ if (member < ARRAY_SIZE(eb66_indices))
vec = eb66_vecs[eb66_indices[member]];
break;
case ST_DEC_MARVEL:
- if (member < N(marvel_indices))
+ if (member < ARRAY_SIZE(marvel_indices))
vec = marvel_vecs[marvel_indices[member]];
break;
case ST_DEC_TITAN:
vec = titan_vecs[0]; /* default */
- if (member < N(titan_indices))
+ if (member < ARRAY_SIZE(titan_indices))
vec = titan_vecs[titan_indices[member]];
break;
case ST_DEC_TSUNAMI:
- if (member < N(tsunami_indices))
+ if (member < ARRAY_SIZE(tsunami_indices))
vec = tsunami_vecs[tsunami_indices[member]];
break;
case ST_DEC_1000:
size_t i;
- for (i = 0; i < N(all_vecs); ++i) {
+ for (i = 0; i < ARRAY_SIZE(all_vecs); ++i) {
struct alpha_machine_vector *mv = all_vecs[i];
if (strcasecmp(mv->vector_name, name) == 0)
return mv;
/* If not in the tables, make it UNKNOWN,
else set type name to family */
- if (type < N(systype_names)) {
+ if (type < ARRAY_SIZE(systype_names)) {
*type_name = systype_names[type];
} else if ((type > ST_API_BIAS) &&
- (type - ST_API_BIAS) < N(api_names)) {
+ (type - ST_API_BIAS) < ARRAY_SIZE(api_names)) {
*type_name = api_names[type - ST_API_BIAS];
} else if ((type > ST_UNOFFICIAL_BIAS) &&
- (type - ST_UNOFFICIAL_BIAS) < N(unofficial_names)) {
+ (type - ST_UNOFFICIAL_BIAS) < ARRAY_SIZE(unofficial_names)) {
*type_name = unofficial_names[type - ST_UNOFFICIAL_BIAS];
} else {
*type_name = sys_unknown;
default: /* default to variation "0" for now */
break;
case ST_DEC_EB164:
- if (member < N(eb164_indices))
+ if (member < ARRAY_SIZE(eb164_indices))
*variation_name = eb164_names[eb164_indices[member]];
/* PC164 may show as EB164 variation, but with EV56 CPU,
so, since no true EB164 had anything but EV5... */
*variation_name = eb164_names[1]; /* make it PC164 */
break;
case ST_DEC_ALCOR:
- if (member < N(alcor_indices))
+ if (member < ARRAY_SIZE(alcor_indices))
*variation_name = alcor_names[alcor_indices[member]];
break;
case ST_DEC_EB64P:
- if (member < N(eb64p_indices))
+ if (member < ARRAY_SIZE(eb64p_indices))
*variation_name = eb64p_names[eb64p_indices[member]];
break;
case ST_DEC_EB66:
- if (member < N(eb66_indices))
+ if (member < ARRAY_SIZE(eb66_indices))
*variation_name = eb66_names[eb66_indices[member]];
break;
case ST_DEC_MARVEL:
- if (member < N(marvel_indices))
+ if (member < ARRAY_SIZE(marvel_indices))
*variation_name = marvel_names[marvel_indices[member]];
break;
case ST_DEC_RAWHIDE:
- if (member < N(rawhide_indices))
+ if (member < ARRAY_SIZE(rawhide_indices))
*variation_name = rawhide_names[rawhide_indices[member]];
break;
case ST_DEC_TITAN:
*variation_name = titan_names[0]; /* default */
- if (member < N(titan_indices))
+ if (member < ARRAY_SIZE(titan_indices))
*variation_name = titan_names[titan_indices[member]];
break;
case ST_DEC_TSUNAMI:
- if (member < N(tsunami_indices))
+ if (member < ARRAY_SIZE(tsunami_indices))
*variation_name = tsunami_names[tsunami_indices[member]];
break;
}
cpu_index = (unsigned) (cpu->type - 1);
cpu_name = "Unknown";
- if (cpu_index < N(cpu_names))
+ if (cpu_index < ARRAY_SIZE(cpu_names))
cpu_name = cpu_names[cpu_index];
get_sysnames(hwrpb->sys_type, hwrpb->sys_variation,
ruffian_get_bank_size(unsigned long offset)
{
unsigned long bank_addr, bank, ret = 0;
-
+
/* Valid offsets are: 0x800, 0x840 and 0x880
since Ruffian only uses three banks. */
bank_addr = (unsigned long)PYXIS_MCR + offset;
bank = *(vulp)bank_addr;
-
+
/* Check BANK_ENABLE */
if (bank & 0x01) {
static unsigned long size[] __initdata = {
- 0x40000000UL, /* 0x00, 1G */
+ 0x40000000UL, /* 0x00, 1G */
0x20000000UL, /* 0x02, 512M */
0x10000000UL, /* 0x04, 256M */
0x08000000UL, /* 0x06, 128M */
};
bank = (bank & 0x1e) >> 1;
- if (bank < sizeof(size)/sizeof(*size))
+ if (bank < ARRAY_SIZE(size))
ret = size[bank];
}
index = cpu->type & 0xffffffff;
/* If index out of bounds, no way to validate. */
- if (index >= sizeof(cpu_hz)/sizeof(cpu_hz[0]))
+ if (index >= ARRAY_SIZE(cpu_hz))
return cc;
/* If index contains no data, no way to validate. */
}
#endif
-static struct irqchip gic_chip = {
+static struct irq_chip gic_chip = {
+ .name = "GIC",
.ack = gic_ack_irq,
.mask = gic_mask_irq,
.unmask = gic_unmask_irq,
locomo_writel(r, mapbase + LOCOMO_ICR);
}
-static struct irqchip locomo_chip = {
+static struct irq_chip locomo_chip = {
+ .name = "LOCOMO",
.ack = locomo_ack_irq,
.mask = locomo_mask_irq,
.unmask = locomo_unmask_irq,
locomo_writel(r, mapbase + LOCOMO_KEYBOARD + LOCOMO_KIC);
}
-static struct irqchip locomo_key_chip = {
+static struct irq_chip locomo_key_chip = {
+ .name = "LOCOMO-key",
.ack = locomo_key_ack_irq,
.mask = locomo_key_mask_irq,
.unmask = locomo_key_unmask_irq,
locomo_writel(r, mapbase + LOCOMO_GIE);
}
-static struct irqchip locomo_gpio_chip = {
+static struct irq_chip locomo_gpio_chip = {
+ .name = "LOCOMO-gpio",
.ack = locomo_gpio_ack_irq,
.mask = locomo_gpio_mask_irq,
.unmask = locomo_gpio_unmask_irq,
locomo_writel(r, mapbase + LOCOMO_LTINT);
}
-static struct irqchip locomo_lt_chip = {
+static struct irq_chip locomo_lt_chip = {
+ .name = "LOCOMO-lt",
.ack = locomo_lt_ack_irq,
.mask = locomo_lt_mask_irq,
.unmask = locomo_lt_unmask_irq,
locomo_writel(r, mapbase + LOCOMO_SPIIE);
}
-static struct irqchip locomo_spi_chip = {
+static struct irq_chip locomo_spi_chip = {
+ .name = "LOCOMO-spi",
.ack = locomo_spi_ack_irq,
.mask = locomo_spi_mask_irq,
.unmask = locomo_spi_unmask_irq,
rtc_time_to_tm(next_time, next);
}
}
+EXPORT_SYMBOL(rtc_next_alarm_time);
static inline int rtc_arm_read_time(struct rtc_ops *ops, struct rtc_time *tm)
{
return 0;
}
-static struct irqchip sa1111_low_chip = {
+static struct irq_chip sa1111_low_chip = {
+ .name = "SA1111-l",
.ack = sa1111_ack_irq,
.mask = sa1111_mask_lowirq,
.unmask = sa1111_unmask_lowirq,
return 0;
}
-static struct irqchip sa1111_high_chip = {
+static struct irq_chip sa1111_high_chip = {
+ .name = "SA1111-h",
.ack = sa1111_ack_irq,
.mask = sa1111_mask_highirq,
.unmask = sa1111_unmask_highirq,
writel(1 << irq, base + VIC_INT_ENABLE);
}
-static struct irqchip vic_chip = {
+static struct irq_chip vic_chip = {
+ .name = "VIC",
.ack = vic_mask_irq,
.mask = vic_mask_irq,
.unmask = vic_unmask_irq,
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.18-rc1
-# Sun Jul 9 15:21:30 2006
+# Linux kernel version: 2.6.18-rc1-git9
+# Sat Jul 15 15:08:10 2006
#
CONFIG_ARM=y
CONFIG_MMU=y
CONFIG_SYSVIPC=y
# CONFIG_POSIX_MQUEUE is not set
# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
CONFIG_SYSCTL=y
# CONFIG_AUDIT is not set
CONFIG_IKCONFIG=y
# USB support
#
CONFIG_USB_ARCH_HAS_HCD=y
-# CONFIG_USB_ARCH_HAS_OHCI is not set
+CONFIG_USB_ARCH_HAS_OHCI=y
# CONFIG_USB_ARCH_HAS_EHCI is not set
CONFIG_USB=y
CONFIG_USB_DEBUG=y
# USB Host Controller Drivers
#
# CONFIG_USB_ISP116X_HCD is not set
+CONFIG_USB_OHCI_HCD=y
+# CONFIG_USB_OHCI_BIG_ENDIAN is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
# CONFIG_USB_SL811_HCD is not set
#
CONFIG_USB_SERIAL_PL2303=y
# CONFIG_USB_SERIAL_HP4X is not set
# CONFIG_USB_SERIAL_SAFE is not set
+# CONFIG_USB_SERIAL_SIERRAWIRELESS is not set
# CONFIG_USB_SERIAL_TI is not set
# CONFIG_USB_SERIAL_CYBERJACK is not set
# CONFIG_USB_SERIAL_XIRCOM is not set
# CONFIG_USB_LEGOTOWER is not set
# CONFIG_USB_LCD is not set
# CONFIG_USB_LED is not set
-# CONFIG_USB_CY7C63 is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
# CONFIG_USB_CYTHERM is not set
# CONFIG_USB_PHIDGETKIT is not set
# CONFIG_USB_PHIDGETSERVO is not set
# CONFIG_RTC_DRV_X1205 is not set
# CONFIG_RTC_DRV_DS1307 is not set
# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
# CONFIG_RTC_DRV_DS1672 is not set
# CONFIG_RTC_DRV_DS1742 is not set
# CONFIG_RTC_DRV_PCF8563 is not set
# CONFIG_RPCSEC_GSS_SPKM3 is not set
# CONFIG_SMB_FS is not set
# CONFIG_CIFS is not set
-# CONFIG_CIFS_DEBUG2 is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
# CONFIG_AFS_FS is not set
}
}
-static struct irqchip ecard_chip = {
+static struct irq_chip ecard_chip = {
+ .name = "ECARD",
.ack = ecard_irq_mask,
.mask = ecard_irq_mask,
.unmask = ecard_irq_unmask,
* Use the page tables supplied from __cpu_up.
*/
adr r4, __secondary_data
- ldmia r4, {r5, r6, r13} @ address to jump to after
+ ldmia r4, {r5, r7, r13} @ address to jump to after
sub r4, r4, r5 @ mmu has been enabled
- ldr r4, [r6, r4] @ get secondary_data.pgdir
+ ldr r4, [r7, r4] @ get secondary_data.pgdir
adr lr, __enable_mmu @ return address
add pc, r10, #12 @ initialise processor
@ (return control reg)
* r6 = &secondary_data
*/
ENTRY(__secondary_switched)
- ldr sp, [r6, #4] @ get secondary_data.stack
+ ldr sp, [r7, #4] @ get secondary_data.stack
mov fp, #0
b secondary_start_kernel
seq_printf(p, "%3d: ", i);
for_each_present_cpu(cpu)
seq_printf(p, "%10u ", kstat_cpu(cpu).irqs[i]);
+ seq_printf(p, " %10s", irq_desc[i].chip->name ? : "-");
seq_printf(p, " %s", action->name);
for (action = action->next; action; action = action->next)
seq_printf(p, ", %s", action->name);
bust_spinlocks(0);
spin_unlock_irq(&die_lock);
- if (panic_on_oops) {
- printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
- ssleep(5);
+ if (panic_on_oops)
panic("Fatal exception");
- }
do_exit(SIGSEGV);
}
return (type == IRQT_BOTHEDGE) ? 0 : -EINVAL;
}
-static struct irqchip gpio_irqchip = {
+static struct irq_chip gpio_irqchip = {
+ .name = "GPIO",
.mask = gpio_irq_mask,
.unmask = gpio_irq_unmask,
.set_type = gpio_irq_type,
#define at91_aic_set_wake NULL
#endif
-static struct irqchip at91_aic_chip = {
+static struct irq_chip at91_aic_chip = {
+ .name = "AIC",
.ack = at91_aic_mask_irq,
.mask = at91_aic_mask_irq,
.unmask = at91_aic_unmask_irq,
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/tty.h>
+#include <linux/screen_info.h>
#include <asm/hardware/dec21285.h>
#include <asm/io.h>
imx_gpio_handler(mask, irq, desc, regs);
}
-static struct irqchip imx_internal_chip = {
+static struct irq_chip imx_internal_chip = {
+ .name = "MPU",
.ack = imx_mask_irq,
.mask = imx_mask_irq,
.unmask = imx_unmask_irq,
};
-static struct irqchip imx_gpio_chip = {
+static struct irq_chip imx_gpio_chip = {
+ .name = "GPIO",
.ack = imx_gpio_ack_irq,
.mask = imx_gpio_mask_irq,
.unmask = imx_gpio_unmask_irq,
writel(1 << irq, VA_IC_BASE + IRQ_ENABLE_SET);
}
-static struct irqchip sc_chip = {
+static struct irq_chip sc_chip = {
+ .name = "SC",
.ack = sc_mask_irq,
.mask = sc_mask_irq,
.unmask = sc_unmask_irq,
cic_writel(1 << irq, INTCP_VA_CIC_BASE + IRQ_ENABLE_SET);
}
-static struct irqchip cic_chip = {
+static struct irq_chip cic_chip = {
+ .name = "CIC",
.ack = cic_mask_irq,
.mask = cic_mask_irq,
.unmask = cic_unmask_irq,
pic_writel(1 << irq, INTCP_VA_PIC_BASE + IRQ_ENABLE_SET);
}
-static struct irqchip pic_chip = {
+static struct irq_chip pic_chip = {
+ .name = "PIC",
.ack = pic_mask_irq,
.mask = pic_mask_irq,
.unmask = pic_unmask_irq,
sic_writel(1 << irq, INTCP_VA_SIC_BASE + IRQ_ENABLE_SET);
}
-static struct irqchip sic_chip = {
+static struct irq_chip sic_chip = {
+ .name = "SIC",
.ack = sic_mask_irq,
.mask = sic_mask_irq,
.unmask = sic_unmask_irq,
intctl_write(iop321_mask);
}
-struct irqchip ext_chip = {
+struct irq_chip ext_chip = {
+ .name = "IOP",
.ack = iop321_irq_mask,
.mask = iop321_irq_mask,
.unmask = iop321_irq_unmask,
intctl_write1(iop331_mask1);
}
-struct irqchip iop331_irqchip1 = {
+struct irq_chip iop331_irqchip1 = {
+ .name = "IOP-1",
.ack = iop331_irq_mask1,
.mask = iop331_irq_mask1,
.unmask = iop331_irq_unmask1,
};
-struct irqchip iop331_irqchip2 = {
+struct irq_chip iop331_irqchip2 = {
+ .name = "IOP-2",
.ack = iop331_irq_mask2,
.mask = iop331_irq_mask2,
.unmask = iop331_irq_unmask2,
return -EIO;
}
-EXPORT_SYMBOL(pci_set_dma_mask);
-EXPORT_SYMBOL(pci_set_consistent_dma_mask);
EXPORT_SYMBOL(ixp4xx_pci_read);
EXPORT_SYMBOL(ixp4xx_pci_write);
.width = 2,
};
-static struct gtw5715_flash_resource = {
+static struct resource gtwx5715_flash_resource = {
.flags = IORESOURCE_MEM,
-}
+};
static struct platform_device gtwx5715_flash = {
.name = "IXP4XX-Flash",
{
ixp4xx_sys_init();
- if (!flash_resource)
- printk(KERN_ERR "Could not allocate flash resource\n");
-
gtwx5715_flash_resource.start = IXP4XX_EXP_BUS_BASE(0);
gtwx5715_flash_resource.end = IXP4XX_EXP_BUS_BASE(0) + SZ_8M - 1;
CPLD_WR_PB_INT_MASK = CPLD_IRQ_mask;
}
-static struct irqchip kev7a400_cpld_chip = {
+static struct irq_chip kev7a400_cpld_chip = {
+ .name = "CPLD",
.ack = kev7a400_ack_cpld_irq,
.mask = kev7a400_mask_cpld_irq,
.unmask = kev7a400_unmask_cpld_irq,
}
}
-static struct irqchip lpd7a40x_cpld_chip = {
+static struct irq_chip lpd7a40x_cpld_chip = {
+ .name = "CPLD",
.ack = lh7a40x_ack_cpld_irq,
.mask = lh7a40x_mask_cpld_irq,
.unmask = lh7a40x_unmask_cpld_irq,
}
static struct
-irqchip lh7a400_cpld_chip = {
+irq_chip lh7a400_cpld_chip = {
+ .name = "CPLD",
.ack = lh7a400_ack_cpld_irq,
.mask = lh7a400_mask_cpld_irq,
.unmask = lh7a400_unmask_cpld_irq,
INTC_INTENC = (1 << irq);
}
-static struct irqchip lh7a400_internal_chip = {
+static struct irq_chip lh7a400_internal_chip = {
+ .name = "MPU",
.ack = lh7a400_mask_irq, /* Level triggering -> mask is ack */
.mask = lh7a400_mask_irq,
.unmask = lh7a400_unmask_irq,
};
-static struct irqchip lh7a400_gpio_chip = {
+static struct irq_chip lh7a400_gpio_chip = {
+ .name = "GPIO",
.ack = lh7a400_ack_gpio_irq,
.mask = lh7a400_mask_irq,
.unmask = lh7a400_unmask_irq,
VIC2_INTENCLR = (1 << irq);
}
-static struct irqchip lh7a404_vic1_chip = {
+static struct irq_chip lh7a404_vic1_chip = {
+ .name = "VIC1",
.ack = lh7a404_vic1_mask_irq, /* Because level-triggered */
.mask = lh7a404_vic1_mask_irq,
.unmask = lh7a404_vic1_unmask_irq,
};
-static struct irqchip lh7a404_vic2_chip = {
+static struct irq_chip lh7a404_vic2_chip = {
+ .name = "VIC2",
.ack = lh7a404_vic2_mask_irq, /* Because level-triggered */
.mask = lh7a404_vic2_mask_irq,
.unmask = lh7a404_vic2_unmask_irq,
};
-static struct irqchip lh7a404_gpio_vic1_chip = {
+static struct irq_chip lh7a404_gpio_vic1_chip = {
+ .name = "GPIO-VIC1",
.ack = lh7a404_vic1_ack_gpio_irq,
.mask = lh7a404_vic1_mask_irq,
.unmask = lh7a404_vic1_unmask_irq,
};
-static struct irqchip lh7a404_gpio_vic2_chip = {
+static struct irq_chip lh7a404_gpio_vic2_chip = {
+ .name = "GPIO-VIC2",
.ack = lh7a404_vic2_ack_gpio_irq,
.mask = lh7a404_vic2_mask_irq,
.unmask = lh7a404_vic2_unmask_irq,
}
}
-static struct irqchip lh7a40x_cpld_chip = {
+static struct irq_chip lh7a40x_cpld_chip = {
+ .name = "CPLD",
.ack = lh7a40x_ack_cpld_irq,
.mask = lh7a40x_mask_cpld_irq,
.unmask = lh7a40x_unmask_cpld_irq,
}
}
-static struct irqchip omap_fpga_irq_ack = {
+static struct irq_chip omap_fpga_irq_ack = {
+ .name = "FPGA-ack",
.ack = fpga_mask_ack_irq,
.mask = fpga_mask_irq,
.unmask = fpga_unmask_irq,
};
-static struct irqchip omap_fpga_irq = {
+static struct irq_chip omap_fpga_irq = {
+ .name = "FPGA",
.ack = fpga_ack_irq,
.mask = fpga_mask_irq,
.unmask = fpga_unmask_irq,
};
#endif
-static struct irqchip omap_irq_chip = {
+static struct irq_chip omap_irq_chip = {
+ .name = "MPU",
.ack = omap_mask_ack_irq,
.mask = omap_mask_irq,
.unmask = omap_unmask_irq,
omap_ack_irq(irq);
}
-static struct irqchip omap_irq_chip = {
+static struct irq_chip omap_irq_chip = {
+ .name = "INTC",
.ack = omap_mask_ack_irq,
.mask = omap_mask_irq,
.unmask = omap_unmask_irq,
ICMR |= (1 << (irq + PXA_IRQ_SKIP));
}
-static struct irqchip pxa_internal_chip_low = {
+static struct irq_chip pxa_internal_chip_low = {
+ .name = "SC",
.ack = pxa_mask_low_irq,
.mask = pxa_mask_low_irq,
.unmask = pxa_unmask_low_irq,
ICMR2 |= (1 << (irq - 32 + PXA_IRQ_SKIP));
}
-static struct irqchip pxa_internal_chip_high = {
+static struct irq_chip pxa_internal_chip_high = {
+ .name = "SC-hi",
.ack = pxa_mask_high_irq,
.mask = pxa_mask_high_irq,
.unmask = pxa_unmask_high_irq,
GEDR0 = (1 << (irq - IRQ_GPIO0));
}
-static struct irqchip pxa_low_gpio_chip = {
+static struct irq_chip pxa_low_gpio_chip = {
+ .name = "GPIO-l",
.ack = pxa_ack_low_gpio,
.mask = pxa_mask_low_irq,
.unmask = pxa_unmask_low_irq,
GFER(gpio) = GPIO_IRQ_falling_edge[idx] & GPIO_IRQ_mask[idx];
}
-static struct irqchip pxa_muxed_gpio_chip = {
+static struct irq_chip pxa_muxed_gpio_chip = {
+ .name = "GPIO",
.ack = pxa_ack_muxed_gpio,
.mask = pxa_mask_muxed_gpio,
.unmask = pxa_unmask_muxed_gpio,
__raw_writew(lpd270_irq_enabled, LPD270_INT_MASK);
}
-static struct irqchip lpd270_irq_chip = {
+static struct irq_chip lpd270_irq_chip = {
+ .name = "CPLD",
.ack = lpd270_mask_irq,
.mask = lpd270_mask_irq,
.unmask = lpd270_unmask_irq,
LUB_IRQ_MASK_EN = (lubbock_irq_enabled |= (1 << lubbock_irq));
}
-static struct irqchip lubbock_irq_chip = {
+static struct irq_chip lubbock_irq_chip = {
+ .name = "FPGA",
.ack = lubbock_mask_irq,
.mask = lubbock_mask_irq,
.unmask = lubbock_unmask_irq,
MST_INTMSKENA = (mainstone_irq_enabled |= (1 << mainstone_irq));
}
-static struct irqchip mainstone_irq_chip = {
+static struct irq_chip mainstone_irq_chip = {
+ .name = "FPGA",
.ack = mainstone_mask_irq,
.mask = mainstone_mask_irq,
.unmask = mainstone_unmask_irq,
.virtual = (u32)S3C24XX_VA_ISA_BYTE,
.pfn = __phys_to_pfn(0x0),
.length = SZ_4M,
- .type = MT_DEVICE
+ .type = MT_DEVICE,
}, {
.virtual = (u32)S3C24XX_VA_ISA_WORD,
.pfn = __phys_to_pfn(0x0),
- .length = SZ_4M, MT_DEVICE
+ .length = SZ_4M,
+ .type = MT_DEVICE,
},
/* we could possibly compress the next set down into a set of smaller tables
.virtual = (u32)ANUBIS_VA_CTRL1,
.pfn = __phys_to_pfn(ANUBIS_PA_CTRL1),
.length = SZ_4K,
- .type = MT_DEVICE
+ .type = MT_DEVICE,
}, {
.virtual = (u32)ANUBIS_VA_CTRL2,
.pfn = __phys_to_pfn(ANUBIS_PA_CTRL2),
.length = SZ_4K,
- .type =MT_DEVICE
- },
-
- /* IDE drives */
-
- {
- .virtual = (u32)ANUBIS_IDEPRI,
- .pfn = __phys_to_pfn(S3C2410_CS3),
- .length = SZ_1M,
- .type = MT_DEVICE
- }, {
- .virtual = (u32)ANUBIS_IDEPRIAUX,
- .pfn = __phys_to_pfn(S3C2410_CS3+(1<<26)),
- .length = SZ_1M,
- .type = MT_DEVICE
- }, {
- .virtual = (u32)ANUBIS_IDESEC,
- .pfn = __phys_to_pfn(S3C2410_CS4),
- .length = SZ_1M,
- .type = MT_DEVICE
- }, {
- .virtual = (u32)ANUBIS_IDESECAUX,
- .pfn = __phys_to_pfn(S3C2410_CS4+(1<<26)),
- .length = SZ_1M,
- .type = MT_DEVICE
+ .type = MT_DEVICE,
},
};
.name = "pclk",
.divisor = 1,
.min_baud = 0,
- .max_baud = 0.
+ .max_baud = 0,
}
};
.ulcon = ULCON,
.ufcon = UFCON,
.clocks = anubis_serial_clocks,
- .clocks_size = ARRAY_SIZE(anubis_serial_clocks)
+ .clocks_size = ARRAY_SIZE(anubis_serial_clocks),
},
[1] = {
.hwport = 2,
.ulcon = ULCON,
.ufcon = UFCON,
.clocks = anubis_serial_clocks,
- .clocks_size = ARRAY_SIZE(anubis_serial_clocks)
+ .clocks_size = ARRAY_SIZE(anubis_serial_clocks),
},
};
[0] = {
.name = "Boot Agent",
.size = SZ_16K,
- .offset = 0
+ .offset = 0,
},
[1] = {
.name = "/boot",
.nr_chips = 1,
.nr_map = external_map,
.nr_partitions = ARRAY_SIZE(anubis_default_nand_part),
- .partitions = anubis_default_nand_part
+ .partitions = anubis_default_nand_part,
},
[0] = {
.name = "chip0",
.nr_chips = 1,
.nr_map = chip0_map,
.nr_partitions = ARRAY_SIZE(anubis_default_nand_part),
- .partitions = anubis_default_nand_part
+ .partitions = anubis_default_nand_part,
},
[2] = {
.name = "chip1",
.nr_chips = 1,
.nr_map = chip1_map,
.nr_partitions = ARRAY_SIZE(anubis_default_nand_part),
- .partitions = anubis_default_nand_part
+ .partitions = anubis_default_nand_part,
},
};
.devices = anubis_devices,
.devices_count = ARRAY_SIZE(anubis_devices),
.clocks = anubis_clocks,
- .clocks_count = ARRAY_SIZE(anubis_clocks)
+ .clocks_count = ARRAY_SIZE(anubis_clocks),
};
static void __init anubis_map_io(void)
.virtual = (u32)OSIRIS_VA_CTRL1,
.pfn = __phys_to_pfn(OSIRIS_PA_CTRL1),
.length = SZ_16K,
- .type = MT_DEVICE
+ .type = MT_DEVICE,
}, {
.virtual = (u32)OSIRIS_VA_CTRL2,
.pfn = __phys_to_pfn(OSIRIS_PA_CTRL2),
.length = SZ_16K,
- .type = MT_DEVICE
+ .type = MT_DEVICE,
},
};
.name = "pclk",
.divisor = 1,
.min_baud = 0,
- .max_baud = 0.
+ .max_baud = 0,
}
};
.ulcon = ULCON,
.ufcon = UFCON,
.clocks = osiris_serial_clocks,
- .clocks_size = ARRAY_SIZE(osiris_serial_clocks)
+ .clocks_size = ARRAY_SIZE(osiris_serial_clocks),
},
[1] = {
.hwport = 1,
.ulcon = ULCON,
.ufcon = UFCON,
.clocks = osiris_serial_clocks,
- .clocks_size = ARRAY_SIZE(osiris_serial_clocks)
+ .clocks_size = ARRAY_SIZE(osiris_serial_clocks),
},
};
[0] = {
.name = "Boot Agent",
.size = SZ_16K,
- .offset = 0
+ .offset = 0,
},
[1] = {
.name = "/boot",
.nr_chips = 1,
.nr_map = external_map,
.nr_partitions = ARRAY_SIZE(osiris_default_nand_part),
- .partitions = osiris_default_nand_part
+ .partitions = osiris_default_nand_part,
},
[0] = {
.name = "chip0",
.nr_chips = 1,
.nr_map = chip0_map,
.nr_partitions = ARRAY_SIZE(osiris_default_nand_part),
- .partitions = osiris_default_nand_part
+ .partitions = osiris_default_nand_part,
},
[2] = {
.name = "chip1",
.nr_chips = 1,
.nr_map = chip1_map,
.nr_partitions = ARRAY_SIZE(osiris_default_nand_part),
- .partitions = osiris_default_nand_part
+ .partitions = osiris_default_nand_part,
},
};
.devices = osiris_devices,
.devices_count = ARRAY_SIZE(osiris_devices),
.clocks = osiris_clocks,
- .clocks_count = ARRAY_SIZE(osiris_clocks)
+ .clocks_count = ARRAY_SIZE(osiris_clocks),
};
static void __init osiris_map_io(void)
return 0;
}
-static struct irqchip sa1100_low_gpio_chip = {
+static struct irq_chip sa1100_low_gpio_chip = {
+ .name = "GPIO-l",
.ack = sa1100_low_gpio_ack,
.mask = sa1100_low_gpio_mask,
.unmask = sa1100_low_gpio_unmask,
return 0;
}
-static struct irqchip sa1100_high_gpio_chip = {
+static struct irq_chip sa1100_high_gpio_chip = {
+ .name = "GPIO-h",
.ack = sa1100_high_gpio_ack,
.mask = sa1100_high_gpio_mask,
.unmask = sa1100_high_gpio_unmask,
return -EINVAL;
}
-static struct irqchip sa1100_normal_chip = {
+static struct irq_chip sa1100_normal_chip = {
+ .name = "SC",
.ack = sa1100_mask_irq,
.mask = sa1100_mask_irq,
.unmask = sa1100_unmask_irq,
static struct irqaction cascade;
-static struct irqchip fb_chip = {
+static struct irq_chip fb_chip = {
+ .name = "XT-PIC",
.ack = shark_ack_8259A_irq,
.mask = shark_disable_8259A_irq,
.unmask = shark_enable_8259A_irq,
writel(1 << irq, VA_SIC_BASE + SIC_IRQ_ENABLE_SET);
}
-static struct irqchip sic_chip = {
+static struct irq_chip sic_chip = {
+ .name = "SIC",
.ack = sic_mask_irq,
.mask = sic_mask_irq,
.unmask = sic_unmask_irq,
void __iounmap(void __iomem *addr)
{
+#ifndef CONFIG_SMP
struct vm_struct **p, *tmp;
+#endif
unsigned int section_mapping = 0;
addr = (void __iomem *)(PAGE_MASK & (unsigned long)addr);
#include <asm/cacheflush.h>
#include <asm/proc-fns.h>
#include <asm/tlbflush.h>
+#include <asm/page.h>
#ifndef MULTI_CPU
EXPORT_SYMBOL(cpu_dcache_clean_area);
EXPORT_SYMBOL(cpu_cache);
#endif
+#ifndef MULTI_USER
+EXPORT_SYMBOL(__cpu_clear_user_page);
+EXPORT_SYMBOL(__cpu_copy_user_page);
+#else
+EXPORT_SYMBOL(cpu_user);
+#endif
+
/*
* No module should need to touch the TLB (and currently
* no modules do. We export this for "loadkernel" support
.asciz "XScale-80200"
.size cpu_80200_name, . - cpu_80200_name
+ .type cpu_80219_name, #object
+cpu_80219_name:
+ .asciz "XScale-80219"
+ .size cpu_80219_name, . - cpu_80219_name
+
.type cpu_8032x_name, #object
cpu_8032x_name:
.asciz "XScale-IOP8032x Family"
.long xscale_cache_fns
.size __80200_proc_info, . - __80200_proc_info
+ .type __80219_proc_info,#object
+__80219_proc_info:
+ .long 0x69052e20
+ .long 0xffffffe0
+ .long PMD_TYPE_SECT | \
+ PMD_SECT_BUFFERABLE | \
+ PMD_SECT_CACHEABLE | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ
+ .long PMD_TYPE_SECT | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ
+ b __xscale_setup
+ .long cpu_arch_name
+ .long cpu_elf_name
+ .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
+ .long cpu_80219_name
+ .long xscale_processor_functions
+ .long v4wbi_tlb_fns
+ .long xscale_mc_user_fns
+ .long xscale_cache_fns
+ .size __80219_proc_info, . - __80219_proc_info
+
.type __8032x_proc_info,#object
__8032x_proc_info:
.long 0x69052420
- .long 0xfffff5e0 @ mask should accomodate IOP80219 also
+ .long 0xffffffe0
.long PMD_TYPE_SECT | \
PMD_SECT_BUFFERABLE | \
PMD_SECT_CACHEABLE | \
_set_gpio_irqenable(bank, gpio, 1);
}
-static struct irqchip gpio_irq_chip = {
+static struct irq_chip gpio_irq_chip = {
+ .name = "GPIO",
.ack = gpio_ack_irq,
.mask = gpio_mask_irq,
.unmask = gpio_unmask_irq,
.set_wake = gpio_wake_enable,
};
-static struct irqchip mpuio_irq_chip = {
+static struct irq_chip mpuio_irq_chip = {
+ .name = "MPUIO",
.ack = mpuio_ack_irq,
.mask = mpuio_mask_irq,
- .unmask = mpuio_unmask_irq
+ .unmask = mpuio_unmask_irq
};
static int initialized;
See <file:Documentation/mtrr.txt> for more information.
config EFI
- bool "Boot from EFI support (EXPERIMENTAL)"
+ bool "Boot from EFI support"
depends on ACPI
default n
---help---
export CPPFLAGS_vsyscall.lds += -P -C -U$(ARCH)
-vsyscall-flags = -shared -s -Wl,-soname=linux-gate.so.1
+vsyscall-flags = -shared -s -Wl,-soname=linux-gate.so.1 \
+ $(call ld-option, -Wl$(comma)--hash-style=sysv)
SYSCFLAGS_vsyscall-sysenter.so = $(vsyscall-flags)
SYSCFLAGS_vsyscall-int80.so = $(vsyscall-flags)
#define BAD_MADT_ENTRY(entry, end) ( \
(!entry) || (unsigned long)entry + sizeof(*entry) > end || \
- ((acpi_table_entry_header *)entry)->length != sizeof(*entry))
+ ((acpi_table_entry_header *)entry)->length < sizeof(*entry))
#define PREFIX "ACPI: "
pushl $3
call acpi_enter_sleep_state
addl $4, %esp
- ret
+
+# In case of S3 failure, we'll emerge here. Jump
+# to ret_point to recover
+ jmp ret_point
.p2align 4,,7
ret_point:
call restore_registers
config X86_GX_SUSPMOD
tristate "Cyrix MediaGX/NatSemi Geode Suspend Modulation"
+ depends on PCI
help
This add the CPUFreq driver for NatSemi Geode processors which
support suspend modulation.
config X86_LONGHAUL
tristate "VIA Cyrix III Longhaul"
select CPU_FREQ_TABLE
- depends on BROKEN
+ depends on ACPI_PROCESSOR
help
This adds the CPUFreq driver for VIA Samuel/CyrixIII,
VIA Cyrix Samuel/C3, VIA Cyrix Ezra and VIA Cyrix Ezra-T
}
/* Do initialization in ACPI core */
- acpi_processor_preregister_performance(acpi_perf_data);
- return 0;
+ return acpi_processor_preregister_performance(acpi_perf_data);
}
static int
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/pci.h>
#include <asm/msr.h>
#include <asm/timex.h>
#include <asm/io.h>
+#include <asm/acpi.h>
+#include <linux/acpi.h>
+#include <acpi/processor.h>
#include "longhaul.h"
static unsigned int minmult, maxmult;
static int can_scale_voltage;
static int vrmrev;
+static struct acpi_processor *pr = NULL;
+static struct acpi_processor_cx *cx = NULL;
/* Module parameters */
static int dont_scale_voltage;
return eblcr_table[invalue];
}
+/* For processor with BCR2 MSR */
-static void do_powersaver(union msr_longhaul *longhaul,
- unsigned int clock_ratio_index)
+static void do_longhaul1(int cx_address, unsigned int clock_ratio_index)
{
- struct pci_dev *dev;
- unsigned long flags;
- unsigned int tmp_mask;
- int version;
- int i;
- u16 pci_cmd;
- u16 cmd_state[64];
+ union msr_bcr2 bcr2;
+ u32 t;
- switch (cpu_model) {
- case CPU_EZRA_T:
- version = 3;
- break;
- case CPU_NEHEMIAH:
- version = 0xf;
- break;
- default:
- return;
- }
+ rdmsrl(MSR_VIA_BCR2, bcr2.val);
+ /* Enable software clock multiplier */
+ bcr2.bits.ESOFTBF = 1;
+ bcr2.bits.CLOCKMUL = clock_ratio_index;
- rdmsrl(MSR_VIA_LONGHAUL, longhaul->val);
- longhaul->bits.SoftBusRatio = clock_ratio_index & 0xf;
- longhaul->bits.SoftBusRatio4 = (clock_ratio_index & 0x10) >> 4;
- longhaul->bits.EnableSoftBusRatio = 1;
- longhaul->bits.RevisionKey = 0;
+ /* Sync to timer tick */
+ safe_halt();
+ ACPI_FLUSH_CPU_CACHE();
+ /* Change frequency on next halt or sleep */
+ wrmsrl(MSR_VIA_BCR2, bcr2.val);
+ /* Invoke C3 */
+ inb(cx_address);
+ /* Dummy op - must do something useless after P_LVL3 read */
+ t = inl(acpi_fadt.xpm_tmr_blk.address);
+
+ /* Disable software clock multiplier */
+ local_irq_disable();
+ rdmsrl(MSR_VIA_BCR2, bcr2.val);
+ bcr2.bits.ESOFTBF = 0;
+ wrmsrl(MSR_VIA_BCR2, bcr2.val);
+}
- preempt_disable();
- local_irq_save(flags);
+/* For processor with Longhaul MSR */
- /*
- * get current pci bus master state for all devices
- * and clear bus master bit
- */
- dev = NULL;
- i = 0;
- do {
- dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
- if (dev != NULL) {
- pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
- cmd_state[i++] = pci_cmd;
- pci_cmd &= ~PCI_COMMAND_MASTER;
- pci_write_config_word(dev, PCI_COMMAND, pci_cmd);
- }
- } while (dev != NULL);
+static void do_powersaver(int cx_address, unsigned int clock_ratio_index)
+{
+ union msr_longhaul longhaul;
+ u32 t;
- tmp_mask=inb(0x21); /* works on C3. save mask. */
- outb(0xFE,0x21); /* TMR0 only */
- outb(0xFF,0x80); /* delay */
+ rdmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ longhaul.bits.RevisionKey = longhaul.bits.RevisionID;
+ longhaul.bits.SoftBusRatio = clock_ratio_index & 0xf;
+ longhaul.bits.SoftBusRatio4 = (clock_ratio_index & 0x10) >> 4;
+ longhaul.bits.EnableSoftBusRatio = 1;
+ /* Sync to timer tick */
safe_halt();
- wrmsrl(MSR_VIA_LONGHAUL, longhaul->val);
- halt();
-
+ ACPI_FLUSH_CPU_CACHE();
+ /* Change frequency on next halt or sleep */
+ wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ /* Invoke C3 */
+ inb(cx_address);
+ /* Dummy op - must do something useless after P_LVL3 read */
+ t = inl(acpi_fadt.xpm_tmr_blk.address);
+
+ /* Disable bus ratio bit */
local_irq_disable();
-
- outb(tmp_mask,0x21); /* restore mask */
-
- /* restore pci bus master state for all devices */
- dev = NULL;
- i = 0;
- do {
- dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
- if (dev != NULL) {
- pci_cmd = cmd_state[i++];
- pci_write_config_byte(dev, PCI_COMMAND, pci_cmd);
- }
- } while (dev != NULL);
- local_irq_restore(flags);
- preempt_enable();
-
- /* disable bus ratio bit */
- rdmsrl(MSR_VIA_LONGHAUL, longhaul->val);
- longhaul->bits.EnableSoftBusRatio = 0;
- longhaul->bits.RevisionKey = version;
- wrmsrl(MSR_VIA_LONGHAUL, longhaul->val);
+ longhaul.bits.RevisionKey = longhaul.bits.RevisionID;
+ longhaul.bits.EnableSoftBusRatio = 0;
+ longhaul.bits.EnableSoftBSEL = 0;
+ longhaul.bits.EnableSoftVID = 0;
+ wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
}
/**
{
int speed, mult;
struct cpufreq_freqs freqs;
- union msr_longhaul longhaul;
- union msr_bcr2 bcr2;
static unsigned int old_ratio=-1;
+ unsigned long flags;
+ unsigned int pic1_mask, pic2_mask;
if (old_ratio == clock_ratio_index)
return;
dprintk ("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n",
fsb, mult/10, mult%10, print_speed(speed/1000));
+ preempt_disable();
+ local_irq_save(flags);
+
+ pic2_mask = inb(0xA1);
+ pic1_mask = inb(0x21); /* works on C3. save mask. */
+ outb(0xFF,0xA1); /* Overkill */
+ outb(0xFE,0x21); /* TMR0 only */
+
+ /* Disable bus master arbitration */
+ if (pr->flags.bm_check) {
+ acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1,
+ ACPI_MTX_DO_NOT_LOCK);
+ }
+
switch (longhaul_version) {
/*
*/
case TYPE_LONGHAUL_V1:
case TYPE_LONGHAUL_V2:
- rdmsrl (MSR_VIA_BCR2, bcr2.val);
- /* Enable software clock multiplier */
- bcr2.bits.ESOFTBF = 1;
- bcr2.bits.CLOCKMUL = clock_ratio_index;
- local_irq_disable();
- wrmsrl (MSR_VIA_BCR2, bcr2.val);
- safe_halt();
-
- /* Disable software clock multiplier */
- rdmsrl (MSR_VIA_BCR2, bcr2.val);
- bcr2.bits.ESOFTBF = 0;
- local_irq_disable();
- wrmsrl (MSR_VIA_BCR2, bcr2.val);
- local_irq_enable();
+ do_longhaul1(cx->address, clock_ratio_index);
break;
/*
* to work in practice.
*/
case TYPE_POWERSAVER:
- do_powersaver(&longhaul, clock_ratio_index);
+ do_powersaver(cx->address, clock_ratio_index);
break;
}
+ /* Enable bus master arbitration */
+ if (pr->flags.bm_check) {
+ acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0,
+ ACPI_MTX_DO_NOT_LOCK);
+ }
+
+ outb(pic2_mask,0xA1); /* restore mask */
+ outb(pic1_mask,0x21);
+
+ local_irq_restore(flags);
+ preempt_enable();
+
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}
static int __init longhaul_get_ranges(void)
{
unsigned long invalue;
- unsigned int multipliers[32]= {
- 50,30,40,100,55,35,45,95,90,70,80,60,120,75,85,65,
- -1,110,120,-1,135,115,125,105,130,150,160,140,-1,155,-1,145 };
+ unsigned int ezra_t_multipliers[32]= {
+ 90, 30, 40, 100, 55, 35, 45, 95,
+ 50, 70, 80, 60, 120, 75, 85, 65,
+ -1, 110, 120, -1, 135, 115, 125, 105,
+ 130, 150, 160, 140, -1, 155, -1, 145 };
unsigned int j, k = 0;
union msr_longhaul longhaul;
unsigned long lo, hi;
invalue = longhaul.bits.MaxMHzBR;
if (longhaul.bits.MaxMHzBR4)
invalue += 16;
- maxmult=multipliers[invalue];
+ maxmult=ezra_t_multipliers[invalue];
invalue = longhaul.bits.MinMHzBR;
if (longhaul.bits.MinMHzBR4 == 1)
minmult = 30;
else
- minmult = multipliers[invalue];
+ minmult = ezra_t_multipliers[invalue];
fsb = eblcr_fsb_table_v2[longhaul.bits.MaxMHzFSB];
break;
}
return calc_speed(longhaul_get_cpu_mult());
}
+static acpi_status longhaul_walk_callback(acpi_handle obj_handle,
+ u32 nesting_level,
+ void *context, void **return_value)
+{
+ struct acpi_device *d;
+
+ if ( acpi_bus_get_device(obj_handle, &d) ) {
+ return 0;
+ }
+ *return_value = (void *)acpi_driver_data(d);
+ return 1;
+}
static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
{
char *cpuname=NULL;
int ret;
+ /* Check ACPI support for C3 state */
+ acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
+ &longhaul_walk_callback, NULL, (void *)&pr);
+ if (pr == NULL) goto err_acpi;
+
+ cx = &pr->power.states[ACPI_STATE_C3];
+ if (cx->address == 0 || cx->latency > 1000) goto err_acpi;
+
+ /* Now check what we have on this motherboard */
switch (c->x86_model) {
case 6:
cpu_model = CPU_SAMUEL;
cpufreq_frequency_table_get_attr(longhaul_table, policy->cpu);
return 0;
+
+err_acpi:
+ printk(KERN_ERR PFX "No ACPI support for CPU frequency changes.\n");
+ return -ENODEV;
}
static int __devexit longhaul_cpu_exit(struct cpufreq_policy *policy)
if (c->x86_vendor != X86_VENDOR_CENTAUR || c->x86 != 6)
return -ENODEV;
+#ifdef CONFIG_SMP
+ if (num_online_cpus() > 1) {
+		printk(KERN_ERR PFX "More than 1 CPU detected, longhaul disabled.\n");
+		return -ENODEV;
+ }
+#endif
+#ifdef CONFIG_X86_IO_APIC
+ if (cpu_has_apic) {
+ printk(KERN_ERR PFX "APIC detected. Longhaul is currently broken in this configuration.\n");
+ return -ENODEV;
+ }
+#endif
switch (c->x86_model) {
case 6 ... 9:
return cpufreq_register_driver(&longhaul_driver);
MODULE_DESCRIPTION ("Longhaul driver for VIA Cyrix processors.");
MODULE_LICENSE ("GPL");
-module_init(longhaul_init);
+late_initcall(longhaul_init);
module_exit(longhaul_exit);
if (num_cache_leaves == 0)
return 0;
- register_cpu_notifier(&cacheinfo_cpu_notifier);
+ register_hotcpu_notifier(&cacheinfo_cpu_notifier);
for_each_online_cpu(i) {
cacheinfo_cpu_callback(&cacheinfo_cpu_notifier, CPU_ONLINE,
/* Call the installed machine check handler for this CPU setup. */
extern fastcall void (*machine_check_vector)(struct pt_regs *, long error_code);
-extern int mce_disabled __initdata;
+extern int mce_disabled;
extern int nr_mce_banks;
ENTRY(ret_from_fork)
CFI_STARTPROC
pushl %eax
- CFI_ADJUST_CFA_OFFSET -4
+ CFI_ADJUST_CFA_OFFSET 4
call schedule_tail
GET_THREAD_INFO(%ebp)
popl %eax
int ret = 0;
kprobe_opcode_t *addr;
struct kprobe_ctlblk *kcb;
-#ifdef CONFIG_PREEMPT
- unsigned pre_preempt_count = preempt_count();
-#else
- unsigned pre_preempt_count = 1;
-#endif
addr = (kprobe_opcode_t *)(regs->eip - sizeof(kprobe_opcode_t));
return 1;
ss_probe:
- if (pre_preempt_count && p->ainsn.boostable == 1 && !p->post_handler){
+#ifndef CONFIG_PREEMPT
+ if (p->ainsn.boostable == 1 && !p->post_handler){
/* Boost up -- we can execute copied instructions directly */
reset_current_kprobe();
regs->eip = (unsigned long)p->ainsn.insn;
preempt_enable_no_resched();
return 1;
}
+#endif
prepare_singlestep(p, regs);
kcb->kprobe_status = KPROBE_HIT_SS;
return 1;
memcpy((void *)reboot_code_buffer, relocate_new_kernel,
relocate_new_kernel_size);
- /* The segment registers are funny things, they are
- * automatically loaded from a table, in memory wherever you
- * set them to a specific selector, but this table is never
- * accessed again you set the segment to a different selector.
- *
- * The more common model is are caches where the behide
- * the scenes work is done, but is also dropped at arbitrary
- * times.
+ /* The segment registers are funny things, they have both a
+ * visible and an invisible part. Whenever the visible part is
+ * set to a specific selector, the invisible part is loaded
+	 * from a table in memory.  At no other time is the
+ * descriptor table in memory accessed.
*
* I take advantage of this here by force loading the
* segments, before I zap the gdt with an invalid value.
*/
touch_softlockup_watchdog();
}
+EXPORT_SYMBOL(touch_nmi_watchdog);
extern void die_nmi(struct pt_regs *, const char *msg);
/*
* Now maybe handle debug registers and/or IO bitmaps
*/
- if (unlikely((task_thread_info(next_p)->flags & _TIF_WORK_CTXSW))
- || test_tsk_thread_flag(prev_p, TIF_IO_BITMAP))
+ if (unlikely((task_thread_info(next_p)->flags & _TIF_WORK_CTXSW)
+ || test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)))
__switch_to_xtra(next_p, tss);
disable_tsc(prev_p, next_p);
* then we print a warning if not, and always resync.
*/
-static atomic_t tsc_start_flag = ATOMIC_INIT(0);
-static atomic_t tsc_count_start = ATOMIC_INIT(0);
-static atomic_t tsc_count_stop = ATOMIC_INIT(0);
-static unsigned long long tsc_values[NR_CPUS];
+static struct {
+ atomic_t start_flag;
+ atomic_t count_start;
+ atomic_t count_stop;
+ unsigned long long values[NR_CPUS];
+} tsc __initdata = {
+ .start_flag = ATOMIC_INIT(0),
+ .count_start = ATOMIC_INIT(0),
+ .count_stop = ATOMIC_INIT(0),
+};
#define NR_LOOPS 5
-static void __init synchronize_tsc_bp (void)
+static void __init synchronize_tsc_bp(void)
{
int i;
unsigned long long t0;
/* convert from kcyc/sec to cyc/usec */
one_usec = cpu_khz / 1000;
- atomic_set(&tsc_start_flag, 1);
+ atomic_set(&tsc.start_flag, 1);
wmb();
/*
/*
* all APs synchronize but they loop on '== num_cpus'
*/
- while (atomic_read(&tsc_count_start) != num_booting_cpus()-1)
+ while (atomic_read(&tsc.count_start) != num_booting_cpus()-1)
cpu_relax();
- atomic_set(&tsc_count_stop, 0);
+ atomic_set(&tsc.count_stop, 0);
wmb();
/*
* this lets the APs save their current TSC:
*/
- atomic_inc(&tsc_count_start);
+ atomic_inc(&tsc.count_start);
- rdtscll(tsc_values[smp_processor_id()]);
+ rdtscll(tsc.values[smp_processor_id()]);
/*
* We clear the TSC in the last loop:
*/
/*
* Wait for all APs to leave the synchronization point:
*/
- while (atomic_read(&tsc_count_stop) != num_booting_cpus()-1)
+ while (atomic_read(&tsc.count_stop) != num_booting_cpus()-1)
cpu_relax();
- atomic_set(&tsc_count_start, 0);
+ atomic_set(&tsc.count_start, 0);
wmb();
- atomic_inc(&tsc_count_stop);
+ atomic_inc(&tsc.count_stop);
}
sum = 0;
for (i = 0; i < NR_CPUS; i++) {
if (cpu_isset(i, cpu_callout_map)) {
- t0 = tsc_values[i];
+ t0 = tsc.values[i];
sum += t0;
}
}
avg = sum;
do_div(avg, num_booting_cpus());
- sum = 0;
for (i = 0; i < NR_CPUS; i++) {
if (!cpu_isset(i, cpu_callout_map))
continue;
- delta = tsc_values[i] - avg;
+ delta = tsc.values[i] - avg;
if (delta < 0)
delta = -delta;
/*
* We report bigger than 2 microseconds clock differences.
*/
if (delta > 2*one_usec) {
- long realdelta;
+ long long realdelta;
+
if (!buggy) {
buggy = 1;
printk("\n");
}
realdelta = delta;
do_div(realdelta, one_usec);
- if (tsc_values[i] < avg)
+ if (tsc.values[i] < avg)
realdelta = -realdelta;
- if (realdelta > 0)
- printk(KERN_INFO "CPU#%d had %ld usecs TSC "
+ if (realdelta)
+ printk(KERN_INFO "CPU#%d had %Ld usecs TSC "
"skew, fixed it up.\n", i, realdelta);
}
-
- sum += delta;
}
if (!buggy)
printk("passed.\n");
}
-static void __init synchronize_tsc_ap (void)
+static void __init synchronize_tsc_ap(void)
{
int i;
* this gets called, so we first wait for the BP to
* finish SMP initialization:
*/
- while (!atomic_read(&tsc_start_flag))
+ while (!atomic_read(&tsc.start_flag))
cpu_relax();
for (i = 0; i < NR_LOOPS; i++) {
- atomic_inc(&tsc_count_start);
- while (atomic_read(&tsc_count_start) != num_booting_cpus())
+ atomic_inc(&tsc.count_start);
+ while (atomic_read(&tsc.count_start) != num_booting_cpus())
cpu_relax();
- rdtscll(tsc_values[smp_processor_id()]);
+ rdtscll(tsc.values[smp_processor_id()]);
if (i == NR_LOOPS-1)
write_tsc(0, 0);
- atomic_inc(&tsc_count_stop);
- while (atomic_read(&tsc_count_stop) != num_booting_cpus())
+ atomic_inc(&tsc.count_stop);
+ while (atomic_read(&tsc.count_stop) != num_booting_cpus())
cpu_relax();
}
}
{
unsigned long pc = instruction_pointer(regs);
- if (in_lock_functions(pc))
+ if (!user_mode_vm(regs) && in_lock_functions(pc))
return *(unsigned long *)(regs->ebp + 4);
return pc;
if (unwind_init_blocked(&info, task) == 0)
unw_ret = show_trace_unwind(&info, log_lvl);
}
- if (unw_ret > 0) {
- if (call_trace > 0)
+ if (unw_ret > 0 && !arch_unw_user_mode(&info)) {
+#ifdef CONFIG_STACK_UNWIND
+ print_symbol("DWARF2 unwinder stuck at %s\n",
+ UNW_PC(&info));
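+		/*
+		 * call_trace == 1: fall back to the legacy dump from the
+		 * point where the unwinder got stuck; > 1: trust the
+		 * unwinder only and stop here; otherwise dump the full
+		 * legacy trace as well.
+		 */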
+ if (call_trace == 1) {
+ printk("Leftover inexact backtrace:\n");
+ if (UNW_SP(&info))
+ stack = (void *)UNW_SP(&info);
+ } else if (call_trace > 1)
return;
- printk("%sLegacy call trace:\n", log_lvl);
+ else
+ printk("Full inexact backtrace again:\n");
+#else
+ printk("Inexact backtrace:\n");
+#endif
}
}
if (in_interrupt())
panic("Fatal exception in interrupt");
- if (panic_on_oops) {
- printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
- ssleep(5);
+ if (panic_on_oops)
panic("Fatal exception");
- }
+
oops_exit();
do_exit(SIGSEGV);
}
call_trace = -1;
else if (strcmp(s, "both") == 0)
call_trace = 0;
- else if (strcmp(s, "new") == 0)
+ else if (strcmp(s, "newfallback") == 0)
call_trace = 1;
+	else if (strcmp(s, "new") == 0)
+ call_trace = 2;
return 1;
}
__setup("call_trace=", call_trace_setup);
. = VDSO_PRELINK + SIZEOF_HEADERS;
.hash : { *(.hash) } :text
+ .gnu.hash : { *(.gnu.hash) }
.dynsym : { *(.dynsym) }
.dynstr : { *(.dynstr) }
.gnu.version : { *(.gnu.version) }
#
CONFIG_IDE_GENERIC=y
CONFIG_BLK_DEV_IDEPCI=y
-# CONFIG_IDEPCI_SHARE_IRQ is not set
+CONFIG_IDEPCI_SHARE_IRQ=y
# CONFIG_BLK_DEV_OFFBOARD is not set
# CONFIG_BLK_DEV_GENERIC is not set
# CONFIG_BLK_DEV_OPTI621 is not set
# CONFIG_IDE_GENERIC is not set
# CONFIG_BLK_DEV_IDEPNP is not set
CONFIG_BLK_DEV_IDEPCI=y
-# CONFIG_IDEPCI_SHARE_IRQ is not set
+CONFIG_IDEPCI_SHARE_IRQ=y
# CONFIG_BLK_DEV_OFFBOARD is not set
CONFIG_BLK_DEV_GENERIC=y
# CONFIG_BLK_DEV_OPTI621 is not set
simscsi_sg_readwrite (struct scsi_cmnd *sc, int mode, unsigned long offset)
{
int list_len = sc->use_sg;
- struct scatterlist *sl = (struct scatterlist *)sc->buffer;
+ struct scatterlist *sl = (struct scatterlist *)sc->request_buffer;
struct disk_stat stat;
struct disk_req req;
if (scatterlen == 0)
memcpy(sc->request_buffer, buf, len);
- else for (slp = (struct scatterlist *)sc->buffer; scatterlen-- > 0 && len > 0; slp++) {
+ else for (slp = (struct scatterlist *)sc->request_buffer; scatterlen-- > 0 && len > 0; slp++) {
unsigned thislen = min(len, slp->length);
memcpy(page_address(slp->page) + slp->offset, buf, thislen);
quiet_cmd_gate = GATE $@
cmd_gate = $(CC) -nostdlib $(GATECFLAGS_$(@F)) -Wl,-T,$(filter-out FORCE,$^) -o $@
-GATECFLAGS_gate.so = -shared -s -Wl,-soname=linux-gate.so.1
+GATECFLAGS_gate.so = -shared -s -Wl,-soname=linux-gate.so.1 \
+ $(call ld-option, -Wl$(comma)--hash-style=sysv)
$(obj)/gate.so: $(obj)/gate.lds $(obj)/gate.o FORCE
$(call if_changed,gate)
#define BAD_MADT_ENTRY(entry, end) ( \
(!entry) || (unsigned long)entry + sizeof(*entry) > end || \
- ((acpi_table_entry_header *)entry)->length != sizeof(*entry))
+ ((acpi_table_entry_header *)entry)->length < sizeof(*entry))
#define PREFIX "ACPI: "
if (phys_addr - md->start < (md->num_pages << EFI_PAGE_SHIFT))
return md;
}
- return 0;
+ return NULL;
}
static efi_memory_desc_t *
if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT))
return md;
}
- return 0;
+ return NULL;
}
u32
void
efi_memmap_init(unsigned long *s, unsigned long *e)
{
- struct kern_memdesc *k, *prev = 0;
+ struct kern_memdesc *k, *prev = NULL;
u64 contig_low=0, contig_high=0;
u64 as, ae, lim;
void *efi_map_start, *efi_map_end, *p, *q;
. = GATE_ADDR + SIZEOF_HEADERS;
.hash : { *(.hash) } :readable
+ .gnu.hash : { *(.gnu.hash) }
.dynsym : { *(.dynsym) }
.dynstr : { *(.dynstr) }
.gnu.version : { *(.gnu.version) }
*/
GLOBAL_ENTRY(ia64_switch_mode_phys)
{
- alloc r2=ar.pfs,0,0,0,0
rsm psr.i | psr.ic // disable interrupts and interrupt collection
mov r15=ip
}
*/
GLOBAL_ENTRY(ia64_switch_mode_virt)
{
- alloc r2=ar.pfs,0,0,0,0
rsm psr.i | psr.ic // disable interrupts and interrupt collection
mov r15=ip
}
EXPORT_SYMBOL(__moddi3);
EXPORT_SYMBOL(__umoddi3);
-#if defined(CONFIG_MD_RAID5) || defined(CONFIG_MD_RAID5_MODULE)
+#if defined(CONFIG_MD_RAID456) || defined(CONFIG_MD_RAID456_MODULE)
extern void xor_ia64_2(void);
extern void xor_ia64_3(void);
extern void xor_ia64_4(void);
return 0;
}
+void __kprobes flush_insn_slot(struct kprobe *p)
+{
+ unsigned long arm_addr;
+
+ arm_addr = ((unsigned long)&p->opcode.bundle) & ~0xFULL;
+ flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
+}
+
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
unsigned long addr = (unsigned long)p->addr;
unsigned long arm_addr = addr & ~0xFULL;
+ flush_insn_slot(p);
memcpy((char *)arm_addr, &p->ainsn.insn.bundle, sizeof(bundle_t));
flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
}
.body
;;
ld8 loc2 = [loc2] // loc2 <- entry point
- mov out0 = in0 // first argument
- mov out1 = in1 // copy arg2
- mov out2 = in2 // copy arg3
- mov out3 = in3 // copy arg3
- ;;
- mov loc3 = psr // save psr
+ mov loc3 = psr // save psr
;;
mov loc4=ar.rsc // save RSE configuration
dep.z loc2=loc2,0,61 // convert pal entry point to physical
;;
andcm r16=loc3,r16 // removes bits to clear from psr
br.call.sptk.many rp=ia64_switch_mode_phys
-.ret6:
+
+ mov out0 = in0 // first argument
+ mov out1 = in1 // copy arg2
+ mov out2 = in2 // copy arg3
+ mov out3 = in3 // copy arg3
mov loc5 = r19
mov loc6 = r20
+
br.call.sptk.many rp=b7 // now make the call
-.ret7:
+
mov ar.rsc=0 // put RSE in enforced lazy, LE mode
mov r16=loc3 // r16= original psr
mov r19=loc5
mov r20=loc6
br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
-.ret8: mov psr.l = loc3 // restore init PSR
+ mov psr.l = loc3 // restore init PSR
mov ar.pfs = loc1
mov rp = loc0
;;
pal_version_u_t min_ver, cur_ver;
char *p = page;
- /* The PAL_VERSION call is advertised as being able to support
- * both physical and virtual mode calls. This seems to be a documentation
- * bug rather than firmware bug. In fact, it does only support physical mode.
- * So now the code reflects this fact and the pal_version() has been updated
- * accordingly.
- */
- if (ia64_pal_version(&min_ver, &cur_ver) != 0) return 0;
+ if (ia64_pal_version(&min_ver, &cur_ver) != 0)
+ return 0;
p += sprintf(p,
"PAL_vendor : 0x%02x (min=0x%02x)\n"
- "PAL_A : %x.%x.%x (min=%x.%x.%x)\n"
- "PAL_B : %x.%x.%x (min=%x.%x.%x)\n",
- cur_ver.pal_version_s.pv_pal_vendor, min_ver.pal_version_s.pv_pal_vendor,
-
- cur_ver.pal_version_s.pv_pal_a_model>>4,
- cur_ver.pal_version_s.pv_pal_a_model&0xf, cur_ver.pal_version_s.pv_pal_a_rev,
- min_ver.pal_version_s.pv_pal_a_model>>4,
- min_ver.pal_version_s.pv_pal_a_model&0xf, min_ver.pal_version_s.pv_pal_a_rev,
-
- cur_ver.pal_version_s.pv_pal_b_model>>4,
- cur_ver.pal_version_s.pv_pal_b_model&0xf, cur_ver.pal_version_s.pv_pal_b_rev,
- min_ver.pal_version_s.pv_pal_b_model>>4,
- min_ver.pal_version_s.pv_pal_b_model&0xf, min_ver.pal_version_s.pv_pal_b_rev);
+ "PAL_A : %02x.%02x (min=%02x.%02x)\n"
+ "PAL_B : %02x.%02x (min=%02x.%02x)\n",
+ cur_ver.pal_version_s.pv_pal_vendor,
+ min_ver.pal_version_s.pv_pal_vendor,
+ cur_ver.pal_version_s.pv_pal_a_model,
+ cur_ver.pal_version_s.pv_pal_a_rev,
+ min_ver.pal_version_s.pv_pal_a_model,
+ min_ver.pal_version_s.pv_pal_a_rev,
+ cur_ver.pal_version_s.pv_pal_b_model,
+ cur_ver.pal_version_s.pv_pal_b_rev,
+ min_ver.pal_version_s.pv_pal_b_model,
+ min_ver.pal_version_s.pv_pal_b_rev);
return p - page;
}
}
}
-static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
- unsigned long action,
- void *hcpu)
+#ifdef CONFIG_HOTPLUG_CPU
+static int palinfo_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
{
unsigned int hotcpu = (unsigned long)hcpu;
case CPU_ONLINE:
create_palinfo_proc_entries(hotcpu);
break;
-#ifdef CONFIG_HOTPLUG_CPU
case CPU_DEAD:
remove_palinfo_proc_entries(hotcpu);
break;
-#endif
}
return NOTIFY_OK;
}
-static struct notifier_block __cpuinitdata palinfo_cpu_notifier =
+static struct notifier_block palinfo_cpu_notifier =
{
.notifier_call = palinfo_cpu_callback,
.priority = 0,
};
+#endif
static int __init
palinfo_init(void)
/*
* Unregister from cpu notifier callbacks
*/
- unregister_cpu_notifier(&palinfo_cpu_notifier);
+ unregister_hotcpu_notifier(&palinfo_cpu_notifier);
}
module_init(palinfo_init);
(void *)(long)i);
}
- register_cpu_notifier(&cache_cpu_notifier);
+ register_hotcpu_notifier(&cache_cpu_notifier);
return 0;
}
die.lock_owner = -1;
spin_unlock_irq(&die.lock);
- if (panic_on_oops) {
- printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
- ssleep(5);
+ if (panic_on_oops)
panic("Fatal exception");
- }
do_exit(SIGSEGV);
}
extern void __init efi_memmap_walk_uc(efi_freemem_callback_t, void *);
-#define MAX_UNCACHED_GRANULES 5
-static int allocated_granules;
+struct uncached_pool {
+ struct gen_pool *pool;
+ struct mutex add_chunk_mutex; /* serialize adding a converted chunk */
+ int nchunks_added; /* #of converted chunks added to pool */
+ atomic_t status; /* smp called function's return status*/
+};
+
+#define MAX_CONVERTED_CHUNKS_PER_NODE 2
-struct gen_pool *uncached_pool[MAX_NUMNODES];
+struct uncached_pool uncached_pools[MAX_NUMNODES];
static void uncached_ipi_visibility(void *data)
{
int status;
+ struct uncached_pool *uc_pool = (struct uncached_pool *)data;
status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
if ((status != PAL_VISIBILITY_OK) &&
(status != PAL_VISIBILITY_OK_REMOTE_NEEDED))
- printk(KERN_DEBUG "pal_prefetch_visibility() returns %i on "
- "CPU %i\n", status, raw_smp_processor_id());
+ atomic_inc(&uc_pool->status);
}
static void uncached_ipi_mc_drain(void *data)
{
int status;
+ struct uncached_pool *uc_pool = (struct uncached_pool *)data;
status = ia64_pal_mc_drain();
- if (status)
- printk(KERN_WARNING "ia64_pal_mc_drain() failed with %i on "
- "CPU %i\n", status, raw_smp_processor_id());
+ if (status != PAL_STATUS_SUCCESS)
+ atomic_inc(&uc_pool->status);
}
* This is accomplished by first allocating a granule of cached memory pages
* and then converting them to uncached memory pages.
*/
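+/*
+ * In outline: take the node's add_chunk_mutex, allocate a granule of
+ * cached pages, convert them to uncached (PAL prefetch-visibility and
+ * mc-drain calls locally plus an IPI to the other CPUs), and add the
+ * granule to the node's gen_pool.  On any failure the pages are handed
+ * back to the page allocator.
+ */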
-static int uncached_add_chunk(struct gen_pool *pool, int nid)
+static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
{
struct page *page;
- int status, i;
+ int status, i, nchunks_added = uc_pool->nchunks_added;
unsigned long c_addr, uc_addr;
- if (allocated_granules >= MAX_UNCACHED_GRANULES)
+ if (mutex_lock_interruptible(&uc_pool->add_chunk_mutex) != 0)
+ return -1; /* interrupted by a signal */
+
+ if (uc_pool->nchunks_added > nchunks_added) {
+ /* someone added a new chunk while we were waiting */
+ mutex_unlock(&uc_pool->add_chunk_mutex);
+ return 0;
+ }
+
+ if (uc_pool->nchunks_added >= MAX_CONVERTED_CHUNKS_PER_NODE) {
+ mutex_unlock(&uc_pool->add_chunk_mutex);
return -1;
+ }
/* attempt to allocate a granule's worth of cached memory pages */
page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO,
IA64_GRANULE_SHIFT-PAGE_SHIFT);
- if (!page)
+ if (!page) {
+ mutex_unlock(&uc_pool->add_chunk_mutex);
return -1;
+ }
/* convert the memory pages from cached to uncached */
	flush_tlb_kernel_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);
status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
- if (!status) {
- status = smp_call_function(uncached_ipi_visibility, NULL, 0, 1);
- if (status)
+ if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) {
+ atomic_set(&uc_pool->status, 0);
+ status = smp_call_function(uncached_ipi_visibility, uc_pool,
+ 0, 1);
+ if (status || atomic_read(&uc_pool->status))
goto failed;
- }
+ } else if (status != PAL_VISIBILITY_OK)
+ goto failed;
preempt_disable();
preempt_enable();
- ia64_pal_mc_drain();
- status = smp_call_function(uncached_ipi_mc_drain, NULL, 0, 1);
- if (status)
+ status = ia64_pal_mc_drain();
+ if (status != PAL_STATUS_SUCCESS)
+ goto failed;
+ atomic_set(&uc_pool->status, 0);
+ status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 0, 1);
+ if (status || atomic_read(&uc_pool->status))
goto failed;
/*
* The chunk of memory pages has been converted to uncached so now we
* can add it to the pool.
*/
- status = gen_pool_add(pool, uc_addr, IA64_GRANULE_SIZE, nid);
+ status = gen_pool_add(uc_pool->pool, uc_addr, IA64_GRANULE_SIZE, nid);
if (status)
goto failed;
- allocated_granules++;
+ uc_pool->nchunks_added++;
+ mutex_unlock(&uc_pool->add_chunk_mutex);
return 0;
/* failed to convert or add the chunk so give it back to the kernel */
ClearPageUncached(&page[i]);
free_pages(c_addr, IA64_GRANULE_SHIFT-PAGE_SHIFT);
+ mutex_unlock(&uc_pool->add_chunk_mutex);
return -1;
}
unsigned long uncached_alloc_page(int starting_nid)
{
unsigned long uc_addr;
- struct gen_pool *pool;
+ struct uncached_pool *uc_pool;
int nid;
if (unlikely(starting_nid >= MAX_NUMNODES))
do {
if (!node_online(nid))
continue;
- pool = uncached_pool[nid];
- if (pool == NULL)
+ uc_pool = &uncached_pools[nid];
+ if (uc_pool->pool == NULL)
continue;
do {
- uc_addr = gen_pool_alloc(pool, PAGE_SIZE);
+ uc_addr = gen_pool_alloc(uc_pool->pool, PAGE_SIZE);
if (uc_addr != 0)
return uc_addr;
- } while (uncached_add_chunk(pool, nid) == 0);
+ } while (uncached_add_chunk(uc_pool, nid) == 0);
} while ((nid = (nid + 1) % MAX_NUMNODES) != starting_nid);
void uncached_free_page(unsigned long uc_addr)
{
int nid = paddr_to_nid(uc_addr - __IA64_UNCACHED_OFFSET);
- struct gen_pool *pool = uncached_pool[nid];
+ struct gen_pool *pool = uncached_pools[nid].pool;
if (unlikely(pool == NULL))
return;
unsigned long uc_end, void *arg)
{
int nid = paddr_to_nid(uc_start - __IA64_UNCACHED_OFFSET);
- struct gen_pool *pool = uncached_pool[nid];
+ struct gen_pool *pool = uncached_pools[nid].pool;
size_t size = uc_end - uc_start;
touch_softlockup_watchdog();
int nid;
for_each_online_node(nid) {
- uncached_pool[nid] = gen_pool_create(PAGE_SHIFT, nid);
+ uncached_pools[nid].pool = gen_pool_create(PAGE_SHIFT, nid);
+ mutex_init(&uncached_pools[nid].add_chunk_mutex);
}
efi_memmap_walk_uc(uncached_build_memmap, NULL);
lib-$(CONFIG_ITANIUM) += copy_page.o copy_user.o memcpy.o
lib-$(CONFIG_MCKINLEY) += copy_page_mck.o memcpy_mck.o
lib-$(CONFIG_PERFMON) += carta_random.o
-lib-$(CONFIG_MD_RAID5) += xor.o
+lib-$(CONFIG_MD_RAID456) += xor.o
AFLAGS___divdi3.o =
AFLAGS___udivdi3.o = -DUNSIGNED
#ifdef CONFIG_VIRTUAL_MEM_MAP
static unsigned long num_dma_physpages;
+static unsigned long max_gap;
#endif
/**
printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
i = max_mapnr;
- while (i-- > 0) {
- if (!pfn_valid(i))
+ for (i = 0; i < max_mapnr; i++) {
+ if (!pfn_valid(i)) {
+#ifdef CONFIG_VIRTUAL_MEM_MAP
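+			/*
+			 * With a virtually mapped mem_map and a large hole,
+			 * skip straight to the next pfn whose struct page is
+			 * actually mapped instead of probing every pfn.
+			 */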
+ if (max_gap < LARGE_GAP)
+ continue;
+ i = vmemmap_find_next_valid_pfn(0, i) - 1;
+#endif
continue;
+ }
total++;
if (PageReserved(mem_map+i))
reserved++;
unsigned long zones_size[MAX_NR_ZONES];
#ifdef CONFIG_VIRTUAL_MEM_MAP
unsigned long zholes_size[MAX_NR_ZONES];
- unsigned long max_gap;
#endif
/* initialize mem_map[] */
}
}
- max_gap = 0;
efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
if (max_gap < LARGE_GAP) {
vmem_map = (struct page *) 0;
/* allocate virtual_mem_map */
- map_size = PAGE_ALIGN(max_low_pfn * sizeof(struct page));
+ map_size = PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
+ sizeof(struct page));
vmalloc_end -= map_size;
vmem_map = (struct page *) vmalloc_end;
efi_memmap_walk(create_mem_map_page_table, NULL);
}
#endif /* CONFIG_SMP */
-#ifdef CONFIG_VIRTUAL_MEM_MAP
-static inline int find_next_valid_pfn_for_pgdat(pg_data_t *pgdat, int i)
-{
- unsigned long end_address, hole_next_pfn;
- unsigned long stop_address;
-
- end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
- end_address = PAGE_ALIGN(end_address);
-
- stop_address = (unsigned long) &vmem_map[
- pgdat->node_start_pfn + pgdat->node_spanned_pages];
-
- do {
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
-
- pgd = pgd_offset_k(end_address);
- if (pgd_none(*pgd)) {
- end_address += PGDIR_SIZE;
- continue;
- }
-
- pud = pud_offset(pgd, end_address);
- if (pud_none(*pud)) {
- end_address += PUD_SIZE;
- continue;
- }
-
- pmd = pmd_offset(pud, end_address);
- if (pmd_none(*pmd)) {
- end_address += PMD_SIZE;
- continue;
- }
-
- pte = pte_offset_kernel(pmd, end_address);
-retry_pte:
- if (pte_none(*pte)) {
- end_address += PAGE_SIZE;
- pte++;
- if ((end_address < stop_address) &&
- (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
- goto retry_pte;
- continue;
- }
- /* Found next valid vmem_map page */
- break;
- } while (end_address < stop_address);
-
- end_address = min(end_address, stop_address);
- end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
- hole_next_pfn = end_address / sizeof(struct page);
- return hole_next_pfn - pgdat->node_start_pfn;
-}
-#else
-static inline int find_next_valid_pfn_for_pgdat(pg_data_t *pgdat, int i)
-{
- return i + 1;
-}
-#endif
-
/**
* show_mem - give short summary of memory stats
*
if (pfn_valid(pgdat->node_start_pfn + i))
page = pfn_to_page(pgdat->node_start_pfn + i);
else {
- i = find_next_valid_pfn_for_pgdat(pgdat, i) - 1;
+ i = vmemmap_find_next_valid_pfn(pgdat->node_id,
+ i) - 1;
continue;
}
if (PageReserved(page))
efi_memmap_walk(filter_rsvd_memory, count_node_pages);
#ifdef CONFIG_VIRTUAL_MEM_MAP
- vmalloc_end -= PAGE_ALIGN(max_low_pfn * sizeof(struct page));
+ vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
+ sizeof(struct page));
vmem_map = (struct page *) vmalloc_end;
efi_memmap_walk(create_mem_map_page_table, NULL);
printk("Virtual mem_map starts at 0x%p\n", vmem_map);
}
#ifdef CONFIG_VIRTUAL_MEM_MAP
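+/*
+ * Walk the kernel page tables backing the virtual mem_map, starting at
+ * the entry for pfn node_start_pfn + i, and return the node-relative
+ * index of the next pfn whose struct page is actually mapped.
+ */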
+int vmemmap_find_next_valid_pfn(int node, int i)
+{
+ unsigned long end_address, hole_next_pfn;
+ unsigned long stop_address;
+ pg_data_t *pgdat = NODE_DATA(node);
+
+ end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
+ end_address = PAGE_ALIGN(end_address);
+
+ stop_address = (unsigned long) &vmem_map[
+ pgdat->node_start_pfn + pgdat->node_spanned_pages];
+
+ do {
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ pgd = pgd_offset_k(end_address);
+ if (pgd_none(*pgd)) {
+ end_address += PGDIR_SIZE;
+ continue;
+ }
+
+ pud = pud_offset(pgd, end_address);
+ if (pud_none(*pud)) {
+ end_address += PUD_SIZE;
+ continue;
+ }
+
+ pmd = pmd_offset(pud, end_address);
+ if (pmd_none(*pmd)) {
+ end_address += PMD_SIZE;
+ continue;
+ }
+
+ pte = pte_offset_kernel(pmd, end_address);
+retry_pte:
+ if (pte_none(*pte)) {
+ end_address += PAGE_SIZE;
+ pte++;
+ if ((end_address < stop_address) &&
+ (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
+ goto retry_pte;
+ continue;
+ }
+ /* Found next valid vmem_map page */
+ break;
+ } while (end_address < stop_address);
+
+ end_address = min(end_address, stop_address);
+ end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
+ hole_next_pfn = end_address / sizeof(struct page);
+ return hole_next_pfn - pgdat->node_start_pfn;
+}
int __init
create_mem_map_page_table (u64 start, u64 end, void *arg)
*/
attr = kern_mem_attribute(offset, size);
if (attr & EFI_MEMORY_WB)
- return phys_to_virt(offset);
+ return (void __iomem *) phys_to_virt(offset);
else if (attr & EFI_MEMORY_UC)
return __ioremap(offset, size);
gran_base = GRANULEROUNDDOWN(offset);
gran_size = GRANULEROUNDUP(offset + size) - gran_base;
if (efi_mem_attribute(gran_base, gran_size) & EFI_MEMORY_WB)
- return phys_to_virt(offset);
+ return (void __iomem *) phys_to_virt(offset);
return __ioremap(offset, size);
}
ioremap_nocache (unsigned long offset, unsigned long size)
{
if (kern_mem_attribute(offset, size) & EFI_MEMORY_WB)
- return 0;
+ return NULL;
return __ioremap(offset, size);
}
partid_t partid = (u64) __partid;
struct xpc_partition *part = &xpc_partitions[partid];
unsigned long irq_flags;
- struct sched_param param = { sched_priority: MAX_RT_PRIO - 1 };
+ struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
int ret;
else
mmr_war_offset = 0x158;
- readq_relaxed((void *)(mmr_base + mmr_war_offset));
+ readq_relaxed((void __iomem *)(mmr_base + mmr_war_offset));
}
}
if (mmr_offset < 0x45000) {
if (mmr_offset == 0x100)
- readq_relaxed((void *)(mmr_base + 0x38));
- readq_relaxed((void *)(mmr_base + 0xb050));
+ readq_relaxed((void __iomem *)(mmr_base + 0x38));
+ readq_relaxed((void __iomem *)(mmr_base + 0xb050));
}
}
*(.dynstr)
*(.dynamic)
*(.hash)
+ *(.gnu.hash)
#endif
}
config CPU_FREQ_PMAC64
bool "Support for some Apple G5s"
- depends on CPU_FREQ && PMAC_SMU && PPC64
+ depends on CPU_FREQ && PPC64
select CPU_FREQ_TABLE
help
This adds support for frequency switching on Apple iMac G5,
bool
config PCI
- bool "PCI support" if 40x || CPM2 || PPC_83xx || PPC_85xx || PPC_MPC52xx || (EMBEDDED && PPC_ISERIES) \
- || MPC7448HPC2
- default y if !40x && !CPM2 && !8xx && !APUS && !PPC_83xx && !PPC_85xx && !PPC_86xx
+ bool "PCI support" if 40x || CPM2 || PPC_83xx || PPC_85xx || PPC_86xx \
+ || PPC_MPC52xx || (EMBEDDED && PPC_ISERIES) || MPC7448HPC2
+ default y if !40x && !CPM2 && !8xx && !APUS && !PPC_83xx \
+ && !PPC_85xx && !PPC_86xx
default PCI_PERMEDIA if !4xx && !CPM2 && !8xx && APUS
default PCI_QSPAN if !4xx && !CPM2 && 8xx
help
--- /dev/null
+/*
+ * MPC8641 HPCN Device Tree Source
+ *
+ * Copyright 2006 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+
+/ {
+ model = "MPC8641HPCN";
+ compatible = "mpc86xx";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ cpus {
+ #cpus = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ PowerPC,8641@0 {
+ device_type = "cpu";
+ reg = <0>;
+ d-cache-line-size = <20>; // 32 bytes
+ i-cache-line-size = <20>; // 32 bytes
+ d-cache-size = <8000>; // L1, 32K
+ i-cache-size = <8000>; // L1, 32K
+ timebase-frequency = <0>; // 33 MHz, from uboot
+ bus-frequency = <0>; // From uboot
+ clock-frequency = <0>; // From uboot
+ 32-bit;
+ linux,boot-cpu;
+ };
+ PowerPC,8641@1 {
+ device_type = "cpu";
+ reg = <1>;
+ d-cache-line-size = <20>; // 32 bytes
+ i-cache-line-size = <20>; // 32 bytes
+ d-cache-size = <8000>; // L1, 32K
+ i-cache-size = <8000>; // L1, 32K
+ timebase-frequency = <0>; // 33 MHz, from uboot
+ bus-frequency = <0>; // From uboot
+ clock-frequency = <0>; // From uboot
+ 32-bit;
+ };
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <00000000 40000000>; // 1G at 0x0
+ };
+
+ soc8641@f8000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ #interrupt-cells = <2>;
+ device_type = "soc";
+ ranges = <0 f8000000 00100000>;
+ reg = <f8000000 00100000>; // CCSRBAR 1M
+ bus-frequency = <0>;
+
+ i2c@3000 {
+ device_type = "i2c";
+ compatible = "fsl-i2c";
+ reg = <3000 100>;
+ interrupts = <2b 2>;
+ interrupt-parent = <40000>;
+ dfsrr;
+ };
+
+ i2c@3100 {
+ device_type = "i2c";
+ compatible = "fsl-i2c";
+ reg = <3100 100>;
+ interrupts = <2b 2>;
+ interrupt-parent = <40000>;
+ dfsrr;
+ };
+
+ mdio@24520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ device_type = "mdio";
+ compatible = "gianfar";
+ reg = <24520 20>;
+ linux,phandle = <24520>;
+ ethernet-phy@0 {
+ linux,phandle = <2452000>;
+ interrupt-parent = <40000>;
+ interrupts = <4a 1>;
+ reg = <0>;
+ device_type = "ethernet-phy";
+ };
+ ethernet-phy@1 {
+ linux,phandle = <2452001>;
+ interrupt-parent = <40000>;
+ interrupts = <4a 1>;
+ reg = <1>;
+ device_type = "ethernet-phy";
+ };
+ ethernet-phy@2 {
+ linux,phandle = <2452002>;
+ interrupt-parent = <40000>;
+ interrupts = <4a 1>;
+ reg = <2>;
+ device_type = "ethernet-phy";
+ };
+ ethernet-phy@3 {
+ linux,phandle = <2452003>;
+ interrupt-parent = <40000>;
+ interrupts = <4a 1>;
+ reg = <3>;
+ device_type = "ethernet-phy";
+ };
+ };
+
+ ethernet@24000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ device_type = "network";
+ model = "TSEC";
+ compatible = "gianfar";
+ &n