* git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core-2.6: (60 commits)
uio: make uio_info's name and version const
UIO: Documentation for UIO ioport info handling
UIO: Pass information about ioports to userspace (V2)
UIO: uio_pdrv_genirq: allow custom irq_flags
UIO: use pci_ioremap_bar() in drivers/uio
arm: struct device - replace bus_id with dev_name(), dev_set_name()
libata: struct device - replace bus_id with dev_name(), dev_set_name()
  avr32: struct device - replace bus_id with dev_name(), dev_set_name()
block: struct device - replace bus_id with dev_name(), dev_set_name()
  cris: struct device - replace bus_id with dev_name(), dev_set_name()
dmi: struct device - replace bus_id with dev_name(), dev_set_name()
gadget: struct device - replace bus_id with dev_name(), dev_set_name()
gpio: struct device - replace bus_id with dev_name(), dev_set_name()
gpu: struct device - replace bus_id with dev_name(), dev_set_name()
hwmon: struct device - replace bus_id with dev_name(), dev_set_name()
i2o: struct device - replace bus_id with dev_name(), dev_set_name()
IA64: struct device - replace bus_id with dev_name(), dev_set_name()
i7300_idle: struct device - replace bus_id with dev_name(), dev_set_name()
infiniband: struct device - replace bus_id with dev_name(), dev_set_name()
ISDN: struct device - replace bus_id with dev_name(), dev_set_name()
...
</abstract>
<revhistory>
+ <revision>
+ <revnumber>0.6</revnumber>
+ <date>2008-12-05</date>
+ <authorinitials>hjk</authorinitials>
+ <revremark>Added description of portio sysfs attributes.</revremark>
+ </revision>
<revision>
<revnumber>0.5</revnumber>
<date>2008-05-22</date>
offset = N * getpagesize();
</programlisting>
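For reference, this offset convention means the Nth memory mapping of a UIO device is selected by passing N times the page size as the mmap() offset. A minimal userspace sketch (the /dev/uio0 node, mapping index, and mapping length are example assumptions; a real program should read the length from the maps/mapN/size attribute):

  #include <fcntl.h>
  #include <sys/mman.h>
  #include <unistd.h>

  int main(void)
  {
          int n = 0;              /* index of the mapping to access */
          size_t len = 4096;      /* real code: read maps/map0/size */
          int fd = open("/dev/uio0", O_RDWR);
          void *p;

          if (fd < 0)
                  return 1;
          /* select mapping n by passing n * page size as the offset */
          p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
                   fd, n * getpagesize());
          if (p == MAP_FAILED)
                  return 1;
          /* ... access the device's registers through p ... */
          munmap(p, len);
          close(fd);
          return 0;
  }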
+<para>
+  Sometimes there is hardware with memory-like regions that cannot be
+ mapped with the technique described here, but there are still ways to
+  access them from userspace. The most common example is x86 ioports.
+ On x86 systems, userspace can access these ioports using
+ <function>ioperm()</function>, <function>iopl()</function>,
+ <function>inb()</function>, <function>outb()</function>, and similar
+ functions.
+</para>
+<para>
+  Since these ioport regions cannot be mapped, they will not appear under
+ <filename>/sys/class/uio/uioX/maps/</filename> like the normal memory
+  described above. Without information about the port regions the hardware
+ has to offer, it becomes difficult for the userspace part of the
+ driver to find out which ports belong to which UIO device.
+</para>
+<para>
+ To address this situation, the new directory
+ <filename>/sys/class/uio/uioX/portio/</filename> was added. It only
+ exists if the driver wants to pass information about one or more port
+ regions to userspace. If that is the case, subdirectories named
+ <filename>port0</filename>, <filename>port1</filename>, and so on,
+ will appear underneath
+ <filename>/sys/class/uio/uioX/portio/</filename>.
+</para>
+<para>
+ Each <filename>portX/</filename> directory contains three read-only
+ files that show start, size, and type of the port region:
+</para>
+<itemizedlist>
+<listitem>
+ <para>
+ <filename>start</filename>: The first port of this region.
+ </para>
+</listitem>
+<listitem>
+ <para>
+ <filename>size</filename>: The number of ports in this region.
+ </para>
+</listitem>
+<listitem>
+ <para>
+ <filename>porttype</filename>: A string describing the type of port.
+ </para>
+</listitem>
+</itemizedlist>
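To illustrate how a userspace driver might consume these attributes (this is not part of the patch; the uio0/port0 path and the x86-only ioperm()/inb() calls are example assumptions, and ioperm() only covers ports below 0x400), a minimal sketch:

  #include <stdio.h>
  #include <sys/io.h>

  /* Read one numeric attribute of the hypothetical uio0/port0 region. */
  static long read_attr(const char *attr)
  {
          char path[128];
          long val = -1;
          FILE *f;

          snprintf(path, sizeof(path),
                   "/sys/class/uio/uio0/portio/port0/%s", attr);
          f = fopen(path, "r");
          if (f) {
                  fscanf(f, "%li", &val);  /* accepts decimal or 0x-prefixed hex */
                  fclose(f);
          }
          return val;
  }

  int main(void)
  {
          long start = read_attr("start");
          long size  = read_attr("size");

          if (start < 0 || size <= 0)
                  return 1;
          /* ioperm() needs root; use iopl() for ports above 0x3ff */
          if (ioperm(start, size, 1) < 0)
                  return 1;
          printf("ports 0x%lx-0x%lx, first byte 0x%02x\n",
                 start, start + size - 1, inb(start));
          return 0;
  }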
+
+
</sect1>
</chapter>
<itemizedlist>
<listitem><para>
-<varname>char *name</varname>: Required. The name of your driver as
+<varname>const char *name</varname>: Required. The name of your driver as
it will appear in sysfs. I recommend using the name of your module for this.
</para></listitem>
<listitem><para>
-<varname>char *version</varname>: Required. This string appears in
+<varname>const char *version</varname>: Required. This string appears in
<filename>/sys/class/uio/uioX/version</filename>.
</para></listitem>
See the description below for details.
</para></listitem>
+<listitem><para>
+<varname>struct uio_port port[MAX_UIO_PORT_REGIONS]</varname>: Required
+if you want to pass information about ioports to userspace. For each port
+region you need to fill one of the <varname>uio_port</varname> structures.
+See the description below for details.
+</para></listitem>
+
<listitem><para>
<varname>long irq</varname>: Required. If your hardware generates an
interrupt, it's your module's task to determine the irq number during
<varname>struct uio_mem</varname>! It is used by the UIO framework
to set up sysfs files for this mapping. Simply leave it alone.
</para>
+
+<para>
+Sometimes, your device can have one or more port regions which cannot be
+mapped to userspace. But if there are other ways for userspace to
+access these ports, it makes sense to make information about the ports
+available in sysfs. For each region, you have to set up a
+<varname>struct uio_port</varname> in the <varname>port[]</varname> array.
+Here's a description of the fields of <varname>struct uio_port</varname>:
+</para>
+
+<itemizedlist>
+<listitem><para>
+<varname>int porttype</varname>: Required. Set this to one of the predefined
+constants. Use <varname>UIO_PORT_X86</varname> for the ioports found in x86
+architectures.
+</para></listitem>
+
+<listitem><para>
+<varname>unsigned long start</varname>: Required if the port region is used.
+Fill in the number of the first port of this region.
+</para></listitem>
+
+<listitem><para>
+<varname>unsigned long size</varname>: Fill in the number of ports in this
+region. If <varname>size</varname> is zero, the region is considered unused.
+Note that you <emphasis>must</emphasis> initialize <varname>size</varname>
+to zero for all unused regions.
+</para></listitem>
+</itemizedlist>
+
+<para>
+Please do not touch the <varname>portio</varname> element of
+<varname>struct uio_port</varname>! It is used internally by the UIO
+framework to set up sysfs files for this region. Simply leave it alone.
+</para>
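For illustration only, a hedged sketch of a probe function filling in one such port region before registering the UIO device; the platform device, driver name, and port numbers are made-up example values:

  #include <linux/platform_device.h>
  #include <linux/uio_driver.h>

  static struct uio_info my_uio_info = {
          .name    = "my_uio_driver",      /* appears in sysfs */
          .version = "0.1",
          .irq     = UIO_IRQ_NONE,         /* no interrupt in this sketch */
  };

  static int my_probe(struct platform_device *pdev)
  {
          /* describe one x86 ioport region; unused port[] slots keep size 0 */
          my_uio_info.port[0].porttype = UIO_PORT_X86;
          my_uio_info.port[0].start    = 0x378;    /* example base port */
          my_uio_info.port[0].size     = 8;

          return uio_register_device(&pdev->dev, &my_uio_info);
  }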
+
</sect1>
<sect1 id="adding_irq_handler">
int kobject_rename(struct kobject *kobj, const char *new_name);
-Note kobject_rename does perform any locking or have a solid notion of
-what names are valid so the provide must provide their own sanity checking
+kobject_rename does not perform any locking or have a solid notion of
+what names are valid so the caller must provide their own sanity checking
and serialization.
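As a hedged illustration of that requirement (the subsystem mutex and the particular validity checks are assumptions of this sketch, not something the kobject core mandates):

  #include <linux/errno.h>
  #include <linux/kobject.h>
  #include <linux/mutex.h>
  #include <linux/string.h>

  static DEFINE_MUTEX(my_subsys_mutex);          /* serializes renames */

  static int my_subsys_rename(struct kobject *kobj, const char *new_name)
  {
          int error;

          /* caller-side sanity checking: kobject_rename() does none */
          if (!new_name || !*new_name || strchr(new_name, '/'))
                  return -EINVAL;

          mutex_lock(&my_subsys_mutex);
          error = kobject_rename(kobj, new_name);
          mutex_unlock(&my_subsys_mutex);
          return error;
  }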
There is a function called kobject_set_name() but that is legacy cruft and
ec->dma = NO_DMA;
ec->ops = &ecard_default_ops;
- snprintf(ec->dev.bus_id, sizeof(ec->dev.bus_id), "ecard%d", slot);
+ dev_set_name(&ec->dev, "ecard%d", slot);
ec->dev.parent = NULL;
ec->dev.bus = &ecard_bus_type;
ec->dev.dma_mask = &ec->dma_mask;
static struct amba_device clcd_device = {
.dev = {
- .bus_id = "mb:16",
+ .init_name = "mb:16",
.coherent_dma_mask = ~0,
.platform_data = &clcd_plat_data,
},
static struct amba_device uart1_device = {
.dev = {
- .bus_id = "apb:uart1",
+ .init_name = "apb:uart1",
.platform_data = &ep93xx_uart_data,
},
.res = {
static struct amba_device uart2_device = {
.dev = {
- .bus_id = "apb:uart2",
+ .init_name = "apb:uart2",
.platform_data = &ep93xx_uart_data,
},
.res = {
static struct amba_device uart3_device = {
.dev = {
- .bus_id = "apb:uart3",
+ .init_name = "apb:uart3",
.platform_data = &ep93xx_uart_data,
},
.res = {
static struct amba_device rtc_device = {
.dev = {
- .bus_id = "mb:15",
+ .init_name = "mb:15",
},
.res = {
.start = INTEGRATOR_RTC_BASE,
static struct amba_device uart0_device = {
.dev = {
- .bus_id = "mb:16",
+ .init_name = "mb:16",
.platform_data = &integrator_uart_data,
},
.res = {
static struct amba_device uart1_device = {
.dev = {
- .bus_id = "mb:17",
+ .init_name = "mb:17",
.platform_data = &integrator_uart_data,
},
.res = {
static struct amba_device kmi0_device = {
.dev = {
- .bus_id = "mb:18",
+ .init_name = "mb:18",
},
.res = {
.start = KMI0_BASE,
static struct amba_device kmi1_device = {
.dev = {
- .bus_id = "mb:19",
+ .init_name = "mb:19",
},
.res = {
.start = KMI1_BASE,
static struct amba_device mmc_device = {
.dev = {
- .bus_id = "mb:1c",
+ .init_name = "mb:1c",
.platform_data = &mmc_data,
},
.res = {
static struct amba_device aaci_device = {
.dev = {
- .bus_id = "mb:1d",
+ .init_name = "mb:1d",
},
.res = {
.start = INTCP_PA_AACI_BASE,
static struct amba_device clcd_device = {
.dev = {
- .bus_id = "mb:c0",
+ .init_name = "mb:c0",
.coherent_dma_mask = ~0,
.platform_data = &clcd_data,
},
static struct amba_device name##_device = { \
.dev = { \
.coherent_dma_mask = ~0, \
- .bus_id = busid, \
+ .init_name = busid, \
.platform_data = plat, \
}, \
.res = { \
static struct amba_device fb_device = {
.dev = {
- .bus_id = "fb",
+ .init_name = "fb",
.coherent_dma_mask = ~0,
},
.res = {
static struct amba_device name##_device = { \
.dev = { \
.coherent_dma_mask = ~0, \
- .bus_id = busid, \
+ .init_name = busid, \
.platform_data = plat, \
}, \
.res = { \
static struct amba_device name##_device = { \
.dev = { \
.coherent_dma_mask = ~0, \
- .bus_id = busid, \
+ .init_name = busid, \
.platform_data = plat, \
}, \
.res = { \
#define virt_to_lbus(x) ((x) - PAGE_OFFSET + OMAP1510_LB_OFFSET)
#define lbus_to_virt(x) ((x) - OMAP1510_LB_OFFSET + PAGE_OFFSET)
-#define is_lbus_device(dev) (cpu_is_omap15xx() && dev && (strncmp(dev->bus_id, "ohci", 4) == 0))
+#define is_lbus_device(dev) (cpu_is_omap15xx() && dev && (strncmp(dev_name(dev), "ohci", 4) == 0))
#define __arch_page_to_dma(dev, page) ({is_lbus_device(dev) ? \
(dma_addr_t)virt_to_lbus(page_address(page)) : \
unsigned i;
/* skip clocks coupled to devices that aren't registered */
- if (parent->dev && !parent->dev->bus_id[0] && !parent->users)
+ if (parent->dev && !dev_name(parent->dev) && !parent->users)
return;
/* <nest spaces> name <pad to end> */
parent->users ? "on" : "off", /* NOTE: not-paranoid!! */
clk_get_rate(parent));
if (parent->dev)
- seq_printf(r->s, ", for %s", parent->dev->bus_id);
+ seq_printf(r->s, ", for %s", dev_name(parent->dev));
seq_printf(r->s, "\n");
/* cost of this scan is small, but not linear... */
#error "Please contact <greg@kroah.com> for details on how to fix it properly."
static struct device iop_spu_device[2] = {
- { .bus_id = "iop-spu0", },
- { .bus_id = "iop-spu1", },
+ { .init_name = "iop-spu0", },
+ { .init_name = "iop-spu1", },
};
static struct device iop_mpu_device = {
- .bus_id = "iop-mpu",
+ .init_name = "iop-mpu",
};
static int wait_mpu_idle(void)
be probably a smaller DMA mask, but this is bug-to-bug compatible
to i386. */
struct device fallback_dev = {
- .bus_id = "fallback device",
+ .init_name = "fallback device",
.coherent_dma_mask = DMA_32BIT_MASK,
.dma_mask = &fallback_dev.coherent_dma_mask,
};
cx_dev->dev.parent = NULL;
cx_dev->dev.bus = &tiocx_bus_type;
cx_dev->dev.release = tiocx_bus_release;
- snprintf(cx_dev->dev.bus_id, BUS_ID_SIZE, "%d",
- cx_dev->cx_id.nasid);
+ dev_set_name(&cx_dev->dev, "%d", cx_dev->cx_id.nasid);
device_register(&cx_dev->dev);
get_device(&cx_dev->dev);
device_initialize(&vpe_device);
vpe_device.class = &vpe_class,
vpe_device.parent = NULL,
- strlcpy(vpe_device.bus_id, "vpe1", BUS_ID_SIZE);
+ dev_set_name(&vpe_device, "vpe1");
vpe_device.devt = MKDEV(major, minor);
err = device_add(&vpe_device);
if (err) {
+++ /dev/null
-/*
- * include/asm-s390/ccwdev.h
- *
- * Copyright (C) 2002,2005 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
- * Carsten Otte <cotte@de.ibm.com>
- *
- * Interface for s390 root device
- */
-
-#ifndef _S390_RDEV_H_
-#define _S390_RDEV_H_
-extern struct device *s390_root_dev_register(const char *);
-extern void s390_root_dev_unregister(struct device *);
-#endif /* _S390_RDEV_H_ */
int done_cmds;
wait_queue_head_t wq_done;
wait_queue_head_t wq_free;
- char name[BUS_ID_SIZE];
+ char name[20];
int max_queue;
unsigned long flags;
};
mutex_lock(&bsg_mutex);
hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));
- strncpy(bd->name, rq->bsg_dev.class_dev->bus_id, sizeof(bd->name) - 1);
+ strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1);
dprintk("bound to <%s>, max queue %d\n",
format_dev_t(buf, inode->i_rdev), bd->max_queue);
if (name)
devname = name;
else
- devname = parent->bus_id;
+ devname = dev_name(parent);
/*
* we need a proper transport to send commands, not a stacked device
struct gendisk *disk = dev_to_disk(dev);
struct hd_struct *part;
- if (strcmp(dev->bus_id, name))
+ if (strcmp(dev_name(dev), name))
continue;
part = disk_get_part(disk, partno);
if (sdev) {
ata_dev_printk(dev, KERN_INFO, "detaching (SCSI %s)\n",
- sdev->sdev_gendev.bus_id);
+ dev_name(&sdev->sdev_gendev));
scsi_remove_device(sdev);
scsi_device_put(sdev);
ic->classdev.parent = get_device(dev);
ic->classdev.class = cont->class;
cont->class->dev_release = attribute_container_release;
- strcpy(ic->classdev.bus_id, dev->bus_id);
+ dev_set_name(&ic->classdev, dev_name(dev));
if (fn)
fn(cont, dev, &ic->classdev);
else
#define to_class(obj) \
container_of(obj, struct class_private, class_subsys.kobj)
+/**
+ * struct device_private - structure to hold the portions of struct device that are private to the driver core.
+ *
+ * @klist_children - klist containing all children of this device
+ * @knode_parent - node in sibling list
+ * @knode_driver - node in driver list
+ * @knode_bus - node in bus list
+ * @device - pointer back to the struct device that this structure is
+ * associated with.
+ *
+ * Nothing outside of the driver core should ever touch these fields.
+ */
+struct device_private {
+ struct klist klist_children;
+ struct klist_node knode_parent;
+ struct klist_node knode_driver;
+ struct klist_node knode_bus;
+ struct device *device;
+};
+#define to_device_private_parent(obj) \
+ container_of(obj, struct device_private, knode_parent)
+#define to_device_private_driver(obj) \
+ container_of(obj, struct device_private, knode_driver)
+#define to_device_private_bus(obj) \
+ container_of(obj, struct device_private, knode_bus)
+
/* initialisation functions */
extern int devices_init(void);
extern int buses_init(void);
static struct device *next_device(struct klist_iter *i)
{
struct klist_node *n = klist_next(i);
- return n ? container_of(n, struct device, knode_bus) : NULL;
+ struct device *dev = NULL;
+ struct device_private *dev_prv;
+
+ if (n) {
+ dev_prv = to_device_private_bus(n);
+ dev = dev_prv->device;
+ }
+ return dev;
}
/**
return -EINVAL;
klist_iter_init_node(&bus->p->klist_devices, &i,
- (start ? &start->knode_bus : NULL));
+ (start ? &start->p->knode_bus : NULL));
while ((dev = next_device(&i)) && !error)
error = fn(dev, data);
klist_iter_exit(&i);
return NULL;
klist_iter_init_node(&bus->p->klist_devices, &i,
- (start ? &start->knode_bus : NULL));
+ (start ? &start->p->knode_bus : NULL));
while ((dev = next_device(&i)))
if (match(dev, data) && get_device(dev))
break;
{
const char *name = data;
- return sysfs_streq(name, dev->bus_id);
+ return sysfs_streq(name, dev_name(dev));
}
/**
int error = 0;
if (bus) {
- pr_debug("bus: '%s': add device %s\n", bus->name, dev->bus_id);
+ pr_debug("bus: '%s': add device %s\n", bus->name, dev_name(dev));
error = device_add_attrs(bus, dev);
if (error)
goto out_put;
error = sysfs_create_link(&bus->p->devices_kset->kobj,
- &dev->kobj, dev->bus_id);
+ &dev->kobj, dev_name(dev));
if (error)
goto out_id;
error = sysfs_create_link(&dev->kobj,
out_deprecated:
sysfs_remove_link(&dev->kobj, "subsystem");
out_subsys:
- sysfs_remove_link(&bus->p->devices_kset->kobj, dev->bus_id);
+ sysfs_remove_link(&bus->p->devices_kset->kobj, dev_name(dev));
out_id:
device_remove_attrs(bus, dev);
out_put:
ret = device_attach(dev);
WARN_ON(ret < 0);
if (ret >= 0)
- klist_add_tail(&dev->knode_bus, &bus->p->klist_devices);
+ klist_add_tail(&dev->p->knode_bus,
+ &bus->p->klist_devices);
}
}
sysfs_remove_link(&dev->kobj, "subsystem");
remove_deprecated_bus_links(dev);
sysfs_remove_link(&dev->bus->p->devices_kset->kobj,
- dev->bus_id);
+ dev_name(dev));
device_remove_attrs(dev->bus, dev);
- if (klist_node_attached(&dev->knode_bus))
- klist_del(&dev->knode_bus);
+ if (klist_node_attached(&dev->p->knode_bus))
+ klist_del(&dev->p->knode_bus);
pr_debug("bus: '%s': remove device %s\n",
- dev->bus->name, dev->bus_id);
+ dev->bus->name, dev_name(dev));
device_release_driver(dev);
bus_put(dev->bus);
}
static void klist_devices_get(struct klist_node *n)
{
- struct device *dev = container_of(n, struct device, knode_bus);
+ struct device_private *dev_prv = to_device_private_bus(n);
+ struct device *dev = dev_prv->device;
get_device(dev);
}
static void klist_devices_put(struct klist_node *n)
{
- struct device *dev = container_of(n, struct device, knode_bus);
+ struct device_private *dev_prv = to_device_private_bus(n);
+ struct device *dev = dev_prv->device;
put_device(dev);
}
{
struct list_head *pos;
struct klist_node *n;
+ struct device_private *dev_prv;
struct device *b;
list_for_each(pos, list) {
n = container_of(pos, struct klist_node, n_node);
- b = container_of(n, struct device, knode_bus);
+ dev_prv = to_device_private_bus(n);
+ b = dev_prv->device;
if (compare(a, b) <= 0) {
- list_move_tail(&a->knode_bus.n_node,
- &b->knode_bus.n_node);
+ list_move_tail(&a->p->knode_bus.n_node,
+ &b->p->knode_bus.n_node);
return;
}
}
- list_move_tail(&a->knode_bus.n_node, list);
+ list_move_tail(&a->p->knode_bus.n_node, list);
}
void bus_sort_breadthfirst(struct bus_type *bus,
LIST_HEAD(sorted_devices);
struct list_head *pos, *tmp;
struct klist_node *n;
+ struct device_private *dev_prv;
struct device *dev;
struct klist *device_klist;
spin_lock(&device_klist->k_lock);
list_for_each_safe(pos, tmp, &device_klist->k_list) {
n = container_of(pos, struct klist_node, n_node);
- dev = container_of(n, struct device, knode_bus);
+ dev_prv = to_device_private_bus(n);
+ dev = dev_prv->device;
device_insertion_sort_klist(dev, &sorted_devices, compare);
}
list_splice(&sorted_devices, &device_klist->k_list);
static void device_release(struct kobject *kobj)
{
struct device *dev = to_dev(kobj);
+ struct device_private *p = dev->p;
if (dev->release)
dev->release(dev);
else
WARN(1, KERN_ERR "Device '%s' does not have a release() "
"function, it is broken and must be fixed.\n",
- dev->bus_id);
+ dev_name(dev));
+ kfree(p);
}
static struct kobj_type device_ktype = {
retval = dev->bus->uevent(dev, env);
if (retval)
pr_debug("device: '%s': %s: bus uevent() returned %d\n",
- dev->bus_id, __func__, retval);
+ dev_name(dev), __func__, retval);
}
/* have the class specific function add its stuff */
retval = dev->class->dev_uevent(dev, env);
if (retval)
pr_debug("device: '%s': %s: class uevent() "
- "returned %d\n", dev->bus_id,
+ "returned %d\n", dev_name(dev),
__func__, retval);
}
retval = dev->type->uevent(dev, env);
if (retval)
pr_debug("device: '%s': %s: dev_type uevent() "
- "returned %d\n", dev->bus_id,
+ "returned %d\n", dev_name(dev),
__func__, retval);
}
static void klist_children_get(struct klist_node *n)
{
- struct device *dev = container_of(n, struct device, knode_parent);
+ struct device_private *p = to_device_private_parent(n);
+ struct device *dev = p->device;
get_device(dev);
}
static void klist_children_put(struct klist_node *n)
{
- struct device *dev = container_of(n, struct device, knode_parent);
+ struct device_private *p = to_device_private_parent(n);
+ struct device *dev = p->device;
put_device(dev);
}
*/
void device_initialize(struct device *dev)
{
+ dev->p = kzalloc(sizeof(*dev->p), GFP_KERNEL);
+ if (!dev->p) {
+ WARN_ON(1);
+ return;
+ }
+ dev->p->device = dev;
dev->kobj.kset = devices_kset;
kobject_init(&dev->kobj, &device_ktype);
- klist_init(&dev->klist_children, klist_children_get,
+ klist_init(&dev->p->klist_children, klist_children_get,
klist_children_put);
INIT_LIST_HEAD(&dev->dma_pools);
init_MUTEX(&dev->sem);
if (dev->kobj.parent != &dev->class->p->class_subsys.kobj &&
device_is_not_partition(dev)) {
error = sysfs_create_link(&dev->class->p->class_subsys.kobj,
- &dev->kobj, dev->bus_id);
+ &dev->kobj, dev_name(dev));
if (error)
goto out_subsys;
}
if (dev->kobj.parent != &dev->class->p->class_subsys.kobj &&
device_is_not_partition(dev))
sysfs_remove_link(&dev->class->p->class_subsys.kobj,
- dev->bus_id);
+ dev_name(dev));
#else
/* link in the class directory pointing to the device */
error = sysfs_create_link(&dev->class->p->class_subsys.kobj,
- &dev->kobj, dev->bus_id);
+ &dev->kobj, dev_name(dev));
if (error)
goto out_subsys;
return 0;
out_busid:
- sysfs_remove_link(&dev->class->p->class_subsys.kobj, dev->bus_id);
+ sysfs_remove_link(&dev->class->p->class_subsys.kobj, dev_name(dev));
#endif
out_subsys:
if (dev->kobj.parent != &dev->class->p->class_subsys.kobj &&
device_is_not_partition(dev))
sysfs_remove_link(&dev->class->p->class_subsys.kobj,
- dev->bus_id);
+ dev_name(dev));
#else
if (dev->parent && device_is_not_partition(dev))
sysfs_remove_link(&dev->kobj, "device");
- sysfs_remove_link(&dev->class->p->class_subsys.kobj, dev->bus_id);
+ sysfs_remove_link(&dev->class->p->class_subsys.kobj, dev_name(dev));
#endif
sysfs_remove_link(&dev->kobj, "subsystem");
if (!strlen(dev->bus_id))
goto done;
- pr_debug("device: '%s': %s\n", dev->bus_id, __func__);
+ pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
parent = get_device(dev->parent);
setup_parent(dev, parent);
set_dev_node(dev, dev_to_node(parent));
/* first, register with generic layer. */
- error = kobject_add(&dev->kobj, dev->kobj.parent, "%s", dev->bus_id);
+ error = kobject_add(&dev->kobj, dev->kobj.parent, "%s", dev_name(dev));
if (error)
goto Error;
if (platform_notify)
platform_notify(dev);
- /* notify clients of device entry (new way) */
- if (dev->bus)
- blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
- BUS_NOTIFY_ADD_DEVICE, dev);
-
error = device_create_file(dev, &uevent_attr);
if (error)
goto attrError;
if (error)
goto DPMError;
device_pm_add(dev);
+
+ /* Notify clients of device addition. This call must come
+	 * after dpm_sysfs_add() and before kobject_uevent().
+ */
+ if (dev->bus)
+ blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
+ BUS_NOTIFY_ADD_DEVICE, dev);
+
kobject_uevent(&dev->kobj, KOBJ_ADD);
bus_attach_device(dev);
if (parent)
- klist_add_tail(&dev->knode_parent, &parent->klist_children);
+ klist_add_tail(&dev->p->knode_parent,
+ &parent->p->klist_children);
if (dev->class) {
mutex_lock(&dev->class->p->class_mutex);
DPMError:
bus_remove_device(dev);
BusError:
- if (dev->bus)
- blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
- BUS_NOTIFY_DEL_DEVICE, dev);
device_remove_attrs(dev);
AttrsError:
device_remove_class_symlinks(dev);
struct device *parent = dev->parent;
struct class_interface *class_intf;
+ /* Notify clients of device removal. This call must come
+ * before dpm_sysfs_remove().
+ */
+ if (dev->bus)
+ blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
+ BUS_NOTIFY_DEL_DEVICE, dev);
device_pm_remove(dev);
dpm_sysfs_remove(dev);
if (parent)
- klist_del(&dev->knode_parent);
+ klist_del(&dev->p->knode_parent);
if (MAJOR(dev->devt)) {
device_remove_sys_dev_entry(dev);
device_remove_file(dev, &devt_attr);
*/
if (platform_notify_remove)
platform_notify_remove(dev);
- if (dev->bus)
- blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
- BUS_NOTIFY_DEL_DEVICE, dev);
kobject_uevent(&dev->kobj, KOBJ_REMOVE);
cleanup_device_parent(dev);
kobject_del(&dev->kobj);
*/
void device_unregister(struct device *dev)
{
- pr_debug("device: '%s': %s\n", dev->bus_id, __func__);
+ pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
device_del(dev);
put_device(dev);
}
static struct device *next_device(struct klist_iter *i)
{
struct klist_node *n = klist_next(i);
- return n ? container_of(n, struct device, knode_parent) : NULL;
+ struct device *dev = NULL;
+ struct device_private *p;
+
+ if (n) {
+ p = to_device_private_parent(n);
+ dev = p->device;
+ }
+ return dev;
}
/**
struct device *child;
int error = 0;
- klist_iter_init(&parent->klist_children, &i);
+ klist_iter_init(&parent->p->klist_children, &i);
while ((child = next_device(&i)) && !error)
error = fn(child, data);
klist_iter_exit(&i);
if (!parent)
return NULL;
- klist_iter_init(&parent->klist_children, &i);
+ klist_iter_init(&parent->p->klist_children, &i);
while ((child = next_device(&i)))
if (match(child, data) && get_device(child))
break;
EXPORT_SYMBOL_GPL(device_create_file);
EXPORT_SYMBOL_GPL(device_remove_file);
+struct root_device {
+ struct device dev;
+ struct module *owner;
+};
+
+#define to_root_device(dev) container_of(dev, struct root_device, dev)
+
+static void root_device_release(struct device *dev)
+{
+ kfree(to_root_device(dev));
+}
+
+/**
+ * __root_device_register - allocate and register a root device
+ * @name: root device name
+ * @owner: owner module of the root device, usually THIS_MODULE
+ *
+ * This function allocates a root device and registers it
+ * using device_register(). In order to free the returned
+ * device, use root_device_unregister().
+ *
+ * Root devices are dummy devices which allow other devices
+ * to be grouped under /sys/devices. Use this function to
+ * allocate a root device and then use it as the parent of
+ * any device which should appear under /sys/devices/{name}
+ *
+ * The /sys/devices/{name} directory will also contain a
+ * 'module' symlink which points to the @owner directory
+ * in sysfs.
+ *
+ * Note: You probably want to use root_device_register().
+ */
+struct device *__root_device_register(const char *name, struct module *owner)
+{
+ struct root_device *root;
+ int err = -ENOMEM;
+
+ root = kzalloc(sizeof(struct root_device), GFP_KERNEL);
+ if (!root)
+ return ERR_PTR(err);
+
+ err = dev_set_name(&root->dev, name);
+ if (err) {
+ kfree(root);
+ return ERR_PTR(err);
+ }
+
+ root->dev.release = root_device_release;
+
+ err = device_register(&root->dev);
+ if (err) {
+ put_device(&root->dev);
+ return ERR_PTR(err);
+ }
+
+#ifdef CONFIG_MODULES /* gotta find a "cleaner" way to do this */
+ if (owner) {
+ struct module_kobject *mk = &owner->mkobj;
+
+ err = sysfs_create_link(&root->dev.kobj, &mk->kobj, "module");
+ if (err) {
+ device_unregister(&root->dev);
+ return ERR_PTR(err);
+ }
+ root->owner = owner;
+ }
+#endif
+
+ return &root->dev;
+}
+EXPORT_SYMBOL_GPL(__root_device_register);
+
+/**
+ * root_device_unregister - unregister and free a root device
+ * @dev: device going away.
+ *
+ * This function unregisters and cleans up a device that was created by
+ * root_device_register().
+ */
+void root_device_unregister(struct device *dev)
+{
+ struct root_device *root = to_root_device(dev);
+
+ if (root->owner)
+ sysfs_remove_link(&root->dev.kobj, "module");
+
+ device_unregister(dev);
+}
+EXPORT_SYMBOL_GPL(root_device_unregister);
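A short usage sketch (module and device names are made up): register a root device at init time, use it as the parent of devices that should appear under /sys/devices/my_root, and unregister it on exit:

  #include <linux/device.h>
  #include <linux/err.h>
  #include <linux/init.h>
  #include <linux/module.h>

  static struct device *my_root;

  static int __init my_init(void)
  {
          my_root = root_device_register("my_root");
          if (IS_ERR(my_root))
                  return PTR_ERR(my_root);
          /* later: set child->parent = my_root before device_register(child) */
          return 0;
  }

  static void __exit my_exit(void)
  {
          root_device_unregister(my_root);
  }

  module_init(my_init);
  module_exit(my_exit);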
+
static void device_create_release(struct device *dev)
{
- pr_debug("device: '%s': %s\n", dev->bus_id, __func__);
+ pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
kfree(dev);
}
if (!dev)
return -EINVAL;
- pr_debug("device: '%s': %s: renaming to '%s'\n", dev->bus_id,
+ pr_debug("device: '%s': %s: renaming to '%s'\n", dev_name(dev),
__func__, new_name);
#ifdef CONFIG_SYSFS_DEPRECATED
#else
if (dev->class) {
error = sysfs_create_link_nowarn(&dev->class->p->class_subsys.kobj,
- &dev->kobj, dev->bus_id);
+ &dev->kobj, dev_name(dev));
if (error)
goto out;
sysfs_remove_link(&dev->class->p->class_subsys.kobj,
new_parent = get_device(new_parent);
new_parent_kobj = get_device_parent(dev, new_parent);
- pr_debug("device: '%s': %s: moving to '%s'\n", dev->bus_id,
- __func__, new_parent ? new_parent->bus_id : "<NULL>");
+ pr_debug("device: '%s': %s: moving to '%s'\n", dev_name(dev),
+ __func__, new_parent ? dev_name(new_parent) : "<NULL>");
error = kobject_move(&dev->kobj, new_parent_kobj);
if (error) {
cleanup_glue_dir(dev, new_parent_kobj);
old_parent = dev->parent;
dev->parent = new_parent;
if (old_parent)
- klist_remove(&dev->knode_parent);
+ klist_remove(&dev->p->knode_parent);
if (new_parent) {
- klist_add_tail(&dev->knode_parent, &new_parent->klist_children);
+ klist_add_tail(&dev->p->knode_parent,
+ &new_parent->p->klist_children);
set_dev_node(dev, dev_to_node(new_parent));
}
device_move_class_links(dev, new_parent, old_parent);
if (!kobject_move(&dev->kobj, &old_parent->kobj)) {
if (new_parent)
- klist_remove(&dev->knode_parent);
+ klist_remove(&dev->p->knode_parent);
dev->parent = old_parent;
if (old_parent) {
- klist_add_tail(&dev->knode_parent,
- &old_parent->klist_children);
+ klist_add_tail(&dev->p->knode_parent,
+ &old_parent->p->klist_children);
set_dev_node(dev, dev_to_node(old_parent));
}
}
static void driver_bound(struct device *dev)
{
- if (klist_node_attached(&dev->knode_driver)) {
+ if (klist_node_attached(&dev->p->knode_driver)) {
printk(KERN_WARNING "%s: device %s already bound\n",
__func__, kobject_name(&dev->kobj));
return;
}
- pr_debug("driver: '%s': %s: bound to device '%s'\n", dev->bus_id,
+ pr_debug("driver: '%s': %s: bound to device '%s'\n", dev_name(dev),
__func__, dev->driver->name);
if (dev->bus)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_BOUND_DRIVER, dev);
- klist_add_tail(&dev->knode_driver, &dev->driver->p->klist_devices);
+ klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);
}
static int driver_sysfs_add(struct device *dev)
atomic_inc(&probe_count);
pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
- drv->bus->name, __func__, drv->name, dev->bus_id);
+ drv->bus->name, __func__, drv->name, dev_name(dev));
WARN_ON(!list_empty(&dev->devres_head));
dev->driver = drv;
if (driver_sysfs_add(dev)) {
printk(KERN_ERR "%s: driver_sysfs_add(%s) failed\n",
- __func__, dev->bus_id);
+ __func__, dev_name(dev));
goto probe_failed;
}
driver_bound(dev);
ret = 1;
pr_debug("bus: '%s': %s: bound device %s to driver %s\n",
- drv->bus->name, __func__, dev->bus_id, drv->name);
+ drv->bus->name, __func__, dev_name(dev), drv->name);
goto done;
probe_failed:
/* driver matched but the probe failed */
printk(KERN_WARNING
"%s: probe of %s failed with error %d\n",
- drv->name, dev->bus_id, ret);
+ drv->name, dev_name(dev), ret);
}
/*
* Ignore errors returned by ->probe so that the next driver can try
goto done;
pr_debug("bus: '%s': %s: matched device %s with driver %s\n",
- drv->bus->name, __func__, dev->bus_id, drv->name);
+ drv->bus->name, __func__, dev_name(dev), drv->name);
ret = really_probe(dev, drv);
drv = dev->driver;
if (drv) {
driver_sysfs_remove(dev);
- sysfs_remove_link(&dev->kobj, "driver");
if (dev->bus)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
drv->remove(dev);
devres_release_all(dev);
dev->driver = NULL;
- klist_remove(&dev->knode_driver);
+ klist_remove(&dev->p->knode_driver);
}
}
*/
void driver_detach(struct device_driver *drv)
{
+ struct device_private *dev_prv;
struct device *dev;
for (;;) {
spin_unlock(&drv->p->klist_devices.k_lock);
break;
}
- dev = list_entry(drv->p->klist_devices.k_list.prev,
- struct device, knode_driver.n_node);
+ dev_prv = list_entry(drv->p->klist_devices.k_list.prev,
+ struct device_private,
+ knode_driver.n_node);
+ dev = dev_prv->device;
get_device(dev);
spin_unlock(&drv->p->klist_devices.k_lock);
static struct device *next_device(struct klist_iter *i)
{
struct klist_node *n = klist_next(i);
- return n ? container_of(n, struct device, knode_driver) : NULL;
+ struct device *dev = NULL;
+ struct device_private *dev_prv;
+
+ if (n) {
+ dev_prv = to_device_private_driver(n);
+ dev = dev_prv->device;
+ }
+ return dev;
}
/**
return -EINVAL;
klist_iter_init_node(&drv->p->klist_devices, &i,
- start ? &start->knode_driver : NULL);
+ start ? &start->p->knode_driver : NULL);
while ((dev = next_device(&i)) && !error)
error = fn(dev, data);
klist_iter_exit(&i);
return NULL;
klist_iter_init_node(&drv->p->klist_devices, &i,
- (start ? &start->knode_driver : NULL));
+ (start ? &start->p->knode_driver : NULL));
while ((dev = next_device(&i)))
if (match(dev, data) && get_device(dev))
break;
fw_load_abort(fw_priv);
}
-static inline void fw_setup_device_id(struct device *f_dev, struct device *dev)
-{
- /* XXX warning we should watch out for name collisions */
- strlcpy(f_dev->bus_id, dev->bus_id, BUS_ID_SIZE);
-}
-
static int fw_register_device(struct device **dev_p, const char *fw_name,
struct device *device)
{
fw_priv->timeout.data = (u_long) fw_priv;
init_timer(&fw_priv->timeout);
- fw_setup_device_id(f_dev, device);
+ dev_set_name(f_dev, dev_name(device));
f_dev->parent = device;
f_dev->class = &firmware_class;
dev_set_drvdata(f_dev, fw_priv);
#include <linux/isa.h>
static struct device isa_bus = {
- .bus_id = "isa"
+ .init_name = "isa"
};
struct isa_dev {
isa_dev->dev.parent = &isa_bus;
isa_dev->dev.bus = &isa_bus_type;
- snprintf(isa_dev->dev.bus_id, BUS_ID_SIZE, "%s.%u",
- isa_driver->driver.name, id);
-
+ dev_set_name(&isa_dev->dev, "%s.%u",
+ isa_driver->driver.name, id);
isa_dev->dev.platform_data = isa_driver;
isa_dev->dev.release = isa_dev_release;
isa_dev->id = id;
driver))
struct device platform_bus = {
- .bus_id = "platform",
+ .init_name = "platform",
};
EXPORT_SYMBOL_GPL(platform_bus);
pdev->dev.bus = &platform_bus_type;
if (pdev->id != -1)
- snprintf(pdev->dev.bus_id, BUS_ID_SIZE, "%s.%d", pdev->name,
- pdev->id);
+ dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id);
else
- strlcpy(pdev->dev.bus_id, pdev->name, BUS_ID_SIZE);
+ dev_set_name(&pdev->dev, pdev->name);
for (i = 0; i < pdev->num_resources; i++) {
struct resource *p, *r = &pdev->resource[i];
if (r->name == NULL)
- r->name = pdev->dev.bus_id;
+ r->name = dev_name(&pdev->dev);
p = r->parent;
if (!p) {
if (p && insert_resource(p, r)) {
printk(KERN_ERR
"%s: failed to claim resource %d\n",
- pdev->dev.bus_id, i);
+ dev_name(&pdev->dev), i);
ret = -EBUSY;
goto failed;
}
}
pr_debug("Registering platform device '%s'. Parent at %s\n",
- pdev->dev.bus_id, pdev->dev.parent->bus_id);
+ dev_name(&pdev->dev), dev_name(pdev->dev.parent));
ret = device_add(&pdev->dev);
if (ret == 0)
drv->driver.suspend = platform_drv_suspend;
if (drv->resume)
drv->driver.resume = platform_drv_resume;
- if (drv->pm)
- drv->driver.pm = &drv->pm->base;
return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(platform_driver_register);
struct platform_device *pdev;
pdev = container_of(dev, struct platform_device, dev);
- return (strncmp(pdev->name, drv->name, BUS_ID_SIZE) == 0);
+ return (strcmp(pdev->name, drv->name) == 0);
}
#ifdef CONFIG_PM_SLEEP
struct device_driver *drv = dev->driver;
int ret = 0;
- if (drv && drv->pm) {
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
if (drv->pm->suspend)
ret = drv->pm->suspend(dev);
} else {
static int platform_pm_suspend_noirq(struct device *dev)
{
- struct platform_driver *pdrv;
+ struct device_driver *drv = dev->driver;
int ret = 0;
- if (!dev->driver)
+ if (!drv)
return 0;
- pdrv = to_platform_driver(dev->driver);
- if (pdrv->pm) {
- if (pdrv->pm->suspend_noirq)
- ret = pdrv->pm->suspend_noirq(dev);
+ if (drv->pm) {
+ if (drv->pm->suspend_noirq)
+ ret = drv->pm->suspend_noirq(dev);
} else {
ret = platform_legacy_suspend_late(dev, PMSG_SUSPEND);
}
struct device_driver *drv = dev->driver;
int ret = 0;
- if (drv && drv->pm) {
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
if (drv->pm->resume)
ret = drv->pm->resume(dev);
} else {
static int platform_pm_resume_noirq(struct device *dev)
{
- struct platform_driver *pdrv;
+ struct device_driver *drv = dev->driver;
int ret = 0;
- if (!dev->driver)
+ if (!drv)
return 0;
- pdrv = to_platform_driver(dev->driver);
- if (pdrv->pm) {
- if (pdrv->pm->resume_noirq)
- ret = pdrv->pm->resume_noirq(dev);
+ if (drv->pm) {
+ if (drv->pm->resume_noirq)
+ ret = drv->pm->resume_noirq(dev);
} else {
ret = platform_legacy_resume_early(dev);
}
static int platform_pm_freeze_noirq(struct device *dev)
{
- struct platform_driver *pdrv;
+ struct device_driver *drv = dev->driver;
int ret = 0;
- if (!dev->driver)
+ if (!drv)
return 0;
- pdrv = to_platform_driver(dev->driver);
- if (pdrv->pm) {
- if (pdrv->pm->freeze_noirq)
- ret = pdrv->pm->freeze_noirq(dev);
+ if (drv->pm) {
+ if (drv->pm->freeze_noirq)
+ ret = drv->pm->freeze_noirq(dev);
} else {
ret = platform_legacy_suspend_late(dev, PMSG_FREEZE);
}
struct device_driver *drv = dev->driver;
int ret = 0;
- if (drv && drv->pm) {
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
if (drv->pm->thaw)
ret = drv->pm->thaw(dev);
} else {
static int platform_pm_thaw_noirq(struct device *dev)
{
- struct platform_driver *pdrv;
+ struct device_driver *drv = dev->driver;
int ret = 0;
- if (!dev->driver)
+ if (!drv)
return 0;
- pdrv = to_platform_driver(dev->driver);
- if (pdrv->pm) {
- if (pdrv->pm->thaw_noirq)
- ret = pdrv->pm->thaw_noirq(dev);
+ if (drv->pm) {
+ if (drv->pm->thaw_noirq)
+ ret = drv->pm->thaw_noirq(dev);
} else {
ret = platform_legacy_resume_early(dev);
}
struct device_driver *drv = dev->driver;
int ret = 0;
- if (drv && drv->pm) {
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
if (drv->pm->poweroff)
ret = drv->pm->poweroff(dev);
} else {
static int platform_pm_poweroff_noirq(struct device *dev)
{
- struct platform_driver *pdrv;
+ struct device_driver *drv = dev->driver;
int ret = 0;
- if (!dev->driver)
+ if (!drv)
return 0;
- pdrv = to_platform_driver(dev->driver);
- if (pdrv->pm) {
- if (pdrv->pm->poweroff_noirq)
- ret = pdrv->pm->poweroff_noirq(dev);
+ if (drv->pm) {
+ if (drv->pm->poweroff_noirq)
+ ret = drv->pm->poweroff_noirq(dev);
} else {
ret = platform_legacy_suspend_late(dev, PMSG_HIBERNATE);
}
struct device_driver *drv = dev->driver;
int ret = 0;
- if (drv && drv->pm) {
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
if (drv->pm->restore)
ret = drv->pm->restore(dev);
} else {
static int platform_pm_restore_noirq(struct device *dev)
{
- struct platform_driver *pdrv;
+ struct device_driver *drv = dev->driver;
int ret = 0;
- if (!dev->driver)
+ if (!drv)
return 0;
- pdrv = to_platform_driver(dev->driver);
- if (pdrv->pm) {
- if (pdrv->pm->restore_noirq)
- ret = pdrv->pm->restore_noirq(dev);
+ if (drv->pm) {
+ if (drv->pm->restore_noirq)
+ ret = drv->pm->restore_noirq(dev);
} else {
ret = platform_legacy_resume_early(dev);
}
#endif /* !CONFIG_HIBERNATION */
-static struct pm_ext_ops platform_pm_ops = {
- .base = {
- .prepare = platform_pm_prepare,
- .complete = platform_pm_complete,
- .suspend = platform_pm_suspend,
- .resume = platform_pm_resume,
- .freeze = platform_pm_freeze,
- .thaw = platform_pm_thaw,
- .poweroff = platform_pm_poweroff,
- .restore = platform_pm_restore,
- },
+static struct dev_pm_ops platform_dev_pm_ops = {
+ .prepare = platform_pm_prepare,
+ .complete = platform_pm_complete,
+ .suspend = platform_pm_suspend,
+ .resume = platform_pm_resume,
+ .freeze = platform_pm_freeze,
+ .thaw = platform_pm_thaw,
+ .poweroff = platform_pm_poweroff,
+ .restore = platform_pm_restore,
.suspend_noirq = platform_pm_suspend_noirq,
.resume_noirq = platform_pm_resume_noirq,
.freeze_noirq = platform_pm_freeze_noirq,
.restore_noirq = platform_pm_restore_noirq,
};
-#define PLATFORM_PM_OPS_PTR &platform_pm_ops
+#define PLATFORM_PM_OPS_PTR (&platform_dev_pm_ops)
#else /* !CONFIG_PM_SLEEP */
if (dev->parent) {
if (dev->parent->power.status >= DPM_SUSPENDING)
dev_warn(dev, "parent %s should not be sleeping\n",
- dev->parent->bus_id);
+ dev_name(dev->parent));
} else if (transition_started) {
/*
* We refuse to register parentless devices while a PM
* @ops: PM operations to choose from.
* @state: PM transition of the system being carried out.
*/
-static int pm_op(struct device *dev, struct pm_ops *ops, pm_message_t state)
+static int pm_op(struct device *dev, struct dev_pm_ops *ops,
+ pm_message_t state)
{
int error = 0;
* The operation is executed with interrupts disabled by the only remaining
* functional CPU in the system.
*/
-static int pm_noirq_op(struct device *dev, struct pm_ext_ops *ops,
+static int pm_noirq_op(struct device *dev, struct dev_pm_ops *ops,
pm_message_t state)
{
int error = 0;
if (dev->bus) {
if (dev->bus->pm) {
pm_dev_dbg(dev, state, "");
- error = pm_op(dev, &dev->bus->pm->base, state);
+ error = pm_op(dev, dev->bus->pm, state);
} else if (dev->bus->resume) {
pm_dev_dbg(dev, state, "legacy ");
error = dev->bus->resume(dev);
dev->type->pm->complete(dev);
}
- if (dev->bus && dev->bus->pm && dev->bus->pm->base.complete) {
+ if (dev->bus && dev->bus->pm && dev->bus->pm->complete) {
pm_dev_dbg(dev, state, "completing ");
- dev->bus->pm->base.complete(dev);
+ dev->bus->pm->complete(dev);
}
up(&dev->sem);
if (dev->bus) {
if (dev->bus->pm) {
pm_dev_dbg(dev, state, "");
- error = pm_op(dev, &dev->bus->pm->base, state);
+ error = pm_op(dev, dev->bus->pm, state);
} else if (dev->bus->suspend) {
pm_dev_dbg(dev, state, "legacy ");
error = dev->bus->suspend(dev, state);
down(&dev->sem);
- if (dev->bus && dev->bus->pm && dev->bus->pm->base.prepare) {
+ if (dev->bus && dev->bus->pm && dev->bus->pm->prepare) {
pm_dev_dbg(dev, state, "preparing ");
- error = dev->bus->pm->base.prepare(dev);
- suspend_report_result(dev->bus->pm->base.prepare, error);
+ error = dev->bus->pm->prepare(dev);
+ suspend_report_result(dev->bus->pm->prepare, error);
if (error)
goto End;
}
void set_trace_device(struct device *dev)
{
- dev_hash_value = hash_string(DEVSEED, dev->bus_id, DEVHASH);
+ dev_hash_value = hash_string(DEVSEED, dev_name(dev), DEVHASH);
}
EXPORT_SYMBOL(set_trace_device);
while (entry != &dpm_list) {
struct device * dev = to_device(entry);
- unsigned int hash = hash_string(DEVSEED, dev->bus_id, DEVHASH);
+ unsigned int hash = hash_string(DEVSEED, dev_name(dev), DEVHASH);
if (hash == value) {
dev_info(dev, "hash matches\n");
match++;
#if 0
/* sysfs */
memset(&mwave_device, 0, sizeof (struct device));
- snprintf(mwave_device.bus_id, BUS_ID_SIZE, "mwave");
+ dev_set_name(&mwave_device, "mwave");
if (device_register(&mwave_device))
goto cleanup_error;
}
dmi_dev->class = &dmi_class;
- strcpy(dmi_dev->bus_id, "id");
+ dev_set_name(dmi_dev, "id");
dmi_dev->groups = sys_dmi_attribute_groups;
ret = device_register(dmi_dev);
if (dev)
seq_printf(s, ", %s/%s",
dev->bus ? dev->bus->name : "no-bus",
- dev->bus_id);
+ dev_name(dev));
if (chip->label)
seq_printf(s, ", %s", chip->label);
if (chip->can_sleep)
else
minor_str = "card%d";
- snprintf(minor->kdev.bus_id, BUS_ID_SIZE, minor_str, minor->index);
+ dev_set_name(&minor->kdev, minor_str, minor->index);
err = device_register(&minor->kdev);
if (err) {
{
int id;
- if (likely(sscanf(dev->bus_id, HWMON_ID_FORMAT, &id) == 1)) {
+ if (likely(sscanf(dev_name(dev), HWMON_ID_FORMAT, &id) == 1)) {
device_unregister(dev);
spin_lock(&idr_lock);
idr_remove(&hwmon_idr, id);
}
dev_info(&client->dev, "%s: sensor '%s'\n",
- data->hwmon_dev->bus_id, client->name);
+ dev_name(data->hwmon_dev), client->name);
return 0;
}
static struct device dummy_dma_dev = {
- .bus_id = "fallback device",
+ .init_name = "fallback device",
.coherent_dma_mask = DMA_64BIT_MASK,
.dma_mask = &dummy_dma_dev.coherent_dma_mask,
};
class_dev->class = &ib_class;
class_dev->driver_data = device;
class_dev->parent = device->dma_device;
- strlcpy(class_dev->bus_id, device->name, BUS_ID_SIZE);
+ dev_set_name(class_dev, device->name);
INIT_LIST_HEAD(&device->port_list);
ucm_dev->dev.parent = device->dma_device;
ucm_dev->dev.devt = ucm_dev->cdev.dev;
ucm_dev->dev.release = ib_ucm_release_dev;
- snprintf(ucm_dev->dev.bus_id, BUS_ID_SIZE, "ucm%d",
- ucm_dev->devnum);
+ dev_set_name(&ucm_dev->dev, "ucm%d", ucm_dev->devnum);
if (device_register(&ucm_dev->dev))
goto err_cdev;
host->dev.class = &srp_class;
host->dev.parent = device->dev->dma_device;
- snprintf(host->dev.bus_id, BUS_ID_SIZE, "srp-%s-%d",
- device->dev->name, port);
+ dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
if (device_register(&host->dev))
goto free_host;
entry->dev.class = elements_class;
dev_set_drvdata(&entry->dev, elem);
- snprintf(entry->dev.bus_id, BUS_ID_SIZE, elem->name);
+ dev_set_name(&entry->dev, elem->name);
ret = device_register(&entry->dev);
if (ret) {
printk(KERN_ERR "%s: failed to register %s\n",
/* The root device for the lguest virtio devices. This makes them appear as
* /sys/devices/lguest/0,1,2 not /sys/devices/0,1,2. */
-static struct device lguest_root = {
- .parent = NULL,
- .bus_id = "lguest",
-};
+static struct device *lguest_root;
/*D:120 This is the core of the lguest bus: actually adding a new device.
* It's a separate function because it's neater that way, and because an
}
/* This devices' parent is the lguest/ dir. */
- ldev->vdev.dev.parent = &lguest_root;
+ ldev->vdev.dev.parent = lguest_root;
/* We have a unique device index thanks to the dev_index counter. */
ldev->vdev.id.device = d->type;
/* We have a simple set of routines for querying the device's
if (strcmp(pv_info.name, "lguest") != 0)
return 0;
- if (device_register(&lguest_root) != 0)
+ lguest_root = root_device_register("lguest");
+ if (IS_ERR(lguest_root))
panic("Could not register lguest root");
/* Devices are in a single page above top of "normal" mem */
#undef DEBUG
-#define MAX_NODE_NAME_SIZE (BUS_ID_SIZE - 12)
+#define MAX_NODE_NAME_SIZE (20 - 12)
static struct macio_chip *macio_on_hold;
if (irq != NO_IRQ) {
dev->interrupt[index].start = irq;
dev->interrupt[index].flags = IORESOURCE_IRQ;
- dev->interrupt[index].name = dev->ofdev.dev.bus_id;
+ dev->interrupt[index].name = dev_name(&dev->ofdev.dev);
}
if (dev->n_interrupts <= index)
dev->n_interrupts = index + 1;
break;
res->start = irq;
res->flags = IORESOURCE_IRQ;
- res->name = dev->ofdev.dev.bus_id;
+ res->name = dev_name(&dev->ofdev.dev);
if (macio_resource_quirks(np, res, i - 1)) {
memset(res, 0, sizeof(struct resource));
continue;
if (index >= MACIO_DEV_COUNT_RESOURCES)
break;
*res = r;
- res->name = dev->ofdev.dev.bus_id;
+ res->name = dev_name(&dev->ofdev.dev);
if (macio_resource_quirks(np, res, index)) {
memset(res, 0, sizeof(struct resource));
if (insert_resource(parent_res, res)) {
printk(KERN_WARNING "Can't request resource "
"%d for MacIO device %s\n",
- index, dev->ofdev.dev.bus_id);
+ index, dev_name(&dev->ofdev.dev));
}
}
dev->n_resources = index;
/* MacIO itself has a different reg, we use it's PCI base */
if (np == chip->of_node) {
- sprintf(dev->ofdev.dev.bus_id, "%1d.%08x:%.*s",
- chip->lbus.index,
+ dev_set_name(&dev->ofdev.dev, "%1d.%08x:%.*s",
+ chip->lbus.index,
#ifdef CONFIG_PCI
(unsigned int)pci_resource_start(chip->lbus.pdev, 0),
#else
MAX_NODE_NAME_SIZE, np->name);
} else {
reg = of_get_property(np, "reg", NULL);
- sprintf(dev->ofdev.dev.bus_id, "%1d.%08x:%.*s",
- chip->lbus.index,
- reg ? *reg : 0, MAX_NODE_NAME_SIZE, np->name);
+ dev_set_name(&dev->ofdev.dev, "%1d.%08x:%.*s",
+ chip->lbus.index,
+ reg ? *reg : 0, MAX_NODE_NAME_SIZE, np->name);
}
/* Setup interrupts & resources */
/* Register with core */
if (of_device_register(&dev->ofdev) != 0) {
printk(KERN_DEBUG"macio: device registration error for %s!\n",
- dev->ofdev.dev.bus_id);
+ dev_name(&dev->ofdev.dev));
kfree(dev);
return NULL;
}
resource_no,
macio_resource_len(dev, resource_no),
macio_resource_start(dev, resource_no),
- dev->ofdev.dev.bus_id);
+ dev_name(&dev->ofdev.dev));
return -EBUSY;
}
if (card) {
card->host = host;
- snprintf(card->dev.bus_id, sizeof(card->dev.bus_id),
- "%s", host->dev.bus_id);
+ dev_set_name(&card->dev, "%s", dev_name(&host->dev));
card->dev.parent = &host->dev;
card->dev.bus = &memstick_bus_type;
card->dev.release = memstick_free_card;
if (rc)
return rc;
- snprintf(host->dev.bus_id, BUS_ID_SIZE, "memstick%u", host->id);
+ dev_set_name(&host->dev, "memstick%u", host->id);
rc = device_add(&host->dev);
if (rc) {
if (rc) {
printk(KERN_WARNING
"%s: could not switch to 4-bit mode, error %d\n",
- card->dev.bus_id, rc);
+ dev_name(&card->dev), rc);
return 0;
}
msb->system = MEMSTICK_SYS_PAR4;
host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_PAR4);
printk(KERN_INFO "%s: switching to 4-bit parallel mode\n",
- card->dev.bus_id);
+ dev_name(&card->dev));
if (msb->caps & MEMSTICK_CAP_PAR8) {
rc = mspro_block_set_interface(card, MEMSTICK_SYS_PAR8);
MEMSTICK_PAR8);
printk(KERN_INFO
"%s: switching to 8-bit parallel mode\n",
- card->dev.bus_id);
+ dev_name(&card->dev));
} else
printk(KERN_WARNING
"%s: could not switch to 8-bit mode, error %d\n",
- card->dev.bus_id, rc);
+ dev_name(&card->dev), rc);
}
card->next_request = h_mspro_block_req_init;
if (rc) {
printk(KERN_WARNING
"%s: interface error, trying to fall back to serial\n",
- card->dev.bus_id);
+ dev_name(&card->dev));
msb->system = MEMSTICK_SYS_SERIAL;
host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_OFF);
msleep(10);
if (be16_to_cpu(attr->signature) != MSPRO_BLOCK_SIGNATURE) {
printk(KERN_ERR "%s: unrecognized device signature %x\n",
- card->dev.bus_id, be16_to_cpu(attr->signature));
+ dev_name(&card->dev), be16_to_cpu(attr->signature));
rc = -ENODEV;
goto out_free_attr;
}
if (attr->count > MSPRO_BLOCK_MAX_ATTRIBUTES) {
printk(KERN_WARNING "%s: way too many attribute entries\n",
- card->dev.bus_id);
+ dev_name(&card->dev));
attr_count = MSPRO_BLOCK_MAX_ATTRIBUTES;
} else
attr_count = attr->count;
printk(KERN_ERR
"%s : card failed to respond for a long period of time "
"(%x, %x)\n",
- host->dev->dev.bus_id, host->req ? host->req->tpc : 0,
+ dev_name(&host->dev->dev), host->req ? host->req->tpc : 0,
host->cmd_flags);
tifm_eject(host->dev);
if (!(TIFM_SOCK_STATE_OCCUPIED
& readl(sock->addr + SOCK_PRESENT_STATE))) {
printk(KERN_WARNING "%s : card gone, unexpectedly\n",
- sock->dev.bus_id);
+ dev_name(&sock->dev));
return rc;
}
{
struct i2o_device *i2o_dev = to_i2o_device(dev);
- pr_debug("i2o: device %s released\n", dev->bus_id);
+ pr_debug("i2o: device %s released\n", dev_name(dev));
kfree(i2o_dev);
}
i2o_dev->lct_data = *entry;
- snprintf(i2o_dev->device.bus_id, BUS_ID_SIZE, "%d:%03x", c->unit,
- i2o_dev->lct_data.tid);
+ dev_set_name(&i2o_dev->device, "%d:%03x", c->unit,
+ i2o_dev->lct_data.tid);
i2o_dev->iop = c;
i2o_dev->device.parent = &c->device;
i2o_driver_notify_device_add_all(i2o_dev);
- pr_debug("i2o: device %s added\n", i2o_dev->device.bus_id);
+ pr_debug("i2o: device %s added\n", dev_name(&i2o_dev->device));
return 0;
{
struct i2o_device *d = (struct i2o_device *)seq->private;
- seq_printf(seq, "%s\n", d->device.bus_id);
+ seq_printf(seq, "%s\n", dev_name(&d->device));
return 0;
}
c->device.release = &i2o_iop_release;
- snprintf(c->device.bus_id, BUS_ID_SIZE, "iop%d", c->unit);
+ dev_set_name(&c->device, "iop%d", c->unit);
#if BITS_PER_LONG == 64
spin_lock_init(&c->context_list_lock);
};
static struct device gru_device = {
- .bus_id = {0},
+ .init_name = "",
.driver = &gru_driver,
};
};
struct device xp_dbg_subname = {
- .bus_id = {0}, /* set to "" */
+ .init_name = "", /* set to "" */
.driver = &xp_dbg_name
};
};
struct device xpc_part_dbg_subname = {
- .bus_id = {0}, /* set to "part" at xpc_init() time */
+ .init_name = "", /* set to "part" at xpc_init() time */
.driver = &xpc_dbg_name
};
struct device xpc_chan_dbg_subname = {
- .bus_id = {0}, /* set to "chan" at xpc_init() time */
+ .init_name = "", /* set to "chan" at xpc_init() time */
.driver = &xpc_dbg_name
};
int ret;
struct task_struct *kthread;
- snprintf(xpc_part->bus_id, BUS_ID_SIZE, "part");
- snprintf(xpc_chan->bus_id, BUS_ID_SIZE, "chan");
+ dev_set_name(xpc_part, "part");
+ dev_set_name(xpc_chan, "chan");
if (is_shub()) {
/*
};
struct device xpnet_dbg_subname = {
- .bus_id = {0}, /* set to "" */
+ .init_name = "", /* set to "" */
.driver = &xpnet_dbg_name
};
if (sock) {
printk(KERN_INFO
"%s : demand removing card from socket %u:%u\n",
- fm->dev.bus_id, fm->id, cnt);
+ dev_name(&fm->dev), fm->id, cnt);
fm->sockets[cnt] = NULL;
sock_addr = sock->addr;
spin_unlock_irqrestore(&fm->lock, flags);
if (rc)
return rc;
- snprintf(fm->dev.bus_id, BUS_ID_SIZE, "tifm%u", fm->id);
+ dev_set_name(&fm->dev, "tifm%u", fm->id);
rc = device_add(&fm->dev);
if (rc) {
spin_lock(&tifm_adapter_lock);
sock->dev.dma_mask = fm->dev.parent->dma_mask;
sock->dev.release = tifm_free_device;
- snprintf(sock->dev.bus_id, BUS_ID_SIZE,
- "tifm_%s%u:%u", tifm_media_type_name(type, 2),
- fm->id, id);
+ dev_set_name(&sock->dev, "tifm_%s%u:%u",
+ tifm_media_type_name(type, 2), fm->id, id);
printk(KERN_INFO DRIVER_NAME
": %s card detected in socket %u:%u\n",
tifm_media_type_name(type, 0), fm->id, id);
static int erase_chip(struct m25p *flash)
{
DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %dKiB\n",
- flash->spi->dev.bus_id, __func__,
+ dev_name(&flash->spi->dev), __func__,
flash->mtd.size / 1024);
/* Wait until finished previous write command. */
static int erase_sector(struct m25p *flash, u32 offset)
{
DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %dKiB at 0x%08x\n",
- flash->spi->dev.bus_id, __func__,
+ dev_name(&flash->spi->dev), __func__,
flash->mtd.erasesize / 1024, offset);
/* Wait until finished previous write command. */
u32 addr,len;
DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %d\n",
- flash->spi->dev.bus_id, __func__, "at",
+ dev_name(&flash->spi->dev), __func__, "at",
(u32)instr->addr, instr->len);
/* sanity checks */
struct spi_message m;
DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n",
- flash->spi->dev.bus_id, __func__, "from",
+ dev_name(&flash->spi->dev), __func__, "from",
(u32)from, len);
/* sanity checks */
struct spi_message m;
DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n",
- flash->spi->dev.bus_id, __func__, "to",
+ dev_name(&flash->spi->dev), __func__, "to",
(u32)to, len);
if (retlen)
tmp = spi_write_then_read(spi, &code, 1, id, 5);
if (tmp < 0) {
DEBUG(MTD_DEBUG_LEVEL0, "%s: error %d reading JEDEC ID\n",
- spi->dev.bus_id, tmp);
+ dev_name(&spi->dev), tmp);
return NULL;
}
jedec = id[0];
/* unrecognized chip? */
if (i == ARRAY_SIZE(m25p_data)) {
DEBUG(MTD_DEBUG_LEVEL0, "%s: unrecognized id %s\n",
- spi->dev.bus_id, data->type);
+ dev_name(&spi->dev), data->type);
info = NULL;
/* recognized; is that chip really what's there? */
if (data && data->name)
flash->mtd.name = data->name;
else
- flash->mtd.name = spi->dev.bus_id;
+ flash->mtd.name = dev_name(&spi->dev);
flash->mtd.type = MTD_NORFLASH;
flash->mtd.writesize = 1;
status = dataflash_status(spi);
if (status < 0) {
DEBUG(MTD_DEBUG_LEVEL1, "%s: status %d?\n",
- spi->dev.bus_id, status);
+ dev_name(&spi->dev), status);
status = 0;
}
uint8_t *command;
DEBUG(MTD_DEBUG_LEVEL2, "%s: erase addr=0x%x len 0x%x\n",
- spi->dev.bus_id,
+ dev_name(&spi->dev),
instr->addr, instr->len);
/* Sanity checks */
if (status < 0) {
printk(KERN_ERR "%s: erase %x, err %d\n",
- spi->dev.bus_id, pageaddr, status);
+ dev_name(&spi->dev), pageaddr, status);
/* REVISIT: can retry instr->retries times; or
* giveup and instr->fail_addr = instr->addr;
*/
int status;
DEBUG(MTD_DEBUG_LEVEL2, "%s: read 0x%x..0x%x\n",
- priv->spi->dev.bus_id, (unsigned)from, (unsigned)(from + len));
+ dev_name(&priv->spi->dev), (unsigned)from, (unsigned)(from + len));
*retlen = 0;
status = 0;
} else
DEBUG(MTD_DEBUG_LEVEL1, "%s: read %x..%x --> %d\n",
- priv->spi->dev.bus_id,
+ dev_name(&priv->spi->dev),
(unsigned)from, (unsigned)(from + len),
status);
return status;
uint8_t *command;
DEBUG(MTD_DEBUG_LEVEL2, "%s: write 0x%x..0x%x\n",
- spi->dev.bus_id, (unsigned)to, (unsigned)(to + len));
+ dev_name(&spi->dev), (unsigned)to, (unsigned)(to + len));
*retlen = 0;
status = spi_sync(spi, &msg);
if (status < 0)
DEBUG(MTD_DEBUG_LEVEL1, "%s: xfer %u -> %d \n",
- spi->dev.bus_id, addr, status);
+ dev_name(&spi->dev), addr, status);
(void) dataflash_waitready(priv->spi);
}
spi_transfer_del(x + 1);
if (status < 0)
DEBUG(MTD_DEBUG_LEVEL1, "%s: pgm %u/%u -> %d \n",
- spi->dev.bus_id, addr, writelen, status);
+ dev_name(&spi->dev), addr, writelen, status);
(void) dataflash_waitready(priv->spi);
status = spi_sync(spi, &msg);
if (status < 0)
DEBUG(MTD_DEBUG_LEVEL1, "%s: compare %u -> %d \n",
- spi->dev.bus_id, addr, status);
+ dev_name(&spi->dev), addr, status);
status = dataflash_waitready(priv->spi);
/* Check result of the compare operation */
if (status & (1 << 6)) {
printk(KERN_ERR "%s: compare page %u, err %d\n",
- spi->dev.bus_id, pageaddr, status);
+ dev_name(&spi->dev), pageaddr, status);
remaining = 0;
status = -EIO;
break;
tmp = spi_write_then_read(spi, &code, 1, id, 3);
if (tmp < 0) {
DEBUG(MTD_DEBUG_LEVEL0, "%s: error %d reading JEDEC ID\n",
- spi->dev.bus_id, tmp);
+ dev_name(&spi->dev), tmp);
return ERR_PTR(tmp);
}
if (id[0] != 0x1f)
status = dataflash_status(spi);
if (status <= 0 || status == 0xff) {
DEBUG(MTD_DEBUG_LEVEL1, "%s: status error %d\n",
- spi->dev.bus_id, status);
+ dev_name(&spi->dev), status);
if (status == 0 || status == 0xff)
status = -ENODEV;
return status;
/* obsolete AT45DB1282 not (yet?) supported */
default:
DEBUG(MTD_DEBUG_LEVEL1, "%s: unsupported device (%x)\n",
- spi->dev.bus_id, status & 0x3c);
+ dev_name(&spi->dev), status & 0x3c);
status = -ENODEV;
}
if (status < 0)
DEBUG(MTD_DEBUG_LEVEL1, "%s: add_dataflash --> %d\n",
- spi->dev.bus_id, status);
+ dev_name(&spi->dev), status);
return status;
}
struct dataflash *flash = dev_get_drvdata(&spi->dev);
int status;
- DEBUG(MTD_DEBUG_LEVEL1, "%s: remove\n", spi->dev.bus_id);
+ DEBUG(MTD_DEBUG_LEVEL1, "%s: remove\n", dev_name(&spi->dev));
if (mtd_has_partitions() && flash->partitioned)
status = del_mtd_partitions(&flash->mtd);
info->map.bankwidth = plat->width;
info->map.phys = res->start;
info->map.virt = base;
- info->map.name = dev->dev.bus_id;
+ info->map.name = dev_name(&dev->dev);
info->map.set_vpp = armflash_set_vpp;
simple_map_init(&info->map);
*/
info->map.map_priv_2 = (unsigned long) ixp_data->bank_setup;
- info->map.name = dev->dev.bus_id;
+ info->map.name = dev_name(&dev->dev);
info->map.read = ixp2000_flash_read8;
info->map.write = ixp2000_flash_write8;
info->map.copy_from = ixp2000_flash_copy_from;
info->res = request_mem_region(dev->resource->start,
dev->resource->end - dev->resource->start + 1,
- dev->dev.bus_id);
+ dev_name(&dev->dev));
if (!info->res) {
dev_err(&dev->dev, "Could not reserve memory region\n");
err = -ENOMEM;
* handle that.
*/
info->map.bankwidth = 2;
- info->map.name = dev->dev.bus_id;
+ info->map.name = dev_name(&dev->dev);
info->map.read = ixp4xx_read16,
info->map.write = ixp4xx_probe_write16,
info->map.copy_from = ixp4xx_copy_from,
err = -ENOMEM;
goto out_release_mem_region;
}
- info->map.name = pdev->dev.bus_id;
+ info->map.name = dev_name(&pdev->dev);
info->map.phys = res->start;
info->map.size = size;
info->map.bankwidth = pdata->width;
if (!devm_request_mem_region(&dev->dev,
dev->resource[i].start,
dev->resource[i].end - dev->resource[i].start + 1,
- dev->dev.bus_id)) {
+ dev_name(&dev->dev))) {
dev_err(&dev->dev, "Could not reserve memory region\n");
err = -ENOMEM;
goto err_out;
}
- info->map[i].name = dev->dev.bus_id;
+ info->map[i].name = dev_name(&dev->dev);
info->map[i].phys = dev->resource[i].start;
info->map[i].size = dev->resource[i].end - dev->resource[i].start + 1;
info->map[i].bankwidth = physmap_data->width;
* We detected multiple devices. Concatenate them together.
*/
#ifdef CONFIG_MTD_CONCAT
- info->cmtd = mtd_concat_create(info->mtd, devices_found, dev->dev.bus_id);
+ info->cmtd = mtd_concat_create(info->mtd, devices_found, dev_name(&dev->dev));
if (info->cmtd == NULL)
err = -ENXIO;
#else
err = -EBUSY;
info->res = request_mem_region(res.start, res.end - res.start + 1,
- dev->dev.bus_id);
+ dev_name(&dev->dev));
if (!info->res)
goto err_out;
goto err_out;
}
- info->map.name = dev->dev.bus_id;
+ info->map.name = dev_name(&dev->dev);
info->map.phys = res.start;
info->map.size = res.end - res.start + 1;
info->map.bankwidth = *width;
*/
struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to concatenate */
int num_devs, /* number of subdevices */
- char *name)
+ const char *name)
{ /* name for the new device */
int i;
size_t size;
fun->rnb_gpio = of_get_gpio(ofdev->node, 0);
if (fun->rnb_gpio >= 0) {
- ret = gpio_request(fun->rnb_gpio, ofdev->dev.bus_id);
+ ret = gpio_request(fun->rnb_gpio, dev_name(&ofdev->dev));
if (ret) {
dev_err(&ofdev->dev, "can't request RNB gpio\n");
goto err2;
data->chip.priv = &data;
data->mtd.priv = &data->chip;
data->mtd.owner = THIS_MODULE;
- data->mtd.name = pdev->dev.bus_id;
+ data->mtd.name = dev_name(&pdev->dev);
data->chip.IO_ADDR_R = data->io_base;
data->chip.IO_ADDR_W = data->io_base;
nand_chip->chip_delay = 15;
retval = request_irq(irq, &tmio_irq,
- IRQF_DISABLED, dev->dev.bus_id, tmio);
+ IRQF_DISABLED, dev_name(&dev->dev), tmio);
if (retval) {
dev_err(&dev->dev, "request_irq error %d\n", retval);
goto err_irq;
info->onenand.mmcontrol = pdata->mmcontrol;
info->onenand.irq = platform_get_irq(pdev, 0);
- info->mtd.name = pdev->dev.bus_id;
+ info->mtd.name = dev_name(&pdev->dev);
info->mtd.priv = &info->onenand;
info->mtd.owner = THIS_MODULE;
c->onenand.base);
c->pdev = pdev;
- c->mtd.name = pdev->dev.bus_id;
+ c->mtd.name = dev_name(&pdev->dev);
c->mtd.priv = &c->onenand;
c->mtd.owner = THIS_MODULE;
ubi->dev.release = dev_release;
ubi->dev.devt = ubi->cdev.dev;
ubi->dev.class = ubi_class;
- sprintf(&ubi->dev.bus_id[0], UBI_NAME_STR"%d", ubi->ubi_num);
+ dev_set_name(&ubi->dev, UBI_NAME_STR"%d", ubi->ubi_num);
err = device_register(&ubi->dev);
if (err)
return err;
vol->dev.devt = dev;
vol->dev.class = ubi_class;
- sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id);
+ dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id);
err = device_register(&vol->dev);
if (err) {
ubi_err("cannot register device");
vol->dev.parent = &ubi->dev;
vol->dev.devt = dev;
vol->dev.class = ubi_class;
- sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id);
+ dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id);
err = device_register(&vol->dev);
if (err)
goto out_gluebi;
#ifdef CONFIG_PM_SLEEP
+static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev)
+{
+ struct pci_driver *drv = pci_dev->driver;
+
+ return drv && (drv->suspend || drv->suspend_late || drv->resume
+ || drv->resume_early);
+}
+
/*
* Default "suspend" method for devices that have no driver provided suspend,
* or not even a driver at all.
/*
* Default "resume" method for devices that have no driver provided resume,
- * or not even a driver at all.
+ * or not even a driver at all (first part).
*/
-static int pci_default_pm_resume(struct pci_dev *pci_dev)
+static void pci_default_pm_resume_early(struct pci_dev *pci_dev)
{
- int retval = 0;
-
/* restore the PCI config space */
pci_restore_state(pci_dev);
+}
+
+/*
+ * Default "resume" method for devices that have no driver provided resume,
+ * or not even a driver at all (second part).
+ */
+static int pci_default_pm_resume_late(struct pci_dev *pci_dev)
+{
+ int retval;
+
/* if the device was enabled before suspend, reenable */
retval = pci_reenable_device(pci_dev);
/*
struct pci_dev * pci_dev = to_pci_dev(dev);
struct pci_driver * drv = pci_dev->driver;
- if (drv && drv->resume)
+ if (drv && drv->resume) {
error = drv->resume(pci_dev);
- else
- error = pci_default_pm_resume(pci_dev);
+ } else {
+ pci_default_pm_resume_early(pci_dev);
+ error = pci_default_pm_resume_late(pci_dev);
+ }
return error;
}
if (drv->pm->suspend) {
error = drv->pm->suspend(dev);
suspend_report_result(drv->pm->suspend, error);
- } else {
- pci_default_pm_suspend(pci_dev);
}
- } else {
+ } else if (pci_has_legacy_pm_support(pci_dev)) {
error = pci_legacy_suspend(dev, PMSG_SUSPEND);
}
pci_fixup_device(pci_fixup_suspend, pci_dev);
static int pci_pm_suspend_noirq(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- struct pci_driver *drv = pci_dev->driver;
+ struct device_driver *drv = dev->driver;
int error = 0;
if (drv && drv->pm) {
error = drv->pm->suspend_noirq(dev);
suspend_report_result(drv->pm->suspend_noirq, error);
}
- } else {
+ } else if (pci_has_legacy_pm_support(pci_dev)) {
error = pci_legacy_suspend_late(dev, PMSG_SUSPEND);
+ } else {
+ pci_default_pm_suspend(pci_dev);
}
return error;
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct device_driver *drv = dev->driver;
- int error;
+ int error = 0;
pci_fixup_device(pci_fixup_resume, pci_dev);
if (drv && drv->pm) {
- error = drv->pm->resume ? drv->pm->resume(dev) :
- pci_default_pm_resume(pci_dev);
- } else {
+ if (drv->pm->resume)
+ error = drv->pm->resume(dev);
+ } else if (pci_has_legacy_pm_support(pci_dev)) {
error = pci_legacy_resume(dev);
+ } else {
+ error = pci_default_pm_resume_late(pci_dev);
}
return error;
static int pci_pm_resume_noirq(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- struct pci_driver *drv = pci_dev->driver;
+ struct device_driver *drv = dev->driver;
int error = 0;
- pci_fixup_device(pci_fixup_resume_early, pci_dev);
+ pci_fixup_device(pci_fixup_resume_early, to_pci_dev(dev));
if (drv && drv->pm) {
if (drv->pm->resume_noirq)
error = drv->pm->resume_noirq(dev);
- } else {
+ } else if (pci_has_legacy_pm_support(pci_dev)) {
error = pci_legacy_resume_early(dev);
+ } else {
+ pci_default_pm_resume_early(pci_dev);
}
return error;
if (drv->pm->freeze) {
error = drv->pm->freeze(dev);
suspend_report_result(drv->pm->freeze, error);
- } else {
- pci_default_pm_suspend(pci_dev);
}
- } else {
+ } else if (pci_has_legacy_pm_support(pci_dev)) {
error = pci_legacy_suspend(dev, PMSG_FREEZE);
pci_fixup_device(pci_fixup_suspend, pci_dev);
}
static int pci_pm_freeze_noirq(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- struct pci_driver *drv = pci_dev->driver;
+ struct device_driver *drv = dev->driver;
int error = 0;
if (drv && drv->pm) {
error = drv->pm->freeze_noirq(dev);
suspend_report_result(drv->pm->freeze_noirq, error);
}
- } else {
+ } else if (pci_has_legacy_pm_support(pci_dev)) {
error = pci_legacy_suspend_late(dev, PMSG_FREEZE);
+ } else {
+ pci_default_pm_suspend(pci_dev);
}
return error;
static int pci_pm_thaw(struct device *dev)
{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
struct device_driver *drv = dev->driver;
int error = 0;
if (drv && drv->pm) {
if (drv->pm->thaw)
error = drv->pm->thaw(dev);
- } else {
- pci_fixup_device(pci_fixup_resume, to_pci_dev(dev));
+ } else if (pci_has_legacy_pm_support(pci_dev)) {
+ pci_fixup_device(pci_fixup_resume, pci_dev);
error = pci_legacy_resume(dev);
}
static int pci_pm_thaw_noirq(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- struct pci_driver *drv = pci_dev->driver;
+ struct device_driver *drv = dev->driver;
int error = 0;
if (drv && drv->pm) {
if (drv->pm->thaw_noirq)
error = drv->pm->thaw_noirq(dev);
- } else {
- pci_fixup_device(pci_fixup_resume_early, pci_dev);
+ } else if (pci_has_legacy_pm_support(pci_dev)) {
+ pci_fixup_device(pci_fixup_resume_early, to_pci_dev(dev));
error = pci_legacy_resume_early(dev);
}
static int pci_pm_poweroff(struct device *dev)
{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
struct device_driver *drv = dev->driver;
int error = 0;
- pci_fixup_device(pci_fixup_suspend, to_pci_dev(dev));
+ pci_fixup_device(pci_fixup_suspend, pci_dev);
if (drv && drv->pm) {
if (drv->pm->poweroff) {
error = drv->pm->poweroff(dev);
suspend_report_result(drv->pm->poweroff, error);
}
- } else {
+ } else if (pci_has_legacy_pm_support(pci_dev)) {
error = pci_legacy_suspend(dev, PMSG_HIBERNATE);
}
static int pci_pm_poweroff_noirq(struct device *dev)
{
- struct pci_dev *pci_dev = to_pci_dev(dev);
- struct pci_driver *drv = pci_dev->driver;
+ struct device_driver *drv = dev->driver;
int error = 0;
if (drv && drv->pm) {
error = drv->pm->poweroff_noirq(dev);
suspend_report_result(drv->pm->poweroff_noirq, error);
}
- } else {
+ } else if (pci_has_legacy_pm_support(to_pci_dev(dev))) {
error = pci_legacy_suspend_late(dev, PMSG_HIBERNATE);
}
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct device_driver *drv = dev->driver;
- int error;
+ int error = 0;
if (drv && drv->pm) {
- error = drv->pm->restore ? drv->pm->restore(dev) :
- pci_default_pm_resume(pci_dev);
- } else {
+ if (drv->pm->restore)
+ error = drv->pm->restore(dev);
+ } else if (pci_has_legacy_pm_support(pci_dev)) {
error = pci_legacy_resume(dev);
+ } else {
+ error = pci_default_pm_resume_late(pci_dev);
}
pci_fixup_device(pci_fixup_resume, pci_dev);
static int pci_pm_restore_noirq(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- struct pci_driver *drv = pci_dev->driver;
+ struct device_driver *drv = dev->driver;
int error = 0;
pci_fixup_device(pci_fixup_resume, pci_dev);
if (drv && drv->pm) {
if (drv->pm->restore_noirq)
error = drv->pm->restore_noirq(dev);
- } else {
+ } else if (pci_has_legacy_pm_support(pci_dev)) {
error = pci_legacy_resume_early(dev);
+ } else {
+ pci_default_pm_resume_early(pci_dev);
}
pci_fixup_device(pci_fixup_resume_early, pci_dev);
#endif /* !CONFIG_HIBERNATION */
-struct pm_ext_ops pci_pm_ops = {
- .base = {
- .prepare = pci_pm_prepare,
- .complete = pci_pm_complete,
- .suspend = pci_pm_suspend,
- .resume = pci_pm_resume,
- .freeze = pci_pm_freeze,
- .thaw = pci_pm_thaw,
- .poweroff = pci_pm_poweroff,
- .restore = pci_pm_restore,
- },
+struct dev_pm_ops pci_dev_pm_ops = {
+ .prepare = pci_pm_prepare,
+ .complete = pci_pm_complete,
+ .suspend = pci_pm_suspend,
+ .resume = pci_pm_resume,
+ .freeze = pci_pm_freeze,
+ .thaw = pci_pm_thaw,
+ .poweroff = pci_pm_poweroff,
+ .restore = pci_pm_restore,
.suspend_noirq = pci_pm_suspend_noirq,
.resume_noirq = pci_pm_resume_noirq,
.freeze_noirq = pci_pm_freeze_noirq,
.restore_noirq = pci_pm_restore_noirq,
};
-#define PCI_PM_OPS_PTR &pci_pm_ops
+#define PCI_PM_OPS_PTR (&pci_dev_pm_ops)
#else /* !CONFIG_PM_SLEEP */
drv->driver.owner = owner;
drv->driver.mod_name = mod_name;
- if (drv->pm)
- drv->driver.pm = &drv->pm->base;
-
spin_lock_init(&drv->dynids.lock);
INIT_LIST_HEAD(&drv->dynids.list);
card->number = id;
card->dev.parent = &card->protocol->dev;
- sprintf(card->dev.bus_id, "%02x:%02x", card->protocol->number,
- card->number);
+ dev_set_name(&card->dev, "%02x:%02x", card->protocol->number, card->number);
card->dev.coherent_dma_mask = DMA_24BIT_MASK;
card->dev.dma_mask = &card->dev.coherent_dma_mask;
{
dev->dev.parent = &card->dev;
dev->card_link = NULL;
- snprintf(dev->dev.bus_id, BUS_ID_SIZE, "%02x:%02x.%02x",
- dev->protocol->number, card->number, dev->number);
+ dev_set_name(&dev->dev, "%02x:%02x.%02x",
+ dev->protocol->number, card->number, dev->number);
spin_lock(&pnp_lock);
dev->card = card;
list_add_tail(&dev->card_list, &card->devices);
spin_unlock(&pnp_lock);
protocol->number = nodenum;
- sprintf(protocol->dev.bus_id, "pnp%d", nodenum);
+ dev_set_name(&protocol->dev, "pnp%d", nodenum);
return device_register(&protocol->dev);
}
dev->dev.coherent_dma_mask = dev->dma_mask;
dev->dev.release = &pnp_release_device;
- sprintf(dev->dev.bus_id, "%02x:%02x", dev->protocol->number,
- dev->number);
+ dev_set_name(&dev->dev, "%02x:%02x", dev->protocol->number, dev->number);
dev_id = pnp_add_id(dev, pnpid);
if (!dev_id) {
resource_size_t end, int port)
{
char *regionid;
- const char *pnpid = dev->dev.bus_id;
+ const char *pnpid = dev_name(&dev->dev);
struct resource *res;
regionid = kmalloc(16, GFP_KERNEL);
pdata = pdev->dev.platform_data;
di->dev = &pdev->dev;
di->w1_dev = pdev->dev.parent;
- di->bat.name = pdev->dev.bus_id;
+ di->bat.name = dev_name(&pdev->dev);
di->bat.type = POWER_SUPPLY_TYPE_BATTERY;
di->bat.properties = ds2760_battery_props;
di->bat.num_properties = ARRAY_SIZE(ds2760_battery_props);
}
INIT_DELAYED_WORK(&di->monitor_work, ds2760_battery_work);
- di->monitor_wqueue = create_singlethread_workqueue(pdev->dev.bus_id);
+ di->monitor_wqueue = create_singlethread_workqueue(dev_name(&pdev->dev));
if (!di->monitor_wqueue) {
retval = -ESRCH;
goto workqueue_failed;
CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
-obj-y += s390mach.o sysinfo.o s390_rdev.o
+obj-y += s390mach.o sysinfo.o
obj-y += cio/ block/ char/ crypto/ net/ scsi/ kvm/
drivers-y += drivers/s390/built-in.o
#include <asm/io.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
-#include <asm/s390_rdev.h>
#define DCSSBLK_NAME "dcssblk"
#define DCSSBLK_MINORS_PER_DISK 1
static void __exit
dcssblk_exit(void)
{
- s390_root_dev_unregister(dcssblk_root_dev);
+ root_device_unregister(dcssblk_root_dev);
unregister_blkdev(dcssblk_major, DCSSBLK_NAME);
}
{
int rc;
- dcssblk_root_dev = s390_root_dev_register("dcssblk");
+ dcssblk_root_dev = root_device_register("dcssblk");
if (IS_ERR(dcssblk_root_dev))
return PTR_ERR(dcssblk_root_dev);
rc = device_create_file(dcssblk_root_dev, &dev_attr_add);
if (rc) {
- s390_root_dev_unregister(dcssblk_root_dev);
+ root_device_unregister(dcssblk_root_dev);
return rc;
}
rc = device_create_file(dcssblk_root_dev, &dev_attr_remove);
if (rc) {
- s390_root_dev_unregister(dcssblk_root_dev);
+ root_device_unregister(dcssblk_root_dev);
return rc;
}
rc = register_blkdev(0, DCSSBLK_NAME);
if (rc < 0) {
- s390_root_dev_unregister(dcssblk_root_dev);
+ root_device_unregister(dcssblk_root_dev);
return rc;
}
dcssblk_major = rc;
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
-#include <asm/s390_rdev.h>
#include <asm/reset.h>
#include <asm/airq.h>
#include <asm/atomic.h>
}
/* Create /sys/devices/ap. */
- ap_root_device = s390_root_dev_register("ap");
+ ap_root_device = root_device_register("ap");
rc = IS_ERR(ap_root_device) ? PTR_ERR(ap_root_device) : 0;
if (rc)
goto out_bus;
hrtimer_cancel(&ap_poll_timer);
destroy_workqueue(ap_work_queue);
out_root:
- s390_root_dev_unregister(ap_root_device);
+ root_device_unregister(ap_root_device);
out_bus:
while (i--)
bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
hrtimer_cancel(&ap_poll_timer);
destroy_workqueue(ap_work_queue);
tasklet_kill(&ap_tasklet);
- s390_root_dev_unregister(ap_root_device);
+ root_device_unregister(ap_root_device);
while ((dev = bus_find_device(&ap_bus_type, NULL, NULL,
__ap_match_all)))
{
#include <asm/kvm_virtio.h>
#include <asm/setup.h>
#include <asm/s390_ext.h>
-#include <asm/s390_rdev.h>
#define VIRTIO_SUBCODE_64 0x0D00
if (!MACHINE_IS_KVM)
return -ENODEV;
- kvm_root = s390_root_dev_register("kvm_s390");
+ kvm_root = root_device_register("kvm_s390");
if (IS_ERR(kvm_root)) {
rc = PTR_ERR(kvm_root);
printk(KERN_ERR "Could not register kvm_s390 root device");
rc = vmem_add_mapping(real_memory_size, PAGE_SIZE);
if (rc) {
- s390_root_dev_unregister(kvm_root);
+ root_device_unregister(kvm_root);
return rc;
}
#include <linux/module.h>
#include <linux/err.h>
-#include <asm/s390_rdev.h>
#include <asm/ccwdev.h>
#include <asm/ccwgroup.h>
{
int rc;
- cu3088_root_dev = s390_root_dev_register("cu3088");
+ cu3088_root_dev = root_device_register("cu3088");
if (IS_ERR(cu3088_root_dev))
return PTR_ERR(cu3088_root_dev);
rc = ccw_driver_register(&cu3088_driver);
if (rc)
- s390_root_dev_unregister(cu3088_root_dev);
+ root_device_unregister(cu3088_root_dev);
return rc;
}
cu3088_exit (void)
{
ccw_driver_unregister(&cu3088_driver);
- s390_root_dev_unregister(cu3088_root_dev);
+ root_device_unregister(cu3088_root_dev);
}
MODULE_DEVICE_TABLE(ccw,cu3088_ids);
#include <asm/ebcdic.h>
#include <asm/io.h>
-#include <asm/s390_rdev.h>
#include "qeth_core.h"
#include "qeth_core_offl.h"
&driver_attr_group);
if (rc)
goto driver_err;
- qeth_core_root_dev = s390_root_dev_register("qeth");
+ qeth_core_root_dev = root_device_register("qeth");
rc = IS_ERR(qeth_core_root_dev) ? PTR_ERR(qeth_core_root_dev) : 0;
if (rc)
goto register_err;
return 0;
slab_err:
- s390_root_dev_unregister(qeth_core_root_dev);
+ root_device_unregister(qeth_core_root_dev);
register_err:
driver_remove_file(&qeth_core_ccwgroup_driver.driver,
&driver_attr_group);
static void __exit qeth_core_exit(void)
{
- s390_root_dev_unregister(qeth_core_root_dev);
+ root_device_unregister(qeth_core_root_dev);
driver_remove_file(&qeth_core_ccwgroup_driver.driver,
&driver_attr_group);
ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
#include <linux/mii.h>
#include <linux/ip.h>
-#include <asm/s390_rdev.h>
-
#include "qeth_core.h"
#include "qeth_core_offl.h"
#include <net/ip.h>
#include <net/arp.h>
-#include <asm/s390_rdev.h>
-
#include "qeth_l3.h"
#include "qeth_core_offl.h"
+++ /dev/null
-/*
- * drivers/s390/s390_rdev.c
- * s390 root device
- *
- * Copyright (C) 2002, 2005 IBM Deutschland Entwicklung GmbH,
- * IBM Corporation
- * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
- * Carsten Otte (cotte@de.ibm.com)
- */
-
-#include <linux/slab.h>
-#include <linux/err.h>
-#include <linux/device.h>
-#include <asm/s390_rdev.h>
-
-static void
-s390_root_dev_release(struct device *dev)
-{
- kfree(dev);
-}
-
-struct device *
-s390_root_dev_register(const char *name)
-{
- struct device *dev;
- int ret;
-
- if (!strlen(name))
- return ERR_PTR(-EINVAL);
- dev = kzalloc(sizeof(struct device), GFP_KERNEL);
- if (!dev)
- return ERR_PTR(-ENOMEM);
- dev_set_name(dev, name);
- dev->release = s390_root_dev_release;
- ret = device_register(dev);
- if (ret) {
- kfree(dev);
- return ERR_PTR(ret);
- }
- return dev;
-}
-
-void
-s390_root_dev_unregister(struct device *dev)
-{
- if (dev)
- device_unregister(dev);
-}
-
-EXPORT_SYMBOL(s390_root_dev_register);
-EXPORT_SYMBOL(s390_root_dev_unregister);
if (!tries)
printk(KERN_ERR "%s%s%s%d: Unable to drain "
"transmitter\n",
- port->dev ? port->dev->bus_id : "",
+ port->dev ? dev_name(port->dev) : "",
port->dev ? ": " : "",
drv->dev_name,
drv->tty_driver->name_base + port->line);
}
printk(KERN_INFO "%s%s%s%d at %s (irq = %d) is a %s\n",
- port->dev ? port->dev->bus_id : "",
+ port->dev ? dev_name(port->dev) : "",
port->dev ? ": " : "",
drv->dev_name,
drv->tty_driver->name_base + port->line,
{
const struct spi_device *spi = to_spi_device(dev);
- return snprintf(buf, BUS_ID_SIZE + 1, "%s\n", spi->modalias);
+ return sprintf(buf, "%s\n", spi->modalias);
}
static struct device_attribute spi_dev_attrs[] = {
{
const struct spi_device *spi = to_spi_device(dev);
- return strncmp(spi->modalias, drv->name, BUS_ID_SIZE) == 0;
+ return strcmp(spi->modalias, drv->name) == 0;
}
static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
}
/* Set the bus ID string */
- snprintf(spi->dev.bus_id, sizeof spi->dev.bus_id,
- "%s.%u", spi->master->dev.bus_id,
+ dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
spi->chip_select);
*/
mutex_lock(&spi_add_lock);
- if (bus_find_device_by_name(&spi_bus_type, NULL, spi->dev.bus_id)
+ if (bus_find_device_by_name(&spi_bus_type, NULL, dev_name(&spi->dev))
!= NULL) {
dev_err(dev, "chipselect %d already in use\n",
spi->chip_select);
status = spi->master->setup(spi);
if (status < 0) {
dev_err(dev, "can't %s %s, status %d\n",
- "setup", spi->dev.bus_id, status);
+ "setup", dev_name(&spi->dev), status);
goto done;
}
status = device_add(&spi->dev);
if (status < 0)
dev_err(dev, "can't %s %s, status %d\n",
- "add", spi->dev.bus_id, status);
+ "add", dev_name(&spi->dev), status);
else
- dev_dbg(dev, "registered child %s\n", spi->dev.bus_id);
+ dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
done:
mutex_unlock(&spi_add_lock);
/* register the device, then userspace will see it.
* registration fails if the bus ID is in use.
*/
- snprintf(master->dev.bus_id, sizeof master->dev.bus_id,
- "spi%u", master->bus_num);
+ dev_set_name(&master->dev, "spi%u", master->bus_num);
status = device_add(&master->dev);
if (status < 0)
goto done;
- dev_dbg(dev, "registered master %s%s\n", master->dev.bus_id,
+ dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
dynamic ? " (dynamic)" : "");
/* populate children from any spi device tables */
/* this task is the only thing to touch the SPI bits */
bitbang->busy = 0;
bitbang->workqueue = create_singlethread_workqueue(
- bitbang->master->dev.parent->bus_id);
+ dev_name(bitbang->master->dev.parent));
if (bitbang->workqueue == NULL) {
status = -EBUSY;
goto err1;
pp->dataflash = spi_new_device(pp->bitbang.master, &pp->info[0]);
if (pp->dataflash)
pr_debug("%s: dataflash at %s\n", p->name,
- pp->dataflash->dev.bus_id);
+ dev_name(&pp->dataflash->dev));
// dev_info(_what?_, ...)
pr_info("%s: AVR Butterfly\n", p->name);
pp->spidev_lm70 = spi_new_device(pp->bitbang.master, &pp->info);
if (pp->spidev_lm70)
dev_dbg(&pp->spidev_lm70->dev, "spidev_lm70 at %s\n",
- pp->spidev_lm70->dev.bus_id);
+ dev_name(&pp->spidev_lm70->dev));
else {
printk(KERN_WARNING "%s: spi_new_device failed\n", DRVNAME);
status = -ENODEV;
struct thermal_zone_device *tz;
struct thermal_cooling_device *cdev;
- if (!strncmp(dev->bus_id, "thermal_zone", sizeof "thermal_zone" - 1)) {
+ if (!strncmp(dev_name(dev), "thermal_zone", sizeof "thermal_zone" - 1)) {
tz = to_thermal_zone(dev);
kfree(tz);
} else {
cdev->ops = ops;
cdev->device.class = &thermal_class;
cdev->devdata = devdata;
- sprintf(cdev->device.bus_id, "cooling_device%d", cdev->id);
+ dev_set_name(&cdev->device, "cooling_device%d", cdev->id);
result = device_register(&cdev->device);
if (result) {
release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id);
tz->device.class = &thermal_class;
tz->devdata = devdata;
tz->trips = trips;
- sprintf(tz->device.bus_id, "thermal_zone%d", tz->id);
+ dev_set_name(&tz->device, "thermal_zone%d", tz->id);
result = device_register(&tz->device);
if (result) {
release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id);
int vma_count;
struct uio_info *info;
struct kobject *map_dir;
+ struct kobject *portio_dir;
};
static int uio_major;
return sprintf(buf, "0x%lx\n", mem->addr & ~PAGE_MASK);
}
-struct uio_sysfs_entry {
+struct map_sysfs_entry {
struct attribute attr;
ssize_t (*show)(struct uio_mem *, char *);
ssize_t (*store)(struct uio_mem *, const char *, size_t);
};
-static struct uio_sysfs_entry addr_attribute =
+static struct map_sysfs_entry addr_attribute =
__ATTR(addr, S_IRUGO, map_addr_show, NULL);
-static struct uio_sysfs_entry size_attribute =
+static struct map_sysfs_entry size_attribute =
__ATTR(size, S_IRUGO, map_size_show, NULL);
-static struct uio_sysfs_entry offset_attribute =
+static struct map_sysfs_entry offset_attribute =
__ATTR(offset, S_IRUGO, map_offset_show, NULL);
static struct attribute *attrs[] = {
{
struct uio_map *map = to_map(kobj);
struct uio_mem *mem = map->mem;
- struct uio_sysfs_entry *entry;
+ struct map_sysfs_entry *entry;
- entry = container_of(attr, struct uio_sysfs_entry, attr);
+ entry = container_of(attr, struct map_sysfs_entry, attr);
if (!entry->show)
return -EIO;
return entry->show(mem, buf);
}
-static struct sysfs_ops uio_sysfs_ops = {
+static struct sysfs_ops map_sysfs_ops = {
.show = map_type_show,
};
static struct kobj_type map_attr_type = {
.release = map_release,
- .sysfs_ops = &uio_sysfs_ops,
+ .sysfs_ops = &map_sysfs_ops,
.default_attrs = attrs,
};
+struct uio_portio {
+ struct kobject kobj;
+ struct uio_port *port;
+};
+#define to_portio(portio) container_of(portio, struct uio_portio, kobj)
+
+static ssize_t portio_start_show(struct uio_port *port, char *buf)
+{
+ return sprintf(buf, "0x%lx\n", port->start);
+}
+
+static ssize_t portio_size_show(struct uio_port *port, char *buf)
+{
+ return sprintf(buf, "0x%lx\n", port->size);
+}
+
+static ssize_t portio_porttype_show(struct uio_port *port, char *buf)
+{
+ const char *porttypes[] = {"none", "x86", "gpio", "other"};
+
+ if ((port->porttype < 0) || (port->porttype > UIO_PORT_OTHER))
+ return -EINVAL;
+
+ return sprintf(buf, "port_%s\n", porttypes[port->porttype]);
+}
+
+struct portio_sysfs_entry {
+ struct attribute attr;
+ ssize_t (*show)(struct uio_port *, char *);
+ ssize_t (*store)(struct uio_port *, const char *, size_t);
+};
+
+static struct portio_sysfs_entry portio_start_attribute =
+ __ATTR(start, S_IRUGO, portio_start_show, NULL);
+static struct portio_sysfs_entry portio_size_attribute =
+ __ATTR(size, S_IRUGO, portio_size_show, NULL);
+static struct portio_sysfs_entry portio_porttype_attribute =
+ __ATTR(porttype, S_IRUGO, portio_porttype_show, NULL);
+
+static struct attribute *portio_attrs[] = {
+ &portio_start_attribute.attr,
+ &portio_size_attribute.attr,
+ &portio_porttype_attribute.attr,
+ NULL,
+};
+
+static void portio_release(struct kobject *kobj)
+{
+ struct uio_portio *portio = to_portio(kobj);
+ kfree(portio);
+}
+
+static ssize_t portio_type_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct uio_portio *portio = to_portio(kobj);
+ struct uio_port *port = portio->port;
+ struct portio_sysfs_entry *entry;
+
+ entry = container_of(attr, struct portio_sysfs_entry, attr);
+
+ if (!entry->show)
+ return -EIO;
+
+ return entry->show(port, buf);
+}
+
+static struct sysfs_ops portio_sysfs_ops = {
+ .show = portio_type_show,
+};
+
+static struct kobj_type portio_attr_type = {
+ .release = portio_release,
+ .sysfs_ops = &portio_sysfs_ops,
+ .default_attrs = portio_attrs,
+};
+
static ssize_t show_name(struct device *dev,
struct device_attribute *attr, char *buf)
{
static int uio_dev_add_attributes(struct uio_device *idev)
{
int ret;
- int mi;
+ int mi, pi;
int map_found = 0;
+ int portio_found = 0;
struct uio_mem *mem;
struct uio_map *map;
+ struct uio_port *port;
+ struct uio_portio *portio;
ret = sysfs_create_group(&idev->dev->kobj, &uio_attr_grp);
if (ret)
idev->map_dir = kobject_create_and_add("maps",
&idev->dev->kobj);
if (!idev->map_dir)
- goto err;
+ goto err_map;
}
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (!map)
- goto err;
+ goto err_map;
kobject_init(&map->kobj, &map_attr_type);
map->mem = mem;
mem->map = map;
ret = kobject_add(&map->kobj, idev->map_dir, "map%d", mi);
if (ret)
- goto err;
+ goto err_map;
ret = kobject_uevent(&map->kobj, KOBJ_ADD);
if (ret)
- goto err;
+ goto err_map;
+ }
+
+ for (pi = 0; pi < MAX_UIO_PORT_REGIONS; pi++) {
+ port = &idev->info->port[pi];
+ if (port->size == 0)
+ break;
+ if (!portio_found) {
+ portio_found = 1;
+ idev->portio_dir = kobject_create_and_add("portio",
+ &idev->dev->kobj);
+ if (!idev->portio_dir)
+ goto err_portio;
+ }
+ portio = kzalloc(sizeof(*portio), GFP_KERNEL);
+ if (!portio)
+ goto err_portio;
+ kobject_init(&portio->kobj, &portio_attr_type);
+ portio->port = port;
+ port->portio = portio;
+ ret = kobject_add(&portio->kobj, idev->portio_dir,
+ "port%d", pi);
+ if (ret)
+ goto err_portio;
+ ret = kobject_uevent(&portio->kobj, KOBJ_ADD);
+ if (ret)
+ goto err_portio;
}
return 0;
-err:
+err_portio:
+ for (pi--; pi >= 0; pi--) {
+ port = &idev->info->port[pi];
+ portio = port->portio;
+ kobject_put(&portio->kobj);
+ }
+ kobject_put(idev->portio_dir);
+err_map:
for (mi--; mi>=0; mi--) {
mem = &idev->info->mem[mi];
map = mem->map;
static void uio_dev_del_attributes(struct uio_device *idev)
{
- int mi;
+ int i;
struct uio_mem *mem;
- for (mi = 0; mi < MAX_UIO_MAPS; mi++) {
- mem = &idev->info->mem[mi];
+ struct uio_port *port;
+
+ for (i = 0; i < MAX_UIO_MAPS; i++) {
+ mem = &idev->info->mem[i];
if (mem->size == 0)
break;
kobject_put(&mem->map->kobj);
}
kobject_put(idev->map_dir);
+
+ for (i = 0; i < MAX_UIO_PORT_REGIONS; i++) {
+ port = &idev->info->port[i];
+ if (port->size == 0)
+ break;
+ kobject_put(&port->portio->kobj);
+ }
+ kobject_put(idev->portio_dir);
+
sysfs_remove_group(&idev->dev->kobj, &uio_attr_grp);
}
info->mem[0].addr = pci_resource_start(dev, 0);
if (!info->mem[0].addr)
goto out_release;
- info->mem[0].internal_addr = ioremap(pci_resource_start(dev, 0),
- pci_resource_len(dev, 0));
+ info->mem[0].internal_addr = pci_ioremap_bar(dev, 0);
if (!info->mem[0].internal_addr)
goto out_release;
goto bad0;
}
- if (uioinfo->handler || uioinfo->irqcontrol || uioinfo->irq_flags) {
+ if (uioinfo->handler || uioinfo->irqcontrol ||
+ uioinfo->irq_flags & IRQF_SHARED) {
dev_err(&pdev->dev, "interrupt configuration error\n");
goto bad0;
}
* Interrupt sharing is not supported.
*/
- uioinfo->irq_flags = IRQF_DISABLED;
+ uioinfo->irq_flags |= IRQF_DISABLED;
uioinfo->handler = uio_pdrv_genirq_handler;
uioinfo->irqcontrol = uio_pdrv_genirq_irqcontrol;
uioinfo->priv = priv;
return usb_resume(dev);
}
-static struct pm_ops usb_device_pm_ops = {
+static struct dev_pm_ops usb_device_pm_ops = {
.prepare = usb_dev_prepare,
.complete = usb_dev_complete,
.suspend = usb_dev_suspend,
#define ksuspend_usb_init() 0
#define ksuspend_usb_cleanup() do {} while (0)
-#define usb_device_pm_ops (*(struct pm_ops *)0)
+#define usb_device_pm_ops (*(struct dev_pm_ops *)0)
#endif /* CONFIG_PM */
.ep0 = &controller.ep[0].ep,
.name = driver_name,
.dev = {
- .bus_id = "gadget",
+ .init_name = "gadget",
.release = nop_release,
}
},
.is_dualspeed = 1,
.name = "atmel_usba_udc",
.dev = {
- .bus_id = "gadget",
+ .init_name = "gadget",
.release = nop_release,
},
},
device_initialize(&udc_controller->gadget.dev);
- strcpy(udc_controller->gadget.dev.bus_id, "gadget");
+ dev_set_name(&udc_controller->gadget.dev, "gadget");
udc_controller->gadget.dev.release = qe_udc_release;
udc_controller->gadget.dev.parent = &ofdev->dev;
.ep0 = &memory.ep[0].ep,
.name = driver_name,
.dev = {
- .bus_id = "gadget",
+ .init_name = "gadget",
.release = nop_release,
},
},
.ep0 = &memory.ep[0].ep,
.name = driver_name,
.dev = {
- .bus_id = "gadget",
+ .init_name = "gadget",
.release = nop_release,
},
},
.ep0 = &memory.udc_usb_ep[0].usb_ep,
.name = driver_name,
.dev = {
- .bus_id = "gadget",
+ .init_name = "gadget",
},
},
.ep0 = &memory.ep[0].ep,
.name = gadget_name,
.dev = {
- .bus_id = "gadget",
+ .init_name = "gadget",
},
},
new_bd->dev.class = backlight_class;
new_bd->dev.parent = parent;
new_bd->dev.release = bl_device_release;
- strlcpy(new_bd->dev.bus_id, name, BUS_ID_SIZE);
+ dev_set_name(&new_bd->dev, name);
dev_set_drvdata(&new_bd->dev, devdata);
rc = device_register(&new_bd->dev);
new_ld->dev.class = lcd_class;
new_ld->dev.parent = parent;
new_ld->dev.release = lcd_device_release;
- strlcpy(new_ld->dev.bus_id, name, BUS_ID_SIZE);
+ dev_set_name(&new_ld->dev, name);
dev_set_drvdata(&new_ld->dev, devdata);
rc = device_register(&new_ld->dev);
new_dev->props = op;
new_dev->dev.class = &video_output_class;
new_dev->dev.parent = dev;
- strlcpy(new_dev->dev.bus_id,name, BUS_ID_SIZE);
+ dev_set_name(&new_dev->dev, name);
dev_set_drvdata(&new_dev->dev, devdata);
ret_code = device_register(&new_dev->dev);
if (ret_code) {
/* A PCI device has its own struct device and so does a virtio device so
 * we create a place for the virtio devices to show up in sysfs. I think it
 * would make more sense for virtio to not insist on having its own device. */
-static struct device virtio_pci_root = {
- .parent = NULL,
- .init_name = "virtio-pci",
-};
+static struct device *virtio_pci_root;
/* Convert a generic virtio device to our structure */
static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev)
if (vp_dev == NULL)
return -ENOMEM;
- vp_dev->vdev.dev.parent = &virtio_pci_root;
+ vp_dev->vdev.dev.parent = virtio_pci_root;
vp_dev->vdev.dev.release = virtio_pci_release_dev;
vp_dev->vdev.config = &virtio_pci_config_ops;
vp_dev->pci_dev = pci_dev;
{
int err;
- err = device_register(&virtio_pci_root);
- if (err)
- return err;
+ virtio_pci_root = root_device_register("virtio-pci");
+ if (IS_ERR(virtio_pci_root))
+ return PTR_ERR(virtio_pci_root);
err = pci_register_driver(&virtio_pci_driver);
if (err)
- device_unregister(&virtio_pci_root);
+ device_unregister(virtio_pci_root);
return err;
}
static void __exit virtio_pci_exit(void)
{
- device_unregister(&virtio_pci_root);
pci_unregister_driver(&virtio_pci_driver);
+ root_device_unregister(virtio_pci_root);
}
module_exit(virtio_pci_exit);
struct device w1_master_device = {
.parent = NULL,
.bus = &w1_bus_type,
- .bus_id = "w1 bus master",
+ .init_name = "w1 bus master",
.driver = &w1_master_driver,
.release = &w1_master_release
};
struct device w1_slave_device = {
.parent = NULL,
.bus = &w1_bus_type,
- .bus_id = "w1 bus slave",
+ .init_name = "w1 bus slave",
.driver = &w1_slave_driver,
.release = &w1_slave_release
};
}
dev_dbg(dev, "Hotplug event for %s %s, bus_id=%s.\n",
- event_owner, name, dev->bus_id);
+ event_owner, name, dev_name(dev));
if (dev->driver != &w1_slave_driver || !sl)
return 0;
sl->dev.bus = &w1_bus_type;
sl->dev.release = &w1_slave_release;
- snprintf(&sl->dev.bus_id[0], sizeof(sl->dev.bus_id),
- "%02x-%012llx",
+ dev_set_name(&sl->dev, "%02x-%012llx",
(unsigned int) sl->reg_num.family,
(unsigned long long) sl->reg_num.id);
snprintf(&sl->name[0], sizeof(sl->name),
(unsigned long long) sl->reg_num.id);
dev_dbg(&sl->dev, "%s: registering %s as %p.\n", __func__,
- &sl->dev.bus_id[0], sl);
+ dev_name(&sl->dev), sl);
err = device_register(&sl->dev);
if (err < 0) {
dev_err(&sl->dev,
"Device registration [%s] failed. err=%d\n",
- sl->dev.bus_id, err);
+ dev_name(&sl->dev), err);
return err;
}
if (err < 0) {
dev_err(&sl->dev,
"sysfs file creation for [%s] failed. err=%d\n",
- sl->dev.bus_id, err);
+ dev_name(&sl->dev), err);
goto out_unreg;
}
if (err < 0) {
dev_err(&sl->dev,
"sysfs file creation for [%s] failed. err=%d\n",
- sl->dev.bus_id, err);
+ dev_name(&sl->dev), err);
goto out_rem1;
}
((err = sl->family->fops->add_slave(sl)) < 0)) {
dev_err(&sl->dev,
"sysfs file creation for [%s] failed. err=%d\n",
- sl->dev.bus_id, err);
+ dev_name(&sl->dev), err);
goto out_rem2;
}
mutex_init(&dev->mutex);
memcpy(&dev->dev, device, sizeof(struct device));
- snprintf(dev->dev.bus_id, sizeof(dev->dev.bus_id),
- "w1_bus_master%u", dev->id);
+ dev_set_name(&dev->dev, "w1_bus_master%u", dev->id);
snprintf(dev->name, sizeof(dev->name), "w1_bus_master%u", dev->id);
dev->driver = driver;
}
/* device/<type>/<id> => <type>-<id> */
-static int frontend_bus_id(char bus_id[BUS_ID_SIZE], const char *nodename)
+static int frontend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename)
{
nodename = strchr(nodename, '/');
- if (!nodename || strlen(nodename + 1) >= BUS_ID_SIZE) {
+ if (!nodename || strlen(nodename + 1) >= XEN_BUS_ID_SIZE) {
printk(KERN_WARNING "XENBUS: bad frontend %s\n", nodename);
return -EINVAL;
}
- strlcpy(bus_id, nodename + 1, BUS_ID_SIZE);
+ strlcpy(bus_id, nodename + 1, XEN_BUS_ID_SIZE);
if (!strchr(bus_id, '/')) {
printk(KERN_WARNING "XENBUS: bus_id %s no slash\n", bus_id);
return -EINVAL;
const char *type,
const char *nodename)
{
+ char devname[XEN_BUS_ID_SIZE];
int err;
struct xenbus_device *xendev;
size_t stringlen;
xendev->dev.bus = &bus->bus;
xendev->dev.release = xenbus_dev_release;
- err = bus->get_bus_id(xendev->dev.bus_id, xendev->nodename);
+ err = bus->get_bus_id(devname, xendev->nodename);
if (err)
goto fail;
+ dev_set_name(&xendev->dev, devname);
+
/* Register with generic device framework. */
err = device_register(&xendev->dev);
if (err)
{
int exists, rootlen;
struct xenbus_device *dev;
- char type[BUS_ID_SIZE];
+ char type[XEN_BUS_ID_SIZE];
const char *p, *root;
if (char_count(node, '/') < 2)
/* backend/<type>/... or device/<type>/... */
p = strchr(node, '/') + 1;
- snprintf(type, BUS_ID_SIZE, "%.*s", (int)strcspn(p, "/"), p);
- type[BUS_ID_SIZE-1] = '\0';
+ snprintf(type, XEN_BUS_ID_SIZE, "%.*s", (int)strcspn(p, "/"), p);
+ type[XEN_BUS_ID_SIZE-1] = '\0';
rootlen = strsep_len(node, '/', bus->levels);
if (rootlen < 0)
err = drv->suspend(xdev);
if (err)
printk(KERN_WARNING
- "xenbus: suspend %s failed: %i\n", dev->bus_id, err);
+ "xenbus: suspend %s failed: %i\n", dev_name(dev), err);
return 0;
}
if (err)
printk(KERN_WARNING
"xenbus: suspend_cancel %s failed: %i\n",
- dev->bus_id, err);
+ dev_name(dev), err);
return 0;
}
if (err) {
printk(KERN_WARNING
"xenbus: resume (talk_to_otherend) %s failed: %i\n",
- dev->bus_id, err);
+ dev_name(dev), err);
return err;
}
if (err) {
printk(KERN_WARNING
"xenbus: resume %s failed: %i\n",
- dev->bus_id, err);
+ dev_name(dev), err);
return err;
}
}
if (err) {
printk(KERN_WARNING
"xenbus_probe: resume (watch_otherend) %s failed: "
- "%d.\n", dev->bus_id, err);
+ "%d.\n", dev_name(dev), err);
return err;
}
#ifndef _XENBUS_PROBE_H
#define _XENBUS_PROBE_H
+#define XEN_BUS_ID_SIZE 20
+
#ifdef CONFIG_XEN_BACKEND
extern void xenbus_backend_suspend(int (*fn)(struct device *, void *));
extern void xenbus_backend_resume(int (*fn)(struct device *, void *));
{
char *root;
unsigned int levels;
- int (*get_bus_id)(char bus_id[BUS_ID_SIZE], const char *nodename);
+ int (*get_bus_id)(char bus_id[XEN_BUS_ID_SIZE], const char *nodename);
int (*probe)(const char *type, const char *dir);
struct bus_type bus;
};
dname = dev_name(ddev);
if (isdigit(dname[strlen(dname) - 1]))
- snprintf(pdev->bus_id, BUS_ID_SIZE, "%sp%d", dname, partno);
+ dev_set_name(pdev, "%sp%d", dname, partno);
else
- snprintf(pdev->bus_id, BUS_ID_SIZE, "%s%d", dname, partno);
+ dev_set_name(pdev, "%s%d", dname, partno);
device_initialize(pdev);
pdev->class = &block_class;
struct block_device *bdev;
struct disk_part_iter piter;
struct hd_struct *part;
- char *s;
int err;
ddev->parent = disk->driverfs_dev;
- strlcpy(ddev->bus_id, disk->disk_name, BUS_ID_SIZE);
- /* ewww... some of these buggers have / in the name... */
- s = strchr(ddev->bus_id, '/');
- if (s)
- *s = '!';
+ dev_set_name(ddev, disk->disk_name);
/* delay uevents, until we scanned partition table */
ddev->uevent_suppress = 1;
#define BUS_ID_SIZE 20
struct device;
+struct device_private;
struct device_driver;
struct driver_private;
struct class;
int (*resume_early)(struct device *dev);
int (*resume)(struct device *dev);
- struct pm_ext_ops *pm;
+ struct dev_pm_ops *pm;
struct bus_type_private *p;
};
int (*resume) (struct device *dev);
struct attribute_group **groups;
- struct pm_ops *pm;
+ struct dev_pm_ops *pm;
struct driver_private *p;
};
int (*suspend)(struct device *dev, pm_message_t state);
int (*resume)(struct device *dev);
- struct pm_ops *pm;
+ struct dev_pm_ops *pm;
struct class_private *p;
};
int (*suspend)(struct device *dev, pm_message_t state);
int (*resume)(struct device *dev);
- struct pm_ops *pm;
+ struct dev_pm_ops *pm;
};
/* interface for exporting device attributes */
};
struct device {
- struct klist klist_children;
- struct klist_node knode_parent; /* node in sibling list */
- struct klist_node knode_driver;
- struct klist_node knode_bus;
struct device *parent;
+ struct device_private *p;
+
struct kobject kobj;
char bus_id[BUS_ID_SIZE]; /* position on parent bus */
+ unsigned uevent_suppress:1;
const char *init_name; /* initial name of the device */
struct device_type *type;
- unsigned uevent_suppress:1;
struct semaphore sem; /* semaphore to synchronize calls to
* its driver.
/* arch specific additions */
struct dev_archdata archdata;
+ dev_t devt; /* dev_t, creates the sysfs "dev" */
+
spinlock_t devres_lock;
struct list_head devres_head;
struct klist_node knode_class;
struct class *class;
- dev_t devt; /* dev_t, creates the sysfs "dev" */
struct attribute_group **groups; /* optional groups */
void (*release)(struct device *dev);
extern int device_rename(struct device *dev, char *new_name);
extern int device_move(struct device *dev, struct device *new_parent);
+/*
+ * Root device objects for grouping under /sys/devices
+ */
+extern struct device *__root_device_register(const char *name,
+ struct module *owner);
+static inline struct device *root_device_register(const char *name)
+{
+ return __root_device_register(name, THIS_MODULE);
+}
+extern void root_device_unregister(struct device *root);
+
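A quick usage sketch for the new root device helpers (hypothetical "foo"
subsystem name; not part of this patch). The s390 and virtio-pci root-device
conversions elsewhere in this series follow the same pattern:

	static struct device *foo_root;

	static int __init foo_init(void)
	{
		/* creates /sys/devices/foo as a parent for foo devices */
		foo_root = root_device_register("foo");
		if (IS_ERR(foo_root))
			return PTR_ERR(foo_root);
		return 0;
	}

	static void __exit foo_exit(void)
	{
		root_device_unregister(foo_root);
	}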
/*
* Manual binding of a device to driver. See drivers/base/bus.c
* for information on use.
#define dev_info(dev, format, arg...) \
dev_printk(KERN_INFO , dev , format , ## arg)
-#if defined(CONFIG_DYNAMIC_PRINTK_DEBUG)
+#if defined(DEBUG)
+#define dev_dbg(dev, format, arg...) \
+ dev_printk(KERN_DEBUG , dev , format , ## arg)
+#elif defined(CONFIG_DYNAMIC_PRINTK_DEBUG)
#define dev_dbg(dev, format, ...) do { \
dynamic_dev_dbg(dev, format, ##__VA_ARGS__); \
} while (0)
-#elif defined(DEBUG)
-#define dev_dbg(dev, format, arg...) \
- dev_printk(KERN_DEBUG , dev , format , ## arg)
#else
#define dev_dbg(dev, format, arg...) \
({ if (0) dev_printk(KERN_DEBUG, dev, format, ##arg); 0; })
printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
/* If you are writing a driver, please use dev_dbg instead */
-#if defined(CONFIG_DYNAMIC_PRINTK_DEBUG)
+#if defined(DEBUG)
+#define pr_debug(fmt, ...) \
+ printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
+#elif defined(CONFIG_DYNAMIC_PRINTK_DEBUG)
#define pr_debug(fmt, ...) do { \
dynamic_pr_debug(pr_fmt(fmt), ##__VA_ARGS__); \
} while (0)
-#elif defined(DEBUG)
-#define pr_debug(fmt, ...) \
- printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#else
#define pr_debug(fmt, ...) \
({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; })
#define _LINUX_KLIST_H
#include <linux/spinlock.h>
-#include <linux/completion.h>
#include <linux/kref.h>
#include <linux/list.h>
void *n_klist; /* never access directly */
struct list_head n_node;
struct kref n_ref;
- struct completion n_removed;
};
extern void klist_add_tail(struct klist_node *n, struct klist *k);
struct mtd_info *mtd_concat_create(
struct mtd_info *subdev[], /* subdevices to concatenate */
int num_devs, /* number of subdevices */
- char *name); /* name for the new device */
+ const char *name); /* name for the new device */
void mtd_concat_destroy(struct mtd_info *mtd);
int (*resume_early) (struct pci_dev *dev);
int (*resume) (struct pci_dev *dev); /* Device woken up */
void (*shutdown) (struct pci_dev *dev);
- struct pm_ext_ops *pm;
struct pci_error_handlers *err_handler;
struct device_driver driver;
struct pci_dynids dynids;
int (*suspend_late)(struct platform_device *, pm_message_t state);
int (*resume_early)(struct platform_device *);
int (*resume)(struct platform_device *);
- struct pm_ext_ops *pm;
struct device_driver driver;
};
} pm_message_t;
/**
- * struct pm_ops - device PM callbacks
+ * struct dev_pm_ops - device PM callbacks
*
* Several driver power state transitions are externally visible, affecting
* the state of pending I/O queues and (for drivers that touch hardware)
* On most platforms, there are no restrictions on availability of
* resources like clocks during @restore().
*
- * All of the above callbacks, except for @complete(), return error codes.
- * However, the error codes returned by the resume operations, @resume(),
- * @thaw(), and @restore(), do not cause the PM core to abort the resume
- * transition during which they are returned. The error codes returned in
- * that cases are only printed by the PM core to the system logs for debugging
- * purposes. Still, it is recommended that drivers only return error codes
- * from their resume methods in case of an unrecoverable failure (i.e. when the
- * device being handled refuses to resume and becomes unusable) to allow us to
- * modify the PM core in the future, so that it can avoid attempting to handle
- * devices that failed to resume and their children.
- *
- * It is allowed to unregister devices while the above callbacks are being
- * executed. However, it is not allowed to unregister a device from within any
- * of its own callbacks.
- */
-
-struct pm_ops {
- int (*prepare)(struct device *dev);
- void (*complete)(struct device *dev);
- int (*suspend)(struct device *dev);
- int (*resume)(struct device *dev);
- int (*freeze)(struct device *dev);
- int (*thaw)(struct device *dev);
- int (*poweroff)(struct device *dev);
- int (*restore)(struct device *dev);
-};
-
-/**
- * struct pm_ext_ops - extended device PM callbacks
- *
- * Some devices require certain operations related to suspend and hibernation
- * to be carried out with interrupts disabled. Thus, 'struct pm_ext_ops' below
- * is defined, adding callbacks to be executed with interrupts disabled to
- * 'struct pm_ops'.
- *
- * The following callbacks included in 'struct pm_ext_ops' are executed with
- * the nonboot CPUs switched off and with interrupts disabled on the only
- * functional CPU. They also are executed with the PM core list of devices
- * locked, so they must NOT unregister any devices.
- *
* @suspend_noirq: Complete the operations of ->suspend() by carrying out any
* actions required for suspending the device that need interrupts to be
* disabled
* actions required for restoring the operations of the device that need
* interrupts to be disabled
*
- * All of the above callbacks return error codes, but the error codes returned
- * by the resume operations, @resume_noirq(), @thaw_noirq(), and
- * @restore_noirq(), do not cause the PM core to abort the resume transition
- * during which they are returned. The error codes returned in that cases are
- * only printed by the PM core to the system logs for debugging purposes.
- * Still, as stated above, it is recommended that drivers only return error
- * codes from their resume methods if the device being handled fails to resume
- * and is not usable any more.
+ * All of the above callbacks, except for @complete(), return error codes.
+ * However, the error codes returned by the resume operations, @resume(),
+ * @thaw(), @restore(), @resume_noirq(), @thaw_noirq(), and @restore_noirq() do
+ * not cause the PM core to abort the resume transition during which they are
+ * returned. The error codes returned in those cases are only printed by the PM
+ * core to the system logs for debugging purposes. Still, it is recommended
+ * that drivers only return error codes from their resume methods in case of an
+ * unrecoverable failure (i.e. when the device being handled refuses to resume
+ * and becomes unusable) to allow us to modify the PM core in the future, so
+ * that it can avoid attempting to handle devices that failed to resume and
+ * their children.
+ *
+ * It is allowed to unregister devices while the above callbacks are being
+ * executed. However, it is not allowed to unregister a device from within any
+ * of its own callbacks.
*/
-struct pm_ext_ops {
- struct pm_ops base;
+struct dev_pm_ops {
+ int (*prepare)(struct device *dev);
+ void (*complete)(struct device *dev);
+ int (*suspend)(struct device *dev);
+ int (*resume)(struct device *dev);
+ int (*freeze)(struct device *dev);
+ int (*thaw)(struct device *dev);
+ int (*poweroff)(struct device *dev);
+ int (*restore)(struct device *dev);
int (*suspend_noirq)(struct device *dev);
int (*resume_noirq)(struct device *dev);
int (*freeze_noirq)(struct device *dev);
#define PM_EVENT_SLEEP (PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE)
#define PM_EVENT_USER_SUSPEND (PM_EVENT_USER | PM_EVENT_SUSPEND)
#define PM_EVENT_USER_RESUME (PM_EVENT_USER | PM_EVENT_RESUME)
-#define PM_EVENT_REMOTE_WAKEUP (PM_EVENT_REMOTE | PM_EVENT_RESUME)
+#define PM_EVENT_REMOTE_RESUME (PM_EVENT_REMOTE | PM_EVENT_RESUME)
#define PM_EVENT_AUTO_SUSPEND (PM_EVENT_AUTO | PM_EVENT_SUSPEND)
#define PM_EVENT_AUTO_RESUME (PM_EVENT_AUTO | PM_EVENT_RESUME)
#define PMSG_THAW ((struct pm_message){ .event = PM_EVENT_THAW, })
#define PMSG_RESTORE ((struct pm_message){ .event = PM_EVENT_RESTORE, })
#define PMSG_RECOVER ((struct pm_message){ .event = PM_EVENT_RECOVER, })
-#define PMSG_USER_SUSPEND ((struct pm_messge) \
+#define PMSG_USER_SUSPEND ((struct pm_message) \
{ .event = PM_EVENT_USER_SUSPEND, })
-#define PMSG_USER_RESUME ((struct pm_messge) \
+#define PMSG_USER_RESUME ((struct pm_message) \
{ .event = PM_EVENT_USER_RESUME, })
-#define PMSG_REMOTE_RESUME ((struct pm_messge) \
+#define PMSG_REMOTE_RESUME ((struct pm_message) \
{ .event = PM_EVENT_REMOTE_RESUME, })
-#define PMSG_AUTO_SUSPEND ((struct pm_messge) \
+#define PMSG_AUTO_SUSPEND ((struct pm_message) \
{ .event = PM_EVENT_AUTO_SUSPEND, })
-#define PMSG_AUTO_RESUME ((struct pm_messge) \
+#define PMSG_AUTO_RESUME ((struct pm_message) \
{ .event = PM_EVENT_AUTO_RESUME, })
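Pulling the pm.h changes together: a driver that used to provide a struct
pm_ops (or pm_ext_ops for the _noirq variants) now fills in a single struct
dev_pm_ops. A minimal sketch with a hypothetical driver name and only a few
callbacks shown (not part of this patch):

	static int foo_suspend(struct device *dev)
	{
		/* quiesce the device; interrupts are still enabled here */
		return 0;
	}

	static int foo_suspend_noirq(struct device *dev)
	{
		/* final save/quiesce step, run with interrupts disabled */
		return 0;
	}

	static int foo_resume(struct device *dev)
	{
		/* bring the device back up */
		return 0;
	}

	static struct dev_pm_ops foo_pm_ops = {
		.suspend	= foo_suspend,
		.suspend_noirq	= foo_suspend_noirq,
		.resume		= foo_resume,
	};

The bus, class, or driver then points its pm member at &foo_pm_ops, matching
the struct bus_type, device_driver, and class changes above.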
/**
#define MAX_UIO_MAPS 5
+struct uio_portio;
+
+/**
+ * struct uio_port - description of a UIO port region
+ * @start: start of port region
+ * @size: size of port region
+ * @porttype: type of port (see UIO_PORT_* below)
+ * @portio: for use by the UIO core only.
+ */
+struct uio_port {
+ unsigned long start;
+ unsigned long size;
+ int porttype;
+ struct uio_portio *portio;
+};
+
+#define MAX_UIO_PORT_REGIONS 5
+
struct uio_device;
/**
* @name: device name
* @version: device driver version
* @mem: list of mappable memory regions, size==0 for end of list
+ * @port: list of port regions, size==0 for end of list
* @irq: interrupt number or UIO_IRQ_CUSTOM
* @irq_flags: flags for request_irq()
* @priv: optional private data
*/
struct uio_info {
struct uio_device *uio_dev;
- char *name;
- char *version;
+ const char *name;
+ const char *version;
struct uio_mem mem[MAX_UIO_MAPS];
+ struct uio_port port[MAX_UIO_PORT_REGIONS];
long irq;
unsigned long irq_flags;
void *priv;
#define UIO_MEM_LOGICAL 2
#define UIO_MEM_VIRTUAL 3
+/* defines for uio_port->porttype */
+#define UIO_PORT_NONE 0
+#define UIO_PORT_X86 1
+#define UIO_PORT_GPIO 2
+#define UIO_PORT_OTHER 3
+
#endif /* _LINUX_UIO_DRIVER_H_ */
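For a driver that wants its ioports exposed through the new portio interface,
filling in struct uio_info is a small addition next to the existing mem[]
entries. A minimal sketch with made-up addresses and port numbers (not part of
this patch):

	static struct uio_info foo_uio_info = {
		.name		= "foo_uio",
		.version	= "0.1",
		.irq		= UIO_IRQ_NONE,
	};

	static void foo_uio_fill_regions(void)
	{
		/* one mappable memory region, shown under maps/map0/ */
		foo_uio_info.mem[0].addr = 0xd0000000;	/* made-up address */
		foo_uio_info.mem[0].size = 0x1000;
		foo_uio_info.mem[0].memtype = UIO_MEM_PHYS;

		/* one x86 ioport region, shown under portio/port0/ */
		foo_uio_info.port[0].start = 0x300;	/* made-up port base */
		foo_uio_info.port[0].size = 8;
		foo_uio_info.port[0].porttype = UIO_PORT_X86;
	}

Once the info is registered with uio_register_device(), the port region shows
up as start, size, and porttype attributes under portio/port0/ in sysfs,
courtesy of the uio.c changes above.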
bool
config SYSFS_DEPRECATED_V2
- bool "Create deprecated sysfs files"
+ bool "Create deprecated sysfs layout for older userspace tools"
depends on SYSFS
default y
select SYSFS_DEPRECATED
help
- This option creates deprecated symlinks such as the
- "device"-link, the <subsystem>:<name>-link, and the
- "bus"-link. It may also add deprecated key in the
- uevent environment.
- None of these features or values should be used today, as
- they export driver core implementation details to userspace
- or export properties which can't be kept stable across kernel
- releases.
-
- If enabled, this option will also move any device structures
- that belong to a class, back into the /sys/class hierarchy, in
- order to support older versions of udev and some userspace
- programs.
-
- If you are using a distro with the most recent userspace
- packages, it should be safe to say N here.
+ This option switches the layout of sysfs to the deprecated
+ version.
+
+ The current sysfs layout features a unified device tree at
+ /sys/devices/, which is able to express a hierarchy between
+ class devices. If the deprecated option is set to Y, the
+ unified device tree is split into a bus device tree at
+ /sys/devices/ and several individual class device trees at
+ /sys/class/. The class and bus devices will be connected by
+ "<subsystem>:<name>" and the "device" links. The "block"
+ class devices will not show up in /sys/class/block/. Some
+ subsystems will suppress the creation of some devices which
+ depend on the unified device tree.
+
+ This option is not a pure compatibility option that can
+ be safely enabled on newer distributions. It will change the
+ layout of sysfs to the non-extensible deprecated version,
+ and disable some features which cannot be exported without
+ confusing older userspace tools. Since 2007/2008, no major
+ distribution has enabled this option, and none ships tools which
+ depend on the deprecated layout or this option.
+
+ If you are using a new kernel on an older distribution, or use
+ older userspace tools, you might need to say Y here. Do not say Y,
+ older userspace tools, you might need to say Y here. Do not say Y
+ if the original kernel that came with your distribution has
config PROC_PID_CPUSET
bool "Include legacy /proc/<pid>/cpuset file"
static struct kobj_attribute _name##_attr = \
__ATTR(_name, 0644, _name##_show, _name##_store)
-#if defined(CONFIG_HOTPLUG) && defined(CONFIG_NET)
+#if defined(CONFIG_HOTPLUG)
/* current uevent sequence number */
static ssize_t uevent_seqnum_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
EXPORT_SYMBOL_GPL(kernel_kobj);
static struct attribute * kernel_attrs[] = {
-#if defined(CONFIG_HOTPLUG) && defined(CONFIG_NET)
+#if defined(CONFIG_HOTPLUG)
&uevent_seqnum_attr.attr,
&uevent_helper_attr.attr,
#endif
/* this may fail if the RTC hasn't been initialized */
status = rtc_read_time(rtc, &alm.time);
if (status < 0) {
- printk(err_readtime, rtc->dev.bus_id, status);
+ printk(err_readtime, dev_name(&rtc->dev), status);
return;
}
rtc_tm_to_time(&alm.time, &now);
status = rtc_set_alarm(rtc, &alm);
if (status < 0) {
- printk(err_wakealarm, rtc->dev.bus_id, status);
+ printk(err_wakealarm, dev_name(&rtc->dev), status);
return;
}
if (!device_may_wakeup(candidate->dev.parent))
return 0;
- *(char **)name_ptr = dev->bus_id;
+ *(const char **)name_ptr = dev_name(dev);
return 1;
}
dynamic_enabled = DYNAMIC_ENABLED_NONE;
}
err = 0;
- } else {
- if (elem) {
- if (value && (elem->enable == 0)) {
- dynamic_printk_enabled |=
- (1LL << elem->hash1);
- dynamic_printk_enabled2 |=
- (1LL << elem->hash2);
- elem->enable = 1;
- num_enabled++;
- dynamic_enabled = DYNAMIC_ENABLED_SOME;
- err = 0;
- printk(KERN_DEBUG
- "debugging enabled for module %s\n",
- elem->name);
- } else if (!value && (elem->enable == 1)) {
- elem->enable = 0;
- num_enabled--;
- if (disabled_hash(elem->hash1, true))
- dynamic_printk_enabled &=
+ } else if (elem) {
+ if (value && (elem->enable == 0)) {
+ dynamic_printk_enabled |= (1LL << elem->hash1);
+ dynamic_printk_enabled2 |= (1LL << elem->hash2);
+ elem->enable = 1;
+ num_enabled++;
+ dynamic_enabled = DYNAMIC_ENABLED_SOME;
+ err = 0;
+ printk(KERN_DEBUG
+ "debugging enabled for module %s\n",
+ elem->name);
+ } else if (!value && (elem->enable == 1)) {
+ elem->enable = 0;
+ num_enabled--;
+ if (disabled_hash(elem->hash1, true))
+ dynamic_printk_enabled &=
~(1LL << elem->hash1);
- if (disabled_hash(elem->hash2, false))
- dynamic_printk_enabled2 &=
+ if (disabled_hash(elem->hash2, false))
+ dynamic_printk_enabled2 &=
~(1LL << elem->hash2);
- if (num_enabled)
- dynamic_enabled =
- DYNAMIC_ENABLED_SOME;
- else
- dynamic_enabled =
- DYNAMIC_ENABLED_NONE;
- err = 0;
- printk(KERN_DEBUG
- "debugging disabled for module "
- "%s\n", elem->name);
- }
+ if (num_enabled)
+ dynamic_enabled = DYNAMIC_ENABLED_SOME;
+ else
+ dynamic_enabled = DYNAMIC_ENABLED_NONE;
+ err = 0;
+ printk(KERN_DEBUG
+ "debugging disabled for module %s\n",
+ elem->name);
}
}
}
#include <linux/klist.h>
#include <linux/module.h>
+#include <linux/sched.h>
/*
* Use the lowest bit of n_klist to mark deleted nodes and exclude
static void klist_node_init(struct klist *k, struct klist_node *n)
{
INIT_LIST_HEAD(&n->n_node);
- init_completion(&n->n_removed);
kref_init(&n->n_ref);
knode_set_klist(n, k);
if (k->get)
}
EXPORT_SYMBOL_GPL(klist_add_before);
+struct klist_waiter {
+ struct list_head list;
+ struct klist_node *node;
+ struct task_struct *process;
+ int woken;
+};
+
+static DEFINE_SPINLOCK(klist_remove_lock);
+static LIST_HEAD(klist_remove_waiters);
+
static void klist_release(struct kref *kref)
{
+ struct klist_waiter *waiter, *tmp;
struct klist_node *n = container_of(kref, struct klist_node, n_ref);
WARN_ON(!knode_dead(n));
list_del(&n->n_node);
- complete(&n->n_removed);
+ spin_lock(&klist_remove_lock);
+ list_for_each_entry_safe(waiter, tmp, &klist_remove_waiters, list) {
+ if (waiter->node != n)
+ continue;
+
+ waiter->woken = 1;
+ mb();
+ wake_up_process(waiter->process);
+ list_del(&waiter->list);
+ }
+ spin_unlock(&klist_remove_lock);
knode_set_klist(n, NULL);
}
*/
void klist_remove(struct klist_node *n)
{
+ struct klist_waiter waiter;
+
+ waiter.node = n;
+ waiter.process = current;
+ waiter.woken = 0;
+ spin_lock(&klist_remove_lock);
+ list_add(&waiter.list, &klist_remove_waiters);
+ spin_unlock(&klist_remove_lock);
+
klist_del(n);
- wait_for_completion(&n->n_removed);
+
+ for (;;) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if (waiter.woken)
+ break;
+ schedule();
+ }
+ __set_current_state(TASK_RUNNING);
}
EXPORT_SYMBOL_GPL(klist_remove);
/* keys passed in from the caller */
if (envp_ext) {
for (i = 0; envp_ext[i]; i++) {
- retval = add_uevent_var(env, envp_ext[i]);
+ retval = add_uevent_var(env, "%s", envp_ext[i]);
if (retval)
goto exit;
}
}
NETLINK_CB(skb).dst_group = 1;
- netlink_broadcast(uevent_sock, skb, 0, 1, GFP_KERNEL);
- }
+ retval = netlink_broadcast(uevent_sock, skb, 0, 1,
+ GFP_KERNEL);
+ } else
+ retval = -ENOMEM;
}
#endif
* the damage, or panic when the transfer is too big.
*/
printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
- "device %s\n", size, dev ? dev->bus_id : "?");
+ "device %s\n", size, dev ? dev_name(dev) : "?");
if (size > io_tlb_overflow && do_panic) {
if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
#include <asm/ebcdic.h>
#include <asm/io.h>
#include <asm/s390_ext.h>
-#include <asm/s390_rdev.h>
#include <asm/smp.h>
/*
rc = register_external_interrupt(0x4000, iucv_external_interrupt);
if (rc)
goto out;
- iucv_root = s390_root_dev_register("iucv");
+ iucv_root = root_device_register("iucv");
if (IS_ERR(iucv_root)) {
rc = PTR_ERR(iucv_root);
goto out_int;
kfree(iucv_irq_data[cpu]);
iucv_irq_data[cpu] = NULL;
}
- s390_root_dev_unregister(iucv_root);
+ root_device_unregister(iucv_root);
out_int:
unregister_external_interrupt(0x4000, iucv_external_interrupt);
out:
kfree(iucv_irq_data[cpu]);
iucv_irq_data[cpu] = NULL;
}
- s390_root_dev_unregister(iucv_root);
+ root_device_unregister(iucv_root);
bus_unregister(&iucv_bus);
unregister_external_interrupt(0x4000, iucv_external_interrupt);
}