Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Kconfig | 4
-rw-r--r--  drivers/Makefile | 2
-rw-r--r--  drivers/block/rbd.c | 730
-rw-r--r--  drivers/block/rbd_types.h | 4
-rw-r--r--  drivers/char/hw_random/omap-rng.c | 2
-rw-r--r--  drivers/clk/Kconfig | 37
-rw-r--r--  drivers/clk/Makefile | 2
-rw-r--r--  drivers/clk/clk-divider.c | 200
-rw-r--r--  drivers/clk/clk-fixed-rate.c | 82
-rw-r--r--  drivers/clk/clk-gate.c | 150
-rw-r--r--  drivers/clk/clk-mux.c | 116
-rw-r--r--  drivers/clk/clk.c | 1461
-rw-r--r--  drivers/clocksource/tcb_clksrc.c | 90
-rw-r--r--  drivers/devfreq/exynos4_bus.c | 230
-rw-r--r--  drivers/dma/Kconfig | 9
-rw-r--r--  drivers/dma/Makefile | 1
-rw-r--r--  drivers/dma/sa11x0-dma.c | 1109
-rw-r--r--  drivers/gpio/gpio-ep93xx.c | 7
-rw-r--r--  drivers/gpio/gpio-omap.c | 1106
-rw-r--r--  drivers/gpio/gpio-sa1100.c | 1
-rw-r--r--  drivers/gpio/gpio-samsung.c | 487
-rw-r--r--  drivers/gpio/gpio-tegra.c | 59
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_dsi_output.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_i2c.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_cs.c | 98
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h | 8
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c | 391
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/cayman | 1
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/evergreen | 1
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/r600 | 1
-rw-r--r--  drivers/i2c/busses/i2c-gpio.c | 98
-rw-r--r--  drivers/i2c/busses/i2c-imx.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-pxa.c | 95
-rw-r--r--  drivers/input/keyboard/jornada720_kbd.c | 1
-rw-r--r--  drivers/input/serio/ams_delta_serio.c | 54
-rw-r--r--  drivers/input/serio/rpckbd.c | 44
-rw-r--r--  drivers/input/serio/sa1111ps2.c | 59
-rw-r--r--  drivers/input/touchscreen/Kconfig | 2
-rw-r--r--  drivers/input/touchscreen/jornada720_ts.c | 1
-rw-r--r--  drivers/leds/Kconfig | 9
-rw-r--r--  drivers/leds/Makefile | 1
-rw-r--r--  drivers/leds/leds-ams-delta.c | 126
-rw-r--r--  drivers/md/Kconfig | 28
-rw-r--r--  drivers/md/Makefile | 1
-rw-r--r--  drivers/md/dm-bufio.c | 108
-rw-r--r--  drivers/md/dm-bufio.h | 8
-rw-r--r--  drivers/md/dm-crypt.c | 46
-rw-r--r--  drivers/md/dm-delay.c | 9
-rw-r--r--  drivers/md/dm-exception-store.c | 2
-rw-r--r--  drivers/md/dm-flakey.c | 3
-rw-r--r--  drivers/md/dm-ioctl.c | 5
-rw-r--r--  drivers/md/dm-linear.c | 3
-rw-r--r--  drivers/md/dm-log.c | 3
-rw-r--r--  drivers/md/dm-mpath.c | 52
-rw-r--r--  drivers/md/dm-queue-length.c | 3
-rw-r--r--  drivers/md/dm-raid.c | 53
-rw-r--r--  drivers/md/dm-raid1.c | 12
-rw-r--r--  drivers/md/dm-round-robin.c | 3
-rw-r--r--  drivers/md/dm-service-time.c | 5
-rw-r--r--  drivers/md/dm-stripe.c | 3
-rw-r--r--  drivers/md/dm-table.c | 9
-rw-r--r--  drivers/md/dm-thin-metadata.c | 5
-rw-r--r--  drivers/md/dm-thin-metadata.h | 13
-rw-r--r--  drivers/md/dm-thin.c | 680
-rw-r--r--  drivers/md/dm-verity.c | 913
-rw-r--r--  drivers/md/dm.c | 1
-rw-r--r--  drivers/md/persistent-data/dm-btree-internal.h | 7
-rw-r--r--  drivers/md/persistent-data/dm-btree-remove.c | 202
-rw-r--r--  drivers/md/persistent-data/dm-btree.c | 27
-rw-r--r--  drivers/md/persistent-data/dm-space-map-common.c | 3
-rw-r--r--  drivers/mfd/Kconfig | 5
-rw-r--r--  drivers/mfd/mcp-core.c | 49
-rw-r--r--  drivers/mfd/mcp-sa11x0.c | 198
-rw-r--r--  drivers/mfd/ucb1x00-assabet.c | 46
-rw-r--r--  drivers/mfd/ucb1x00-core.c | 433
-rw-r--r--  drivers/mfd/ucb1x00-ts.c | 39
-rw-r--r--  drivers/misc/atmel_tclib.c | 64
-rw-r--r--  drivers/mmc/host/Kconfig | 2
-rw-r--r--  drivers/mmc/host/at91_mci.c | 1
-rw-r--r--  drivers/mmc/host/sdhci-esdhc-imx.c | 6
-rw-r--r--  drivers/mmc/host/sdhci-s3c.c | 6
-rw-r--r--  drivers/mtd/Kconfig | 2
-rw-r--r--  drivers/mtd/devices/Kconfig | 1
-rw-r--r--  drivers/mtd/maps/Kconfig | 1
-rw-r--r--  drivers/mtd/maps/sa1100-flash.c | 112
-rw-r--r--  drivers/mtd/nand/Kconfig | 4
-rw-r--r--  drivers/mtd/nand/ams-delta.c | 74
-rw-r--r--  drivers/mtd/nand/atmel_nand.c | 136
-rw-r--r--  drivers/mtd/onenand/Kconfig | 1
-rw-r--r--  drivers/net/Space.c | 2
-rw-r--r--  drivers/net/ethernet/cirrus/Kconfig | 19
-rw-r--r--  drivers/net/ethernet/cirrus/cs89x0.c | 148
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 39
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.h | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 4
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.c | 2
-rw-r--r--  drivers/net/irda/Kconfig | 2
-rw-r--r--  drivers/net/irda/sa1100_ir.c | 953
-rw-r--r--  drivers/net/usb/cdc-phonet.c | 6
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 36
-rw-r--r--  drivers/net/wireless/iwlegacy/3945.c | 3
-rw-r--r--  drivers/net/wireless/iwlegacy/4965-mac.c | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-rx.c | 2
-rw-r--r--  drivers/of/Kconfig | 4
-rw-r--r--  drivers/of/Makefile | 1
-rw-r--r--  drivers/of/of_mtd.c | 85
-rw-r--r--  drivers/pcmcia/at91_cf.c | 5
-rw-r--r--  drivers/pcmcia/sa1111_generic.c | 55
-rw-r--r--  drivers/pcmcia/sa1111_neponset.c | 7
-rw-r--r--  drivers/regulator/Kconfig | 8
-rw-r--r--  drivers/regulator/Makefile | 1
-rw-r--r--  drivers/regulator/bq24022.c | 162
-rw-r--r--  drivers/remoteproc/Kconfig | 28
-rw-r--r--  drivers/remoteproc/Makefile | 9
-rw-r--r--  drivers/remoteproc/omap_remoteproc.c | 229
-rw-r--r--  drivers/remoteproc/omap_remoteproc.h | 69
-rw-r--r--  drivers/remoteproc/remoteproc_core.c | 1586
-rw-r--r--  drivers/remoteproc/remoteproc_debugfs.c | 179
-rw-r--r--  drivers/remoteproc/remoteproc_internal.h | 44
-rw-r--r--  drivers/remoteproc/remoteproc_virtio.c | 289
-rw-r--r--  drivers/rpmsg/Kconfig | 10
-rw-r--r--  drivers/rpmsg/Makefile | 1
-rw-r--r--  drivers/rpmsg/virtio_rpmsg_bus.c | 1054
-rw-r--r--  drivers/rtc/Kconfig | 6
-rw-r--r--  drivers/rtc/rtc-at91sam9.c | 85
-rw-r--r--  drivers/rtc/rtc-mv.c | 9
-rw-r--r--  drivers/rtc/rtc-s3c.c | 71
-rw-r--r--  drivers/rtc/rtc-sa1100.c | 210
-rw-r--r--  drivers/s390/cio/qdio_main.c | 1
-rw-r--r--  drivers/s390/cio/qdio_setup.c | 3
-rw-r--r--  drivers/scsi/arm/arxescsi.c | 2
-rw-r--r--  drivers/scsi/arm/fas216.c | 4
-rw-r--r--  drivers/scsi/arm/fas216.h | 4
-rw-r--r--  drivers/sh/clk/cpg.c | 16
-rw-r--r--  drivers/spi/Kconfig | 2
-rw-r--r--  drivers/spi/spi-orion.c | 5
-rw-r--r--  drivers/spi/spi-s3c24xx.c | 2
-rw-r--r--  drivers/staging/ste_rmi4/Makefile | 2
-rw-r--r--  drivers/tty/serial/atmel_serial.c | 2
-rw-r--r--  drivers/tty/serial/imx.c | 7
-rw-r--r--  drivers/tty/serial/pxa.c | 49
-rw-r--r--  drivers/tty/serial/sa1100.c | 1
-rw-r--r--  drivers/usb/Kconfig | 4
-rw-r--r--  drivers/usb/gadget/Kconfig | 8
-rw-r--r--  drivers/usb/gadget/at91_udc.c | 49
-rw-r--r--  drivers/usb/gadget/atmel_usba_udc.c | 6
-rw-r--r--  drivers/usb/gadget/f_phonet.c | 2
-rw-r--r--  drivers/usb/host/ehci-atmel.c | 24
-rw-r--r--  drivers/usb/host/ohci-at91.c | 106
-rw-r--r--  drivers/usb/host/ohci-hcd.c | 2
-rw-r--r--  drivers/usb/host/ohci-sa1111.c | 297
-rw-r--r--  drivers/usb/serial/option.c | 6
-rw-r--r--  drivers/vhost/net.c | 2
-rw-r--r--  drivers/vhost/vhost.c | 11
-rw-r--r--  drivers/vhost/vhost.h | 2
-rw-r--r--  drivers/video/Kconfig | 2
-rw-r--r--  drivers/video/backlight/ep93xx_bl.c | 25
-rw-r--r--  drivers/video/ep93xx-fb.c | 18
-rw-r--r--  drivers/video/omap/lcd_ams_delta.c | 27
-rw-r--r--  drivers/video/omap2/dss/dispc.c | 5
-rw-r--r--  drivers/video/omap2/dss/dss.c | 3
-rw-r--r--  drivers/video/sa1100fb.c | 493
-rw-r--r--  drivers/video/sa1100fb.h | 76
-rw-r--r--  drivers/watchdog/Kconfig | 2
-rw-r--r--  drivers/watchdog/at91rm9200_wdt.c | 8
-rw-r--r--  drivers/watchdog/orion_wdt.c | 24
174 files changed, 13123 insertions, 3938 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index decf8e420856..6f0459cb745b 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -130,6 +130,10 @@ source "drivers/clocksource/Kconfig"
source "drivers/iommu/Kconfig"
+source "drivers/remoteproc/Kconfig"
+
+source "drivers/rpmsg/Kconfig"
+
source "drivers/virt/Kconfig"
source "drivers/devfreq/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 932e8bf20356..262b19d6b627 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -125,6 +125,8 @@ obj-y += clk/
obj-$(CONFIG_HWSPINLOCK) += hwspinlock/
obj-$(CONFIG_NFC) += nfc/
obj-$(CONFIG_IOMMU_SUPPORT) += iommu/
+obj-$(CONFIG_REMOTEPROC) += remoteproc/
+obj-$(CONFIG_RPMSG) += rpmsg/
# Virtualization drivers
obj-$(CONFIG_VIRT_DRIVERS) += virt/
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index a6278e7e61a0..013c7a549fb6 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -41,19 +41,35 @@
#include "rbd_types.h"
-#define DRV_NAME "rbd"
-#define DRV_NAME_LONG "rbd (rados block device)"
+/*
+ * The basic unit of block I/O is a sector. It is interpreted in a
+ * number of contexts in Linux (blk, bio, genhd), but the default is
+ * universally 512 bytes. These symbols are just slightly more
+ * meaningful than the bare numbers they represent.
+ */
+#define SECTOR_SHIFT 9
+#define SECTOR_SIZE (1ULL << SECTOR_SHIFT)
+
+#define RBD_DRV_NAME "rbd"
+#define RBD_DRV_NAME_LONG "rbd (rados block device)"
#define RBD_MINORS_PER_MAJOR 256 /* max minors per blkdev */
-#define RBD_MAX_MD_NAME_LEN (96 + sizeof(RBD_SUFFIX))
+#define RBD_MAX_MD_NAME_LEN (RBD_MAX_OBJ_NAME_LEN + sizeof(RBD_SUFFIX))
#define RBD_MAX_POOL_NAME_LEN 64
#define RBD_MAX_SNAP_NAME_LEN 32
#define RBD_MAX_OPT_LEN 1024
#define RBD_SNAP_HEAD_NAME "-"
+/*
+ * An RBD device name will be "rbd#", where the "rbd" comes from
+ * RBD_DRV_NAME above, and # is a unique integer identifier.
+ * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
+ * enough to hold all possible device names.
+ */
#define DEV_NAME_LEN 32
+#define MAX_INT_FORMAT_WIDTH ((5 * sizeof (int)) / 2 + 1)
#define RBD_NOTIFY_TIMEOUT_DEFAULT 10
@@ -66,7 +82,6 @@ struct rbd_image_header {
__u8 obj_order;
__u8 crypt_type;
__u8 comp_type;
- struct rw_semaphore snap_rwsem;
struct ceph_snap_context *snapc;
size_t snap_names_len;
u64 snap_seq;
@@ -83,7 +98,7 @@ struct rbd_options {
};
/*
- * an instance of the client. multiple devices may share a client.
+ * an instance of the client. multiple devices may share an rbd client.
*/
struct rbd_client {
struct ceph_client *client;
@@ -92,20 +107,9 @@ struct rbd_client {
struct list_head node;
};
-struct rbd_req_coll;
-
/*
- * a single io request
+ * a request completion status
*/
-struct rbd_request {
- struct request *rq; /* blk layer request */
- struct bio *bio; /* cloned bio */
- struct page **pages; /* list of used pages */
- u64 len;
- int coll_index;
- struct rbd_req_coll *coll;
-};
-
struct rbd_req_status {
int done;
int rc;
@@ -122,6 +126,18 @@ struct rbd_req_coll {
struct rbd_req_status status[0];
};
+/*
+ * a single io request
+ */
+struct rbd_request {
+ struct request *rq; /* blk layer request */
+ struct bio *bio; /* cloned bio */
+ struct page **pages; /* list of used pages */
+ u64 len;
+ int coll_index;
+ struct rbd_req_coll *coll;
+};
+
struct rbd_snap {
struct device dev;
const char *name;
@@ -140,7 +156,6 @@ struct rbd_device {
struct gendisk *disk; /* blkdev's gendisk and rq */
struct request_queue *q;
- struct ceph_client *client;
struct rbd_client *rbd_client;
char name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */
@@ -157,6 +172,8 @@ struct rbd_device {
struct ceph_osd_event *watch_event;
struct ceph_osd_request *watch_request;
+ /* protects updating the header */
+ struct rw_semaphore header_rwsem;
char snap_name[RBD_MAX_SNAP_NAME_LEN];
u32 cur_snap; /* index+1 of current snapshot within snap context
0 - for the head */
@@ -171,15 +188,13 @@ struct rbd_device {
struct device dev;
};
-static struct bus_type rbd_bus_type = {
- .name = "rbd",
-};
-
-static spinlock_t node_lock; /* protects client get/put */
-
static DEFINE_MUTEX(ctl_mutex); /* Serialize open/close/setup/teardown */
+
static LIST_HEAD(rbd_dev_list); /* devices */
-static LIST_HEAD(rbd_client_list); /* clients */
+static DEFINE_SPINLOCK(rbd_dev_list_lock);
+
+static LIST_HEAD(rbd_client_list); /* clients */
+static DEFINE_SPINLOCK(rbd_client_list_lock);
static int __rbd_init_snaps_header(struct rbd_device *rbd_dev);
static void rbd_dev_release(struct device *dev);
@@ -190,12 +205,32 @@ static ssize_t rbd_snap_add(struct device *dev,
static void __rbd_remove_snap_dev(struct rbd_device *rbd_dev,
struct rbd_snap *snap);
+static ssize_t rbd_add(struct bus_type *bus, const char *buf,
+ size_t count);
+static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
+ size_t count);
-static struct rbd_device *dev_to_rbd(struct device *dev)
+static struct bus_attribute rbd_bus_attrs[] = {
+ __ATTR(add, S_IWUSR, NULL, rbd_add),
+ __ATTR(remove, S_IWUSR, NULL, rbd_remove),
+ __ATTR_NULL
+};
+
+static struct bus_type rbd_bus_type = {
+ .name = "rbd",
+ .bus_attrs = rbd_bus_attrs,
+};
+
+static void rbd_root_dev_release(struct device *dev)
{
- return container_of(dev, struct rbd_device, dev);
}
+static struct device rbd_root_dev = {
+ .init_name = "rbd",
+ .release = rbd_root_dev_release,
+};
+
+
static struct device *rbd_get_dev(struct rbd_device *rbd_dev)
{
return get_device(&rbd_dev->dev);
@@ -210,8 +245,7 @@ static int __rbd_update_snaps(struct rbd_device *rbd_dev);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
- struct gendisk *disk = bdev->bd_disk;
- struct rbd_device *rbd_dev = disk->private_data;
+ struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
rbd_get_dev(rbd_dev);
@@ -256,9 +290,11 @@ static struct rbd_client *rbd_client_create(struct ceph_options *opt,
kref_init(&rbdc->kref);
INIT_LIST_HEAD(&rbdc->node);
+ mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+
rbdc->client = ceph_create_client(opt, rbdc, 0, 0);
if (IS_ERR(rbdc->client))
- goto out_rbdc;
+ goto out_mutex;
opt = NULL; /* Now rbdc->client is responsible for opt */
ret = ceph_open_session(rbdc->client);
@@ -267,16 +303,19 @@ static struct rbd_client *rbd_client_create(struct ceph_options *opt,
rbdc->rbd_opts = rbd_opts;
- spin_lock(&node_lock);
+ spin_lock(&rbd_client_list_lock);
list_add_tail(&rbdc->node, &rbd_client_list);
- spin_unlock(&node_lock);
+ spin_unlock(&rbd_client_list_lock);
+
+ mutex_unlock(&ctl_mutex);
dout("rbd_client_create created %p\n", rbdc);
return rbdc;
out_err:
ceph_destroy_client(rbdc->client);
-out_rbdc:
+out_mutex:
+ mutex_unlock(&ctl_mutex);
kfree(rbdc);
out_opt:
if (opt)
@@ -324,7 +363,7 @@ static int parse_rbd_opts_token(char *c, void *private)
substring_t argstr[MAX_OPT_ARGS];
int token, intval, ret;
- token = match_token((char *)c, rbdopt_tokens, argstr);
+ token = match_token(c, rbdopt_tokens, argstr);
if (token < 0)
return -EINVAL;
@@ -357,58 +396,54 @@ static int parse_rbd_opts_token(char *c, void *private)
* Get a ceph client with specific addr and configuration, if one does
* not exist create it.
*/
-static int rbd_get_client(struct rbd_device *rbd_dev, const char *mon_addr,
- char *options)
+static struct rbd_client *rbd_get_client(const char *mon_addr,
+ size_t mon_addr_len,
+ char *options)
{
struct rbd_client *rbdc;
struct ceph_options *opt;
- int ret;
struct rbd_options *rbd_opts;
rbd_opts = kzalloc(sizeof(*rbd_opts), GFP_KERNEL);
if (!rbd_opts)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
rbd_opts->notify_timeout = RBD_NOTIFY_TIMEOUT_DEFAULT;
- ret = ceph_parse_options(&opt, options, mon_addr,
- mon_addr + strlen(mon_addr), parse_rbd_opts_token, rbd_opts);
- if (ret < 0)
- goto done_err;
+ opt = ceph_parse_options(options, mon_addr,
+ mon_addr + mon_addr_len,
+ parse_rbd_opts_token, rbd_opts);
+ if (IS_ERR(opt)) {
+ kfree(rbd_opts);
+ return ERR_CAST(opt);
+ }
- spin_lock(&node_lock);
+ spin_lock(&rbd_client_list_lock);
rbdc = __rbd_client_find(opt);
if (rbdc) {
+ /* using an existing client */
+ kref_get(&rbdc->kref);
+ spin_unlock(&rbd_client_list_lock);
+
ceph_destroy_options(opt);
kfree(rbd_opts);
- /* using an existing client */
- kref_get(&rbdc->kref);
- rbd_dev->rbd_client = rbdc;
- rbd_dev->client = rbdc->client;
- spin_unlock(&node_lock);
- return 0;
+ return rbdc;
}
- spin_unlock(&node_lock);
+ spin_unlock(&rbd_client_list_lock);
rbdc = rbd_client_create(opt, rbd_opts);
- if (IS_ERR(rbdc)) {
- ret = PTR_ERR(rbdc);
- goto done_err;
- }
- rbd_dev->rbd_client = rbdc;
- rbd_dev->client = rbdc->client;
- return 0;
-done_err:
- kfree(rbd_opts);
- return ret;
+ if (IS_ERR(rbdc))
+ kfree(rbd_opts);
+
+ return rbdc;
}
/*
* Destroy ceph client
*
- * Caller must hold node_lock.
+ * Caller must hold rbd_client_list_lock.
*/
static void rbd_client_release(struct kref *kref)
{
@@ -428,11 +463,10 @@ static void rbd_client_release(struct kref *kref)
*/
static void rbd_put_client(struct rbd_device *rbd_dev)
{
- spin_lock(&node_lock);
+ spin_lock(&rbd_client_list_lock);
kref_put(&rbd_dev->rbd_client->kref, rbd_client_release);
- spin_unlock(&node_lock);
+ spin_unlock(&rbd_client_list_lock);
rbd_dev->rbd_client = NULL;
- rbd_dev->client = NULL;
}
/*
@@ -457,21 +491,19 @@ static int rbd_header_from_disk(struct rbd_image_header *header,
gfp_t gfp_flags)
{
int i;
- u32 snap_count = le32_to_cpu(ondisk->snap_count);
- int ret = -ENOMEM;
+ u32 snap_count;
- if (memcmp(ondisk, RBD_HEADER_TEXT, sizeof(RBD_HEADER_TEXT))) {
+ if (memcmp(ondisk, RBD_HEADER_TEXT, sizeof(RBD_HEADER_TEXT)))
return -ENXIO;
- }
- init_rwsem(&header->snap_rwsem);
- header->snap_names_len = le64_to_cpu(ondisk->snap_names_len);
+ snap_count = le32_to_cpu(ondisk->snap_count);
header->snapc = kmalloc(sizeof(struct ceph_snap_context) +
- snap_count *
- sizeof(struct rbd_image_snap_ondisk),
+ snap_count * sizeof (*ondisk),
gfp_flags);
if (!header->snapc)
return -ENOMEM;
+
+ header->snap_names_len = le64_to_cpu(ondisk->snap_names_len);
if (snap_count) {
header->snap_names = kmalloc(header->snap_names_len,
GFP_KERNEL);
@@ -498,8 +530,7 @@ static int rbd_header_from_disk(struct rbd_image_header *header,
header->snapc->num_snaps = snap_count;
header->total_snaps = snap_count;
- if (snap_count &&
- allocated_snaps == snap_count) {
+ if (snap_count && allocated_snaps == snap_count) {
for (i = 0; i < snap_count; i++) {
header->snapc->snaps[i] =
le64_to_cpu(ondisk->snaps[i].id);
@@ -518,7 +549,7 @@ err_names:
kfree(header->snap_names);
err_snapc:
kfree(header->snapc);
- return ret;
+ return -ENOMEM;
}
static int snap_index(struct rbd_image_header *header, int snap_num)
@@ -542,35 +573,34 @@ static int snap_by_name(struct rbd_image_header *header, const char *snap_name,
int i;
char *p = header->snap_names;
- for (i = 0; i < header->total_snaps; i++, p += strlen(p) + 1) {
- if (strcmp(snap_name, p) == 0)
- break;
- }
- if (i == header->total_snaps)
- return -ENOENT;
- if (seq)
- *seq = header->snapc->snaps[i];
+ for (i = 0; i < header->total_snaps; i++) {
+ if (!strcmp(snap_name, p)) {
- if (size)
- *size = header->snap_sizes[i];
+ /* Found it. Pass back its id and/or size */
- return i;
+ if (seq)
+ *seq = header->snapc->snaps[i];
+ if (size)
+ *size = header->snap_sizes[i];
+ return i;
+ }
+ p += strlen(p) + 1; /* Skip ahead to the next name */
+ }
+ return -ENOENT;
}
-static int rbd_header_set_snap(struct rbd_device *dev,
- const char *snap_name,
- u64 *size)
+static int rbd_header_set_snap(struct rbd_device *dev, u64 *size)
{
struct rbd_image_header *header = &dev->header;
struct ceph_snap_context *snapc = header->snapc;
int ret = -ENOENT;
- down_write(&header->snap_rwsem);
+ BUILD_BUG_ON(sizeof (dev->snap_name) < sizeof (RBD_SNAP_HEAD_NAME));
- if (!snap_name ||
- !*snap_name ||
- strcmp(snap_name, "-") == 0 ||
- strcmp(snap_name, RBD_SNAP_HEAD_NAME) == 0) {
+ down_write(&dev->header_rwsem);
+
+ if (!memcmp(dev->snap_name, RBD_SNAP_HEAD_NAME,
+ sizeof (RBD_SNAP_HEAD_NAME))) {
if (header->total_snaps)
snapc->seq = header->snap_seq;
else
@@ -580,7 +610,7 @@ static int rbd_header_set_snap(struct rbd_device *dev,
if (size)
*size = header->image_size;
} else {
- ret = snap_by_name(header, snap_name, &snapc->seq, size);
+ ret = snap_by_name(header, dev->snap_name, &snapc->seq, size);
if (ret < 0)
goto done;
@@ -590,7 +620,7 @@ static int rbd_header_set_snap(struct rbd_device *dev,
ret = 0;
done:
- up_write(&header->snap_rwsem);
+ up_write(&dev->header_rwsem);
return ret;
}
@@ -717,7 +747,7 @@ static struct bio *bio_chain_clone(struct bio **old, struct bio **next,
/* split the bio. We'll release it either in the next
call, or it will have to be released outside */
- bp = bio_split(old_chain, (len - total) / 512ULL);
+ bp = bio_split(old_chain, (len - total) / SECTOR_SIZE);
if (!bp)
goto err_out;
@@ -857,7 +887,7 @@ static int rbd_do_request(struct request *rq,
struct timespec mtime = CURRENT_TIME;
struct rbd_request *req_data;
struct ceph_osd_request_head *reqhead;
- struct rbd_image_header *header = &dev->header;
+ struct ceph_osd_client *osdc;
req_data = kzalloc(sizeof(*req_data), GFP_NOIO);
if (!req_data) {
@@ -874,15 +904,13 @@ static int rbd_do_request(struct request *rq,
dout("rbd_do_request obj=%s ofs=%lld len=%lld\n", obj, len, ofs);
- down_read(&header->snap_rwsem);
+ down_read(&dev->header_rwsem);
- req = ceph_osdc_alloc_request(&dev->client->osdc, flags,
- snapc,
- ops,
- false,
- GFP_NOIO, pages, bio);
+ osdc = &dev->rbd_client->client->osdc;
+ req = ceph_osdc_alloc_request(osdc, flags, snapc, ops,
+ false, GFP_NOIO, pages, bio);
if (!req) {
- up_read(&header->snap_rwsem);
+ up_read(&dev->header_rwsem);
ret = -ENOMEM;
goto done_pages;
}
@@ -909,27 +937,27 @@ static int rbd_do_request(struct request *rq,
layout->fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
layout->fl_pg_preferred = cpu_to_le32(-1);
layout->fl_pg_pool = cpu_to_le32(dev->poolid);
- ceph_calc_raw_layout(&dev->client->osdc, layout, snapid,
- ofs, &len, &bno, req, ops);
+ ceph_calc_raw_layout(osdc, layout, snapid, ofs, &len, &bno,
+ req, ops);
ceph_osdc_build_request(req, ofs, &len,
ops,
snapc,
&mtime,
req->r_oid, req->r_oid_len);
- up_read(&header->snap_rwsem);
+ up_read(&dev->header_rwsem);
if (linger_req) {
- ceph_osdc_set_request_linger(&dev->client->osdc, req);
+ ceph_osdc_set_request_linger(osdc, req);
*linger_req = req;
}
- ret = ceph_osdc_start_request(&dev->client->osdc, req, false);
+ ret = ceph_osdc_start_request(osdc, req, false);
if (ret < 0)
goto done_err;
if (!rbd_cb) {
- ret = ceph_osdc_wait_request(&dev->client->osdc, req);
+ ret = ceph_osdc_wait_request(osdc, req);
if (ver)
*ver = le64_to_cpu(req->r_reassert_version.version);
dout("reassert_ver=%lld\n",
@@ -1213,8 +1241,8 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
rc = __rbd_update_snaps(dev);
mutex_unlock(&ctl_mutex);
if (rc)
- pr_warning(DRV_NAME "%d got notification but failed to update"
- " snaps: %d\n", dev->major, rc);
+ pr_warning(RBD_DRV_NAME "%d got notification but failed to "
+ "update snaps: %d\n", dev->major, rc);
rbd_req_sync_notify_ack(dev, ver, notify_id, dev->obj_md_name);
}
@@ -1227,7 +1255,7 @@ static int rbd_req_sync_watch(struct rbd_device *dev,
u64 ver)
{
struct ceph_osd_req_op *ops;
- struct ceph_osd_client *osdc = &dev->client->osdc;
+ struct ceph_osd_client *osdc = &dev->rbd_client->client->osdc;
int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_WATCH, 0);
if (ret < 0)
@@ -1314,7 +1342,7 @@ static int rbd_req_sync_notify(struct rbd_device *dev,
const char *obj)
{
struct ceph_osd_req_op *ops;
- struct ceph_osd_client *osdc = &dev->client->osdc;
+ struct ceph_osd_client *osdc = &dev->rbd_client->client->osdc;
struct ceph_osd_event *event;
struct rbd_notify_info info;
int payload_len = sizeof(u32) + sizeof(u32);
@@ -1421,9 +1449,7 @@ static void rbd_rq_fn(struct request_queue *q)
struct request *rq;
struct bio_pair *bp = NULL;
- rq = blk_fetch_request(q);
-
- while (1) {
+ while ((rq = blk_fetch_request(q))) {
struct bio *bio;
struct bio *rq_bio, *next_bio = NULL;
bool do_write;
@@ -1441,32 +1467,32 @@ static void rbd_rq_fn(struct request_queue *q)
/* filter out block requests we don't understand */
if ((rq->cmd_type != REQ_TYPE_FS)) {
__blk_end_request_all(rq, 0);
- goto next;
+ continue;
}
/* deduce our operation (read, write) */
do_write = (rq_data_dir(rq) == WRITE);
size = blk_rq_bytes(rq);
- ofs = blk_rq_pos(rq) * 512ULL;
+ ofs = blk_rq_pos(rq) * SECTOR_SIZE;
rq_bio = rq->bio;
if (do_write && rbd_dev->read_only) {
__blk_end_request_all(rq, -EROFS);
- goto next;
+ continue;
}
spin_unlock_irq(q->queue_lock);
dout("%s 0x%x bytes at 0x%llx\n",
do_write ? "write" : "read",
- size, blk_rq_pos(rq) * 512ULL);
+ size, blk_rq_pos(rq) * SECTOR_SIZE);
num_segs = rbd_get_num_segments(&rbd_dev->header, ofs, size);
coll = rbd_alloc_coll(num_segs);
if (!coll) {
spin_lock_irq(q->queue_lock);
__blk_end_request_all(rq, -ENOMEM);
- goto next;
+ continue;
}
do {
@@ -1512,8 +1538,6 @@ next_seg:
if (bp)
bio_pair_release(bp);
spin_lock_irq(q->queue_lock);
-next:
- rq = blk_fetch_request(q);
}
}
@@ -1526,13 +1550,17 @@ static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
struct bio_vec *bvec)
{
struct rbd_device *rbd_dev = q->queuedata;
- unsigned int chunk_sectors = 1 << (rbd_dev->header.obj_order - 9);
- sector_t sector = bmd->bi_sector + get_start_sect(bmd->bi_bdev);
- unsigned int bio_sectors = bmd->bi_size >> 9;
+ unsigned int chunk_sectors;
+ sector_t sector;
+ unsigned int bio_sectors;
int max;
+ chunk_sectors = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
+ sector = bmd->bi_sector + get_start_sect(bmd->bi_bdev);
+ bio_sectors = bmd->bi_size >> SECTOR_SHIFT;
+
max = (chunk_sectors - ((sector & (chunk_sectors - 1))
- + bio_sectors)) << 9;
+ + bio_sectors)) << SECTOR_SHIFT;
if (max < 0)
max = 0; /* bio_add cannot handle a negative return */
if (max <= bvec->bv_len && bio_sectors == 0)
@@ -1565,15 +1593,16 @@ static int rbd_read_header(struct rbd_device *rbd_dev,
ssize_t rc;
struct rbd_image_header_ondisk *dh;
int snap_count = 0;
- u64 snap_names_len = 0;
u64 ver;
+ size_t len;
+ /*
+ * First reads the fixed-size header to determine the number
+ * of snapshots, then re-reads it, along with all snapshot
+ * records as well as their stored names.
+ */
+ len = sizeof (*dh);
while (1) {
- int len = sizeof(*dh) +
- snap_count * sizeof(struct rbd_image_snap_ondisk) +
- snap_names_len;
-
- rc = -ENOMEM;
dh = kmalloc(len, GFP_KERNEL);
if (!dh)
return -ENOMEM;
@@ -1588,21 +1617,22 @@ static int rbd_read_header(struct rbd_device *rbd_dev,
rc = rbd_header_from_disk(header, dh, snap_count, GFP_KERNEL);
if (rc < 0) {
- if (rc == -ENXIO) {
+ if (rc == -ENXIO)
pr_warning("unrecognized header format"
" for image %s", rbd_dev->obj);
- }
goto out_dh;
}
- if (snap_count != header->total_snaps) {
- snap_count = header->total_snaps;
- snap_names_len = header->snap_names_len;
- rbd_header_free(header);
- kfree(dh);
- continue;
- }
- break;
+ if (snap_count == header->total_snaps)
+ break;
+
+ snap_count = header->total_snaps;
+ len = sizeof (*dh) +
+ snap_count * sizeof(struct rbd_image_snap_ondisk) +
+ header->snap_names_len;
+
+ rbd_header_free(header);
+ kfree(dh);
}
header->obj_version = ver;
@@ -1623,13 +1653,14 @@ static int rbd_header_add_snap(struct rbd_device *dev,
int ret;
void *data, *p, *e;
u64 ver;
+ struct ceph_mon_client *monc;
/* we should create a snapshot only if we're pointing at the head */
if (dev->cur_snap)
return -EINVAL;
- ret = ceph_monc_create_snapid(&dev->client->monc, dev->poolid,
- &new_snapid);
+ monc = &dev->rbd_client->client->monc;
+ ret = ceph_monc_create_snapid(monc, dev->poolid, &new_snapid);
dout("created snapid=%lld\n", new_snapid);
if (ret < 0)
return ret;
@@ -1684,9 +1715,9 @@ static int __rbd_update_snaps(struct rbd_device *rbd_dev)
return ret;
/* resized? */
- set_capacity(rbd_dev->disk, h.image_size / 512ULL);
+ set_capacity(rbd_dev->disk, h.image_size / SECTOR_SIZE);
- down_write(&rbd_dev->header.snap_rwsem);
+ down_write(&rbd_dev->header_rwsem);
snap_seq = rbd_dev->header.snapc->seq;
if (rbd_dev->header.total_snaps &&
@@ -1711,7 +1742,7 @@ static int __rbd_update_snaps(struct rbd_device *rbd_dev)
ret = __rbd_init_snaps_header(rbd_dev);
- up_write(&rbd_dev->header.snap_rwsem);
+ up_write(&rbd_dev->header_rwsem);
return ret;
}
@@ -1721,6 +1752,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
struct gendisk *disk;
struct request_queue *q;
int rc;
+ u64 segment_size;
u64 total_size = 0;
/* contact OSD, request size info about the object being mapped */
@@ -1733,7 +1765,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
if (rc)
return rc;
- rc = rbd_header_set_snap(rbd_dev, rbd_dev->snap_name, &total_size);
+ rc = rbd_header_set_snap(rbd_dev, &total_size);
if (rc)
return rc;
@@ -1743,7 +1775,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
if (!disk)
goto out;
- snprintf(disk->disk_name, sizeof(disk->disk_name), DRV_NAME "%d",
+ snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
rbd_dev->id);
disk->major = rbd_dev->major;
disk->first_minor = 0;
@@ -1756,11 +1788,15 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
if (!q)
goto out_disk;
+ /* We use the default size, but let's be explicit about it. */
+ blk_queue_physical_block_size(q, SECTOR_SIZE);
+
/* set io sizes to object size */
- blk_queue_max_hw_sectors(q, rbd_obj_bytes(&rbd_dev->header) / 512ULL);
- blk_queue_max_segment_size(q, rbd_obj_bytes(&rbd_dev->header));
- blk_queue_io_min(q, rbd_obj_bytes(&rbd_dev->header));
- blk_queue_io_opt(q, rbd_obj_bytes(&rbd_dev->header));
+ segment_size = rbd_obj_bytes(&rbd_dev->header);
+ blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
+ blk_queue_max_segment_size(q, segment_size);
+ blk_queue_io_min(q, segment_size);
+ blk_queue_io_opt(q, segment_size);
blk_queue_merge_bvec(q, rbd_merge_bvec);
disk->queue = q;
@@ -1771,7 +1807,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
rbd_dev->q = q;
/* finally, announce the disk to the world */
- set_capacity(disk, total_size / 512ULL);
+ set_capacity(disk, total_size / SECTOR_SIZE);
add_disk(disk);
pr_info("%s: added with size 0x%llx\n",
@@ -1788,10 +1824,15 @@ out:
sysfs
*/
+static struct rbd_device *dev_to_rbd_dev(struct device *dev)
+{
+ return container_of(dev, struct rbd_device, dev);
+}
+
static ssize_t rbd_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct rbd_device *rbd_dev = dev_to_rbd(dev);
+ struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
return sprintf(buf, "%llu\n", (unsigned long long)rbd_dev->header.image_size);
}
@@ -1799,7 +1840,7 @@ static ssize_t rbd_size_show(struct device *dev,
static ssize_t rbd_major_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct rbd_device *rbd_dev = dev_to_rbd(dev);
+ struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
return sprintf(buf, "%d\n", rbd_dev->major);
}
@@ -1807,15 +1848,16 @@ static ssize_t rbd_major_show(struct device *dev,
static ssize_t rbd_client_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct rbd_device *rbd_dev = dev_to_rbd(dev);
+ struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
- return sprintf(buf, "client%lld\n", ceph_client_id(rbd_dev->client));
+ return sprintf(buf, "client%lld\n",
+ ceph_client_id(rbd_dev->rbd_client->client));
}
static ssize_t rbd_pool_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct rbd_device *rbd_dev = dev_to_rbd(dev);
+ struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
return sprintf(buf, "%s\n", rbd_dev->pool_name);
}
@@ -1823,7 +1865,7 @@ static ssize_t rbd_pool_show(struct device *dev,
static ssize_t rbd_name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct rbd_device *rbd_dev = dev_to_rbd(dev);
+ struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
return sprintf(buf, "%s\n", rbd_dev->obj);
}
@@ -1832,7 +1874,7 @@ static ssize_t rbd_snap_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct rbd_device *rbd_dev = dev_to_rbd(dev);
+ struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
return sprintf(buf, "%s\n", rbd_dev->snap_name);
}
@@ -1842,7 +1884,7 @@ static ssize_t rbd_image_refresh(struct device *dev,
const char *buf,
size_t size)
{
- struct rbd_device *rbd_dev = dev_to_rbd(dev);
+ struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
int rc;
int ret = size;
@@ -1907,7 +1949,7 @@ static ssize_t rbd_snap_size_show(struct device *dev,
{
struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
- return sprintf(buf, "%lld\n", (long long)snap->size);
+ return sprintf(buf, "%zd\n", snap->size);
}
static ssize_t rbd_snap_id_show(struct device *dev,
@@ -1916,7 +1958,7 @@ static ssize_t rbd_snap_id_show(struct device *dev,
{
struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
- return sprintf(buf, "%lld\n", (long long)snap->id);
+ return sprintf(buf, "%llu\n", (unsigned long long) snap->id);
}
static DEVICE_ATTR(snap_size, S_IRUGO, rbd_snap_size_show, NULL);
@@ -2088,19 +2130,9 @@ static int __rbd_init_snaps_header(struct rbd_device *rbd_dev)
return 0;
}
-
-static void rbd_root_dev_release(struct device *dev)
-{
-}
-
-static struct device rbd_root_dev = {
- .init_name = "rbd",
- .release = rbd_root_dev_release,
-};
-
static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
{
- int ret = -ENOMEM;
+ int ret;
struct device *dev;
struct rbd_snap *snap;
@@ -2114,7 +2146,7 @@ static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
dev_set_name(dev, "%d", rbd_dev->id);
ret = device_register(dev);
if (ret < 0)
- goto done_free;
+ goto out;
list_for_each_entry(snap, &rbd_dev->snaps, node) {
ret = rbd_register_snap_dev(rbd_dev, snap,
@@ -2122,10 +2154,7 @@ static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
if (ret < 0)
break;
}
-
- mutex_unlock(&ctl_mutex);
- return 0;
-done_free:
+out:
mutex_unlock(&ctl_mutex);
return ret;
}
@@ -2154,104 +2183,250 @@ static int rbd_init_watch_dev(struct rbd_device *rbd_dev)
return ret;
}
+static atomic64_t rbd_id_max = ATOMIC64_INIT(0);
+
+/*
+ * Get a unique rbd identifier for the given new rbd_dev, and add
+ * the rbd_dev to the global list. The minimum rbd id is 1.
+ */
+static void rbd_id_get(struct rbd_device *rbd_dev)
+{
+ rbd_dev->id = atomic64_inc_return(&rbd_id_max);
+
+ spin_lock(&rbd_dev_list_lock);
+ list_add_tail(&rbd_dev->node, &rbd_dev_list);
+ spin_unlock(&rbd_dev_list_lock);
+}
+
+/*
+ * Remove an rbd_dev from the global list, and record that its
+ * identifier is no longer in use.
+ */
+static void rbd_id_put(struct rbd_device *rbd_dev)
+{
+ struct list_head *tmp;
+ int rbd_id = rbd_dev->id;
+ int max_id;
+
+ BUG_ON(rbd_id < 1);
+
+ spin_lock(&rbd_dev_list_lock);
+ list_del_init(&rbd_dev->node);
+
+ /*
+ * If the id being "put" is not the current maximum, there
+ * is nothing special we need to do.
+ */
+ if (rbd_id != atomic64_read(&rbd_id_max)) {
+ spin_unlock(&rbd_dev_list_lock);
+ return;
+ }
+
+ /*
+ * We need to update the current maximum id. Search the
+ * list to find out what it is. We're more likely to find
+ * the maximum at the end, so search the list backward.
+ */
+ max_id = 0;
+ list_for_each_prev(tmp, &rbd_dev_list) {
+ struct rbd_device *rbd_dev;
+
+ rbd_dev = list_entry(tmp, struct rbd_device, node);
+ if (rbd_dev->id > max_id)
+ max_id = rbd_dev->id;
+ }
+ spin_unlock(&rbd_dev_list_lock);
+
+ /*
+ * The max id could have been updated by rbd_id_get(), in
+ * which case it now accurately reflects the new maximum.
+ * Be careful not to overwrite the maximum value in that
+ * case.
+ */
+ atomic64_cmpxchg(&rbd_id_max, rbd_id, max_id);
+}
+
+/*
+ * Skips over white space at *buf, and updates *buf to point to the
+ * first found non-space character (if any). Returns the length of
+ * the token (string of non-white space characters) found. Note
+ * that *buf must be terminated with '\0'.
+ */
+static inline size_t next_token(const char **buf)
+{
+ /*
+ * These are the characters that produce nonzero for
+ * isspace() in the "C" and "POSIX" locales.
+ */
+ const char *spaces = " \f\n\r\t\v";
+
+ *buf += strspn(*buf, spaces); /* Find start of token */
+
+ return strcspn(*buf, spaces); /* Return token length */
+}
+
+/*
+ * Finds the next token in *buf, and if the provided token buffer is
+ * big enough, copies the found token into it. The result, if
+ * copied, is guaranteed to be terminated with '\0'. Note that *buf
+ * must be terminated with '\0' on entry.
+ *
+ * Returns the length of the token found (not including the '\0').
+ * Return value will be 0 if no token is found, and it will be >=
+ * token_size if the token would not fit.
+ *
+ * The *buf pointer will be updated to point beyond the end of the
+ * found token. Note that this occurs even if the token buffer is
+ * too small to hold it.
+ */
+static inline size_t copy_token(const char **buf,
+ char *token,
+ size_t token_size)
+{
+ size_t len;
+
+ len = next_token(buf);
+ if (len < token_size) {
+ memcpy(token, *buf, len);
+ *(token + len) = '\0';
+ }
+ *buf += len;
+
+ return len;
+}
+
+/*
+ * This fills in the pool_name, obj, obj_len, snap_name, and
+ * obj_md_name fields of the given rbd_dev, based
+ * on the list of monitor addresses and other options provided via
+ * /sys/bus/rbd/add.
+ */
+static int rbd_add_parse_args(struct rbd_device *rbd_dev,
+ const char *buf,
+ const char **mon_addrs,
+ size_t *mon_addrs_size,
+ char *options,
+ size_t options_size)
+{
+ size_t len;
+
+ /* The first four tokens are required */
+
+ len = next_token(&buf);
+ if (!len)
+ return -EINVAL;
+ *mon_addrs_size = len + 1;
+ *mon_addrs = buf;
+
+ buf += len;
+
+ len = copy_token(&buf, options, options_size);
+ if (!len || len >= options_size)
+ return -EINVAL;
+
+ len = copy_token(&buf, rbd_dev->pool_name, sizeof (rbd_dev->pool_name));
+ if (!len || len >= sizeof (rbd_dev->pool_name))
+ return -EINVAL;
+
+ len = copy_token(&buf, rbd_dev->obj, sizeof (rbd_dev->obj));
+ if (!len || len >= sizeof (rbd_dev->obj))
+ return -EINVAL;
+
+ /* We have the object length in hand, save it. */
+
+ rbd_dev->obj_len = len;
+
+ BUILD_BUG_ON(RBD_MAX_MD_NAME_LEN
+ < RBD_MAX_OBJ_NAME_LEN + sizeof (RBD_SUFFIX));
+ sprintf(rbd_dev->obj_md_name, "%s%s", rbd_dev->obj, RBD_SUFFIX);
+
+ /*
+ * The snapshot name is optional, but it's an error if it's
+ * too long. If no snapshot is supplied, fill in the default.
+ */
+ len = copy_token(&buf, rbd_dev->snap_name, sizeof (rbd_dev->snap_name));
+ if (!len)
+ memcpy(rbd_dev->snap_name, RBD_SNAP_HEAD_NAME,
+ sizeof (RBD_SNAP_HEAD_NAME));
+ else if (len >= sizeof (rbd_dev->snap_name))
+ return -EINVAL;
+
+ return 0;
+}
+
static ssize_t rbd_add(struct bus_type *bus,
const char *buf,
size_t count)
{
- struct ceph_osd_client *osdc;
struct rbd_device *rbd_dev;
- ssize_t rc = -ENOMEM;
- int irc, new_id = 0;
- struct list_head *tmp;
- char *mon_dev_name;
- char *options;
+ const char *mon_addrs = NULL;
+ size_t mon_addrs_size = 0;
+ char *options = NULL;
+ struct ceph_osd_client *osdc;
+ int rc = -ENOMEM;
if (!try_module_get(THIS_MODULE))
return -ENODEV;
- mon_dev_name = kmalloc(RBD_MAX_OPT_LEN, GFP_KERNEL);
- if (!mon_dev_name)
- goto err_out_mod;
-
- options = kmalloc(RBD_MAX_OPT_LEN, GFP_KERNEL);
- if (!options)
- goto err_mon_dev;
-
- /* new rbd_device object */
rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
if (!rbd_dev)
- goto err_out_opt;
+ goto err_nomem;
+ options = kmalloc(count, GFP_KERNEL);
+ if (!options)
+ goto err_nomem;
/* static rbd_device initialization */
spin_lock_init(&rbd_dev->lock);
INIT_LIST_HEAD(&rbd_dev->node);
INIT_LIST_HEAD(&rbd_dev->snaps);
+ init_rwsem(&rbd_dev->header_rwsem);
- init_rwsem(&rbd_dev->header.snap_rwsem);
/* generate unique id: find highest unique id, add one */
- mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
-
- list_for_each(tmp, &rbd_dev_list) {
- struct rbd_device *rbd_dev;
+ rbd_id_get(rbd_dev);
- rbd_dev = list_entry(tmp, struct rbd_device, node);
- if (rbd_dev->id >= new_id)
- new_id = rbd_dev->id + 1;
- }
-
- rbd_dev->id = new_id;
-
- /* add to global list */
- list_add_tail(&rbd_dev->node, &rbd_dev_list);
+ /* Fill in the device name, now that we have its id. */
+ BUILD_BUG_ON(DEV_NAME_LEN
+ < sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
+ sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->id);
/* parse add command */
- if (sscanf(buf, "%" __stringify(RBD_MAX_OPT_LEN) "s "
- "%" __stringify(RBD_MAX_OPT_LEN) "s "
- "%" __stringify(RBD_MAX_POOL_NAME_LEN) "s "
- "%" __stringify(RBD_MAX_OBJ_NAME_LEN) "s"
- "%" __stringify(RBD_MAX_SNAP_NAME_LEN) "s",
- mon_dev_name, options, rbd_dev->pool_name,
- rbd_dev->obj, rbd_dev->snap_name) < 4) {
- rc = -EINVAL;
- goto err_out_slot;
- }
-
- if (rbd_dev->snap_name[0] == 0)
- rbd_dev->snap_name[0] = '-';
-
- rbd_dev->obj_len = strlen(rbd_dev->obj);
- snprintf(rbd_dev->obj_md_name, sizeof(rbd_dev->obj_md_name), "%s%s",
- rbd_dev->obj, RBD_SUFFIX);
-
- /* initialize rest of new object */
- snprintf(rbd_dev->name, DEV_NAME_LEN, DRV_NAME "%d", rbd_dev->id);
- rc = rbd_get_client(rbd_dev, mon_dev_name, options);
- if (rc < 0)
- goto err_out_slot;
+ rc = rbd_add_parse_args(rbd_dev, buf, &mon_addrs, &mon_addrs_size,
+ options, count);
+ if (rc)
+ goto err_put_id;
- mutex_unlock(&ctl_mutex);
+ rbd_dev->rbd_client = rbd_get_client(mon_addrs, mon_addrs_size - 1,
+ options);
+ if (IS_ERR(rbd_dev->rbd_client)) {
+ rc = PTR_ERR(rbd_dev->rbd_client);
+ goto err_put_id;
+ }
/* pick the pool */
- osdc = &rbd_dev->client->osdc;
+ osdc = &rbd_dev->rbd_client->client->osdc;
rc = ceph_pg_poolid_by_name(osdc->osdmap, rbd_dev->pool_name);
if (rc < 0)
goto err_out_client;
rbd_dev->poolid = rc;
/* register our block device */
- irc = register_blkdev(0, rbd_dev->name);
- if (irc < 0) {
- rc = irc;
+ rc = register_blkdev(0, rbd_dev->name);
+ if (rc < 0)
goto err_out_client;
- }
- rbd_dev->major = irc;
+ rbd_dev->major = rc;
rc = rbd_bus_add_dev(rbd_dev);
if (rc)
goto err_out_blkdev;
- /* set up and announce blkdev mapping */
+ /*
+ * At this point cleanup in the event of an error is the job
+ * of the sysfs code (initiated by rbd_bus_del_dev()).
+ *
+ * Set up and announce blkdev mapping.
+ */
rc = rbd_init_disk(rbd_dev);
if (rc)
goto err_out_bus;
@@ -2263,35 +2438,26 @@ static ssize_t rbd_add(struct bus_type *bus,
return count;
err_out_bus:
- mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
- list_del_init(&rbd_dev->node);
- mutex_unlock(&ctl_mutex);
-
/* this will also clean up rest of rbd_dev stuff */
rbd_bus_del_dev(rbd_dev);
kfree(options);
- kfree(mon_dev_name);
return rc;
err_out_blkdev:
unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_client:
rbd_put_client(rbd_dev);
- mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
-err_out_slot:
- list_del_init(&rbd_dev->node);
- mutex_unlock(&ctl_mutex);
-
- kfree(rbd_dev);
-err_out_opt:
+err_put_id:
+ rbd_id_put(rbd_dev);
+err_nomem:
kfree(options);
-err_mon_dev:
- kfree(mon_dev_name);
-err_out_mod:
+ kfree(rbd_dev);
+
dout("Error adding device %s\n", buf);
module_put(THIS_MODULE);
- return rc;
+
+ return (ssize_t) rc;
}
static struct rbd_device *__rbd_get_dev(unsigned long id)
@@ -2299,22 +2465,28 @@ static struct rbd_device *__rbd_get_dev(unsigned long id)
struct list_head *tmp;
struct rbd_device *rbd_dev;
+ spin_lock(&rbd_dev_list_lock);
list_for_each(tmp, &rbd_dev_list) {
rbd_dev = list_entry(tmp, struct rbd_device, node);
- if (rbd_dev->id == id)
+ if (rbd_dev->id == id) {
+ spin_unlock(&rbd_dev_list_lock);
return rbd_dev;
+ }
}
+ spin_unlock(&rbd_dev_list_lock);
return NULL;
}
static void rbd_dev_release(struct device *dev)
{
- struct rbd_device *rbd_dev =
- container_of(dev, struct rbd_device, dev);
+ struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
- if (rbd_dev->watch_request)
- ceph_osdc_unregister_linger_request(&rbd_dev->client->osdc,
+ if (rbd_dev->watch_request) {
+ struct ceph_client *client = rbd_dev->rbd_client->client;
+
+ ceph_osdc_unregister_linger_request(&client->osdc,
rbd_dev->watch_request);
+ }
if (rbd_dev->watch_event)
rbd_req_sync_unwatch(rbd_dev, rbd_dev->obj_md_name);
@@ -2323,6 +2495,9 @@ static void rbd_dev_release(struct device *dev)
/* clean up and free blkdev */
rbd_free_disk(rbd_dev);
unregister_blkdev(rbd_dev->major, rbd_dev->name);
+
+ /* done with the id, and with the rbd_dev */
+ rbd_id_put(rbd_dev);
kfree(rbd_dev);
/* release module ref */
@@ -2355,8 +2530,6 @@ static ssize_t rbd_remove(struct bus_type *bus,
goto done;
}
- list_del_init(&rbd_dev->node);
-
__rbd_remove_all_snaps(rbd_dev);
rbd_bus_del_dev(rbd_dev);
@@ -2370,7 +2543,7 @@ static ssize_t rbd_snap_add(struct device *dev,
const char *buf,
size_t count)
{
- struct rbd_device *rbd_dev = dev_to_rbd(dev);
+ struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
int ret;
char *name = kmalloc(count + 1, GFP_KERNEL);
if (!name)
@@ -2406,12 +2579,6 @@ err_unlock:
return ret;
}
-static struct bus_attribute rbd_bus_attrs[] = {
- __ATTR(add, S_IWUSR, NULL, rbd_add),
- __ATTR(remove, S_IWUSR, NULL, rbd_remove),
- __ATTR_NULL
-};
-
/*
* create control files in sysfs
* /sys/bus/rbd/...
@@ -2420,21 +2587,21 @@ static int rbd_sysfs_init(void)
{
int ret;
- rbd_bus_type.bus_attrs = rbd_bus_attrs;
-
- ret = bus_register(&rbd_bus_type);
- if (ret < 0)
+ ret = device_register(&rbd_root_dev);
+ if (ret < 0)
return ret;
- ret = device_register(&rbd_root_dev);
+ ret = bus_register(&rbd_bus_type);
+ if (ret < 0)
+ device_unregister(&rbd_root_dev);
return ret;
}
static void rbd_sysfs_cleanup(void)
{
- device_unregister(&rbd_root_dev);
bus_unregister(&rbd_bus_type);
+ device_unregister(&rbd_root_dev);
}
int __init rbd_init(void)
@@ -2444,8 +2611,7 @@ int __init rbd_init(void)
rc = rbd_sysfs_init();
if (rc)
return rc;
- spin_lock_init(&node_lock);
- pr_info("loaded " DRV_NAME_LONG "\n");
+ pr_info("loaded " RBD_DRV_NAME_LONG "\n");
return 0;
}
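
For context, rbd_add_parse_args() above expects the string written to
/sys/bus/rbd/add to carry four required whitespace-separated tokens plus an
optional fifth: monitor address(es), ceph options, pool name, image (object)
name, and snapshot name. A hypothetical add line might look like:

    1.2.3.4:6789 name=admin rbd myimage mysnap

When the snapshot token is omitted, copy_token() returns 0 and the device
maps the image head (RBD_SNAP_HEAD_NAME) instead.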
diff --git a/drivers/block/rbd_types.h b/drivers/block/rbd_types.h
index fc6c678aa2cb..950708688f17 100644
--- a/drivers/block/rbd_types.h
+++ b/drivers/block/rbd_types.h
@@ -41,10 +41,6 @@
#define RBD_HEADER_SIGNATURE "RBD"
#define RBD_HEADER_VERSION "001.005"
-struct rbd_info {
- __le64 max_id;
-} __attribute__ ((packed));
-
struct rbd_image_snap_ondisk {
__le64 id;
__le64 image_size;
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index b757fac3cd1f..a07a5caa599c 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -26,6 +26,8 @@
#include <asm/io.h>
+#include <plat/cpu.h>
+
#define RNG_OUT_REG 0x00 /* Output register */
#define RNG_STAT_REG 0x04 /* Status register
[0] = STAT_BUSY */
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 9b3cd08cd0ed..165e1febae53 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -8,3 +8,40 @@ config HAVE_CLK_PREPARE
config HAVE_MACH_CLKDEV
bool
+
+config COMMON_CLK
+ bool
+ select HAVE_CLK_PREPARE
+ ---help---
+ The common clock framework is a single definition of struct
+ clk, useful across many platforms, as well as an
+ implementation of the clock API in include/linux/clk.h.
+ Architectures utilizing the common struct clk should select
+ this option.
+
+menu "Common Clock Framework"
+ depends on COMMON_CLK
+
+config COMMON_CLK_DISABLE_UNUSED
+ bool "Disabled unused clocks at boot"
+ depends on COMMON_CLK
+ ---help---
+ Traverses the entire clock tree and disables any clocks that are
+ enabled in hardware but have not been enabled by any device drivers.
+ This saves power and keeps the software model of the clock in line
+ with reality.
+
+ If in doubt, say "N".
+
+config COMMON_CLK_DEBUG
+ bool "DebugFS representation of clock tree"
+ depends on COMMON_CLK
+ select DEBUG_FS
+ ---help---
+ Creates a directory hierarchy in debugfs for visualizing the clk
+ tree structure. Each directory contains read-only members
+ that export information specific to that clk node: clk_rate,
+ clk_flags, clk_prepare_count, clk_enable_count &
+ clk_notifier_count.
+
+endmenu
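
As the help text notes, COMMON_CLK itself carries no prompt; a platform opts
in by putting "select COMMON_CLK" in its own Kconfig entry, which in turn
pulls in HAVE_CLK_PREPARE via the select above.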
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 07613fa172c9..1f736bc11c4b 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -1,2 +1,4 @@
obj-$(CONFIG_CLKDEV_LOOKUP) += clkdev.o
+obj-$(CONFIG_COMMON_CLK) += clk.o clk-fixed-rate.o clk-gate.o \
+ clk-mux.o clk-divider.o
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
new file mode 100644
index 000000000000..d5ac6a75ea57
--- /dev/null
+++ b/drivers/clk/clk-divider.c
@@ -0,0 +1,200 @@
+/*
+ * Copyright (C) 2011 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+ * Copyright (C) 2011 Richard Zhao, Linaro <richard.zhao@linaro.org>
+ * Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Adjustable divider clock implementation
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/string.h>
+
+/*
+ * DOC: basic adjustable divider clock that cannot gate
+ *
+ * Traits of this clock:
+ * prepare - clk_prepare only ensures that parents are prepared
+ * enable - clk_enable only ensures that parents are enabled
+ * rate - rate is adjustable. clk->rate = parent->rate / divisor
+ * parent - fixed parent. No clk_set_parent support
+ */
+
+#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
+
+#define div_mask(d) ((1 << (d->width)) - 1)
+
+static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ unsigned int div;
+
+ div = readl(divider->reg) >> divider->shift;
+ div &= div_mask(divider);
+
+ if (!(divider->flags & CLK_DIVIDER_ONE_BASED))
+ div++;
+
+ return parent_rate / div;
+}
+EXPORT_SYMBOL_GPL(clk_divider_recalc_rate);
+
+/*
+ * The reverse of DIV_ROUND_UP: The maximum number which
+ * divided by m is r
+ */
+#define MULT_ROUND_UP(r, m) ((r) * (m) + (m) - 1)
+
+static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
+ unsigned long *best_parent_rate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ int i, bestdiv = 0;
+ unsigned long parent_rate, best = 0, now, maxdiv;
+
+ if (!rate)
+ rate = 1;
+
+ maxdiv = (1 << divider->width);
+
+ if (divider->flags & CLK_DIVIDER_ONE_BASED)
+ maxdiv--;
+
+ if (!best_parent_rate) {
+ parent_rate = __clk_get_rate(__clk_get_parent(hw->clk));
+ bestdiv = DIV_ROUND_UP(parent_rate, rate);
+ bestdiv = bestdiv == 0 ? 1 : bestdiv;
+ bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv;
+ return bestdiv;
+ }
+
+ /*
+ * The maximum divider we can use without overflowing
+ * unsigned long in rate * i below
+ */
+ maxdiv = min(ULONG_MAX / rate, maxdiv);
+
+ for (i = 1; i <= maxdiv; i++) {
+ parent_rate = __clk_round_rate(__clk_get_parent(hw->clk),
+ MULT_ROUND_UP(rate, i));
+ now = parent_rate / i;
+ if (now <= rate && now > best) {
+ bestdiv = i;
+ best = now;
+ *best_parent_rate = parent_rate;
+ }
+ }
+
+ if (!bestdiv) {
+ bestdiv = (1 << divider->width);
+ if (divider->flags & CLK_DIVIDER_ONE_BASED)
+ bestdiv--;
+ *best_parent_rate = __clk_round_rate(__clk_get_parent(hw->clk), 1);
+ }
+
+ return bestdiv;
+}
+
+static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ int div;
+ div = clk_divider_bestdiv(hw, rate, prate);
+
+ if (prate)
+ return *prate / div;
+ else {
+ unsigned long r;
+ r = __clk_get_rate(__clk_get_parent(hw->clk));
+ return r / div;
+ }
+}
+EXPORT_SYMBOL_GPL(clk_divider_round_rate);
+
+static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ unsigned int div;
+ unsigned long flags = 0;
+ u32 val;
+
+ div = __clk_get_rate(__clk_get_parent(hw->clk)) / rate;
+
+ if (!(divider->flags & CLK_DIVIDER_ONE_BASED))
+ div--;
+
+ if (div > div_mask(divider))
+ div = div_mask(divider);
+
+ if (divider->lock)
+ spin_lock_irqsave(divider->lock, flags);
+
+ val = readl(divider->reg);
+ val &= ~(div_mask(divider) << divider->shift);
+ val |= div << divider->shift;
+ writel(val, divider->reg);
+
+ if (divider->lock)
+ spin_unlock_irqrestore(divider->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(clk_divider_set_rate);
+
+struct clk_ops clk_divider_ops = {
+ .recalc_rate = clk_divider_recalc_rate,
+ .round_rate = clk_divider_round_rate,
+ .set_rate = clk_divider_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_divider_ops);
+
+struct clk *clk_register_divider(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ void __iomem *reg, u8 shift, u8 width,
+ u8 clk_divider_flags, spinlock_t *lock)
+{
+ struct clk_divider *div;
+ struct clk *clk;
+
+ div = kzalloc(sizeof(struct clk_divider), GFP_KERNEL);
+
+ if (!div) {
+ pr_err("%s: could not allocate divider clk\n", __func__);
+ return NULL;
+ }
+
+ /* struct clk_divider assignments */
+ div->reg = reg;
+ div->shift = shift;
+ div->width = width;
+ div->flags = clk_divider_flags;
+ div->lock = lock;
+
+ if (parent_name) {
+ div->parent[0] = kstrdup(parent_name, GFP_KERNEL);
+ if (!div->parent[0])
+ goto out;
+ }
+
+ clk = clk_register(dev, name,
+ &clk_divider_ops, &div->hw,
+ div->parent,
+ (parent_name ? 1 : 0),
+ flags);
+ if (clk)
+ return clk;
+
+out:
+ kfree(div->parent[0]);
+ kfree(div);
+
+ return NULL;
+}
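
A minimal sketch of how a platform clock driver might use the divider helper
above (the register offset, bit layout, and clock names are hypothetical, and
"pll1" is assumed to be an already-registered parent):

    #include <linux/kernel.h>
    #include <linux/clk-provider.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(hyp_clk_lock);  /* shared by fields in one register */

    static void hyp_register_uart_div(void __iomem *base)
    {
            struct clk *clk;

            /*
             * Divisor field in bits [6:4] of the register at base + 0x10;
             * without CLK_DIVIDER_ONE_BASED the hardware divides by the
             * field value plus one, matching clk_divider_recalc_rate().
             */
            clk = clk_register_divider(NULL, "uart_div", "pll1", 0,
                                       base + 0x10, 4, 3, 0, &hyp_clk_lock);
            if (!clk)   /* this version reports failure with NULL */
                    pr_err("uart_div registration failed\n");
    }

Passing a spinlock lets several clocks that share one register serialize
their read-modify-write sequences, as clk_divider_set_rate() does above.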
diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c
new file mode 100644
index 000000000000..90c79fb5d1bd
--- /dev/null
+++ b/drivers/clk/clk-fixed-rate.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
+ * Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Fixed rate clock implementation
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+
+/*
+ * DOC: basic fixed-rate clock that cannot gate
+ *
+ * Traits of this clock:
+ * prepare - clk_(un)prepare only ensures parents are prepared
+ * enable - clk_enable only ensures parents are enabled
+ * rate - rate is always a fixed value. No clk_set_rate support
+ * parent - fixed parent. No clk_set_parent support
+ */
+
+#define to_clk_fixed_rate(_hw) container_of(_hw, struct clk_fixed_rate, hw)
+
+static unsigned long clk_fixed_rate_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return to_clk_fixed_rate(hw)->fixed_rate;
+}
+EXPORT_SYMBOL_GPL(clk_fixed_rate_recalc_rate);
+
+struct clk_ops clk_fixed_rate_ops = {
+ .recalc_rate = clk_fixed_rate_recalc_rate,
+};
+EXPORT_SYMBOL_GPL(clk_fixed_rate_ops);
+
+struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ unsigned long fixed_rate)
+{
+ struct clk_fixed_rate *fixed;
+ char **parent_names = NULL;
+ size_t len;
+
+ fixed = kzalloc(sizeof(struct clk_fixed_rate), GFP_KERNEL);
+
+ if (!fixed) {
+ pr_err("%s: could not allocate fixed clk\n", __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* struct clk_fixed_rate assignments */
+ fixed->fixed_rate = fixed_rate;
+
+ if (parent_name) {
+ parent_names = kmalloc(sizeof(char *), GFP_KERNEL);
+
+ if (!parent_names)
+ goto err;
+
+ /* the copy must cover the name plus its terminating NUL */
+ len = strlen(parent_name) + 1;
+
+ parent_names[0] = kmalloc(len, GFP_KERNEL);
+
+ if (!parent_names[0])
+ goto err;
+
+ memcpy(parent_names[0], parent_name, len);
+ }
+
+ return clk_register(dev, name,
+ &clk_fixed_rate_ops, &fixed->hw,
+ parent_names,
+ (parent_name ? 1 : 0),
+ flags);
+
+err:
+ if (parent_names)
+ kfree(parent_names[0]);
+ kfree(parent_names);
+ kfree(fixed);
+ return ERR_PTR(-ENOMEM);
+}
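
A fixed-rate clock typically models a board oscillator. A minimal sketch
under the same caveats (the 24 MHz rate and "osc24m" name are assumptions):

    #include <linux/kernel.h>
    #include <linux/clk-provider.h>
    #include <linux/err.h>

    static void hyp_register_osc(void)
    {
            struct clk *clk;

            /* root clock: no parent, and the rate can never change */
            clk = clk_register_fixed_rate(NULL, "osc24m", NULL, 0, 24000000);
            if (IS_ERR_OR_NULL(clk))
                    pr_err("osc24m registration failed\n");
    }

Unlike the divider helper, this one returns an ERR_PTR on its own allocation
failures, hence the combined IS_ERR_OR_NULL() check.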
diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c
new file mode 100644
index 000000000000..b5902e2ef2fd
--- /dev/null
+++ b/drivers/clk/clk-gate.c
@@ -0,0 +1,150 @@
+/*
+ * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
+ * Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Gated clock implementation
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/string.h>
+
+/**
+ * DOC: basic gateable clock which can gate and ungate its output
+ *
+ * Traits of this clock:
+ * prepare - clk_(un)prepare only ensures parent is (un)prepared
+ * enable - clk_enable and clk_disable are functional & control gating
+ * rate - inherits rate from parent. No clk_set_rate support
+ * parent - fixed parent. No clk_set_parent support
+ */
+
+#define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw)
+
+static void clk_gate_set_bit(struct clk_gate *gate)
+{
+ u32 reg;
+ unsigned long flags = 0;
+
+ if (gate->lock)
+ spin_lock_irqsave(gate->lock, flags);
+
+ reg = readl(gate->reg);
+ reg |= BIT(gate->bit_idx);
+ writel(reg, gate->reg);
+
+ if (gate->lock)
+ spin_unlock_irqrestore(gate->lock, flags);
+}
+
+static void clk_gate_clear_bit(struct clk_gate *gate)
+{
+ u32 reg;
+ unsigned long flags = 0;
+
+ if (gate->lock)
+ spin_lock_irqsave(gate->lock, flags);
+
+ reg = readl(gate->reg);
+ reg &= ~BIT(gate->bit_idx);
+ writel(reg, gate->reg);
+
+ if (gate->lock)
+ spin_unlock_irqrestore(gate->lock, flags);
+}
+
+static int clk_gate_enable(struct clk_hw *hw)
+{
+ struct clk_gate *gate = to_clk_gate(hw);
+
+ if (gate->flags & CLK_GATE_SET_TO_DISABLE)
+ clk_gate_clear_bit(gate);
+ else
+ clk_gate_set_bit(gate);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(clk_gate_enable);
+
+static void clk_gate_disable(struct clk_hw *hw)
+{
+ struct clk_gate *gate = to_clk_gate(hw);
+
+ if (gate->flags & CLK_GATE_SET_TO_DISABLE)
+ clk_gate_set_bit(gate);
+ else
+ clk_gate_clear_bit(gate);
+}
+EXPORT_SYMBOL_GPL(clk_gate_disable);
+
+static int clk_gate_is_enabled(struct clk_hw *hw)
+{
+ u32 reg;
+ struct clk_gate *gate = to_clk_gate(hw);
+
+ reg = readl(gate->reg);
+
+ /* if a set bit disables this clk, flip it before masking */
+ if (gate->flags & CLK_GATE_SET_TO_DISABLE)
+ reg ^= BIT(gate->bit_idx);
+
+ reg &= BIT(gate->bit_idx);
+
+ return reg ? 1 : 0;
+}
+EXPORT_SYMBOL_GPL(clk_gate_is_enabled);
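+
+/*
+ * Worked example of the flip-before-mask logic above (illustrative only):
+ * with bit_idx = 3 and CLK_GATE_SET_TO_DISABLE set, a raw register value
+ * of 0x08 has bit 3 set, i.e. the clock is gated. The XOR clears bit 3,
+ * the mask then yields zero, and the function reports the clock disabled.
+ */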
+
+struct clk_ops clk_gate_ops = {
+ .enable = clk_gate_enable,
+ .disable = clk_gate_disable,
+ .is_enabled = clk_gate_is_enabled,
+};
+EXPORT_SYMBOL_GPL(clk_gate_ops);
+
+struct clk *clk_register_gate(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ void __iomem *reg, u8 bit_idx,
+ u8 clk_gate_flags, spinlock_t *lock)
+{
+ struct clk_gate *gate;
+ struct clk *clk;
+
+ gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL);
+
+ if (!gate) {
+ pr_err("%s: could not allocate gated clk\n", __func__);
+ return NULL;
+ }
+
+ /* struct clk_gate assignments */
+ gate->reg = reg;
+ gate->bit_idx = bit_idx;
+ gate->flags = clk_gate_flags;
+ gate->lock = lock;
+
+ if (parent_name) {
+ gate->parent[0] = kstrdup(parent_name, GFP_KERNEL);
+ if (!gate->parent[0])
+ goto out;
+ }
+
+ clk = clk_register(dev, name,
+ &clk_gate_ops, &gate->hw,
+ gate->parent,
+ (parent_name ? 1 : 0),
+ flags);
+ if (clk)
+ return clk;
+out:
+ kfree(gate->parent[0]);
+ kfree(gate);
+
+ return NULL;
+}
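+
+/*
+ * Example (illustrative sketch, not part of this patch): registering a
+ * gate controlled by bit 5 of a peripheral enable register. The names
+ * "pclk_uart0" and "pclk", the iomem base and reg_lock are assumptions:
+ *
+ * static DEFINE_SPINLOCK(reg_lock);
+ * struct clk *gate;
+ *
+ * gate = clk_register_gate(NULL, "pclk_uart0", "pclk", 0,
+ * base + 0x10, 5, 0, &reg_lock);
+ * if (!gate)
+ * pr_err("failed to register pclk_uart0\n");
+ */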
diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c
new file mode 100644
index 000000000000..c71ad1f41a97
--- /dev/null
+++ b/drivers/clk/clk-mux.c
@@ -0,0 +1,116 @@
+/*
+ * Copyright (C) 2011 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+ * Copyright (C) 2011 Richard Zhao, Linaro <richard.zhao@linaro.org>
+ * Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Simple multiplexer clock implementation
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+
+/*
+ * DOC: basic adjustable multiplexer clock that cannot gate
+ *
+ * Traits of this clock:
+ * prepare - clk_prepare only ensures that parents are prepared
+ * enable - clk_enable only ensures that parents are enabled
+ * rate - rate is only affected by parent switching. No clk_set_rate support
+ * parent - parent is adjustable through clk_set_parent
+ */
+
+#define to_clk_mux(_hw) container_of(_hw, struct clk_mux, hw)
+
+static u8 clk_mux_get_parent(struct clk_hw *hw)
+{
+ struct clk_mux *mux = to_clk_mux(hw);
+ u32 val;
+
+ /*
+ * FIXME need a mux-specific flag to determine if val is bitwise or numeric
+ * e.g. sys_clkin_ck's clksel field is 3 bits wide, but ranges from 0x1
+ * to 0x7 (index starts at one)
+ * OTOH, pmd_trace_clk_mux_ck uses a separate bit for each clock, so
+ * val = 0x4 really means "bit 2, index starts at bit 0"
+ */
+ val = readl(mux->reg) >> mux->shift;
+ val &= (1 << mux->width) - 1;
+
+ if (val && (mux->flags & CLK_MUX_INDEX_BIT))
+ val = ffs(val) - 1;
+
+ if (val && (mux->flags & CLK_MUX_INDEX_ONE))
+ val--;
+
+ if (val >= __clk_get_num_parents(hw->clk))
+ return -EINVAL;
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(clk_mux_get_parent);
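+
+/*
+ * Decode example for the index modes above (illustrative only): with
+ * shift = 0 and width = 3, a raw field value of 0x4 selects parent index
+ * 4 by default, index 2 with CLK_MUX_INDEX_BIT (ffs(0x4) - 1), and index
+ * 3 with CLK_MUX_INDEX_ONE (0x4 - 1).
+ */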
+
+static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_mux *mux = to_clk_mux(hw);
+ u32 val;
+ unsigned long flags = 0;
+
+ if (mux->flags & CLK_MUX_INDEX_BIT)
+ index = 1 << index;
+
+ if (mux->flags & CLK_MUX_INDEX_ONE)
+ index++;
+
+ if (mux->lock)
+ spin_lock_irqsave(mux->lock, flags);
+
+ val = readl(mux->reg);
+ val &= ~(((1 << mux->width) - 1) << mux->shift);
+ val |= index << mux->shift;
+ writel(val, mux->reg);
+
+ if (mux->lock)
+ spin_unlock_irqrestore(mux->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(clk_mux_set_parent);
+
+struct clk_ops clk_mux_ops = {
+ .get_parent = clk_mux_get_parent,
+ .set_parent = clk_mux_set_parent,
+};
+EXPORT_SYMBOL_GPL(clk_mux_ops);
+
+struct clk *clk_register_mux(struct device *dev, const char *name,
+ char **parent_names, u8 num_parents, unsigned long flags,
+ void __iomem *reg, u8 shift, u8 width,
+ u8 clk_mux_flags, spinlock_t *lock)
+{
+ struct clk_mux *mux;
+
+ mux = kmalloc(sizeof(struct clk_mux), GFP_KERNEL);
+
+ if (!mux) {
+ pr_err("%s: could not allocate mux clk\n", __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* struct clk_mux assignments */
+ mux->reg = reg;
+ mux->shift = shift;
+ mux->width = width;
+ mux->flags = clk_mux_flags;
+ mux->lock = lock;
+
+ return clk_register(dev, name, &clk_mux_ops, &mux->hw,
+ parent_names, num_parents, flags);
+}
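+
+/*
+ * Example (illustrative sketch, not part of this patch): registering a
+ * two-input mux selected by one register bit. The parent names and
+ * register layout are hypothetical:
+ *
+ * static char *sel_parents[] = { "osc24m", "pll1" };
+ * static DEFINE_SPINLOCK(reg_lock);
+ * struct clk *mux;
+ *
+ * mux = clk_register_mux(NULL, "sys_sel", sel_parents,
+ * ARRAY_SIZE(sel_parents), 0, base + 0x04, 0, 1, 0, &reg_lock);
+ */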
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
new file mode 100644
index 000000000000..9cf6f59e3e19
--- /dev/null
+++ b/drivers/clk/clk.c
@@ -0,0 +1,1461 @@
+/*
+ * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
+ * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Standard functionality for the common clock API. See Documentation/clk.txt
+ */
+
+#include <linux/clk-private.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+
+static DEFINE_SPINLOCK(enable_lock);
+static DEFINE_MUTEX(prepare_lock);
+
+static HLIST_HEAD(clk_root_list);
+static HLIST_HEAD(clk_orphan_list);
+static LIST_HEAD(clk_notifier_list);
+
+/*** debugfs support ***/
+
+#ifdef CONFIG_COMMON_CLK_DEBUG
+#include <linux/debugfs.h>
+
+static struct dentry *rootdir;
+static struct dentry *orphandir;
+static int inited = 0;
+
+/* caller must hold prepare_lock */
+static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
+{
+ struct dentry *d;
+ int ret = -ENOMEM;
+
+ if (!clk || !pdentry) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ d = debugfs_create_dir(clk->name, pdentry);
+ if (!d)
+ goto out;
+
+ clk->dentry = d;
+
+ d = debugfs_create_u32("clk_rate", S_IRUGO, clk->dentry,
+ (u32 *)&clk->rate);
+ if (!d)
+ goto err_out;
+
+ d = debugfs_create_x32("clk_flags", S_IRUGO, clk->dentry,
+ (u32 *)&clk->flags);
+ if (!d)
+ goto err_out;
+
+ d = debugfs_create_u32("clk_prepare_count", S_IRUGO, clk->dentry,
+ (u32 *)&clk->prepare_count);
+ if (!d)
+ goto err_out;
+
+ d = debugfs_create_u32("clk_enable_count", S_IRUGO, clk->dentry,
+ (u32 *)&clk->enable_count);
+ if (!d)
+ goto err_out;
+
+ d = debugfs_create_u32("clk_notifier_count", S_IRUGO, clk->dentry,
+ (u32 *)&clk->notifier_count);
+ if (!d)
+ goto err_out;
+
+ ret = 0;
+ goto out;
+
+err_out:
+ debugfs_remove(clk->dentry);
+out:
+ return ret;
+}
+
+/* caller must hold prepare_lock */
+static int clk_debug_create_subtree(struct clk *clk, struct dentry *pdentry)
+{
+ struct clk *child;
+ struct hlist_node *tmp;
+ int ret = -EINVAL;
+
+ if (!clk || !pdentry)
+ goto out;
+
+ ret = clk_debug_create_one(clk, pdentry);
+
+ if (ret)
+ goto out;
+
+ hlist_for_each_entry(child, tmp, &clk->children, child_node)
+ clk_debug_create_subtree(child, clk->dentry);
+
+ ret = 0;
+out:
+ return ret;
+}
+
+/**
+ * clk_debug_register - add a clk node to the debugfs clk tree
+ * @clk: the clk being added to the debugfs clk tree
+ *
+ * Dynamically adds a clk to the debugfs clk tree if debugfs has been
+ * initialized. Otherwise it bails out early since the debugfs clk tree
+ * will be created lazily by clk_debug_init as part of a late_initcall.
+ *
+ * Caller must hold prepare_lock. Only clk_init calls this function (so
+ * far), so this requirement is taken care of.
+ */
+static int clk_debug_register(struct clk *clk)
+{
+ struct clk *parent;
+ struct dentry *pdentry;
+ int ret = 0;
+
+ if (!inited)
+ goto out;
+
+ parent = clk->parent;
+
+ /*
+ * Check to see if a clk is a root clk. Also check that it is
+ * safe to add this clk to debugfs
+ */
+ if (!parent)
+ if (clk->flags & CLK_IS_ROOT)
+ pdentry = rootdir;
+ else
+ pdentry = orphandir;
+ else
+ if (parent->dentry)
+ pdentry = parent->dentry;
+ else
+ goto out;
+
+ ret = clk_debug_create_subtree(clk, pdentry);
+
+out:
+ return ret;
+}
+
+/**
+ * clk_debug_init - lazily create the debugfs clk tree visualization
+ *
+ * clks are often initialized very early during boot before memory can
+ * be dynamically allocated and well before debugfs is setup.
+ * clk_debug_init walks the clk tree hierarchy while holding
+ * prepare_lock and creates the topology as part of a late_initcall,
+ * thus ensuring that clks initialized very early will still be
+ * represented in the debugfs clk tree. This function should only be
+ * called once at boot-time, and all other clks added dynamically will
+ * be done so with clk_debug_register.
+ */
+static int __init clk_debug_init(void)
+{
+ struct clk *clk;
+ struct hlist_node *tmp;
+
+ rootdir = debugfs_create_dir("clk", NULL);
+
+ if (!rootdir)
+ return -ENOMEM;
+
+ orphandir = debugfs_create_dir("orphans", rootdir);
+
+ if (!orphandir)
+ return -ENOMEM;
+
+ mutex_lock(&prepare_lock);
+
+ hlist_for_each_entry(clk, tmp, &clk_root_list, child_node)
+ clk_debug_create_subtree(clk, rootdir);
+
+ hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node)
+ clk_debug_create_subtree(clk, orphandir);
+
+ inited = 1;
+
+ mutex_unlock(&prepare_lock);
+
+ return 0;
+}
+late_initcall(clk_debug_init);
+#else
+static inline int clk_debug_register(struct clk *clk) { return 0; }
+#endif /* CONFIG_COMMON_CLK_DEBUG */
+
+#ifdef CONFIG_COMMON_CLK_DISABLE_UNUSED
+/* caller must hold prepare_lock */
+static void clk_disable_unused_subtree(struct clk *clk)
+{
+ struct clk *child;
+ struct hlist_node *tmp;
+ unsigned long flags;
+
+ if (!clk)
+ goto out;
+
+ hlist_for_each_entry(child, tmp, &clk->children, child_node)
+ clk_disable_unused_subtree(child);
+
+ spin_lock_irqsave(&enable_lock, flags);
+
+ if (clk->enable_count)
+ goto unlock_out;
+
+ if (clk->flags & CLK_IGNORE_UNUSED)
+ goto unlock_out;
+
+ if (__clk_is_enabled(clk) && clk->ops->disable)
+ clk->ops->disable(clk->hw);
+
+unlock_out:
+ spin_unlock_irqrestore(&enable_lock, flags);
+
+out:
+ return;
+}
+
+static int clk_disable_unused(void)
+{
+ struct clk *clk;
+ struct hlist_node *tmp;
+
+ mutex_lock(&prepare_lock);
+
+ hlist_for_each_entry(clk, tmp, &clk_root_list, child_node)
+ clk_disable_unused_subtree(clk);
+
+ hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node)
+ clk_disable_unused_subtree(clk);
+
+ mutex_unlock(&prepare_lock);
+
+ return 0;
+}
+late_initcall(clk_disable_unused);
+#else
+static inline int clk_disable_unused(void) { return 0; }
+#endif /* CONFIG_COMMON_CLK_DISABLE_UNUSED */
+
+/*** helper functions ***/
+
+inline const char *__clk_get_name(struct clk *clk)
+{
+ return !clk ? NULL : clk->name;
+}
+
+inline struct clk_hw *__clk_get_hw(struct clk *clk)
+{
+ return !clk ? NULL : clk->hw;
+}
+
+inline u8 __clk_get_num_parents(struct clk *clk)
+{
+ return !clk ? -EINVAL : clk->num_parents;
+}
+
+inline struct clk *__clk_get_parent(struct clk *clk)
+{
+ return !clk ? NULL : clk->parent;
+}
+
+inline int __clk_get_enable_count(struct clk *clk)
+{
+ return !clk ? -EINVAL : clk->enable_count;
+}
+
+inline int __clk_get_prepare_count(struct clk *clk)
+{
+ return !clk ? -EINVAL : clk->prepare_count;
+}
+
+unsigned long __clk_get_rate(struct clk *clk)
+{
+ unsigned long ret;
+
+ if (!clk) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = clk->rate;
+
+ if (clk->flags & CLK_IS_ROOT)
+ goto out;
+
+ if (!clk->parent)
+ ret = -ENODEV;
+
+out:
+ return ret;
+}
+
+inline unsigned long __clk_get_flags(struct clk *clk)
+{
+ return !clk ? -EINVAL : clk->flags;
+}
+
+int __clk_is_enabled(struct clk *clk)
+{
+ int ret;
+
+ if (!clk)
+ return -EINVAL;
+
+ /*
+ * .is_enabled is only mandatory for clocks that gate; fall back to the
+ * software usage counter if .is_enabled is missing
+ */
+ if (!clk->ops->is_enabled) {
+ ret = clk->enable_count ? 1 : 0;
+ goto out;
+ }
+
+ ret = clk->ops->is_enabled(clk->hw);
+out:
+ return ret;
+}
+
+static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
+{
+ struct clk *child;
+ struct clk *ret;
+ struct hlist_node *tmp;
+
+ if (!strcmp(clk->name, name))
+ return clk;
+
+ hlist_for_each_entry(child, tmp, &clk->children, child_node) {
+ ret = __clk_lookup_subtree(name, child);
+ if (ret)
+ return ret;
+ }
+
+ return NULL;
+}
+
+struct clk *__clk_lookup(const char *name)
+{
+ struct clk *root_clk;
+ struct clk *ret;
+ struct hlist_node *tmp;
+
+ if (!name)
+ return NULL;
+
+ /* search the 'proper' clk tree first */
+ hlist_for_each_entry(root_clk, tmp, &clk_root_list, child_node) {
+ ret = __clk_lookup_subtree(name, root_clk);
+ if (ret)
+ return ret;
+ }
+
+ /* if not found, then search the orphan tree */
+ hlist_for_each_entry(root_clk, tmp, &clk_orphan_list, child_node) {
+ ret = __clk_lookup_subtree(name, root_clk);
+ if (ret)
+ return ret;
+ }
+
+ return NULL;
+}
+
+/*** clk api ***/
+
+void __clk_unprepare(struct clk *clk)
+{
+ if (!clk)
+ return;
+
+ if (WARN_ON(clk->prepare_count == 0))
+ return;
+
+ if (--clk->prepare_count > 0)
+ return;
+
+ WARN_ON(clk->enable_count > 0);
+
+ if (clk->ops->unprepare)
+ clk->ops->unprepare(clk->hw);
+
+ __clk_unprepare(clk->parent);
+}
+
+/**
+ * clk_unprepare - undo preparation of a clock source
+ * @clk: the clk being unprepared
+ *
+ * clk_unprepare may sleep, which differentiates it from clk_disable. In a
+ * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
+ * if the operation may sleep. One example is a clk which is accessed over
+ * I2c. In the complex case a clk gate operation may require a fast and a slow
+ * part. It is this reason that clk_unprepare and clk_disable are not mutually
+ * exclusive. In fact clk_disable must be called before clk_unprepare.
+ */
+void clk_unprepare(struct clk *clk)
+{
+ mutex_lock(&prepare_lock);
+ __clk_unprepare(clk);
+ mutex_unlock(&prepare_lock);
+}
+EXPORT_SYMBOL_GPL(clk_unprepare);
+
+int __clk_prepare(struct clk *clk)
+{
+ int ret = 0;
+
+ if (!clk)
+ return 0;
+
+ if (clk->prepare_count == 0) {
+ ret = __clk_prepare(clk->parent);
+ if (ret)
+ return ret;
+
+ if (clk->ops->prepare) {
+ ret = clk->ops->prepare(clk->hw);
+ if (ret) {
+ __clk_unprepare(clk->parent);
+ return ret;
+ }
+ }
+ }
+
+ clk->prepare_count++;
+
+ return 0;
+}
+
+/**
+ * clk_prepare - prepare a clock source
+ * @clk: the clk being prepared
+ *
+ * clk_prepare may sleep, which differentiates it from clk_enable. In a simple
+ * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
+ * operation may sleep. One example is a clk which is accessed over I2C. In
+ * the complex case a clk ungate operation may require a fast and a slow part.
+ * For this reason clk_prepare and clk_enable are not mutually
+ * exclusive. In fact clk_prepare must be called before clk_enable.
+ * Returns 0 on success, -EERROR otherwise.
+ */
+int clk_prepare(struct clk *clk)
+{
+ int ret;
+
+ mutex_lock(&prepare_lock);
+ ret = __clk_prepare(clk);
+ mutex_unlock(&prepare_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(clk_prepare);
+
+static void __clk_disable(struct clk *clk)
+{
+ if (!clk)
+ return;
+
+ if (WARN_ON(clk->enable_count == 0))
+ return;
+
+ if (--clk->enable_count > 0)
+ return;
+
+ if (clk->ops->disable)
+ clk->ops->disable(clk->hw);
+
+ __clk_disable(clk->parent);
+}
+
+/**
+ * clk_disable - gate a clock
+ * @clk: the clk being gated
+ *
+ * clk_disable must not sleep, which differentiates it from clk_unprepare. In
+ * a simple case, clk_disable can be used instead of clk_unprepare to gate a
+ * clk if the operation is fast and will never sleep. One example is a
+ * SoC-internal clk which is controlled via simple register writes. In the
+ * complex case a clk gate operation may require a fast and a slow part. For
+ * this reason clk_unprepare and clk_disable are not mutually exclusive.
+ * In fact clk_disable must be called before clk_unprepare.
+ */
+void clk_disable(struct clk *clk)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&enable_lock, flags);
+ __clk_disable(clk);
+ spin_unlock_irqrestore(&enable_lock, flags);
+}
+EXPORT_SYMBOL_GPL(clk_disable);
+
+static int __clk_enable(struct clk *clk)
+{
+ int ret = 0;
+
+ if (!clk)
+ return 0;
+
+ if (WARN_ON(clk->prepare_count == 0))
+ return -ESHUTDOWN;
+
+ if (clk->enable_count == 0) {
+ ret = __clk_enable(clk->parent);
+
+ if (ret)
+ return ret;
+
+ if (clk->ops->enable) {
+ ret = clk->ops->enable(clk->hw);
+ if (ret) {
+ __clk_disable(clk->parent);
+ return ret;
+ }
+ }
+ }
+
+ clk->enable_count++;
+ return 0;
+}
+
+/**
+ * clk_enable - ungate a clock
+ * @clk: the clk being ungated
+ *
+ * clk_enable must not sleep, which differentiates it from clk_prepare. In a
+ * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
+ * if the operation will never sleep. One example is a SoC-internal clk which
+ * is controlled via simple register writes. In the complex case a clk ungate
+ * operation may require a fast and a slow part. For this reason
+ * clk_enable and clk_prepare are not mutually exclusive. In fact clk_prepare
+ * must be called before clk_enable. Returns 0 on success, -EERROR
+ * otherwise.
+ */
+int clk_enable(struct clk *clk)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&enable_lock, flags);
+ ret = __clk_enable(clk);
+ spin_unlock_irqrestore(&enable_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(clk_enable);
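+
+/*
+ * Typical usage of the prepare/enable pair from consumer code, an
+ * illustrative sketch in which "priv->clk" is an assumption. clk_prepare
+ * may sleep; clk_enable is atomic:
+ *
+ * ret = clk_prepare(priv->clk);
+ * if (ret)
+ * return ret;
+ *
+ * ret = clk_enable(priv->clk);
+ * if (ret) {
+ * clk_unprepare(priv->clk);
+ * return ret;
+ * }
+ *
+ * Teardown runs in the opposite order: clk_disable, then clk_unprepare.
+ */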
+
+/**
+ * clk_get_rate - return the rate of clk
+ * @clk: the clk whose rate is being returned
+ *
+ * Simply returns the cached rate of the clk. Does not query the hardware. If
+ * clk is NULL then returns -EINVAL.
+ */
+unsigned long clk_get_rate(struct clk *clk)
+{
+ unsigned long rate;
+
+ mutex_lock(&prepare_lock);
+ rate = __clk_get_rate(clk);
+ mutex_unlock(&prepare_lock);
+
+ return rate;
+}
+EXPORT_SYMBOL_GPL(clk_get_rate);
+
+/**
+ * __clk_round_rate - round the given rate for a clk
+ * @clk: round the rate of this clock
+ *
+ * Caller must hold prepare_lock. Useful for clk_ops such as .set_rate
+ */
+unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
+{
+ unsigned long unused;
+
+ if (!clk)
+ return -EINVAL;
+
+ if (!clk->ops->round_rate)
+ return clk->rate;
+
+ if (clk->flags & CLK_SET_RATE_PARENT)
+ return clk->ops->round_rate(clk->hw, rate, &unused);
+ else
+ return clk->ops->round_rate(clk->hw, rate, NULL);
+}
+
+/**
+ * clk_round_rate - round the given rate for a clk
+ * @clk: the clk for which we are rounding a rate
+ * @rate: the rate which is to be rounded
+ *
+ * Takes in a rate as input and rounds it to a rate that the clk can actually
+ * use which is then returned. If clk doesn't support round_rate operation
+ * then the parent rate is returned.
+ */
+long clk_round_rate(struct clk *clk, unsigned long rate)
+{
+ unsigned long ret;
+
+ mutex_lock(&prepare_lock);
+ ret = __clk_round_rate(clk, rate);
+ mutex_unlock(&prepare_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(clk_round_rate);
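+
+/*
+ * Example (illustrative): querying the achievable rate before committing
+ * to it, so the caller can decide whether the rounded rate is acceptable:
+ *
+ * long rounded = clk_round_rate(clk, 48000000);
+ *
+ * if (rounded > 0 && rounded != clk_get_rate(clk))
+ * ret = clk_set_rate(clk, rounded);
+ */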
+
+/**
+ * __clk_notify - call clk notifier chain
+ * @clk: struct clk * that is changing rate
+ * @msg: clk notifier type (see include/linux/clk.h)
+ * @old_rate: old clk rate
+ * @new_rate: new clk rate
+ *
+ * Triggers a notifier call chain on the clk rate-change notification
+ * for 'clk'. Passes a pointer to the struct clk and the previous
+ * and current rates to the notifier callback. Intended to be called by
+ * internal clock code only. Returns NOTIFY_DONE from the last driver
+ * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
+ * a driver returns that.
+ */
+static int __clk_notify(struct clk *clk, unsigned long msg,
+ unsigned long old_rate, unsigned long new_rate)
+{
+ struct clk_notifier *cn;
+ struct clk_notifier_data cnd;
+ int ret = NOTIFY_DONE;
+
+ cnd.clk = clk;
+ cnd.old_rate = old_rate;
+ cnd.new_rate = new_rate;
+
+ list_for_each_entry(cn, &clk_notifier_list, node) {
+ if (cn->clk == clk) {
+ ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
+ &cnd);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * __clk_recalc_rates
+ * @clk: first clk in the subtree
+ * @msg: notification type (see include/linux/clk.h)
+ *
+ * Walks the subtree of clks starting with clk and recalculates rates as it
+ * goes. Note that if a clk does not implement the .recalc_rate callback then
+ * it is assumed that the clock will take on the rate of its parent.
+ *
+ * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
+ * if necessary.
+ *
+ * Caller must hold prepare_lock.
+ */
+static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
+{
+ unsigned long old_rate;
+ unsigned long parent_rate = 0;
+ struct hlist_node *tmp;
+ struct clk *child;
+
+ old_rate = clk->rate;
+
+ if (clk->parent)
+ parent_rate = clk->parent->rate;
+
+ if (clk->ops->recalc_rate)
+ clk->rate = clk->ops->recalc_rate(clk->hw, parent_rate);
+ else
+ clk->rate = parent_rate;
+
+ /*
+ * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
+ * & ABORT_RATE_CHANGE notifiers
+ */
+ if (clk->notifier_count && msg)
+ __clk_notify(clk, msg, old_rate, clk->rate);
+
+ hlist_for_each_entry(child, tmp, &clk->children, child_node)
+ __clk_recalc_rates(child, msg);
+}
+
+/**
+ * __clk_speculate_rates
+ * @clk: first clk in the subtree
+ * @parent_rate: the "future" rate of clk's parent
+ *
+ * Walks the subtree of clks starting with clk, speculating rates as it
+ * goes and firing off PRE_RATE_CHANGE notifications as necessary.
+ *
+ * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
+ * pre-rate change notifications and returns early if no clks in the
+ * subtree have subscribed to the notifications. Note that if a clk does not
+ * implement the .recalc_rate callback then it is assumed that the clock will
+ * take on the rate of its parent.
+ *
+ * Caller must hold prepare_lock.
+ */
+static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
+{
+ struct hlist_node *tmp;
+ struct clk *child;
+ unsigned long new_rate;
+ int ret = NOTIFY_DONE;
+
+ if (clk->ops->recalc_rate)
+ new_rate = clk->ops->recalc_rate(clk->hw, parent_rate);
+ else
+ new_rate = parent_rate;
+
+ /* abort the rate change if a driver returns NOTIFY_BAD */
+ if (clk->notifier_count)
+ ret = __clk_notify(clk, PRE_RATE_CHANGE, clk->rate, new_rate);
+
+ if (ret == NOTIFY_BAD)
+ goto out;
+
+ hlist_for_each_entry(child, tmp, &clk->children, child_node) {
+ ret = __clk_speculate_rates(child, new_rate);
+ if (ret == NOTIFY_BAD)
+ break;
+ }
+
+out:
+ return ret;
+}
+
+static void clk_calc_subtree(struct clk *clk, unsigned long new_rate)
+{
+ struct clk *child;
+ struct hlist_node *tmp;
+
+ clk->new_rate = new_rate;
+
+ hlist_for_each_entry(child, tmp, &clk->children, child_node) {
+ if (child->ops->recalc_rate)
+ child->new_rate = child->ops->recalc_rate(child->hw, new_rate);
+ else
+ child->new_rate = new_rate;
+ clk_calc_subtree(child, child->new_rate);
+ }
+}
+
+/*
+ * calculate the new rates returning the topmost clock that has to be
+ * changed.
+ */
+static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
+{
+ struct clk *top = clk;
+ unsigned long best_parent_rate = clk->parent->rate;
+ unsigned long new_rate;
+
+ if (!clk->ops->round_rate && !(clk->flags & CLK_SET_RATE_PARENT)) {
+ clk->new_rate = clk->rate;
+ return NULL;
+ }
+
+ if (!clk->ops->round_rate && (clk->flags & CLK_SET_RATE_PARENT)) {
+ top = clk_calc_new_rates(clk->parent, rate);
+ new_rate = clk->new_rate = clk->parent->new_rate;
+
+ goto out;
+ }
+
+ if (clk->flags & CLK_SET_RATE_PARENT)
+ new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);
+ else
+ new_rate = clk->ops->round_rate(clk->hw, rate, NULL);
+
+ if (best_parent_rate != clk->parent->rate) {
+ top = clk_calc_new_rates(clk->parent, best_parent_rate);
+
+ goto out;
+ }
+
+out:
+ clk_calc_subtree(clk, new_rate);
+
+ return top;
+}
+
+/*
+ * Notify about rate changes in a subtree. Always walk down the whole tree
+ * so that in case of an error we can walk down the whole tree again and
+ * abort the change.
+ */
+static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event)
+{
+ struct hlist_node *tmp;
+ struct clk *child, *fail_clk = NULL;
+ int ret = NOTIFY_DONE;
+
+ if (clk->rate == clk->new_rate)
+ return NULL;
+
+ if (clk->notifier_count) {
+ ret = __clk_notify(clk, event, clk->rate, clk->new_rate);
+ if (ret == NOTIFY_BAD)
+ fail_clk = clk;
+ }
+
+ hlist_for_each_entry(child, tmp, &clk->children, child_node) {
+ clk = clk_propagate_rate_change(child, event);
+ if (clk)
+ fail_clk = clk;
+ }
+
+ return fail_clk;
+}
+
+/*
+ * walk down a subtree and set the new rates notifying the rate
+ * change on the way
+ */
+static void clk_change_rate(struct clk *clk)
+{
+ struct clk *child;
+ unsigned long old_rate;
+ struct hlist_node *tmp;
+
+ old_rate = clk->rate;
+
+ if (clk->ops->set_rate)
+ clk->ops->set_rate(clk->hw, clk->new_rate);
+
+ if (clk->ops->recalc_rate)
+ clk->rate = clk->ops->recalc_rate(clk->hw,
+ clk->parent->rate);
+ else
+ clk->rate = clk->parent->rate;
+
+ if (clk->notifier_count && old_rate != clk->rate)
+ __clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);
+
+ hlist_for_each_entry(child, tmp, &clk->children, child_node)
+ clk_change_rate(child);
+}
+
+/**
+ * clk_set_rate - specify a new rate for clk
+ * @clk: the clk whose rate is being changed
+ * @rate: the new rate for clk
+ *
+ * In the simplest case clk_set_rate will only change the rate of clk.
+ *
+ * If clk has the CLK_SET_RATE_GATE flag set and it is enabled this call
+ * will fail; only when the clk is disabled will it be able to change
+ * its rate.
+ *
+ * Setting the CLK_SET_RATE_PARENT flag allows clk_set_rate to
+ * recursively propagate up to clk's parent; whether or not this happens
+ * depends on the outcome of clk's .round_rate implementation. If
+ * *parent_rate is 0 after calling .round_rate then upstream parent
+ * propagation is ignored. If *parent_rate comes back with a new rate
+ * for clk's parent then we propagate up to clk's parent and set its
+ * rate. Upward propagation will continue until either a clk does not
+ * support the CLK_SET_RATE_PARENT flag or .round_rate stops requesting
+ * changes to clk's parent_rate. If there is a failure during upstream
+ * propagation then clk_set_rate will unwind and restore each clk's rate
+ * that had been successfully changed. Afterwards a rate change abort
+ * notification will be propagated downstream, starting from the clk
+ * that failed.
+ *
+ * At the end of all of the rate setting, clk_set_rate internally calls
+ * __clk_recalc_rates and propagates the rate changes downstream,
+ * starting from the highest clk whose rate was changed. This has the
+ * added benefit of propagating post-rate change notifiers.
+ *
+ * Note that while post-rate change and rate change abort notifications
+ * are guaranteed to be sent to a clk only once per call to
+ * clk_set_rate, pre-change notifications will be sent for every clk
+ * whose rate is changed. Stacking pre-change notifications is noisy
+ * for the drivers subscribed to them, but this allows drivers to react
+ * to intermediate clk rate changes up until the point where the final
+ * rate is achieved at the end of upstream propagation.
+ *
+ * Returns 0 on success, -EERROR otherwise.
+ */
+int clk_set_rate(struct clk *clk, unsigned long rate)
+{
+ struct clk *top, *fail_clk;
+ int ret = 0;
+
+ /* prevent racing with updates to the clock topology */
+ mutex_lock(&prepare_lock);
+
+ /* bail early if nothing to do */
+ if (rate == clk->rate)
+ goto out;
+
+ /* calculate new rates and get the topmost changed clock */
+ top = clk_calc_new_rates(clk, rate);
+ if (!top) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* notify that we are about to change rates */
+ fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
+ if (fail_clk) {
+ pr_warn("%s: failed to set %s rate\n", __func__,
+ fail_clk->name);
+ clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* change the rates */
+ clk_change_rate(top);
+
+ mutex_unlock(&prepare_lock);
+
+ return 0;
+out:
+ mutex_unlock(&prepare_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(clk_set_rate);
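+
+/*
+ * Example (illustrative sketch): per the CLK_SET_RATE_GATE rule above, a
+ * consumer gates the clk before changing its rate and restores it after:
+ *
+ * clk_disable(clk);
+ * clk_unprepare(clk);
+ *
+ * ret = clk_set_rate(clk, new_rate);
+ * if (ret)
+ * pr_err("rate change to %lu failed\n", new_rate);
+ *
+ * clk_prepare(clk);
+ * clk_enable(clk);
+ */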
+
+/**
+ * clk_get_parent - return the parent of a clk
+ * @clk: the clk whose parent gets returned
+ *
+ * Simply returns clk->parent. Returns NULL if clk is NULL.
+ */
+struct clk *clk_get_parent(struct clk *clk)
+{
+ struct clk *parent;
+
+ mutex_lock(&prepare_lock);
+ parent = __clk_get_parent(clk);
+ mutex_unlock(&prepare_lock);
+
+ return parent;
+}
+EXPORT_SYMBOL_GPL(clk_get_parent);
+
+/*
+ * .get_parent is mandatory for clocks with multiple possible parents. It is
+ * optional for single-parent clocks. Always call .get_parent if it is
+ * available and WARN if it is missing for multi-parent clocks.
+ *
+ * For single-parent clocks without .get_parent, first check to see if the
+ * .parents array exists, and if so use it to avoid an expensive tree
+ * traversal. If .parents does not exist then walk the tree with __clk_lookup.
+ */
+static struct clk *__clk_init_parent(struct clk *clk)
+{
+ struct clk *ret = NULL;
+ u8 index;
+
+ /* handle the trivial cases */
+
+ if (!clk->num_parents)
+ goto out;
+
+ if (clk->num_parents == 1) {
+ if (IS_ERR_OR_NULL(clk->parent))
+ clk->parent = __clk_lookup(clk->parent_names[0]);
+ ret = clk->parent;
+ goto out;
+ }
+
+ if (!clk->ops->get_parent) {
+ WARN(!clk->ops->get_parent,
+ "%s: multi-parent clocks must implement .get_parent\n",
+ __func__);
+ goto out;
+ }
+
+ /*
+ * Do our best to cache parent clocks in clk->parents. This prevents
+ * unnecessary and expensive calls to __clk_lookup. We don't set
+ * clk->parent here; that is done by the calling function
+ */
+
+ index = clk->ops->get_parent(clk->hw);
+
+ if (!clk->parents)
+ clk->parents =
+ kzalloc((sizeof(struct clk *) * clk->num_parents),
+ GFP_KERNEL);
+
+ if (!clk->parents)
+ ret = __clk_lookup(clk->parent_names[index]);
+ else if (!clk->parents[index])
+ ret = clk->parents[index] =
+ __clk_lookup(clk->parent_names[index]);
+ else
+ ret = clk->parents[index];
+
+out:
+ return ret;
+}
+
+void __clk_reparent(struct clk *clk, struct clk *new_parent)
+{
+#ifdef CONFIG_COMMON_CLK_DEBUG
+ struct dentry *d;
+ struct dentry *new_parent_d;
+#endif
+
+ if (!clk || !new_parent)
+ return;
+
+ hlist_del(&clk->child_node);
+
+ if (new_parent)
+ hlist_add_head(&clk->child_node, &new_parent->children);
+ else
+ hlist_add_head(&clk->child_node, &clk_orphan_list);
+
+#ifdef CONFIG_COMMON_CLK_DEBUG
+ if (!inited)
+ goto out;
+
+ if (new_parent)
+ new_parent_d = new_parent->dentry;
+ else
+ new_parent_d = orphandir;
+
+ d = debugfs_rename(clk->dentry->d_parent, clk->dentry,
+ new_parent_d, clk->name);
+ if (d)
+ clk->dentry = d;
+ else
+ pr_debug("%s: failed to rename debugfs entry for %s\n",
+ __func__, clk->name);
+out:
+#endif
+
+ clk->parent = new_parent;
+
+ __clk_recalc_rates(clk, POST_RATE_CHANGE);
+}
+
+static int __clk_set_parent(struct clk *clk, struct clk *parent)
+{
+ struct clk *old_parent;
+ unsigned long flags;
+ int ret = -EINVAL;
+ u8 i;
+
+ old_parent = clk->parent;
+
+ /* find index of new parent clock using cached parent ptrs */
+ for (i = 0; i < clk->num_parents; i++)
+ if (clk->parents[i] == parent)
+ break;
+
+ /*
+ * find index of new parent clock using string name comparison
+ * also try to cache the parent to avoid future calls to __clk_lookup
+ */
+ if (i == clk->num_parents)
+ for (i = 0; i < clk->num_parents; i++)
+ if (!strcmp(clk->parent_names[i], parent->name)) {
+ clk->parents[i] = __clk_lookup(parent->name);
+ break;
+ }
+
+ if (i == clk->num_parents) {
+ pr_debug("%s: clock %s is not a possible parent of clock %s\n",
+ __func__, parent->name, clk->name);
+ goto out;
+ }
+
+ /* migrate prepare and enable */
+ if (clk->prepare_count)
+ __clk_prepare(parent);
+
+ /* FIXME replace with clk_is_enabled(clk) someday */
+ spin_lock_irqsave(&enable_lock, flags);
+ if (clk->enable_count)
+ __clk_enable(parent);
+ spin_unlock_irqrestore(&enable_lock, flags);
+
+ /* change clock input source */
+ ret = clk->ops->set_parent(clk->hw, i);
+
+ /* clean up old prepare and enable */
+ spin_lock_irqsave(&enable_lock, flags);
+ if (clk->enable_count)
+ __clk_disable(old_parent);
+ spin_unlock_irqrestore(&enable_lock, flags);
+
+ if (clk->prepare_count)
+ __clk_unprepare(old_parent);
+
+out:
+ return ret;
+}
+
+/**
+ * clk_set_parent - switch the parent of a mux clk
+ * @clk: the mux clk whose input we are switching
+ * @parent: the new input to clk
+ *
+ * Re-parent clk to use parent as its new input source. If clk has the
+ * CLK_SET_PARENT_GATE flag set then clk must be gated for this
+ * operation to succeed. After successfully changing clk's parent
+ * clk_set_parent will update the clk topology, sysfs topology and
+ * propagate rate recalculation via __clk_recalc_rates. Returns 0 on
+ * success, -EERROR otherwise.
+ */
+int clk_set_parent(struct clk *clk, struct clk *parent)
+{
+ int ret = 0;
+
+ if (!clk || !clk->ops)
+ return -EINVAL;
+
+ if (!clk->ops->set_parent)
+ return -ENOSYS;
+
+ /* prevent racing with updates to the clock topology */
+ mutex_lock(&prepare_lock);
+
+ if (clk->parent == parent)
+ goto out;
+
+ /* propagate PRE_RATE_CHANGE notifications */
+ if (clk->notifier_count)
+ ret = __clk_speculate_rates(clk, parent->rate);
+
+ /* abort if a driver objects */
+ if (ret == NOTIFY_STOP)
+ goto out;
+
+ /* only re-parent if the clock is not in use */
+ if ((clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count)
+ ret = -EBUSY;
+ else
+ ret = __clk_set_parent(clk, parent);
+
+ /* propagate ABORT_RATE_CHANGE if .set_parent failed */
+ if (ret) {
+ __clk_recalc_rates(clk, ABORT_RATE_CHANGE);
+ goto out;
+ }
+
+ /* propagate rate recalculation downstream */
+ __clk_reparent(clk, parent);
+
+out:
+ mutex_unlock(&prepare_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(clk_set_parent);
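+
+/*
+ * Example (illustrative; the clock names are assumptions): re-parenting a
+ * mux that has CLK_SET_PARENT_GATE set, which fails with -EBUSY while the
+ * clk is prepared:
+ *
+ * ret = clk_set_parent(mux_clk, pll_clk);
+ * if (ret == -EBUSY)
+ * pr_err("mux must be gated before re-parenting\n");
+ */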
+
+/**
+ * __clk_init - initialize the data structures in a struct clk
+ * @dev: device initializing this clk, placeholder for now
+ * @clk: clk being initialized
+ *
+ * Initializes the lists in struct clk, queries the hardware for the
+ * parent and rate and sets them both.
+ *
+ * Any struct clk passed into __clk_init must have the following members
+ * populated:
+ * .name
+ * .ops
+ * .hw
+ * .parent_names
+ * .num_parents
+ * .flags
+ *
+ * Essentially, everything that would normally be passed into clk_register is
+ * assumed to be initialized already in __clk_init. The other members may be
+ * populated, but are optional.
+ *
+ * __clk_init is only exposed via clk-private.h and is intended for use with
+ * very large numbers of clocks that need to be statically initialized. It is
+ * a layering violation to include clk-private.h from any code which implements
+ * a clock's .ops; as such any statically initialized clock data MUST be in a
+ * separate C file from the logic that implements its operations.
+ */
+void __clk_init(struct device *dev, struct clk *clk)
+{
+ int i;
+ struct clk *orphan;
+ struct hlist_node *tmp, *tmp2;
+
+ if (!clk)
+ return;
+
+ mutex_lock(&prepare_lock);
+
+ /* check to see if a clock with this name is already registered */
+ if (__clk_lookup(clk->name))
+ goto out;
+
+ /* throw a WARN if any entries in parent_names are NULL */
+ for (i = 0; i < clk->num_parents; i++)
+ WARN(!clk->parent_names[i],
+ "%s: invalid NULL in %s's .parent_names\n",
+ __func__, clk->name);
+
+ /*
+ * Allocate an array of struct clk *'s to avoid unnecessary string
+ * look-ups of clk's possible parents. This can fail for clocks passed
+ * in to clk_init during early boot; thus any access to clk->parents[]
+ * must always check for a NULL pointer and try to populate it if
+ * necessary.
+ *
+ * If clk->parents is not NULL we skip this entire block. This allows
+ * for clock drivers to statically initialize clk->parents.
+ */
+ if (clk->num_parents && !clk->parents) {
+ clk->parents = kmalloc((sizeof(struct clk*) * clk->num_parents),
+ GFP_KERNEL);
+ /*
+ * __clk_lookup returns NULL for parents that have not been
+ * clk_init'd; thus any access to clk->parents[] must check
+ * for a NULL pointer. We can always perform lazy lookups for
+ * missing parents later on.
+ */
+ if (clk->parents)
+ for (i = 0; i < clk->num_parents; i++)
+ clk->parents[i] =
+ __clk_lookup(clk->parent_names[i]);
+ }
+
+ clk->parent = __clk_init_parent(clk);
+
+ /*
+ * Populate clk->parent if parent has already been __clk_init'd. If
+ * parent has not yet been __clk_init'd then place clk in the orphan
+ * list. If clk has set the CLK_IS_ROOT flag then place it in the root
+ * clk list.
+ *
+ * Every time a new clk is clk_init'd then we walk the list of orphan
+ * clocks and re-parent any that are children of the clock currently
+ * being clk_init'd.
+ */
+ if (clk->parent)
+ hlist_add_head(&clk->child_node,
+ &clk->parent->children);
+ else if (clk->flags & CLK_IS_ROOT)
+ hlist_add_head(&clk->child_node, &clk_root_list);
+ else
+ hlist_add_head(&clk->child_node, &clk_orphan_list);
+
+ /*
+ * Set clk's rate. The preferred method is to use .recalc_rate. For
+ * simple clocks and lazy developers the default fallback is to use the
+ * parent's rate. If a clock doesn't have a parent (or is orphaned)
+ * then rate is set to zero.
+ */
+ if (clk->ops->recalc_rate)
+ clk->rate = clk->ops->recalc_rate(clk->hw,
+ __clk_get_rate(clk->parent));
+ else if (clk->parent)
+ clk->rate = clk->parent->rate;
+ else
+ clk->rate = 0;
+
+ /*
+ * walk the list of orphan clocks and reparent any that are children of
+ * this clock
+ */
+ hlist_for_each_entry_safe(orphan, tmp, tmp2, &clk_orphan_list, child_node)
+ for (i = 0; i < orphan->num_parents; i++)
+ if (!strcmp(clk->name, orphan->parent_names[i])) {
+ __clk_reparent(orphan, clk);
+ break;
+ }
+
+ /*
+ * optional platform-specific magic
+ *
+ * The .init callback is not used by any of the basic clock types, but
+ * exists for weird hardware that must perform initialization magic.
+ * Please consider other ways of solving initialization problems before
+ * using this callback, as its use is discouraged.
+ */
+ if (clk->ops->init)
+ clk->ops->init(clk->hw);
+
+ clk_debug_register(clk);
+
+out:
+ mutex_unlock(&prepare_lock);
+
+ return;
+}
+
+/**
+ * clk_register - allocate a new clock, register it and return an opaque cookie
+ * @dev: device that is registering this clock
+ * @name: clock name
+ * @ops: operations this clock supports
+ * @hw: link to hardware-specific clock data
+ * @parent_names: array of string names for all possible parents
+ * @num_parents: number of possible parents
+ * @flags: framework-level hints and quirks
+ *
+ * clk_register is the primary interface for populating the clock tree with new
+ * clock nodes. It returns a pointer to the newly allocated struct clk which
+ * cannot be dereferenced by driver code but may be used in conjunction with the
+ * rest of the clock API.
+ */
+struct clk *clk_register(struct device *dev, const char *name,
+ const struct clk_ops *ops, struct clk_hw *hw,
+ char **parent_names, u8 num_parents, unsigned long flags)
+{
+ struct clk *clk;
+
+ clk = kzalloc(sizeof(*clk), GFP_KERNEL);
+ if (!clk)
+ return NULL;
+
+ clk->name = name;
+ clk->ops = ops;
+ clk->hw = hw;
+ clk->flags = flags;
+ clk->parent_names = parent_names;
+ clk->num_parents = num_parents;
+ hw->clk = clk;
+
+ __clk_init(dev, clk);
+
+ return clk;
+}
+EXPORT_SYMBOL_GPL(clk_register);
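+
+/*
+ * Example (illustrative sketch, not part of this patch): a hardware-
+ * specific driver embeds struct clk_hw in its own type, provides a
+ * clk_ops and hands both to clk_register. All "foo" names are
+ * hypothetical:
+ *
+ * struct clk_foo {
+ * struct clk_hw hw;
+ * void __iomem *reg;
+ * };
+ *
+ * static const struct clk_ops clk_foo_ops = {
+ * .recalc_rate = clk_foo_recalc_rate,
+ * };
+ *
+ * clk = clk_register(dev, "foo", &clk_foo_ops, &foo->hw,
+ * parent_names, 1, 0);
+ */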
+
+/*** clk rate change notifiers ***/
+
+/**
+ * clk_notifier_register - add a clk rate change notifier
+ * @clk: struct clk * to watch
+ * @nb: struct notifier_block * with callback info
+ *
+ * Request notification when clk's rate changes. This uses an SRCU
+ * notifier because we want it to block and notifier unregistrations are
+ * uncommon. The callbacks associated with the notifier must not
+ * re-enter into the clk framework by calling any top-level clk APIs;
+ * this would recursively take the prepare_lock mutex and deadlock.
+ *
+ * Pre-change notifier callbacks will be passed the current, pre-change
+ * rate of the clk via struct clk_notifier_data.old_rate. The new,
+ * post-change rate of the clk is passed via struct
+ * clk_notifier_data.new_rate.
+ *
+ * Post-change notifiers will pass the now-current, post-change rate of
+ * the clk in both struct clk_notifier_data.old_rate and struct
+ * clk_notifier_data.new_rate.
+ *
+ * Abort-change notifiers are effectively the opposite of pre-change
+ * notifiers: the original pre-change clk rate is passed in via struct
+ * clk_notifier_data.new_rate and the failed post-change rate is passed
+ * in via struct clk_notifier_data.old_rate.
+ *
+ * clk_notifier_register() must be called from non-atomic context.
+ * Returns -EINVAL if called with null arguments, -ENOMEM upon
+ * allocation failure; otherwise, passes along the return value of
+ * srcu_notifier_chain_register().
+ */
+int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
+{
+ struct clk_notifier *cn;
+ int ret = -ENOMEM;
+
+ if (!clk || !nb)
+ return -EINVAL;
+
+ mutex_lock(&prepare_lock);
+
+ /* search the list of notifiers for this clk */
+ list_for_each_entry(cn, &clk_notifier_list, node)
+ if (cn->clk == clk)
+ break;
+
+ /* if clk wasn't in the notifier list, allocate new clk_notifier */
+ if (cn->clk != clk) {
+ cn = kzalloc(sizeof(struct clk_notifier), GFP_KERNEL);
+ if (!cn)
+ goto out;
+
+ cn->clk = clk;
+ srcu_init_notifier_head(&cn->notifier_head);
+
+ list_add(&cn->node, &clk_notifier_list);
+ }
+
+ ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
+
+ clk->notifier_count++;
+
+out:
+ mutex_unlock(&prepare_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(clk_notifier_register);
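+
+/*
+ * Example callback (illustrative sketch; the 100 MHz policy is an
+ * assumption). Returning NOTIFY_BAD from a PRE_RATE_CHANGE callback
+ * aborts the rate change before any hardware is touched:
+ *
+ * static int foo_clk_notify(struct notifier_block *nb,
+ * unsigned long event, void *data)
+ * {
+ * struct clk_notifier_data *cnd = data;
+ *
+ * if (event == PRE_RATE_CHANGE && cnd->new_rate > 100000000)
+ * return NOTIFY_BAD;
+ *
+ * return NOTIFY_DONE;
+ * }
+ */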
+
+/**
+ * clk_notifier_unregister - remove a clk rate change notifier
+ * @clk: struct clk *
+ * @nb: struct notifier_block * with callback info
+ *
+ * Requests no further notification for changes to 'clk' and frees memory
+ * allocated in clk_notifier_register.
+ *
+ * Returns -EINVAL if called with null arguments; otherwise, passes
+ * along the return value of srcu_notifier_chain_unregister().
+ */
+int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
+{
+ struct clk_notifier *cn = NULL;
+ int ret = -EINVAL;
+
+ if (!clk || !nb)
+ return -EINVAL;
+
+ mutex_lock(&prepare_lock);
+
+ list_for_each_entry(cn, &clk_notifier_list, node)
+ if (cn->clk == clk)
+ break;
+
+ if (cn->clk == clk) {
+ ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
+
+ clk->notifier_count--;
+
+ /* XXX the notifier code should handle this better */
+ if (!cn->notifier_head.head) {
+ srcu_cleanup_notifier_head(&cn->notifier_head);
+ kfree(cn);
+ }
+
+ } else {
+ ret = -ENOENT;
+ }
+
+ mutex_unlock(&prepare_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(clk_notifier_unregister);
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 55d0f95f82f9..32cb929b8eb6 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -19,6 +19,8 @@
* - Two channels combine to create a free-running 32 bit counter
* with a base rate of 5+ MHz, packaged as a clocksource (with
* resolution better than 200 nsec).
+ * - Some chips support a 32-bit counter. A single channel is used for
+ * this 32-bit free-running counter; the second channel is not used.
*
* - The third channel may be used to provide a 16-bit clockevent
* source, used in either periodic or oneshot mode. This runs
@@ -54,6 +56,11 @@ static cycle_t tc_get_cycles(struct clocksource *cs)
return (upper << 16) | lower;
}
+static cycle_t tc_get_cycles32(struct clocksource *cs)
+{
+ return __raw_readl(tcaddr + ATMEL_TC_REG(0, CV));
+}
+
static struct clocksource clksrc = {
.name = "tcb_clksrc",
.rating = 200,
@@ -209,6 +216,48 @@ static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
#endif
+static void __init tcb_setup_dual_chan(struct atmel_tc *tc, int mck_divisor_idx)
+{
+ /* channel 0: waveform mode, input mclk/8, clock TIOA0 on overflow */
+ __raw_writel(mck_divisor_idx /* likely divide-by-8 */
+ | ATMEL_TC_WAVE
+ | ATMEL_TC_WAVESEL_UP /* free-run */
+ | ATMEL_TC_ACPA_SET /* TIOA0 rises at 0 */
+ | ATMEL_TC_ACPC_CLEAR, /* (duty cycle 50%) */
+ tcaddr + ATMEL_TC_REG(0, CMR));
+ __raw_writel(0x0000, tcaddr + ATMEL_TC_REG(0, RA));
+ __raw_writel(0x8000, tcaddr + ATMEL_TC_REG(0, RC));
+ __raw_writel(0xff, tcaddr + ATMEL_TC_REG(0, IDR)); /* no irqs */
+ __raw_writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(0, CCR));
+
+ /* channel 1: waveform mode, input TIOA0 */
+ __raw_writel(ATMEL_TC_XC1 /* input: TIOA0 */
+ | ATMEL_TC_WAVE
+ | ATMEL_TC_WAVESEL_UP, /* free-run */
+ tcaddr + ATMEL_TC_REG(1, CMR));
+ __raw_writel(0xff, tcaddr + ATMEL_TC_REG(1, IDR)); /* no irqs */
+ __raw_writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(1, CCR));
+
+ /* chain channel 0 to channel 1 */
+ __raw_writel(ATMEL_TC_TC1XC1S_TIOA0, tcaddr + ATMEL_TC_BMR);
+ /* then reset all the timers */
+ __raw_writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
+}
+
+static void __init tcb_setup_single_chan(struct atmel_tc *tc, int mck_divisor_idx)
+{
+ /* channel 0: waveform mode, input mclk/8 */
+ __raw_writel(mck_divisor_idx /* likely divide-by-8 */
+ | ATMEL_TC_WAVE
+ | ATMEL_TC_WAVESEL_UP, /* free-run */
+ tcaddr + ATMEL_TC_REG(0, CMR));
+ __raw_writel(0xff, tcaddr + ATMEL_TC_REG(0, IDR)); /* no irqs */
+ __raw_writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(0, CCR));
+
+ /* then reset all the timers */
+ __raw_writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
+}
+
static int __init tcb_clksrc_init(void)
{
static char bootinfo[] __initdata
@@ -260,34 +309,19 @@ static int __init tcb_clksrc_init(void)
divided_rate / 1000000,
((divided_rate + 500000) % 1000000) / 1000);
- /* tclib will give us three clocks no matter what the
- * underlying platform supports.
- */
- clk_enable(tc->clk[1]);
-
- /* channel 0: waveform mode, input mclk/8, clock TIOA0 on overflow */
- __raw_writel(best_divisor_idx /* likely divide-by-8 */
- | ATMEL_TC_WAVE
- | ATMEL_TC_WAVESEL_UP /* free-run */
- | ATMEL_TC_ACPA_SET /* TIOA0 rises at 0 */
- | ATMEL_TC_ACPC_CLEAR, /* (duty cycle 50%) */
- tcaddr + ATMEL_TC_REG(0, CMR));
- __raw_writel(0x0000, tcaddr + ATMEL_TC_REG(0, RA));
- __raw_writel(0x8000, tcaddr + ATMEL_TC_REG(0, RC));
- __raw_writel(0xff, tcaddr + ATMEL_TC_REG(0, IDR)); /* no irqs */
- __raw_writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(0, CCR));
-
- /* channel 1: waveform mode, input TIOA0 */
- __raw_writel(ATMEL_TC_XC1 /* input: TIOA0 */
- | ATMEL_TC_WAVE
- | ATMEL_TC_WAVESEL_UP, /* free-run */
- tcaddr + ATMEL_TC_REG(1, CMR));
- __raw_writel(0xff, tcaddr + ATMEL_TC_REG(1, IDR)); /* no irqs */
- __raw_writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(1, CCR));
-
- /* chain channel 0 to channel 1, then reset all the timers */
- __raw_writel(ATMEL_TC_TC1XC1S_TIOA0, tcaddr + ATMEL_TC_BMR);
- __raw_writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
+ if (tc->tcb_config && tc->tcb_config->counter_width == 32) {
+ /* use the appropriate function to read the 32-bit counter */
+ clksrc.read = tc_get_cycles32;
+ /* set up only channel 0 */
+ tcb_setup_single_chan(tc, best_divisor_idx);
+ } else {
+ /* tclib will give us three clocks no matter what the
+ * underlying platform supports.
+ */
+ clk_enable(tc->clk[1]);
+ /* set up both channels 0 and 1 */
+ tcb_setup_dual_chan(tc, best_divisor_idx);
+ }
/* and away we go! */
clocksource_register_hz(&clksrc, divided_rate);
diff --git a/drivers/devfreq/exynos4_bus.c b/drivers/devfreq/exynos4_bus.c
index 1a361e99965a..88ddc77a9bb1 100644
--- a/drivers/devfreq/exynos4_bus.c
+++ b/drivers/devfreq/exynos4_bus.c
@@ -311,51 +311,51 @@ static int exynos4210_set_busclk(struct busfreq_data *data, struct opp *opp)
/* Change Divider - DMC0 */
tmp = data->dmc_divtable[index];
- __raw_writel(tmp, S5P_CLKDIV_DMC0);
+ __raw_writel(tmp, EXYNOS4_CLKDIV_DMC0);
do {
- tmp = __raw_readl(S5P_CLKDIV_STAT_DMC0);
+ tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_DMC0);
} while (tmp & 0x11111111);
/* Change Divider - TOP */
tmp = data->top_divtable[index];
- __raw_writel(tmp, S5P_CLKDIV_TOP);
+ __raw_writel(tmp, EXYNOS4_CLKDIV_TOP);
do {
- tmp = __raw_readl(S5P_CLKDIV_STAT_TOP);
+ tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_TOP);
} while (tmp & 0x11111);
/* Change Divider - LEFTBUS */
- tmp = __raw_readl(S5P_CLKDIV_LEFTBUS);
+ tmp = __raw_readl(EXYNOS4_CLKDIV_LEFTBUS);
- tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
+ tmp &= ~(EXYNOS4_CLKDIV_BUS_GDLR_MASK | EXYNOS4_CLKDIV_BUS_GPLR_MASK);
tmp |= ((exynos4210_clkdiv_lr_bus[index][0] <<
- S5P_CLKDIV_BUS_GDLR_SHIFT) |
+ EXYNOS4_CLKDIV_BUS_GDLR_SHIFT) |
(exynos4210_clkdiv_lr_bus[index][1] <<
- S5P_CLKDIV_BUS_GPLR_SHIFT));
+ EXYNOS4_CLKDIV_BUS_GPLR_SHIFT));
- __raw_writel(tmp, S5P_CLKDIV_LEFTBUS);
+ __raw_writel(tmp, EXYNOS4_CLKDIV_LEFTBUS);
do {
- tmp = __raw_readl(S5P_CLKDIV_STAT_LEFTBUS);
+ tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_LEFTBUS);
} while (tmp & 0x11);
/* Change Divider - RIGHTBUS */
- tmp = __raw_readl(S5P_CLKDIV_RIGHTBUS);
+ tmp = __raw_readl(EXYNOS4_CLKDIV_RIGHTBUS);
- tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
+ tmp &= ~(EXYNOS4_CLKDIV_BUS_GDLR_MASK | EXYNOS4_CLKDIV_BUS_GPLR_MASK);
tmp |= ((exynos4210_clkdiv_lr_bus[index][0] <<
- S5P_CLKDIV_BUS_GDLR_SHIFT) |
+ EXYNOS4_CLKDIV_BUS_GDLR_SHIFT) |
(exynos4210_clkdiv_lr_bus[index][1] <<
- S5P_CLKDIV_BUS_GPLR_SHIFT));
+ EXYNOS4_CLKDIV_BUS_GPLR_SHIFT));
- __raw_writel(tmp, S5P_CLKDIV_RIGHTBUS);
+ __raw_writel(tmp, EXYNOS4_CLKDIV_RIGHTBUS);
do {
- tmp = __raw_readl(S5P_CLKDIV_STAT_RIGHTBUS);
+ tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_RIGHTBUS);
} while (tmp & 0x11);
return 0;
@@ -376,137 +376,137 @@ static int exynos4x12_set_busclk(struct busfreq_data *data, struct opp *opp)
/* Change Divider - DMC0 */
tmp = data->dmc_divtable[index];
- __raw_writel(tmp, S5P_CLKDIV_DMC0);
+ __raw_writel(tmp, EXYNOS4_CLKDIV_DMC0);
do {
- tmp = __raw_readl(S5P_CLKDIV_STAT_DMC0);
+ tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_DMC0);
} while (tmp & 0x11111111);
/* Change Divider - DMC1 */
- tmp = __raw_readl(S5P_CLKDIV_DMC1);
+ tmp = __raw_readl(EXYNOS4_CLKDIV_DMC1);
- tmp &= ~(S5P_CLKDIV_DMC1_G2D_ACP_MASK |
- S5P_CLKDIV_DMC1_C2C_MASK |
- S5P_CLKDIV_DMC1_C2CACLK_MASK);
+ tmp &= ~(EXYNOS4_CLKDIV_DMC1_G2D_ACP_MASK |
+ EXYNOS4_CLKDIV_DMC1_C2C_MASK |
+ EXYNOS4_CLKDIV_DMC1_C2CACLK_MASK);
tmp |= ((exynos4x12_clkdiv_dmc1[index][0] <<
- S5P_CLKDIV_DMC1_G2D_ACP_SHIFT) |
+ EXYNOS4_CLKDIV_DMC1_G2D_ACP_SHIFT) |
(exynos4x12_clkdiv_dmc1[index][1] <<
- S5P_CLKDIV_DMC1_C2C_SHIFT) |
+ EXYNOS4_CLKDIV_DMC1_C2C_SHIFT) |
(exynos4x12_clkdiv_dmc1[index][2] <<
- S5P_CLKDIV_DMC1_C2CACLK_SHIFT));
+ EXYNOS4_CLKDIV_DMC1_C2CACLK_SHIFT));
- __raw_writel(tmp, S5P_CLKDIV_DMC1);
+ __raw_writel(tmp, EXYNOS4_CLKDIV_DMC1);
do {
- tmp = __raw_readl(S5P_CLKDIV_STAT_DMC1);
+ tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_DMC1);
} while (tmp & 0x111111);
/* Change Divider - TOP */
- tmp = __raw_readl(S5P_CLKDIV_TOP);
+ tmp = __raw_readl(EXYNOS4_CLKDIV_TOP);
- tmp &= ~(S5P_CLKDIV_TOP_ACLK266_GPS_MASK |
- S5P_CLKDIV_TOP_ACLK100_MASK |
- S5P_CLKDIV_TOP_ACLK160_MASK |
- S5P_CLKDIV_TOP_ACLK133_MASK |
- S5P_CLKDIV_TOP_ONENAND_MASK);
+ tmp &= ~(EXYNOS4_CLKDIV_TOP_ACLK266_GPS_MASK |
+ EXYNOS4_CLKDIV_TOP_ACLK100_MASK |
+ EXYNOS4_CLKDIV_TOP_ACLK160_MASK |
+ EXYNOS4_CLKDIV_TOP_ACLK133_MASK |
+ EXYNOS4_CLKDIV_TOP_ONENAND_MASK);
tmp |= ((exynos4x12_clkdiv_top[index][0] <<
- S5P_CLKDIV_TOP_ACLK266_GPS_SHIFT) |
+ EXYNOS4_CLKDIV_TOP_ACLK266_GPS_SHIFT) |
(exynos4x12_clkdiv_top[index][1] <<
- S5P_CLKDIV_TOP_ACLK100_SHIFT) |
+ EXYNOS4_CLKDIV_TOP_ACLK100_SHIFT) |
(exynos4x12_clkdiv_top[index][2] <<
- S5P_CLKDIV_TOP_ACLK160_SHIFT) |
+ EXYNOS4_CLKDIV_TOP_ACLK160_SHIFT) |
(exynos4x12_clkdiv_top[index][3] <<
- S5P_CLKDIV_TOP_ACLK133_SHIFT) |
+ EXYNOS4_CLKDIV_TOP_ACLK133_SHIFT) |
(exynos4x12_clkdiv_top[index][4] <<
- S5P_CLKDIV_TOP_ONENAND_SHIFT));
+ EXYNOS4_CLKDIV_TOP_ONENAND_SHIFT));
- __raw_writel(tmp, S5P_CLKDIV_TOP);
+ __raw_writel(tmp, EXYNOS4_CLKDIV_TOP);
do {
- tmp = __raw_readl(S5P_CLKDIV_STAT_TOP);
+ tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_TOP);
} while (tmp & 0x11111);
/* Change Divider - LEFTBUS */
- tmp = __raw_readl(S5P_CLKDIV_LEFTBUS);
+ tmp = __raw_readl(EXYNOS4_CLKDIV_LEFTBUS);
- tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
+ tmp &= ~(EXYNOS4_CLKDIV_BUS_GDLR_MASK | EXYNOS4_CLKDIV_BUS_GPLR_MASK);
tmp |= ((exynos4x12_clkdiv_lr_bus[index][0] <<
- S5P_CLKDIV_BUS_GDLR_SHIFT) |
+ EXYNOS4_CLKDIV_BUS_GDLR_SHIFT) |
(exynos4x12_clkdiv_lr_bus[index][1] <<
- S5P_CLKDIV_BUS_GPLR_SHIFT));
+ EXYNOS4_CLKDIV_BUS_GPLR_SHIFT));
- __raw_writel(tmp, S5P_CLKDIV_LEFTBUS);
+ __raw_writel(tmp, EXYNOS4_CLKDIV_LEFTBUS);
do {
- tmp = __raw_readl(S5P_CLKDIV_STAT_LEFTBUS);
+ tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_LEFTBUS);
} while (tmp & 0x11);
/* Change Divider - RIGHTBUS */
- tmp = __raw_readl(S5P_CLKDIV_RIGHTBUS);
+ tmp = __raw_readl(EXYNOS4_CLKDIV_RIGHTBUS);
- tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
+ tmp &= ~(EXYNOS4_CLKDIV_BUS_GDLR_MASK | EXYNOS4_CLKDIV_BUS_GPLR_MASK);
tmp |= ((exynos4x12_clkdiv_lr_bus[index][0] <<
- S5P_CLKDIV_BUS_GDLR_SHIFT) |
+ EXYNOS4_CLKDIV_BUS_GDLR_SHIFT) |
(exynos4x12_clkdiv_lr_bus[index][1] <<
- S5P_CLKDIV_BUS_GPLR_SHIFT));
+ EXYNOS4_CLKDIV_BUS_GPLR_SHIFT));
- __raw_writel(tmp, S5P_CLKDIV_RIGHTBUS);
+ __raw_writel(tmp, EXYNOS4_CLKDIV_RIGHTBUS);
do {
- tmp = __raw_readl(S5P_CLKDIV_STAT_RIGHTBUS);
+ tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_RIGHTBUS);
} while (tmp & 0x11);
/* Change Divider - MFC */
- tmp = __raw_readl(S5P_CLKDIV_MFC);
+ tmp = __raw_readl(EXYNOS4_CLKDIV_MFC);
- tmp &= ~(S5P_CLKDIV_MFC_MASK);
+ tmp &= ~(EXYNOS4_CLKDIV_MFC_MASK);
tmp |= ((exynos4x12_clkdiv_sclkip[index][0] <<
- S5P_CLKDIV_MFC_SHIFT));
+ EXYNOS4_CLKDIV_MFC_SHIFT));
- __raw_writel(tmp, S5P_CLKDIV_MFC);
+ __raw_writel(tmp, EXYNOS4_CLKDIV_MFC);
do {
- tmp = __raw_readl(S5P_CLKDIV_STAT_MFC);
+ tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_MFC);
} while (tmp & 0x1);
/* Change Divider - JPEG */
- tmp = __raw_readl(S5P_CLKDIV_CAM1);
+ tmp = __raw_readl(EXYNOS4_CLKDIV_CAM1);
- tmp &= ~(S5P_CLKDIV_CAM1_JPEG_MASK);
+ tmp &= ~(EXYNOS4_CLKDIV_CAM1_JPEG_MASK);
tmp |= ((exynos4x12_clkdiv_sclkip[index][1] <<
- S5P_CLKDIV_CAM1_JPEG_SHIFT));
+ EXYNOS4_CLKDIV_CAM1_JPEG_SHIFT));
- __raw_writel(tmp, S5P_CLKDIV_CAM1);
+ __raw_writel(tmp, EXYNOS4_CLKDIV_CAM1);
do {
- tmp = __raw_readl(S5P_CLKDIV_STAT_CAM1);
+ tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_CAM1);
} while (tmp & 0x1);
/* Change Divider - FIMC0~3 */
- tmp = __raw_readl(S5P_CLKDIV_CAM);
+ tmp = __raw_readl(EXYNOS4_CLKDIV_CAM);
- tmp &= ~(S5P_CLKDIV_CAM_FIMC0_MASK | S5P_CLKDIV_CAM_FIMC1_MASK |
- S5P_CLKDIV_CAM_FIMC2_MASK | S5P_CLKDIV_CAM_FIMC3_MASK);
+ tmp &= ~(EXYNOS4_CLKDIV_CAM_FIMC0_MASK | EXYNOS4_CLKDIV_CAM_FIMC1_MASK |
+ EXYNOS4_CLKDIV_CAM_FIMC2_MASK | EXYNOS4_CLKDIV_CAM_FIMC3_MASK);
tmp |= ((exynos4x12_clkdiv_sclkip[index][2] <<
- S5P_CLKDIV_CAM_FIMC0_SHIFT) |
+ EXYNOS4_CLKDIV_CAM_FIMC0_SHIFT) |
(exynos4x12_clkdiv_sclkip[index][2] <<
- S5P_CLKDIV_CAM_FIMC1_SHIFT) |
+ EXYNOS4_CLKDIV_CAM_FIMC1_SHIFT) |
(exynos4x12_clkdiv_sclkip[index][2] <<
- S5P_CLKDIV_CAM_FIMC2_SHIFT) |
+ EXYNOS4_CLKDIV_CAM_FIMC2_SHIFT) |
(exynos4x12_clkdiv_sclkip[index][2] <<
- S5P_CLKDIV_CAM_FIMC3_SHIFT));
+ EXYNOS4_CLKDIV_CAM_FIMC3_SHIFT));
- __raw_writel(tmp, S5P_CLKDIV_CAM);
+ __raw_writel(tmp, EXYNOS4_CLKDIV_CAM);
do {
- tmp = __raw_readl(S5P_CLKDIV_STAT_CAM1);
+ tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_CAM1);
} while (tmp & 0x1111);
return 0;
@@ -760,55 +760,55 @@ static int exynos4210_init_tables(struct busfreq_data *data)
int mgrp;
int i, err = 0;
- tmp = __raw_readl(S5P_CLKDIV_DMC0);
+ tmp = __raw_readl(EXYNOS4_CLKDIV_DMC0);
for (i = LV_0; i < EX4210_LV_NUM; i++) {
- tmp &= ~(S5P_CLKDIV_DMC0_ACP_MASK |
- S5P_CLKDIV_DMC0_ACPPCLK_MASK |
- S5P_CLKDIV_DMC0_DPHY_MASK |
- S5P_CLKDIV_DMC0_DMC_MASK |
- S5P_CLKDIV_DMC0_DMCD_MASK |
- S5P_CLKDIV_DMC0_DMCP_MASK |
- S5P_CLKDIV_DMC0_COPY2_MASK |
- S5P_CLKDIV_DMC0_CORETI_MASK);
+ tmp &= ~(EXYNOS4_CLKDIV_DMC0_ACP_MASK |
+ EXYNOS4_CLKDIV_DMC0_ACPPCLK_MASK |
+ EXYNOS4_CLKDIV_DMC0_DPHY_MASK |
+ EXYNOS4_CLKDIV_DMC0_DMC_MASK |
+ EXYNOS4_CLKDIV_DMC0_DMCD_MASK |
+ EXYNOS4_CLKDIV_DMC0_DMCP_MASK |
+ EXYNOS4_CLKDIV_DMC0_COPY2_MASK |
+ EXYNOS4_CLKDIV_DMC0_CORETI_MASK);
tmp |= ((exynos4210_clkdiv_dmc0[i][0] <<
- S5P_CLKDIV_DMC0_ACP_SHIFT) |
+ EXYNOS4_CLKDIV_DMC0_ACP_SHIFT) |
(exynos4210_clkdiv_dmc0[i][1] <<
- S5P_CLKDIV_DMC0_ACPPCLK_SHIFT) |
+ EXYNOS4_CLKDIV_DMC0_ACPPCLK_SHIFT) |
(exynos4210_clkdiv_dmc0[i][2] <<
- S5P_CLKDIV_DMC0_DPHY_SHIFT) |
+ EXYNOS4_CLKDIV_DMC0_DPHY_SHIFT) |
(exynos4210_clkdiv_dmc0[i][3] <<
- S5P_CLKDIV_DMC0_DMC_SHIFT) |
+ EXYNOS4_CLKDIV_DMC0_DMC_SHIFT) |
(exynos4210_clkdiv_dmc0[i][4] <<
- S5P_CLKDIV_DMC0_DMCD_SHIFT) |
+ EXYNOS4_CLKDIV_DMC0_DMCD_SHIFT) |
(exynos4210_clkdiv_dmc0[i][5] <<
- S5P_CLKDIV_DMC0_DMCP_SHIFT) |
+ EXYNOS4_CLKDIV_DMC0_DMCP_SHIFT) |
(exynos4210_clkdiv_dmc0[i][6] <<
- S5P_CLKDIV_DMC0_COPY2_SHIFT) |
+ EXYNOS4_CLKDIV_DMC0_COPY2_SHIFT) |
(exynos4210_clkdiv_dmc0[i][7] <<
- S5P_CLKDIV_DMC0_CORETI_SHIFT));
+ EXYNOS4_CLKDIV_DMC0_CORETI_SHIFT));
data->dmc_divtable[i] = tmp;
}
- tmp = __raw_readl(S5P_CLKDIV_TOP);
+ tmp = __raw_readl(EXYNOS4_CLKDIV_TOP);
for (i = LV_0; i < EX4210_LV_NUM; i++) {
- tmp &= ~(S5P_CLKDIV_TOP_ACLK200_MASK |
- S5P_CLKDIV_TOP_ACLK100_MASK |
- S5P_CLKDIV_TOP_ACLK160_MASK |
- S5P_CLKDIV_TOP_ACLK133_MASK |
- S5P_CLKDIV_TOP_ONENAND_MASK);
+ tmp &= ~(EXYNOS4_CLKDIV_TOP_ACLK200_MASK |
+ EXYNOS4_CLKDIV_TOP_ACLK100_MASK |
+ EXYNOS4_CLKDIV_TOP_ACLK160_MASK |
+ EXYNOS4_CLKDIV_TOP_ACLK133_MASK |
+ EXYNOS4_CLKDIV_TOP_ONENAND_MASK);
tmp |= ((exynos4210_clkdiv_top[i][0] <<
- S5P_CLKDIV_TOP_ACLK200_SHIFT) |
+ EXYNOS4_CLKDIV_TOP_ACLK200_SHIFT) |
(exynos4210_clkdiv_top[i][1] <<
- S5P_CLKDIV_TOP_ACLK100_SHIFT) |
+ EXYNOS4_CLKDIV_TOP_ACLK100_SHIFT) |
(exynos4210_clkdiv_top[i][2] <<
- S5P_CLKDIV_TOP_ACLK160_SHIFT) |
+ EXYNOS4_CLKDIV_TOP_ACLK160_SHIFT) |
(exynos4210_clkdiv_top[i][3] <<
- S5P_CLKDIV_TOP_ACLK133_SHIFT) |
+ EXYNOS4_CLKDIV_TOP_ACLK133_SHIFT) |
(exynos4210_clkdiv_top[i][4] <<
- S5P_CLKDIV_TOP_ONENAND_SHIFT));
+ EXYNOS4_CLKDIV_TOP_ONENAND_SHIFT));
data->top_divtable[i] = tmp;
}
@@ -868,32 +868,32 @@ static int exynos4x12_init_tables(struct busfreq_data *data)
int ret;
/* Enable pause function for DREX2 DVFS */
- tmp = __raw_readl(S5P_DMC_PAUSE_CTRL);
- tmp |= DMC_PAUSE_ENABLE;
- __raw_writel(tmp, S5P_DMC_PAUSE_CTRL);
+ tmp = __raw_readl(EXYNOS4_DMC_PAUSE_CTRL);
+ tmp |= EXYNOS4_DMC_PAUSE_ENABLE;
+ __raw_writel(tmp, EXYNOS4_DMC_PAUSE_CTRL);
- tmp = __raw_readl(S5P_CLKDIV_DMC0);
+ tmp = __raw_readl(EXYNOS4_CLKDIV_DMC0);
for (i = 0; i < EX4x12_LV_NUM; i++) {
- tmp &= ~(S5P_CLKDIV_DMC0_ACP_MASK |
- S5P_CLKDIV_DMC0_ACPPCLK_MASK |
- S5P_CLKDIV_DMC0_DPHY_MASK |
- S5P_CLKDIV_DMC0_DMC_MASK |
- S5P_CLKDIV_DMC0_DMCD_MASK |
- S5P_CLKDIV_DMC0_DMCP_MASK);
+ tmp &= ~(EXYNOS4_CLKDIV_DMC0_ACP_MASK |
+ EXYNOS4_CLKDIV_DMC0_ACPPCLK_MASK |
+ EXYNOS4_CLKDIV_DMC0_DPHY_MASK |
+ EXYNOS4_CLKDIV_DMC0_DMC_MASK |
+ EXYNOS4_CLKDIV_DMC0_DMCD_MASK |
+ EXYNOS4_CLKDIV_DMC0_DMCP_MASK);
tmp |= ((exynos4x12_clkdiv_dmc0[i][0] <<
- S5P_CLKDIV_DMC0_ACP_SHIFT) |
+ EXYNOS4_CLKDIV_DMC0_ACP_SHIFT) |
(exynos4x12_clkdiv_dmc0[i][1] <<
- S5P_CLKDIV_DMC0_ACPPCLK_SHIFT) |
+ EXYNOS4_CLKDIV_DMC0_ACPPCLK_SHIFT) |
(exynos4x12_clkdiv_dmc0[i][2] <<
- S5P_CLKDIV_DMC0_DPHY_SHIFT) |
+ EXYNOS4_CLKDIV_DMC0_DPHY_SHIFT) |
(exynos4x12_clkdiv_dmc0[i][3] <<
- S5P_CLKDIV_DMC0_DMC_SHIFT) |
+ EXYNOS4_CLKDIV_DMC0_DMC_SHIFT) |
(exynos4x12_clkdiv_dmc0[i][4] <<
- S5P_CLKDIV_DMC0_DMCD_SHIFT) |
+ EXYNOS4_CLKDIV_DMC0_DMCD_SHIFT) |
(exynos4x12_clkdiv_dmc0[i][5] <<
- S5P_CLKDIV_DMC0_DMCP_SHIFT));
+ EXYNOS4_CLKDIV_DMC0_DMCP_SHIFT));
data->dmc_divtable[i] = tmp;
}
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index f1a274994bb1..4a6c46dea8a0 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -252,6 +252,15 @@ config EP93XX_DMA
help
Enable support for the Cirrus Logic EP93xx M2P/M2M DMA controller.
+config DMA_SA11X0
+ tristate "SA-11x0 DMA support"
+ depends on ARCH_SA1100
+ select DMA_ENGINE
+ help
+ Support the DMA engine found on Intel StrongARM SA-1100 and
+ SA-1110 SoCs. This DMA engine can only be used with on-chip
+ devices.
+
config DMA_ENGINE
bool
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 009a222e8283..86b795baba98 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -27,3 +27,4 @@ obj-$(CONFIG_PL330_DMA) += pl330.o
obj-$(CONFIG_PCH_DMA) += pch_dma.o
obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
+obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
new file mode 100644
index 000000000000..16a6b48883cf
--- /dev/null
+++ b/drivers/dma/sa11x0-dma.c
@@ -0,0 +1,1109 @@
+/*
+ * SA11x0 DMAengine support
+ *
+ * Copyright (C) 2012 Russell King
+ * Derived in part from arch/arm/mach-sa1100/dma.c,
+ * Copyright (C) 2000, 2001 by Nicolas Pitre
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/sched.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sa11x0-dma.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#define NR_PHY_CHAN 6
+#define DMA_ALIGN 3
+#define DMA_MAX_SIZE 0x1fff
+#define DMA_CHUNK_SIZE 0x1000
+
+#define DMA_DDAR 0x00
+#define DMA_DCSR_S 0x04
+#define DMA_DCSR_C 0x08
+#define DMA_DCSR_R 0x0c
+#define DMA_DBSA 0x10
+#define DMA_DBTA 0x14
+#define DMA_DBSB 0x18
+#define DMA_DBTB 0x1c
+#define DMA_SIZE 0x20
+
+#define DCSR_RUN (1 << 0)
+#define DCSR_IE (1 << 1)
+#define DCSR_ERROR (1 << 2)
+#define DCSR_DONEA (1 << 3)
+#define DCSR_STRTA (1 << 4)
+#define DCSR_DONEB (1 << 5)
+#define DCSR_STRTB (1 << 6)
+#define DCSR_BIU (1 << 7)
+
+#define DDAR_RW (1 << 0) /* 0 = W, 1 = R */
+#define DDAR_E (1 << 1) /* 0 = LE, 1 = BE */
+#define DDAR_BS (1 << 2) /* 0 = BS4, 1 = BS8 */
+#define DDAR_DW (1 << 3) /* 0 = 8b, 1 = 16b */
+#define DDAR_Ser0UDCTr (0x0 << 4)
+#define DDAR_Ser0UDCRc (0x1 << 4)
+#define DDAR_Ser1SDLCTr (0x2 << 4)
+#define DDAR_Ser1SDLCRc (0x3 << 4)
+#define DDAR_Ser1UARTTr (0x4 << 4)
+#define DDAR_Ser1UARTRc (0x5 << 4)
+#define DDAR_Ser2ICPTr (0x6 << 4)
+#define DDAR_Ser2ICPRc (0x7 << 4)
+#define DDAR_Ser3UARTTr (0x8 << 4)
+#define DDAR_Ser3UARTRc (0x9 << 4)
+#define DDAR_Ser4MCP0Tr (0xa << 4)
+#define DDAR_Ser4MCP0Rc (0xb << 4)
+#define DDAR_Ser4MCP1Tr (0xc << 4)
+#define DDAR_Ser4MCP1Rc (0xd << 4)
+#define DDAR_Ser4SSPTr (0xe << 4)
+#define DDAR_Ser4SSPRc (0xf << 4)
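+
+/*
+ * DDAR layout, as used here: bits 0-3 select direction, endianness,
+ * burst size and transfer width; bits 4-7 select the on-chip device;
+ * the remaining bits carry the device FIFO address, which
+ * sa11x0_dma_slave_config() packs as
+ * (addr & 0xf0000000) | (addr & 0x003ffffc) << 6.
+ */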
+
+struct sa11x0_dma_sg {
+ u32 addr;
+ u32 len;
+};
+
+struct sa11x0_dma_desc {
+ struct dma_async_tx_descriptor tx;
+ u32 ddar;
+ size_t size;
+
+ /* protected by c->lock while on the vchan lists, by d->lock on desc_complete */
+ struct list_head node;
+ unsigned sglen;
+ struct sa11x0_dma_sg sg[0];
+};
+
+struct sa11x0_dma_phy;
+
+struct sa11x0_dma_chan {
+ struct dma_chan chan;
+ spinlock_t lock;
+ dma_cookie_t lc;
+
+ /* protected by c->lock */
+ struct sa11x0_dma_phy *phy;
+ enum dma_status status;
+ struct list_head desc_submitted;
+ struct list_head desc_issued;
+
+ /* protected by d->lock */
+ struct list_head node;
+
+ u32 ddar;
+ const char *name;
+};
+
+struct sa11x0_dma_phy {
+ void __iomem *base;
+ struct sa11x0_dma_dev *dev;
+ unsigned num;
+
+ struct sa11x0_dma_chan *vchan;
+
+ /* Protected by c->lock */
+ unsigned sg_load;
+ struct sa11x0_dma_desc *txd_load;
+ unsigned sg_done;
+ struct sa11x0_dma_desc *txd_done;
+#ifdef CONFIG_PM_SLEEP
+ u32 dbs[2];
+ u32 dbt[2];
+ u32 dcsr;
+#endif
+};
+
+struct sa11x0_dma_dev {
+ struct dma_device slave;
+ void __iomem *base;
+ spinlock_t lock;
+ struct tasklet_struct task;
+ struct list_head chan_pending;
+ struct list_head desc_complete;
+ struct sa11x0_dma_phy phy[NR_PHY_CHAN];
+};
+
+static struct sa11x0_dma_chan *to_sa11x0_dma_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct sa11x0_dma_chan, chan);
+}
+
+static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev)
+{
+ return container_of(dmadev, struct sa11x0_dma_dev, slave);
+}
+
+static struct sa11x0_dma_desc *to_sa11x0_dma_tx(struct dma_async_tx_descriptor *tx)
+{
+ return container_of(tx, struct sa11x0_dma_desc, tx);
+}
+
+static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c)
+{
+ if (list_empty(&c->desc_issued))
+ return NULL;
+
+ return list_first_entry(&c->desc_issued, struct sa11x0_dma_desc, node);
+}
+
+static void sa11x0_dma_start_desc(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd)
+{
+ list_del(&txd->node);
+ p->txd_load = txd;
+ p->sg_load = 0;
+
+ dev_vdbg(p->dev->slave.dev, "pchan %u: txd %p[%x]: starting: DDAR:%x\n",
+ p->num, txd, txd->tx.cookie, txd->ddar);
+}
+
+static noinline void sa11x0_dma_start_sg(struct sa11x0_dma_phy *p,
+ struct sa11x0_dma_chan *c)
+{
+ struct sa11x0_dma_desc *txd = p->txd_load;
+ struct sa11x0_dma_sg *sg;
+ void __iomem *base = p->base;
+ unsigned dbsx, dbtx;
+ u32 dcsr;
+
+ if (!txd)
+ return;
+
+ dcsr = readl_relaxed(base + DMA_DCSR_R);
+
+ /* Don't try to load the next transfer if both buffers are started */
+ if ((dcsr & (DCSR_STRTA | DCSR_STRTB)) == (DCSR_STRTA | DCSR_STRTB))
+ return;
+
+ if (p->sg_load == txd->sglen) {
+ struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c);
+
+ /*
+ * We have reached the end of the current descriptor.
+ * Peek at the next descriptor, and if compatible with
+ * the current, start processing it.
+ */
+ if (txn && txn->ddar == txd->ddar) {
+ txd = txn;
+ sa11x0_dma_start_desc(p, txn);
+ } else {
+ p->txd_load = NULL;
+ return;
+ }
+ }
+
+ sg = &txd->sg[p->sg_load++];
+
+ /* Select buffer to load according to channel status */
+ if (((dcsr & (DCSR_BIU | DCSR_STRTB)) == (DCSR_BIU | DCSR_STRTB)) ||
+ ((dcsr & (DCSR_BIU | DCSR_STRTA)) == 0)) {
+ dbsx = DMA_DBSA;
+ dbtx = DMA_DBTA;
+ dcsr = DCSR_STRTA | DCSR_IE | DCSR_RUN;
+ } else {
+ dbsx = DMA_DBSB;
+ dbtx = DMA_DBTB;
+ dcsr = DCSR_STRTB | DCSR_IE | DCSR_RUN;
+ }
+
+ writel_relaxed(sg->addr, base + dbsx);
+ writel_relaxed(sg->len, base + dbtx);
+ writel(dcsr, base + DMA_DCSR_S);
+
+ dev_dbg(p->dev->slave.dev, "pchan %u: load: DCSR:%02x DBS%c:%08x DBT%c:%08x\n",
+ p->num, dcsr,
+ 'A' + (dbsx == DMA_DBSB), sg->addr,
+ 'A' + (dbtx == DMA_DBTB), sg->len);
+}
+
+static noinline void sa11x0_dma_complete(struct sa11x0_dma_phy *p,
+ struct sa11x0_dma_chan *c)
+{
+ struct sa11x0_dma_desc *txd = p->txd_done;
+
+ if (++p->sg_done == txd->sglen) {
+ struct sa11x0_dma_dev *d = p->dev;
+
+ dev_vdbg(d->slave.dev, "pchan %u: txd %p[%x]: completed\n",
+ p->num, p->txd_done, p->txd_done->tx.cookie);
+
+ c->lc = txd->tx.cookie;
+
+ spin_lock(&d->lock);
+ list_add_tail(&txd->node, &d->desc_complete);
+ spin_unlock(&d->lock);
+
+ p->sg_done = 0;
+ p->txd_done = p->txd_load;
+
+ tasklet_schedule(&d->task);
+ }
+
+ sa11x0_dma_start_sg(p, c);
+}
+
+static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id)
+{
+ struct sa11x0_dma_phy *p = dev_id;
+ struct sa11x0_dma_dev *d = p->dev;
+ struct sa11x0_dma_chan *c;
+ u32 dcsr;
+
+ dcsr = readl_relaxed(p->base + DMA_DCSR_R);
+ if (!(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB)))
+ return IRQ_NONE;
+
+ /* Clear reported status bits */
+ writel_relaxed(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB),
+ p->base + DMA_DCSR_C);
+
+ dev_dbg(d->slave.dev, "pchan %u: irq: DCSR:%02x\n", p->num, dcsr);
+
+ if (dcsr & DCSR_ERROR) {
+ dev_err(d->slave.dev, "pchan %u: error. DCSR:%02x DDAR:%08x DBSA:%08x DBTA:%08x DBSB:%08x DBTB:%08x\n",
+ p->num, dcsr,
+ readl_relaxed(p->base + DMA_DDAR),
+ readl_relaxed(p->base + DMA_DBSA),
+ readl_relaxed(p->base + DMA_DBTA),
+ readl_relaxed(p->base + DMA_DBSB),
+ readl_relaxed(p->base + DMA_DBTB));
+ }
+
+ c = p->vchan;
+ if (c) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&c->lock, flags);
+ /*
+ * Now that we're holding the lock, check that the vchan
+ * really is associated with this pchan before touching the
+ * hardware. This should always succeed, because we won't
+ * change p->vchan or c->phy while the channel is actively
+ * transferring.
+ */
+ if (c->phy == p) {
+ if (dcsr & DCSR_DONEA)
+ sa11x0_dma_complete(p, c);
+ if (dcsr & DCSR_DONEB)
+ sa11x0_dma_complete(p, c);
+ }
+ spin_unlock_irqrestore(&c->lock, flags);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void sa11x0_dma_start_txd(struct sa11x0_dma_chan *c)
+{
+ struct sa11x0_dma_desc *txd = sa11x0_dma_next_desc(c);
+
+ /* If the issued list is empty, we have no further txds to process */
+ if (txd) {
+ struct sa11x0_dma_phy *p = c->phy;
+
+ sa11x0_dma_start_desc(p, txd);
+ p->txd_done = txd;
+ p->sg_done = 0;
+
+ /* The channel should not have any transfers started */
+ WARN_ON(readl_relaxed(p->base + DMA_DCSR_R) &
+ (DCSR_STRTA | DCSR_STRTB));
+
+ /* Clear the run and start bits before changing DDAR */
+ writel_relaxed(DCSR_RUN | DCSR_STRTA | DCSR_STRTB,
+ p->base + DMA_DCSR_C);
+ writel_relaxed(txd->ddar, p->base + DMA_DDAR);
+
+ /* Try to start both buffers */
+ sa11x0_dma_start_sg(p, c);
+ sa11x0_dma_start_sg(p, c);
+ }
+}
+
+static void sa11x0_dma_tasklet(unsigned long arg)
+{
+ struct sa11x0_dma_dev *d = (struct sa11x0_dma_dev *)arg;
+ struct sa11x0_dma_phy *p;
+ struct sa11x0_dma_chan *c;
+ struct sa11x0_dma_desc *txd, *txn;
+ LIST_HEAD(head);
+ unsigned pch, pch_alloc = 0;
+
+ dev_dbg(d->slave.dev, "tasklet enter\n");
+
+ /* Get the completed tx descriptors */
+ spin_lock_irq(&d->lock);
+ list_splice_init(&d->desc_complete, &head);
+ spin_unlock_irq(&d->lock);
+
+ list_for_each_entry(txd, &head, node) {
+ c = to_sa11x0_dma_chan(txd->tx.chan);
+
+ dev_dbg(d->slave.dev, "vchan %p: txd %p[%x] completed\n",
+ c, txd, txd->tx.cookie);
+
+ spin_lock_irq(&c->lock);
+ p = c->phy;
+ if (p) {
+ if (!p->txd_done)
+ sa11x0_dma_start_txd(c);
+ if (!p->txd_done) {
+ /* No current txd associated with this channel */
+ dev_dbg(d->slave.dev, "pchan %u: free\n", p->num);
+
+ /* Mark this channel free */
+ c->phy = NULL;
+ p->vchan = NULL;
+ }
+ }
+ spin_unlock_irq(&c->lock);
+ }
+
+ spin_lock_irq(&d->lock);
+ for (pch = 0; pch < NR_PHY_CHAN; pch++) {
+ p = &d->phy[pch];
+
+ if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
+ c = list_first_entry(&d->chan_pending,
+ struct sa11x0_dma_chan, node);
+ list_del_init(&c->node);
+
+ pch_alloc |= 1 << pch;
+
+ /* Mark this channel allocated */
+ p->vchan = c;
+
+ dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, c);
+ }
+ }
+ spin_unlock_irq(&d->lock);
+
+ for (pch = 0; pch < NR_PHY_CHAN; pch++) {
+ if (pch_alloc & (1 << pch)) {
+ p = &d->phy[pch];
+ c = p->vchan;
+
+ spin_lock_irq(&c->lock);
+ c->phy = p;
+
+ sa11x0_dma_start_txd(c);
+ spin_unlock_irq(&c->lock);
+ }
+ }
+
+ /* Now free the completed tx descriptors, and call their callbacks */
+ list_for_each_entry_safe(txd, txn, &head, node) {
+ dma_async_tx_callback callback = txd->tx.callback;
+ void *callback_param = txd->tx.callback_param;
+
+ dev_dbg(d->slave.dev, "txd %p[%x]: callback and free\n",
+ txd, txd->tx.cookie);
+
+ kfree(txd);
+
+ if (callback)
+ callback(callback_param);
+ }
+
+ dev_dbg(d->slave.dev, "tasklet exit\n");
+}
+
+static void sa11x0_dma_desc_free(struct sa11x0_dma_dev *d, struct list_head *head)
+{
+ struct sa11x0_dma_desc *txd, *txn;
+
+ list_for_each_entry_safe(txd, txn, head, node) {
+ dev_dbg(d->slave.dev, "txd %p: freeing\n", txd);
+ kfree(txd);
+ }
+}
+
+static int sa11x0_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+ return 0;
+}
+
+static void sa11x0_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
+ struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&c->lock, flags);
+ spin_lock(&d->lock);
+ list_del_init(&c->node);
+ spin_unlock(&d->lock);
+
+ list_splice_tail_init(&c->desc_submitted, &head);
+ list_splice_tail_init(&c->desc_issued, &head);
+ spin_unlock_irqrestore(&c->lock, flags);
+
+ sa11x0_dma_desc_free(d, &head);
+}
+
+static dma_addr_t sa11x0_dma_pos(struct sa11x0_dma_phy *p)
+{
+ unsigned reg;
+ u32 dcsr;
+
+ dcsr = readl_relaxed(p->base + DMA_DCSR_R);
+
+ if ((dcsr & (DCSR_BIU | DCSR_STRTA)) == DCSR_STRTA ||
+ (dcsr & (DCSR_BIU | DCSR_STRTB)) == DCSR_BIU)
+ reg = DMA_DBSA;
+ else
+ reg = DMA_DBSB;
+
+ return readl_relaxed(p->base + reg);
+}
+
+static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *state)
+{
+ struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
+ struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
+ struct sa11x0_dma_phy *p;
+ struct sa11x0_dma_desc *txd;
+ dma_cookie_t last_used, last_complete;
+ unsigned long flags;
+ enum dma_status ret;
+ size_t bytes = 0;
+
+ last_used = c->chan.cookie;
+ last_complete = c->lc;
+
+ ret = dma_async_is_complete(cookie, last_complete, last_used);
+ if (ret == DMA_SUCCESS) {
+ dma_set_tx_state(state, last_complete, last_used, 0);
+ return ret;
+ }
+
+ spin_lock_irqsave(&c->lock, flags);
+ p = c->phy;
+ ret = c->status;
+ if (p) {
+ dma_addr_t addr = sa11x0_dma_pos(p);
+
+ dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr);
+
+ txd = p->txd_done;
+ if (txd) {
+ unsigned i;
+
+ for (i = 0; i < txd->sglen; i++) {
+ dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n",
+ i, txd->sg[i].addr, txd->sg[i].len);
+ if (addr >= txd->sg[i].addr &&
+ addr < txd->sg[i].addr + txd->sg[i].len) {
+ unsigned len;
+
+ len = txd->sg[i].len -
+ (addr - txd->sg[i].addr);
+ dev_vdbg(d->slave.dev, "tx_status: [%u] +%x\n",
+ i, len);
+ bytes += len;
+ i++;
+ break;
+ }
+ }
+ for (; i < txd->sglen; i++) {
+ dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x ++\n",
+ i, txd->sg[i].addr, txd->sg[i].len);
+ bytes += txd->sg[i].len;
+ }
+ }
+ if (txd != p->txd_load && p->txd_load)
+ bytes += p->txd_load->size;
+ }
+ list_for_each_entry(txd, &c->desc_issued, node) {
+ bytes += txd->size;
+ }
+ spin_unlock_irqrestore(&c->lock, flags);
+
+ dma_set_tx_state(state, last_complete, last_used, bytes);
+
+ dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", bytes);
+
+ return ret;
+}
+
+/*
+ * Move pending txds to the issued list, and re-init pending list.
+ * If not already pending, add this channel to the list of pending
+ * channels and trigger the tasklet to run.
+ */
+static void sa11x0_dma_issue_pending(struct dma_chan *chan)
+{
+ struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
+ struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
+ unsigned long flags;
+
+ spin_lock_irqsave(&c->lock, flags);
+ list_splice_tail_init(&c->desc_submitted, &c->desc_issued);
+ if (!list_empty(&c->desc_issued)) {
+ spin_lock(&d->lock);
+ if (!c->phy && list_empty(&c->node)) {
+ list_add_tail(&c->node, &d->chan_pending);
+ tasklet_schedule(&d->task);
+ dev_dbg(d->slave.dev, "vchan %p: issued\n", c);
+ }
+ spin_unlock(&d->lock);
+ } else
+ dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", c);
+ spin_unlock_irqrestore(&c->lock, flags);
+}
+
+static dma_cookie_t sa11x0_dma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(tx->chan);
+ struct sa11x0_dma_desc *txd = to_sa11x0_dma_tx(tx);
+ unsigned long flags;
+
+ spin_lock_irqsave(&c->lock, flags);
+ c->chan.cookie += 1;
+ if (c->chan.cookie < 0)
+ c->chan.cookie = 1;
+ txd->tx.cookie = c->chan.cookie;
+
+ list_add_tail(&txd->node, &c->desc_submitted);
+ spin_unlock_irqrestore(&c->lock, flags);
+
+ dev_dbg(tx->chan->device->dev, "vchan %p: txd %p[%x]: submitted\n",
+ c, txd, txd->tx.cookie);
+
+ return txd->tx.cookie;
+}
+
+static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sg, unsigned int sglen,
+ enum dma_transfer_direction dir, unsigned long flags)
+{
+ struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
+ struct sa11x0_dma_desc *txd;
+ struct scatterlist *sgent;
+ unsigned i, j = sglen;
+ size_t size = 0;
+
+ /* SA11x0 channels can only operate in their native direction */
+ if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
+ dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
+ c, c->ddar, dir);
+ return NULL;
+ }
+
+ /* Do not allow zero-sized txds */
+ if (sglen == 0)
+ return NULL;
+
+ for_each_sg(sg, sgent, sglen, i) {
+ dma_addr_t addr = sg_dma_address(sgent);
+ unsigned int len = sg_dma_len(sgent);
+
+ if (len > DMA_MAX_SIZE)
+ j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1;
+ if (addr & DMA_ALIGN) {
+ dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %08x\n",
+ c, addr);
+ return NULL;
+ }
+ }
+
+ txd = kzalloc(sizeof(*txd) + j * sizeof(txd->sg[0]), GFP_ATOMIC);
+ if (!txd) {
+ dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", c);
+ return NULL;
+ }
+
+ j = 0;
+ for_each_sg(sg, sgent, sglen, i) {
+ dma_addr_t addr = sg_dma_address(sgent);
+ unsigned len = sg_dma_len(sgent);
+
+ size += len;
+
+ do {
+ unsigned tlen = len;
+
+ /*
+ * Check whether the transfer will fit. If not, try
+ * to split the transfer up such that we end up with
+ * equal chunks - but make sure that we preserve the
+ * alignment. This avoids small segments.
+ */
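+ /*
+ * Illustrative numbers: a 0x2400-byte entry exceeds DMA_MAX_SIZE
+ * (0x1fff), so mult = DIV_ROUND_UP(0x2400, 0x1ffc) = 2 and
+ * tlen = (0x2400 / 2) & ~3 = 0x1200, splitting the entry into
+ * two equal 0x1200-byte chunks instead of one maximal chunk
+ * plus a small remainder.
+ */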
+ if (tlen > DMA_MAX_SIZE) {
+ unsigned mult = DIV_ROUND_UP(tlen,
+ DMA_MAX_SIZE & ~DMA_ALIGN);
+
+ tlen = (tlen / mult) & ~DMA_ALIGN;
+ }
+
+ txd->sg[j].addr = addr;
+ txd->sg[j].len = tlen;
+
+ addr += tlen;
+ len -= tlen;
+ j++;
+ } while (len);
+ }
+
+ dma_async_tx_descriptor_init(&txd->tx, &c->chan);
+ txd->tx.flags = flags;
+ txd->tx.tx_submit = sa11x0_dma_tx_submit;
+ txd->ddar = c->ddar;
+ txd->size = size;
+ txd->sglen = j;
+
+ dev_dbg(chan->device->dev, "vchan %p: txd %p: size %u nr %u\n",
+ c, txd, txd->size, txd->sglen);
+
+ return &txd->tx;
+}
+
+static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_config *cfg)
+{
+ u32 ddar = c->ddar & ((0xf << 4) | DDAR_RW);
+ dma_addr_t addr;
+ enum dma_slave_buswidth width;
+ u32 maxburst;
+
+ if (ddar & DDAR_RW) {
+ addr = cfg->src_addr;
+ width = cfg->src_addr_width;
+ maxburst = cfg->src_maxburst;
+ } else {
+ addr = cfg->dst_addr;
+ width = cfg->dst_addr_width;
+ maxburst = cfg->dst_maxburst;
+ }
+
+ if ((width != DMA_SLAVE_BUSWIDTH_1_BYTE &&
+ width != DMA_SLAVE_BUSWIDTH_2_BYTES) ||
+ (maxburst != 4 && maxburst != 8))
+ return -EINVAL;
+
+ if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
+ ddar |= DDAR_DW;
+ if (maxburst == 8)
+ ddar |= DDAR_BS;
+
+ dev_dbg(c->chan.device->dev, "vchan %p: dma_slave_config addr %x width %u burst %u\n",
+ c, addr, width, maxburst);
+
+ c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6;
+
+ return 0;
+}
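+
+/*
+ * Illustrative client usage (hypothetical values, not part of this
+ * patch): a driver for a 16-bit device that reads in bursts of eight
+ * would configure its channel roughly as
+ *
+ *	struct dma_slave_config cfg = {
+ *		.src_addr	= <device FIFO physical address>,
+ *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
+ *		.src_maxburst	= 8,
+ *	};
+ *	dmaengine_slave_config(chan, &cfg);
+ */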
+
+static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
+ struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
+ struct sa11x0_dma_phy *p;
+ LIST_HEAD(head);
+ unsigned long flags;
+ int ret;
+
+ switch (cmd) {
+ case DMA_SLAVE_CONFIG:
+ return sa11x0_dma_slave_config(c, (struct dma_slave_config *)arg);
+
+ case DMA_TERMINATE_ALL:
+ dev_dbg(d->slave.dev, "vchan %p: terminate all\n", c);
+ /* Clear the tx descriptor lists */
+ spin_lock_irqsave(&c->lock, flags);
+ list_splice_tail_init(&c->desc_submitted, &head);
+ list_splice_tail_init(&c->desc_issued, &head);
+
+ p = c->phy;
+ if (p) {
+ struct sa11x0_dma_desc *txd, *txn;
+
+ dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num);
+ /* vchan is assigned to a pchan - stop the channel */
+ writel(DCSR_RUN | DCSR_IE |
+ DCSR_STRTA | DCSR_DONEA |
+ DCSR_STRTB | DCSR_DONEB,
+ p->base + DMA_DCSR_C);
+
+ list_for_each_entry_safe(txd, txn, &d->desc_complete, node)
+ if (txd->tx.chan == &c->chan)
+ list_move(&txd->node, &head);
+
+ if (p->txd_load) {
+ if (p->txd_load != p->txd_done)
+ list_add_tail(&p->txd_load->node, &head);
+ p->txd_load = NULL;
+ }
+ if (p->txd_done) {
+ list_add_tail(&p->txd_done->node, &head);
+ p->txd_done = NULL;
+ }
+ c->phy = NULL;
+ spin_lock(&d->lock);
+ p->vchan = NULL;
+ spin_unlock(&d->lock);
+ tasklet_schedule(&d->task);
+ }
+ spin_unlock_irqrestore(&c->lock, flags);
+ sa11x0_dma_desc_free(d, &head);
+ ret = 0;
+ break;
+
+ case DMA_PAUSE:
+ dev_dbg(d->slave.dev, "vchan %p: pause\n", c);
+ spin_lock_irqsave(&c->lock, flags);
+ if (c->status == DMA_IN_PROGRESS) {
+ c->status = DMA_PAUSED;
+
+ p = c->phy;
+ if (p) {
+ writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
+ } else {
+ spin_lock(&d->lock);
+ list_del_init(&c->node);
+ spin_unlock(&d->lock);
+ }
+ }
+ spin_unlock_irqrestore(&c->lock, flags);
+ ret = 0;
+ break;
+
+ case DMA_RESUME:
+ dev_dbg(d->slave.dev, "vchan %p: resume\n", c);
+ spin_lock_irqsave(&c->lock, flags);
+ if (c->status == DMA_PAUSED) {
+ c->status = DMA_IN_PROGRESS;
+
+ p = c->phy;
+ if (p) {
+ writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S);
+ } else if (!list_empty(&c->desc_issued)) {
+ spin_lock(&d->lock);
+ list_add_tail(&c->node, &d->chan_pending);
+ spin_unlock(&d->lock);
+ }
+ }
+ spin_unlock_irqrestore(&c->lock, flags);
+ ret = 0;
+ break;
+
+ default:
+ ret = -ENXIO;
+ break;
+ }
+
+ return ret;
+}
+
+struct sa11x0_dma_channel_desc {
+ u32 ddar;
+ const char *name;
+};
+
+#define CD(d1, d2) { .ddar = DDAR_##d1 | d2, .name = #d1 }
+static const struct sa11x0_dma_channel_desc chan_desc[] = {
+ CD(Ser0UDCTr, 0),
+ CD(Ser0UDCRc, DDAR_RW),
+ CD(Ser1SDLCTr, 0),
+ CD(Ser1SDLCRc, DDAR_RW),
+ CD(Ser1UARTTr, 0),
+ CD(Ser1UARTRc, DDAR_RW),
+ CD(Ser2ICPTr, 0),
+ CD(Ser2ICPRc, DDAR_RW),
+ CD(Ser3UARTTr, 0),
+ CD(Ser3UARTRc, DDAR_RW),
+ CD(Ser4MCP0Tr, 0),
+ CD(Ser4MCP0Rc, DDAR_RW),
+ CD(Ser4MCP1Tr, 0),
+ CD(Ser4MCP1Rc, DDAR_RW),
+ CD(Ser4SSPTr, 0),
+ CD(Ser4SSPRc, DDAR_RW),
+};
+
+static int __devinit sa11x0_dma_init_dmadev(struct dma_device *dmadev,
+ struct device *dev)
+{
+ unsigned i;
+
+ dmadev->chancnt = ARRAY_SIZE(chan_desc);
+ INIT_LIST_HEAD(&dmadev->channels);
+ dmadev->dev = dev;
+ dmadev->device_alloc_chan_resources = sa11x0_dma_alloc_chan_resources;
+ dmadev->device_free_chan_resources = sa11x0_dma_free_chan_resources;
+ dmadev->device_control = sa11x0_dma_control;
+ dmadev->device_tx_status = sa11x0_dma_tx_status;
+ dmadev->device_issue_pending = sa11x0_dma_issue_pending;
+
+ for (i = 0; i < dmadev->chancnt; i++) {
+ struct sa11x0_dma_chan *c;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c) {
+ dev_err(dev, "no memory for channel %u\n", i);
+ return -ENOMEM;
+ }
+
+ c->chan.device = dmadev;
+ c->status = DMA_IN_PROGRESS;
+ c->ddar = chan_desc[i].ddar;
+ c->name = chan_desc[i].name;
+ spin_lock_init(&c->lock);
+ INIT_LIST_HEAD(&c->desc_submitted);
+ INIT_LIST_HEAD(&c->desc_issued);
+ INIT_LIST_HEAD(&c->node);
+ list_add_tail(&c->chan.device_node, &dmadev->channels);
+ }
+
+ return dma_async_device_register(dmadev);
+}
+
+static int sa11x0_dma_request_irq(struct platform_device *pdev, int nr,
+ void *data)
+{
+ int irq = platform_get_irq(pdev, nr);
+
+ if (irq <= 0)
+ return -ENXIO;
+
+ return request_irq(irq, sa11x0_dma_irq, 0, dev_name(&pdev->dev), data);
+}
+
+static void sa11x0_dma_free_irq(struct platform_device *pdev, int nr,
+ void *data)
+{
+ int irq = platform_get_irq(pdev, nr);
+ if (irq > 0)
+ free_irq(irq, data);
+}
+
+static void sa11x0_dma_free_channels(struct dma_device *dmadev)
+{
+ struct sa11x0_dma_chan *c, *cn;
+
+ list_for_each_entry_safe(c, cn, &dmadev->channels, chan.device_node) {
+ list_del(&c->chan.device_node);
+ kfree(c);
+ }
+}
+
+static int __devinit sa11x0_dma_probe(struct platform_device *pdev)
+{
+ struct sa11x0_dma_dev *d;
+ struct resource *res;
+ unsigned i;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENXIO;
+
+ d = kzalloc(sizeof(*d), GFP_KERNEL);
+ if (!d) {
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+
+ spin_lock_init(&d->lock);
+ INIT_LIST_HEAD(&d->chan_pending);
+ INIT_LIST_HEAD(&d->desc_complete);
+
+ d->base = ioremap(res->start, resource_size(res));
+ if (!d->base) {
+ ret = -ENOMEM;
+ goto err_ioremap;
+ }
+
+ tasklet_init(&d->task, sa11x0_dma_tasklet, (unsigned long)d);
+
+ for (i = 0; i < NR_PHY_CHAN; i++) {
+ struct sa11x0_dma_phy *p = &d->phy[i];
+
+ p->dev = d;
+ p->num = i;
+ p->base = d->base + i * DMA_SIZE;
+ writel_relaxed(DCSR_RUN | DCSR_IE | DCSR_ERROR |
+ DCSR_DONEA | DCSR_STRTA | DCSR_DONEB | DCSR_STRTB,
+ p->base + DMA_DCSR_C);
+ writel_relaxed(0, p->base + DMA_DDAR);
+
+ ret = sa11x0_dma_request_irq(pdev, i, p);
+ if (ret) {
+ while (i) {
+ i--;
+ sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
+ }
+ goto err_irq;
+ }
+ }
+
+ dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
+ d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg;
+ ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev);
+ if (ret) {
+ dev_warn(d->slave.dev, "failed to register slave async device: %d\n",
+ ret);
+ goto err_slave_reg;
+ }
+
+ platform_set_drvdata(pdev, d);
+ return 0;
+
+ err_slave_reg:
+ sa11x0_dma_free_channels(&d->slave);
+ for (i = 0; i < NR_PHY_CHAN; i++)
+ sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
+ err_irq:
+ tasklet_kill(&d->task);
+ iounmap(d->base);
+ err_ioremap:
+ kfree(d);
+ err_alloc:
+ return ret;
+}
+
+static int __devexit sa11x0_dma_remove(struct platform_device *pdev)
+{
+ struct sa11x0_dma_dev *d = platform_get_drvdata(pdev);
+ unsigned pch;
+
+ dma_async_device_unregister(&d->slave);
+
+ sa11x0_dma_free_channels(&d->slave);
+ for (pch = 0; pch < NR_PHY_CHAN; pch++)
+ sa11x0_dma_free_irq(pdev, pch, &d->phy[pch]);
+ tasklet_kill(&d->task);
+ iounmap(d->base);
+ kfree(d);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int sa11x0_dma_suspend(struct device *dev)
+{
+ struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
+ unsigned pch;
+
+ for (pch = 0; pch < NR_PHY_CHAN; pch++) {
+ struct sa11x0_dma_phy *p = &d->phy[pch];
+ u32 dcsr, saved_dcsr;
+
+ dcsr = saved_dcsr = readl_relaxed(p->base + DMA_DCSR_R);
+ if (dcsr & DCSR_RUN) {
+ writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
+ dcsr = readl_relaxed(p->base + DMA_DCSR_R);
+ }
+
+ saved_dcsr &= DCSR_RUN | DCSR_IE;
+ if (dcsr & DCSR_BIU) {
+ p->dbs[0] = readl_relaxed(p->base + DMA_DBSB);
+ p->dbt[0] = readl_relaxed(p->base + DMA_DBTB);
+ p->dbs[1] = readl_relaxed(p->base + DMA_DBSA);
+ p->dbt[1] = readl_relaxed(p->base + DMA_DBTA);
+ saved_dcsr |= (dcsr & DCSR_STRTA ? DCSR_STRTB : 0) |
+ (dcsr & DCSR_STRTB ? DCSR_STRTA : 0);
+ } else {
+ p->dbs[0] = readl_relaxed(p->base + DMA_DBSA);
+ p->dbt[0] = readl_relaxed(p->base + DMA_DBTA);
+ p->dbs[1] = readl_relaxed(p->base + DMA_DBSB);
+ p->dbt[1] = readl_relaxed(p->base + DMA_DBTB);
+ saved_dcsr |= dcsr & (DCSR_STRTA | DCSR_STRTB);
+ }
+ p->dcsr = saved_dcsr;
+
+ writel(DCSR_STRTA | DCSR_STRTB, p->base + DMA_DCSR_C);
+ }
+
+ return 0;
+}
+
+static int sa11x0_dma_resume(struct device *dev)
+{
+ struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
+ unsigned pch;
+
+ for (pch = 0; pch < NR_PHY_CHAN; pch++) {
+ struct sa11x0_dma_phy *p = &d->phy[pch];
+ struct sa11x0_dma_desc *txd = NULL;
+ u32 dcsr = readl_relaxed(p->base + DMA_DCSR_R);
+
+ WARN_ON(dcsr & (DCSR_BIU | DCSR_STRTA | DCSR_STRTB | DCSR_RUN));
+
+ if (p->txd_done)
+ txd = p->txd_done;
+ else if (p->txd_load)
+ txd = p->txd_load;
+
+ if (!txd)
+ continue;
+
+ writel_relaxed(txd->ddar, p->base + DMA_DDAR);
+
+ writel_relaxed(p->dbs[0], p->base + DMA_DBSA);
+ writel_relaxed(p->dbt[0], p->base + DMA_DBTA);
+ writel_relaxed(p->dbs[1], p->base + DMA_DBSB);
+ writel_relaxed(p->dbt[1], p->base + DMA_DBTB);
+ writel_relaxed(p->dcsr, p->base + DMA_DCSR_S);
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops sa11x0_dma_pm_ops = {
+ .suspend_noirq = sa11x0_dma_suspend,
+ .resume_noirq = sa11x0_dma_resume,
+ .freeze_noirq = sa11x0_dma_suspend,
+ .thaw_noirq = sa11x0_dma_resume,
+ .poweroff_noirq = sa11x0_dma_suspend,
+ .restore_noirq = sa11x0_dma_resume,
+};
+
+static struct platform_driver sa11x0_dma_driver = {
+ .driver = {
+ .name = "sa11x0-dma",
+ .owner = THIS_MODULE,
+ .pm = &sa11x0_dma_pm_ops,
+ },
+ .probe = sa11x0_dma_probe,
+ .remove = __devexit_p(sa11x0_dma_remove),
+};
+
+bool sa11x0_dma_filter_fn(struct dma_chan *chan, void *param)
+{
+ if (chan->device->dev->driver == &sa11x0_dma_driver.driver) {
+ struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
+ const char *p = param;
+
+ return !strcmp(c->name, p);
+ }
+ return false;
+}
+EXPORT_SYMBOL(sa11x0_dma_filter_fn);
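+
+/*
+ * Illustrative lookup from a client driver (hypothetical, not part of
+ * this patch): the filter parameter is matched against the chan_desc[]
+ * names above, e.g.
+ *
+ *	dma_cap_mask_t mask;
+ *
+ *	dma_cap_zero(mask);
+ *	dma_cap_set(DMA_SLAVE, mask);
+ *	chan = dma_request_channel(mask, sa11x0_dma_filter_fn, "Ser4SSPRc");
+ */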
+
+static int __init sa11x0_dma_init(void)
+{
+ return platform_driver_register(&sa11x0_dma_driver);
+}
+subsys_initcall(sa11x0_dma_init);
+
+static void __exit sa11x0_dma_exit(void)
+{
+ platform_driver_unregister(&sa11x0_dma_driver);
+}
+module_exit(sa11x0_dma_exit);
+
+MODULE_AUTHOR("Russell King");
+MODULE_DESCRIPTION("SA-11x0 DMA driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:sa11x0-dma");
diff --git a/drivers/gpio/gpio-ep93xx.c b/drivers/gpio/gpio-ep93xx.c
index 1c0fc3756cb1..4ca5642e9776 100644
--- a/drivers/gpio/gpio-ep93xx.c
+++ b/drivers/gpio/gpio-ep93xx.c
@@ -378,13 +378,6 @@ static int __devinit ep93xx_gpio_probe(struct platform_device *pdev)
}
ep93xx_gpio->mmio_base = mmio;
- /* Default all ports to GPIO */
- ep93xx_devcfg_set_bits(EP93XX_SYSCON_DEVCFG_KEYS |
- EP93XX_SYSCON_DEVCFG_GONK |
- EP93XX_SYSCON_DEVCFG_EONIDE |
- EP93XX_SYSCON_DEVCFG_GONIDE |
- EP93XX_SYSCON_DEVCFG_HONIDE);
-
for (i = 0; i < ARRAY_SIZE(ep93xx_gpio_banks); i++) {
struct bgpio_chip *bgc = &ep93xx_gpio->bgc[i];
struct ep93xx_gpio_bank *bank = &ep93xx_gpio_banks[i];
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 0b0562979171..f49bd6f47a50 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -21,6 +21,7 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
+#include <linux/pm.h>
#include <mach/hardware.h>
#include <asm/irq.h>
@@ -28,19 +29,36 @@
#include <asm/gpio.h>
#include <asm/mach/irq.h>
+#define OFF_MODE 1
+
+static LIST_HEAD(omap_gpio_list);
+
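+/*
+ * Snapshot of the bank's register state, updated on every register
+ * write below so that a bank which loses context (e.g. in off-mode)
+ * can be restored; a summary of how bank->context is used throughout
+ * this patch.
+ */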
+struct gpio_regs {
+ u32 irqenable1;
+ u32 irqenable2;
+ u32 wake_en;
+ u32 ctrl;
+ u32 oe;
+ u32 leveldetect0;
+ u32 leveldetect1;
+ u32 risingdetect;
+ u32 fallingdetect;
+ u32 dataout;
+ u32 debounce;
+ u32 debounce_en;
+};
+
struct gpio_bank {
+ struct list_head node;
unsigned long pbase;
void __iomem *base;
u16 irq;
u16 virtual_irq_start;
- int method;
u32 suspend_wakeup;
-#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS)
u32 saved_wakeup;
-#endif
u32 non_wakeup_gpios;
u32 enabled_non_wakeup_gpios;
-
+ struct gpio_regs context;
u32 saved_datain;
u32 saved_fallingdetect;
u32 saved_risingdetect;
@@ -51,44 +69,27 @@ struct gpio_bank {
struct clk *dbck;
u32 mod_usage;
u32 dbck_enable_mask;
+ bool dbck_enabled;
struct device *dev;
+ bool is_mpuio;
bool dbck_flag;
+ bool loses_context;
int stride;
u32 width;
+ int context_loss_count;
+ u16 id;
+ int power_mode;
+ bool workaround_enabled;
void (*set_dataout)(struct gpio_bank *bank, int gpio, int enable);
+ int (*get_context_loss_count)(struct device *dev);
struct omap_gpio_reg_offs *regs;
};
-#ifdef CONFIG_ARCH_OMAP3
-struct omap3_gpio_regs {
- u32 irqenable1;
- u32 irqenable2;
- u32 wake_en;
- u32 ctrl;
- u32 oe;
- u32 leveldetect0;
- u32 leveldetect1;
- u32 risingdetect;
- u32 fallingdetect;
- u32 dataout;
-};
-
-static struct omap3_gpio_regs gpio_context[OMAP34XX_NR_GPIOS];
-#endif
-
-/*
- * TODO: Cleanup gpio_bank usage as it is having information
- * related to all instances of the device
- */
-static struct gpio_bank *gpio_bank;
-
-/* TODO: Analyze removing gpio_bank_count usage from driver code */
-int gpio_bank_count;
-
#define GPIO_INDEX(bank, gpio) (gpio % bank->width)
#define GPIO_BIT(bank, gpio) (1 << GPIO_INDEX(bank, gpio))
+#define GPIO_MOD_CTRL_BIT BIT(0)
static void _set_gpio_direction(struct gpio_bank *bank, int gpio, int is_input)
{
@@ -102,6 +103,7 @@ static void _set_gpio_direction(struct gpio_bank *bank, int gpio, int is_input)
else
l &= ~(1 << gpio);
__raw_writel(l, reg);
+ bank->context.oe = l;
}
@@ -132,6 +134,7 @@ static void _set_gpio_dataout_mask(struct gpio_bank *bank, int gpio, int enable)
else
l &= ~gpio_bit;
__raw_writel(l, reg);
+ bank->context.dataout = l;
}
static int _get_gpio_datain(struct gpio_bank *bank, int gpio)
@@ -160,6 +163,22 @@ static inline void _gpio_rmw(void __iomem *base, u32 reg, u32 mask, bool set)
__raw_writel(l, base + reg);
}
+static inline void _gpio_dbck_enable(struct gpio_bank *bank)
+{
+ if (bank->dbck_enable_mask && !bank->dbck_enabled) {
+ clk_enable(bank->dbck);
+ bank->dbck_enabled = true;
+ }
+}
+
+static inline void _gpio_dbck_disable(struct gpio_bank *bank)
+{
+ if (bank->dbck_enable_mask && bank->dbck_enabled) {
+ clk_disable(bank->dbck);
+ bank->dbck_enabled = false;
+ }
+}
+
/**
* _set_gpio_debounce - low level gpio debounce time
* @bank: the gpio bank we're acting upon
@@ -188,70 +207,74 @@ static void _set_gpio_debounce(struct gpio_bank *bank, unsigned gpio,
l = GPIO_BIT(bank, gpio);
+ clk_enable(bank->dbck);
reg = bank->base + bank->regs->debounce;
__raw_writel(debounce, reg);
reg = bank->base + bank->regs->debounce_en;
val = __raw_readl(reg);
- if (debounce) {
+ if (debounce)
val |= l;
- clk_enable(bank->dbck);
- } else {
+ else
val &= ~l;
- clk_disable(bank->dbck);
- }
bank->dbck_enable_mask = val;
__raw_writel(val, reg);
+ clk_disable(bank->dbck);
+ /*
+ * Enable the debounce clock for this module.
+ * This call is needed because when omap_gpio_request() calls
+ * *_runtime_get_sync(), the _gpio_dbck_enable() invoked from the
+ * runtime callback cannot yet turn on dbck: the dbck_enable_mask
+ * it checks has not been initialized at that point. Therefore we
+ * have to enable dbck here.
+ */
+ _gpio_dbck_enable(bank);
+ if (bank->dbck_enable_mask) {
+ bank->context.debounce = debounce;
+ bank->context.debounce_en = val;
+ }
}
-#ifdef CONFIG_ARCH_OMAP2PLUS
-static inline void set_24xx_gpio_triggering(struct gpio_bank *bank, int gpio,
+static inline void set_gpio_trigger(struct gpio_bank *bank, int gpio,
int trigger)
{
void __iomem *base = bank->base;
u32 gpio_bit = 1 << gpio;
- if (cpu_is_omap44xx()) {
- _gpio_rmw(base, OMAP4_GPIO_LEVELDETECT0, gpio_bit,
- trigger & IRQ_TYPE_LEVEL_LOW);
- _gpio_rmw(base, OMAP4_GPIO_LEVELDETECT1, gpio_bit,
- trigger & IRQ_TYPE_LEVEL_HIGH);
- _gpio_rmw(base, OMAP4_GPIO_RISINGDETECT, gpio_bit,
- trigger & IRQ_TYPE_EDGE_RISING);
- _gpio_rmw(base, OMAP4_GPIO_FALLINGDETECT, gpio_bit,
- trigger & IRQ_TYPE_EDGE_FALLING);
- } else {
- _gpio_rmw(base, OMAP24XX_GPIO_LEVELDETECT0, gpio_bit,
- trigger & IRQ_TYPE_LEVEL_LOW);
- _gpio_rmw(base, OMAP24XX_GPIO_LEVELDETECT1, gpio_bit,
- trigger & IRQ_TYPE_LEVEL_HIGH);
- _gpio_rmw(base, OMAP24XX_GPIO_RISINGDETECT, gpio_bit,
- trigger & IRQ_TYPE_EDGE_RISING);
- _gpio_rmw(base, OMAP24XX_GPIO_FALLINGDETECT, gpio_bit,
- trigger & IRQ_TYPE_EDGE_FALLING);
- }
+ _gpio_rmw(base, bank->regs->leveldetect0, gpio_bit,
+ trigger & IRQ_TYPE_LEVEL_LOW);
+ _gpio_rmw(base, bank->regs->leveldetect1, gpio_bit,
+ trigger & IRQ_TYPE_LEVEL_HIGH);
+ _gpio_rmw(base, bank->regs->risingdetect, gpio_bit,
+ trigger & IRQ_TYPE_EDGE_RISING);
+ _gpio_rmw(base, bank->regs->fallingdetect, gpio_bit,
+ trigger & IRQ_TYPE_EDGE_FALLING);
+
+ bank->context.leveldetect0 =
+ __raw_readl(bank->base + bank->regs->leveldetect0);
+ bank->context.leveldetect1 =
+ __raw_readl(bank->base + bank->regs->leveldetect1);
+ bank->context.risingdetect =
+ __raw_readl(bank->base + bank->regs->risingdetect);
+ bank->context.fallingdetect =
+ __raw_readl(bank->base + bank->regs->fallingdetect);
+
if (likely(!(bank->non_wakeup_gpios & gpio_bit))) {
- if (cpu_is_omap44xx()) {
- _gpio_rmw(base, OMAP4_GPIO_IRQWAKEN0, gpio_bit,
- trigger != 0);
- } else {
- /*
- * GPIO wakeup request can only be generated on edge
- * transitions
- */
- if (trigger & IRQ_TYPE_EDGE_BOTH)
- __raw_writel(1 << gpio, bank->base
- + OMAP24XX_GPIO_SETWKUENA);
- else
- __raw_writel(1 << gpio, bank->base
- + OMAP24XX_GPIO_CLEARWKUENA);
- }
+ _gpio_rmw(base, bank->regs->wkup_en, gpio_bit, trigger != 0);
+ bank->context.wake_en =
+ __raw_readl(bank->base + bank->regs->wkup_en);
}
+
/* This part needs to be executed always for OMAP{34xx, 44xx} */
- if (cpu_is_omap34xx() || cpu_is_omap44xx() ||
- (bank->non_wakeup_gpios & gpio_bit)) {
+ if (!bank->regs->irqctrl) {
+ /* On omap24xx, proceed only if the GPIO is in the non-wakeup set */
+ if (bank->non_wakeup_gpios) {
+ if (!(bank->non_wakeup_gpios & gpio_bit))
+ goto exit;
+ }
+
/*
* Log the edge gpio and manually trigger the IRQ
* after resume if the input level changes
@@ -264,17 +287,11 @@ static inline void set_24xx_gpio_triggering(struct gpio_bank *bank, int gpio,
bank->enabled_non_wakeup_gpios &= ~gpio_bit;
}
- if (cpu_is_omap44xx()) {
- bank->level_mask =
- __raw_readl(bank->base + OMAP4_GPIO_LEVELDETECT0) |
- __raw_readl(bank->base + OMAP4_GPIO_LEVELDETECT1);
- } else {
- bank->level_mask =
- __raw_readl(bank->base + OMAP24XX_GPIO_LEVELDETECT0) |
- __raw_readl(bank->base + OMAP24XX_GPIO_LEVELDETECT1);
- }
+exit:
+ bank->level_mask =
+ __raw_readl(bank->base + bank->regs->leveldetect0) |
+ __raw_readl(bank->base + bank->regs->leveldetect1);
}
-#endif
#ifdef CONFIG_ARCH_OMAP1
/*
@@ -286,23 +303,10 @@ static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio)
void __iomem *reg = bank->base;
u32 l = 0;
- switch (bank->method) {
- case METHOD_MPUIO:
- reg += OMAP_MPUIO_GPIO_INT_EDGE / bank->stride;
- break;
-#ifdef CONFIG_ARCH_OMAP15XX
- case METHOD_GPIO_1510:
- reg += OMAP1510_GPIO_INT_CONTROL;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
- case METHOD_GPIO_7XX:
- reg += OMAP7XX_GPIO_INT_CONTROL;
- break;
-#endif
- default:
+ if (!bank->regs->irqctrl)
return;
- }
+
+ reg += bank->regs->irqctrl;
l = __raw_readl(reg);
if ((l >> gpio) & 1)
@@ -312,31 +316,21 @@ static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio)
__raw_writel(l, reg);
}
+#else
+static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio) {}
#endif
static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, int trigger)
{
void __iomem *reg = bank->base;
+ void __iomem *base = bank->base;
u32 l = 0;
- switch (bank->method) {
-#ifdef CONFIG_ARCH_OMAP1
- case METHOD_MPUIO:
- reg += OMAP_MPUIO_GPIO_INT_EDGE / bank->stride;
- l = __raw_readl(reg);
- if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
- bank->toggle_mask |= 1 << gpio;
- if (trigger & IRQ_TYPE_EDGE_RISING)
- l |= 1 << gpio;
- else if (trigger & IRQ_TYPE_EDGE_FALLING)
- l &= ~(1 << gpio);
- else
- goto bad;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP15XX
- case METHOD_GPIO_1510:
- reg += OMAP1510_GPIO_INT_CONTROL;
+ if (bank->regs->leveldetect0 && bank->regs->wkup_en) {
+ set_gpio_trigger(bank, gpio, trigger);
+ } else if (bank->regs->irqctrl) {
+ reg += bank->regs->irqctrl;
+
l = __raw_readl(reg);
if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
bank->toggle_mask |= 1 << gpio;
@@ -345,15 +339,15 @@ static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, int trigger)
else if (trigger & IRQ_TYPE_EDGE_FALLING)
l &= ~(1 << gpio);
else
- goto bad;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP16XX
- case METHOD_GPIO_1610:
+ return -EINVAL;
+
+ __raw_writel(l, reg);
+ } else if (bank->regs->edgectrl1) {
if (gpio & 0x08)
- reg += OMAP1610_GPIO_EDGE_CTRL2;
+ reg += bank->regs->edgectrl2;
else
- reg += OMAP1610_GPIO_EDGE_CTRL1;
+ reg += bank->regs->edgectrl1;
+
gpio &= 0x07;
l = __raw_readl(reg);
l &= ~(3 << (gpio << 1));
@@ -361,40 +355,14 @@ static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, int trigger)
l |= 2 << (gpio << 1);
if (trigger & IRQ_TYPE_EDGE_FALLING)
l |= 1 << (gpio << 1);
- if (trigger)
- /* Enable wake-up during idle for dynamic tick */
- __raw_writel(1 << gpio, bank->base + OMAP1610_GPIO_SET_WAKEUPENA);
- else
- __raw_writel(1 << gpio, bank->base + OMAP1610_GPIO_CLEAR_WAKEUPENA);
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
- case METHOD_GPIO_7XX:
- reg += OMAP7XX_GPIO_INT_CONTROL;
- l = __raw_readl(reg);
- if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
- bank->toggle_mask |= 1 << gpio;
- if (trigger & IRQ_TYPE_EDGE_RISING)
- l |= 1 << gpio;
- else if (trigger & IRQ_TYPE_EDGE_FALLING)
- l &= ~(1 << gpio);
- else
- goto bad;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP2PLUS
- case METHOD_GPIO_24XX:
- case METHOD_GPIO_44XX:
- set_24xx_gpio_triggering(bank, gpio, trigger);
- return 0;
-#endif
- default:
- goto bad;
+
+ /* Enable wake-up during idle for dynamic tick */
+ _gpio_rmw(base, bank->regs->wkup_en, 1 << gpio, trigger);
+ bank->context.wake_en =
+ __raw_readl(bank->base + bank->regs->wkup_en);
+ __raw_writel(l, reg);
}
- __raw_writel(l, reg);
return 0;
-bad:
- return -EINVAL;
}
static int gpio_irq_type(struct irq_data *d, unsigned type)
@@ -412,12 +380,12 @@ static int gpio_irq_type(struct irq_data *d, unsigned type)
if (type & ~IRQ_TYPE_SENSE_MASK)
return -EINVAL;
- /* OMAP1 allows only only edge triggering */
- if (!cpu_class_is_omap2()
- && (type & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH)))
+ bank = irq_data_get_irq_chip_data(d);
+
+ if (!bank->regs->leveldetect0 &&
+ (type & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH)))
return -EINVAL;
- bank = irq_data_get_irq_chip_data(d);
spin_lock_irqsave(&bank->lock, flags);
retval = _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), type);
spin_unlock_irqrestore(&bank->lock, flags);
@@ -484,6 +452,7 @@ static void _enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
}
__raw_writel(l, reg);
+ bank->context.irqenable1 = l;
}
static void _disable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
@@ -504,6 +473,7 @@ static void _disable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
}
__raw_writel(l, reg);
+ bank->context.irqenable1 = l;
}
static inline void _set_gpio_irqenable(struct gpio_bank *bank, int gpio, int enable)
@@ -567,38 +537,39 @@ static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
unsigned long flags;
- spin_lock_irqsave(&bank->lock, flags);
+ /*
+ * If this is the first gpio_request for the bank,
+ * enable the bank module.
+ */
+ if (!bank->mod_usage)
+ pm_runtime_get_sync(bank->dev);
+ spin_lock_irqsave(&bank->lock, flags);
/* Set trigger to none. You need to enable the desired trigger with
* request_irq() or set_irq_type().
*/
_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
-#ifdef CONFIG_ARCH_OMAP15XX
- if (bank->method == METHOD_GPIO_1510) {
- void __iomem *reg;
+ if (bank->regs->pinctrl) {
+ void __iomem *reg = bank->base + bank->regs->pinctrl;
/* Claim the pin for MPU */
- reg = bank->base + OMAP1510_GPIO_PIN_CONTROL;
__raw_writel(__raw_readl(reg) | (1 << offset), reg);
}
-#endif
- if (!cpu_class_is_omap1()) {
- if (!bank->mod_usage) {
- void __iomem *reg = bank->base;
- u32 ctrl;
-
- if (cpu_is_omap24xx() || cpu_is_omap34xx())
- reg += OMAP24XX_GPIO_CTRL;
- else if (cpu_is_omap44xx())
- reg += OMAP4_GPIO_CTRL;
- ctrl = __raw_readl(reg);
- /* Module is enabled, clocks are not gated */
- ctrl &= 0xFFFFFFFE;
- __raw_writel(ctrl, reg);
- }
- bank->mod_usage |= 1 << offset;
+
+ if (bank->regs->ctrl && !bank->mod_usage) {
+ void __iomem *reg = bank->base + bank->regs->ctrl;
+ u32 ctrl;
+
+ ctrl = __raw_readl(reg);
+ /* Module is enabled, clocks are not gated */
+ ctrl &= ~GPIO_MOD_CTRL_BIT;
+ __raw_writel(ctrl, reg);
+ bank->context.ctrl = ctrl;
}
+
+ bank->mod_usage |= 1 << offset;
+
spin_unlock_irqrestore(&bank->lock, flags);
return 0;
@@ -607,48 +578,40 @@ static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
{
struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
+ void __iomem *base = bank->base;
unsigned long flags;
spin_lock_irqsave(&bank->lock, flags);
-#ifdef CONFIG_ARCH_OMAP16XX
- if (bank->method == METHOD_GPIO_1610) {
- /* Disable wake-up during idle for dynamic tick */
- void __iomem *reg = bank->base + OMAP1610_GPIO_CLEAR_WAKEUPENA;
- __raw_writel(1 << offset, reg);
- }
-#endif
-#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
- if (bank->method == METHOD_GPIO_24XX) {
- /* Disable wake-up during idle for dynamic tick */
- void __iomem *reg = bank->base + OMAP24XX_GPIO_CLEARWKUENA;
- __raw_writel(1 << offset, reg);
- }
-#endif
-#ifdef CONFIG_ARCH_OMAP4
- if (bank->method == METHOD_GPIO_44XX) {
+
+ if (bank->regs->wkup_en) {
/* Disable wake-up during idle for dynamic tick */
- void __iomem *reg = bank->base + OMAP4_GPIO_IRQWAKEN0;
- __raw_writel(1 << offset, reg);
+ _gpio_rmw(base, bank->regs->wkup_en, 1 << offset, 0);
+ bank->context.wake_en =
+ __raw_readl(bank->base + bank->regs->wkup_en);
}
-#endif
- if (!cpu_class_is_omap1()) {
- bank->mod_usage &= ~(1 << offset);
- if (!bank->mod_usage) {
- void __iomem *reg = bank->base;
- u32 ctrl;
-
- if (cpu_is_omap24xx() || cpu_is_omap34xx())
- reg += OMAP24XX_GPIO_CTRL;
- else if (cpu_is_omap44xx())
- reg += OMAP4_GPIO_CTRL;
- ctrl = __raw_readl(reg);
- /* Module is disabled, clocks are gated */
- ctrl |= 1;
- __raw_writel(ctrl, reg);
- }
+
+ bank->mod_usage &= ~(1 << offset);
+
+ if (bank->regs->ctrl && !bank->mod_usage) {
+ void __iomem *reg = bank->base + bank->regs->ctrl;
+ u32 ctrl;
+
+ ctrl = __raw_readl(reg);
+ /* Module is disabled, clocks are gated */
+ ctrl |= GPIO_MOD_CTRL_BIT;
+ __raw_writel(ctrl, reg);
+ bank->context.ctrl = ctrl;
}
+
_reset_gpio(bank, bank->chip.base + offset);
spin_unlock_irqrestore(&bank->lock, flags);
+
+ /*
+ * If this is the last gpio to be freed in the bank,
+ * disable the bank module.
+ */
+ if (!bank->mod_usage)
+ pm_runtime_put(bank->dev);
}
/*
@@ -674,6 +637,7 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
bank = irq_get_handler_data(irq);
isr_reg = bank->base + bank->regs->irqstatus;
+ pm_runtime_get_sync(bank->dev);
if (WARN_ON(!isr_reg))
goto exit;
@@ -685,12 +649,8 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
enabled = _get_gpio_irqbank_mask(bank);
isr_saved = isr = __raw_readl(isr_reg) & enabled;
- if (cpu_is_omap15xx() && (bank->method == METHOD_MPUIO))
- isr &= 0x0000ffff;
-
- if (cpu_class_is_omap2()) {
+ if (bank->level_mask)
level_mask = bank->level_mask & enabled;
- }
/* clear edge sensitive interrupts before handler(s) are
called so that we don't miss any interrupt occurred while
@@ -718,7 +678,6 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
if (!(isr & 1))
continue;
-#ifdef CONFIG_ARCH_OMAP1
/*
* Some chips can't respond to both rising and falling
* at the same time. If this irq was requested with
@@ -728,7 +687,6 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
*/
if (bank->toggle_mask & (1 << gpio_index))
_toggle_gpio_edge_triggering(bank, gpio_index);
-#endif
generic_handle_irq(gpio_irq);
}
@@ -740,6 +698,7 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
exit:
if (!unmasked)
chained_irq_exit(chip, desc);
+ pm_runtime_put(bank->dev);
}
static void gpio_irq_shutdown(struct irq_data *d)
@@ -808,14 +767,6 @@ static struct irq_chip gpio_irq_chip = {
/*---------------------------------------------------------------------*/
-#ifdef CONFIG_ARCH_OMAP1
-
-#define bank_is_mpuio(bank) ((bank)->method == METHOD_MPUIO)
-
-#ifdef CONFIG_ARCH_OMAP16XX
-
-#include <linux/platform_device.h>
-
static int omap_mpuio_suspend_noirq(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -869,32 +820,16 @@ static struct platform_device omap_mpuio_device = {
/* could list the /proc/iomem resources */
};
-static inline void mpuio_init(void)
+static inline void mpuio_init(struct gpio_bank *bank)
{
- struct gpio_bank *bank = &gpio_bank[0];
platform_set_drvdata(&omap_mpuio_device, bank);
if (platform_driver_register(&omap_mpuio_driver) == 0)
(void) platform_device_register(&omap_mpuio_device);
}
-#else
-static inline void mpuio_init(void) {}
-#endif /* 16xx */
-
-#else
-
-#define bank_is_mpuio(bank) 0
-static inline void mpuio_init(void) {}
-
-#endif
-
/*---------------------------------------------------------------------*/
-/* REVISIT these are stupid implementations! replace by ones that
- * don't switch on METHOD_* and which mostly avoid spinlocks
- */
-
static int gpio_input(struct gpio_chip *chip, unsigned offset)
{
struct gpio_bank *bank;
@@ -1007,78 +942,32 @@ static void __init omap_gpio_show_rev(struct gpio_bank *bank)
*/
static struct lock_class_key gpio_lock_class;
-static inline int init_gpio_info(struct platform_device *pdev)
+static void omap_gpio_mod_init(struct gpio_bank *bank)
{
- /* TODO: Analyze removing gpio_bank_count usage from driver code */
- gpio_bank = kzalloc(gpio_bank_count * sizeof(struct gpio_bank),
- GFP_KERNEL);
- if (!gpio_bank) {
- dev_err(&pdev->dev, "Memory alloc failed for gpio_bank\n");
- return -ENOMEM;
- }
- return 0;
-}
+ void __iomem *base = bank->base;
+ u32 l = 0xffffffff;
-/* TODO: Cleanup cpu_is_* checks */
-static void omap_gpio_mod_init(struct gpio_bank *bank, int id)
-{
- if (cpu_class_is_omap2()) {
- if (cpu_is_omap44xx()) {
- __raw_writel(0xffffffff, bank->base +
- OMAP4_GPIO_IRQSTATUSCLR0);
- __raw_writel(0x00000000, bank->base +
- OMAP4_GPIO_DEBOUNCENABLE);
- /* Initialize interface clk ungated, module enabled */
- __raw_writel(0, bank->base + OMAP4_GPIO_CTRL);
- } else if (cpu_is_omap34xx()) {
- __raw_writel(0x00000000, bank->base +
- OMAP24XX_GPIO_IRQENABLE1);
- __raw_writel(0xffffffff, bank->base +
- OMAP24XX_GPIO_IRQSTATUS1);
- __raw_writel(0x00000000, bank->base +
- OMAP24XX_GPIO_DEBOUNCE_EN);
-
- /* Initialize interface clk ungated, module enabled */
- __raw_writel(0, bank->base + OMAP24XX_GPIO_CTRL);
- } else if (cpu_is_omap24xx()) {
- static const u32 non_wakeup_gpios[] = {
- 0xe203ffc0, 0x08700040
- };
- if (id < ARRAY_SIZE(non_wakeup_gpios))
- bank->non_wakeup_gpios = non_wakeup_gpios[id];
- }
- } else if (cpu_class_is_omap1()) {
- if (bank_is_mpuio(bank))
- __raw_writew(0xffff, bank->base +
- OMAP_MPUIO_GPIO_MASKIT / bank->stride);
- if (cpu_is_omap15xx() && bank->method == METHOD_GPIO_1510) {
- __raw_writew(0xffff, bank->base
- + OMAP1510_GPIO_INT_MASK);
- __raw_writew(0x0000, bank->base
- + OMAP1510_GPIO_INT_STATUS);
- }
- if (cpu_is_omap16xx() && bank->method == METHOD_GPIO_1610) {
- __raw_writew(0x0000, bank->base
- + OMAP1610_GPIO_IRQENABLE1);
- __raw_writew(0xffff, bank->base
- + OMAP1610_GPIO_IRQSTATUS1);
- __raw_writew(0x0014, bank->base
- + OMAP1610_GPIO_SYSCONFIG);
+ if (bank->width == 16)
+ l = 0xffff;
- /*
- * Enable system clock for GPIO module.
- * The CAM_CLK_CTRL *is* really the right place.
- */
- omap_writel(omap_readl(ULPD_CAM_CLK_CTRL) | 0x04,
- ULPD_CAM_CLK_CTRL);
- }
- if (cpu_is_omap7xx() && bank->method == METHOD_GPIO_7XX) {
- __raw_writel(0xffffffff, bank->base
- + OMAP7XX_GPIO_INT_MASK);
- __raw_writel(0x00000000, bank->base
- + OMAP7XX_GPIO_INT_STATUS);
- }
+ if (bank->is_mpuio) {
+ __raw_writel(l, bank->base + bank->regs->irqenable);
+ return;
}
+
+ _gpio_rmw(base, bank->regs->irqenable, l, bank->regs->irqenable_inv);
+ _gpio_rmw(base, bank->regs->irqstatus, l,
+ bank->regs->irqenable_inv == false);
+ if (bank->regs->debounce_en)
+ _gpio_rmw(base, bank->regs->debounce_en, 0, 1);
+
+ /* Save OE default value (0xffffffff) in the context */
+ bank->context.oe = __raw_readl(bank->base + bank->regs->direction);
+ /* Initialize interface clk ungated, module enabled */
+ if (bank->regs->ctrl)
+ _gpio_rmw(base, bank->regs->ctrl, 0, 1);
}
static __init void
@@ -1101,8 +990,8 @@ omap_mpuio_alloc_gc(struct gpio_bank *bank, unsigned int irq_start,
ct->chip.irq_mask = irq_gc_mask_set_bit;
ct->chip.irq_unmask = irq_gc_mask_clr_bit;
ct->chip.irq_set_type = gpio_irq_type;
- /* REVISIT: assuming only 16xx supports MPUIO wake events */
- if (cpu_is_omap16xx())
+
+ if (bank->regs->wkup_en)
ct->chip.irq_set_wake = gpio_wake_enable,
ct->regs.mask = OMAP_MPUIO_GPIO_INT / bank->stride;
@@ -1115,7 +1004,6 @@ static void __devinit omap_gpio_chip_init(struct gpio_bank *bank)
int j;
static int gpio;
- bank->mod_usage = 0;
/*
* REVISIT eventually switch from OMAP-specific gpio structs
* over to the generic ones
@@ -1128,11 +1016,10 @@ static void __devinit omap_gpio_chip_init(struct gpio_bank *bank)
bank->chip.set_debounce = gpio_debounce;
bank->chip.set = gpio_set;
bank->chip.to_irq = gpio_2irq;
- if (bank_is_mpuio(bank)) {
+ if (bank->is_mpuio) {
bank->chip.label = "mpuio";
-#ifdef CONFIG_ARCH_OMAP16XX
- bank->chip.dev = &omap_mpuio_device.dev;
-#endif
+ if (bank->regs->wkup_en)
+ bank->chip.dev = &omap_mpuio_device.dev;
bank->chip.base = OMAP_MPUIO(0);
} else {
bank->chip.label = "gpio";
@@ -1147,7 +1034,7 @@ static void __devinit omap_gpio_chip_init(struct gpio_bank *bank)
j < bank->virtual_irq_start + bank->width; j++) {
irq_set_lockdep_class(j, &gpio_lock_class);
irq_set_chip_data(j, bank);
- if (bank_is_mpuio(bank)) {
+ if (bank->is_mpuio) {
omap_mpuio_alloc_gc(bank, j, bank->width);
} else {
irq_set_chip(j, &gpio_irq_chip);
@@ -1161,42 +1048,44 @@ static void __devinit omap_gpio_chip_init(struct gpio_bank *bank)
static int __devinit omap_gpio_probe(struct platform_device *pdev)
{
- static int gpio_init_done;
struct omap_gpio_platform_data *pdata;
struct resource *res;
- int id;
struct gpio_bank *bank;
+ int ret = 0;
- if (!pdev->dev.platform_data)
- return -EINVAL;
-
- pdata = pdev->dev.platform_data;
-
- if (!gpio_init_done) {
- int ret;
-
- ret = init_gpio_info(pdev);
- if (ret)
- return ret;
+ if (!pdev->dev.platform_data) {
+ ret = -EINVAL;
+ goto err_exit;
}
- id = pdev->id;
- bank = &gpio_bank[id];
+ bank = kzalloc(sizeof(struct gpio_bank), GFP_KERNEL);
+ if (!bank) {
+ dev_err(&pdev->dev, "Memory alloc failed for gpio_bank\n");
+ ret = -ENOMEM;
+ goto err_exit;
+ }
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (unlikely(!res)) {
- dev_err(&pdev->dev, "GPIO Bank %i Invalid IRQ resource\n", id);
- return -ENODEV;
+ dev_err(&pdev->dev, "GPIO Bank %i Invalid IRQ resource\n",
+ pdev->id);
+ ret = -ENODEV;
+ goto err_free;
}
bank->irq = res->start;
+ bank->id = pdev->id;
+
+ pdata = pdev->dev.platform_data;
bank->virtual_irq_start = pdata->virtual_irq_start;
- bank->method = pdata->bank_type;
bank->dev = &pdev->dev;
bank->dbck_flag = pdata->dbck_flag;
bank->stride = pdata->bank_stride;
bank->width = pdata->bank_width;
-
+ bank->is_mpuio = pdata->is_mpuio;
+ bank->non_wakeup_gpios = pdata->non_wakeup_gpios;
+ bank->loses_context = pdata->loses_context;
+ bank->get_context_loss_count = pdata->get_context_loss_count;
bank->regs = pdata->regs;
if (bank->regs->set_dataout && bank->regs->clr_dataout)
@@ -1209,369 +1098,310 @@ static int __devinit omap_gpio_probe(struct platform_device *pdev)
/* Static mapping, never released */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (unlikely(!res)) {
- dev_err(&pdev->dev, "GPIO Bank %i Invalid mem resource\n", id);
- return -ENODEV;
+ dev_err(&pdev->dev, "GPIO Bank %i Invalid mem resource\n",
+ pdev->id);
+ ret = -ENODEV;
+ goto err_free;
}
bank->base = ioremap(res->start, resource_size(res));
if (!bank->base) {
- dev_err(&pdev->dev, "Could not ioremap gpio bank%i\n", id);
- return -ENOMEM;
+ dev_err(&pdev->dev, "Could not ioremap gpio bank%i\n",
+ pdev->id);
+ ret = -ENOMEM;
+ goto err_free;
}
+ platform_set_drvdata(pdev, bank);
+
pm_runtime_enable(bank->dev);
+ pm_runtime_irq_safe(bank->dev);
pm_runtime_get_sync(bank->dev);
- omap_gpio_mod_init(bank, id);
+ if (bank->is_mpuio)
+ mpuio_init(bank);
+
+ omap_gpio_mod_init(bank);
omap_gpio_chip_init(bank);
omap_gpio_show_rev(bank);
- if (!gpio_init_done)
- gpio_init_done = 1;
+ pm_runtime_put(bank->dev);
- return 0;
+ list_add_tail(&bank->node, &omap_gpio_list);
+
+ return ret;
+
+err_free:
+ kfree(bank);
+err_exit:
+ return ret;
}
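
The rewritten probe uses the standard kernel goto-unwind idiom: each failure path jumps to a label that releases only what was already acquired. A generic sketch of the pattern (struct foo_priv, foo_probe and the labels are illustrative, not from this driver):

    struct foo_priv { void __iomem *base; };

    static int foo_probe(struct platform_device *pdev)
    {
            struct foo_priv *priv;
            struct resource *res;
            int ret;

            priv = kzalloc(sizeof(*priv), GFP_KERNEL);
            if (!priv)
                    return -ENOMEM;                 /* nothing to unwind yet */

            res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            if (!res) {
                    ret = -ENODEV;
                    goto err_free;                  /* undo only the allocation */
            }

            priv->base = ioremap(res->start, resource_size(res));
            if (!priv->base) {
                    ret = -ENOMEM;
                    goto err_free;
            }

            platform_set_drvdata(pdev, priv);
            return 0;

    err_free:
            kfree(priv);
            return ret;
    }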
-#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS)
-static int omap_gpio_suspend(void)
+#ifdef CONFIG_ARCH_OMAP2PLUS
+
+#if defined(CONFIG_PM_SLEEP)
+static int omap_gpio_suspend(struct device *dev)
{
- int i;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct gpio_bank *bank = platform_get_drvdata(pdev);
+ void __iomem *base = bank->base;
+ void __iomem *wakeup_enable;
+ unsigned long flags;
- if (!cpu_class_is_omap2() && !cpu_is_omap16xx())
+ if (!bank->mod_usage || !bank->loses_context)
return 0;
- for (i = 0; i < gpio_bank_count; i++) {
- struct gpio_bank *bank = &gpio_bank[i];
- void __iomem *wake_status;
- void __iomem *wake_clear;
- void __iomem *wake_set;
- unsigned long flags;
-
- switch (bank->method) {
-#ifdef CONFIG_ARCH_OMAP16XX
- case METHOD_GPIO_1610:
- wake_status = bank->base + OMAP1610_GPIO_WAKEUPENABLE;
- wake_clear = bank->base + OMAP1610_GPIO_CLEAR_WAKEUPENA;
- wake_set = bank->base + OMAP1610_GPIO_SET_WAKEUPENA;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
- case METHOD_GPIO_24XX:
- wake_status = bank->base + OMAP24XX_GPIO_WAKE_EN;
- wake_clear = bank->base + OMAP24XX_GPIO_CLEARWKUENA;
- wake_set = bank->base + OMAP24XX_GPIO_SETWKUENA;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP4
- case METHOD_GPIO_44XX:
- wake_status = bank->base + OMAP4_GPIO_IRQWAKEN0;
- wake_clear = bank->base + OMAP4_GPIO_IRQWAKEN0;
- wake_set = bank->base + OMAP4_GPIO_IRQWAKEN0;
- break;
-#endif
- default:
- continue;
- }
+ if (!bank->regs->wkup_en || !bank->suspend_wakeup)
+ return 0;
- spin_lock_irqsave(&bank->lock, flags);
- bank->saved_wakeup = __raw_readl(wake_status);
- __raw_writel(0xffffffff, wake_clear);
- __raw_writel(bank->suspend_wakeup, wake_set);
- spin_unlock_irqrestore(&bank->lock, flags);
- }
+ wakeup_enable = bank->base + bank->regs->wkup_en;
+
+ spin_lock_irqsave(&bank->lock, flags);
+ bank->saved_wakeup = __raw_readl(wakeup_enable);
+ _gpio_rmw(base, bank->regs->wkup_en, 0xffffffff, 0);
+ _gpio_rmw(base, bank->regs->wkup_en, bank->suspend_wakeup, 1);
+ spin_unlock_irqrestore(&bank->lock, flags);
return 0;
}
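
The two rmw calls implement "clear everything, then arm exactly the requested mask", so whatever the register held before suspend is irrelevant; a worked example with illustrative values:

    /* Illustrative: wkup_en held 0xdeadbeef, suspend_wakeup = 0x00000021
     *   _gpio_rmw(base, wkup_en, 0xffffffff, 0)  ->  0x00000000
     *   _gpio_rmw(base, wkup_en, 0x00000021, 1)  ->  0x00000021
     * Only GPIOs 0 and 5 can now wake the system from suspend.
     */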
-static void omap_gpio_resume(void)
+static int omap_gpio_resume(struct device *dev)
{
- int i;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct gpio_bank *bank = platform_get_drvdata(pdev);
+ void __iomem *base = bank->base;
+ unsigned long flags;
- if (!cpu_class_is_omap2() && !cpu_is_omap16xx())
- return;
+ if (!bank->mod_usage || !bank->loses_context)
+ return 0;
- for (i = 0; i < gpio_bank_count; i++) {
- struct gpio_bank *bank = &gpio_bank[i];
- void __iomem *wake_clear;
- void __iomem *wake_set;
- unsigned long flags;
-
- switch (bank->method) {
-#ifdef CONFIG_ARCH_OMAP16XX
- case METHOD_GPIO_1610:
- wake_clear = bank->base + OMAP1610_GPIO_CLEAR_WAKEUPENA;
- wake_set = bank->base + OMAP1610_GPIO_SET_WAKEUPENA;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
- case METHOD_GPIO_24XX:
- wake_clear = bank->base + OMAP24XX_GPIO_CLEARWKUENA;
- wake_set = bank->base + OMAP24XX_GPIO_SETWKUENA;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP4
- case METHOD_GPIO_44XX:
- wake_clear = bank->base + OMAP4_GPIO_IRQWAKEN0;
- wake_set = bank->base + OMAP4_GPIO_IRQWAKEN0;
- break;
-#endif
- default:
- continue;
- }
+ if (!bank->regs->wkup_en || !bank->saved_wakeup)
+ return 0;
- spin_lock_irqsave(&bank->lock, flags);
- __raw_writel(0xffffffff, wake_clear);
- __raw_writel(bank->saved_wakeup, wake_set);
- spin_unlock_irqrestore(&bank->lock, flags);
- }
-}
+ spin_lock_irqsave(&bank->lock, flags);
+ _gpio_rmw(base, bank->regs->wkup_en, 0xffffffff, 0);
+ _gpio_rmw(base, bank->regs->wkup_en, bank->saved_wakeup, 1);
+ spin_unlock_irqrestore(&bank->lock, flags);
-static struct syscore_ops omap_gpio_syscore_ops = {
- .suspend = omap_gpio_suspend,
- .resume = omap_gpio_resume,
-};
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
-#endif
+#if defined(CONFIG_PM_RUNTIME)
+static void omap_gpio_restore_context(struct gpio_bank *bank);
-#ifdef CONFIG_ARCH_OMAP2PLUS
+static int omap_gpio_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct gpio_bank *bank = platform_get_drvdata(pdev);
+ u32 l1 = 0, l2 = 0;
+ unsigned long flags;
-static int workaround_enabled;
+ spin_lock_irqsave(&bank->lock, flags);
+ if (bank->power_mode != OFF_MODE) {
+ bank->power_mode = 0;
+ goto update_gpio_context_count;
+ }
+ /*
+ * If going to OFF, remove triggering for all
+ * non-wakeup GPIOs. Otherwise spurious IRQs will be
+ * generated. See OMAP2420 Errata item 1.101.
+ */
+ if (!(bank->enabled_non_wakeup_gpios))
+ goto update_gpio_context_count;
-void omap2_gpio_prepare_for_idle(int off_mode)
-{
- int i, c = 0;
- int min = 0;
+ bank->saved_datain = __raw_readl(bank->base +
+ bank->regs->datain);
+ l1 = __raw_readl(bank->base + bank->regs->fallingdetect);
+ l2 = __raw_readl(bank->base + bank->regs->risingdetect);
- if (cpu_is_omap34xx())
- min = 1;
+ bank->saved_fallingdetect = l1;
+ bank->saved_risingdetect = l2;
+ l1 &= ~bank->enabled_non_wakeup_gpios;
+ l2 &= ~bank->enabled_non_wakeup_gpios;
- for (i = min; i < gpio_bank_count; i++) {
- struct gpio_bank *bank = &gpio_bank[i];
- u32 l1 = 0, l2 = 0;
- int j;
+ __raw_writel(l1, bank->base + bank->regs->fallingdetect);
+ __raw_writel(l2, bank->base + bank->regs->risingdetect);
- for (j = 0; j < hweight_long(bank->dbck_enable_mask); j++)
- clk_disable(bank->dbck);
+ bank->workaround_enabled = true;
- if (!off_mode)
- continue;
+update_gpio_context_count:
+ if (bank->get_context_loss_count)
+ bank->context_loss_count =
+ bank->get_context_loss_count(bank->dev);
- /* If going to OFF, remove triggering for all
- * non-wakeup GPIOs. Otherwise spurious IRQs will be
- * generated. See OMAP2420 Errata item 1.101. */
- if (!(bank->enabled_non_wakeup_gpios))
- continue;
+ _gpio_dbck_disable(bank);
+ spin_unlock_irqrestore(&bank->lock, flags);
- if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
- bank->saved_datain = __raw_readl(bank->base +
- OMAP24XX_GPIO_DATAIN);
- l1 = __raw_readl(bank->base +
- OMAP24XX_GPIO_FALLINGDETECT);
- l2 = __raw_readl(bank->base +
- OMAP24XX_GPIO_RISINGDETECT);
- }
+ return 0;
+}
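
The errata handling above reduces to clearing the edge-detect bits of every non-wakeup GPIO before the bank enters OFF, while remembering the full programming for resume; a worked example with illustrative masks:

    /* Illustrative: fallingdetect = 0x0000ffff, non-wakeup GPIOs = 0x0000ff00
     *   saved_fallingdetect = 0x0000ffff      (restored on resume)
     *   l1 &= ~0x0000ff00  ->  0x000000ff     (only wakeup-capable GPIOs keep
     *                                          their edge triggers in OFF)
     */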
- if (cpu_is_omap44xx()) {
- bank->saved_datain = __raw_readl(bank->base +
- OMAP4_GPIO_DATAIN);
- l1 = __raw_readl(bank->base +
- OMAP4_GPIO_FALLINGDETECT);
- l2 = __raw_readl(bank->base +
- OMAP4_GPIO_RISINGDETECT);
- }
+static int omap_gpio_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct gpio_bank *bank = platform_get_drvdata(pdev);
+ int context_lost_cnt_after;
+ u32 l = 0, gen, gen0, gen1;
+ unsigned long flags;
- bank->saved_fallingdetect = l1;
- bank->saved_risingdetect = l2;
- l1 &= ~bank->enabled_non_wakeup_gpios;
- l2 &= ~bank->enabled_non_wakeup_gpios;
+ spin_lock_irqsave(&bank->lock, flags);
+ _gpio_dbck_enable(bank);
+ if (!bank->enabled_non_wakeup_gpios || !bank->workaround_enabled) {
+ spin_unlock_irqrestore(&bank->lock, flags);
+ return 0;
+ }
- if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
- __raw_writel(l1, bank->base +
- OMAP24XX_GPIO_FALLINGDETECT);
- __raw_writel(l2, bank->base +
- OMAP24XX_GPIO_RISINGDETECT);
+ if (bank->get_context_loss_count) {
+ context_lost_cnt_after =
+ bank->get_context_loss_count(bank->dev);
+ if (context_lost_cnt_after != bank->context_loss_count ||
+ !context_lost_cnt_after) {
+ omap_gpio_restore_context(bank);
+ } else {
+ spin_unlock_irqrestore(&bank->lock, flags);
+ return 0;
}
+ }
- if (cpu_is_omap44xx()) {
- __raw_writel(l1, bank->base + OMAP4_GPIO_FALLINGDETECT);
- __raw_writel(l2, bank->base + OMAP4_GPIO_RISINGDETECT);
- }
+ __raw_writel(bank->saved_fallingdetect,
+ bank->base + bank->regs->fallingdetect);
+ __raw_writel(bank->saved_risingdetect,
+ bank->base + bank->regs->risingdetect);
+ l = __raw_readl(bank->base + bank->regs->datain);
- c++;
- }
- if (!c) {
- workaround_enabled = 0;
- return;
- }
- workaround_enabled = 1;
-}
+ /*
+ * Check if any of the non-wakeup interrupt GPIOs have changed
+ * state. If so, generate an IRQ by software. This is
+ * horribly racy, but it's the best we can do to work around
+ * this silicon bug.
+ */
+ l ^= bank->saved_datain;
+ l &= bank->enabled_non_wakeup_gpios;
-void omap2_gpio_resume_after_idle(void)
-{
- int i;
- int min = 0;
+ /*
+ * No need to generate IRQs for the rising edge for gpio IRQs
+ * configured with falling edge only; and vice versa.
+ */
+ gen0 = l & bank->saved_fallingdetect;
+ gen0 &= bank->saved_datain;
- if (cpu_is_omap34xx())
- min = 1;
- for (i = min; i < gpio_bank_count; i++) {
- struct gpio_bank *bank = &gpio_bank[i];
- u32 l = 0, gen, gen0, gen1;
- int j;
+ gen1 = l & bank->saved_risingdetect;
+ gen1 &= ~(bank->saved_datain);
- for (j = 0; j < hweight_long(bank->dbck_enable_mask); j++)
- clk_enable(bank->dbck);
+ /* FIXME: Consider GPIO IRQs with level detections properly! */
+ gen = l & (~(bank->saved_fallingdetect) & ~(bank->saved_risingdetect));
+ /* Consider all GPIO IRQs needed to be updated */
+ gen |= gen0 | gen1;
- if (!workaround_enabled)
- continue;
+ if (gen) {
+ u32 old0, old1;
- if (!(bank->enabled_non_wakeup_gpios))
- continue;
+ old0 = __raw_readl(bank->base + bank->regs->leveldetect0);
+ old1 = __raw_readl(bank->base + bank->regs->leveldetect1);
if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
- __raw_writel(bank->saved_fallingdetect,
- bank->base + OMAP24XX_GPIO_FALLINGDETECT);
- __raw_writel(bank->saved_risingdetect,
- bank->base + OMAP24XX_GPIO_RISINGDETECT);
- l = __raw_readl(bank->base + OMAP24XX_GPIO_DATAIN);
+ __raw_writel(old0 | gen, bank->base +
+ bank->regs->leveldetect0);
+ __raw_writel(old1 | gen, bank->base +
+ bank->regs->leveldetect1);
}
if (cpu_is_omap44xx()) {
- __raw_writel(bank->saved_fallingdetect,
- bank->base + OMAP4_GPIO_FALLINGDETECT);
- __raw_writel(bank->saved_risingdetect,
- bank->base + OMAP4_GPIO_RISINGDETECT);
- l = __raw_readl(bank->base + OMAP4_GPIO_DATAIN);
- }
-
- /* Check if any of the non-wakeup interrupt GPIOs have changed
- * state. If so, generate an IRQ by software. This is
- * horribly racy, but it's the best we can do to work around
- * this silicon bug. */
- l ^= bank->saved_datain;
- l &= bank->enabled_non_wakeup_gpios;
-
- /*
- * No need to generate IRQs for the rising edge for gpio IRQs
- * configured with falling edge only; and vice versa.
- */
- gen0 = l & bank->saved_fallingdetect;
- gen0 &= bank->saved_datain;
-
- gen1 = l & bank->saved_risingdetect;
- gen1 &= ~(bank->saved_datain);
-
- /* FIXME: Consider GPIO IRQs with level detections properly! */
- gen = l & (~(bank->saved_fallingdetect) &
- ~(bank->saved_risingdetect));
- /* Consider all GPIO IRQs needed to be updated */
- gen |= gen0 | gen1;
-
- if (gen) {
- u32 old0, old1;
-
- if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
- old0 = __raw_readl(bank->base +
- OMAP24XX_GPIO_LEVELDETECT0);
- old1 = __raw_readl(bank->base +
- OMAP24XX_GPIO_LEVELDETECT1);
- __raw_writel(old0 | gen, bank->base +
- OMAP24XX_GPIO_LEVELDETECT0);
- __raw_writel(old1 | gen, bank->base +
- OMAP24XX_GPIO_LEVELDETECT1);
- __raw_writel(old0, bank->base +
- OMAP24XX_GPIO_LEVELDETECT0);
- __raw_writel(old1, bank->base +
- OMAP24XX_GPIO_LEVELDETECT1);
- }
-
- if (cpu_is_omap44xx()) {
- old0 = __raw_readl(bank->base +
- OMAP4_GPIO_LEVELDETECT0);
- old1 = __raw_readl(bank->base +
- OMAP4_GPIO_LEVELDETECT1);
- __raw_writel(old0 | l, bank->base +
- OMAP4_GPIO_LEVELDETECT0);
- __raw_writel(old1 | l, bank->base +
- OMAP4_GPIO_LEVELDETECT1);
- __raw_writel(old0, bank->base +
- OMAP4_GPIO_LEVELDETECT0);
- __raw_writel(old1, bank->base +
- OMAP4_GPIO_LEVELDETECT1);
- }
+ __raw_writel(old0 | l, bank->base +
+ bank->regs->leveldetect0);
+ __raw_writel(old1 | l, bank->base +
+ bank->regs->leveldetect1);
}
+ __raw_writel(old0, bank->base + bank->regs->leveldetect0);
+ __raw_writel(old1, bank->base + bank->regs->leveldetect1);
}
+ bank->workaround_enabled = false;
+ spin_unlock_irqrestore(&bank->lock, flags);
+
+ return 0;
}
+#endif /* CONFIG_PM_RUNTIME */
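
The replay logic is easiest to follow with concrete numbers. Suppose a non-wakeup GPIO armed for falling edges was high when the bank idled and is low on resume (illustrative values, single bit 4):

    /*   l    = (datain ^ saved_datain) & non_wakeup        = 0x10
     *   gen0 = l & saved_fallingdetect & saved_datain      = 0x10  (missed fall)
     *   gen1 = l & saved_risingdetect & ~saved_datain      = 0x00
     *   gen  = 0x10 -> leveldetect is pulsed to synthesize the lost IRQ
     */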
-#endif
+void omap2_gpio_prepare_for_idle(int pwr_mode)
+{
+ struct gpio_bank *bank;
+
+ list_for_each_entry(bank, &omap_gpio_list, node) {
+ if (!bank->mod_usage || !bank->loses_context)
+ continue;
+
+ bank->power_mode = pwr_mode;
+
+ pm_runtime_put_sync_suspend(bank->dev);
+ }
+}
-#ifdef CONFIG_ARCH_OMAP3
-/* save the registers of bank 2-6 */
-void omap_gpio_save_context(void)
+void omap2_gpio_resume_after_idle(void)
{
- int i;
-
- /* saving banks from 2-6 only since GPIO1 is in WKUP */
- for (i = 1; i < gpio_bank_count; i++) {
- struct gpio_bank *bank = &gpio_bank[i];
- gpio_context[i].irqenable1 =
- __raw_readl(bank->base + OMAP24XX_GPIO_IRQENABLE1);
- gpio_context[i].irqenable2 =
- __raw_readl(bank->base + OMAP24XX_GPIO_IRQENABLE2);
- gpio_context[i].wake_en =
- __raw_readl(bank->base + OMAP24XX_GPIO_WAKE_EN);
- gpio_context[i].ctrl =
- __raw_readl(bank->base + OMAP24XX_GPIO_CTRL);
- gpio_context[i].oe =
- __raw_readl(bank->base + OMAP24XX_GPIO_OE);
- gpio_context[i].leveldetect0 =
- __raw_readl(bank->base + OMAP24XX_GPIO_LEVELDETECT0);
- gpio_context[i].leveldetect1 =
- __raw_readl(bank->base + OMAP24XX_GPIO_LEVELDETECT1);
- gpio_context[i].risingdetect =
- __raw_readl(bank->base + OMAP24XX_GPIO_RISINGDETECT);
- gpio_context[i].fallingdetect =
- __raw_readl(bank->base + OMAP24XX_GPIO_FALLINGDETECT);
- gpio_context[i].dataout =
- __raw_readl(bank->base + OMAP24XX_GPIO_DATAOUT);
+ struct gpio_bank *bank;
+
+ list_for_each_entry(bank, &omap_gpio_list, node) {
+ if (!bank->mod_usage || !bank->loses_context)
+ continue;
+
+ pm_runtime_get_sync(bank->dev);
}
}
-/* restore the required registers of bank 2-6 */
-void omap_gpio_restore_context(void)
+#if defined(CONFIG_PM_RUNTIME)
+static void omap_gpio_restore_context(struct gpio_bank *bank)
{
- int i;
-
- for (i = 1; i < gpio_bank_count; i++) {
- struct gpio_bank *bank = &gpio_bank[i];
- __raw_writel(gpio_context[i].irqenable1,
- bank->base + OMAP24XX_GPIO_IRQENABLE1);
- __raw_writel(gpio_context[i].irqenable2,
- bank->base + OMAP24XX_GPIO_IRQENABLE2);
- __raw_writel(gpio_context[i].wake_en,
- bank->base + OMAP24XX_GPIO_WAKE_EN);
- __raw_writel(gpio_context[i].ctrl,
- bank->base + OMAP24XX_GPIO_CTRL);
- __raw_writel(gpio_context[i].oe,
- bank->base + OMAP24XX_GPIO_OE);
- __raw_writel(gpio_context[i].leveldetect0,
- bank->base + OMAP24XX_GPIO_LEVELDETECT0);
- __raw_writel(gpio_context[i].leveldetect1,
- bank->base + OMAP24XX_GPIO_LEVELDETECT1);
- __raw_writel(gpio_context[i].risingdetect,
- bank->base + OMAP24XX_GPIO_RISINGDETECT);
- __raw_writel(gpio_context[i].fallingdetect,
- bank->base + OMAP24XX_GPIO_FALLINGDETECT);
- __raw_writel(gpio_context[i].dataout,
- bank->base + OMAP24XX_GPIO_DATAOUT);
+ __raw_writel(bank->context.wake_en,
+ bank->base + bank->regs->wkup_en);
+ __raw_writel(bank->context.ctrl, bank->base + bank->regs->ctrl);
+ __raw_writel(bank->context.leveldetect0,
+ bank->base + bank->regs->leveldetect0);
+ __raw_writel(bank->context.leveldetect1,
+ bank->base + bank->regs->leveldetect1);
+ __raw_writel(bank->context.risingdetect,
+ bank->base + bank->regs->risingdetect);
+ __raw_writel(bank->context.fallingdetect,
+ bank->base + bank->regs->fallingdetect);
+ if (bank->regs->set_dataout && bank->regs->clr_dataout)
+ __raw_writel(bank->context.dataout,
+ bank->base + bank->regs->set_dataout);
+ else
+ __raw_writel(bank->context.dataout,
+ bank->base + bank->regs->dataout);
+ __raw_writel(bank->context.oe, bank->base + bank->regs->direction);
+
+ if (bank->dbck_enable_mask) {
+ __raw_writel(bank->context.debounce, bank->base +
+ bank->regs->debounce);
+ __raw_writel(bank->context.debounce_en,
+ bank->base + bank->regs->debounce_en);
}
+
+ __raw_writel(bank->context.irqenable1,
+ bank->base + bank->regs->irqenable);
+ __raw_writel(bank->context.irqenable2,
+ bank->base + bank->regs->irqenable2);
}
+#endif /* CONFIG_PM_RUNTIME */
+#else
+#define omap_gpio_suspend NULL
+#define omap_gpio_resume NULL
+#define omap_gpio_runtime_suspend NULL
+#define omap_gpio_runtime_resume NULL
#endif
+static const struct dev_pm_ops gpio_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(omap_gpio_suspend, omap_gpio_resume)
+ SET_RUNTIME_PM_OPS(omap_gpio_runtime_suspend, omap_gpio_runtime_resume,
+ NULL)
+};
+
static struct platform_driver omap_gpio_driver = {
.probe = omap_gpio_probe,
.driver = {
.name = "omap_gpio",
+ .pm = &gpio_pm_ops,
},
};
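
Wiring gpio_pm_ops into the driver struct is what lets the PM core invoke the callbacks above; nothing calls them directly. A hedged usage sketch of how a register-access path takes a runtime-PM reference (the surrounding code is illustrative):

    pm_runtime_get_sync(bank->dev);  /* may trigger omap_gpio_runtime_resume() */
    /* ... touch bank registers while the module is guaranteed powered ... */
    pm_runtime_put(bank->dev);       /* may trigger omap_gpio_runtime_suspend() */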
@@ -1585,17 +1415,3 @@ static int __init omap_gpio_drv_reg(void)
return platform_driver_register(&omap_gpio_driver);
}
postcore_initcall(omap_gpio_drv_reg);
-
-static int __init omap_gpio_sysinit(void)
-{
- mpuio_init();
-
-#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS)
- if (cpu_is_omap16xx() || cpu_class_is_omap2())
- register_syscore_ops(&omap_gpio_syscore_ops);
-#endif
-
- return 0;
-}
-
-arch_initcall(omap_gpio_sysinit);
diff --git a/drivers/gpio/gpio-sa1100.c b/drivers/gpio/gpio-sa1100.c
index 7eecf69362ee..8ea3b33d4b40 100644
--- a/drivers/gpio/gpio-sa1100.c
+++ b/drivers/gpio/gpio-sa1100.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <mach/hardware.h>
+#include <mach/irqs.h>
static int sa1100_gpio_get(struct gpio_chip *chip, unsigned offset)
{
diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c
index 0a79a1167a25..46277877b7ec 100644
--- a/drivers/gpio/gpio-samsung.c
+++ b/drivers/gpio/gpio-samsung.c
@@ -169,7 +169,7 @@ int s3c24xx_gpio_setpull_1down(struct samsung_gpio_chip *chip,
return s3c24xx_gpio_setpull_1(chip, off, pull, S3C_GPIO_PULL_DOWN);
}
-static int exynos4_gpio_setpull(struct samsung_gpio_chip *chip,
+static int exynos_gpio_setpull(struct samsung_gpio_chip *chip,
unsigned int off, samsung_gpio_pull_t pull)
{
if (pull == S3C_GPIO_PULL_UP)
@@ -178,7 +178,7 @@ static int exynos4_gpio_setpull(struct samsung_gpio_chip *chip,
return samsung_gpio_setpull_updown(chip, off, pull);
}
-static samsung_gpio_pull_t exynos4_gpio_getpull(struct samsung_gpio_chip *chip,
+static samsung_gpio_pull_t exynos_gpio_getpull(struct samsung_gpio_chip *chip,
unsigned int off)
{
samsung_gpio_pull_t pull;
@@ -452,9 +452,9 @@ static struct samsung_gpio_cfg s3c24xx_gpiocfg_banka = {
};
#endif
-static struct samsung_gpio_cfg exynos4_gpio_cfg = {
- .set_pull = exynos4_gpio_setpull,
- .get_pull = exynos4_gpio_getpull,
+static struct samsung_gpio_cfg exynos_gpio_cfg = {
+ .set_pull = exynos_gpio_setpull,
+ .get_pull = exynos_gpio_getpull,
.set_config = samsung_gpio_setcfg_4bit,
.get_config = samsung_gpio_getcfg_4bit,
};
@@ -502,13 +502,13 @@ static struct samsung_gpio_cfg samsung_gpio_cfgs[] = {
.get_config = samsung_gpio_getcfg_2bit,
},
[8] = {
- .set_pull = exynos4_gpio_setpull,
- .get_pull = exynos4_gpio_getpull,
+ .set_pull = exynos_gpio_setpull,
+ .get_pull = exynos_gpio_getpull,
},
[9] = {
.cfg_eint = 0x3,
- .set_pull = exynos4_gpio_setpull,
- .get_pull = exynos4_gpio_getpull,
+ .set_pull = exynos_gpio_setpull,
+ .get_pull = exynos_gpio_getpull,
}
};
@@ -2113,10 +2113,10 @@ static struct samsung_gpio_chip s5pv210_gpios_4bit[] = {
};
/*
- * Followings are the gpio banks in EXYNOS4210
+ * The following are the gpio banks in EXYNOS SoCs
*
* The 'config' member when left to NULL, is initialized to the default
- * structure samsung_gpio_cfgs[3] in the init function below.
+ * structure exynos_gpio_cfg in the init function below.
*
* The 'base' member is also initialized in the init function below.
* Note: The initialization of 'base' member of samsung_gpio_chip structure
@@ -2331,7 +2331,6 @@ static struct samsung_gpio_chip exynos4_gpios_2[] = {
.label = "GPY6",
},
}, {
- .base = (S5P_VA_GPIO2 + 0xC00),
.config = &samsung_gpio_cfgs[9],
.irq_base = IRQ_EINT(0),
.chip = {
@@ -2341,7 +2340,6 @@ static struct samsung_gpio_chip exynos4_gpios_2[] = {
.to_irq = samsung_gpiolib_to_irq,
},
}, {
- .base = (S5P_VA_GPIO2 + 0xC20),
.config = &samsung_gpio_cfgs[9],
.irq_base = IRQ_EINT(8),
.chip = {
@@ -2351,7 +2349,6 @@ static struct samsung_gpio_chip exynos4_gpios_2[] = {
.to_irq = samsung_gpiolib_to_irq,
},
}, {
- .base = (S5P_VA_GPIO2 + 0xC40),
.config = &samsung_gpio_cfgs[9],
.irq_base = IRQ_EINT(16),
.chip = {
@@ -2361,7 +2358,6 @@ static struct samsung_gpio_chip exynos4_gpios_2[] = {
.to_irq = samsung_gpiolib_to_irq,
},
}, {
- .base = (S5P_VA_GPIO2 + 0xC60),
.config = &samsung_gpio_cfgs[9],
.irq_base = IRQ_EINT(24),
.chip = {
@@ -2386,8 +2382,280 @@ static struct samsung_gpio_chip exynos4_gpios_3[] = {
#endif
};
-#if defined(CONFIG_ARCH_EXYNOS4) && defined(CONFIG_OF)
-static int exynos4_gpio_xlate(struct gpio_chip *gc,
+static struct samsung_gpio_chip exynos5_gpios_1[] = {
+#ifdef CONFIG_ARCH_EXYNOS5
+ {
+ .chip = {
+ .base = EXYNOS5_GPA0(0),
+ .ngpio = EXYNOS5_GPIO_A0_NR,
+ .label = "GPA0",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS5_GPA1(0),
+ .ngpio = EXYNOS5_GPIO_A1_NR,
+ .label = "GPA1",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS5_GPA2(0),
+ .ngpio = EXYNOS5_GPIO_A2_NR,
+ .label = "GPA2",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS5_GPB0(0),
+ .ngpio = EXYNOS5_GPIO_B0_NR,
+ .label = "GPB0",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS5_GPB1(0),
+ .ngpio = EXYNOS5_GPIO_B1_NR,
+ .label = "GPB1",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS5_GPB2(0),
+ .ngpio = EXYNOS5_GPIO_B2_NR,
+ .label = "GPB2",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS5_GPB3(0),
+ .ngpio = EXYNOS5_GPIO_B3_NR,
+ .label = "GPB3",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS5_GPC0(0),
+ .ngpio = EXYNOS5_GPIO_C0_NR,
+ .label = "GPC0",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS5_GPC1(0),
+ .ngpio = EXYNOS5_GPIO_C1_NR,
+ .label = "GPC1",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS5_GPC2(0),
+ .ngpio = EXYNOS5_GPIO_C2_NR,
+ .label = "GPC2",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS5_GPC3(0),
+ .ngpio = EXYNOS5_GPIO_C3_NR,
+ .label = "GPC3",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS5_GPD0(0),
+ .ngpio = EXYNOS5_GPIO_D0_NR,
+ .label = "GPD0",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS5_GPD1(0),
+ .ngpio = EXYNOS5_GPIO_D1_NR,
+ .label = "GPD1",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS5_GPY0(0),
+ .ngpio = EXYNOS5_GPIO_Y0_NR,
+ .label = "GPY0",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS5_GPY1(0),
+ .ngpio = EXYNOS5_GPIO_Y1_NR,
+ .label = "GPY1",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS5_GPY2(0),
+ .ngpio = EXYNOS5_GPIO_Y2_NR,
+ .label = "GPY2",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS5_GPY3(0),
+ .ngpio = EXYNOS5_GPIO_Y3_NR,
+ .label = "GPY3",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS5_GPY4(0),
+ .ngpio = EXYNOS5_GPIO_Y4_NR,
+ .label = "GPY4",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS5_GPY5(0),
+ .ngpio = EXYNOS5_GPIO_Y5_NR,
+ .label = "GPY5",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS5_GPY6(0),
+ .ngpio = EXYNOS5_GPIO_Y6_NR,
+ .label = "GPY6",
+ },
+ }, {
+ .config = &samsung_gpio_cfgs[9],
+ .irq_base = IRQ_EINT(0),
+ .chip = {
+ .base = EXYNOS5_GPX0(0),
+ .ngpio = EXYNOS5_GPIO_X0_NR,
+ .label = "GPX0",
+ .to_irq = samsung_gpiolib_to_irq,
+ },
+ }, {
+ .config = &samsung_gpio_cfgs[9],
+ .irq_base = IRQ_EINT(8),
+ .chip = {
+ .base = EXYNOS5_GPX1(0),
+ .ngpio = EXYNOS5_GPIO_X1_NR,
+ .label = "GPX1",
+ .to_irq = samsung_gpiolib_to_irq,
+ },
+ }, {
+ .config = &samsung_gpio_cfgs[9],
+ .irq_base = IRQ_EINT(16),
+ .chip = {
+ .base = EXYNOS5_GPX2(0),
+ .ngpio = EXYNOS5_GPIO_X2_NR,
+ .label = "GPX2",
+ .to_irq = samsung_gpiolib_to_irq,
+ },
+ }, {
+ .config = &samsung_gpio_cfgs[9],
+ .irq_base = IRQ_EINT(24),
+ .chip = {
+ .base = EXYNOS5_GPX3(0),
+ .ngpio = EXYNOS5_GPIO_X3_NR,
+ .label = "GPX3",
+ .to_irq = samsung_gpiolib_to_irq,
+ },
+ },
+#endif
+};
+
+static struct samsung_gpio_chip exynos5_gpios_2[] = {
+#ifdef CONFIG_ARCH_EXYNOS5
+ {
+ .chip = {
+ .base = EXYNOS5_GPE0(0),
+ .ngpio = EXYNOS5_GPIO_E0_NR,
+ .label = "GPE0",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS5_GPE1(0),
+ .ngpio = EXYNOS5_GPIO_E1_NR,
+ .label = "GPE1",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS5_GPF0(0),
+ .ngpio = EXYNOS5_GPIO_F0_NR,
+ .label = "GPF0",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS5_GPF1(0),
+ .ngpio = EXYNOS5_GPIO_F1_NR,
+ .label = "GPF1",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS5_GPG0(0),
+ .ngpio = EXYNOS5_GPIO_G0_NR,
+ .label = "GPG0",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS5_GPG1(0),
+ .ngpio = EXYNOS5_GPIO_G1_NR,
+ .label = "GPG1",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS5_GPG2(0),
+ .ngpio = EXYNOS5_GPIO_G2_NR,
+ .label = "GPG2",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS5_GPH0(0),
+ .ngpio = EXYNOS5_GPIO_H0_NR,
+ .label = "GPH0",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS5_GPH1(0),
+ .ngpio = EXYNOS5_GPIO_H1_NR,
+ .label = "GPH1",
+ },
+ },
+#endif
+};
+
+static struct samsung_gpio_chip exynos5_gpios_3[] = {
+#ifdef CONFIG_ARCH_EXYNOS5
+ {
+ .chip = {
+ .base = EXYNOS5_GPV0(0),
+ .ngpio = EXYNOS5_GPIO_V0_NR,
+ .label = "GPV0",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS5_GPV1(0),
+ .ngpio = EXYNOS5_GPIO_V1_NR,
+ .label = "GPV1",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS5_GPV2(0),
+ .ngpio = EXYNOS5_GPIO_V2_NR,
+ .label = "GPV2",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS5_GPV3(0),
+ .ngpio = EXYNOS5_GPIO_V3_NR,
+ .label = "GPV3",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS5_GPV4(0),
+ .ngpio = EXYNOS5_GPIO_V4_NR,
+ .label = "GPV4",
+ },
+ },
+#endif
+};
+
+static struct samsung_gpio_chip exynos5_gpios_4[] = {
+#ifdef CONFIG_ARCH_EXYNOS5
+ {
+ .chip = {
+ .base = EXYNOS5_GPZ(0),
+ .ngpio = EXYNOS5_GPIO_Z_NR,
+ .label = "GPZ",
+ },
+ },
+#endif
+};
+
+#if defined(CONFIG_ARCH_EXYNOS) && defined(CONFIG_OF)
+static int exynos_gpio_xlate(struct gpio_chip *gc,
const struct of_phandle_args *gpiospec, u32 *flags)
{
unsigned int pin;
@@ -2413,13 +2681,13 @@ static int exynos4_gpio_xlate(struct gpio_chip *gc,
return gpiospec->args[0];
}
-static const struct of_device_id exynos4_gpio_dt_match[] __initdata = {
+static const struct of_device_id exynos_gpio_dt_match[] __initdata = {
{ .compatible = "samsung,exynos4-gpio", },
{}
};
-static __init void exynos4_gpiolib_attach_ofnode(struct samsung_gpio_chip *chip,
- u64 base, u64 offset)
+static __init void exynos_gpiolib_attach_ofnode(struct samsung_gpio_chip *chip,
+ u64 base, u64 offset)
{
struct gpio_chip *gc = &chip->chip;
u64 address;
@@ -2429,28 +2697,29 @@ static __init void exynos4_gpiolib_attach_ofnode(struct samsung_gpio_chip *chip,
address = chip->base ? base + ((u32)chip->base & 0xfff) : base + offset;
gc->of_node = of_find_matching_node_by_address(NULL,
- exynos4_gpio_dt_match, address);
+ exynos_gpio_dt_match, address);
if (!gc->of_node) {
pr_info("gpio: device tree node not found for gpio controller"
" with base address %08llx\n", address);
return;
}
gc->of_gpio_n_cells = 4;
- gc->of_xlate = exynos4_gpio_xlate;
+ gc->of_xlate = exynos_gpio_xlate;
}
-#elif defined(CONFIG_ARCH_EXYNOS4)
-static __init void exynos4_gpiolib_attach_ofnode(struct samsung_gpio_chip *chip,
- u64 base, u64 offset)
+#elif defined(CONFIG_ARCH_EXYNOS)
+static __init void exynos_gpiolib_attach_ofnode(struct samsung_gpio_chip *chip,
+ u64 base, u64 offset)
{
return;
}
-#endif /* defined(CONFIG_ARCH_EXYNOS4) && defined(CONFIG_OF) */
+#endif /* defined(CONFIG_ARCH_EXYNOS) && defined(CONFIG_OF) */
/* TODO: cleanup soc_is_* */
static __init int samsung_gpiolib_init(void)
{
struct samsung_gpio_chip *chip;
int i, nr_chips;
+ void __iomem *gpio_base1, *gpio_base2, *gpio_base3, *gpio_base4;
int group = 0;
samsung_gpiolib_set_cfg(samsung_gpio_cfgs, ARRAY_SIZE(samsung_gpio_cfgs));
@@ -2516,66 +2785,200 @@ static __init int samsung_gpiolib_init(void)
s5p_register_gpioint_bank(IRQ_GPIOINT, 0, S5P_GPIOINT_GROUP_MAXNR);
#endif
} else if (soc_is_exynos4210()) {
- group = 0;
+#ifdef CONFIG_CPU_EXYNOS4210
+ void __iomem *gpx_base;
/* gpio part1 */
+ gpio_base1 = ioremap(EXYNOS4_PA_GPIO1, SZ_4K);
+ if (gpio_base1 == NULL) {
+ pr_err("unable to ioremap for gpio_base1\n");
+ goto err_ioremap1;
+ }
+
chip = exynos4_gpios_1;
nr_chips = ARRAY_SIZE(exynos4_gpios_1);
for (i = 0; i < nr_chips; i++, chip++) {
if (!chip->config) {
- chip->config = &exynos4_gpio_cfg;
+ chip->config = &exynos_gpio_cfg;
chip->group = group++;
}
-#ifdef CONFIG_CPU_EXYNOS4210
- exynos4_gpiolib_attach_ofnode(chip,
+ exynos_gpiolib_attach_ofnode(chip,
EXYNOS4_PA_GPIO1, i * 0x20);
-#endif
}
- samsung_gpiolib_add_4bit_chips(exynos4_gpios_1, nr_chips, S5P_VA_GPIO1);
+ samsung_gpiolib_add_4bit_chips(exynos4_gpios_1,
+ nr_chips, gpio_base1);
/* gpio part2 */
+ gpio_base2 = ioremap(EXYNOS4_PA_GPIO2, SZ_4K);
+ if (gpio_base2 == NULL) {
+ pr_err("unable to ioremap for gpio_base2\n");
+ goto err_ioremap2;
+ }
+
+ /* need to set base address for gpx */
+ chip = &exynos4_gpios_2[16];
+ gpx_base = gpio_base2 + 0xC00;
+ for (i = 0; i < 4; i++, chip++, gpx_base += 0x20)
+ chip->base = gpx_base;
+
chip = exynos4_gpios_2;
nr_chips = ARRAY_SIZE(exynos4_gpios_2);
for (i = 0; i < nr_chips; i++, chip++) {
if (!chip->config) {
- chip->config = &exynos4_gpio_cfg;
+ chip->config = &exynos_gpio_cfg;
chip->group = group++;
}
-#ifdef CONFIG_CPU_EXYNOS4210
- exynos4_gpiolib_attach_ofnode(chip,
+ exynos_gpiolib_attach_ofnode(chip,
EXYNOS4_PA_GPIO2, i * 0x20);
-#endif
}
- samsung_gpiolib_add_4bit_chips(exynos4_gpios_2, nr_chips, S5P_VA_GPIO2);
+ samsung_gpiolib_add_4bit_chips(exynos4_gpios_2,
+ nr_chips, gpio_base2);
/* gpio part3 */
+ gpio_base3 = ioremap(EXYNOS4_PA_GPIO3, SZ_256);
+ if (gpio_base3 == NULL) {
+ pr_err("unable to ioremap for gpio_base3\n");
+ goto err_ioremap3;
+ }
+
chip = exynos4_gpios_3;
nr_chips = ARRAY_SIZE(exynos4_gpios_3);
for (i = 0; i < nr_chips; i++, chip++) {
if (!chip->config) {
- chip->config = &exynos4_gpio_cfg;
+ chip->config = &exynos_gpio_cfg;
chip->group = group++;
}
-#ifdef CONFIG_CPU_EXYNOS4210
- exynos4_gpiolib_attach_ofnode(chip,
+ exynos_gpiolib_attach_ofnode(chip,
EXYNOS4_PA_GPIO3, i * 0x20);
-#endif
}
- samsung_gpiolib_add_4bit_chips(exynos4_gpios_3, nr_chips, S5P_VA_GPIO3);
+ samsung_gpiolib_add_4bit_chips(exynos4_gpios_3,
+ nr_chips, gpio_base3);
#if defined(CONFIG_CPU_EXYNOS4210) && defined(CONFIG_S5P_GPIO_INT)
s5p_register_gpioint_bank(IRQ_GPIO_XA, 0, IRQ_GPIO1_NR_GROUPS);
s5p_register_gpioint_bank(IRQ_GPIO_XB, IRQ_GPIO1_NR_GROUPS, IRQ_GPIO2_NR_GROUPS);
#endif
+
+#endif /* CONFIG_CPU_EXYNOS4210 */
+ } else if (soc_is_exynos5250()) {
+#ifdef CONFIG_SOC_EXYNOS5250
+ void __iomem *gpx_base;
+
+ /* gpio part1 */
+ gpio_base1 = ioremap(EXYNOS5_PA_GPIO1, SZ_4K);
+ if (gpio_base1 == NULL) {
+ pr_err("unable to ioremap for gpio_base1\n");
+ goto err_ioremap1;
+ }
+
+ /* need to set base address for gpx */
+ chip = &exynos5_gpios_1[20];
+ gpx_base = gpio_base1 + 0xC00;
+ for (i = 0; i < 4; i++, chip++, gpx_base += 0x20)
+ chip->base = gpx_base;
+
+ chip = exynos5_gpios_1;
+ nr_chips = ARRAY_SIZE(exynos5_gpios_1);
+
+ for (i = 0; i < nr_chips; i++, chip++) {
+ if (!chip->config) {
+ chip->config = &exynos_gpio_cfg;
+ chip->group = group++;
+ }
+ exynos_gpiolib_attach_ofnode(chip,
+ EXYNOS5_PA_GPIO1, i * 0x20);
+ }
+ samsung_gpiolib_add_4bit_chips(exynos5_gpios_1,
+ nr_chips, gpio_base1);
+
+ /* gpio part2 */
+ gpio_base2 = ioremap(EXYNOS5_PA_GPIO2, SZ_4K);
+ if (gpio_base2 == NULL) {
+ pr_err("unable to ioremap for gpio_base2\n");
+ goto err_ioremap2;
+ }
+
+ chip = exynos5_gpios_2;
+ nr_chips = ARRAY_SIZE(exynos5_gpios_2);
+
+ for (i = 0; i < nr_chips; i++, chip++) {
+ if (!chip->config) {
+ chip->config = &exynos_gpio_cfg;
+ chip->group = group++;
+ }
+ exynos_gpiolib_attach_ofnode(chip,
+ EXYNOS5_PA_GPIO2, i * 0x20);
+ }
+ samsung_gpiolib_add_4bit_chips(exynos5_gpios_2,
+ nr_chips, gpio_base2);
+
+ /* gpio part3 */
+ gpio_base3 = ioremap(EXYNOS5_PA_GPIO3, SZ_4K);
+ if (gpio_base3 == NULL) {
+ pr_err("unable to ioremap for gpio_base3\n");
+ goto err_ioremap3;
+ }
+
+ /* need to set base address for gpv */
+ exynos5_gpios_3[0].base = gpio_base3;
+ exynos5_gpios_3[1].base = gpio_base3 + 0x20;
+ exynos5_gpios_3[2].base = gpio_base3 + 0x60;
+ exynos5_gpios_3[3].base = gpio_base3 + 0x80;
+ exynos5_gpios_3[4].base = gpio_base3 + 0xC0;
+
+ chip = exynos5_gpios_3;
+ nr_chips = ARRAY_SIZE(exynos5_gpios_3);
+
+ for (i = 0; i < nr_chips; i++, chip++) {
+ if (!chip->config) {
+ chip->config = &exynos_gpio_cfg;
+ chip->group = group++;
+ }
+ exynos_gpiolib_attach_ofnode(chip,
+ EXYNOS5_PA_GPIO3, i * 0x20);
+ }
+ samsung_gpiolib_add_4bit_chips(exynos5_gpios_3,
+ nr_chips, gpio_base3);
+
+ /* gpio part4 */
+ gpio_base4 = ioremap(EXYNOS5_PA_GPIO4, SZ_4K);
+ if (gpio_base4 == NULL) {
+ pr_err("unable to ioremap for gpio_base4\n");
+ goto err_ioremap4;
+ }
+
+ chip = exynos5_gpios_4;
+ nr_chips = ARRAY_SIZE(exynos5_gpios_4);
+
+ for (i = 0; i < nr_chips; i++, chip++) {
+ if (!chip->config) {
+ chip->config = &exynos_gpio_cfg;
+ chip->group = group++;
+ }
+ exynos_gpiolib_attach_ofnode(chip,
+ EXYNOS5_PA_GPIO4, i * 0x20);
+ }
+ samsung_gpiolib_add_4bit_chips(exynos5_gpios_4,
+ nr_chips, gpio_base4);
+#endif /* CONFIG_SOC_EXYNOS5250 */
} else {
WARN(1, "Unknown SoC in gpio-samsung, no GPIOs added\n");
return -ENODEV;
}
return 0;
+
+err_ioremap4:
+ iounmap(gpio_base3);
+err_ioremap3:
+ iounmap(gpio_base2);
+err_ioremap2:
+ iounmap(gpio_base1);
+err_ioremap1:
+ return -ENOMEM;
}
core_initcall(samsung_gpiolib_init);
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index bdc293791590..6f17671260e1 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -25,6 +25,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/module.h>
+#include <linux/irqdomain.h>
#include <asm/mach/irq.h>
@@ -74,9 +75,10 @@ struct tegra_gpio_bank {
#endif
};
-
+static struct irq_domain *irq_domain;
static void __iomem *regs;
-static struct tegra_gpio_bank tegra_gpio_banks[7];
+static u32 tegra_gpio_bank_count;
+static struct tegra_gpio_bank *tegra_gpio_banks;
static inline void tegra_gpio_writel(u32 val, u32 reg)
{
@@ -139,7 +141,7 @@ static int tegra_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
static int tegra_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
- return TEGRA_GPIO_TO_IRQ(offset);
+ return irq_find_mapping(irq_domain, offset);
}
static struct gpio_chip tegra_gpio_chip = {
@@ -155,28 +157,28 @@ static struct gpio_chip tegra_gpio_chip = {
static void tegra_gpio_irq_ack(struct irq_data *d)
{
- int gpio = d->irq - INT_GPIO_BASE;
+ int gpio = d->hwirq;
tegra_gpio_writel(1 << GPIO_BIT(gpio), GPIO_INT_CLR(gpio));
}
static void tegra_gpio_irq_mask(struct irq_data *d)
{
- int gpio = d->irq - INT_GPIO_BASE;
+ int gpio = d->hwirq;
tegra_gpio_mask_write(GPIO_MSK_INT_ENB(gpio), gpio, 0);
}
static void tegra_gpio_irq_unmask(struct irq_data *d)
{
- int gpio = d->irq - INT_GPIO_BASE;
+ int gpio = d->hwirq;
tegra_gpio_mask_write(GPIO_MSK_INT_ENB(gpio), gpio, 1);
}
static int tegra_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
- int gpio = d->irq - INT_GPIO_BASE;
+ int gpio = d->hwirq;
struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
int port = GPIO_PORT(gpio);
int lvl_type;
@@ -273,7 +275,7 @@ void tegra_gpio_resume(void)
local_irq_save(flags);
- for (b = 0; b < ARRAY_SIZE(tegra_gpio_banks); b++) {
+ for (b = 0; b < tegra_gpio_bank_count; b++) {
struct tegra_gpio_bank *bank = &tegra_gpio_banks[b];
for (p = 0; p < ARRAY_SIZE(bank->oe); p++) {
@@ -296,7 +298,7 @@ void tegra_gpio_suspend(void)
int p;
local_irq_save(flags);
- for (b = 0; b < ARRAY_SIZE(tegra_gpio_banks); b++) {
+ for (b = 0; b < tegra_gpio_bank_count; b++) {
struct tegra_gpio_bank *bank = &tegra_gpio_banks[b];
for (p = 0; p < ARRAY_SIZE(bank->oe); p++) {
@@ -337,13 +339,44 @@ static struct lock_class_key gpio_lock_class;
static int __devinit tegra_gpio_probe(struct platform_device *pdev)
{
+ int irq_base;
struct resource *res;
struct tegra_gpio_bank *bank;
int gpio;
int i;
int j;
- for (i = 0; i < ARRAY_SIZE(tegra_gpio_banks); i++) {
+ for (;;) {
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, tegra_gpio_bank_count);
+ if (!res)
+ break;
+ tegra_gpio_bank_count++;
+ }
+ if (!tegra_gpio_bank_count) {
+ dev_err(&pdev->dev, "Missing IRQ resource\n");
+ return -ENODEV;
+ }
+
+ tegra_gpio_chip.ngpio = tegra_gpio_bank_count * 32;
+
+ tegra_gpio_banks = devm_kzalloc(&pdev->dev,
+ tegra_gpio_bank_count * sizeof(*tegra_gpio_banks),
+ GFP_KERNEL);
+ if (!tegra_gpio_banks) {
+ dev_err(&pdev->dev, "Couldn't allocate bank structure\n");
+ return -ENODEV;
+ }
+
+ irq_base = irq_alloc_descs(-1, 0, tegra_gpio_chip.ngpio, 0);
+ if (irq_base < 0) {
+ dev_err(&pdev->dev, "Couldn't allocate IRQ numbers\n");
+ return -ENODEV;
+ }
+ irq_domain = irq_domain_add_legacy(pdev->dev.of_node,
+ tegra_gpio_chip.ngpio, irq_base, 0,
+ &irq_domain_simple_ops, NULL);
+
+ for (i = 0; i < tegra_gpio_bank_count; i++) {
res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
if (!res) {
dev_err(&pdev->dev, "Missing IRQ resource\n");
@@ -380,8 +413,8 @@ static int __devinit tegra_gpio_probe(struct platform_device *pdev)
gpiochip_add(&tegra_gpio_chip);
- for (gpio = 0; gpio < TEGRA_NR_GPIOS; gpio++) {
- int irq = TEGRA_GPIO_TO_IRQ(gpio);
+ for (gpio = 0; gpio < tegra_gpio_chip.ngpio; gpio++) {
+ int irq = irq_find_mapping(irq_domain, gpio);
/* No validity check; all Tegra GPIOs are valid IRQs */
bank = &tegra_gpio_banks[GPIO_BANK(gpio)];
@@ -393,7 +426,7 @@ static int __devinit tegra_gpio_probe(struct platform_device *pdev)
set_irq_flags(irq, IRQF_VALID);
}
- for (i = 0; i < ARRAY_SIZE(tegra_gpio_banks); i++) {
+ for (i = 0; i < tegra_gpio_bank_count; i++) {
bank = &tegra_gpio_banks[i];
irq_set_chained_handler(bank->irq, tegra_gpio_irq_handler);
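
The legacy domain registered in the probe provides a fixed linear hwirq-to-virq translation, which is why tegra_gpio_to_irq() and the irq_chip callbacks can use irq_find_mapping() and d->hwirq instead of arithmetic on INT_GPIO_BASE; a sketch of the equivalence (behaviour as of this kernel era):

    /* After irq_domain_add_legacy(node, ngpio, irq_base, 0, ...), */
    unsigned int virq = irq_find_mapping(irq_domain, gpio);
    /* virq == irq_base + gpio for a legacy domain, but going through the
     * domain keeps the driver independent of the Linux IRQ numbering. */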
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index 4c2cb4a8ad98..5675d93b4205 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -244,7 +244,6 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
uint64_t value)
{
struct drm_encoder *encoder = connector->encoder;
- struct backlight_device *psb_bd;
if (!strcmp(property->name, "scaling mode") && encoder) {
struct psb_intel_crtc *psb_crtc =
@@ -301,11 +300,15 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
value))
goto set_prop_error;
else {
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ struct backlight_device *psb_bd;
+
psb_bd = mdfld_get_backlight_device();
if (psb_bd) {
psb_bd->props.brightness = value;
mdfld_set_brightness(psb_bd);
}
+#endif
}
}
set_prop_done:
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 8f510fd956b0..fa860358add1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -654,10 +654,13 @@ nouveau_connector_detect_depth(struct drm_connector *connector)
if (nv_connector->edid && connector->display_info.bpc)
return;
- /* if not, we're out of options unless we're LVDS, default to 6bpc */
- connector->display_info.bpc = 6;
- if (nv_encoder->dcb->type != OUTPUT_LVDS)
+ /* if not, we're out of options unless we're LVDS, default to 8bpc */
+ if (nv_encoder->dcb->type != OUTPUT_LVDS) {
+ connector->display_info.bpc = 8;
return;
+ }
+
+ connector->display_info.bpc = 6;
/* LVDS: panel straps */
if (bios->fp_no_ddc) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index 8f4f914d9eab..e2be95af2e52 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -315,8 +315,8 @@ nouveau_i2c_init(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nvbios *bios = &dev_priv->vbios;
struct nouveau_i2c_chan *port;
+ u8 version = 0x00, entries, recordlen;
u8 *i2c, *entry, legacy[2][4] = {};
- u8 version, entries, recordlen;
int ret, i;
INIT_LIST_HEAD(&dev_priv->i2c_ports);
@@ -346,12 +346,12 @@ nouveau_i2c_init(struct drm_device *dev)
if (i2c[7]) legacy[1][1] = i2c[7];
}
- if (i2c && version >= 0x30) {
+ if (version >= 0x30) {
entry = i2c[1] + i2c;
entries = i2c[2];
recordlen = i2c[3];
} else
- if (i2c) {
+ if (version) {
entry = i2c;
entries = 16;
recordlen = 4;
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index a3ae91fa8141..a4886b36d0fa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -852,7 +852,7 @@ nouveau_card_init(struct drm_device *dev)
if (ret)
goto out_pm;
- if (!dev_priv->noaccel) {
+ if (dev_priv->eng[NVOBJ_ENGINE_GR]) {
ret = nouveau_card_channel_init(dev);
if (ret)
goto out_fence;
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 083b3eada001..b5ff1f7b6f7e 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -588,8 +588,8 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
if (encoder->crtc == crtc) {
radeon_encoder = to_radeon_encoder(encoder);
connector = radeon_get_connector_for_encoder(encoder);
- if (connector && connector->display_info.bpc)
- bpc = connector->display_info.bpc;
+ /* if (connector && connector->display_info.bpc)
+ bpc = connector->display_info.bpc; */
encoder_mode = atombios_get_encoder_mode(encoder);
is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock);
if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
@@ -965,7 +965,9 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
struct radeon_connector_atom_dig *dig_connector =
radeon_connector->con_priv;
int dp_clock;
- bpc = connector->display_info.bpc;
+
+ /* if (connector->display_info.bpc)
+ bpc = connector->display_info.bpc; */
switch (encoder_mode) {
case ATOM_ENCODER_MODE_DP_MST:
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 6c62be226804..c57d85664e77 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -405,10 +405,13 @@ static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
/* get bpc from the EDID */
static int convert_bpc_to_bpp(int bpc)
{
+#if 0
if (bpc == 0)
return 24;
else
return bpc * 3;
+#endif
+ return 24;
}
/* get the max pix clock supported by the link rate and lane num */
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 468b874336f9..e607c4d7dd98 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -541,7 +541,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo
dp_clock = dig_connector->dp_clock;
dp_lane_count = dig_connector->dp_lane_count;
hpd_id = radeon_connector->hpd.hpd;
- bpc = connector->display_info.bpc;
+ /* bpc = connector->display_info.bpc; */
}
/* no dig encoder assigned */
@@ -1159,7 +1159,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
dp_lane_count = dig_connector->dp_lane_count;
connector_object_id =
(radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
- bpc = connector->display_info.bpc;
+ /* bpc = connector->display_info.bpc; */
}
memset(&args, 0, sizeof(args));
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index a58b37a2e65a..70089d32b80f 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -80,6 +80,9 @@ struct evergreen_cs_track {
bool cb_dirty;
bool db_dirty;
bool streamout_dirty;
+ u32 htile_offset;
+ u32 htile_surface;
+ struct radeon_bo *htile_bo;
};
static u32 evergreen_cs_get_aray_mode(u32 tiling_flags)
@@ -144,6 +147,9 @@ static void evergreen_cs_track_init(struct evergreen_cs_track *track)
track->db_s_read_bo = NULL;
track->db_s_write_bo = NULL;
track->db_dirty = true;
+ track->htile_bo = NULL;
+ track->htile_offset = 0xFFFFFFFF;
+ track->htile_surface = 0;
for (i = 0; i < 4; i++) {
track->vgt_strmout_size[i] = 0;
@@ -444,6 +450,62 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i
return 0;
}
+static int evergreen_cs_track_validate_htile(struct radeon_cs_parser *p,
+ unsigned nbx, unsigned nby)
+{
+ struct evergreen_cs_track *track = p->track;
+ unsigned long size;
+
+ if (track->htile_bo == NULL) {
+ dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n",
+ __func__, __LINE__, track->db_z_info);
+ return -EINVAL;
+ }
+
+ if (G_028ABC_LINEAR(track->htile_surface)) {
+ /* pitch must be 16 htiles aligned == 16 * 8 pixel aligned */
+ nbx = round_up(nbx, 16 * 8);
+ /* height is npipes htiles aligned == npipes * 8 pixel aligned */
+ nby = round_up(nby, track->npipes * 8);
+ } else {
+ switch (track->npipes) {
+ case 8:
+ nbx = round_up(nbx, 64 * 8);
+ nby = round_up(nby, 64 * 8);
+ break;
+ case 4:
+ nbx = round_up(nbx, 64 * 8);
+ nby = round_up(nby, 32 * 8);
+ break;
+ case 2:
+ nbx = round_up(nbx, 32 * 8);
+ nby = round_up(nby, 32 * 8);
+ break;
+ case 1:
+ nbx = round_up(nbx, 32 * 8);
+ nby = round_up(nby, 16 * 8);
+ break;
+ default:
+ dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
+ __func__, __LINE__, track->npipes);
+ return -EINVAL;
+ }
+ }
+ /* compute the number of htiles */
+ nbx = nbx / 8;
+ nby = nby / 8;
+ size = nbx * nby * 4;
+ size += track->htile_offset;
+
+ if (size > radeon_bo_size(track->htile_bo)) {
+ dev_warn(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n",
+ __func__, __LINE__, radeon_bo_size(track->htile_bo),
+ size, nbx, nby);
+ return -EINVAL;
+ }
+ return 0;
+}
+
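
A quick numeric check of the size formula (illustrative: linear htile surface, npipes = 2, 1024x768 depth buffer):

    /*   nbx = round_up(1024, 16 * 8)       = 1024
     *   nby = round_up(768, 2 * 8)         = 768
     *   htiles = (1024 / 8) * (768 / 8)    = 128 * 96 = 12288
     *   size   = 12288 * 4 = 49152 bytes, which together with htile_offset
     *            must fit inside htile_bo
     */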
static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p)
{
struct evergreen_cs_track *track = p->track;
@@ -530,6 +592,14 @@ static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p)
return -EINVAL;
}
+ /* hyperz */
+ if (G_028040_TILE_SURFACE_ENABLE(track->db_z_info)) {
+ r = evergreen_cs_track_validate_htile(p, surf.nbx, surf.nby);
+ if (r) {
+ return r;
+ }
+ }
+
return 0;
}
@@ -617,6 +687,14 @@ static int evergreen_cs_track_validate_depth(struct radeon_cs_parser *p)
return -EINVAL;
}
+ /* hyperz */
+ if (G_028040_TILE_SURFACE_ENABLE(track->db_z_info)) {
+ r = evergreen_cs_track_validate_htile(p, surf.nbx, surf.nby);
+ if (r) {
+ return r;
+ }
+ }
+
return 0;
}
@@ -850,7 +928,7 @@ static int evergreen_cs_track_check(struct radeon_cs_parser *p)
return r;
}
/* Check depth buffer */
- if (G_028800_Z_WRITE_ENABLE(track->db_depth_control)) {
+ if (G_028800_Z_ENABLE(track->db_depth_control)) {
r = evergreen_cs_track_validate_depth(p);
if (r)
return r;
@@ -1616,6 +1694,23 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->cb_color_bo[tmp] = reloc->robj;
track->cb_dirty = true;
break;
+ case DB_HTILE_DATA_BASE:
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ track->htile_offset = radeon_get_ib_value(p, idx);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->htile_bo = reloc->robj;
+ track->db_dirty = true;
+ break;
+ case DB_HTILE_SURFACE:
+ /* 8x8 only */
+ track->htile_surface = radeon_get_ib_value(p, idx);
+ track->db_dirty = true;
+ break;
case CB_IMMED0_BASE:
case CB_IMMED1_BASE:
case CB_IMMED2_BASE:
@@ -1628,7 +1723,6 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_IMMED9_BASE:
case CB_IMMED10_BASE:
case CB_IMMED11_BASE:
- case DB_HTILE_DATA_BASE:
case SQ_PGM_START_FS:
case SQ_PGM_START_ES:
case SQ_PGM_START_VS:
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index eb5708c7159d..b4eefc355f16 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -991,6 +991,14 @@
#define G_028008_SLICE_MAX(x) (((x) >> 13) & 0x7FF)
#define C_028008_SLICE_MAX 0xFF001FFF
#define DB_HTILE_DATA_BASE 0x28014
+#define DB_HTILE_SURFACE 0x28abc
+#define S_028ABC_HTILE_WIDTH(x) (((x) & 0x1) << 0)
+#define G_028ABC_HTILE_WIDTH(x) (((x) >> 0) & 0x1)
+#define C_028ABC_HTILE_WIDTH 0xFFFFFFFE
+#define S_028ABC_HTILE_HEIGHT(x) (((x) & 0x1) << 1)
+#define G_028ABC_HTILE_HEIGHT(x) (((x) >> 1) & 0x1)
+#define C_028ABC_HTILE_HEIGHT 0xFFFFFFFD
+#define G_028ABC_LINEAR(x) (((x) >> 2) & 0x1)
#define DB_Z_INFO 0x28040
# define Z_ARRAY_MODE(x) ((x) << 4)
# define DB_TILE_SPLIT(x) (((x) & 0x7) << 8)
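
The new defines follow the radeon S_/G_/C_ register convention: S_* shifts a value into the field, G_* extracts it, and C_* is the AND-mask that clears it. A small illustration using the DB_HTILE_SURFACE bits added above:

    u32 surf = S_028ABC_HTILE_WIDTH(1) | S_028ABC_HTILE_HEIGHT(1);  /* 0x3 */

    G_028ABC_HTILE_WIDTH(surf);     /* -> 1: extract the width bit */
    surf &= C_028ABC_HTILE_HEIGHT;  /* clear just the height bit, keep the rest */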
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 0ec3f205f9c4..b8e12af304a9 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -78,6 +78,9 @@ struct r600_cs_track {
bool cb_dirty;
bool db_dirty;
bool streamout_dirty;
+ struct radeon_bo *htile_bo;
+ u64 htile_offset;
+ u32 htile_surface;
};
#define FMT_8_BIT(fmt, vc) [fmt] = { 1, 1, 1, vc, CHIP_R600 }
@@ -321,6 +324,9 @@ static void r600_cs_track_init(struct r600_cs_track *track)
track->db_depth_size_idx = 0;
track->db_depth_control = 0xFFFFFFFF;
track->db_dirty = true;
+ track->htile_bo = NULL;
+ track->htile_offset = 0xFFFFFFFF;
+ track->htile_surface = 0;
for (i = 0; i < 4; i++) {
track->vgt_strmout_size[i] = 0;
@@ -455,12 +461,256 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
return 0;
}
+static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
+{
+ struct r600_cs_track *track = p->track;
+ u32 nviews, bpe, ntiles, size, slice_tile_max, tmp;
+ u32 height_align, pitch_align, depth_align;
+ u32 pitch = 8192;
+ u32 height = 8192;
+ u64 base_offset, base_align;
+ struct array_mode_checker array_check;
+ int array_mode;
+ volatile u32 *ib = p->ib->ptr;
+
+ if (track->db_bo == NULL) {
+ dev_warn(p->dev, "z/stencil with no depth buffer\n");
+ return -EINVAL;
+ }
+ switch (G_028010_FORMAT(track->db_depth_info)) {
+ case V_028010_DEPTH_16:
+ bpe = 2;
+ break;
+ case V_028010_DEPTH_X8_24:
+ case V_028010_DEPTH_8_24:
+ case V_028010_DEPTH_X8_24_FLOAT:
+ case V_028010_DEPTH_8_24_FLOAT:
+ case V_028010_DEPTH_32_FLOAT:
+ bpe = 4;
+ break;
+ case V_028010_DEPTH_X24_8_32_FLOAT:
+ bpe = 8;
+ break;
+ default:
+ dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info));
+ return -EINVAL;
+ }
+ if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
+ if (!track->db_depth_size_idx) {
+ dev_warn(p->dev, "z/stencil buffer size not set\n");
+ return -EINVAL;
+ }
+ tmp = radeon_bo_size(track->db_bo) - track->db_offset;
+ tmp = (tmp / bpe) >> 6;
+ if (!tmp) {
+ dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
+ track->db_depth_size, bpe, track->db_offset,
+ radeon_bo_size(track->db_bo));
+ return -EINVAL;
+ }
+ ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
+ } else {
+ size = radeon_bo_size(track->db_bo);
+ /* pitch in pixels */
+ pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8;
+ slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
+ slice_tile_max *= 64;
+ height = slice_tile_max / pitch;
+ if (height > 8192)
+ height = 8192;
+ base_offset = track->db_bo_mc + track->db_offset;
+ array_mode = G_028010_ARRAY_MODE(track->db_depth_info);
+ array_check.array_mode = array_mode;
+ array_check.group_size = track->group_size;
+ array_check.nbanks = track->nbanks;
+ array_check.npipes = track->npipes;
+ array_check.nsamples = track->nsamples;
+ array_check.blocksize = bpe;
+ if (r600_get_array_mode_alignment(&array_check,
+ &pitch_align, &height_align, &depth_align, &base_align)) {
+ dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
+ G_028010_ARRAY_MODE(track->db_depth_info),
+ track->db_depth_info);
+ return -EINVAL;
+ }
+ switch (array_mode) {
+ case V_028010_ARRAY_1D_TILED_THIN1:
+ /* don't break userspace */
+ height &= ~0x7;
+ break;
+ case V_028010_ARRAY_2D_TILED_THIN1:
+ break;
+ default:
+ dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
+ G_028010_ARRAY_MODE(track->db_depth_info),
+ track->db_depth_info);
+ return -EINVAL;
+ }
+
+ if (!IS_ALIGNED(pitch, pitch_align)) {
+ dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n",
+ __func__, __LINE__, pitch, pitch_align, array_mode);
+ return -EINVAL;
+ }
+ if (!IS_ALIGNED(height, height_align)) {
+ dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n",
+ __func__, __LINE__, height, height_align, array_mode);
+ return -EINVAL;
+ }
+ if (!IS_ALIGNED(base_offset, base_align)) {
+ dev_warn(p->dev, "%s offset 0x%llx, 0x%llx, %d not aligned\n", __func__,
+ base_offset, base_align, array_mode);
+ return -EINVAL;
+ }
+
+ ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
+ nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
+ tmp = ntiles * bpe * 64 * nviews;
+ if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
+ dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
+ array_mode,
+ track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
+ radeon_bo_size(track->db_bo));
+ return -EINVAL;
+ }
+ }
+
+ /* hyperz */
+ if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) {
+ unsigned long size;
+ unsigned nbx, nby;
+
+ if (track->htile_bo == NULL) {
+ dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n",
+ __func__, __LINE__, track->db_depth_info);
+ return -EINVAL;
+ }
+ if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
+ dev_warn(p->dev, "%s:%d htile can't be enabled with bogus db_depth_size 0x%08x\n",
+ __func__, __LINE__, track->db_depth_size);
+ return -EINVAL;
+ }
+
+ nbx = pitch;
+ nby = height;
+ if (G_028D24_LINEAR(track->htile_surface)) {
+ /* nbx must be 16 htiles aligned == 16 * 8 pixel aligned */
+ nbx = round_up(nbx, 16 * 8);
+ /* nby is npipes htiles aligned == npipes * 8 pixel aligned */
+ nby = round_up(nby, track->npipes * 8);
+ } else {
+ /* htile width & nby (8 or 4) make a 2-bit number */
+ tmp = track->htile_surface & 3;
+ /* alignment is htile align * 8; htile align varies with the
+ * number of pipes and the htile width and height
+ */
+ switch (track->npipes) {
+ case 8:
+ switch (tmp) {
+ case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+ nbx = round_up(nbx, 64 * 8);
+ nby = round_up(nby, 64 * 8);
+ break;
+ case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
+ case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
+ nbx = round_up(nbx, 64 * 8);
+ nby = round_up(nby, 32 * 8);
+ break;
+ case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
+ nbx = round_up(nbx, 32 * 8);
+ nby = round_up(nby, 32 * 8);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case 4:
+ switch (tmp) {
+ case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+ nbx = round_up(nbx, 64 * 8);
+ nby = round_up(nby, 32 * 8);
+ break;
+ case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
+ case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
+ nbx = round_up(nbx, 32 * 8);
+ nby = round_up(nby, 32 * 8);
+ break;
+ case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
+ nbx = round_up(nbx, 32 * 8);
+ nby = round_up(nby, 16 * 8);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case 2:
+ switch (tmp) {
+ case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+ nbx = round_up(nbx, 32 * 8);
+ nby = round_up(nby, 32 * 8);
+ break;
+ case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
+ case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
+ nbx = round_up(nbx, 32 * 8);
+ nby = round_up(nby, 16 * 8);
+ break;
+ case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
+ nbx = round_up(nbx, 16 * 8);
+ nby = round_up(nby, 16 * 8);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case 1:
+ switch (tmp) {
+ case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+ nbx = round_up(nbx, 32 * 8);
+ nby = round_up(nby, 16 * 8);
+ break;
+ case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
+ case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
+ nbx = round_up(nbx, 16 * 8);
+ nby = round_up(nby, 16 * 8);
+ break;
+ case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
+ nbx = round_up(nbx, 16 * 8);
+ nby = round_up(nby, 8 * 8);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
+ __func__, __LINE__, track->npipes);
+ return -EINVAL;
+ }
+ }
+ /* compute the number of htiles */
+ nbx = G_028D24_HTILE_WIDTH(track->htile_surface) ? nbx / 8 : nbx / 4;
+ nby = G_028D24_HTILE_HEIGHT(track->htile_surface) ? nby / 8 : nby / 4;
+ size = nbx * nby * 4;
+ size += track->htile_offset;
+
+ if (size > radeon_bo_size(track->htile_bo)) {
+ dev_warn(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n",
+ __func__, __LINE__, radeon_bo_size(track->htile_bo),
+ size, nbx, nby);
+ return -EINVAL;
+ }
+ }
+
+ track->db_dirty = false;
+ return 0;
+}
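/*
 * Aside (illustrative, not part of the patch): a minimal userspace
 * sketch of the htile footprint arithmetic above, with the
 * npipes-dependent rounding folded into a single alignment parameter.
 * All helper names here are made up for the example.
 */
#include <stdio.h>

static unsigned round_up_u(unsigned v, unsigned a)
{
	return ((v + a - 1) / a) * a;	/* round v up to a multiple of a */
}

/* Bytes of htile data for a pitch x height surface: after alignment,
 * the surface is cut into htile_w x htile_h pixel tiles and each
 * htile costs 4 bytes, matching "size = nbx * nby * 4" above. */
static unsigned long htile_bytes(unsigned pitch, unsigned height,
				 unsigned align_x, unsigned align_y,
				 unsigned htile_w, unsigned htile_h)
{
	unsigned nbx = round_up_u(pitch, align_x);
	unsigned nby = round_up_u(height, align_y);

	return (unsigned long)(nbx / htile_w) * (nby / htile_h) * 4;
}

int main(void)
{
	/* 8 pipes, 8x8 htiles: align both axes to 64 * 8, as above */
	printf("%lu\n", htile_bytes(1920, 1080, 64 * 8, 64 * 8, 8, 8));
	return 0;
}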
+
static int r600_cs_track_check(struct radeon_cs_parser *p)
{
struct r600_cs_track *track = p->track;
u32 tmp;
int r, i;
- volatile u32 *ib = p->ib->ptr;
/* on legacy kernels we don't perform the advanced checks */
if (p->rdev == NULL)
@@ -513,124 +763,14 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
track->cb_dirty = false;
}
- if (track->db_dirty) {
- /* Check depth buffer */
- if (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
- G_028800_Z_ENABLE(track->db_depth_control)) {
- u32 nviews, bpe, ntiles, size, slice_tile_max;
- u32 height, height_align, pitch, pitch_align, depth_align;
- u64 base_offset, base_align;
- struct array_mode_checker array_check;
- int array_mode;
-
- if (track->db_bo == NULL) {
- dev_warn(p->dev, "z/stencil with no depth buffer\n");
- return -EINVAL;
- }
- if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) {
- dev_warn(p->dev, "this kernel doesn't support z/stencil htile\n");
- return -EINVAL;
- }
- switch (G_028010_FORMAT(track->db_depth_info)) {
- case V_028010_DEPTH_16:
- bpe = 2;
- break;
- case V_028010_DEPTH_X8_24:
- case V_028010_DEPTH_8_24:
- case V_028010_DEPTH_X8_24_FLOAT:
- case V_028010_DEPTH_8_24_FLOAT:
- case V_028010_DEPTH_32_FLOAT:
- bpe = 4;
- break;
- case V_028010_DEPTH_X24_8_32_FLOAT:
- bpe = 8;
- break;
- default:
- dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info));
- return -EINVAL;
- }
- if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
- if (!track->db_depth_size_idx) {
- dev_warn(p->dev, "z/stencil buffer size not set\n");
- return -EINVAL;
- }
- tmp = radeon_bo_size(track->db_bo) - track->db_offset;
- tmp = (tmp / bpe) >> 6;
- if (!tmp) {
- dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
- track->db_depth_size, bpe, track->db_offset,
- radeon_bo_size(track->db_bo));
- return -EINVAL;
- }
- ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
- } else {
- size = radeon_bo_size(track->db_bo);
- /* pitch in pixels */
- pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8;
- slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
- slice_tile_max *= 64;
- height = slice_tile_max / pitch;
- if (height > 8192)
- height = 8192;
- base_offset = track->db_bo_mc + track->db_offset;
- array_mode = G_028010_ARRAY_MODE(track->db_depth_info);
- array_check.array_mode = array_mode;
- array_check.group_size = track->group_size;
- array_check.nbanks = track->nbanks;
- array_check.npipes = track->npipes;
- array_check.nsamples = track->nsamples;
- array_check.blocksize = bpe;
- if (r600_get_array_mode_alignment(&array_check,
- &pitch_align, &height_align, &depth_align, &base_align)) {
- dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
- G_028010_ARRAY_MODE(track->db_depth_info),
- track->db_depth_info);
- return -EINVAL;
- }
- switch (array_mode) {
- case V_028010_ARRAY_1D_TILED_THIN1:
- /* don't break userspace */
- height &= ~0x7;
- break;
- case V_028010_ARRAY_2D_TILED_THIN1:
- break;
- default:
- dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
- G_028010_ARRAY_MODE(track->db_depth_info),
- track->db_depth_info);
- return -EINVAL;
- }
-
- if (!IS_ALIGNED(pitch, pitch_align)) {
- dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n",
- __func__, __LINE__, pitch, pitch_align, array_mode);
- return -EINVAL;
- }
- if (!IS_ALIGNED(height, height_align)) {
- dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n",
- __func__, __LINE__, height, height_align, array_mode);
- return -EINVAL;
- }
- if (!IS_ALIGNED(base_offset, base_align)) {
- dev_warn(p->dev, "%s offset[%d] 0x%llx, 0x%llx, %d not aligned\n", __func__, i,
- base_offset, base_align, array_mode);
- return -EINVAL;
- }
-
- ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
- nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
- tmp = ntiles * bpe * 64 * nviews;
- if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
- dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
- array_mode,
- track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
- radeon_bo_size(track->db_bo));
- return -EINVAL;
- }
- }
- }
- track->db_dirty = false;
+ /* Check depth buffer */
+ if (track->db_dirty && (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
+ G_028800_Z_ENABLE(track->db_depth_control))) {
+ r = r600_cs_track_validate_db(p);
+ if (r)
+ return r;
}
+
return 0;
}
@@ -1244,6 +1384,21 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->db_dirty = true;
break;
case DB_HTILE_DATA_BASE:
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ track->htile_offset = radeon_get_ib_value(p, idx) << 8;
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->htile_bo = reloc->robj;
+ track->db_dirty = true;
+ break;
+ case DB_HTILE_SURFACE:
+ track->htile_surface = radeon_get_ib_value(p, idx);
+ track->db_dirty = true;
+ break;
case SQ_PGM_START_FS:
case SQ_PGM_START_ES:
case SQ_PGM_START_VS:
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 3568a2e345fa..59f9c993cc31 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -195,6 +195,14 @@
#define PREZ_MUST_WAIT_FOR_POSTZ_DONE (1 << 31)
#define DB_DEPTH_BASE 0x2800C
#define DB_HTILE_DATA_BASE 0x28014
+#define DB_HTILE_SURFACE 0x28D24
+#define S_028D24_HTILE_WIDTH(x) (((x) & 0x1) << 0)
+#define G_028D24_HTILE_WIDTH(x) (((x) >> 0) & 0x1)
+#define C_028D24_HTILE_WIDTH 0xFFFFFFFE
+#define S_028D24_HTILE_HEIGHT(x) (((x) & 0x1) << 1)
+#define G_028D24_HTILE_HEIGHT(x) (((x) >> 1) & 0x1)
+#define C_028D24_HTILE_HEIGHT 0xFFFFFFFD
+#define G_028D24_LINEAR(x) (((x) >> 2) & 0x1)
#define DB_WATERMARKS 0x9838
#define DEPTH_FREE(x) ((x) << 0)
#define DEPTH_FLUSH(x) ((x) << 5)
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 91541e63d582..6f70158d34e4 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -233,7 +233,17 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
bo->pin_count++;
if (gpu_addr)
*gpu_addr = radeon_bo_gpu_offset(bo);
- WARN_ON_ONCE(max_offset != 0);
+
+ if (max_offset != 0) {
+ u64 domain_start;
+
+ if (domain == RADEON_GEM_DOMAIN_VRAM)
+ domain_start = bo->rdev->mc.vram_start;
+ else
+ domain_start = bo->rdev->mc.gtt_start;
+ WARN_ON_ONCE((*gpu_addr - domain_start) > max_offset);
+ }
+
return 0;
}
radeon_ttm_placement_from_domain(bo, domain);
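The replaced WARN_ON_ONCE(max_offset != 0) had flagged every restricted pin of an already-pinned buffer; the new code instead verifies that the buffer already sits inside the requested window. A hedged sketch of that invariant (names illustrative; domain_start stands in for mc.vram_start or mc.gtt_start):

#include <stdint.h>
#include <stdio.h>

/* An already-pinned buffer satisfies a restricted pin request only if
 * its offset inside the domain does not exceed the requested ceiling. */
static int pin_in_range(uint64_t gpu_addr, uint64_t domain_start,
			uint64_t max_offset)
{
	if (max_offset == 0)		/* 0 means "no restriction" */
		return 1;
	return (gpu_addr - domain_start) <= max_offset;
}

int main(void)
{
	printf("%d\n", pin_in_range(0x1000, 0x0, 0x2000));	/* 1 */
	printf("%d\n", pin_in_range(0x3000, 0x0, 0x2000));	/* 0 */
	return 0;
}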
diff --git a/drivers/gpu/drm/radeon/reg_srcs/cayman b/drivers/gpu/drm/radeon/reg_srcs/cayman
index aea63c415852..0f656b111c15 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/cayman
+++ b/drivers/gpu/drm/radeon/reg_srcs/cayman
@@ -509,7 +509,6 @@ cayman 0x9400
0x00028AA8 IA_MULTI_VGT_PARAM
0x00028AB4 VGT_REUSE_OFF
0x00028AB8 VGT_VTX_CNT_EN
-0x00028ABC DB_HTILE_SURFACE
0x00028AC0 DB_SRESULTS_COMPARE_STATE0
0x00028AC4 DB_SRESULTS_COMPARE_STATE1
0x00028AC8 DB_PRELOAD_CONTROL
diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen
index 77c37202376f..b912a37689bf 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/evergreen
+++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen
@@ -519,7 +519,6 @@ evergreen 0x9400
0x00028AA4 VGT_INSTANCE_STEP_RATE_1
0x00028AB4 VGT_REUSE_OFF
0x00028AB8 VGT_VTX_CNT_EN
-0x00028ABC DB_HTILE_SURFACE
0x00028AC0 DB_SRESULTS_COMPARE_STATE0
0x00028AC4 DB_SRESULTS_COMPARE_STATE1
0x00028AC8 DB_PRELOAD_CONTROL
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600
index 626c24ea0b56..5e659b034d9a 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r600
+++ b/drivers/gpu/drm/radeon/reg_srcs/r600
@@ -713,7 +713,6 @@ r600 0x9400
0x0000A710 TD_VS_SAMPLER17_BORDER_RED
0x00009508 TA_CNTL_AUX
0x0002802C DB_DEPTH_CLEAR
-0x00028D24 DB_HTILE_SURFACE
0x00028D34 DB_PREFETCH_LIMIT
0x00028D30 DB_PRELOAD_CONTROL
0x00028D0C DB_RENDER_CONTROL
diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c
index a651779d9ff7..c0330a41db03 100644
--- a/drivers/i2c/busses/i2c-gpio.c
+++ b/drivers/i2c/busses/i2c-gpio.c
@@ -14,8 +14,15 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
-
-#include <asm/gpio.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/of_i2c.h>
+
+struct i2c_gpio_private_data {
+ struct i2c_adapter adap;
+ struct i2c_algo_bit_data bit_data;
+ struct i2c_gpio_platform_data pdata;
+};
/* Toggle SDA by changing the direction of the pin */
static void i2c_gpio_setsda_dir(void *data, int state)
@@ -78,24 +85,62 @@ static int i2c_gpio_getscl(void *data)
return gpio_get_value(pdata->scl_pin);
}
+static int __devinit of_i2c_gpio_probe(struct device_node *np,
+ struct i2c_gpio_platform_data *pdata)
+{
+ u32 reg;
+
+ if (of_gpio_count(np) < 2)
+ return -ENODEV;
+
+ pdata->sda_pin = of_get_gpio(np, 0);
+ pdata->scl_pin = of_get_gpio(np, 1);
+
+ if (!gpio_is_valid(pdata->sda_pin) || !gpio_is_valid(pdata->scl_pin)) {
+ pr_err("%s: invalid GPIO pins, sda=%d/scl=%d\n",
+ np->full_name, pdata->sda_pin, pdata->scl_pin);
+ return -ENODEV;
+ }
+
+ of_property_read_u32(np, "i2c-gpio,delay-us", &pdata->udelay);
+
+ if (!of_property_read_u32(np, "i2c-gpio,timeout-ms", &reg))
+ pdata->timeout = msecs_to_jiffies(reg);
+
+ pdata->sda_is_open_drain =
+ of_property_read_bool(np, "i2c-gpio,sda-open-drain");
+ pdata->scl_is_open_drain =
+ of_property_read_bool(np, "i2c-gpio,scl-open-drain");
+ pdata->scl_is_output_only =
+ of_property_read_bool(np, "i2c-gpio,scl-output-only");
+
+ return 0;
+}
+
static int __devinit i2c_gpio_probe(struct platform_device *pdev)
{
+ struct i2c_gpio_private_data *priv;
struct i2c_gpio_platform_data *pdata;
struct i2c_algo_bit_data *bit_data;
struct i2c_adapter *adap;
int ret;
- pdata = pdev->dev.platform_data;
- if (!pdata)
- return -ENXIO;
-
- ret = -ENOMEM;
- adap = kzalloc(sizeof(struct i2c_adapter), GFP_KERNEL);
- if (!adap)
- goto err_alloc_adap;
- bit_data = kzalloc(sizeof(struct i2c_algo_bit_data), GFP_KERNEL);
- if (!bit_data)
- goto err_alloc_bit_data;
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ adap = &priv->adap;
+ bit_data = &priv->bit_data;
+ pdata = &priv->pdata;
+
+ if (pdev->dev.of_node) {
+ ret = of_i2c_gpio_probe(pdev->dev.of_node, pdata);
+ if (ret)
+ return ret;
+ } else {
+ if (!pdev->dev.platform_data)
+ return -ENXIO;
+ memcpy(pdata, pdev->dev.platform_data, sizeof(*pdata));
+ }
ret = gpio_request(pdata->sda_pin, "sda");
if (ret)
@@ -143,6 +188,7 @@ static int __devinit i2c_gpio_probe(struct platform_device *pdev)
adap->algo_data = bit_data;
adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
adap->dev.parent = &pdev->dev;
+ adap->dev.of_node = pdev->dev.of_node;
/*
* If "dev->id" is negative we consider it as zero.
@@ -154,7 +200,9 @@ static int __devinit i2c_gpio_probe(struct platform_device *pdev)
if (ret)
goto err_add_bus;
- platform_set_drvdata(pdev, adap);
+ of_i2c_register_devices(adap);
+
+ platform_set_drvdata(pdev, priv);
dev_info(&pdev->dev, "using pins %u (SDA) and %u (SCL%s)\n",
pdata->sda_pin, pdata->scl_pin,
@@ -168,34 +216,40 @@ err_add_bus:
err_request_scl:
gpio_free(pdata->sda_pin);
err_request_sda:
- kfree(bit_data);
-err_alloc_bit_data:
- kfree(adap);
-err_alloc_adap:
return ret;
}
static int __devexit i2c_gpio_remove(struct platform_device *pdev)
{
+ struct i2c_gpio_private_data *priv;
struct i2c_gpio_platform_data *pdata;
struct i2c_adapter *adap;
- adap = platform_get_drvdata(pdev);
- pdata = pdev->dev.platform_data;
+ priv = platform_get_drvdata(pdev);
+ adap = &priv->adap;
+ pdata = &priv->pdata;
i2c_del_adapter(adap);
gpio_free(pdata->scl_pin);
gpio_free(pdata->sda_pin);
- kfree(adap->algo_data);
- kfree(adap);
return 0;
}
+#if defined(CONFIG_OF)
+static const struct of_device_id i2c_gpio_dt_ids[] = {
+ { .compatible = "i2c-gpio", },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, i2c_gpio_dt_ids);
+#endif
+
static struct platform_driver i2c_gpio_driver = {
.driver = {
.name = "i2c-gpio",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(i2c_gpio_dt_ids),
},
.probe = i2c_gpio_probe,
.remove = __devexit_p(i2c_gpio_remove),
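Two patterns carry this conversion: a single devm-managed allocation that embeds the adapter, the algorithm data and a private copy of the platform data, and a DT parser that fills that same pdata. A sketch of the allocation side, with stand-in types rather than the real i2c structures:

#include <stdlib.h>
#include <string.h>

struct adapter  { int nr; };		/* stands in for i2c_adapter */
struct algodata { int udelay; };	/* stands in for i2c_algo_bit_data */
struct pdata    { int sda_pin, scl_pin; };

/* One block holds all three objects, so a single free (or, in the
 * driver, devm_kzalloc's automatic cleanup) releases everything and
 * the err_alloc_* unwind labels disappear. */
struct priv {
	struct adapter  adap;
	struct algodata bits;
	struct pdata    pdata;
};

static struct priv *demo_probe(const struct pdata *board)
{
	struct priv *p = calloc(1, sizeof(*p));

	if (!p)
		return NULL;
	memcpy(&p->pdata, board, sizeof(p->pdata));	/* private copy */
	return p;
}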
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 124d9c594f40..dfb84b7ee550 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -191,7 +191,7 @@ static int i2c_imx_start(struct imx_i2c_struct *i2c_imx)
dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__);
- clk_enable(i2c_imx->clk);
+ clk_prepare_enable(i2c_imx->clk);
writeb(i2c_imx->ifdr, i2c_imx->base + IMX_I2C_IFDR);
/* Enable I2C controller */
writeb(0, i2c_imx->base + IMX_I2C_I2SR);
@@ -240,7 +240,7 @@ static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx)
/* Disable I2C controller */
writeb(0, i2c_imx->base + IMX_I2C_I2CR);
- clk_disable(i2c_imx->clk);
+ clk_disable_unprepare(i2c_imx->clk);
}
static void __init i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx,
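For context, clk_prepare_enable() and clk_disable_unprepare() are the common clock framework helpers that bundle the sleepable prepare/unprepare step with the atomic enable/disable, so this is the usual conversion when a platform moves to the common framework. A minimal sketch of the pairing:

#include <linux/clk.h>

/* Sketch only: every clk_prepare_enable() must be balanced by a
 * clk_disable_unprepare() on the same clock. */
static int bus_clock_on(struct clk *clk)
{
	return clk_prepare_enable(clk);	/* clk_prepare() + clk_enable() */
}

static void bus_clock_off(struct clk *clk)
{
	clk_disable_unprepare(clk);	/* clk_disable() + clk_unprepare() */
}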
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index d60364650990..f6733267fa9c 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -29,6 +29,8 @@
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/i2c-pxa.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/of_i2c.h>
#include <linux/platform_device.h>
#include <linux/err.h>
@@ -1044,23 +1046,60 @@ static const struct i2c_algorithm i2c_pxa_pio_algorithm = {
.functionality = i2c_pxa_functionality,
};
-static int i2c_pxa_probe(struct platform_device *dev)
+static struct of_device_id i2c_pxa_dt_ids[] = {
+ { .compatible = "mrvl,pxa-i2c", .data = (void *)REGS_PXA2XX },
+ { .compatible = "mrvl,pwri2c", .data = (void *)REGS_PXA3XX },
+ { .compatible = "mrvl,mmp-twsi", .data = (void *)REGS_PXA2XX },
+ {}
+};
+MODULE_DEVICE_TABLE(of, i2c_pxa_dt_ids);
+
+static int i2c_pxa_probe_dt(struct platform_device *pdev, struct pxa_i2c *i2c,
+ enum pxa_i2c_types *i2c_types)
{
- struct pxa_i2c *i2c;
- struct resource *res;
- struct i2c_pxa_platform_data *plat = dev->dev.platform_data;
- const struct platform_device_id *id = platform_get_device_id(dev);
- enum pxa_i2c_types i2c_type = id->driver_data;
+ struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *of_id =
+ of_match_device(i2c_pxa_dt_ids, &pdev->dev);
int ret;
- int irq;
- res = platform_get_resource(dev, IORESOURCE_MEM, 0);
- irq = platform_get_irq(dev, 0);
- if (res == NULL || irq < 0)
- return -ENODEV;
+ if (!of_id)
+ return 1;
+ ret = of_alias_get_id(np, "i2c");
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret);
+ return ret;
+ }
+ pdev->id = ret;
+ if (of_get_property(np, "mrvl,i2c-polling", NULL))
+ i2c->use_pio = 1;
+ if (of_get_property(np, "mrvl,i2c-fast-mode", NULL))
+ i2c->fast_mode = 1;
+ *i2c_types = (u32)(of_id->data);
+ return 0;
+}
- if (!request_mem_region(res->start, resource_size(res), res->name))
- return -ENOMEM;
+static int i2c_pxa_probe_pdata(struct platform_device *pdev,
+ struct pxa_i2c *i2c,
+ enum pxa_i2c_types *i2c_types)
+{
+ struct i2c_pxa_platform_data *plat = pdev->dev.platform_data;
+ const struct platform_device_id *id = platform_get_device_id(pdev);
+
+ *i2c_types = id->driver_data;
+ if (plat) {
+ i2c->use_pio = plat->use_pio;
+ i2c->fast_mode = plat->fast_mode;
+ }
+ return 0;
+}
+
+static int i2c_pxa_probe(struct platform_device *dev)
+{
+ struct i2c_pxa_platform_data *plat = dev->dev.platform_data;
+ enum pxa_i2c_types i2c_type;
+ struct pxa_i2c *i2c;
+ struct resource *res = NULL;
+ int ret, irq;
i2c = kzalloc(sizeof(struct pxa_i2c), GFP_KERNEL);
if (!i2c) {
@@ -1068,6 +1107,24 @@ static int i2c_pxa_probe(struct platform_device *dev)
goto emalloc;
}
+ ret = i2c_pxa_probe_dt(dev, i2c, &i2c_type);
+ if (ret > 0)
+ ret = i2c_pxa_probe_pdata(dev, i2c, &i2c_type);
+ if (ret < 0)
+ goto eclk;
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(dev, 0);
+ if (res == NULL || irq < 0) {
+ ret = -ENODEV;
+ goto eclk;
+ }
+
+ if (!request_mem_region(res->start, resource_size(res), res->name)) {
+ ret = -ENOMEM;
+ goto eclk;
+ }
+
i2c->adap.owner = THIS_MODULE;
i2c->adap.retries = 5;
@@ -1109,21 +1166,16 @@ static int i2c_pxa_probe(struct platform_device *dev)
i2c->slave_addr = I2C_PXA_SLAVE_ADDR;
-#ifdef CONFIG_I2C_PXA_SLAVE
if (plat) {
+#ifdef CONFIG_I2C_PXA_SLAVE
i2c->slave_addr = plat->slave_addr;
i2c->slave = plat->slave;
- }
#endif
-
- clk_enable(i2c->clk);
-
- if (plat) {
i2c->adap.class = plat->class;
- i2c->use_pio = plat->use_pio;
- i2c->fast_mode = plat->fast_mode;
}
+ clk_enable(i2c->clk);
+
if (i2c->use_pio) {
i2c->adap.algo = &i2c_pxa_pio_algorithm;
} else {
@@ -1234,6 +1286,7 @@ static struct platform_driver i2c_pxa_driver = {
.name = "pxa2xx-i2c",
.owner = THIS_MODULE,
.pm = I2C_PXA_DEV_PM_OPS,
+ .of_match_table = i2c_pxa_dt_ids,
},
.id_table = i2c_pxa_id_table,
};
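The return-value convention in i2c_pxa_probe_dt() is worth noting: negative means a real failure, 0 means the device tree fully configured the device, and 1 means "no OF match, fall back to platform data". A self-contained sketch of that dispatch shape (types and helpers are stand-ins, not the driver's):

/* probe_dt()/probe_pdata() mimic the two i2c_pxa_probe_* helpers. */
struct cfg { int use_pio, fast_mode; };

static int probe_dt(struct cfg *c)    { (void)c; return 1; }  /* no OF match */
static int probe_pdata(struct cfg *c) { c->use_pio = 0; return 0; }

static int probe_config(struct cfg *c)
{
	int ret = probe_dt(c);

	if (ret > 0)		/* not described in the device tree */
		ret = probe_pdata(c);
	return ret;		/* < 0 would abort the probe */
}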
diff --git a/drivers/input/keyboard/jornada720_kbd.c b/drivers/input/keyboard/jornada720_kbd.c
index eeafc30b207b..9d639fa1afbd 100644
--- a/drivers/input/keyboard/jornada720_kbd.c
+++ b/drivers/input/keyboard/jornada720_kbd.c
@@ -27,6 +27,7 @@
#include <mach/jornada720.h>
#include <mach/hardware.h>
+#include <mach/irqs.h>
MODULE_AUTHOR("Kristoffer Ericson <Kristoffer.Ericson@gmail.com>");
MODULE_DESCRIPTION("HP Jornada 710/720/728 keyboard driver");
diff --git a/drivers/input/serio/ams_delta_serio.c b/drivers/input/serio/ams_delta_serio.c
index d4d08bd9205b..bd5b10eeeb40 100644
--- a/drivers/input/serio/ams_delta_serio.c
+++ b/drivers/input/serio/ams_delta_serio.c
@@ -92,8 +92,7 @@ static irqreturn_t ams_delta_serio_interrupt(int irq, void *dev_id)
static int ams_delta_serio_open(struct serio *serio)
{
/* enable keyboard */
- ams_delta_latch2_write(AMD_DELTA_LATCH2_KEYBRD_PWR,
- AMD_DELTA_LATCH2_KEYBRD_PWR);
+ gpio_set_value(AMS_DELTA_GPIO_PIN_KEYBRD_PWR, 1);
return 0;
}
@@ -101,9 +100,32 @@ static int ams_delta_serio_open(struct serio *serio)
static void ams_delta_serio_close(struct serio *serio)
{
/* disable keyboard */
- ams_delta_latch2_write(AMD_DELTA_LATCH2_KEYBRD_PWR, 0);
+ gpio_set_value(AMS_DELTA_GPIO_PIN_KEYBRD_PWR, 0);
}
+static const struct gpio ams_delta_gpios[] __initconst_or_module = {
+ {
+ .gpio = AMS_DELTA_GPIO_PIN_KEYBRD_DATA,
+ .flags = GPIOF_DIR_IN,
+ .label = "serio-data",
+ },
+ {
+ .gpio = AMS_DELTA_GPIO_PIN_KEYBRD_CLK,
+ .flags = GPIOF_DIR_IN,
+ .label = "serio-clock",
+ },
+ {
+ .gpio = AMS_DELTA_GPIO_PIN_KEYBRD_PWR,
+ .flags = GPIOF_OUT_INIT_LOW,
+ .label = "serio-power",
+ },
+ {
+ .gpio = AMS_DELTA_GPIO_PIN_KEYBRD_DATAOUT,
+ .flags = GPIOF_OUT_INIT_LOW,
+ .label = "serio-dataout",
+ },
+};
+
static int __init ams_delta_serio_init(void)
{
int err;
@@ -123,19 +145,12 @@ static int __init ams_delta_serio_init(void)
strlcpy(ams_delta_serio->phys, "GPIO/serio0",
sizeof(ams_delta_serio->phys));
- err = gpio_request(AMS_DELTA_GPIO_PIN_KEYBRD_DATA, "serio-data");
+ err = gpio_request_array(ams_delta_gpios,
+ ARRAY_SIZE(ams_delta_gpios));
if (err) {
- pr_err("ams_delta_serio: Couldn't request gpio pin for data\n");
+ pr_err("ams_delta_serio: Couldn't request gpio pins\n");
goto serio;
}
- gpio_direction_input(AMS_DELTA_GPIO_PIN_KEYBRD_DATA);
-
- err = gpio_request(AMS_DELTA_GPIO_PIN_KEYBRD_CLK, "serio-clock");
- if (err) {
- pr_err("ams_delta_serio: couldn't request gpio pin for clock\n");
- goto gpio_data;
- }
- gpio_direction_input(AMS_DELTA_GPIO_PIN_KEYBRD_CLK);
err = request_irq(gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK),
ams_delta_serio_interrupt, IRQ_TYPE_EDGE_RISING,
@@ -143,7 +158,7 @@ static int __init ams_delta_serio_init(void)
if (err < 0) {
pr_err("ams_delta_serio: couldn't request gpio interrupt %d\n",
gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK));
- goto gpio_clk;
+ goto gpio;
}
/*
* Since GPIO register handling for keyboard clock pin is performed
@@ -157,10 +172,9 @@ static int __init ams_delta_serio_init(void)
dev_info(&ams_delta_serio->dev, "%s\n", ams_delta_serio->name);
return 0;
-gpio_clk:
- gpio_free(AMS_DELTA_GPIO_PIN_KEYBRD_CLK);
-gpio_data:
- gpio_free(AMS_DELTA_GPIO_PIN_KEYBRD_DATA);
+gpio:
+ gpio_free_array(ams_delta_gpios,
+ ARRAY_SIZE(ams_delta_gpios));
serio:
kfree(ams_delta_serio);
return err;
@@ -171,7 +185,7 @@ static void __exit ams_delta_serio_exit(void)
{
serio_unregister_port(ams_delta_serio);
free_irq(OMAP_GPIO_IRQ(AMS_DELTA_GPIO_PIN_KEYBRD_CLK), 0);
- gpio_free(AMS_DELTA_GPIO_PIN_KEYBRD_CLK);
- gpio_free(AMS_DELTA_GPIO_PIN_KEYBRD_DATA);
+ gpio_free_array(ams_delta_gpios,
+ ARRAY_SIZE(ams_delta_gpios));
}
module_exit(ams_delta_serio_exit);
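gpio_request_array() claims a whole table of pin/flags/label triples and releases the ones it already took if any request fails, which is why the two-label unwind collapses into the single `gpio` label above. A minimal sketch of the pattern:

#include <linux/kernel.h>
#include <linux/gpio.h>

static const struct gpio demo_gpios[] = {
	{ .gpio = 1, .flags = GPIOF_DIR_IN,       .label = "demo-in"  },
	{ .gpio = 2, .flags = GPIOF_OUT_INIT_LOW, .label = "demo-out" },
};

/* Claim everything or nothing: on error the helper has already
 * released the pins it managed to request. */
static int demo_claim(void)
{
	return gpio_request_array(demo_gpios, ARRAY_SIZE(demo_gpios));
}

static void demo_release(void)
{
	gpio_free_array(demo_gpios, ARRAY_SIZE(demo_gpios));
}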
diff --git a/drivers/input/serio/rpckbd.c b/drivers/input/serio/rpckbd.c
index 8b44ddc8041c..58b224498b35 100644
--- a/drivers/input/serio/rpckbd.c
+++ b/drivers/input/serio/rpckbd.c
@@ -36,7 +36,6 @@
#include <linux/io.h>
#include <linux/slab.h>
-#include <asm/irq.h>
#include <mach/hardware.h>
#include <asm/hardware/iomd.h>
#include <asm/system.h>
@@ -46,6 +45,11 @@ MODULE_DESCRIPTION("Acorn RiscPC PS/2 keyboard controller driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:kart");
+struct rpckbd_data {
+ int tx_irq;
+ int rx_irq;
+};
+
static int rpckbd_write(struct serio *port, unsigned char val)
{
while (!(iomd_readb(IOMD_KCTRL) & (1 << 7)))
@@ -78,19 +82,21 @@ static irqreturn_t rpckbd_tx(int irq, void *dev_id)
static int rpckbd_open(struct serio *port)
{
+ struct rpckbd_data *rpckbd = port->port_data;
+
/* Reset the keyboard state machine. */
iomd_writeb(0, IOMD_KCTRL);
iomd_writeb(8, IOMD_KCTRL);
iomd_readb(IOMD_KARTRX);
- if (request_irq(IRQ_KEYBOARDRX, rpckbd_rx, 0, "rpckbd", port) != 0) {
+ if (request_irq(rpckbd->rx_irq, rpckbd_rx, 0, "rpckbd", port) != 0) {
printk(KERN_ERR "rpckbd.c: Could not allocate keyboard receive IRQ\n");
return -EBUSY;
}
- if (request_irq(IRQ_KEYBOARDTX, rpckbd_tx, 0, "rpckbd", port) != 0) {
+ if (request_irq(rpckbd->tx_irq, rpckbd_tx, 0, "rpckbd", port) != 0) {
printk(KERN_ERR "rpckbd.c: Could not allocate keyboard transmit IRQ\n");
- free_irq(IRQ_KEYBOARDRX, port);
+ free_irq(rpckbd->rx_irq, port);
return -EBUSY;
}
@@ -99,8 +105,10 @@ static int rpckbd_open(struct serio *port)
static void rpckbd_close(struct serio *port)
{
- free_irq(IRQ_KEYBOARDRX, port);
- free_irq(IRQ_KEYBOARDTX, port);
+ struct rpckbd_data *rpckbd = port->port_data;
+
+ free_irq(rpckbd->rx_irq, port);
+ free_irq(rpckbd->tx_irq, port);
}
/*
@@ -109,17 +117,35 @@ static void rpckbd_close(struct serio *port)
*/
static int __devinit rpckbd_probe(struct platform_device *dev)
{
+ struct rpckbd_data *rpckbd;
struct serio *serio;
+ int tx_irq, rx_irq;
+
+ rx_irq = platform_get_irq(dev, 0);
+ if (rx_irq <= 0)
+ return rx_irq < 0 ? rx_irq : -ENXIO;
+
+ tx_irq = platform_get_irq(dev, 1);
+ if (tx_irq <= 0)
+ return tx_irq < 0 ? tx_irq : -ENXIO;
serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
- if (!serio)
+ rpckbd = kzalloc(sizeof(*rpckbd), GFP_KERNEL);
+ if (!serio || !rpckbd) {
+ kfree(rpckbd);
+ kfree(serio);
return -ENOMEM;
+ }
+
+ rpckbd->rx_irq = rx_irq;
+ rpckbd->tx_irq = tx_irq;
serio->id.type = SERIO_8042;
serio->write = rpckbd_write;
serio->open = rpckbd_open;
serio->close = rpckbd_close;
serio->dev.parent = &dev->dev;
+ serio->port_data = rpckbd;
strlcpy(serio->name, "RiscPC PS/2 kbd port", sizeof(serio->name));
strlcpy(serio->phys, "rpckbd/serio0", sizeof(serio->phys));
@@ -131,7 +157,11 @@ static int __devinit rpckbd_probe(struct platform_device *dev)
static int __devexit rpckbd_remove(struct platform_device *dev)
{
struct serio *serio = platform_get_drvdata(dev);
+ struct rpckbd_data *rpckbd = serio->port_data;
+
serio_unregister_port(serio);
+ kfree(rpckbd);
+
return 0;
}
diff --git a/drivers/input/serio/sa1111ps2.c b/drivers/input/serio/sa1111ps2.c
index 44fc8b4bcd81..5ebabe3fc845 100644
--- a/drivers/input/serio/sa1111ps2.c
+++ b/drivers/input/serio/sa1111ps2.c
@@ -24,6 +24,26 @@
#include <asm/hardware/sa1111.h>
+#define PS2CR 0x0000
+#define PS2STAT 0x0004
+#define PS2DATA 0x0008
+#define PS2CLKDIV 0x000c
+#define PS2PRECNT 0x0010
+
+#define PS2CR_ENA 0x08
+#define PS2CR_FKD 0x02
+#define PS2CR_FKC 0x01
+
+#define PS2STAT_STP 0x0100
+#define PS2STAT_TXE 0x0080
+#define PS2STAT_TXB 0x0040
+#define PS2STAT_RXF 0x0020
+#define PS2STAT_RXB 0x0010
+#define PS2STAT_ENA 0x0008
+#define PS2STAT_RXP 0x0004
+#define PS2STAT_KBD 0x0002
+#define PS2STAT_KBC 0x0001
+
struct ps2if {
struct serio *io;
struct sa1111_dev *dev;
@@ -45,22 +65,22 @@ static irqreturn_t ps2_rxint(int irq, void *dev_id)
struct ps2if *ps2if = dev_id;
unsigned int scancode, flag, status;
- status = sa1111_readl(ps2if->base + SA1111_PS2STAT);
+ status = sa1111_readl(ps2if->base + PS2STAT);
while (status & PS2STAT_RXF) {
if (status & PS2STAT_STP)
- sa1111_writel(PS2STAT_STP, ps2if->base + SA1111_PS2STAT);
+ sa1111_writel(PS2STAT_STP, ps2if->base + PS2STAT);
flag = (status & PS2STAT_STP ? SERIO_FRAME : 0) |
(status & PS2STAT_RXP ? 0 : SERIO_PARITY);
- scancode = sa1111_readl(ps2if->base + SA1111_PS2DATA) & 0xff;
+ scancode = sa1111_readl(ps2if->base + PS2DATA) & 0xff;
if (hweight8(scancode) & 1)
flag ^= SERIO_PARITY;
serio_interrupt(ps2if->io, scancode, flag);
- status = sa1111_readl(ps2if->base + SA1111_PS2STAT);
+ status = sa1111_readl(ps2if->base + PS2STAT);
}
return IRQ_HANDLED;
@@ -75,12 +95,12 @@ static irqreturn_t ps2_txint(int irq, void *dev_id)
unsigned int status;
spin_lock(&ps2if->lock);
- status = sa1111_readl(ps2if->base + SA1111_PS2STAT);
+ status = sa1111_readl(ps2if->base + PS2STAT);
if (ps2if->head == ps2if->tail) {
disable_irq_nosync(irq);
/* done */
} else if (status & PS2STAT_TXE) {
- sa1111_writel(ps2if->buf[ps2if->tail], ps2if->base + SA1111_PS2DATA);
+ sa1111_writel(ps2if->buf[ps2if->tail], ps2if->base + PS2DATA);
ps2if->tail = (ps2if->tail + 1) & (sizeof(ps2if->buf) - 1);
}
spin_unlock(&ps2if->lock);
@@ -103,8 +123,8 @@ static int ps2_write(struct serio *io, unsigned char val)
/*
* If the TX register is empty, we can go straight out.
*/
- if (sa1111_readl(ps2if->base + SA1111_PS2STAT) & PS2STAT_TXE) {
- sa1111_writel(val, ps2if->base + SA1111_PS2DATA);
+ if (sa1111_readl(ps2if->base + PS2STAT) & PS2STAT_TXE) {
+ sa1111_writel(val, ps2if->base + PS2DATA);
} else {
if (ps2if->head == ps2if->tail)
enable_irq(ps2if->dev->irq[1]);
@@ -124,13 +144,16 @@ static int ps2_open(struct serio *io)
struct ps2if *ps2if = io->port_data;
int ret;
- sa1111_enable_device(ps2if->dev);
+ ret = sa1111_enable_device(ps2if->dev);
+ if (ret)
+ return ret;
ret = request_irq(ps2if->dev->irq[0], ps2_rxint, 0,
SA1111_DRIVER_NAME(ps2if->dev), ps2if);
if (ret) {
printk(KERN_ERR "sa1111ps2: could not allocate IRQ%d: %d\n",
ps2if->dev->irq[0], ret);
+ sa1111_disable_device(ps2if->dev);
return ret;
}
@@ -140,6 +163,7 @@ static int ps2_open(struct serio *io)
printk(KERN_ERR "sa1111ps2: could not allocate IRQ%d: %d\n",
ps2if->dev->irq[1], ret);
free_irq(ps2if->dev->irq[0], ps2if);
+ sa1111_disable_device(ps2if->dev);
return ret;
}
@@ -147,7 +171,7 @@ static int ps2_open(struct serio *io)
enable_irq_wake(ps2if->dev->irq[0]);
- sa1111_writel(PS2CR_ENA, ps2if->base + SA1111_PS2CR);
+ sa1111_writel(PS2CR_ENA, ps2if->base + PS2CR);
return 0;
}
@@ -155,7 +179,7 @@ static void ps2_close(struct serio *io)
{
struct ps2if *ps2if = io->port_data;
- sa1111_writel(0, ps2if->base + SA1111_PS2CR);
+ sa1111_writel(0, ps2if->base + PS2CR);
disable_irq_wake(ps2if->dev->irq[0]);
@@ -175,7 +199,7 @@ static void __devinit ps2_clear_input(struct ps2if *ps2if)
int maxread = 100;
while (maxread--) {
- if ((sa1111_readl(ps2if->base + SA1111_PS2DATA) & 0xff) == 0xff)
+ if ((sa1111_readl(ps2if->base + PS2DATA) & 0xff) == 0xff)
break;
}
}
@@ -185,11 +209,11 @@ static unsigned int __devinit ps2_test_one(struct ps2if *ps2if,
{
unsigned int val;
- sa1111_writel(PS2CR_ENA | mask, ps2if->base + SA1111_PS2CR);
+ sa1111_writel(PS2CR_ENA | mask, ps2if->base + PS2CR);
udelay(2);
- val = sa1111_readl(ps2if->base + SA1111_PS2STAT);
+ val = sa1111_readl(ps2if->base + PS2STAT);
return val & (PS2STAT_KBC | PS2STAT_KBD);
}
@@ -220,7 +244,7 @@ static int __devinit ps2_test(struct ps2if *ps2if)
ret = -ENODEV;
}
- sa1111_writel(0, ps2if->base + SA1111_PS2CR);
+ sa1111_writel(0, ps2if->base + PS2CR);
return ret;
}
@@ -274,8 +298,8 @@ static int __devinit ps2_probe(struct sa1111_dev *dev)
sa1111_enable_device(ps2if->dev);
/* Incoming clock is 8MHz */
- sa1111_writel(0, ps2if->base + SA1111_PS2CLKDIV);
- sa1111_writel(127, ps2if->base + SA1111_PS2PRECNT);
+ sa1111_writel(0, ps2if->base + PS2CLKDIV);
+ sa1111_writel(127, ps2if->base + PS2PRECNT);
/*
* Flush any pending input.
@@ -330,6 +354,7 @@ static int __devexit ps2_remove(struct sa1111_dev *dev)
static struct sa1111_driver ps2_driver = {
.drv = {
.name = "sa1111-ps2",
+ .owner = THIS_MODULE,
},
.devid = SA1111_DEVID_PS2,
.probe = ps2_probe,
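The rx handler's hweight8() trick above implements PS/2 odd parity: the data byte plus the received parity bit must contain an odd number of set bits. A userspace sketch of the same check:

#include <stdio.h>

/* Count set bits in a byte; stands in for the kernel's hweight8(). */
static int popcount8(unsigned char v)
{
	int n = 0;

	while (v) {
		n += v & 1;
		v >>= 1;
	}
	return n;
}

/* PS/2 odd parity: data bits plus the parity bit must sum to odd. */
static int parity_ok(unsigned char scancode, int parity_bit)
{
	return ((popcount8(scancode) + parity_bit) & 1) == 1;
}

int main(void)
{
	printf("%d\n", parity_ok(0xF0, 1));	/* 4 set bits + 1 -> ok  */
	printf("%d\n", parity_ok(0xF0, 0));	/* 4 set bits + 0 -> bad */
	return 0;
}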
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 97b31a0e0525..2a2141915aa0 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -260,7 +260,7 @@ config TOUCHSCREEN_ILI210X
config TOUCHSCREEN_S3C2410
tristate "Samsung S3C2410/generic touchscreen input driver"
- depends on ARCH_S3C2410 || SAMSUNG_DEV_TS
+ depends on ARCH_S3C24XX || SAMSUNG_DEV_TS
select S3C_ADC
help
Say Y here if you have the s3c2410 touchscreen.
diff --git a/drivers/input/touchscreen/jornada720_ts.c b/drivers/input/touchscreen/jornada720_ts.c
index c3848ad2325b..d9be6eac99b1 100644
--- a/drivers/input/touchscreen/jornada720_ts.c
+++ b/drivers/input/touchscreen/jornada720_ts.c
@@ -22,6 +22,7 @@
#include <mach/hardware.h>
#include <mach/jornada720.h>
+#include <mach/irqs.h>
MODULE_AUTHOR("Kristoffer Ericson <kristoffer.ericson@gmail.com>");
MODULE_DESCRIPTION("HP Jornada 710/720/728 touchscreen driver");
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 589ba02d65a2..ff4b8cfda585 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -69,18 +69,11 @@ config LEDS_MIKROTIK_RB532
config LEDS_S3C24XX
tristate "LED Support for Samsung S3C24XX GPIO LEDs"
depends on LEDS_CLASS
- depends on ARCH_S3C2410
+ depends on ARCH_S3C24XX
help
This option enables support for LEDs connected to GPIO lines
on Samsung S3C24XX series CPUs, such as the S3C2410 and S3C2440.
-config LEDS_AMS_DELTA
- tristate "LED Support for the Amstrad Delta (E3)"
- depends on LEDS_CLASS
- depends on MACH_AMS_DELTA
- help
- This option enables support for the LEDs on Amstrad Delta (E3).
-
config LEDS_NET48XX
tristate "LED Support for Soekris net48xx series Error LED"
depends on LEDS_CLASS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index fa0f428b32fe..890481cb09f6 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -12,7 +12,6 @@ obj-$(CONFIG_LEDS_LOCOMO) += leds-locomo.o
obj-$(CONFIG_LEDS_LM3530) += leds-lm3530.o
obj-$(CONFIG_LEDS_MIKROTIK_RB532) += leds-rb532.o
obj-$(CONFIG_LEDS_S3C24XX) += leds-s3c24xx.o
-obj-$(CONFIG_LEDS_AMS_DELTA) += leds-ams-delta.o
obj-$(CONFIG_LEDS_NET48XX) += leds-net48xx.o
obj-$(CONFIG_LEDS_WRAP) += leds-wrap.o
obj-$(CONFIG_LEDS_COBALT_QUBE) += leds-cobalt-qube.o
diff --git a/drivers/leds/leds-ams-delta.c b/drivers/leds/leds-ams-delta.c
deleted file mode 100644
index 07428357c83f..000000000000
--- a/drivers/leds/leds-ams-delta.c
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * LEDs driver for Amstrad Delta (E3)
- *
- * Copyright (C) 2006 Jonathan McDowell <noodles@earth.li>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/leds.h>
-#include <plat/board-ams-delta.h>
-
-/*
- * Our context
- */
-struct ams_delta_led {
- struct led_classdev cdev;
- u8 bitmask;
-};
-
-static void ams_delta_led_set(struct led_classdev *led_cdev,
- enum led_brightness value)
-{
- struct ams_delta_led *led_dev =
- container_of(led_cdev, struct ams_delta_led, cdev);
-
- if (value)
- ams_delta_latch1_write(led_dev->bitmask, led_dev->bitmask);
- else
- ams_delta_latch1_write(led_dev->bitmask, 0);
-}
-
-static struct ams_delta_led ams_delta_leds[] = {
- {
- .cdev = {
- .name = "ams-delta::camera",
- .brightness_set = ams_delta_led_set,
- },
- .bitmask = AMS_DELTA_LATCH1_LED_CAMERA,
- },
- {
- .cdev = {
- .name = "ams-delta::advert",
- .brightness_set = ams_delta_led_set,
- },
- .bitmask = AMS_DELTA_LATCH1_LED_ADVERT,
- },
- {
- .cdev = {
- .name = "ams-delta::email",
- .brightness_set = ams_delta_led_set,
- },
- .bitmask = AMS_DELTA_LATCH1_LED_EMAIL,
- },
- {
- .cdev = {
- .name = "ams-delta::handsfree",
- .brightness_set = ams_delta_led_set,
- },
- .bitmask = AMS_DELTA_LATCH1_LED_HANDSFREE,
- },
- {
- .cdev = {
- .name = "ams-delta::voicemail",
- .brightness_set = ams_delta_led_set,
- },
- .bitmask = AMS_DELTA_LATCH1_LED_VOICEMAIL,
- },
- {
- .cdev = {
- .name = "ams-delta::voice",
- .brightness_set = ams_delta_led_set,
- },
- .bitmask = AMS_DELTA_LATCH1_LED_VOICE,
- },
-};
-
-static int ams_delta_led_probe(struct platform_device *pdev)
-{
- int i, ret;
-
- for (i = 0; i < ARRAY_SIZE(ams_delta_leds); i++) {
- ams_delta_leds[i].cdev.flags |= LED_CORE_SUSPENDRESUME;
- ret = led_classdev_register(&pdev->dev,
- &ams_delta_leds[i].cdev);
- if (ret < 0)
- goto fail;
- }
-
- return 0;
-fail:
- while (--i >= 0)
- led_classdev_unregister(&ams_delta_leds[i].cdev);
- return ret;
-}
-
-static int ams_delta_led_remove(struct platform_device *pdev)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(ams_delta_leds); i++)
- led_classdev_unregister(&ams_delta_leds[i].cdev);
-
- return 0;
-}
-
-static struct platform_driver ams_delta_led_driver = {
- .probe = ams_delta_led_probe,
- .remove = ams_delta_led_remove,
- .driver = {
- .name = "ams-delta-led",
- .owner = THIS_MODULE,
- },
-};
-
-module_platform_driver(ams_delta_led_driver);
-
-MODULE_AUTHOR("Jonathan McDowell <noodles@earth.li>");
-MODULE_DESCRIPTION("Amstrad Delta LED driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:ams-delta-led");
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index faa4741df6d3..10f122a3a856 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -277,8 +277,8 @@ config DM_MIRROR
needed for live data migration tools such as 'pvmove'.
config DM_RAID
- tristate "RAID 1/4/5/6 target (EXPERIMENTAL)"
- depends on BLK_DEV_DM && EXPERIMENTAL
+ tristate "RAID 1/4/5/6 target"
+ depends on BLK_DEV_DM
select MD_RAID1
select MD_RAID456
select BLK_DEV_MD
@@ -359,8 +359,8 @@ config DM_DELAY
If unsure, say N.
config DM_UEVENT
- bool "DM uevents (EXPERIMENTAL)"
- depends on BLK_DEV_DM && EXPERIMENTAL
+ bool "DM uevents"
+ depends on BLK_DEV_DM
---help---
Generate udev events for DM events.
@@ -370,4 +370,24 @@ config DM_FLAKEY
---help---
A target that intermittently fails I/O for debugging purposes.
+config DM_VERITY
+ tristate "Verity target support (EXPERIMENTAL)"
+ depends on BLK_DEV_DM && EXPERIMENTAL
+ select CRYPTO
+ select CRYPTO_HASH
+ select DM_BUFIO
+ ---help---
+ This device-mapper target creates a read-only device that
+ transparently validates the data on one underlying device against
+ a pre-generated tree of cryptographic checksums stored on a second
+ device.
+
+ You'll need to activate the digests you're going to use in the
+ cryptoapi configuration.
+
+ To compile this code as a module, choose M here: the module will
+ be called dm-verity.
+
+ If unsure, say N.
+
endif # MD
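The help text describes a hash tree: each hash block covers block_size / digest_size lower-level blocks, so locating the hashes that vouch for a data block is pure index arithmetic. A hedged sketch of that walk (geometry simplified; this is not dm-verity's on-disk layout):

#include <stdio.h>

/* Walk from a data block up toward the root, printing which hash
 * block covers it at each level of the tree. */
static void verify_path(unsigned long long block, unsigned fanout,
			unsigned levels)
{
	unsigned l;

	for (l = 0; l < levels; l++) {
		block /= fanout;
		printf("level %u: hash block %llu\n", l, block);
	}
}

int main(void)
{
	/* 4K blocks with 32-byte digests give a fanout of 128;
	 * three levels are enough to cover ~2M data blocks */
	verify_path(1000000, 128, 3);
	return 0;
}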
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 046860c7a166..8b2e0dffe82e 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -42,6 +42,7 @@ obj-$(CONFIG_DM_LOG_USERSPACE) += dm-log-userspace.o
obj-$(CONFIG_DM_ZERO) += dm-zero.o
obj-$(CONFIG_DM_RAID) += dm-raid.o
obj-$(CONFIG_DM_THIN_PROVISIONING) += dm-thin-pool.o
+obj-$(CONFIG_DM_VERITY) += dm-verity.o
ifeq ($(CONFIG_DM_UEVENT),y)
dm-mod-objs += dm-uevent.o
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index b6e58c7b6df5..cc06a1e52423 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -578,7 +578,7 @@ static void write_endio(struct bio *bio, int error)
struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);
b->write_error = error;
- if (error) {
+ if (unlikely(error)) {
struct dm_bufio_client *c = b->c;
(void)cmpxchg(&c->async_write_error, 0, error);
}
@@ -697,13 +697,20 @@ static void __wait_for_free_buffer(struct dm_bufio_client *c)
dm_bufio_lock(c);
}
+enum new_flag {
+ NF_FRESH = 0,
+ NF_READ = 1,
+ NF_GET = 2,
+ NF_PREFETCH = 3
+};
+
/*
* Allocate a new buffer. If the allocation is not possible, wait until
* some other thread frees a buffer.
*
* May drop the lock and regain it.
*/
-static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c)
+static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
{
struct dm_buffer *b;
@@ -726,6 +733,9 @@ static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client
return b;
}
+ if (nf == NF_PREFETCH)
+ return NULL;
+
if (!list_empty(&c->reserved_buffers)) {
b = list_entry(c->reserved_buffers.next,
struct dm_buffer, lru_list);
@@ -743,9 +753,12 @@ static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client
}
}
-static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c)
+static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
{
- struct dm_buffer *b = __alloc_buffer_wait_no_callback(c);
+ struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);
+
+ if (!b)
+ return NULL;
if (c->alloc_callback)
c->alloc_callback(b);
@@ -865,32 +878,23 @@ static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
* Getting a buffer
*--------------------------------------------------------------*/
-enum new_flag {
- NF_FRESH = 0,
- NF_READ = 1,
- NF_GET = 2
-};
-
static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
- enum new_flag nf, struct dm_buffer **bp,
- int *need_submit)
+ enum new_flag nf, int *need_submit)
{
struct dm_buffer *b, *new_b = NULL;
*need_submit = 0;
b = __find(c, block);
- if (b) {
- b->hold_count++;
- __relink_lru(b, test_bit(B_DIRTY, &b->state) ||
- test_bit(B_WRITING, &b->state));
- return b;
- }
+ if (b)
+ goto found_buffer;
if (nf == NF_GET)
return NULL;
- new_b = __alloc_buffer_wait(c);
+ new_b = __alloc_buffer_wait(c, nf);
+ if (!new_b)
+ return NULL;
/*
* We've had a period where the mutex was unlocked, so need to
@@ -899,10 +903,7 @@ static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
b = __find(c, block);
if (b) {
__free_buffer_wake(new_b);
- b->hold_count++;
- __relink_lru(b, test_bit(B_DIRTY, &b->state) ||
- test_bit(B_WRITING, &b->state));
- return b;
+ goto found_buffer;
}
__check_watermark(c);
@@ -922,6 +923,24 @@ static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
*need_submit = 1;
return b;
+
+found_buffer:
+ if (nf == NF_PREFETCH)
+ return NULL;
+ /*
+ * Note: it is essential that we don't wait for the buffer to be
+ * read if dm_bufio_get function is used. Both dm_bufio_get and
+ * dm_bufio_prefetch can be used in the driver request routine.
+ * If the user called both dm_bufio_prefetch and dm_bufio_get on
+ * the same buffer, it would deadlock if we waited.
+ */
+ if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
+ return NULL;
+
+ b->hold_count++;
+ __relink_lru(b, test_bit(B_DIRTY, &b->state) ||
+ test_bit(B_WRITING, &b->state));
+ return b;
}
/*
@@ -956,10 +975,10 @@ static void *new_read(struct dm_bufio_client *c, sector_t block,
struct dm_buffer *b;
dm_bufio_lock(c);
- b = __bufio_new(c, block, nf, bp, &need_submit);
+ b = __bufio_new(c, block, nf, &need_submit);
dm_bufio_unlock(c);
- if (!b || IS_ERR(b))
+ if (!b)
return b;
if (need_submit)
@@ -1005,13 +1024,47 @@ void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
}
EXPORT_SYMBOL_GPL(dm_bufio_new);
+void dm_bufio_prefetch(struct dm_bufio_client *c,
+ sector_t block, unsigned n_blocks)
+{
+ struct blk_plug plug;
+
+ blk_start_plug(&plug);
+ dm_bufio_lock(c);
+
+ for (; n_blocks--; block++) {
+ int need_submit;
+ struct dm_buffer *b;
+ b = __bufio_new(c, block, NF_PREFETCH, &need_submit);
+ if (unlikely(b != NULL)) {
+ dm_bufio_unlock(c);
+
+ if (need_submit)
+ submit_io(b, READ, b->block, read_endio);
+ dm_bufio_release(b);
+
+ dm_bufio_cond_resched();
+
+ if (!n_blocks)
+ goto flush_plug;
+ dm_bufio_lock(c);
+ }
+
+ }
+
+ dm_bufio_unlock(c);
+
+flush_plug:
+ blk_finish_plug(&plug);
+}
+EXPORT_SYMBOL_GPL(dm_bufio_prefetch);
+
void dm_bufio_release(struct dm_buffer *b)
{
struct dm_bufio_client *c = b->c;
dm_bufio_lock(c);
- BUG_ON(test_bit(B_READING, &b->state));
BUG_ON(!b->hold_count);
b->hold_count--;
@@ -1024,6 +1077,7 @@ void dm_bufio_release(struct dm_buffer *b)
* invalid buffer.
*/
if ((b->read_error || b->write_error) &&
+ !test_bit(B_READING, &b->state) &&
!test_bit(B_WRITING, &b->state) &&
!test_bit(B_DIRTY, &b->state)) {
__unlink_buffer(b);
@@ -1041,6 +1095,8 @@ void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
dm_bufio_lock(c);
+ BUG_ON(test_bit(B_READING, &b->state));
+
if (!test_and_set_bit(B_DIRTY, &b->state))
__relink_lru(b, LIST_DIRTY);
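A possible usage shape for the new call (client setup omitted): prefetch a window of blocks without blocking, then read only the block actually needed; dm_bufio_read() will then find it cached or already in flight.

#include <linux/err.h>
#include "dm-bufio.h"

static int read_with_readahead(struct dm_bufio_client *c, sector_t block)
{
	struct dm_buffer *bp;
	void *data;

	dm_bufio_prefetch(c, block, 8);		/* non-blocking read-ahead */
	data = dm_bufio_read(c, block, &bp);	/* waits for this block only */
	if (IS_ERR(data))
		return PTR_ERR(data);
	/* ... use data ... */
	dm_bufio_release(bp);
	return 0;
}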
diff --git a/drivers/md/dm-bufio.h b/drivers/md/dm-bufio.h
index 5c4c3a04e381..b142946a9e32 100644
--- a/drivers/md/dm-bufio.h
+++ b/drivers/md/dm-bufio.h
@@ -63,6 +63,14 @@ void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
struct dm_buffer **bp);
/*
+ * Prefetch the specified blocks into the cache.
+ * This function starts reading the blocks and returns without waiting
+ * for the I/O to finish.
+ */
+void dm_bufio_prefetch(struct dm_bufio_client *c,
+ sector_t block, unsigned n_blocks);
+
+/*
* Release a reference obtained with dm_bufio_{read,get,new}. The data
* pointer and dm_buffer pointer are no longer valid after this call.
*/
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index db6b51639cee..3f06df59fd82 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -176,7 +176,6 @@ struct crypt_config {
#define MIN_IOS 16
#define MIN_POOL_PAGES 32
-#define MIN_BIO_PAGES 8
static struct kmem_cache *_crypt_io_pool;
@@ -848,12 +847,11 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
}
/*
- * if additional pages cannot be allocated without waiting,
- * return a partially allocated bio, the caller will then try
- * to allocate additional bios while submitting this partial bio
+ * If additional pages cannot be allocated without waiting,
+ * return a partially-allocated bio. The caller will then try
+ * to allocate more bios while submitting this partial bio.
*/
- if (i == (MIN_BIO_PAGES - 1))
- gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
+ gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
len = (size > PAGE_SIZE) ? PAGE_SIZE : size;
@@ -1046,16 +1044,14 @@ static void kcryptd_queue_io(struct dm_crypt_io *io)
queue_work(cc->io_queue, &io->work);
}
-static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
- int error, int async)
+static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
{
struct bio *clone = io->ctx.bio_out;
struct crypt_config *cc = io->target->private;
- if (unlikely(error < 0)) {
+ if (unlikely(io->error < 0)) {
crypt_free_buffer_pages(cc, clone);
bio_put(clone);
- io->error = -EIO;
crypt_dec_pending(io);
return;
}
@@ -1106,12 +1102,16 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
sector += bio_sectors(clone);
crypt_inc_pending(io);
+
r = crypt_convert(cc, &io->ctx);
+ if (r < 0)
+ io->error = -EIO;
+
crypt_finished = atomic_dec_and_test(&io->ctx.pending);
/* Encryption was already finished, submit io now */
if (crypt_finished) {
- kcryptd_crypt_write_io_submit(io, r, 0);
+ kcryptd_crypt_write_io_submit(io, 0);
/*
* If there was an error, do not try next fragments.
@@ -1162,11 +1162,8 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
crypt_dec_pending(io);
}
-static void kcryptd_crypt_read_done(struct dm_crypt_io *io, int error)
+static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
{
- if (unlikely(error < 0))
- io->error = -EIO;
-
crypt_dec_pending(io);
}
@@ -1181,9 +1178,11 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
io->sector);
r = crypt_convert(cc, &io->ctx);
+ if (r < 0)
+ io->error = -EIO;
if (atomic_dec_and_test(&io->ctx.pending))
- kcryptd_crypt_read_done(io, r);
+ kcryptd_crypt_read_done(io);
crypt_dec_pending(io);
}
@@ -1204,15 +1203,18 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);
+ if (error < 0)
+ io->error = -EIO;
+
mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);
if (!atomic_dec_and_test(&ctx->pending))
return;
if (bio_data_dir(io->base_bio) == READ)
- kcryptd_crypt_read_done(io, error);
+ kcryptd_crypt_read_done(io);
else
- kcryptd_crypt_write_io_submit(io, error, 1);
+ kcryptd_crypt_write_io_submit(io, 1);
}
static void kcryptd_crypt(struct work_struct *work)
@@ -1413,6 +1415,7 @@ static int crypt_ctr_cipher(struct dm_target *ti,
char *tmp, *cipher, *chainmode, *ivmode, *ivopts, *keycount;
char *cipher_api = NULL;
int cpu, ret = -EINVAL;
+ char dummy;
/* Convert to crypto api definition? */
if (strchr(cipher_in, '(')) {
@@ -1434,7 +1437,7 @@ static int crypt_ctr_cipher(struct dm_target *ti,
if (!keycount)
cc->tfms_count = 1;
- else if (sscanf(keycount, "%u", &cc->tfms_count) != 1 ||
+ else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 ||
!is_power_of_2(cc->tfms_count)) {
ti->error = "Bad cipher key count specification";
return -EINVAL;
@@ -1579,6 +1582,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
int ret;
struct dm_arg_set as;
const char *opt_string;
+ char dummy;
static struct dm_arg _args[] = {
{0, 1, "Invalid number of feature args"},
@@ -1636,7 +1640,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
ret = -EINVAL;
- if (sscanf(argv[2], "%llu", &tmpll) != 1) {
+ if (sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) {
ti->error = "Invalid iv_offset sector";
goto bad;
}
@@ -1647,7 +1651,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
}
- if (sscanf(argv[4], "%llu", &tmpll) != 1) {
+ if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
ti->error = "Invalid device sector";
goto bad;
}
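The recurring "%llu%c"/"%u%c" change in this series is a standard-C idiom: the trailing %c only matches if characters remain after the number, bumping sscanf's return value past 1, which the callers treat as an error. A small demonstration:

#include <stdio.h>

/* Accept a sector number only if nothing follows the digits. */
static int parse_sector(const char *s, unsigned long long *out)
{
	char dummy;

	/* returns 1 for a clean number, 2 if trailing characters matched
	 * the %c, and EOF/0 if no number was parsed at all */
	return sscanf(s, "%llu%c", out, &dummy) == 1 ? 0 : -1;
}

int main(void)
{
	unsigned long long v;

	printf("%d\n", parse_sector("1024", &v));	/* 0: ok */
	printf("%d\n", parse_sector("1024x", &v));	/* -1: trailing junk */
	printf("%d\n", parse_sector("", &v));		/* -1: no number */
	return 0;
}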
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index f18375dcedd9..2dc22dddb2ae 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -131,6 +131,7 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
struct delay_c *dc;
unsigned long long tmpll;
+ char dummy;
if (argc != 3 && argc != 6) {
ti->error = "requires exactly 3 or 6 arguments";
@@ -145,13 +146,13 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
dc->reads = dc->writes = 0;
- if (sscanf(argv[1], "%llu", &tmpll) != 1) {
+ if (sscanf(argv[1], "%llu%c", &tmpll, &dummy) != 1) {
ti->error = "Invalid device sector";
goto bad;
}
dc->start_read = tmpll;
- if (sscanf(argv[2], "%u", &dc->read_delay) != 1) {
+ if (sscanf(argv[2], "%u%c", &dc->read_delay, &dummy) != 1) {
ti->error = "Invalid delay";
goto bad;
}
@@ -166,13 +167,13 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (argc == 3)
goto out;
- if (sscanf(argv[4], "%llu", &tmpll) != 1) {
+ if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
ti->error = "Invalid write device sector";
goto bad_dev_read;
}
dc->start_write = tmpll;
- if (sscanf(argv[5], "%u", &dc->write_delay) != 1) {
+ if (sscanf(argv[5], "%u%c", &dc->write_delay, &dummy) != 1) {
ti->error = "Invalid write delay";
goto bad_dev_read;
}
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index 042e71996569..aa70f7d43a1a 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -283,7 +283,7 @@ int dm_exception_store_init(void)
return 0;
persistent_fail:
- dm_persistent_snapshot_exit();
+ dm_transient_snapshot_exit();
transient_fail:
return r;
}
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index b280c433e4a0..ac49c01f1a44 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -160,6 +160,7 @@ static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
unsigned long long tmpll;
struct dm_arg_set as;
const char *devname;
+ char dummy;
as.argc = argc;
as.argv = argv;
@@ -178,7 +179,7 @@ static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
devname = dm_shift_arg(&as);
- if (sscanf(dm_shift_arg(&as), "%llu", &tmpll) != 1) {
+ if (sscanf(dm_shift_arg(&as), "%llu%c", &tmpll, &dummy) != 1) {
ti->error = "Invalid device sector";
goto bad;
}
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 1ce84ed0b765..a1a3e6df17b8 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -880,6 +880,7 @@ static int dev_set_geometry(struct dm_ioctl *param, size_t param_size)
struct hd_geometry geometry;
unsigned long indata[4];
char *geostr = (char *) param + param->data_start;
+ char dummy;
md = find_device(param);
if (!md)
@@ -891,8 +892,8 @@ static int dev_set_geometry(struct dm_ioctl *param, size_t param_size)
goto out;
}
- x = sscanf(geostr, "%lu %lu %lu %lu", indata,
- indata + 1, indata + 2, indata + 3);
+ x = sscanf(geostr, "%lu %lu %lu %lu%c", indata,
+ indata + 1, indata + 2, indata + 3, &dummy);
if (x != 4) {
DMWARN("Unable to interpret geometry settings.");
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 9728839f844a..3639eeab6042 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -29,6 +29,7 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
struct linear_c *lc;
unsigned long long tmp;
+ char dummy;
if (argc != 2) {
ti->error = "Invalid argument count";
@@ -41,7 +42,7 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
return -ENOMEM;
}
- if (sscanf(argv[1], "%llu", &tmp) != 1) {
+ if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1) {
ti->error = "dm-linear: Invalid device sector";
goto bad;
}
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 3b52bb72bd1f..65ebaebf502b 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -369,6 +369,7 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
unsigned int region_count;
size_t bitset_size, buf_size;
int r;
+ char dummy;
if (argc < 1 || argc > 2) {
DMWARN("wrong number of arguments to dirty region log");
@@ -387,7 +388,7 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
}
}
- if (sscanf(argv[0], "%u", &region_size) != 1 ||
+ if (sscanf(argv[0], "%u%c", &region_size, &dummy) != 1 ||
!_check_region_size(ti, region_size)) {
DMWARN("invalid region size %s", argv[0]);
return -EINVAL;
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 801d92d237cf..922a3385eead 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -226,6 +226,27 @@ static void free_multipath(struct multipath *m)
kfree(m);
}
+static int set_mapinfo(struct multipath *m, union map_info *info)
+{
+ struct dm_mpath_io *mpio;
+
+ mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
+ if (!mpio)
+ return -ENOMEM;
+
+ memset(mpio, 0, sizeof(*mpio));
+ info->ptr = mpio;
+
+ return 0;
+}
+
+static void clear_mapinfo(struct multipath *m, union map_info *info)
+{
+ struct dm_mpath_io *mpio = info->ptr;
+
+ info->ptr = NULL;
+ mempool_free(mpio, m->mpio_pool);
+}
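/*
 * Aside (sketch with stand-in types, not the mpath structures): the
 * value of the two helpers above is that map_info->ptr is assigned in
 * exactly one place and cleared in exactly one place, so no path can
 * leave it dangling.
 */
#include <stdlib.h>

struct mapctx { void *ptr; };

static void *pool_alloc(void) { return calloc(1, 32); }	/* mempool stand-in */
static void pool_free(void *m) { free(m); }

static int set_info(struct mapctx *c)
{
	void *io = pool_alloc();

	if (!io)
		return -1;	/* caller requeues, as multipath_map does */
	c->ptr = io;
	return 0;
}

static void clear_info(struct mapctx *c)
{
	pool_free(c->ptr);
	c->ptr = NULL;
}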
/*-----------------------------------------------
* Path selection
@@ -341,13 +362,14 @@ static int __must_push_back(struct multipath *m)
}
static int map_io(struct multipath *m, struct request *clone,
- struct dm_mpath_io *mpio, unsigned was_queued)
+ union map_info *map_context, unsigned was_queued)
{
int r = DM_MAPIO_REMAPPED;
size_t nr_bytes = blk_rq_bytes(clone);
unsigned long flags;
struct pgpath *pgpath;
struct block_device *bdev;
+ struct dm_mpath_io *mpio = map_context->ptr;
spin_lock_irqsave(&m->lock, flags);
@@ -423,7 +445,6 @@ static void dispatch_queued_ios(struct multipath *m)
{
int r;
unsigned long flags;
- struct dm_mpath_io *mpio;
union map_info *info;
struct request *clone, *n;
LIST_HEAD(cl);
@@ -436,16 +457,15 @@ static void dispatch_queued_ios(struct multipath *m)
list_del_init(&clone->queuelist);
info = dm_get_rq_mapinfo(clone);
- mpio = info->ptr;
- r = map_io(m, clone, mpio, 1);
+ r = map_io(m, clone, info, 1);
if (r < 0) {
- mempool_free(mpio, m->mpio_pool);
+ clear_mapinfo(m, info);
dm_kill_unmapped_request(clone, r);
} else if (r == DM_MAPIO_REMAPPED)
dm_dispatch_request(clone);
else if (r == DM_MAPIO_REQUEUE) {
- mempool_free(mpio, m->mpio_pool);
+ clear_mapinfo(m, info);
dm_requeue_unmapped_request(clone);
}
}
@@ -908,20 +928,16 @@ static int multipath_map(struct dm_target *ti, struct request *clone,
union map_info *map_context)
{
int r;
- struct dm_mpath_io *mpio;
struct multipath *m = (struct multipath *) ti->private;
- mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
- if (!mpio)
+ if (set_mapinfo(m, map_context) < 0)
/* ENOMEM, requeue */
return DM_MAPIO_REQUEUE;
- memset(mpio, 0, sizeof(*mpio));
- map_context->ptr = mpio;
clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
- r = map_io(m, clone, mpio, 0);
+ r = map_io(m, clone, map_context, 0);
if (r < 0 || r == DM_MAPIO_REQUEUE)
- mempool_free(mpio, m->mpio_pool);
+ clear_mapinfo(m, map_context);
return r;
}
@@ -1054,8 +1070,9 @@ static int switch_pg_num(struct multipath *m, const char *pgstr)
struct priority_group *pg;
unsigned pgnum;
unsigned long flags;
+ char dummy;
- if (!pgstr || (sscanf(pgstr, "%u", &pgnum) != 1) || !pgnum ||
+ if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
(pgnum > m->nr_priority_groups)) {
DMWARN("invalid PG number supplied to switch_pg_num");
return -EINVAL;
@@ -1085,8 +1102,9 @@ static int bypass_pg_num(struct multipath *m, const char *pgstr, int bypassed)
{
struct priority_group *pg;
unsigned pgnum;
+ char dummy;
- if (!pgstr || (sscanf(pgstr, "%u", &pgnum) != 1) || !pgnum ||
+ if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
(pgnum > m->nr_priority_groups)) {
DMWARN("invalid PG number supplied to bypass_pg");
return -EINVAL;
@@ -1261,13 +1279,15 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
struct path_selector *ps;
int r;
+ BUG_ON(!mpio);
+
r = do_end_io(m, clone, error, mpio);
if (pgpath) {
ps = &pgpath->pg->ps;
if (ps->type->end_io)
ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
}
- mempool_free(mpio, m->mpio_pool);
+ clear_mapinfo(m, map_context);
return r;
}
diff --git a/drivers/md/dm-queue-length.c b/drivers/md/dm-queue-length.c
index 03a837aa5ce6..3941fae0de9f 100644
--- a/drivers/md/dm-queue-length.c
+++ b/drivers/md/dm-queue-length.c
@@ -112,6 +112,7 @@ static int ql_add_path(struct path_selector *ps, struct dm_path *path,
struct selector *s = ps->context;
struct path_info *pi;
unsigned repeat_count = QL_MIN_IO;
+ char dummy;
/*
* Arguments: [<repeat_count>]
@@ -123,7 +124,7 @@ static int ql_add_path(struct path_selector *ps, struct dm_path *path,
return -EINVAL;
}
- if ((argc == 1) && (sscanf(argv[0], "%u", &repeat_count) != 1)) {
+ if ((argc == 1) && (sscanf(argv[0], "%u%c", &repeat_count, &dummy) != 1)) {
*error = "queue-length ps: invalid repeat count";
return -EINVAL;
}
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index c5a875d7b882..b0ba52459ed7 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -604,7 +604,9 @@ static int read_disk_sb(struct md_rdev *rdev, int size)
return 0;
if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, 1)) {
- DMERR("Failed to read device superblock");
+ DMERR("Failed to read superblock of device at position %d",
+ rdev->raid_disk);
+ set_bit(Faulty, &rdev->flags);
return -EINVAL;
}
@@ -855,9 +857,25 @@ static int super_validate(struct mddev *mddev, struct md_rdev *rdev)
static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
{
int ret;
+ unsigned redundancy = 0;
+ struct raid_dev *dev;
struct md_rdev *rdev, *freshest;
struct mddev *mddev = &rs->md;
+ switch (rs->raid_type->level) {
+ case 1:
+ redundancy = rs->md.raid_disks - 1;
+ break;
+ case 4:
+ case 5:
+ case 6:
+ redundancy = rs->raid_type->parity_devs;
+ break;
+ default:
+ ti->error = "Unknown RAID type";
+ return -EINVAL;
+ }
+
freshest = NULL;
rdev_for_each(rdev, mddev) {
if (!rdev->meta_bdev)
@@ -872,6 +890,37 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
case 0:
break;
default:
+ dev = container_of(rdev, struct raid_dev, rdev);
+ if (redundancy--) {
+ if (dev->meta_dev)
+ dm_put_device(ti, dev->meta_dev);
+
+ dev->meta_dev = NULL;
+ rdev->meta_bdev = NULL;
+
+ if (rdev->sb_page)
+ put_page(rdev->sb_page);
+
+ rdev->sb_page = NULL;
+
+ rdev->sb_loaded = 0;
+
+ /*
+ * We might be able to salvage the data device
+ * even though the meta device has failed. For
+ * now, we behave as though '- -' had been
+ * set for this device in the table.
+ */
+ if (dev->data_dev)
+ dm_put_device(ti, dev->data_dev);
+
+ dev->data_dev = NULL;
+ rdev->bdev = NULL;
+
+ list_del(&rdev->same_set);
+
+ continue;
+ }
ti->error = "Failed to load superblock";
return ret;
}
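In effect, redundancy is a failure budget: each unreadable superblock decrements it and the device is dropped from the set as though '- -' had been given in the table, and only once the budget is exhausted does the constructor fail. For example, a 5-device raid6 set starts with redundancy = parity_devs = 2, so two bad superblocks are tolerated and a third aborts the load; a 3-way raid1 tolerates two.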
@@ -1214,7 +1263,7 @@ static void raid_resume(struct dm_target *ti)
static struct target_type raid_target = {
.name = "raid",
- .version = {1, 1, 0},
+ .version = {1, 2, 0},
.module = THIS_MODULE,
.ctr = raid_ctr,
.dtr = raid_dtr,
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 9bfd057be686..d039de8322f0 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -924,8 +924,9 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
unsigned int mirror, char **argv)
{
unsigned long long offset;
+ char dummy;
- if (sscanf(argv[1], "%llu", &offset) != 1) {
+ if (sscanf(argv[1], "%llu%c", &offset, &dummy) != 1) {
ti->error = "Invalid offset";
return -EINVAL;
}
@@ -953,13 +954,14 @@ static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
{
unsigned param_count;
struct dm_dirty_log *dl;
+ char dummy;
if (argc < 2) {
ti->error = "Insufficient mirror log arguments";
return NULL;
}
- if (sscanf(argv[1], "%u", &param_count) != 1) {
+ if (sscanf(argv[1], "%u%c", &param_count, &dummy) != 1) {
ti->error = "Invalid mirror log argument count";
return NULL;
}
@@ -986,13 +988,14 @@ static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
{
unsigned num_features;
struct dm_target *ti = ms->ti;
+ char dummy;
*args_used = 0;
if (!argc)
return 0;
- if (sscanf(argv[0], "%u", &num_features) != 1) {
+ if (sscanf(argv[0], "%u%c", &num_features, &dummy) != 1) {
ti->error = "Invalid number of features";
return -EINVAL;
}
@@ -1036,6 +1039,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
unsigned int nr_mirrors, m, args_used;
struct mirror_set *ms;
struct dm_dirty_log *dl;
+ char dummy;
dl = create_dirty_log(ti, argc, argv, &args_used);
if (!dl)
@@ -1044,7 +1048,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
argv += args_used;
argc -= args_used;
- if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 ||
+ if (!argc || sscanf(argv[0], "%u%c", &nr_mirrors, &dummy) != 1 ||
nr_mirrors < 2 || nr_mirrors > DM_KCOPYD_MAX_REGIONS + 1) {
ti->error = "Invalid number of mirrors";
dm_dirty_log_destroy(dl);
diff --git a/drivers/md/dm-round-robin.c b/drivers/md/dm-round-robin.c
index 27f1d423b76c..6ab1192cdd5f 100644
--- a/drivers/md/dm-round-robin.c
+++ b/drivers/md/dm-round-robin.c
@@ -114,6 +114,7 @@ static int rr_add_path(struct path_selector *ps, struct dm_path *path,
struct selector *s = (struct selector *) ps->context;
struct path_info *pi;
unsigned repeat_count = RR_MIN_IO;
+ char dummy;
if (argc > 1) {
*error = "round-robin ps: incorrect number of arguments";
@@ -121,7 +122,7 @@ static int rr_add_path(struct path_selector *ps, struct dm_path *path,
}
/* First path argument is number of I/Os before switching path */
- if ((argc == 1) && (sscanf(argv[0], "%u", &repeat_count) != 1)) {
+ if ((argc == 1) && (sscanf(argv[0], "%u%c", &repeat_count, &dummy) != 1)) {
*error = "round-robin ps: invalid repeat count";
return -EINVAL;
}
diff --git a/drivers/md/dm-service-time.c b/drivers/md/dm-service-time.c
index 59883bd78214..9df8f6bd6418 100644
--- a/drivers/md/dm-service-time.c
+++ b/drivers/md/dm-service-time.c
@@ -110,6 +110,7 @@ static int st_add_path(struct path_selector *ps, struct dm_path *path,
struct path_info *pi;
unsigned repeat_count = ST_MIN_IO;
unsigned relative_throughput = 1;
+ char dummy;
/*
* Arguments: [<repeat_count> [<relative_throughput>]]
@@ -128,13 +129,13 @@ static int st_add_path(struct path_selector *ps, struct dm_path *path,
return -EINVAL;
}
- if (argc && (sscanf(argv[0], "%u", &repeat_count) != 1)) {
+ if (argc && (sscanf(argv[0], "%u%c", &repeat_count, &dummy) != 1)) {
*error = "service-time ps: invalid repeat count";
return -EINVAL;
}
if ((argc == 2) &&
- (sscanf(argv[1], "%u", &relative_throughput) != 1 ||
+ (sscanf(argv[1], "%u%c", &relative_throughput, &dummy) != 1 ||
relative_throughput > ST_MAX_RELATIVE_THROUGHPUT)) {
*error = "service-time ps: invalid relative_throughput value";
return -EINVAL;
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 3d80cf0c152d..35c94ff24ad5 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -75,8 +75,9 @@ static int get_stripe(struct dm_target *ti, struct stripe_c *sc,
unsigned int stripe, char **argv)
{
unsigned long long start;
+ char dummy;
- if (sscanf(argv[1], "%llu", &start) != 1)
+ if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1)
return -EINVAL;
if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 63cc54289aff..2e227fbf1622 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -268,8 +268,7 @@ void dm_table_destroy(struct dm_table *t)
vfree(t->highs);
/* free the device list */
- if (t->devices.next != &t->devices)
- free_devices(&t->devices);
+ free_devices(&t->devices);
dm_free_md_mempools(t->mempools);
@@ -464,10 +463,11 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
struct dm_dev_internal *dd;
unsigned int major, minor;
struct dm_table *t = ti->table;
+ char dummy;
BUG_ON(!t);
- if (sscanf(path, "%u:%u", &major, &minor) == 2) {
+ if (sscanf(path, "%u:%u%c", &major, &minor, &dummy) == 2) {
/* Extract the major/minor numbers */
dev = MKDEV(major, minor);
if (MAJOR(dev) != major || MINOR(dev) != minor)
@@ -842,9 +842,10 @@ static int validate_next_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
unsigned *value, char **error, unsigned grouped)
{
const char *arg_str = dm_shift_arg(arg_set);
+ char dummy;
if (!arg_str ||
- (sscanf(arg_str, "%u", value) != 1) ||
+ (sscanf(arg_str, "%u%c", value, &dummy) != 1) ||
(*value < arg->min) ||
(*value > arg->max) ||
(grouped && arg_set->argc < *value)) {
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 237571af77fd..737d38865b69 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -614,7 +614,7 @@ static int __commit_transaction(struct dm_pool_metadata *pmd)
if (r < 0)
goto out;
- r = dm_sm_root_size(pmd->metadata_sm, &data_len);
+ r = dm_sm_root_size(pmd->data_sm, &data_len);
if (r < 0)
goto out;
@@ -713,6 +713,9 @@ struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
if (r)
goto bad;
+ if (bdev_size > THIN_METADATA_MAX_SECTORS)
+ bdev_size = THIN_METADATA_MAX_SECTORS;
+
disk_super = dm_block_data(sblock);
disk_super->magic = cpu_to_le64(THIN_SUPERBLOCK_MAGIC);
disk_super->version = cpu_to_le32(THIN_VERSION);
diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
index 859c16896877..ed4725e67c96 100644
--- a/drivers/md/dm-thin-metadata.h
+++ b/drivers/md/dm-thin-metadata.h
@@ -11,6 +11,19 @@
#define THIN_METADATA_BLOCK_SIZE 4096
+/*
+ * The metadata device is currently limited in size.
+ *
+ * We have one block of index, which can hold 255 index entries. Each
+ * index entry contains allocation info about 16k metadata blocks.
+ */
+#define THIN_METADATA_MAX_SECTORS (255 * (1 << 14) * (THIN_METADATA_BLOCK_SIZE / (1 << SECTOR_SHIFT)))
+
+/*
+ * A metadata device larger than 16GB triggers a warning.
+ */
+#define THIN_METADATA_MAX_SECTORS_WARNING (16 * (1024 * 1024 * 1024 >> SECTOR_SHIFT))
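Worked out: THIN_METADATA_MAX_SECTORS is 255 * 16384 * 8 = 33423360 sectors, slightly under 16 GiB, which is why the warning threshold is pitched at exactly 16 GiB (33554432 sectors).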
+
/*----------------------------------------------------------------*/
struct dm_pool_metadata;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index c3087575fef0..213ae32a0fc4 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -23,6 +23,7 @@
#define DEFERRED_SET_SIZE 64
#define MAPPING_POOL_SIZE 1024
#define PRISON_CELLS 1024
+#define COMMIT_PERIOD HZ
/*
* The block size of the device holding pool data must be
@@ -32,16 +33,6 @@
#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
/*
- * The metadata device is currently limited in size. The limitation is
- * checked lower down in dm-space-map-metadata, but we also check it here
- * so we can fail early.
- *
- * We have one block of index, which can hold 255 index entries. Each
- * index entry contains allocation info about 16k metadata blocks.
- */
-#define METADATA_DEV_MAX_SECTORS (255 * (1 << 14) * (THIN_METADATA_BLOCK_SIZE / (1 << SECTOR_SHIFT)))
-
-/*
* Device id is restricted to 24 bits.
*/
#define MAX_DEV_ID ((1 << 24) - 1)
@@ -72,7 +63,7 @@
* missed out if the io covers the block. (schedule_copy).
*
* iv) insert the new mapping into the origin's btree
- * (process_prepared_mappings). This act of inserting breaks some
+ * (process_prepared_mapping). This act of inserting breaks some
* sharing of btree nodes between the two devices. Breaking sharing only
affects the btree of that specific device. Btrees for the other
* devices that share the block never change. The btree for the origin
@@ -124,7 +115,7 @@ struct cell {
struct hlist_node list;
struct bio_prison *prison;
struct cell_key key;
- unsigned count;
+ struct bio *holder;
struct bio_list bios;
};
@@ -220,54 +211,59 @@ static struct cell *__search_bucket(struct hlist_head *bucket,
* This may block if a new cell needs allocating. You must ensure that
* cells will be unlocked even if the calling thread is blocked.
*
- * Returns the number of entries in the cell prior to the new addition
- * or < 0 on failure.
+ * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
*/
static int bio_detain(struct bio_prison *prison, struct cell_key *key,
struct bio *inmate, struct cell **ref)
{
- int r;
+ int r = 1;
unsigned long flags;
uint32_t hash = hash_key(prison, key);
- struct cell *uninitialized_var(cell), *cell2 = NULL;
+ struct cell *cell, *cell2;
BUG_ON(hash > prison->nr_buckets);
spin_lock_irqsave(&prison->lock, flags);
+
cell = __search_bucket(prison->cells + hash, key);
+ if (cell) {
+ bio_list_add(&cell->bios, inmate);
+ goto out;
+ }
- if (!cell) {
- /*
- * Allocate a new cell
- */
- spin_unlock_irqrestore(&prison->lock, flags);
- cell2 = mempool_alloc(prison->cell_pool, GFP_NOIO);
- spin_lock_irqsave(&prison->lock, flags);
+ /*
+ * Allocate a new cell
+ */
+ spin_unlock_irqrestore(&prison->lock, flags);
+ cell2 = mempool_alloc(prison->cell_pool, GFP_NOIO);
+ spin_lock_irqsave(&prison->lock, flags);
- /*
- * We've been unlocked, so we have to double check that
- * nobody else has inserted this cell in the meantime.
- */
- cell = __search_bucket(prison->cells + hash, key);
+ /*
+ * We've been unlocked, so we have to double check that
+ * nobody else has inserted this cell in the meantime.
+ */
+ cell = __search_bucket(prison->cells + hash, key);
+ if (cell) {
+ mempool_free(cell2, prison->cell_pool);
+ bio_list_add(&cell->bios, inmate);
+ goto out;
+ }
- if (!cell) {
- cell = cell2;
- cell2 = NULL;
+ /*
+ * Use new cell.
+ */
+ cell = cell2;
- cell->prison = prison;
- memcpy(&cell->key, key, sizeof(cell->key));
- cell->count = 0;
- bio_list_init(&cell->bios);
- hlist_add_head(&cell->list, prison->cells + hash);
- }
- }
+ cell->prison = prison;
+ memcpy(&cell->key, key, sizeof(cell->key));
+ cell->holder = inmate;
+ bio_list_init(&cell->bios);
+ hlist_add_head(&cell->list, prison->cells + hash);
- r = cell->count++;
- bio_list_add(&cell->bios, inmate);
- spin_unlock_irqrestore(&prison->lock, flags);
+ r = 0;
- if (cell2)
- mempool_free(cell2, prison->cell_pool);
+out:
+ spin_unlock_irqrestore(&prison->lock, flags);
*ref = cell;
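With the holder field, callers can treat bio_detain() as a trylock. The pattern used by the new code further down (a sketch, abbreviated from process_discard()/process_bio()):

	struct cell *cell;
	struct cell_key key;

	build_virtual_key(tc->td, block, &key);
	if (bio_detain(tc->pool->prison, &key, bio, &cell))
		return;	/* someone else holds the block; bio is parked in the cell */

	/* this bio is the holder: process it, then release the cell */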
@@ -283,8 +279,8 @@ static void __cell_release(struct cell *cell, struct bio_list *inmates)
hlist_del(&cell->list);
- if (inmates)
- bio_list_merge(inmates, &cell->bios);
+ bio_list_add(inmates, cell->holder);
+ bio_list_merge(inmates, &cell->bios);
mempool_free(cell, prison->cell_pool);
}
@@ -305,22 +301,44 @@ static void cell_release(struct cell *cell, struct bio_list *bios)
* bio may be in the cell. This function releases the cell, and also does
* a sanity check.
*/
+static void __cell_release_singleton(struct cell *cell, struct bio *bio)
+{
+ hlist_del(&cell->list);
+ BUG_ON(cell->holder != bio);
+ BUG_ON(!bio_list_empty(&cell->bios));
+}
+
static void cell_release_singleton(struct cell *cell, struct bio *bio)
{
- struct bio_prison *prison = cell->prison;
- struct bio_list bios;
- struct bio *b;
unsigned long flags;
-
- bio_list_init(&bios);
+ struct bio_prison *prison = cell->prison;
spin_lock_irqsave(&prison->lock, flags);
- __cell_release(cell, &bios);
+ __cell_release_singleton(cell, bio);
spin_unlock_irqrestore(&prison->lock, flags);
+}
+
+/*
+ * Sometimes we don't want the holder, just the additional bios.
+ */
+static void __cell_release_no_holder(struct cell *cell, struct bio_list *inmates)
+{
+ struct bio_prison *prison = cell->prison;
+
+ hlist_del(&cell->list);
+ bio_list_merge(inmates, &cell->bios);
- b = bio_list_pop(&bios);
- BUG_ON(b != bio);
- BUG_ON(!bio_list_empty(&bios));
+ mempool_free(cell, prison->cell_pool);
+}
+
+static void cell_release_no_holder(struct cell *cell, struct bio_list *inmates)
+{
+ unsigned long flags;
+ struct bio_prison *prison = cell->prison;
+
+ spin_lock_irqsave(&prison->lock, flags);
+ __cell_release_no_holder(cell, inmates);
+ spin_unlock_irqrestore(&prison->lock, flags);
}
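The three release paths now divide cleanly: cell_release() hands back the holder plus all waiting inmates, cell_release_singleton() asserts that the holder is the only occupant, and cell_release_no_holder() hands back only the waiters for the cases where the holder has already been dealt with separately (see cell_defer_except() below).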
static void cell_error(struct cell *cell)
@@ -471,6 +489,13 @@ static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
* devices.
*/
struct new_mapping;
+
+struct pool_features {
+ unsigned zero_new_blocks:1;
+ unsigned discard_enabled:1;
+ unsigned discard_passdown:1;
+};
+
struct pool {
struct list_head list;
struct dm_target *ti; /* Only set if a pool target is bound */
@@ -484,7 +509,7 @@ struct pool {
dm_block_t offset_mask;
dm_block_t low_water_blocks;
- unsigned zero_new_blocks:1;
+ struct pool_features pf;
unsigned low_water_triggered:1; /* A dm event has been sent */
unsigned no_free_space:1; /* A -ENOSPC warning has been issued */
@@ -493,17 +518,21 @@ struct pool {
struct workqueue_struct *wq;
struct work_struct worker;
+ struct delayed_work waker;
unsigned ref_count;
+ unsigned long last_commit_jiffies;
spinlock_t lock;
struct bio_list deferred_bios;
struct bio_list deferred_flush_bios;
struct list_head prepared_mappings;
+ struct list_head prepared_discards;
struct bio_list retry_on_resume_list;
- struct deferred_set ds; /* FIXME: move to thin_c */
+ struct deferred_set shared_read_ds;
+ struct deferred_set all_io_ds;
struct new_mapping *next_mapping;
mempool_t *mapping_pool;
@@ -521,7 +550,7 @@ struct pool_c {
struct dm_target_callbacks callbacks;
dm_block_t low_water_blocks;
- unsigned zero_new_blocks:1;
+ struct pool_features pf;
};
/*
@@ -529,6 +558,7 @@ struct pool_c {
*/
struct thin_c {
struct dm_dev *pool_dev;
+ struct dm_dev *origin_dev;
dm_thin_id dev_id;
struct pool *pool;
@@ -597,6 +627,13 @@ static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev
/*----------------------------------------------------------------*/
+struct endio_hook {
+ struct thin_c *tc;
+ struct deferred_entry *shared_read_entry;
+ struct deferred_entry *all_io_entry;
+ struct new_mapping *overwrite_mapping;
+};
+
static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
{
struct bio *bio;
@@ -607,7 +644,8 @@ static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
bio_list_init(master);
while ((bio = bio_list_pop(&bios))) {
- if (dm_get_mapinfo(bio)->ptr == tc)
+ struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+ if (h->tc == tc)
bio_endio(bio, DM_ENDIO_REQUEUE);
else
bio_list_add(master, bio);
@@ -646,14 +684,16 @@ static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
(bio->bi_sector & pool->offset_mask);
}
-static void remap_and_issue(struct thin_c *tc, struct bio *bio,
- dm_block_t block)
+static void remap_to_origin(struct thin_c *tc, struct bio *bio)
+{
+ bio->bi_bdev = tc->origin_dev->bdev;
+}
+
+static void issue(struct thin_c *tc, struct bio *bio)
{
struct pool *pool = tc->pool;
unsigned long flags;
- remap(tc, bio, block);
-
/*
* Batch together any FUA/FLUSH bios we find and then issue
* a single commit for them in process_deferred_bios().
@@ -666,6 +706,19 @@ static void remap_and_issue(struct thin_c *tc, struct bio *bio,
generic_make_request(bio);
}
+static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
+{
+ remap_to_origin(tc, bio);
+ issue(tc, bio);
+}
+
+static void remap_and_issue(struct thin_c *tc, struct bio *bio,
+ dm_block_t block)
+{
+ remap(tc, bio, block);
+ issue(tc, bio);
+}
+
/*
* wake_worker() is used when new work is queued and when pool_resume is
* ready to continue deferred IO processing.
@@ -680,21 +733,17 @@ static void wake_worker(struct pool *pool)
/*
* Bio endio functions.
*/
-struct endio_hook {
- struct thin_c *tc;
- bio_end_io_t *saved_bi_end_io;
- struct deferred_entry *entry;
-};
-
struct new_mapping {
struct list_head list;
- int prepared;
+ unsigned quiesced:1;
+ unsigned prepared:1;
+ unsigned pass_discard:1;
struct thin_c *tc;
dm_block_t virt_block;
dm_block_t data_block;
- struct cell *cell;
+ struct cell *cell, *cell2;
int err;
/*
@@ -711,7 +760,7 @@ static void __maybe_add_mapping(struct new_mapping *m)
{
struct pool *pool = m->tc->pool;
- if (list_empty(&m->list) && m->prepared) {
+ if (m->quiesced && m->prepared) {
list_add(&m->list, &pool->prepared_mappings);
wake_worker(pool);
}
@@ -734,7 +783,8 @@ static void copy_complete(int read_err, unsigned long write_err, void *context)
static void overwrite_endio(struct bio *bio, int err)
{
unsigned long flags;
- struct new_mapping *m = dm_get_mapinfo(bio)->ptr;
+ struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+ struct new_mapping *m = h->overwrite_mapping;
struct pool *pool = m->tc->pool;
m->err = err;
@@ -745,31 +795,6 @@ static void overwrite_endio(struct bio *bio, int err)
spin_unlock_irqrestore(&pool->lock, flags);
}
-static void shared_read_endio(struct bio *bio, int err)
-{
- struct list_head mappings;
- struct new_mapping *m, *tmp;
- struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
- unsigned long flags;
- struct pool *pool = h->tc->pool;
-
- bio->bi_end_io = h->saved_bi_end_io;
- bio_endio(bio, err);
-
- INIT_LIST_HEAD(&mappings);
- ds_dec(h->entry, &mappings);
-
- spin_lock_irqsave(&pool->lock, flags);
- list_for_each_entry_safe(m, tmp, &mappings, list) {
- list_del(&m->list);
- INIT_LIST_HEAD(&m->list);
- __maybe_add_mapping(m);
- }
- spin_unlock_irqrestore(&pool->lock, flags);
-
- mempool_free(h, pool->endio_hook_pool);
-}
-
/*----------------------------------------------------------------*/
/*
@@ -800,21 +825,16 @@ static void cell_defer(struct thin_c *tc, struct cell *cell,
* Same as cell_defer above, except it omits one particular detainee,
* a write bio that covers the block and has already been processed.
*/
-static void cell_defer_except(struct thin_c *tc, struct cell *cell,
- struct bio *exception)
+static void cell_defer_except(struct thin_c *tc, struct cell *cell)
{
struct bio_list bios;
- struct bio *bio;
struct pool *pool = tc->pool;
unsigned long flags;
bio_list_init(&bios);
- cell_release(cell, &bios);
spin_lock_irqsave(&pool->lock, flags);
- while ((bio = bio_list_pop(&bios)))
- if (bio != exception)
- bio_list_add(&pool->deferred_bios, bio);
+ cell_release_no_holder(cell, &pool->deferred_bios);
spin_unlock_irqrestore(&pool->lock, flags);
wake_worker(pool);
@@ -854,7 +874,7 @@ static void process_prepared_mapping(struct new_mapping *m)
* the bios in the cell.
*/
if (bio) {
- cell_defer_except(tc, m->cell, bio);
+ cell_defer_except(tc, m->cell);
bio_endio(bio, 0);
} else
cell_defer(tc, m->cell, m->data_block);
@@ -863,7 +883,30 @@ static void process_prepared_mapping(struct new_mapping *m)
mempool_free(m, tc->pool->mapping_pool);
}
-static void process_prepared_mappings(struct pool *pool)
+static void process_prepared_discard(struct new_mapping *m)
+{
+ int r;
+ struct thin_c *tc = m->tc;
+
+ r = dm_thin_remove_block(tc->td, m->virt_block);
+ if (r)
+ DMERR("dm_thin_remove_block() failed");
+
+ /*
+ * Pass the discard down to the underlying device?
+ */
+ if (m->pass_discard)
+ remap_and_issue(tc, m->bio, m->data_block);
+ else
+ bio_endio(m->bio, 0);
+
+ cell_defer_except(tc, m->cell);
+ cell_defer_except(tc, m->cell2);
+ mempool_free(m, tc->pool->mapping_pool);
+}
+
+static void process_prepared(struct pool *pool, struct list_head *head,
+ void (*fn)(struct new_mapping *))
{
unsigned long flags;
struct list_head maps;
@@ -871,21 +914,27 @@ static void process_prepared_mappings(struct pool *pool)
INIT_LIST_HEAD(&maps);
spin_lock_irqsave(&pool->lock, flags);
- list_splice_init(&pool->prepared_mappings, &maps);
+ list_splice_init(head, &maps);
spin_unlock_irqrestore(&pool->lock, flags);
list_for_each_entry_safe(m, tmp, &maps, list)
- process_prepared_mapping(m);
+ fn(m);
}
/*
* Deferred bio jobs.
*/
-static int io_overwrites_block(struct pool *pool, struct bio *bio)
+static int io_overlaps_block(struct pool *pool, struct bio *bio)
{
- return ((bio_data_dir(bio) == WRITE) &&
- !(bio->bi_sector & pool->offset_mask)) &&
+ return !(bio->bi_sector & pool->offset_mask) &&
(bio->bi_size == (pool->sectors_per_block << SECTOR_SHIFT));
+}
+
+static int io_overwrites_block(struct pool *pool, struct bio *bio)
+{
+ return (bio_data_dir(bio) == WRITE) &&
+ io_overlaps_block(pool, bio);
}
static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
@@ -917,7 +966,8 @@ static struct new_mapping *get_next_mapping(struct pool *pool)
}
static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
- dm_block_t data_origin, dm_block_t data_dest,
+ struct dm_dev *origin, dm_block_t data_origin,
+ dm_block_t data_dest,
struct cell *cell, struct bio *bio)
{
int r;
@@ -925,6 +975,7 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
struct new_mapping *m = get_next_mapping(pool);
INIT_LIST_HEAD(&m->list);
+ m->quiesced = 0;
m->prepared = 0;
m->tc = tc;
m->virt_block = virt_block;
@@ -933,7 +984,8 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
m->err = 0;
m->bio = NULL;
- ds_add_work(&pool->ds, &m->list);
+ if (!ds_add_work(&pool->shared_read_ds, &m->list))
+ m->quiesced = 1;
/*
* IO to pool_dev remaps to the pool target's data_dev.
@@ -942,14 +994,15 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
* bio immediately. Otherwise we use kcopyd to clone the data first.
*/
if (io_overwrites_block(pool, bio)) {
+ struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+ h->overwrite_mapping = m;
m->bio = bio;
save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
- dm_get_mapinfo(bio)->ptr = m;
remap_and_issue(tc, bio, data_dest);
} else {
struct dm_io_region from, to;
- from.bdev = tc->pool_dev->bdev;
+ from.bdev = origin->bdev;
from.sector = data_origin * pool->sectors_per_block;
from.count = pool->sectors_per_block;
@@ -967,6 +1020,22 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
}
}
+static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
+ dm_block_t data_origin, dm_block_t data_dest,
+ struct cell *cell, struct bio *bio)
+{
+ schedule_copy(tc, virt_block, tc->pool_dev,
+ data_origin, data_dest, cell, bio);
+}
+
+static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
+ dm_block_t data_dest,
+ struct cell *cell, struct bio *bio)
+{
+ schedule_copy(tc, virt_block, tc->origin_dev,
+ virt_block, data_dest, cell, bio);
+}
+
static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
dm_block_t data_block, struct cell *cell,
struct bio *bio)
@@ -975,6 +1044,7 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
struct new_mapping *m = get_next_mapping(pool);
INIT_LIST_HEAD(&m->list);
+ m->quiesced = 1;
m->prepared = 0;
m->tc = tc;
m->virt_block = virt_block;
@@ -988,13 +1058,14 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
* zeroing pre-existing data, we can issue the bio immediately.
* Otherwise we use kcopyd to zero the data first.
*/
- if (!pool->zero_new_blocks)
+ if (!pool->pf.zero_new_blocks)
process_prepared_mapping(m);
else if (io_overwrites_block(pool, bio)) {
+ struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+ h->overwrite_mapping = m;
m->bio = bio;
save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
- dm_get_mapinfo(bio)->ptr = m;
remap_and_issue(tc, bio, data_block);
} else {
@@ -1081,7 +1152,8 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
*/
static void retry_on_resume(struct bio *bio)
{
- struct thin_c *tc = dm_get_mapinfo(bio)->ptr;
+ struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+ struct thin_c *tc = h->tc;
struct pool *pool = tc->pool;
unsigned long flags;
@@ -1102,6 +1174,86 @@ static void no_space(struct cell *cell)
retry_on_resume(bio);
}
+static void process_discard(struct thin_c *tc, struct bio *bio)
+{
+ int r;
+ struct pool *pool = tc->pool;
+ struct cell *cell, *cell2;
+ struct cell_key key, key2;
+ dm_block_t block = get_bio_block(tc, bio);
+ struct dm_thin_lookup_result lookup_result;
+ struct new_mapping *m;
+
+ build_virtual_key(tc->td, block, &key);
+ if (bio_detain(tc->pool->prison, &key, bio, &cell))
+ return;
+
+ r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
+ switch (r) {
+ case 0:
+ /*
+ * Check nobody is fiddling with this pool block. This can
+ * happen if someone's in the process of breaking sharing
+ * on this block.
+ */
+ build_data_key(tc->td, lookup_result.block, &key2);
+ if (bio_detain(tc->pool->prison, &key2, bio, &cell2)) {
+ cell_release_singleton(cell, bio);
+ break;
+ }
+
+ if (io_overlaps_block(pool, bio)) {
+ /*
+ * IO may still be going to the destination block. We must
+ * quiesce before we can do the removal.
+ */
+ m = get_next_mapping(pool);
+ m->tc = tc;
+ m->pass_discard = (!lookup_result.shared) & pool->pf.discard_passdown;
+ m->virt_block = block;
+ m->data_block = lookup_result.block;
+ m->cell = cell;
+ m->cell2 = cell2;
+ m->err = 0;
+ m->bio = bio;
+
+ if (!ds_add_work(&pool->all_io_ds, &m->list)) {
+ list_add(&m->list, &pool->prepared_discards);
+ wake_worker(pool);
+ }
+ } else {
+ /*
+ * This path is hit if people are ignoring
+ * limits->discard_granularity. It ignores any
+ * part of the discard that is in a subsequent
+ * block.
+ */
+ sector_t offset = bio->bi_sector - (block << pool->block_shift);
+ unsigned remaining = (pool->sectors_per_block - offset) << 9;
+ bio->bi_size = min(bio->bi_size, remaining);
+
+ cell_release_singleton(cell, bio);
+ cell_release_singleton(cell2, bio);
+ remap_and_issue(tc, bio, lookup_result.block);
+ }
+ break;
+
+ case -ENODATA:
+ /*
+ * It isn't provisioned, just forget it.
+ */
+ cell_release_singleton(cell, bio);
+ bio_endio(bio, 0);
+ break;
+
+ default:
+ DMERR("discard: find block unexpectedly returned %d", r);
+ cell_release_singleton(cell, bio);
+ bio_io_error(bio);
+ break;
+ }
+}
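The truncation arithmetic in the misaligned branch, worked through: with sectors_per_block = 128, a 100-sector discard starting at sector 200 falls into block 1 (200 >> 7) at offset 200 - 128 = 72, so remaining = (128 - 72) << 9 = 28672 bytes and the bio is clipped to 56 sectors; the tail that belongs to block 2 is dropped, exactly as the comment warns.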
+
static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
struct cell_key *key,
struct dm_thin_lookup_result *lookup_result,
@@ -1113,8 +1265,8 @@ static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
r = alloc_data_block(tc, &data_block);
switch (r) {
case 0:
- schedule_copy(tc, block, lookup_result->block,
- data_block, cell, bio);
+ schedule_internal_copy(tc, block, lookup_result->block,
+ data_block, cell, bio);
break;
case -ENOSPC:
@@ -1147,13 +1299,9 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
if (bio_data_dir(bio) == WRITE)
break_sharing(tc, bio, block, &key, lookup_result, cell);
else {
- struct endio_hook *h;
- h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO);
+ struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
- h->tc = tc;
- h->entry = ds_inc(&pool->ds);
- save_and_set_endio(bio, &h->saved_bi_end_io, shared_read_endio);
- dm_get_mapinfo(bio)->ptr = h;
+ h->shared_read_entry = ds_inc(&pool->shared_read_ds);
cell_release_singleton(cell, bio);
remap_and_issue(tc, bio, lookup_result->block);
@@ -1188,7 +1336,10 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
r = alloc_data_block(tc, &data_block);
switch (r) {
case 0:
- schedule_zero(tc, block, data_block, cell, bio);
+ if (tc->origin_dev)
+ schedule_external_copy(tc, block, data_block, cell, bio);
+ else
+ schedule_zero(tc, block, data_block, cell, bio);
break;
case -ENOSPC:
@@ -1239,16 +1390,27 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
break;
case -ENODATA:
- provision_block(tc, bio, block, cell);
+ if (bio_data_dir(bio) == READ && tc->origin_dev) {
+ cell_release_singleton(cell, bio);
+ remap_to_origin_and_issue(tc, bio);
+ } else
+ provision_block(tc, bio, block, cell);
break;
default:
DMERR("dm_thin_find_block() failed, error = %d", r);
+ cell_release_singleton(cell, bio);
bio_io_error(bio);
break;
}
}
+static int need_commit_due_to_time(struct pool *pool)
+{
+ return jiffies < pool->last_commit_jiffies ||
+ jiffies > pool->last_commit_jiffies + COMMIT_PERIOD;
+}
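One subtlety: the open-coded comparison also reports "commit due" when jiffies wraps below last_commit_jiffies, which at worst costs one extra commit. A wrap-safe sketch using the helpers from <linux/jiffies.h> (not what the patch uses) would be:

static int need_commit_due_to_time(struct pool *pool)
{
	return !time_in_range(jiffies, pool->last_commit_jiffies,
			      pool->last_commit_jiffies + COMMIT_PERIOD);
}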
+
static void process_deferred_bios(struct pool *pool)
{
unsigned long flags;
@@ -1264,7 +1426,9 @@ static void process_deferred_bios(struct pool *pool)
spin_unlock_irqrestore(&pool->lock, flags);
while ((bio = bio_list_pop(&bios))) {
- struct thin_c *tc = dm_get_mapinfo(bio)->ptr;
+ struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+ struct thin_c *tc = h->tc;
+
/*
* If we've got no free new_mapping structs, and processing
* this bio might require one, we pause until there are some
@@ -1277,7 +1441,11 @@ static void process_deferred_bios(struct pool *pool)
break;
}
- process_bio(tc, bio);
+
+ if (bio->bi_rw & REQ_DISCARD)
+ process_discard(tc, bio);
+ else
+ process_bio(tc, bio);
}
/*
@@ -1290,7 +1458,7 @@ static void process_deferred_bios(struct pool *pool)
bio_list_init(&pool->deferred_flush_bios);
spin_unlock_irqrestore(&pool->lock, flags);
- if (bio_list_empty(&bios))
+ if (bio_list_empty(&bios) && !need_commit_due_to_time(pool))
return;
r = dm_pool_commit_metadata(pool->pmd);
@@ -1301,6 +1469,7 @@ static void process_deferred_bios(struct pool *pool)
bio_io_error(bio);
return;
}
+ pool->last_commit_jiffies = jiffies;
while ((bio = bio_list_pop(&bios)))
generic_make_request(bio);
@@ -1310,10 +1479,22 @@ static void do_worker(struct work_struct *ws)
{
struct pool *pool = container_of(ws, struct pool, worker);
- process_prepared_mappings(pool);
+ process_prepared(pool, &pool->prepared_mappings, process_prepared_mapping);
+ process_prepared(pool, &pool->prepared_discards, process_prepared_discard);
process_deferred_bios(pool);
}
+/*
+ * We want to commit periodically so that not too much
+ * unwritten data builds up.
+ */
+static void do_waker(struct work_struct *ws)
+{
+ struct pool *pool = container_of(to_delayed_work(ws), struct pool, waker);
+ wake_worker(pool);
+ queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
+}
+
/*----------------------------------------------------------------*/
/*
@@ -1335,6 +1516,19 @@ static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
wake_worker(pool);
}
+static struct endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *bio)
+{
+ struct pool *pool = tc->pool;
+ struct endio_hook *h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO);
+
+ h->tc = tc;
+ h->shared_read_entry = NULL;
+ h->all_io_entry = bio->bi_rw & REQ_DISCARD ? NULL : ds_inc(&pool->all_io_ds);
+ h->overwrite_mapping = NULL;
+
+ return h;
+}
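Every bio entering the thin target now carries one of these hooks for its whole lifetime: all_io_entry pins the all_io deferred set (so prepared discards wait for in-flight I/O), shared_read_entry is filled in later only for reads of shared blocks, and overwrite_mapping links an overwriting write back to its new_mapping. thin_endio() below unwinds whichever of the three were used and returns the hook to its mempool.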
+
/*
* Non-blocking function called from the thin target's map function.
*/
@@ -1347,12 +1541,8 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio,
struct dm_thin_device *td = tc->td;
struct dm_thin_lookup_result result;
- /*
- * Save the thin context for easy access from the deferred bio later.
- */
- map_context->ptr = tc;
-
- if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
+ map_context->ptr = thin_hook_bio(tc, bio);
+ if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) {
thin_defer_bio(tc, bio);
return DM_MAPIO_SUBMITTED;
}
@@ -1434,7 +1624,7 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti)
pool->ti = ti;
pool->low_water_blocks = pt->low_water_blocks;
- pool->zero_new_blocks = pt->zero_new_blocks;
+ pool->pf = pt->pf;
return 0;
}
@@ -1448,6 +1638,14 @@ static void unbind_control_target(struct pool *pool, struct dm_target *ti)
/*----------------------------------------------------------------
* Pool creation
*--------------------------------------------------------------*/
+/* Initialize pool features. */
+static void pool_features_init(struct pool_features *pf)
+{
+ pf->zero_new_blocks = 1;
+ pf->discard_enabled = 1;
+ pf->discard_passdown = 1;
+}
+
static void __pool_destroy(struct pool *pool)
{
__pool_table_remove(pool);
@@ -1495,7 +1693,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
pool->block_shift = ffs(block_size) - 1;
pool->offset_mask = block_size - 1;
pool->low_water_blocks = 0;
- pool->zero_new_blocks = 1;
+ pool_features_init(&pool->pf);
pool->prison = prison_create(PRISON_CELLS);
if (!pool->prison) {
*error = "Error creating pool's bio prison";
@@ -1523,14 +1721,17 @@ static struct pool *pool_create(struct mapped_device *pool_md,
}
INIT_WORK(&pool->worker, do_worker);
+ INIT_DELAYED_WORK(&pool->waker, do_waker);
spin_lock_init(&pool->lock);
bio_list_init(&pool->deferred_bios);
bio_list_init(&pool->deferred_flush_bios);
INIT_LIST_HEAD(&pool->prepared_mappings);
+ INIT_LIST_HEAD(&pool->prepared_discards);
pool->low_water_triggered = 0;
pool->no_free_space = 0;
bio_list_init(&pool->retry_on_resume_list);
- ds_init(&pool->ds);
+ ds_init(&pool->shared_read_ds);
+ ds_init(&pool->all_io_ds);
pool->next_mapping = NULL;
pool->mapping_pool =
@@ -1549,6 +1750,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
goto bad_endio_hook_pool;
}
pool->ref_count = 1;
+ pool->last_commit_jiffies = jiffies;
pool->pool_md = pool_md;
pool->md_dev = metadata_dev;
__pool_table_insert(pool);
@@ -1588,7 +1790,8 @@ static void __pool_dec(struct pool *pool)
static struct pool *__pool_find(struct mapped_device *pool_md,
struct block_device *metadata_dev,
- unsigned long block_size, char **error)
+ unsigned long block_size, char **error,
+ int *created)
{
struct pool *pool = __pool_table_lookup_metadata_dev(metadata_dev);
@@ -1604,8 +1807,10 @@ static struct pool *__pool_find(struct mapped_device *pool_md,
return ERR_PTR(-EINVAL);
__pool_inc(pool);
- } else
+ } else {
pool = pool_create(pool_md, metadata_dev, block_size, error);
+ *created = 1;
+ }
}
return pool;
@@ -1629,10 +1834,6 @@ static void pool_dtr(struct dm_target *ti)
mutex_unlock(&dm_thin_pool_table.mutex);
}
-struct pool_features {
- unsigned zero_new_blocks:1;
-};
-
static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
struct dm_target *ti)
{
@@ -1641,7 +1842,7 @@ static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
const char *arg_name;
static struct dm_arg _args[] = {
- {0, 1, "Invalid number of pool feature arguments"},
+ {0, 3, "Invalid number of pool feature arguments"},
};
/*
@@ -1661,6 +1862,12 @@ static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
if (!strcasecmp(arg_name, "skip_block_zeroing")) {
pf->zero_new_blocks = 0;
continue;
+ } else if (!strcasecmp(arg_name, "ignore_discard")) {
+ pf->discard_enabled = 0;
+ continue;
+ } else if (!strcasecmp(arg_name, "no_discard_passdown")) {
+ pf->discard_passdown = 0;
+ continue;
}
ti->error = "Unrecognised pool feature requested";
@@ -1678,10 +1885,12 @@ static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
*
* Optional feature arguments are:
* skip_block_zeroing: skips the zeroing of newly-provisioned blocks.
+ * ignore_discard: disable discard
+ * no_discard_passdown: don't pass discards down to the data device
*/
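For illustration (device names and sizes hypothetical), a pool table line exercising the new arguments could look like:

	0 16777216 thin-pool /dev/sdc1 /dev/sdc2 128 0 2 ignore_discard skip_block_zeroing

i.e. <start> <length> thin-pool <metadata dev> <data dev> <data block size> <low water mark> <#feature args> <feature args...>.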
static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
- int r;
+ int r, pool_created = 0;
struct pool_c *pt;
struct pool *pool;
struct pool_features pf;
@@ -1691,6 +1900,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
dm_block_t low_water_blocks;
struct dm_dev *metadata_dev;
sector_t metadata_dev_size;
+ char b[BDEVNAME_SIZE];
/*
* FIXME Remove validation from scope of lock.
@@ -1712,11 +1922,9 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
}
metadata_dev_size = i_size_read(metadata_dev->bdev->bd_inode) >> SECTOR_SHIFT;
- if (metadata_dev_size > METADATA_DEV_MAX_SECTORS) {
- ti->error = "Metadata device is too large";
- r = -EINVAL;
- goto out_metadata;
- }
+ if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING)
+ DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
+ bdevname(metadata_dev->bdev, b), THIN_METADATA_MAX_SECTORS);
r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
if (r) {
@@ -1742,8 +1950,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
/*
* Set default pool features.
*/
- memset(&pf, 0, sizeof(pf));
- pf.zero_new_blocks = 1;
+ pool_features_init(&pf);
dm_consume_args(&as, 4);
r = parse_pool_features(&as, &pf, ti);
@@ -1757,20 +1964,58 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
}
pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev,
- block_size, &ti->error);
+ block_size, &ti->error, &pool_created);
if (IS_ERR(pool)) {
r = PTR_ERR(pool);
goto out_free_pt;
}
+ /*
+ * 'pool_created' reflects whether this is the first table load.
+ * Top level discard support is not allowed to be changed after
+ * initial load. This would require a pool reload to trigger thin
+ * device changes.
+ */
+ if (!pool_created && pf.discard_enabled != pool->pf.discard_enabled) {
+ ti->error = "Discard support cannot be disabled once enabled";
+ r = -EINVAL;
+ goto out_flags_changed;
+ }
+
+ /*
+ * If discard_passdown was enabled, verify that the data device
+ * supports discards. Disable discard_passdown if not; otherwise
+ * -EOPNOTSUPP will be returned.
+ */
+ if (pf.discard_passdown) {
+ struct request_queue *q = bdev_get_queue(data_dev->bdev);
+ if (!q || !blk_queue_discard(q)) {
+ DMWARN("Discard unsupported by data device: Disabling discard passdown.");
+ pf.discard_passdown = 0;
+ }
+ }
+
pt->pool = pool;
pt->ti = ti;
pt->metadata_dev = metadata_dev;
pt->data_dev = data_dev;
pt->low_water_blocks = low_water_blocks;
- pt->zero_new_blocks = pf.zero_new_blocks;
+ pt->pf = pf;
ti->num_flush_requests = 1;
- ti->num_discard_requests = 0;
+ /*
+ * Only need to enable discards if the pool should pass
+ * them down to the data device. The thin device's discard
+ * processing will cause mappings to be removed from the btree.
+ */
+ if (pf.discard_enabled && pf.discard_passdown) {
+ ti->num_discard_requests = 1;
+ /*
+ * Setting 'discards_supported' circumvents the normal
+ * stacking of discard limits (this keeps the pool and
+ * thin devices' discard limits consistent).
+ */
+ ti->discards_supported = 1;
+ }
ti->private = pt;
pt->callbacks.congested_fn = pool_is_congested;
@@ -1780,6 +2025,8 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
return 0;
+out_flags_changed:
+ __pool_dec(pool);
out_free_pt:
kfree(pt);
out:
@@ -1878,7 +2125,7 @@ static void pool_resume(struct dm_target *ti)
__requeue_bios(pool);
spin_unlock_irqrestore(&pool->lock, flags);
- wake_worker(pool);
+ do_waker(&pool->waker.work);
}
static void pool_postsuspend(struct dm_target *ti)
@@ -1887,6 +2134,7 @@ static void pool_postsuspend(struct dm_target *ti)
struct pool_c *pt = ti->private;
struct pool *pool = pt->pool;
+ cancel_delayed_work(&pool->waker);
flush_workqueue(pool->wq);
r = dm_pool_commit_metadata(pool->pmd);
@@ -2067,7 +2315,7 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
static int pool_status(struct dm_target *ti, status_type_t type,
char *result, unsigned maxlen)
{
- int r;
+ int r, count;
unsigned sz = 0;
uint64_t transaction_id;
dm_block_t nr_free_blocks_data;
@@ -2130,10 +2378,19 @@ static int pool_status(struct dm_target *ti, status_type_t type,
(unsigned long)pool->sectors_per_block,
(unsigned long long)pt->low_water_blocks);
- DMEMIT("%u ", !pool->zero_new_blocks);
+ count = !pool->pf.zero_new_blocks + !pool->pf.discard_enabled +
+ !pool->pf.discard_passdown;
+ DMEMIT("%u ", count);
- if (!pool->zero_new_blocks)
+ if (!pool->pf.zero_new_blocks)
DMEMIT("skip_block_zeroing ");
+
+ if (!pool->pf.discard_enabled)
+ DMEMIT("ignore_discard ");
+
+ if (!pool->pf.discard_passdown)
+ DMEMIT("no_discard_passdown ");
+
break;
}
@@ -2162,6 +2419,21 @@ static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}
+static void set_discard_limits(struct pool *pool, struct queue_limits *limits)
+{
+ /*
+ * FIXME: these limits may be incompatible with the pool's data device
+ */
+ limits->max_discard_sectors = pool->sectors_per_block;
+
+ /*
+ * This is just a hint, and not enforced. We have to cope with
+ * bios that overlap 2 blocks.
+ */
+ limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
+ limits->discard_zeroes_data = pool->pf.zero_new_blocks;
+}
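With a 128-sector (64 KiB) block size, for example, this advertises max_discard_sectors = 128 and discard_granularity = 128 << 9 = 65536 bytes, so a single discard bio never spans more than one pool block, and well-formed submitters will align to block boundaries, making the whole-block path in process_discard() the common case.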
+
static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
struct pool_c *pt = ti->private;
@@ -2169,13 +2441,15 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
blk_limits_io_min(limits, 0);
blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
+ if (pool->pf.discard_enabled)
+ set_discard_limits(pool, limits);
}
static struct target_type pool_target = {
.name = "thin-pool",
.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
DM_TARGET_IMMUTABLE,
- .version = {1, 0, 0},
+ .version = {1, 1, 0},
.module = THIS_MODULE,
.ctr = pool_ctr,
.dtr = pool_dtr,
@@ -2202,6 +2476,8 @@ static void thin_dtr(struct dm_target *ti)
__pool_dec(tc->pool);
dm_pool_close_thin_device(tc->td);
dm_put_device(ti, tc->pool_dev);
+ if (tc->origin_dev)
+ dm_put_device(ti, tc->origin_dev);
kfree(tc);
mutex_unlock(&dm_thin_pool_table.mutex);
@@ -2210,21 +2486,25 @@ static void thin_dtr(struct dm_target *ti)
/*
* Thin target parameters:
*
- * <pool_dev> <dev_id>
+ * <pool_dev> <dev_id> [origin_dev]
*
* pool_dev: the path to the pool (eg, /dev/mapper/my_pool)
* dev_id: the internal device identifier
+ * origin_dev: a device external to the pool that should act as the origin
+ *
+ * If the pool device has discards disabled, they get disabled for the thin
+ * device as well.
*/
static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
int r;
struct thin_c *tc;
- struct dm_dev *pool_dev;
+ struct dm_dev *pool_dev, *origin_dev;
struct mapped_device *pool_md;
mutex_lock(&dm_thin_pool_table.mutex);
- if (argc != 2) {
+ if (argc != 2 && argc != 3) {
ti->error = "Invalid argument count";
r = -EINVAL;
goto out_unlock;
@@ -2237,6 +2517,15 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto out_unlock;
}
+ if (argc == 3) {
+ r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev);
+ if (r) {
+ ti->error = "Error opening origin device";
+ goto bad_origin_dev;
+ }
+ tc->origin_dev = origin_dev;
+ }
+
r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &pool_dev);
if (r) {
ti->error = "Error opening pool device";
@@ -2273,8 +2562,12 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
ti->split_io = tc->pool->sectors_per_block;
ti->num_flush_requests = 1;
- ti->num_discard_requests = 0;
- ti->discards_supported = 0;
+
+ /* In case the pool supports discards, pass them on. */
+ if (tc->pool->pf.discard_enabled) {
+ ti->discards_supported = 1;
+ ti->num_discard_requests = 1;
+ }
dm_put(pool_md);
@@ -2289,6 +2582,9 @@ bad_pool_lookup:
bad_common:
dm_put_device(ti, tc->pool_dev);
bad_pool_dev:
+ if (tc->origin_dev)
+ dm_put_device(ti, tc->origin_dev);
+bad_origin_dev:
kfree(tc);
out_unlock:
mutex_unlock(&dm_thin_pool_table.mutex);
@@ -2299,11 +2595,46 @@ out_unlock:
static int thin_map(struct dm_target *ti, struct bio *bio,
union map_info *map_context)
{
- bio->bi_sector -= ti->begin;
+ bio->bi_sector = dm_target_offset(ti, bio->bi_sector);
return thin_bio_map(ti, bio, map_context);
}
+static int thin_endio(struct dm_target *ti,
+ struct bio *bio, int err,
+ union map_info *map_context)
+{
+ unsigned long flags;
+ struct endio_hook *h = map_context->ptr;
+ struct list_head work;
+ struct new_mapping *m, *tmp;
+ struct pool *pool = h->tc->pool;
+
+ if (h->shared_read_entry) {
+ INIT_LIST_HEAD(&work);
+ ds_dec(h->shared_read_entry, &work);
+
+ spin_lock_irqsave(&pool->lock, flags);
+ list_for_each_entry_safe(m, tmp, &work, list) {
+ list_del(&m->list);
+ m->quiesced = 1;
+ __maybe_add_mapping(m);
+ }
+ spin_unlock_irqrestore(&pool->lock, flags);
+ }
+
+ if (h->all_io_entry) {
+ INIT_LIST_HEAD(&work);
+ ds_dec(h->all_io_entry, &work);
+ list_for_each_entry_safe(m, tmp, &work, list)
+ list_add(&m->list, &pool->prepared_discards);
+ }
+
+ mempool_free(h, pool->endio_hook_pool);
+
+ return 0;
+}
+
static void thin_postsuspend(struct dm_target *ti)
{
if (dm_noflush_suspending(ti))
@@ -2347,6 +2678,8 @@ static int thin_status(struct dm_target *ti, status_type_t type,
DMEMIT("%s %lu",
format_dev_t(buf, tc->pool_dev->bdev->bd_dev),
(unsigned long) tc->dev_id);
+ if (tc->origin_dev)
+ DMEMIT(" %s", format_dev_t(buf, tc->origin_dev->bdev->bd_dev));
break;
}
}
@@ -2377,18 +2710,21 @@ static int thin_iterate_devices(struct dm_target *ti,
static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
struct thin_c *tc = ti->private;
+ struct pool *pool = tc->pool;
blk_limits_io_min(limits, 0);
- blk_limits_io_opt(limits, tc->pool->sectors_per_block << SECTOR_SHIFT);
+ blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
+ set_discard_limits(pool, limits);
}
static struct target_type thin_target = {
.name = "thin",
- .version = {1, 0, 0},
+ .version = {1, 1, 0},
.module = THIS_MODULE,
.ctr = thin_ctr,
.dtr = thin_dtr,
.map = thin_map,
+ .end_io = thin_endio,
.postsuspend = thin_postsuspend,
.status = thin_status,
.iterate_devices = thin_iterate_devices,
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
new file mode 100644
index 000000000000..fa365d39b612
--- /dev/null
+++ b/drivers/md/dm-verity.c
@@ -0,0 +1,913 @@
+/*
+ * Copyright (C) 2012 Red Hat, Inc.
+ *
+ * Author: Mikulas Patocka <mpatocka@redhat.com>
+ *
+ * Based on Chromium dm-verity driver (C) 2011 The Chromium OS Authors
+ *
+ * This file is released under the GPLv2.
+ *
+ * In the file "/sys/module/dm_verity/parameters/prefetch_cluster" you can set
+ * the default prefetch value. Data are read in "prefetch_cluster" chunks from the
+ * hash device. Setting this greatly improves performance when data and hash
+ * are on the same disk on different partitions on devices with poor random
+ * access behavior.
+ */
+
+#include "dm-bufio.h"
+
+#include <linux/module.h>
+#include <linux/device-mapper.h>
+#include <crypto/hash.h>
+
+#define DM_MSG_PREFIX "verity"
+
+#define DM_VERITY_IO_VEC_INLINE 16
+#define DM_VERITY_MEMPOOL_SIZE 4
+#define DM_VERITY_DEFAULT_PREFETCH_SIZE 262144
+
+#define DM_VERITY_MAX_LEVELS 63
+
+static unsigned dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE;
+
+module_param_named(prefetch_cluster, dm_verity_prefetch_cluster, uint, S_IRUGO | S_IWUSR);
+
+struct dm_verity {
+ struct dm_dev *data_dev;
+ struct dm_dev *hash_dev;
+ struct dm_target *ti;
+ struct dm_bufio_client *bufio;
+ char *alg_name;
+ struct crypto_shash *tfm;
+ u8 *root_digest; /* digest of the root block */
+ u8 *salt; /* salt: its size is salt_size */
+ unsigned salt_size;
+ sector_t data_start; /* data offset in 512-byte sectors */
+ sector_t hash_start; /* hash start in blocks */
+ sector_t data_blocks; /* the number of data blocks */
+ sector_t hash_blocks; /* the number of hash blocks */
+ unsigned char data_dev_block_bits; /* log2(data blocksize) */
+ unsigned char hash_dev_block_bits; /* log2(hash blocksize) */
+ unsigned char hash_per_block_bits; /* log2(hashes in hash block) */
+ unsigned char levels; /* the number of tree levels */
+ unsigned char version;
+ unsigned digest_size; /* digest size for the current hash algorithm */
+ unsigned shash_descsize;/* the size of temporary space for crypto */
+ int hash_failed; /* set to 1 if hash of any block failed */
+
+ mempool_t *io_mempool; /* mempool of struct dm_verity_io */
+ mempool_t *vec_mempool; /* mempool of bio vector */
+
+ struct workqueue_struct *verify_wq;
+
+ /* starting blocks for each tree level. 0 is the lowest level. */
+ sector_t hash_level_block[DM_VERITY_MAX_LEVELS];
+};
+
+struct dm_verity_io {
+ struct dm_verity *v;
+ struct bio *bio;
+
+ /* original values of bio->bi_end_io and bio->bi_private */
+ bio_end_io_t *orig_bi_end_io;
+ void *orig_bi_private;
+
+ sector_t block;
+ unsigned n_blocks;
+
+ /* saved bio vector */
+ struct bio_vec *io_vec;
+ unsigned io_vec_size;
+
+ struct work_struct work;
+
+ /* A space for short vectors; longer vectors are allocated separately. */
+ struct bio_vec io_vec_inline[DM_VERITY_IO_VEC_INLINE];
+
+ /*
+ * Three variably-sized fields follow this struct:
+ *
+ * u8 hash_desc[v->shash_descsize];
+ * u8 real_digest[v->digest_size];
+ * u8 want_digest[v->digest_size];
+ *
+ * To access them use: io_hash_desc(), io_real_digest() and io_want_digest().
+ */
+};
+
+static struct shash_desc *io_hash_desc(struct dm_verity *v, struct dm_verity_io *io)
+{
+ return (struct shash_desc *)(io + 1);
+}
+
+static u8 *io_real_digest(struct dm_verity *v, struct dm_verity_io *io)
+{
+ return (u8 *)(io + 1) + v->shash_descsize;
+}
+
+static u8 *io_want_digest(struct dm_verity *v, struct dm_verity_io *io)
+{
+ return (u8 *)(io + 1) + v->shash_descsize + v->digest_size;
+}
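These accessors only work because each io is allocated with room for the trailing fields; the constructor (outside this section) is expected to size the mempool elements along the lines of (a sketch, not the verbatim code):

	/* assumed per-io allocation size backing io_mempool */
	unsigned io_size = sizeof(struct dm_verity_io) +
			   v->shash_descsize + 2 * v->digest_size;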
+
+/*
+ * Auxiliary structure appended to each dm-bufio buffer. If the value
+ * hash_verified is nonzero, the hash of the block has been verified.
+ *
+ * The variable hash_verified is set to 0 when allocating the buffer, then
+ * it can be changed to 1 and it is never reset to 0 again.
+ *
+ * There is no lock around this value; a race condition can at worst cause
+ * multiple processes to verify the hash of the same buffer simultaneously
+ * and to write 1 to hash_verified simultaneously.
+ * This condition is harmless, so we don't need locking.
+ */
+struct buffer_aux {
+ int hash_verified;
+};
+
+/*
+ * Initialize struct buffer_aux for a freshly created buffer.
+ */
+static void dm_bufio_alloc_callback(struct dm_buffer *buf)
+{
+ struct buffer_aux *aux = dm_bufio_get_aux_data(buf);
+
+ aux->hash_verified = 0;
+}
+
+/*
+ * Translate input sector number to the sector number on the target device.
+ */
+static sector_t verity_map_sector(struct dm_verity *v, sector_t bi_sector)
+{
+ return v->data_start + dm_target_offset(v->ti, bi_sector);
+}
+
+/*
+ * Return hash position of a specified block at a specified tree level
+ * (0 is the lowest level).
+ * The lowest "hash_per_block_bits"-bits of the result denote hash position
+ * inside a hash block. The remaining bits denote location of the hash block.
+ */
+static sector_t verity_position_at_level(struct dm_verity *v, sector_t block,
+ int level)
+{
+ return block >> (level * v->hash_per_block_bits);
+}
+
+static void verity_hash_at_level(struct dm_verity *v, sector_t block, int level,
+ sector_t *hash_block, unsigned *offset)
+{
+ sector_t position = verity_position_at_level(v, block, level);
+ unsigned idx;
+
+ *hash_block = v->hash_level_block[level] + (position >> v->hash_per_block_bits);
+
+ if (!offset)
+ return;
+
+ idx = position & ((1 << v->hash_per_block_bits) - 1);
+ if (!v->version)
+ *offset = idx * v->digest_size;
+ else
+ *offset = idx << (v->hash_dev_block_bits - v->hash_per_block_bits);
+}
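A worked example: with 4096-byte hash blocks and a 32-byte digest there are 128 hashes per block, so hash_per_block_bits = 7. For data block 1000 at level 0, position = 1000, giving hash_block = hash_level_block[0] + (1000 >> 7) = hash_level_block[0] + 7 and idx = 1000 & 127 = 104; in format version 1 the byte offset is then 104 << (12 - 7) = 3328.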
+
+/*
+ * Verify hash of a metadata block pertaining to the specified data block
+ * ("block" argument) at a specified level ("level" argument).
+ *
+ * On successful return, io_want_digest(v, io) contains the hash value for
+ * a lower tree level or for the data block (if we're at the lowest level).
+ *
+ * If "skip_unverified" is true, unverified buffer is skipped and 1 is returned.
+ * If "skip_unverified" is false, unverified buffer is hashed and verified
+ * against current value of io_want_digest(v, io).
+ */
+static int verity_verify_level(struct dm_verity_io *io, sector_t block,
+ int level, bool skip_unverified)
+{
+ struct dm_verity *v = io->v;
+ struct dm_buffer *buf;
+ struct buffer_aux *aux;
+ u8 *data;
+ int r;
+ sector_t hash_block;
+ unsigned offset;
+
+ verity_hash_at_level(v, block, level, &hash_block, &offset);
+
+ data = dm_bufio_read(v->bufio, hash_block, &buf);
+ if (unlikely(IS_ERR(data)))
+ return PTR_ERR(data);
+
+ aux = dm_bufio_get_aux_data(buf);
+
+ if (!aux->hash_verified) {
+ struct shash_desc *desc;
+ u8 *result;
+
+ if (skip_unverified) {
+ r = 1;
+ goto release_ret_r;
+ }
+
+ desc = io_hash_desc(v, io);
+ desc->tfm = v->tfm;
+ desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+ r = crypto_shash_init(desc);
+ if (r < 0) {
+ DMERR("crypto_shash_init failed: %d", r);
+ goto release_ret_r;
+ }
+
+ if (likely(v->version >= 1)) {
+ r = crypto_shash_update(desc, v->salt, v->salt_size);
+ if (r < 0) {
+ DMERR("crypto_shash_update failed: %d", r);
+ goto release_ret_r;
+ }
+ }
+
+ r = crypto_shash_update(desc, data, 1 << v->hash_dev_block_bits);
+ if (r < 0) {
+ DMERR("crypto_shash_update failed: %d", r);
+ goto release_ret_r;
+ }
+
+ if (!v->version) {
+ r = crypto_shash_update(desc, v->salt, v->salt_size);
+ if (r < 0) {
+ DMERR("crypto_shash_update failed: %d", r);
+ goto release_ret_r;
+ }
+ }
+
+ result = io_real_digest(v, io);
+ r = crypto_shash_final(desc, result);
+ if (r < 0) {
+ DMERR("crypto_shash_final failed: %d", r);
+ goto release_ret_r;
+ }
+ if (unlikely(memcmp(result, io_want_digest(v, io), v->digest_size))) {
+ DMERR_LIMIT("metadata block %llu is corrupted",
+ (unsigned long long)hash_block);
+ v->hash_failed = 1;
+ r = -EIO;
+ goto release_ret_r;
+ } else
+ aux->hash_verified = 1;
+ }
+
+ data += offset;
+
+ memcpy(io_want_digest(v, io), data, v->digest_size);
+
+ dm_bufio_release(buf);
+ return 0;
+
+release_ret_r:
+ dm_bufio_release(buf);
+
+ return r;
+}
+
+/*
+ * Verify one "dm_verity_io" structure.
+ */
+static int verity_verify_io(struct dm_verity_io *io)
+{
+ struct dm_verity *v = io->v;
+ unsigned b;
+ int i;
+ unsigned vector = 0, offset = 0;
+
+ for (b = 0; b < io->n_blocks; b++) {
+ struct shash_desc *desc;
+ u8 *result;
+ int r;
+ unsigned todo;
+
+ if (likely(v->levels)) {
+ /*
+ * First, we try to get the requested hash for
+ * the current block. If the hash block itself is
+ * verified, zero is returned. If it isn't, this
+ * function returns 0 and we fall back to whole
+ * chain verification.
+ */
+ int r = verity_verify_level(io, io->block + b, 0, true);
+ if (likely(!r))
+ goto test_block_hash;
+ if (r < 0)
+ return r;
+ }
+
+ memcpy(io_want_digest(v, io), v->root_digest, v->digest_size);
+
+ for (i = v->levels - 1; i >= 0; i--) {
+ int r = verity_verify_level(io, io->block + b, i, false);
+ if (unlikely(r))
+ return r;
+ }
+
+test_block_hash:
+ desc = io_hash_desc(v, io);
+ desc->tfm = v->tfm;
+ desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+ r = crypto_shash_init(desc);
+ if (r < 0) {
+ DMERR("crypto_shash_init failed: %d", r);
+ return r;
+ }
+
+ if (likely(v->version >= 1)) {
+ r = crypto_shash_update(desc, v->salt, v->salt_size);
+ if (r < 0) {
+ DMERR("crypto_shash_update failed: %d", r);
+ return r;
+ }
+ }
+
+ todo = 1 << v->data_dev_block_bits;
+ do {
+ struct bio_vec *bv;
+ u8 *page;
+ unsigned len;
+
+ BUG_ON(vector >= io->io_vec_size);
+ bv = &io->io_vec[vector];
+ page = kmap_atomic(bv->bv_page);
+ len = bv->bv_len - offset;
+ if (likely(len >= todo))
+ len = todo;
+ r = crypto_shash_update(desc,
+ page + bv->bv_offset + offset, len);
+ kunmap_atomic(page);
+ if (r < 0) {
+ DMERR("crypto_shash_update failed: %d", r);
+ return r;
+ }
+ offset += len;
+ if (likely(offset == bv->bv_len)) {
+ offset = 0;
+ vector++;
+ }
+ todo -= len;
+ } while (todo);
+
+ if (!v->version) {
+ r = crypto_shash_update(desc, v->salt, v->salt_size);
+ if (r < 0) {
+ DMERR("crypto_shash_update failed: %d", r);
+ return r;
+ }
+ }
+
+ result = io_real_digest(v, io);
+ r = crypto_shash_final(desc, result);
+ if (r < 0) {
+ DMERR("crypto_shash_final failed: %d", r);
+ return r;
+ }
+ if (unlikely(memcmp(result, io_want_digest(v, io), v->digest_size))) {
+ DMERR_LIMIT("data block %llu is corrupted",
+ (unsigned long long)(io->block + b));
+ v->hash_failed = 1;
+ return -EIO;
+ }
+ }
+ BUG_ON(vector != io->io_vec_size);
+ BUG_ON(offset);
+
+ return 0;
+}
+
+/*
+ * End one "io" structure with a given error.
+ */
+static void verity_finish_io(struct dm_verity_io *io, int error)
+{
+ struct bio *bio = io->bio;
+ struct dm_verity *v = io->v;
+
+ bio->bi_end_io = io->orig_bi_end_io;
+ bio->bi_private = io->orig_bi_private;
+
+ if (io->io_vec != io->io_vec_inline)
+ mempool_free(io->io_vec, v->vec_mempool);
+
+ mempool_free(io, v->io_mempool);
+
+ bio_endio(bio, error);
+}
+
+static void verity_work(struct work_struct *w)
+{
+ struct dm_verity_io *io = container_of(w, struct dm_verity_io, work);
+
+ verity_finish_io(io, verity_verify_io(io));
+}
+
+static void verity_end_io(struct bio *bio, int error)
+{
+ struct dm_verity_io *io = bio->bi_private;
+
+ if (error) {
+ verity_finish_io(io, error);
+ return;
+ }
+
+ INIT_WORK(&io->work, verity_work);
+ queue_work(io->v->verify_wq, &io->work);
+}
+
+/*
+ * Prefetch buffers for the specified io.
+ * The root buffer is not prefetched; it is assumed to be cached
+ * all the time.
+ */
+static void verity_prefetch_io(struct dm_verity *v, struct dm_verity_io *io)
+{
+ int i;
+
+ for (i = v->levels - 2; i >= 0; i--) {
+ sector_t hash_block_start;
+ sector_t hash_block_end;
+ verity_hash_at_level(v, io->block, i, &hash_block_start, NULL);
+ verity_hash_at_level(v, io->block + io->n_blocks - 1, i, &hash_block_end, NULL);
+ if (!i) {
+ unsigned cluster = *(volatile unsigned *)&dm_verity_prefetch_cluster;
+
+ cluster >>= v->data_dev_block_bits;
+ if (unlikely(!cluster))
+ goto no_prefetch_cluster;
+
+ if (unlikely(cluster & (cluster - 1)))
+ cluster = 1 << (fls(cluster) - 1);
+
+ hash_block_start &= ~(sector_t)(cluster - 1);
+ hash_block_end |= cluster - 1;
+ if (unlikely(hash_block_end >= v->hash_blocks))
+ hash_block_end = v->hash_blocks - 1;
+ }
+no_prefetch_cluster:
+ dm_bufio_prefetch(v->bufio, hash_block_start,
+ hash_block_end - hash_block_start + 1);
+ }
+}
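
A stand-alone sketch of the level-0 clustering above (not part of the patch; the cluster size, block size and block range are assumed values). The byte-sized prefetch cluster is converted to blocks, rounded down to a power of two, and the [start, end] block range is widened to that alignment:

#include <stdio.h>
#include <stdint.h>

static unsigned fls_sketch(unsigned x)	/* stand-in for the kernel's fls() */
{
	unsigned r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

int main(void)
{
	unsigned data_dev_block_bits = 12;	/* 4 KiB blocks (assumed) */
	unsigned cluster = 12288 >> data_dev_block_bits;	/* 3 blocks */
	uint64_t start = 1001, end = 1005;

	if (cluster & (cluster - 1))	/* not a power of two */
		cluster = 1u << (fls_sketch(cluster) - 1);	/* rounds down to 2 */

	start &= ~(uint64_t)(cluster - 1);	/* 1001 -> 1000 */
	end |= cluster - 1;	/* 1005 stays 1005 */
	printf("prefetch blocks %llu..%llu\n",
	       (unsigned long long)start, (unsigned long long)end);
	return 0;
}
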
+
+/*
+ * Bio map function. It allocates a dm_verity_io structure and a bio vector,
+ * fills them, then issues the prefetches and the I/O.
+ */
+static int verity_map(struct dm_target *ti, struct bio *bio,
+ union map_info *map_context)
+{
+ struct dm_verity *v = ti->private;
+ struct dm_verity_io *io;
+
+ bio->bi_bdev = v->data_dev->bdev;
+ bio->bi_sector = verity_map_sector(v, bio->bi_sector);
+
+ if (((unsigned)bio->bi_sector | bio_sectors(bio)) &
+ ((1 << (v->data_dev_block_bits - SECTOR_SHIFT)) - 1)) {
+ DMERR_LIMIT("unaligned io");
+ return -EIO;
+ }
+
+ if ((bio->bi_sector + bio_sectors(bio)) >>
+ (v->data_dev_block_bits - SECTOR_SHIFT) > v->data_blocks) {
+ DMERR_LIMIT("io out of range");
+ return -EIO;
+ }
+
+ if (bio_data_dir(bio) == WRITE)
+ return -EIO;
+
+ io = mempool_alloc(v->io_mempool, GFP_NOIO);
+ io->v = v;
+ io->bio = bio;
+ io->orig_bi_end_io = bio->bi_end_io;
+ io->orig_bi_private = bio->bi_private;
+ io->block = bio->bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);
+ io->n_blocks = bio->bi_size >> v->data_dev_block_bits;
+
+ bio->bi_end_io = verity_end_io;
+ bio->bi_private = io;
+ io->io_vec_size = bio->bi_vcnt - bio->bi_idx;
+ if (io->io_vec_size < DM_VERITY_IO_VEC_INLINE)
+ io->io_vec = io->io_vec_inline;
+ else
+ io->io_vec = mempool_alloc(v->vec_mempool, GFP_NOIO);
+ memcpy(io->io_vec, bio_iovec(bio),
+ io->io_vec_size * sizeof(struct bio_vec));
+
+ verity_prefetch_io(v, io);
+
+ generic_make_request(bio);
+
+ return DM_MAPIO_SUBMITTED;
+}
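
As a worked example of the alignment checks above (parameters assumed, not taken from the patch): with 4096-byte data blocks, data_dev_block_bits - SECTOR_SHIFT is 12 - 9 = 3, so the mask is (1 << 3) - 1 = 7. A bio starting at sector 16 and spanning 8 sectors passes; a bio starting at sector 12, or one spanning only 4 sectors, is rejected with -EIO before any hashing happens.
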
+
+/*
+ * Status: V (valid) or C (corruption found)
+ */
+static int verity_status(struct dm_target *ti, status_type_t type,
+ char *result, unsigned maxlen)
+{
+ struct dm_verity *v = ti->private;
+ unsigned sz = 0;
+ unsigned x;
+
+ switch (type) {
+ case STATUSTYPE_INFO:
+ DMEMIT("%c", v->hash_failed ? 'C' : 'V');
+ break;
+ case STATUSTYPE_TABLE:
+ DMEMIT("%u %s %s %u %u %llu %llu %s ",
+ v->version,
+ v->data_dev->name,
+ v->hash_dev->name,
+ 1 << v->data_dev_block_bits,
+ 1 << v->hash_dev_block_bits,
+ (unsigned long long)v->data_blocks,
+ (unsigned long long)v->hash_start,
+ v->alg_name
+ );
+ for (x = 0; x < v->digest_size; x++)
+ DMEMIT("%02x", v->root_digest[x]);
+ DMEMIT(" ");
+ if (!v->salt_size)
+ DMEMIT("-");
+ else
+ for (x = 0; x < v->salt_size; x++)
+ DMEMIT("%02x", v->salt[x]);
+ break;
+ }
+
+ return 0;
+}
+
+static int verity_ioctl(struct dm_target *ti, unsigned cmd,
+ unsigned long arg)
+{
+ struct dm_verity *v = ti->private;
+ int r = 0;
+
+ if (v->data_start ||
+ ti->len != i_size_read(v->data_dev->bdev->bd_inode) >> SECTOR_SHIFT)
+ r = scsi_verify_blk_ioctl(NULL, cmd);
+
+ return r ? : __blkdev_driver_ioctl(v->data_dev->bdev, v->data_dev->mode,
+ cmd, arg);
+}
+
+static int verity_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
+ struct bio_vec *biovec, int max_size)
+{
+ struct dm_verity *v = ti->private;
+ struct request_queue *q = bdev_get_queue(v->data_dev->bdev);
+
+ if (!q->merge_bvec_fn)
+ return max_size;
+
+ bvm->bi_bdev = v->data_dev->bdev;
+ bvm->bi_sector = verity_map_sector(v, bvm->bi_sector);
+
+ return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
+}
+
+static int verity_iterate_devices(struct dm_target *ti,
+ iterate_devices_callout_fn fn, void *data)
+{
+ struct dm_verity *v = ti->private;
+
+ return fn(ti, v->data_dev, v->data_start, ti->len, data);
+}
+
+static void verity_io_hints(struct dm_target *ti, struct queue_limits *limits)
+{
+ struct dm_verity *v = ti->private;
+
+ if (limits->logical_block_size < 1 << v->data_dev_block_bits)
+ limits->logical_block_size = 1 << v->data_dev_block_bits;
+
+ if (limits->physical_block_size < 1 << v->data_dev_block_bits)
+ limits->physical_block_size = 1 << v->data_dev_block_bits;
+
+ blk_limits_io_min(limits, limits->logical_block_size);
+}
+
+static void verity_dtr(struct dm_target *ti)
+{
+ struct dm_verity *v = ti->private;
+
+ if (v->verify_wq)
+ destroy_workqueue(v->verify_wq);
+
+ if (v->vec_mempool)
+ mempool_destroy(v->vec_mempool);
+
+ if (v->io_mempool)
+ mempool_destroy(v->io_mempool);
+
+ if (v->bufio)
+ dm_bufio_client_destroy(v->bufio);
+
+ kfree(v->salt);
+ kfree(v->root_digest);
+
+ if (v->tfm)
+ crypto_free_shash(v->tfm);
+
+ kfree(v->alg_name);
+
+ if (v->hash_dev)
+ dm_put_device(ti, v->hash_dev);
+
+ if (v->data_dev)
+ dm_put_device(ti, v->data_dev);
+
+ kfree(v);
+}
+
+/*
+ * Target parameters:
+ * <version> The current format is version 1.
+ * Version 0 is compatible with the original Chromium OS releases.
+ * <data device>
+ * <hash device>
+ * <data block size>
+ * <hash block size>
+ * <the number of data blocks>
+ * <hash start block>
+ * <algorithm>
+ * <digest>
+ * <salt> Hex string or "-" if no salt.
+ */
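
For illustration only (the device names, sizes and hex strings below are placeholders, not values from this patch), a version-1 table line for 262144 data blocks of 4096 bytes could be loaded as:

0 2097152 verity 1 /dev/sda1 /dev/sda2 4096 4096 262144 1 sha256 <root-digest-hex> <salt-hex>

where 2097152 is the mapping length in 512-byte sectors (262144 * 8) and the ten arguments after the word "verity" are parsed by the constructor that follows.
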
+static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
+{
+ struct dm_verity *v;
+ unsigned num;
+ unsigned long long num_ll;
+ int r;
+ int i;
+ sector_t hash_position;
+ char dummy;
+
+ v = kzalloc(sizeof(struct dm_verity), GFP_KERNEL);
+ if (!v) {
+ ti->error = "Cannot allocate verity structure";
+ return -ENOMEM;
+ }
+ ti->private = v;
+ v->ti = ti;
+
+ if ((dm_table_get_mode(ti->table) & ~FMODE_READ)) {
+ ti->error = "Device must be readonly";
+ r = -EINVAL;
+ goto bad;
+ }
+
+ if (argc != 10) {
+ ti->error = "Invalid argument count: exactly 10 arguments required";
+ r = -EINVAL;
+ goto bad;
+ }
+
+ if (sscanf(argv[0], "%u%c", &num, &dummy) != 1 ||
+ num > 1) {
+ ti->error = "Invalid version";
+ r = -EINVAL;
+ goto bad;
+ }
+ v->version = num;
+
+ r = dm_get_device(ti, argv[1], FMODE_READ, &v->data_dev);
+ if (r) {
+ ti->error = "Data device lookup failed";
+ goto bad;
+ }
+
+ r = dm_get_device(ti, argv[2], FMODE_READ, &v->hash_dev);
+ if (r) {
+ ti->error = "Data device lookup failed";
+ goto bad;
+ }
+
+ if (sscanf(argv[3], "%u%c", &num, &dummy) != 1 ||
+ !num || (num & (num - 1)) ||
+ num < bdev_logical_block_size(v->data_dev->bdev) ||
+ num > PAGE_SIZE) {
+ ti->error = "Invalid data device block size";
+ r = -EINVAL;
+ goto bad;
+ }
+ v->data_dev_block_bits = ffs(num) - 1;
+
+ if (sscanf(argv[4], "%u%c", &num, &dummy) != 1 ||
+ !num || (num & (num - 1)) ||
+ num < bdev_logical_block_size(v->hash_dev->bdev) ||
+ num > INT_MAX) {
+ ti->error = "Invalid hash device block size";
+ r = -EINVAL;
+ goto bad;
+ }
+ v->hash_dev_block_bits = ffs(num) - 1;
+
+ if (sscanf(argv[5], "%llu%c", &num_ll, &dummy) != 1 ||
+ num_ll << (v->data_dev_block_bits - SECTOR_SHIFT) !=
+ (sector_t)num_ll << (v->data_dev_block_bits - SECTOR_SHIFT)) {
+ ti->error = "Invalid data blocks";
+ r = -EINVAL;
+ goto bad;
+ }
+ v->data_blocks = num_ll;
+
+ if (ti->len > (v->data_blocks << (v->data_dev_block_bits - SECTOR_SHIFT))) {
+ ti->error = "Data device is too small";
+ r = -EINVAL;
+ goto bad;
+ }
+
+ if (sscanf(argv[6], "%llu%c", &num_ll, &dummy) != 1 ||
+ num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT) !=
+ (sector_t)num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT)) {
+ ti->error = "Invalid hash start";
+ r = -EINVAL;
+ goto bad;
+ }
+ v->hash_start = num_ll;
+
+ v->alg_name = kstrdup(argv[7], GFP_KERNEL);
+ if (!v->alg_name) {
+ ti->error = "Cannot allocate algorithm name";
+ r = -ENOMEM;
+ goto bad;
+ }
+
+ v->tfm = crypto_alloc_shash(v->alg_name, 0, 0);
+ if (IS_ERR(v->tfm)) {
+ ti->error = "Cannot initialize hash function";
+ r = PTR_ERR(v->tfm);
+ v->tfm = NULL;
+ goto bad;
+ }
+ v->digest_size = crypto_shash_digestsize(v->tfm);
+ if ((1 << v->hash_dev_block_bits) < v->digest_size * 2) {
+ ti->error = "Digest size too big";
+ r = -EINVAL;
+ goto bad;
+ }
+ v->shash_descsize =
+ sizeof(struct shash_desc) + crypto_shash_descsize(v->tfm);
+
+ v->root_digest = kmalloc(v->digest_size, GFP_KERNEL);
+ if (!v->root_digest) {
+ ti->error = "Cannot allocate root digest";
+ r = -ENOMEM;
+ goto bad;
+ }
+ if (strlen(argv[8]) != v->digest_size * 2 ||
+ hex2bin(v->root_digest, argv[8], v->digest_size)) {
+ ti->error = "Invalid root digest";
+ r = -EINVAL;
+ goto bad;
+ }
+
+ if (strcmp(argv[9], "-")) {
+ v->salt_size = strlen(argv[9]) / 2;
+ v->salt = kmalloc(v->salt_size, GFP_KERNEL);
+ if (!v->salt) {
+ ti->error = "Cannot allocate salt";
+ r = -ENOMEM;
+ goto bad;
+ }
+ if (strlen(argv[9]) != v->salt_size * 2 ||
+ hex2bin(v->salt, argv[9], v->salt_size)) {
+ ti->error = "Invalid salt";
+ r = -EINVAL;
+ goto bad;
+ }
+ }
+
+ v->hash_per_block_bits =
+ fls((1 << v->hash_dev_block_bits) / v->digest_size) - 1;
+
+ v->levels = 0;
+ if (v->data_blocks)
+ while (v->hash_per_block_bits * v->levels < 64 &&
+ (unsigned long long)(v->data_blocks - 1) >>
+ (v->hash_per_block_bits * v->levels))
+ v->levels++;
+
+ if (v->levels > DM_VERITY_MAX_LEVELS) {
+ ti->error = "Too many tree levels";
+ r = -E2BIG;
+ goto bad;
+ }
+
+ hash_position = v->hash_start;
+ for (i = v->levels - 1; i >= 0; i--) {
+ sector_t s;
+ v->hash_level_block[i] = hash_position;
+ s = verity_position_at_level(v, v->data_blocks, i);
+ s = (s >> v->hash_per_block_bits) +
+ !!(s & ((1 << v->hash_per_block_bits) - 1));
+ if (hash_position + s < hash_position) {
+ ti->error = "Hash device offset overflow";
+ r = -E2BIG;
+ goto bad;
+ }
+ hash_position += s;
+ }
+ v->hash_blocks = hash_position;
+
+ v->bufio = dm_bufio_client_create(v->hash_dev->bdev,
+ 1 << v->hash_dev_block_bits, 1, sizeof(struct buffer_aux),
+ dm_bufio_alloc_callback, NULL);
+ if (IS_ERR(v->bufio)) {
+ ti->error = "Cannot initialize dm-bufio";
+ r = PTR_ERR(v->bufio);
+ v->bufio = NULL;
+ goto bad;
+ }
+
+ if (dm_bufio_get_device_size(v->bufio) < v->hash_blocks) {
+ ti->error = "Hash device is too small";
+ r = -E2BIG;
+ goto bad;
+ }
+
+ v->io_mempool = mempool_create_kmalloc_pool(DM_VERITY_MEMPOOL_SIZE,
+ sizeof(struct dm_verity_io) + v->shash_descsize + v->digest_size * 2);
+ if (!v->io_mempool) {
+ ti->error = "Cannot allocate io mempool";
+ r = -ENOMEM;
+ goto bad;
+ }
+
+ v->vec_mempool = mempool_create_kmalloc_pool(DM_VERITY_MEMPOOL_SIZE,
+ BIO_MAX_PAGES * sizeof(struct bio_vec));
+ if (!v->vec_mempool) {
+ ti->error = "Cannot allocate vector mempool";
+ r = -ENOMEM;
+ goto bad;
+ }
+
+ /* WQ_UNBOUND greatly improves performance when running on ramdisk */
+ v->verify_wq = alloc_workqueue("kverityd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND, num_online_cpus());
+ if (!v->verify_wq) {
+ ti->error = "Cannot allocate workqueue";
+ r = -ENOMEM;
+ goto bad;
+ }
+
+ return 0;
+
+bad:
+ verity_dtr(ti);
+
+ return r;
+}
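
Worked example for the sizing logic above (hypothetical parameters): with 4096-byte hash blocks and a 32-byte digest such as sha256, 4096 / 32 = 128 digests fit per block, so hash_per_block_bits = 7. For data_blocks = 262144 (2^18), (data_blocks - 1) is still non-zero after shifting by 0, 7 and 14, but zero after shifting by 21, so v->levels = 3. The layout loop then places, starting at hash_start, 1 block for the top level, 16 blocks for the middle level and 2048 blocks for the leaf level, giving v->hash_blocks = hash_start + 2065.
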
+
+static struct target_type verity_target = {
+ .name = "verity",
+ .version = {1, 0, 0},
+ .module = THIS_MODULE,
+ .ctr = verity_ctr,
+ .dtr = verity_dtr,
+ .map = verity_map,
+ .status = verity_status,
+ .ioctl = verity_ioctl,
+ .merge = verity_merge,
+ .iterate_devices = verity_iterate_devices,
+ .io_hints = verity_io_hints,
+};
+
+static int __init dm_verity_init(void)
+{
+ int r;
+
+ r = dm_register_target(&verity_target);
+ if (r < 0)
+ DMERR("register failed %d", r);
+
+ return r;
+}
+
+static void __exit dm_verity_exit(void)
+{
+ dm_unregister_target(&verity_target);
+}
+
+module_init(dm_verity_init);
+module_exit(dm_verity_exit);
+
+MODULE_AUTHOR("Mikulas Patocka <mpatocka@redhat.com>");
+MODULE_AUTHOR("Mandeep Baines <msb@chromium.org>");
+MODULE_AUTHOR("Will Drewry <wad@chromium.org>");
+MODULE_DESCRIPTION(DM_NAME " target for transparent disk integrity checking");
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index b89c548ec3f8..e24143cc2040 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1016,6 +1016,7 @@ static void __map_bio(struct dm_target *ti, struct bio *clone,
/*
* Store bio_set for cleanup.
*/
+ clone->bi_end_io = NULL;
clone->bi_private = md->bs;
bio_put(clone);
free_tio(md, tio);
diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
index d279c768f8f1..5709bfeab1e8 100644
--- a/drivers/md/persistent-data/dm-btree-internal.h
+++ b/drivers/md/persistent-data/dm-btree-internal.h
@@ -108,12 +108,9 @@ static inline void *value_base(struct node *n)
return &n->keys[le32_to_cpu(n->header.max_entries)];
}
-/*
- * FIXME: Now that value size is stored in node we don't need the third parm.
- */
-static inline void *value_ptr(struct node *n, uint32_t index, size_t value_size)
+static inline void *value_ptr(struct node *n, uint32_t index)
{
- BUG_ON(value_size != le32_to_cpu(n->header.value_size));
+ uint32_t value_size = le32_to_cpu(n->header.value_size);
return value_base(n) + (value_size * index);
}
diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
index 023fbc2d389e..aa71e2359a07 100644
--- a/drivers/md/persistent-data/dm-btree-remove.c
+++ b/drivers/md/persistent-data/dm-btree-remove.c
@@ -61,20 +61,20 @@ static void node_shift(struct node *n, int shift)
if (shift < 0) {
shift = -shift;
BUG_ON(shift > nr_entries);
- BUG_ON((void *) key_ptr(n, shift) >= value_ptr(n, shift, value_size));
+ BUG_ON((void *) key_ptr(n, shift) >= value_ptr(n, shift));
memmove(key_ptr(n, 0),
key_ptr(n, shift),
(nr_entries - shift) * sizeof(__le64));
- memmove(value_ptr(n, 0, value_size),
- value_ptr(n, shift, value_size),
+ memmove(value_ptr(n, 0),
+ value_ptr(n, shift),
(nr_entries - shift) * value_size);
} else {
BUG_ON(nr_entries + shift > le32_to_cpu(n->header.max_entries));
memmove(key_ptr(n, shift),
key_ptr(n, 0),
nr_entries * sizeof(__le64));
- memmove(value_ptr(n, shift, value_size),
- value_ptr(n, 0, value_size),
+ memmove(value_ptr(n, shift),
+ value_ptr(n, 0),
nr_entries * value_size);
}
}
@@ -91,16 +91,16 @@ static void node_copy(struct node *left, struct node *right, int shift)
memcpy(key_ptr(left, nr_left),
key_ptr(right, 0),
shift * sizeof(__le64));
- memcpy(value_ptr(left, nr_left, value_size),
- value_ptr(right, 0, value_size),
+ memcpy(value_ptr(left, nr_left),
+ value_ptr(right, 0),
shift * value_size);
} else {
BUG_ON(shift > le32_to_cpu(right->header.max_entries));
memcpy(key_ptr(right, 0),
key_ptr(left, nr_left - shift),
shift * sizeof(__le64));
- memcpy(value_ptr(right, 0, value_size),
- value_ptr(left, nr_left - shift, value_size),
+ memcpy(value_ptr(right, 0),
+ value_ptr(left, nr_left - shift),
shift * value_size);
}
}
@@ -120,26 +120,17 @@ static void delete_at(struct node *n, unsigned index)
key_ptr(n, index + 1),
nr_to_copy * sizeof(__le64));
- memmove(value_ptr(n, index, value_size),
- value_ptr(n, index + 1, value_size),
+ memmove(value_ptr(n, index),
+ value_ptr(n, index + 1),
nr_to_copy * value_size);
}
n->header.nr_entries = cpu_to_le32(nr_entries - 1);
}
-static unsigned del_threshold(struct node *n)
-{
- return le32_to_cpu(n->header.max_entries) / 3;
-}
-
static unsigned merge_threshold(struct node *n)
{
- /*
- * The extra one is because we know we're potentially going to
- * delete an entry.
- */
- return 2 * (le32_to_cpu(n->header.max_entries) / 3) + 1;
+ return le32_to_cpu(n->header.max_entries) / 3;
}
struct child {
@@ -175,7 +166,7 @@ static int init_child(struct dm_btree_info *info, struct node *parent,
if (inc)
inc_children(info->tm, result->n, &le64_type);
- *((__le64 *) value_ptr(parent, index, sizeof(__le64))) =
+ *((__le64 *) value_ptr(parent, index)) =
cpu_to_le64(dm_block_location(result->block));
return 0;
@@ -188,6 +179,15 @@ static int exit_child(struct dm_btree_info *info, struct child *c)
static void shift(struct node *left, struct node *right, int count)
{
+ uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
+ uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
+ uint32_t max_entries = le32_to_cpu(left->header.max_entries);
+ uint32_t r_max_entries = le32_to_cpu(right->header.max_entries);
+
+ BUG_ON(max_entries != r_max_entries);
+ BUG_ON(nr_left - count > max_entries);
+ BUG_ON(nr_right + count > max_entries);
+
if (!count)
return;
@@ -199,13 +199,8 @@ static void shift(struct node *left, struct node *right, int count)
node_shift(right, count);
}
- left->header.nr_entries =
- cpu_to_le32(le32_to_cpu(left->header.nr_entries) - count);
- BUG_ON(le32_to_cpu(left->header.nr_entries) > le32_to_cpu(left->header.max_entries));
-
- right->header.nr_entries =
- cpu_to_le32(le32_to_cpu(right->header.nr_entries) + count);
- BUG_ON(le32_to_cpu(right->header.nr_entries) > le32_to_cpu(right->header.max_entries));
+ left->header.nr_entries = cpu_to_le32(nr_left - count);
+ right->header.nr_entries = cpu_to_le32(nr_right + count);
}
static void __rebalance2(struct dm_btree_info *info, struct node *parent,
@@ -215,8 +210,9 @@ static void __rebalance2(struct dm_btree_info *info, struct node *parent,
struct node *right = r->n;
uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
+ unsigned threshold = 2 * merge_threshold(left) + 1;
- if (nr_left + nr_right <= merge_threshold(left)) {
+ if (nr_left + nr_right < threshold) {
/*
* Merge
*/
@@ -234,9 +230,6 @@ static void __rebalance2(struct dm_btree_info *info, struct node *parent,
* Rebalance.
*/
unsigned target_left = (nr_left + nr_right) / 2;
- unsigned shift_ = nr_left - target_left;
- BUG_ON(le32_to_cpu(left->header.max_entries) <= nr_left - shift_);
- BUG_ON(le32_to_cpu(right->header.max_entries) <= nr_right + shift_);
shift(left, right, nr_left - target_left);
*key_ptr(parent, r->index) = right->keys[0];
}
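
Worked example (hypothetical sizes): for nodes holding at most 126 entries, merge_threshold() returns 42 and the local threshold is 2 * 42 + 1 = 85. Two siblings with 40 and 44 entries (84 total) are merged into a single node, which fits comfortably; at 85 or more combined entries they are instead rebalanced so that each ends up near (nr_left + nr_right) / 2.
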
@@ -272,6 +265,84 @@ static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
return exit_child(info, &right);
}
+/*
+ * We dump as many entries from the center node as possible into left, then
+ * the rest into right, then rebalance2. This wastes some CPU, but we want
+ * something simple for now.
+ */
+static void delete_center_node(struct dm_btree_info *info, struct node *parent,
+ struct child *l, struct child *c, struct child *r,
+ struct node *left, struct node *center, struct node *right,
+ uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
+{
+ uint32_t max_entries = le32_to_cpu(left->header.max_entries);
+ unsigned shift = min(max_entries - nr_left, nr_center);
+
+ BUG_ON(nr_left + shift > max_entries);
+ node_copy(left, center, -shift);
+ left->header.nr_entries = cpu_to_le32(nr_left + shift);
+
+ if (shift != nr_center) {
+ shift = nr_center - shift;
+ BUG_ON((nr_right + shift) > max_entries);
+ node_shift(right, shift);
+ node_copy(center, right, shift);
+ right->header.nr_entries = cpu_to_le32(nr_right + shift);
+ }
+ *key_ptr(parent, r->index) = right->keys[0];
+
+ delete_at(parent, c->index);
+ r->index--;
+
+ dm_tm_dec(info->tm, dm_block_location(c->block));
+ __rebalance2(info, parent, l, r);
+}
+
+/*
+ * Redistributes entries among 3 sibling nodes.
+ */
+static void redistribute3(struct dm_btree_info *info, struct node *parent,
+ struct child *l, struct child *c, struct child *r,
+ struct node *left, struct node *center, struct node *right,
+ uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
+{
+ int s;
+ uint32_t max_entries = le32_to_cpu(left->header.max_entries);
+ unsigned target = (nr_left + nr_center + nr_right) / 3;
+ BUG_ON(target > max_entries);
+
+ if (nr_left < nr_right) {
+ s = nr_left - target;
+
+ if (s < 0 && nr_center < -s) {
+ /* not enough in central node */
+ shift(left, center, -nr_center);
+ s += nr_center;
+ shift(left, right, s);
+ nr_right += s;
+ } else
+ shift(left, center, s);
+
+ shift(center, right, target - nr_right);
+
+ } else {
+ s = target - nr_right;
+ if (s > 0 && nr_center < s) {
+ /* not enough in central node */
+ shift(center, right, nr_center);
+ s -= nr_center;
+ shift(left, right, s);
+ nr_left -= s;
+ } else
+ shift(center, right, s);
+
+ shift(left, center, nr_left - target);
+ }
+
+ *key_ptr(parent, c->index) = center->keys[0];
+ *key_ptr(parent, r->index) = right->keys[0];
+}
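
Worked example (hypothetical counts): nr_left = 10, nr_center = 2, nr_right = 40 gives target = 52 / 3 = 17. The left node is short and the center cannot cover the deficit, so the center's 2 entries move into left, 5 more move from right into left (left now 17, right 35), and the final shift moves 18 entries from right into center, leaving 17 / 18 / 17.
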
+
static void __rebalance3(struct dm_btree_info *info, struct node *parent,
struct child *l, struct child *c, struct child *r)
{
@@ -282,62 +353,18 @@ static void __rebalance3(struct dm_btree_info *info, struct node *parent,
uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
uint32_t nr_center = le32_to_cpu(center->header.nr_entries);
uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
- uint32_t max_entries = le32_to_cpu(left->header.max_entries);
- unsigned target;
+ unsigned threshold = merge_threshold(left) * 4 + 1;
BUG_ON(left->header.max_entries != center->header.max_entries);
BUG_ON(center->header.max_entries != right->header.max_entries);
- if (((nr_left + nr_center + nr_right) / 2) < merge_threshold(center)) {
- /*
- * Delete center node:
- *
- * We dump as many entries from center as possible into
- * left, then the rest in right, then rebalance2. This
- * wastes some cpu, but I want something simple atm.
- */
- unsigned shift = min(max_entries - nr_left, nr_center);
-
- BUG_ON(nr_left + shift > max_entries);
- node_copy(left, center, -shift);
- left->header.nr_entries = cpu_to_le32(nr_left + shift);
-
- if (shift != nr_center) {
- shift = nr_center - shift;
- BUG_ON((nr_right + shift) >= max_entries);
- node_shift(right, shift);
- node_copy(center, right, shift);
- right->header.nr_entries = cpu_to_le32(nr_right + shift);
- }
- *key_ptr(parent, r->index) = right->keys[0];
-
- delete_at(parent, c->index);
- r->index--;
-
- dm_tm_dec(info->tm, dm_block_location(c->block));
- __rebalance2(info, parent, l, r);
-
- return;
- }
-
- /*
- * Rebalance
- */
- target = (nr_left + nr_center + nr_right) / 3;
- BUG_ON(target > max_entries);
-
- /*
- * Adjust the left node
- */
- shift(left, center, nr_left - target);
-
- /*
- * Adjust the right node
- */
- shift(center, right, target - nr_right);
- *key_ptr(parent, c->index) = center->keys[0];
- *key_ptr(parent, r->index) = right->keys[0];
+ if ((nr_left + nr_center + nr_right) < threshold)
+ delete_center_node(info, parent, l, c, r, left, center, right,
+ nr_left, nr_center, nr_right);
+ else
+ redistribute3(info, parent, l, c, r, left, center, right,
+ nr_left, nr_center, nr_right);
}
static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
@@ -441,9 +468,6 @@ static int rebalance_children(struct shadow_spine *s,
if (r)
return r;
- if (child_entries > del_threshold(n))
- return 0;
-
has_left_sibling = i > 0;
has_right_sibling = i < (le32_to_cpu(n->header.nr_entries) - 1);
@@ -496,7 +520,7 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
*/
if (shadow_has_parent(s)) {
__le64 location = cpu_to_le64(dm_block_location(shadow_current(s)));
- memcpy(value_ptr(dm_block_data(shadow_parent(s)), i, sizeof(__le64)),
+ memcpy(value_ptr(dm_block_data(shadow_parent(s)), i),
&location, sizeof(__le64));
}
@@ -553,7 +577,7 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
if (info->value_type.dec)
info->value_type.dec(info->value_type.context,
- value_ptr(n, index, info->value_type.size));
+ value_ptr(n, index));
delete_at(n, index);
}
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index bd1e7ffbe26c..d12b2cc51f1a 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -74,8 +74,7 @@ void inc_children(struct dm_transaction_manager *tm, struct node *n,
dm_tm_inc(tm, value64(n, i));
else if (vt->inc)
for (i = 0; i < nr_entries; i++)
- vt->inc(vt->context,
- value_ptr(n, i, vt->size));
+ vt->inc(vt->context, value_ptr(n, i));
}
static int insert_at(size_t value_size, struct node *node, unsigned index,
@@ -281,7 +280,7 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
for (i = 0; i < f->nr_children; i++)
info->value_type.dec(info->value_type.context,
- value_ptr(f->n, i, info->value_type.size));
+ value_ptr(f->n, i));
}
f->current_child = f->nr_children;
}
@@ -320,7 +319,7 @@ static int btree_lookup_raw(struct ro_spine *s, dm_block_t block, uint64_t key,
} while (!(flags & LEAF_NODE));
*result_key = le64_to_cpu(ro_node(s)->keys[i]);
- memcpy(v, value_ptr(ro_node(s), i, value_size), value_size);
+ memcpy(v, value_ptr(ro_node(s), i), value_size);
return 0;
}
@@ -432,7 +431,7 @@ static int btree_split_sibling(struct shadow_spine *s, dm_block_t root,
size = le32_to_cpu(ln->header.flags) & INTERNAL_NODE ?
sizeof(uint64_t) : s->info->value_type.size;
- memcpy(value_ptr(rn, 0, size), value_ptr(ln, nr_left, size),
+ memcpy(value_ptr(rn, 0), value_ptr(ln, nr_left),
size * nr_right);
/*
@@ -443,7 +442,7 @@ static int btree_split_sibling(struct shadow_spine *s, dm_block_t root,
pn = dm_block_data(parent);
location = cpu_to_le64(dm_block_location(left));
__dm_bless_for_disk(&location);
- memcpy_disk(value_ptr(pn, parent_index, sizeof(__le64)),
+ memcpy_disk(value_ptr(pn, parent_index),
&location, sizeof(__le64));
location = cpu_to_le64(dm_block_location(right));
@@ -529,8 +528,8 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
sizeof(__le64) : s->info->value_type.size;
- memcpy(value_ptr(ln, 0, size), value_ptr(pn, 0, size), nr_left * size);
- memcpy(value_ptr(rn, 0, size), value_ptr(pn, nr_left, size),
+ memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
+ memcpy(value_ptr(rn, 0), value_ptr(pn, nr_left),
nr_right * size);
/* new_parent should just point to l and r now */
@@ -545,12 +544,12 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
val = cpu_to_le64(dm_block_location(left));
__dm_bless_for_disk(&val);
pn->keys[0] = ln->keys[0];
- memcpy_disk(value_ptr(pn, 0, sizeof(__le64)), &val, sizeof(__le64));
+ memcpy_disk(value_ptr(pn, 0), &val, sizeof(__le64));
val = cpu_to_le64(dm_block_location(right));
__dm_bless_for_disk(&val);
pn->keys[1] = rn->keys[0];
- memcpy_disk(value_ptr(pn, 1, sizeof(__le64)), &val, sizeof(__le64));
+ memcpy_disk(value_ptr(pn, 1), &val, sizeof(__le64));
/*
* rejig the spine. This is ugly, since it knows too
@@ -595,7 +594,7 @@ static int btree_insert_raw(struct shadow_spine *s, dm_block_t root,
__le64 location = cpu_to_le64(dm_block_location(shadow_current(s)));
__dm_bless_for_disk(&location);
- memcpy_disk(value_ptr(dm_block_data(shadow_parent(s)), i, sizeof(uint64_t)),
+ memcpy_disk(value_ptr(dm_block_data(shadow_parent(s)), i),
&location, sizeof(__le64));
}
@@ -710,12 +709,12 @@ static int insert(struct dm_btree_info *info, dm_block_t root,
(!info->value_type.equal ||
!info->value_type.equal(
info->value_type.context,
- value_ptr(n, index, info->value_type.size),
+ value_ptr(n, index),
value))) {
info->value_type.dec(info->value_type.context,
- value_ptr(n, index, info->value_type.size));
+ value_ptr(n, index));
}
- memcpy_disk(value_ptr(n, index, info->value_type.size),
+ memcpy_disk(value_ptr(n, index),
value, info->value_type.size);
}
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
index df2494c06cdc..ff3beed6ad2d 100644
--- a/drivers/md/persistent-data/dm-space-map-common.c
+++ b/drivers/md/persistent-data/dm-space-map-common.c
@@ -405,8 +405,6 @@ int sm_ll_insert(struct ll_disk *ll, dm_block_t b,
if (r < 0)
return r;
-#if 0
- /* FIXME: dm_btree_remove doesn't handle this yet */
if (old > 2) {
r = dm_btree_remove(&ll->ref_count_info,
ll->ref_count_root,
@@ -414,7 +412,6 @@ int sm_ll_insert(struct ll_disk *ll, dm_block_t b,
if (r)
return r;
}
-#endif
} else {
__le32 le_rc = cpu_to_le32(ref_count);
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 1489c3540f96..243e0c663c37 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -848,8 +848,9 @@ config MCP_SA11X0
# Chip drivers
config MCP_UCB1200
- tristate "Support for UCB1200 / UCB1300"
- depends on MCP
+ bool "Support for UCB1200 / UCB1300"
+ depends on MCP_SA11X0
+ select MCP
config MCP_UCB1200_TS
tristate "Touchscreen interface support"
diff --git a/drivers/mfd/mcp-core.c b/drivers/mfd/mcp-core.c
index 86cc3f7841cd..6acf2e03f2ba 100644
--- a/drivers/mfd/mcp-core.c
+++ b/drivers/mfd/mcp-core.c
@@ -19,7 +19,6 @@
#include <linux/string.h>
#include <linux/mfd/mcp.h>
-#include <mach/dma.h>
#include <asm/system.h>
@@ -48,39 +47,11 @@ static int mcp_bus_remove(struct device *dev)
return 0;
}
-static int mcp_bus_suspend(struct device *dev, pm_message_t state)
-{
- struct mcp *mcp = to_mcp(dev);
- int ret = 0;
-
- if (dev->driver) {
- struct mcp_driver *drv = to_mcp_driver(dev->driver);
-
- ret = drv->suspend(mcp, state);
- }
- return ret;
-}
-
-static int mcp_bus_resume(struct device *dev)
-{
- struct mcp *mcp = to_mcp(dev);
- int ret = 0;
-
- if (dev->driver) {
- struct mcp_driver *drv = to_mcp_driver(dev->driver);
-
- ret = drv->resume(mcp);
- }
- return ret;
-}
-
static struct bus_type mcp_bus_type = {
.name = "mcp",
.match = mcp_bus_match,
.probe = mcp_bus_probe,
.remove = mcp_bus_remove,
- .suspend = mcp_bus_suspend,
- .resume = mcp_bus_resume,
};
/**
@@ -208,6 +179,7 @@ struct mcp *mcp_host_alloc(struct device *parent, size_t size)
mcp = kzalloc(sizeof(struct mcp) + size, GFP_KERNEL);
if (mcp) {
spin_lock_init(&mcp->lock);
+ device_initialize(&mcp->attached_device);
mcp->attached_device.parent = parent;
mcp->attached_device.bus = &mcp_bus_type;
mcp->attached_device.dma_mask = parent->dma_mask;
@@ -217,18 +189,25 @@ struct mcp *mcp_host_alloc(struct device *parent, size_t size)
}
EXPORT_SYMBOL(mcp_host_alloc);
-int mcp_host_register(struct mcp *mcp)
+int mcp_host_add(struct mcp *mcp, void *pdata)
{
+ mcp->attached_device.platform_data = pdata;
dev_set_name(&mcp->attached_device, "mcp0");
- return device_register(&mcp->attached_device);
+ return device_add(&mcp->attached_device);
+}
+EXPORT_SYMBOL(mcp_host_add);
+
+void mcp_host_del(struct mcp *mcp)
+{
+ device_del(&mcp->attached_device);
}
-EXPORT_SYMBOL(mcp_host_register);
+EXPORT_SYMBOL(mcp_host_del);
-void mcp_host_unregister(struct mcp *mcp)
+void mcp_host_free(struct mcp *mcp)
{
- device_unregister(&mcp->attached_device);
+ put_device(&mcp->attached_device);
}
-EXPORT_SYMBOL(mcp_host_unregister);
+EXPORT_SYMBOL(mcp_host_free);
int mcp_driver_register(struct mcp_driver *mcpdrv)
{
diff --git a/drivers/mfd/mcp-sa11x0.c b/drivers/mfd/mcp-sa11x0.c
index 02c53a0766c4..1c0ceacaa1f6 100644
--- a/drivers/mfd/mcp-sa11x0.c
+++ b/drivers/mfd/mcp-sa11x0.c
@@ -13,51 +13,61 @@
*/
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
#include <linux/mfd/mcp.h>
-#include <mach/dma.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/system.h>
#include <mach/mcp.h>
-#include <mach/assabet.h>
-
+#define DRIVER_NAME "sa11x0-mcp"
struct mcp_sa11x0 {
- u32 mccr0;
- u32 mccr1;
+ void __iomem *base0;
+ void __iomem *base1;
+ u32 mccr0;
+ u32 mccr1;
};
+/* Register offsets */
+#define MCCR0(m) ((m)->base0 + 0x00)
+#define MCDR0(m) ((m)->base0 + 0x08)
+#define MCDR1(m) ((m)->base0 + 0x0c)
+#define MCDR2(m) ((m)->base0 + 0x10)
+#define MCSR(m) ((m)->base0 + 0x18)
+#define MCCR1(m) ((m)->base1 + 0x00)
+
#define priv(mcp) ((struct mcp_sa11x0 *)mcp_priv(mcp))
static void
mcp_sa11x0_set_telecom_divisor(struct mcp *mcp, unsigned int divisor)
{
- unsigned int mccr0;
+ struct mcp_sa11x0 *m = priv(mcp);
divisor /= 32;
- mccr0 = Ser4MCCR0 & ~0x00007f00;
- mccr0 |= divisor << 8;
- Ser4MCCR0 = mccr0;
+ m->mccr0 &= ~0x00007f00;
+ m->mccr0 |= divisor << 8;
+ writel_relaxed(m->mccr0, MCCR0(m));
}
static void
mcp_sa11x0_set_audio_divisor(struct mcp *mcp, unsigned int divisor)
{
- unsigned int mccr0;
+ struct mcp_sa11x0 *m = priv(mcp);
divisor /= 32;
- mccr0 = Ser4MCCR0 & ~0x0000007f;
- mccr0 |= divisor;
- Ser4MCCR0 = mccr0;
+ m->mccr0 &= ~0x0000007f;
+ m->mccr0 |= divisor;
+ writel_relaxed(m->mccr0, MCCR0(m));
}
/*
@@ -69,14 +79,15 @@ mcp_sa11x0_set_audio_divisor(struct mcp *mcp, unsigned int divisor)
static void
mcp_sa11x0_write(struct mcp *mcp, unsigned int reg, unsigned int val)
{
+ struct mcp_sa11x0 *m = priv(mcp);
int ret = -ETIME;
int i;
- Ser4MCDR2 = reg << 17 | MCDR2_Wr | (val & 0xffff);
+ writel_relaxed(reg << 17 | MCDR2_Wr | (val & 0xffff), MCDR2(m));
for (i = 0; i < 2; i++) {
udelay(mcp->rw_timeout);
- if (Ser4MCSR & MCSR_CWC) {
+ if (readl_relaxed(MCSR(m)) & MCSR_CWC) {
ret = 0;
break;
}
@@ -95,15 +106,16 @@ mcp_sa11x0_write(struct mcp *mcp, unsigned int reg, unsigned int val)
static unsigned int
mcp_sa11x0_read(struct mcp *mcp, unsigned int reg)
{
+ struct mcp_sa11x0 *m = priv(mcp);
int ret = -ETIME;
int i;
- Ser4MCDR2 = reg << 17 | MCDR2_Rd;
+ writel_relaxed(reg << 17 | MCDR2_Rd, MCDR2(m));
for (i = 0; i < 2; i++) {
udelay(mcp->rw_timeout);
- if (Ser4MCSR & MCSR_CRC) {
- ret = Ser4MCDR2 & 0xffff;
+ if (readl_relaxed(MCSR(m)) & MCSR_CRC) {
+ ret = readl_relaxed(MCDR2(m)) & 0xffff;
break;
}
}
@@ -116,13 +128,19 @@ mcp_sa11x0_read(struct mcp *mcp, unsigned int reg)
static void mcp_sa11x0_enable(struct mcp *mcp)
{
- Ser4MCSR = -1;
- Ser4MCCR0 |= MCCR0_MCE;
+ struct mcp_sa11x0 *m = priv(mcp);
+
+ writel(-1, MCSR(m));
+ m->mccr0 |= MCCR0_MCE;
+ writel_relaxed(m->mccr0, MCCR0(m));
}
static void mcp_sa11x0_disable(struct mcp *mcp)
{
- Ser4MCCR0 &= ~MCCR0_MCE;
+ struct mcp_sa11x0 *m = priv(mcp);
+
+ m->mccr0 &= ~MCCR0_MCE;
+ writel_relaxed(m->mccr0, MCCR0(m));
}
/*
@@ -137,55 +155,64 @@ static struct mcp_ops mcp_sa11x0 = {
.disable = mcp_sa11x0_disable,
};
-static int mcp_sa11x0_probe(struct platform_device *pdev)
+static int mcp_sa11x0_probe(struct platform_device *dev)
{
- struct mcp_plat_data *data = pdev->dev.platform_data;
+ struct mcp_plat_data *data = dev->dev.platform_data;
+ struct resource *mem0, *mem1;
+ struct mcp_sa11x0 *m;
struct mcp *mcp;
int ret;
if (!data)
return -ENODEV;
- if (!request_mem_region(0x80060000, 0x60, "sa11x0-mcp"))
- return -EBUSY;
+ mem0 = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ mem1 = platform_get_resource(dev, IORESOURCE_MEM, 1);
+ if (!mem0 || !mem1)
+ return -ENXIO;
+
+ if (!request_mem_region(mem0->start, resource_size(mem0),
+ DRIVER_NAME)) {
+ ret = -EBUSY;
+ goto err_mem0;
+ }
- mcp = mcp_host_alloc(&pdev->dev, sizeof(struct mcp_sa11x0));
+ if (!request_mem_region(mem1->start, resource_size(mem1),
+ DRIVER_NAME)) {
+ ret = -EBUSY;
+ goto err_mem1;
+ }
+
+ mcp = mcp_host_alloc(&dev->dev, sizeof(struct mcp_sa11x0));
if (!mcp) {
ret = -ENOMEM;
- goto release;
+ goto err_alloc;
}
mcp->owner = THIS_MODULE;
mcp->ops = &mcp_sa11x0;
mcp->sclk_rate = data->sclk_rate;
- mcp->dma_audio_rd = DMA_Ser4MCP0Rd;
- mcp->dma_audio_wr = DMA_Ser4MCP0Wr;
- mcp->dma_telco_rd = DMA_Ser4MCP1Rd;
- mcp->dma_telco_wr = DMA_Ser4MCP1Wr;
- mcp->gpio_base = data->gpio_base;
- platform_set_drvdata(pdev, mcp);
+ m = priv(mcp);
+ m->mccr0 = data->mccr0 | 0x7f7f;
+ m->mccr1 = data->mccr1;
- if (machine_is_assabet()) {
- ASSABET_BCR_set(ASSABET_BCR_CODEC_RST);
+ m->base0 = ioremap(mem0->start, resource_size(mem0));
+ m->base1 = ioremap(mem1->start, resource_size(mem1));
+ if (!m->base0 || !m->base1) {
+ ret = -ENOMEM;
+ goto err_ioremap;
}
- /*
- * Setup the PPC unit correctly.
- */
- PPDR &= ~PPC_RXD4;
- PPDR |= PPC_TXD4 | PPC_SCLK | PPC_SFRM;
- PSDR |= PPC_RXD4;
- PSDR &= ~(PPC_TXD4 | PPC_SCLK | PPC_SFRM);
- PPSR &= ~(PPC_TXD4 | PPC_SCLK | PPC_SFRM);
+ platform_set_drvdata(dev, mcp);
/*
* Initialise device. Note that we initially
* set the sampling rate to minimum.
*/
- Ser4MCSR = -1;
- Ser4MCCR1 = data->mccr1;
- Ser4MCCR0 = data->mccr0 | 0x7f7f;
+ writel_relaxed(-1, MCSR(m));
+ writel_relaxed(m->mccr1, MCCR1(m));
+ writel_relaxed(m->mccr0, MCCR0(m));
/*
* Calculate the read/write timeout (us) from the bit clock
@@ -195,62 +222,90 @@ static int mcp_sa11x0_probe(struct platform_device *pdev)
mcp->rw_timeout = (64 * 3 * 1000000 + mcp->sclk_rate - 1) /
mcp->sclk_rate;
- ret = mcp_host_register(mcp);
+ ret = mcp_host_add(mcp, data->codec_pdata);
if (ret == 0)
- goto out;
+ return 0;
- release:
- release_mem_region(0x80060000, 0x60);
- platform_set_drvdata(pdev, NULL);
+ platform_set_drvdata(dev, NULL);
- out:
+ err_ioremap:
+ iounmap(m->base1);
+ iounmap(m->base0);
+ mcp_host_free(mcp);
+ err_alloc:
+ release_mem_region(mem1->start, resource_size(mem1));
+ err_mem1:
+ release_mem_region(mem0->start, resource_size(mem0));
+ err_mem0:
return ret;
}
static int mcp_sa11x0_remove(struct platform_device *dev)
{
struct mcp *mcp = platform_get_drvdata(dev);
+ struct mcp_sa11x0 *m = priv(mcp);
+ struct resource *mem0, *mem1;
+
+ if (m->mccr0 & MCCR0_MCE)
+ dev_warn(&dev->dev,
+ "device left active (missing disable call?)\n");
+
+ mem0 = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ mem1 = platform_get_resource(dev, IORESOURCE_MEM, 1);
platform_set_drvdata(dev, NULL);
- mcp_host_unregister(mcp);
- release_mem_region(0x80060000, 0x60);
+ mcp_host_del(mcp);
+ iounmap(m->base1);
+ iounmap(m->base0);
+ mcp_host_free(mcp);
+ release_mem_region(mem1->start, resource_size(mem1));
+ release_mem_region(mem0->start, resource_size(mem0));
return 0;
}
-static int mcp_sa11x0_suspend(struct platform_device *dev, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int mcp_sa11x0_suspend(struct device *dev)
{
- struct mcp *mcp = platform_get_drvdata(dev);
+ struct mcp_sa11x0 *m = priv(dev_get_drvdata(dev));
+
+ if (m->mccr0 & MCCR0_MCE)
+ dev_warn(dev, "device left active (missing disable call?)\n");
- priv(mcp)->mccr0 = Ser4MCCR0;
- priv(mcp)->mccr1 = Ser4MCCR1;
- Ser4MCCR0 &= ~MCCR0_MCE;
+ writel(m->mccr0 & ~MCCR0_MCE, MCCR0(m));
return 0;
}
-static int mcp_sa11x0_resume(struct platform_device *dev)
+static int mcp_sa11x0_resume(struct device *dev)
{
- struct mcp *mcp = platform_get_drvdata(dev);
+ struct mcp_sa11x0 *m = priv(dev_get_drvdata(dev));
- Ser4MCCR1 = priv(mcp)->mccr1;
- Ser4MCCR0 = priv(mcp)->mccr0;
+ writel_relaxed(m->mccr1, MCCR1(m));
+ writel_relaxed(m->mccr0, MCCR0(m));
return 0;
}
-
-/*
- * The driver for the SA11x0 MCP port.
- */
-MODULE_ALIAS("platform:sa11x0-mcp");
+#endif
+
+static const struct dev_pm_ops mcp_sa11x0_pm_ops = {
+#ifdef CONFIG_PM_SLEEP
+ .suspend = mcp_sa11x0_suspend,
+ .freeze = mcp_sa11x0_suspend,
+ .poweroff = mcp_sa11x0_suspend,
+ .resume_noirq = mcp_sa11x0_resume,
+ .thaw_noirq = mcp_sa11x0_resume,
+ .restore_noirq = mcp_sa11x0_resume,
+#endif
+};
static struct platform_driver mcp_sa11x0_driver = {
.probe = mcp_sa11x0_probe,
.remove = mcp_sa11x0_remove,
- .suspend = mcp_sa11x0_suspend,
- .resume = mcp_sa11x0_resume,
.driver = {
- .name = "sa11x0-mcp",
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .pm = &mcp_sa11x0_pm_ops,
},
};
@@ -259,6 +314,7 @@ static struct platform_driver mcp_sa11x0_driver = {
*/
module_platform_driver(mcp_sa11x0_driver);
+MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
MODULE_DESCRIPTION("SA11x0 multimedia communications port driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/ucb1x00-assabet.c b/drivers/mfd/ucb1x00-assabet.c
index cea9da60850d..b63c0756a669 100644
--- a/drivers/mfd/ucb1x00-assabet.c
+++ b/drivers/mfd/ucb1x00-assabet.c
@@ -11,14 +11,15 @@
*/
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/err.h>
#include <linux/fs.h>
+#include <linux/gpio_keys.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
#include <linux/proc_fs.h>
-#include <linux/device.h>
#include <linux/mfd/ucb1x00.h>
-#include <mach/dma.h>
-
-
#define UCB1X00_ATTR(name,input)\
static ssize_t name##_show(struct device *dev, struct device_attribute *attr, \
char *buf) \
@@ -38,14 +39,45 @@ UCB1X00_ATTR(batt_temp, UCB_ADC_INP_AD2);
static int ucb1x00_assabet_add(struct ucb1x00_dev *dev)
{
- device_create_file(&dev->ucb->dev, &dev_attr_vbatt);
- device_create_file(&dev->ucb->dev, &dev_attr_vcharger);
- device_create_file(&dev->ucb->dev, &dev_attr_batt_temp);
+ struct ucb1x00 *ucb = dev->ucb;
+ struct platform_device *pdev;
+ struct gpio_keys_platform_data keys;
+ static struct gpio_keys_button buttons[6];
+ unsigned i;
+
+ memset(buttons, 0, sizeof(buttons));
+ memset(&keys, 0, sizeof(keys));
+
+ for (i = 0; i < ARRAY_SIZE(buttons); i++) {
+ buttons[i].code = BTN_0 + i;
+ buttons[i].gpio = ucb->gpio.base + i;
+ buttons[i].type = EV_KEY;
+ buttons[i].can_disable = true;
+ }
+
+ keys.buttons = buttons;
+ keys.nbuttons = ARRAY_SIZE(buttons);
+ keys.poll_interval = 50;
+ keys.name = "ucb1x00";
+
+ pdev = platform_device_register_data(&ucb->dev, "gpio-keys", -1,
+ &keys, sizeof(keys));
+
+ device_create_file(&ucb->dev, &dev_attr_vbatt);
+ device_create_file(&ucb->dev, &dev_attr_vcharger);
+ device_create_file(&ucb->dev, &dev_attr_batt_temp);
+
+ dev->priv = pdev;
return 0;
}
static void ucb1x00_assabet_remove(struct ucb1x00_dev *dev)
{
+ struct platform_device *pdev = dev->priv;
+
+ if (!IS_ERR(pdev))
+ platform_device_unregister(pdev);
+
device_remove_file(&dev->ucb->dev, &dev_attr_batt_temp);
device_remove_file(&dev->ucb->dev, &dev_attr_vcharger);
device_remove_file(&dev->ucb->dev, &dev_attr_vbatt);
diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
index febc90cdef7e..70f02daeb22a 100644
--- a/drivers/mfd/ucb1x00-core.c
+++ b/drivers/mfd/ucb1x00-core.c
@@ -23,14 +23,12 @@
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/mfd/ucb1x00.h>
+#include <linux/pm.h>
#include <linux/gpio.h>
-#include <linux/semaphore.h>
-
-#include <mach/dma.h>
-#include <mach/hardware.h>
static DEFINE_MUTEX(ucb1x00_mutex);
static LIST_HEAD(ucb1x00_drivers);
@@ -102,7 +100,7 @@ void ucb1x00_io_write(struct ucb1x00 *ucb, unsigned int set, unsigned int clear)
* ucb1x00_enable must have been called to enable the comms
* before using this function.
*
- * This function does not take any semaphores or spinlocks.
+ * This function does not take any mutexes or spinlocks.
*/
unsigned int ucb1x00_io_read(struct ucb1x00 *ucb)
{
@@ -120,14 +118,22 @@ static void ucb1x00_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
else
ucb->io_out &= ~(1 << offset);
+ ucb1x00_enable(ucb);
ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);
+ ucb1x00_disable(ucb);
spin_unlock_irqrestore(&ucb->io_lock, flags);
}
static int ucb1x00_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct ucb1x00 *ucb = container_of(chip, struct ucb1x00, gpio);
- return ucb1x00_reg_read(ucb, UCB_IO_DATA) & (1 << offset);
+ unsigned val;
+
+ ucb1x00_enable(ucb);
+ val = ucb1x00_reg_read(ucb, UCB_IO_DATA);
+ ucb1x00_disable(ucb);
+
+ return val & (1 << offset);
}
static int ucb1x00_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
@@ -137,7 +143,9 @@ static int ucb1x00_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
spin_lock_irqsave(&ucb->io_lock, flags);
ucb->io_dir &= ~(1 << offset);
+ ucb1x00_enable(ucb);
ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir);
+ ucb1x00_disable(ucb);
spin_unlock_irqrestore(&ucb->io_lock, flags);
return 0;
@@ -157,6 +165,7 @@ static int ucb1x00_gpio_direction_output(struct gpio_chip *chip, unsigned offset
else
ucb->io_out &= ~mask;
+ ucb1x00_enable(ucb);
if (old != ucb->io_out)
ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);
@@ -164,11 +173,19 @@ static int ucb1x00_gpio_direction_output(struct gpio_chip *chip, unsigned offset
ucb->io_dir |= mask;
ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir);
}
+ ucb1x00_disable(ucb);
spin_unlock_irqrestore(&ucb->io_lock, flags);
return 0;
}
+static int ucb1x00_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ struct ucb1x00 *ucb = container_of(chip, struct ucb1x00, gpio);
+
+ return ucb->irq_base > 0 ? ucb->irq_base + offset : -ENXIO;
+}
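
Usage sketch (assumed caller, not part of this patch): once the probe path below has set gpio_base and allocated the 16 interrupt descriptors, a board driver can resolve the interrupt for UCB GPIO 3 with gpio_to_irq(pdata->gpio_base + 3); gpiolib routes that call to this hook, which returns ucb->irq_base + 3, or -ENXIO if no irq range was allocated.
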
+
/*
* UCB1300 data sheet says we must:
* 1. enable ADC => 5us (including reference startup time)
@@ -186,7 +203,7 @@ static int ucb1x00_gpio_direction_output(struct gpio_chip *chip, unsigned offset
* Any code wishing to use the ADC converter must call this
* function prior to using it.
*
- * This function takes the ADC semaphore to prevent two or more
+ * This function takes the ADC mutex to prevent two or more
* concurrent uses, and therefore may sleep. As a result, it
* can only be called from process context, not interrupt
* context.
@@ -196,7 +213,7 @@ static int ucb1x00_gpio_direction_output(struct gpio_chip *chip, unsigned offset
*/
void ucb1x00_adc_enable(struct ucb1x00 *ucb)
{
- down(&ucb->adc_sem);
+ mutex_lock(&ucb->adc_mutex);
ucb->adc_cr |= UCB_ADC_ENA;
@@ -218,7 +235,7 @@ void ucb1x00_adc_enable(struct ucb1x00 *ucb)
* complete (2 frames max without sync).
*
* If called for a synchronised ADC conversion, it may sleep
- * with the ADC semaphore held.
+ * with the ADC mutex held.
*/
unsigned int ucb1x00_adc_read(struct ucb1x00 *ucb, int adc_channel, int sync)
{
@@ -246,7 +263,7 @@ unsigned int ucb1x00_adc_read(struct ucb1x00 *ucb, int adc_channel, int sync)
* ucb1x00_adc_disable - disable the ADC converter
* @ucb: UCB1x00 structure describing chip
*
- * Disable the ADC converter and release the ADC semaphore.
+ * Disable the ADC converter and release the ADC mutex.
*/
void ucb1x00_adc_disable(struct ucb1x00 *ucb)
{
@@ -254,7 +271,7 @@ void ucb1x00_adc_disable(struct ucb1x00 *ucb)
ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr);
ucb1x00_disable(ucb);
- up(&ucb->adc_sem);
+ mutex_unlock(&ucb->adc_mutex);
}
/*
@@ -265,10 +282,9 @@ void ucb1x00_adc_disable(struct ucb1x00 *ucb)
* SIBCLK to talk to the chip. We leave the clock running until
* we have finished processing all interrupts from the chip.
*/
-static irqreturn_t ucb1x00_irq(int irqnr, void *devid)
+static void ucb1x00_irq(unsigned int irq, struct irq_desc *desc)
{
- struct ucb1x00 *ucb = devid;
- struct ucb1x00_irq *irq;
+ struct ucb1x00 *ucb = irq_desc_get_handler_data(desc);
unsigned int isr, i;
ucb1x00_enable(ucb);
@@ -276,157 +292,104 @@ static irqreturn_t ucb1x00_irq(int irqnr, void *devid)
ucb1x00_reg_write(ucb, UCB_IE_CLEAR, isr);
ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0);
- for (i = 0, irq = ucb->irq_handler; i < 16 && isr; i++, isr >>= 1, irq++)
- if (isr & 1 && irq->fn)
- irq->fn(i, irq->devid);
+ for (i = 0; i < 16 && isr; i++, isr >>= 1)
+ if (isr & 1)
+ generic_handle_irq(ucb->irq_base + i);
ucb1x00_disable(ucb);
-
- return IRQ_HANDLED;
}
-/**
- * ucb1x00_hook_irq - hook a UCB1x00 interrupt
- * @ucb: UCB1x00 structure describing chip
- * @idx: interrupt index
- * @fn: function to call when interrupt is triggered
- * @devid: device id to pass to interrupt handler
- *
- * Hook the specified interrupt. You can only register one handler
- * for each interrupt source. The interrupt source is not enabled
- * by this function; use ucb1x00_enable_irq instead.
- *
- * Interrupt handlers will be called with other interrupts enabled.
- *
- * Returns zero on success, or one of the following errors:
- * -EINVAL if the interrupt index is invalid
- * -EBUSY if the interrupt has already been hooked
- */
-int ucb1x00_hook_irq(struct ucb1x00 *ucb, unsigned int idx, void (*fn)(int, void *), void *devid)
+static void ucb1x00_irq_update(struct ucb1x00 *ucb, unsigned mask)
{
- struct ucb1x00_irq *irq;
- int ret = -EINVAL;
-
- if (idx < 16) {
- irq = ucb->irq_handler + idx;
- ret = -EBUSY;
-
- spin_lock_irq(&ucb->lock);
- if (irq->fn == NULL) {
- irq->devid = devid;
- irq->fn = fn;
- ret = 0;
- }
- spin_unlock_irq(&ucb->lock);
- }
- return ret;
+ ucb1x00_enable(ucb);
+ if (ucb->irq_ris_enbl & mask)
+ ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl &
+ ucb->irq_mask);
+ if (ucb->irq_fal_enbl & mask)
+ ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl &
+ ucb->irq_mask);
+ ucb1x00_disable(ucb);
}
-/**
- * ucb1x00_enable_irq - enable an UCB1x00 interrupt source
- * @ucb: UCB1x00 structure describing chip
- * @idx: interrupt index
- * @edges: interrupt edges to enable
- *
- * Enable the specified interrupt to trigger on %UCB_RISING,
- * %UCB_FALLING or both edges. The interrupt should have been
- * hooked by ucb1x00_hook_irq.
- */
-void ucb1x00_enable_irq(struct ucb1x00 *ucb, unsigned int idx, int edges)
+static void ucb1x00_irq_noop(struct irq_data *data)
{
- unsigned long flags;
+}
- if (idx < 16) {
- spin_lock_irqsave(&ucb->lock, flags);
+static void ucb1x00_irq_mask(struct irq_data *data)
+{
+ struct ucb1x00 *ucb = irq_data_get_irq_chip_data(data);
+ unsigned mask = 1 << (data->irq - ucb->irq_base);
- ucb1x00_enable(ucb);
- if (edges & UCB_RISING) {
- ucb->irq_ris_enbl |= 1 << idx;
- ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl);
- }
- if (edges & UCB_FALLING) {
- ucb->irq_fal_enbl |= 1 << idx;
- ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl);
- }
- ucb1x00_disable(ucb);
- spin_unlock_irqrestore(&ucb->lock, flags);
- }
+ raw_spin_lock(&ucb->irq_lock);
+ ucb->irq_mask &= ~mask;
+ ucb1x00_irq_update(ucb, mask);
+ raw_spin_unlock(&ucb->irq_lock);
}
-/**
- * ucb1x00_disable_irq - disable an UCB1x00 interrupt source
- * @ucb: UCB1x00 structure describing chip
- * @edges: interrupt edges to disable
- *
- * Disable the specified interrupt triggering on the specified
- * (%UCB_RISING, %UCB_FALLING or both) edges.
- */
-void ucb1x00_disable_irq(struct ucb1x00 *ucb, unsigned int idx, int edges)
+static void ucb1x00_irq_unmask(struct irq_data *data)
{
- unsigned long flags;
+ struct ucb1x00 *ucb = irq_data_get_irq_chip_data(data);
+ unsigned mask = 1 << (data->irq - ucb->irq_base);
- if (idx < 16) {
- spin_lock_irqsave(&ucb->lock, flags);
-
- ucb1x00_enable(ucb);
- if (edges & UCB_RISING) {
- ucb->irq_ris_enbl &= ~(1 << idx);
- ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl);
- }
- if (edges & UCB_FALLING) {
- ucb->irq_fal_enbl &= ~(1 << idx);
- ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl);
- }
- ucb1x00_disable(ucb);
- spin_unlock_irqrestore(&ucb->lock, flags);
- }
+ raw_spin_lock(&ucb->irq_lock);
+ ucb->irq_mask |= mask;
+ ucb1x00_irq_update(ucb, mask);
+ raw_spin_unlock(&ucb->irq_lock);
}
-/**
- * ucb1x00_free_irq - disable and free the specified UCB1x00 interrupt
- * @ucb: UCB1x00 structure describing chip
- * @idx: interrupt index
- * @devid: device id.
- *
- * Disable the interrupt source and remove the handler. devid must
- * match the devid passed when hooking the interrupt.
- *
- * Returns zero on success, or one of the following errors:
- * -EINVAL if the interrupt index is invalid
- * -ENOENT if devid does not match
- */
-int ucb1x00_free_irq(struct ucb1x00 *ucb, unsigned int idx, void *devid)
+static int ucb1x00_irq_set_type(struct irq_data *data, unsigned int type)
{
- struct ucb1x00_irq *irq;
- int ret;
+ struct ucb1x00 *ucb = irq_data_get_irq_chip_data(data);
+ unsigned mask = 1 << (data->irq - ucb->irq_base);
- if (idx >= 16)
- goto bad;
+ raw_spin_lock(&ucb->irq_lock);
+ if (type & IRQ_TYPE_EDGE_RISING)
+ ucb->irq_ris_enbl |= mask;
+ else
+ ucb->irq_ris_enbl &= ~mask;
- irq = ucb->irq_handler + idx;
- ret = -ENOENT;
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ ucb->irq_fal_enbl |= mask;
+ else
+ ucb->irq_fal_enbl &= ~mask;
+ if (ucb->irq_mask & mask) {
+ ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl &
+ ucb->irq_mask);
+ ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl &
+ ucb->irq_mask);
+ }
+ raw_spin_unlock(&ucb->irq_lock);
- spin_lock_irq(&ucb->lock);
- if (irq->devid == devid) {
- ucb->irq_ris_enbl &= ~(1 << idx);
- ucb->irq_fal_enbl &= ~(1 << idx);
+ return 0;
+}
- ucb1x00_enable(ucb);
- ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl);
- ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl);
- ucb1x00_disable(ucb);
+static int ucb1x00_irq_set_wake(struct irq_data *data, unsigned int on)
+{
+ struct ucb1x00 *ucb = irq_data_get_irq_chip_data(data);
+ struct ucb1x00_plat_data *pdata = ucb->mcp->attached_device.platform_data;
+ unsigned mask = 1 << (data->irq - ucb->irq_base);
- irq->fn = NULL;
- irq->devid = NULL;
- ret = 0;
- }
- spin_unlock_irq(&ucb->lock);
- return ret;
+ if (!pdata || !pdata->can_wakeup)
+ return -EINVAL;
-bad:
- printk(KERN_ERR "Freeing bad UCB1x00 irq %d\n", idx);
- return -EINVAL;
+ raw_spin_lock(&ucb->irq_lock);
+ if (on)
+ ucb->irq_wake |= mask;
+ else
+ ucb->irq_wake &= ~mask;
+ raw_spin_unlock(&ucb->irq_lock);
+
+ return 0;
}
+static struct irq_chip ucb1x00_irqchip = {
+ .name = "ucb1x00",
+ .irq_ack = ucb1x00_irq_noop,
+ .irq_mask = ucb1x00_irq_mask,
+ .irq_unmask = ucb1x00_irq_unmask,
+ .irq_set_type = ucb1x00_irq_set_type,
+ .irq_set_wake = ucb1x00_irq_set_wake,
+};
+
static int ucb1x00_add_dev(struct ucb1x00 *ucb, struct ucb1x00_driver *drv)
{
struct ucb1x00_dev *dev;
@@ -440,8 +403,8 @@ static int ucb1x00_add_dev(struct ucb1x00 *ucb, struct ucb1x00_driver *drv)
ret = drv->add(dev);
if (ret == 0) {
- list_add(&dev->dev_node, &ucb->devs);
- list_add(&dev->drv_node, &drv->devs);
+ list_add_tail(&dev->dev_node, &ucb->devs);
+ list_add_tail(&dev->drv_node, &drv->devs);
} else {
kfree(dev);
}
@@ -533,98 +496,126 @@ static struct class ucb1x00_class = {
static int ucb1x00_probe(struct mcp *mcp)
{
- struct ucb1x00 *ucb;
+ struct ucb1x00_plat_data *pdata = mcp->attached_device.platform_data;
struct ucb1x00_driver *drv;
- unsigned int id;
+ struct ucb1x00 *ucb;
+ unsigned id, i, irq_base;
int ret = -ENODEV;
- int temp;
+
+ /* Tell the platform to deassert the UCB1x00 reset */
+ if (pdata && pdata->reset)
+ pdata->reset(UCB_RST_PROBE);
mcp_enable(mcp);
id = mcp_reg_read(mcp, UCB_ID);
+ mcp_disable(mcp);
if (id != UCB_ID_1200 && id != UCB_ID_1300 && id != UCB_ID_TC35143) {
printk(KERN_WARNING "UCB1x00 ID not found: %04x\n", id);
- goto err_disable;
+ goto out;
}
ucb = kzalloc(sizeof(struct ucb1x00), GFP_KERNEL);
ret = -ENOMEM;
if (!ucb)
- goto err_disable;
-
+ goto out;
+ device_initialize(&ucb->dev);
ucb->dev.class = &ucb1x00_class;
ucb->dev.parent = &mcp->attached_device;
dev_set_name(&ucb->dev, "ucb1x00");
- spin_lock_init(&ucb->lock);
+ raw_spin_lock_init(&ucb->irq_lock);
spin_lock_init(&ucb->io_lock);
- sema_init(&ucb->adc_sem, 1);
+ mutex_init(&ucb->adc_mutex);
ucb->id = id;
ucb->mcp = mcp;
+
+ ret = device_add(&ucb->dev);
+ if (ret)
+ goto err_dev_add;
+
+ ucb1x00_enable(ucb);
ucb->irq = ucb1x00_detect_irq(ucb);
+ ucb1x00_disable(ucb);
if (ucb->irq == NO_IRQ) {
- printk(KERN_ERR "UCB1x00: IRQ probe failed\n");
+ dev_err(&ucb->dev, "IRQ probe failed\n");
ret = -ENODEV;
- goto err_free;
+ goto err_no_irq;
}
ucb->gpio.base = -1;
- if (mcp->gpio_base != 0) {
+ irq_base = pdata ? pdata->irq_base : 0;
+ ucb->irq_base = irq_alloc_descs(-1, irq_base, 16, -1);
+ if (ucb->irq_base < 0) {
+ dev_err(&ucb->dev, "unable to allocate 16 irqs: %d\n",
+ ucb->irq_base);
+ goto err_irq_alloc;
+ }
+
+ for (i = 0; i < 16; i++) {
+ unsigned irq = ucb->irq_base + i;
+
+ irq_set_chip_and_handler(irq, &ucb1x00_irqchip, handle_edge_irq);
+ irq_set_chip_data(irq, ucb);
+ set_irq_flags(irq, IRQF_VALID | IRQ_NOREQUEST);
+ }
+
+ irq_set_irq_type(ucb->irq, IRQ_TYPE_EDGE_RISING);
+ irq_set_handler_data(ucb->irq, ucb);
+ irq_set_chained_handler(ucb->irq, ucb1x00_irq);
+
+ if (pdata && pdata->gpio_base) {
ucb->gpio.label = dev_name(&ucb->dev);
- ucb->gpio.base = mcp->gpio_base;
+ ucb->gpio.dev = &ucb->dev;
+ ucb->gpio.owner = THIS_MODULE;
+ ucb->gpio.base = pdata->gpio_base;
ucb->gpio.ngpio = 10;
ucb->gpio.set = ucb1x00_gpio_set;
ucb->gpio.get = ucb1x00_gpio_get;
ucb->gpio.direction_input = ucb1x00_gpio_direction_input;
ucb->gpio.direction_output = ucb1x00_gpio_direction_output;
+ ucb->gpio.to_irq = ucb1x00_to_irq;
ret = gpiochip_add(&ucb->gpio);
if (ret)
- goto err_free;
+ goto err_gpio_add;
} else
dev_info(&ucb->dev, "gpio_base not set so no gpiolib support");
- ret = request_irq(ucb->irq, ucb1x00_irq, IRQF_TRIGGER_RISING,
- "UCB1x00", ucb);
- if (ret) {
- printk(KERN_ERR "ucb1x00: unable to grab irq%d: %d\n",
- ucb->irq, ret);
- goto err_gpio;
- }
-
mcp_set_drvdata(mcp, ucb);
- ret = device_register(&ucb->dev);
- if (ret)
- goto err_irq;
-
+ if (pdata)
+ device_set_wakeup_capable(&ucb->dev, pdata->can_wakeup);
INIT_LIST_HEAD(&ucb->devs);
mutex_lock(&ucb1x00_mutex);
- list_add(&ucb->node, &ucb1x00_devices);
+ list_add_tail(&ucb->node, &ucb1x00_devices);
list_for_each_entry(drv, &ucb1x00_drivers, node) {
ucb1x00_add_dev(ucb, drv);
}
mutex_unlock(&ucb1x00_mutex);
- goto out;
+ return ret;
- err_irq:
- free_irq(ucb->irq, ucb);
- err_gpio:
- if (ucb->gpio.base != -1)
- temp = gpiochip_remove(&ucb->gpio);
- err_free:
- kfree(ucb);
- err_disable:
- mcp_disable(mcp);
+ err_gpio_add:
+ irq_set_chained_handler(ucb->irq, NULL);
+ err_irq_alloc:
+ if (ucb->irq_base > 0)
+ irq_free_descs(ucb->irq_base, 16);
+ err_no_irq:
+ device_del(&ucb->dev);
+ err_dev_add:
+ put_device(&ucb->dev);
out:
+ if (pdata && pdata->reset)
+ pdata->reset(UCB_RST_PROBE_FAIL);
return ret;
}
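
The probe path above splits device_register() into device_initialize() plus device_add() so that dev_err() and dev_name() already work during IRQ detection, and so the error path can tell an added device (device_del() then put_device()) from a merely initialized one (put_device() alone). The general pattern, as a sketch with a hypothetical example_register():

static int example_register(struct device *dev)
{
	int ret;

	device_initialize(dev);	/* refcount 1; not yet visible in sysfs */
	/* ... setup that may already log through dev_*() ... */
	ret = device_add(dev);	/* publish the device */
	if (ret)
		put_device(dev);	/* drop last reference; ->release() frees */
	return ret;
}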
static void ucb1x00_remove(struct mcp *mcp)
{
+ struct ucb1x00_plat_data *pdata = mcp->attached_device.platform_data;
struct ucb1x00 *ucb = mcp_get_drvdata(mcp);
struct list_head *l, *n;
int ret;
@@ -643,8 +634,12 @@ static void ucb1x00_remove(struct mcp *mcp)
dev_err(&ucb->dev, "Can't remove gpio chip: %d\n", ret);
}
- free_irq(ucb->irq, ucb);
+ irq_set_chained_handler(ucb->irq, NULL);
+ irq_free_descs(ucb->irq_base, 16);
device_unregister(&ucb->dev);
+
+ if (pdata && pdata->reset)
+ pdata->reset(UCB_RST_REMOVE);
}
int ucb1x00_register_driver(struct ucb1x00_driver *drv)
@@ -653,7 +648,7 @@ int ucb1x00_register_driver(struct ucb1x00_driver *drv)
INIT_LIST_HEAD(&drv->devs);
mutex_lock(&ucb1x00_mutex);
- list_add(&drv->node, &ucb1x00_drivers);
+ list_add_tail(&drv->node, &ucb1x00_drivers);
list_for_each_entry(ucb, &ucb1x00_devices, node) {
ucb1x00_add_dev(ucb, drv);
}
@@ -674,44 +669,86 @@ void ucb1x00_unregister_driver(struct ucb1x00_driver *drv)
mutex_unlock(&ucb1x00_mutex);
}
-static int ucb1x00_suspend(struct mcp *mcp, pm_message_t state)
+static int ucb1x00_suspend(struct device *dev)
{
- struct ucb1x00 *ucb = mcp_get_drvdata(mcp);
- struct ucb1x00_dev *dev;
+ struct ucb1x00_plat_data *pdata = dev->platform_data;
+ struct ucb1x00 *ucb = dev_get_drvdata(dev);
+ struct ucb1x00_dev *udev;
mutex_lock(&ucb1x00_mutex);
- list_for_each_entry(dev, &ucb->devs, dev_node) {
- if (dev->drv->suspend)
- dev->drv->suspend(dev, state);
+ list_for_each_entry(udev, &ucb->devs, dev_node) {
+ if (udev->drv->suspend)
+ udev->drv->suspend(udev);
}
mutex_unlock(&ucb1x00_mutex);
+
+ if (ucb->irq_wake) {
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&ucb->irq_lock, flags);
+ ucb1x00_enable(ucb);
+ ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl &
+ ucb->irq_wake);
+ ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl &
+ ucb->irq_wake);
+ ucb1x00_disable(ucb);
+ raw_spin_unlock_irqrestore(&ucb->irq_lock, flags);
+
+ enable_irq_wake(ucb->irq);
+ } else if (pdata && pdata->reset)
+ pdata->reset(UCB_RST_SUSPEND);
+
return 0;
}
-static int ucb1x00_resume(struct mcp *mcp)
+static int ucb1x00_resume(struct device *dev)
{
- struct ucb1x00 *ucb = mcp_get_drvdata(mcp);
- struct ucb1x00_dev *dev;
+ struct ucb1x00_plat_data *pdata = dev->platform_data;
+ struct ucb1x00 *ucb = dev_get_drvdata(dev);
+ struct ucb1x00_dev *udev;
+
+ if (!ucb->irq_wake && pdata && pdata->reset)
+ pdata->reset(UCB_RST_RESUME);
+ ucb1x00_enable(ucb);
ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);
ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir);
+
+ if (ucb->irq_wake) {
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&ucb->irq_lock, flags);
+ ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl &
+ ucb->irq_mask);
+ ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl &
+ ucb->irq_mask);
+ raw_spin_unlock_irqrestore(&ucb->irq_lock, flags);
+
+ disable_irq_wake(ucb->irq);
+ }
+ ucb1x00_disable(ucb);
+
mutex_lock(&ucb1x00_mutex);
- list_for_each_entry(dev, &ucb->devs, dev_node) {
- if (dev->drv->resume)
- dev->drv->resume(dev);
+ list_for_each_entry(udev, &ucb->devs, dev_node) {
+ if (udev->drv->resume)
+ udev->drv->resume(udev);
}
mutex_unlock(&ucb1x00_mutex);
return 0;
}
+static const struct dev_pm_ops ucb1x00_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ucb1x00_suspend, ucb1x00_resume)
+};
+
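SET_SYSTEM_SLEEP_PM_OPS() wires the same callback pair to every system-sleep entry point, which is why the mcp_driver below no longer needs its own suspend/resume methods. With CONFIG_PM_SLEEP enabled, the table above is roughly equivalent to this expanded sketch:

static const struct dev_pm_ops example_pm_ops = {
	.suspend  = ucb1x00_suspend,	/* suspend-to-RAM */
	.resume   = ucb1x00_resume,
	.freeze   = ucb1x00_suspend,	/* hibernation image creation */
	.thaw     = ucb1x00_resume,
	.poweroff = ucb1x00_suspend,	/* hibernation power-down */
	.restore  = ucb1x00_resume,
};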
static struct mcp_driver ucb1x00_driver = {
.drv = {
.name = "ucb1x00",
+ .owner = THIS_MODULE,
+ .pm = &ucb1x00_pm_ops,
},
.probe = ucb1x00_probe,
.remove = ucb1x00_remove,
- .suspend = ucb1x00_suspend,
- .resume = ucb1x00_resume,
};
static int __init ucb1x00_init(void)
@@ -742,14 +779,10 @@ EXPORT_SYMBOL(ucb1x00_adc_enable);
EXPORT_SYMBOL(ucb1x00_adc_read);
EXPORT_SYMBOL(ucb1x00_adc_disable);
-EXPORT_SYMBOL(ucb1x00_hook_irq);
-EXPORT_SYMBOL(ucb1x00_free_irq);
-EXPORT_SYMBOL(ucb1x00_enable_irq);
-EXPORT_SYMBOL(ucb1x00_disable_irq);
-
EXPORT_SYMBOL(ucb1x00_register_driver);
EXPORT_SYMBOL(ucb1x00_unregister_driver);
+MODULE_ALIAS("mcp:ucb1x00");
MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
MODULE_DESCRIPTION("UCB1x00 core driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/ucb1x00-ts.c b/drivers/mfd/ucb1x00-ts.c
index 63a3cbdfa3f3..1e0e20c0e082 100644
--- a/drivers/mfd/ucb1x00-ts.c
+++ b/drivers/mfd/ucb1x00-ts.c
@@ -20,8 +20,9 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
-#include <linux/smp.h>
+#include <linux/interrupt.h>
#include <linux/sched.h>
+#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/string.h>
@@ -32,7 +33,6 @@
#include <linux/kthread.h>
#include <linux/mfd/ucb1x00.h>
-#include <mach/dma.h>
#include <mach/collie.h>
#include <asm/mach-types.h>
@@ -42,6 +42,8 @@ struct ucb1x00_ts {
struct input_dev *idev;
struct ucb1x00 *ucb;
+ spinlock_t irq_lock;
+ unsigned irq_disabled;
wait_queue_head_t irq_wait;
struct task_struct *rtask;
u16 x_res;
@@ -238,7 +240,12 @@ static int ucb1x00_thread(void *_ts)
if (ucb1x00_ts_pen_down(ts)) {
set_current_state(TASK_INTERRUPTIBLE);
- ucb1x00_enable_irq(ts->ucb, UCB_IRQ_TSPX, machine_is_collie() ? UCB_RISING : UCB_FALLING);
+ spin_lock_irq(&ts->irq_lock);
+ if (ts->irq_disabled) {
+ ts->irq_disabled = 0;
+ enable_irq(ts->ucb->irq_base + UCB_IRQ_TSPX);
+ }
+ spin_unlock_irq(&ts->irq_lock);
ucb1x00_disable(ts->ucb);
/*
@@ -281,23 +288,37 @@ static int ucb1x00_thread(void *_ts)
* We only detect touch screen _touches_ with this interrupt
* handler, and even then we just schedule our task.
*/
-static void ucb1x00_ts_irq(int idx, void *id)
+static irqreturn_t ucb1x00_ts_irq(int irq, void *id)
{
struct ucb1x00_ts *ts = id;
- ucb1x00_disable_irq(ts->ucb, UCB_IRQ_TSPX, UCB_FALLING);
+ spin_lock(&ts->irq_lock);
+ ts->irq_disabled = 1;
+ disable_irq_nosync(ts->ucb->irq_base + UCB_IRQ_TSPX);
+ spin_unlock(&ts->irq_lock);
wake_up(&ts->irq_wait);
+
+ return IRQ_HANDLED;
}
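
The handler must use disable_irq_nosync(): plain disable_irq() waits for the running handler, which here would be itself, to finish. The irq_disabled flag keeps enable_irq() balanced when the thread re-arms the line. The pairing in isolation, as a sketch (pen_down_irq() and pen_rearm() are illustrative names):

static void pen_down_irq(struct ucb1x00_ts *ts)	/* hardirq context */
{
	spin_lock(&ts->irq_lock);
	ts->irq_disabled = 1;
	disable_irq_nosync(ts->ucb->irq_base + UCB_IRQ_TSPX);
	spin_unlock(&ts->irq_lock);
}

static void pen_rearm(struct ucb1x00_ts *ts)	/* thread context */
{
	spin_lock_irq(&ts->irq_lock);
	if (ts->irq_disabled) {	/* avoid an unbalanced enable_irq() */
		ts->irq_disabled = 0;
		enable_irq(ts->ucb->irq_base + UCB_IRQ_TSPX);
	}
	spin_unlock_irq(&ts->irq_lock);
}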
static int ucb1x00_ts_open(struct input_dev *idev)
{
struct ucb1x00_ts *ts = input_get_drvdata(idev);
+ unsigned long flags = 0;
int ret = 0;
BUG_ON(ts->rtask);
+ if (machine_is_collie())
+ flags = IRQF_TRIGGER_RISING;
+ else
+ flags = IRQF_TRIGGER_FALLING;
+
+ ts->irq_disabled = 0;
+
init_waitqueue_head(&ts->irq_wait);
- ret = ucb1x00_hook_irq(ts->ucb, UCB_IRQ_TSPX, ucb1x00_ts_irq, ts);
+ ret = request_irq(ts->ucb->irq_base + UCB_IRQ_TSPX, ucb1x00_ts_irq,
+ flags, "ucb1x00-ts", ts);
if (ret < 0)
goto out;
@@ -314,7 +335,7 @@ static int ucb1x00_ts_open(struct input_dev *idev)
if (!IS_ERR(ts->rtask)) {
ret = 0;
} else {
- ucb1x00_free_irq(ts->ucb, UCB_IRQ_TSPX, ts);
+ free_irq(ts->ucb->irq_base + UCB_IRQ_TSPX, ts);
ts->rtask = NULL;
ret = -EFAULT;
}
@@ -334,7 +355,7 @@ static void ucb1x00_ts_close(struct input_dev *idev)
kthread_stop(ts->rtask);
ucb1x00_enable(ts->ucb);
- ucb1x00_free_irq(ts->ucb, UCB_IRQ_TSPX, ts);
+ free_irq(ts->ucb->irq_base + UCB_IRQ_TSPX, ts);
ucb1x00_reg_write(ts->ucb, UCB_TS_CR, 0);
ucb1x00_disable(ts->ucb);
}
@@ -359,11 +380,13 @@ static int ucb1x00_ts_add(struct ucb1x00_dev *dev)
ts->ucb = dev->ucb;
ts->idev = idev;
ts->adcsync = adcsync ? UCB_SYNC : UCB_NOSYNC;
+ spin_lock_init(&ts->irq_lock);
idev->name = "Touchscreen panel";
idev->id.product = ts->ucb->id;
idev->open = ucb1x00_ts_open;
idev->close = ucb1x00_ts_close;
+ idev->dev.parent = &ts->ucb->dev;
idev->evbit[0] = BIT_MASK(EV_ABS) | BIT_MASK(EV_KEY);
idev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
diff --git a/drivers/misc/atmel_tclib.c b/drivers/misc/atmel_tclib.c
index 4bcfc3759734..c8d8e38d0d8a 100644
--- a/drivers/misc/atmel_tclib.c
+++ b/drivers/misc/atmel_tclib.c
@@ -6,12 +6,10 @@
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include <linux/export.h>
-
-/* Number of bytes to reserve for the iomem resource */
-#define ATMEL_TC_IOMEM_SIZE 256
-
+#include <linux/of.h>
/*
* This is a thin library to solve the problem of how to portably allocate
@@ -48,10 +46,17 @@ struct atmel_tc *atmel_tc_alloc(unsigned block, const char *name)
struct atmel_tc *tc;
struct platform_device *pdev = NULL;
struct resource *r;
+ size_t size;
spin_lock(&tc_list_lock);
list_for_each_entry(tc, &tc_list, node) {
- if (tc->pdev->id == block) {
+ if (tc->pdev->dev.of_node) {
+ if (of_alias_get_id(tc->pdev->dev.of_node, "tcb")
+ == block) {
+ pdev = tc->pdev;
+ break;
+ }
+ } else if (tc->pdev->id == block) {
pdev = tc->pdev;
break;
}
@@ -61,11 +66,15 @@ struct atmel_tc *atmel_tc_alloc(unsigned block, const char *name)
goto fail;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- r = request_mem_region(r->start, ATMEL_TC_IOMEM_SIZE, name);
if (!r)
goto fail;
- tc->regs = ioremap(r->start, ATMEL_TC_IOMEM_SIZE);
+ size = resource_size(r);
+ r = request_mem_region(r->start, size, name);
+ if (!r)
+ goto fail;
+
+ tc->regs = ioremap(r->start, size);
if (!tc->regs)
goto fail_ioremap;
@@ -76,7 +85,7 @@ out:
return tc;
fail_ioremap:
- release_mem_region(r->start, ATMEL_TC_IOMEM_SIZE);
+ release_mem_region(r->start, size);
fail:
tc = NULL;
goto out;
@@ -96,7 +105,7 @@ void atmel_tc_free(struct atmel_tc *tc)
spin_lock(&tc_list_lock);
if (tc->regs) {
iounmap(tc->regs);
- release_mem_region(tc->iomem->start, ATMEL_TC_IOMEM_SIZE);
+ release_mem_region(tc->iomem->start, resource_size(tc->iomem));
tc->regs = NULL;
tc->iomem = NULL;
}
@@ -104,6 +113,30 @@ void atmel_tc_free(struct atmel_tc *tc)
}
EXPORT_SYMBOL_GPL(atmel_tc_free);
+#if defined(CONFIG_OF)
+static struct atmel_tcb_config tcb_rm9200_config = {
+ .counter_width = 16,
+};
+
+static struct atmel_tcb_config tcb_sam9x5_config = {
+ .counter_width = 32,
+};
+
+static const struct of_device_id atmel_tcb_dt_ids[] = {
+ {
+ .compatible = "atmel,at91rm9200-tcb",
+ .data = &tcb_rm9200_config,
+ }, {
+ .compatible = "atmel,at91sam9x5-tcb",
+ .data = &tcb_sam9x5_config,
+ }, {
+ /* sentinel */
+ }
+};
+
+MODULE_DEVICE_TABLE(of, atmel_tcb_dt_ids);
+#endif
+
static int __init tc_probe(struct platform_device *pdev)
{
struct atmel_tc *tc;
@@ -129,6 +162,14 @@ static int __init tc_probe(struct platform_device *pdev)
return -EINVAL;
}
+ /* Now take SoC information if available */
+ if (pdev->dev.of_node) {
+ const struct of_device_id *match;
+ match = of_match_node(atmel_tcb_dt_ids, pdev->dev.of_node);
+ if (match)
+ tc->tcb_config = match->data;
+ }
+
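The of_device_id .data pointer carries per-SoC parameters, here the counter width, into tc_probe(). A hypothetical consumer might use it as in the sketch below, assuming struct atmel_tc exposes the tcb_config pointer assigned above; boards without a device tree leave it NULL, so the 16-bit RM9200-style counter is the safe default:

static u32 example_tc_max_count(struct atmel_tc *tc)
{
	/* NULL tcb_config (non-DT board): assume a 16-bit counter */
	int bits = tc->tcb_config ? tc->tcb_config->counter_width : 16;

	return bits == 32 ? 0xffffffffu : 0xffffu;
}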
tc->clk[0] = clk;
tc->clk[1] = clk_get(&pdev->dev, "t1_clk");
if (IS_ERR(tc->clk[1]))
@@ -153,7 +194,10 @@ static int __init tc_probe(struct platform_device *pdev)
}
static struct platform_driver tc_driver = {
- .driver.name = "atmel_tcb",
+ .driver = {
+ .name = "atmel_tcb",
+ .of_match_table = of_match_ptr(atmel_tcb_dt_ids),
+ },
};
static int __init tc_init(void)
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 00fcbed1afd2..ecbee9bf87b2 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -395,7 +395,7 @@ config MMC_SPI
config MMC_S3C
tristate "Samsung S3C SD/MMC Card Interface support"
- depends on ARCH_S3C2410
+ depends on ARCH_S3C24XX
help
This selects a driver for the MCI interface found in
Samsung's S3C2410, S3C2412, S3C2440, S3C2442 CPUs.
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c
index 947faa5d2ce4..efdb81d21c44 100644
--- a/drivers/mmc/host/at91_mci.c
+++ b/drivers/mmc/host/at91_mci.c
@@ -86,7 +86,6 @@ static inline int at91mci_is_mci1rev2xx(void)
{
return ( cpu_is_at91sam9260()
|| cpu_is_at91sam9263()
- || cpu_is_at91cap9()
|| cpu_is_at91sam9rl()
|| cpu_is_at91sam9g10()
|| cpu_is_at91sam9g20()
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 0be4e2013632..6193a0d7bde5 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -464,7 +464,7 @@ static int __devinit sdhci_esdhc_imx_probe(struct platform_device *pdev)
err = PTR_ERR(clk);
goto err_clk_get;
}
- clk_enable(clk);
+ clk_prepare_enable(clk);
pltfm_host->clk = clk;
if (!is_imx25_esdhc(imx_data))
@@ -559,7 +559,7 @@ no_card_detect_irq:
gpio_free(boarddata->wp_gpio);
no_card_detect_pin:
no_board_data:
- clk_disable(pltfm_host->clk);
+ clk_disable_unprepare(pltfm_host->clk);
clk_put(pltfm_host->clk);
err_clk_get:
kfree(imx_data);
@@ -586,7 +586,7 @@ static int __devexit sdhci_esdhc_imx_remove(struct platform_device *pdev)
gpio_free(boarddata->cd_gpio);
}
- clk_disable(pltfm_host->clk);
+ clk_disable_unprepare(pltfm_host->clk);
clk_put(pltfm_host->clk);
kfree(imx_data);
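
Under the common clock framework an enable is a two-step operation: a possibly sleeping prepare (e.g. waiting for a PLL to lock) followed by an atomic enable. clk_prepare_enable() and clk_disable_unprepare() keep the two halves paired; spelled out, the enable side is roughly this sketch:

#include <linux/clk.h>

static int example_clk_on(struct clk *clk)
{
	int ret = clk_prepare(clk);	/* may sleep */

	if (ret)
		return ret;
	ret = clk_enable(clk);		/* atomic, IRQ-safe */
	if (ret)
		clk_unprepare(clk);
	return ret;
}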
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 1af756ee0f9a..b19e7d435f8d 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -518,9 +518,6 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
if (pdata->cd_type == S3C_SDHCI_CD_PERMANENT)
host->mmc->caps = MMC_CAP_NONREMOVABLE;
- if (pdata->host_caps)
- host->mmc->caps |= pdata->host_caps;
-
if (pdata->pm_caps)
host->mmc->pm_caps |= pdata->pm_caps;
@@ -544,6 +541,9 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
if (pdata->host_caps)
host->mmc->caps |= pdata->host_caps;
+ if (pdata->host_caps2)
+ host->mmc->caps2 |= pdata->host_caps2;
+
ret = sdhci_add_host(host);
if (ret) {
dev_err(dev, "sdhci_add_host() failed\n");
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 1be621841400..284cf3433720 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -1,6 +1,6 @@
menuconfig MTD
tristate "Memory Technology Device (MTD) support"
- depends on HAS_IOMEM
+ depends on GENERIC_IO
help
Memory Technology Devices are flash, RAM and similar chips, often
used for solid state file systems on embedded devices. This option
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 37b05c3f2792..8d3dac40d7e6 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -1,5 +1,6 @@
menu "Self-contained MTD device drivers"
depends on MTD!=n
+ depends on HAS_IOMEM
config MTD_PMC551
tristate "Ramix PMC551 PCI Mezzanine RAM card support"
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 6c5c431c64af..8af67cfd671a 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -1,5 +1,6 @@
menu "Mapping drivers for chip access"
depends on MTD!=n
+ depends on HAS_IOMEM
config MTD_COMPLEX_MAPPINGS
bool "Support non-linear mappings of flash chips"
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index 502821997707..cbc3b7867910 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -23,106 +23,6 @@
#include <asm/sizes.h>
#include <asm/mach/flash.h>
-#if 0
-/*
- * This is here for documentation purposes only - until these people
- * submit their machine types. It will be gone January 2005.
- */
-static struct mtd_partition consus_partitions[] = {
- {
- .name = "Consus boot firmware",
- .offset = 0,
- .size = 0x00040000,
- .mask_flags = MTD_WRITABLE, /* force read-only */
- }, {
- .name = "Consus kernel",
- .offset = 0x00040000,
- .size = 0x00100000,
- .mask_flags = 0,
- }, {
- .name = "Consus disk",
- .offset = 0x00140000,
- /* The rest (up to 16M) for jffs. We could put 0 and
- make it find the size automatically, but right now
- i have 32 megs. jffs will use all 32 megs if given
- the chance, and this leads to horrible problems
- when you try to re-flash the image because blob
- won't erase the whole partition. */
- .size = 0x01000000 - 0x00140000,
- .mask_flags = 0,
- }, {
- /* this disk is a secondary disk, which can be used as
- needed, for simplicity, make it the size of the other
- consus partition, although realistically it could be
- the remainder of the disk (depending on the file
- system used) */
- .name = "Consus disk2",
- .offset = 0x01000000,
- .size = 0x01000000 - 0x00140000,
- .mask_flags = 0,
- }
-};
-
-/* Frodo has 2 x 16M 28F128J3A flash chips in bank 0: */
-static struct mtd_partition frodo_partitions[] =
-{
- {
- .name = "bootloader",
- .size = 0x00040000,
- .offset = 0x00000000,
- .mask_flags = MTD_WRITEABLE
- }, {
- .name = "bootloader params",
- .size = 0x00040000,
- .offset = MTDPART_OFS_APPEND,
- .mask_flags = MTD_WRITEABLE
- }, {
- .name = "kernel",
- .size = 0x00100000,
- .offset = MTDPART_OFS_APPEND,
- .mask_flags = MTD_WRITEABLE
- }, {
- .name = "ramdisk",
- .size = 0x00400000,
- .offset = MTDPART_OFS_APPEND,
- .mask_flags = MTD_WRITEABLE
- }, {
- .name = "file system",
- .size = MTDPART_SIZ_FULL,
- .offset = MTDPART_OFS_APPEND
- }
-};
-
-static struct mtd_partition jornada56x_partitions[] = {
- {
- .name = "bootldr",
- .size = 0x00040000,
- .offset = 0,
- .mask_flags = MTD_WRITEABLE,
- }, {
- .name = "rootfs",
- .size = MTDPART_SIZ_FULL,
- .offset = MTDPART_OFS_APPEND,
- }
-};
-
-static void jornada56x_set_vpp(int vpp)
-{
- if (vpp)
- GPSR = GPIO_GPIO26;
- else
- GPCR = GPIO_GPIO26;
- GPDR |= GPIO_GPIO26;
-}
-
-/*
- * Machine Phys Size set_vpp
- * Consus : SA1100_CS0_PHYS SZ_32M
- * Frodo : SA1100_CS0_PHYS SZ_32M
- * Jornada56x: SA1100_CS0_PHYS SZ_32M jornada56x_set_vpp
- */
-#endif
-
struct sa_subdev_info {
char name[16];
struct map_info map;
@@ -373,21 +273,9 @@ static int __exit sa1100_mtd_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static void sa1100_mtd_shutdown(struct platform_device *dev)
-{
- struct sa_info *info = platform_get_drvdata(dev);
- if (info && mtd_suspend(info->mtd) == 0)
- mtd_resume(info->mtd);
-}
-#else
-#define sa1100_mtd_shutdown NULL
-#endif
-
static struct platform_driver sa1100_mtd_driver = {
.probe = sa1100_mtd_probe,
.remove = __exit_p(sa1100_mtd_remove),
- .shutdown = sa1100_mtd_shutdown,
.driver = {
.name = "sa1100-mtd",
.owner = THIS_MODULE,
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 3b1d6da874e0..a3c4de551ebe 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -187,7 +187,7 @@ config MTD_NAND_PPCHAMELEONEVB
config MTD_NAND_S3C2410
tristate "NAND Flash support for Samsung S3C SoCs"
- depends on ARCH_S3C2410 || ARCH_S3C64XX
+ depends on ARCH_S3C24XX || ARCH_S3C64XX
help
This enables the NAND flash controller on the S3C24xx and S3C64xx
SoCs
@@ -246,6 +246,7 @@ config MTD_NAND_BCM_UMI_HWCS
config MTD_NAND_DISKONCHIP
tristate "DiskOnChip 2000, Millennium and Millennium Plus (NAND reimplementation) (EXPERIMENTAL)"
depends on EXPERIMENTAL
+ depends on HAS_IOMEM
select REED_SOLOMON
select REED_SOLOMON_DEC16
help
@@ -431,6 +432,7 @@ config MTD_NAND_GPMI_NAND
config MTD_NAND_PLATFORM
tristate "Support for generic platform NAND driver"
+ depends on HAS_IOMEM
help
This implements a generic NAND driver for on-SOC platform
devices. You will need to provide platform-specific functions
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index 3197e9764fcd..73416951f4c1 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -26,7 +26,7 @@
#include <asm/io.h>
#include <mach/hardware.h>
#include <asm/sizes.h>
-#include <asm/gpio.h>
+#include <linux/gpio.h>
#include <plat/board-ams-delta.h>
/*
@@ -34,8 +34,6 @@
*/
static struct mtd_info *ams_delta_mtd = NULL;
-#define NAND_MASK (AMS_DELTA_LATCH2_NAND_NRE | AMS_DELTA_LATCH2_NAND_NWE | AMS_DELTA_LATCH2_NAND_CLE | AMS_DELTA_LATCH2_NAND_ALE | AMS_DELTA_LATCH2_NAND_NCE | AMS_DELTA_LATCH2_NAND_NWP)
-
/*
* Define partitions for flash devices
*/
@@ -68,10 +66,9 @@ static void ams_delta_write_byte(struct mtd_info *mtd, u_char byte)
writew(0, io_base + OMAP_MPUIO_IO_CNTL);
writew(byte, this->IO_ADDR_W);
- ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_NWE, 0);
+ gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_NWE, 0);
ndelay(40);
- ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_NWE,
- AMS_DELTA_LATCH2_NAND_NWE);
+ gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_NWE, 1);
}
static u_char ams_delta_read_byte(struct mtd_info *mtd)
@@ -80,12 +77,11 @@ static u_char ams_delta_read_byte(struct mtd_info *mtd)
struct nand_chip *this = mtd->priv;
void __iomem *io_base = this->priv;
- ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_NRE, 0);
+ gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_NRE, 0);
ndelay(40);
writew(~0, io_base + OMAP_MPUIO_IO_CNTL);
res = readw(this->IO_ADDR_R);
- ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_NRE,
- AMS_DELTA_LATCH2_NAND_NRE);
+ gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_NRE, 1);
return res;
}
@@ -132,15 +128,12 @@ static void ams_delta_hwcontrol(struct mtd_info *mtd, int cmd,
{
if (ctrl & NAND_CTRL_CHANGE) {
- unsigned long bits;
-
- bits = (~ctrl & NAND_NCE) ? AMS_DELTA_LATCH2_NAND_NCE : 0;
- bits |= (ctrl & NAND_CLE) ? AMS_DELTA_LATCH2_NAND_CLE : 0;
- bits |= (ctrl & NAND_ALE) ? AMS_DELTA_LATCH2_NAND_ALE : 0;
-
- ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_CLE |
- AMS_DELTA_LATCH2_NAND_ALE |
- AMS_DELTA_LATCH2_NAND_NCE, bits);
+ gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_NCE,
+ (ctrl & NAND_NCE) == 0);
+ gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_CLE,
+ (ctrl & NAND_CLE) != 0);
+ gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_ALE,
+ (ctrl & NAND_ALE) != 0);
}
if (cmd != NAND_CMD_NONE)
@@ -152,6 +145,39 @@ static int ams_delta_nand_ready(struct mtd_info *mtd)
return gpio_get_value(AMS_DELTA_GPIO_PIN_NAND_RB);
}
+static const struct gpio _mandatory_gpio[] = {
+ {
+ .gpio = AMS_DELTA_GPIO_PIN_NAND_NCE,
+ .flags = GPIOF_OUT_INIT_HIGH,
+ .label = "nand_nce",
+ },
+ {
+ .gpio = AMS_DELTA_GPIO_PIN_NAND_NRE,
+ .flags = GPIOF_OUT_INIT_HIGH,
+ .label = "nand_nre",
+ },
+ {
+ .gpio = AMS_DELTA_GPIO_PIN_NAND_NWP,
+ .flags = GPIOF_OUT_INIT_HIGH,
+ .label = "nand_nwp",
+ },
+ {
+ .gpio = AMS_DELTA_GPIO_PIN_NAND_NWE,
+ .flags = GPIOF_OUT_INIT_HIGH,
+ .label = "nand_nwe",
+ },
+ {
+ .gpio = AMS_DELTA_GPIO_PIN_NAND_ALE,
+ .flags = GPIOF_OUT_INIT_LOW,
+ .label = "nand_ale",
+ },
+ {
+ .gpio = AMS_DELTA_GPIO_PIN_NAND_CLE,
+ .flags = GPIOF_OUT_INIT_LOW,
+ .label = "nand_cle",
+ },
+};
+
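gpio_request_array() claims each descriptor with its initial level and, on any failure, releases the pins it already took before returning, so the caller never unwinds partial success by hand. A sketch of the resulting call pattern (example_claim() is hypothetical):

static int example_claim(void)
{
	int err = gpio_request_array(_mandatory_gpio,
				     ARRAY_SIZE(_mandatory_gpio));
	if (err)
		return err;	/* helper already freed any claimed pins */

	/* ... drive the NAND control lines ... */

	gpio_free_array(_mandatory_gpio, ARRAY_SIZE(_mandatory_gpio));
	return 0;
}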
/*
* Main initialization routine
*/
@@ -223,10 +249,9 @@ static int __devinit ams_delta_init(struct platform_device *pdev)
platform_set_drvdata(pdev, io_base);
/* Set chip enabled, but */
- ams_delta_latch2_write(NAND_MASK, AMS_DELTA_LATCH2_NAND_NRE |
- AMS_DELTA_LATCH2_NAND_NWE |
- AMS_DELTA_LATCH2_NAND_NCE |
- AMS_DELTA_LATCH2_NAND_NWP);
+ err = gpio_request_array(_mandatory_gpio, ARRAY_SIZE(_mandatory_gpio));
+ if (err)
+ goto out_gpio;
/* Scan to find existence of the device */
if (nand_scan(ams_delta_mtd, 1)) {
@@ -241,7 +266,10 @@ static int __devinit ams_delta_init(struct platform_device *pdev)
goto out;
out_mtd:
+ gpio_free_array(_mandatory_gpio, ARRAY_SIZE(_mandatory_gpio));
+out_gpio:
platform_set_drvdata(pdev, NULL);
+ gpio_free(AMS_DELTA_GPIO_PIN_NAND_RB);
iounmap(io_base);
out_release_io:
release_mem_region(res->start, resource_size(res));
@@ -262,6 +290,8 @@ static int __devexit ams_delta_cleanup(struct platform_device *pdev)
/* Release resources, unregister device */
nand_release(ams_delta_mtd);
+ gpio_free_array(_mandatory_gpio, ARRAY_SIZE(_mandatory_gpio));
+ gpio_free(AMS_DELTA_GPIO_PIN_NAND_RB);
iounmap(io_base);
release_mem_region(res->start, resource_size(res));
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 35b4fb55dbd6..ae7e37d9ac17 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -27,6 +27,10 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/of_mtd.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
@@ -34,22 +38,10 @@
#include <linux/dmaengine.h>
#include <linux/gpio.h>
#include <linux/io.h>
+#include <linux/platform_data/atmel.h>
-#include <mach/board.h>
#include <mach/cpu.h>
-#ifdef CONFIG_MTD_NAND_ATMEL_ECC_HW
-#define hard_ecc 1
-#else
-#define hard_ecc 0
-#endif
-
-#ifdef CONFIG_MTD_NAND_ATMEL_ECC_NONE
-#define no_ecc 1
-#else
-#define no_ecc 0
-#endif
-
static int use_dma = 1;
module_param(use_dma, int, 0);
@@ -95,7 +87,7 @@ struct atmel_nand_host {
struct mtd_info mtd;
void __iomem *io_base;
dma_addr_t io_phys;
- struct atmel_nand_data *board;
+ struct atmel_nand_data board;
struct device *dev;
void __iomem *ecc;
@@ -113,8 +105,8 @@ static int cpu_has_dma(void)
*/
static void atmel_nand_enable(struct atmel_nand_host *host)
{
- if (gpio_is_valid(host->board->enable_pin))
- gpio_set_value(host->board->enable_pin, 0);
+ if (gpio_is_valid(host->board.enable_pin))
+ gpio_set_value(host->board.enable_pin, 0);
}
/*
@@ -122,8 +114,8 @@ static void atmel_nand_enable(struct atmel_nand_host *host)
*/
static void atmel_nand_disable(struct atmel_nand_host *host)
{
- if (gpio_is_valid(host->board->enable_pin))
- gpio_set_value(host->board->enable_pin, 1);
+ if (gpio_is_valid(host->board.enable_pin))
+ gpio_set_value(host->board.enable_pin, 1);
}
/*
@@ -144,9 +136,9 @@ static void atmel_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl
return;
if (ctrl & NAND_CLE)
- writeb(cmd, host->io_base + (1 << host->board->cle));
+ writeb(cmd, host->io_base + (1 << host->board.cle));
else
- writeb(cmd, host->io_base + (1 << host->board->ale));
+ writeb(cmd, host->io_base + (1 << host->board.ale));
}
/*
@@ -157,8 +149,8 @@ static int atmel_nand_device_ready(struct mtd_info *mtd)
struct nand_chip *nand_chip = mtd->priv;
struct atmel_nand_host *host = nand_chip->priv;
- return gpio_get_value(host->board->rdy_pin) ^
- !!host->board->rdy_pin_active_low;
+ return gpio_get_value(host->board.rdy_pin) ^
+ !!host->board.rdy_pin_active_low;
}
/*
@@ -273,7 +265,7 @@ static void atmel_read_buf(struct mtd_info *mtd, u8 *buf, int len)
if (atmel_nand_dma_op(mtd, buf, len, 1) == 0)
return;
- if (host->board->bus_width_16)
+ if (host->board.bus_width_16)
atmel_read_buf16(mtd, buf, len);
else
atmel_read_buf8(mtd, buf, len);
@@ -289,7 +281,7 @@ static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
if (atmel_nand_dma_op(mtd, (void *)buf, len, 0) == 0)
return;
- if (host->board->bus_width_16)
+ if (host->board.bus_width_16)
atmel_write_buf16(mtd, buf, len);
else
atmel_write_buf8(mtd, buf, len);
@@ -481,6 +473,56 @@ static void atmel_nand_hwctl(struct mtd_info *mtd, int mode)
}
}
+#if defined(CONFIG_OF)
+static int __devinit atmel_of_init_port(struct atmel_nand_host *host,
+ struct device_node *np)
+{
+ u32 val;
+ int ecc_mode;
+ struct atmel_nand_data *board = &host->board;
+ enum of_gpio_flags flags;
+
+ if (of_property_read_u32(np, "atmel,nand-addr-offset", &val) == 0) {
+ if (val >= 32) {
+ dev_err(host->dev, "invalid addr-offset %u\n", val);
+ return -EINVAL;
+ }
+ board->ale = val;
+ }
+
+ if (of_property_read_u32(np, "atmel,nand-cmd-offset", &val) == 0) {
+ if (val >= 32) {
+ dev_err(host->dev, "invalid cmd-offset %u\n", val);
+ return -EINVAL;
+ }
+ board->cle = val;
+ }
+
+ ecc_mode = of_get_nand_ecc_mode(np);
+
+ board->ecc_mode = ecc_mode < 0 ? NAND_ECC_SOFT : ecc_mode;
+
+ board->on_flash_bbt = of_get_nand_on_flash_bbt(np);
+
+ if (of_get_nand_bus_width(np) == 16)
+ board->bus_width_16 = 1;
+
+ board->rdy_pin = of_get_gpio_flags(np, 0, &flags);
+ board->rdy_pin_active_low = (flags == OF_GPIO_ACTIVE_LOW);
+
+ board->enable_pin = of_get_gpio(np, 1);
+ board->det_pin = of_get_gpio(np, 2);
+
+ return 0;
+}
+#else
+static int __devinit atmel_of_init_port(struct atmel_nand_host *host,
+ struct device_node *np)
+{
+ return -EINVAL;
+}
+#endif
+
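of_property_read_u32() returns 0 only when the property exists and parses, so absent optional properties silently keep their zero-initialized defaults while present-but-bogus values are rejected. The validation pattern from atmel_of_init_port(), reduced to one property as a sketch:

#include <linux/of.h>

static int example_read_offset(struct device_node *np, u32 *offset)
{
	u32 val;

	if (of_property_read_u32(np, "atmel,nand-addr-offset", &val))
		return 0;	/* property absent: keep the default */
	if (val >= 32)
		return -EINVAL;	/* the offset is an address-line number */
	*offset = val;
	return 0;
}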
/*
* Probe for the NAND device.
*/
@@ -491,6 +533,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
struct nand_chip *nand_chip;
struct resource *regs;
struct resource *mem;
+ struct mtd_part_parser_data ppdata = {};
int res;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -517,8 +560,15 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
mtd = &host->mtd;
nand_chip = &host->nand_chip;
- host->board = pdev->dev.platform_data;
host->dev = &pdev->dev;
+ if (pdev->dev.of_node) {
+ res = atmel_of_init_port(host, pdev->dev.of_node);
+ if (res)
+ goto err_nand_ioremap;
+ } else {
+ memcpy(&host->board, pdev->dev.platform_data,
+ sizeof(struct atmel_nand_data));
+ }
nand_chip->priv = host; /* link the private data structures */
mtd->priv = nand_chip;
@@ -529,26 +579,25 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
nand_chip->IO_ADDR_W = host->io_base;
nand_chip->cmd_ctrl = atmel_nand_cmd_ctrl;
- if (gpio_is_valid(host->board->rdy_pin))
+ if (gpio_is_valid(host->board.rdy_pin))
nand_chip->dev_ready = atmel_nand_device_ready;
+ nand_chip->ecc.mode = host->board.ecc_mode;
+
regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!regs && hard_ecc) {
+ if (!regs && nand_chip->ecc.mode == NAND_ECC_HW) {
printk(KERN_ERR "atmel_nand: can't get I/O resource "
"regs\nFalling back on software ECC\n");
+ nand_chip->ecc.mode = NAND_ECC_SOFT;
}
- nand_chip->ecc.mode = NAND_ECC_SOFT; /* enable ECC */
- if (no_ecc)
- nand_chip->ecc.mode = NAND_ECC_NONE;
- if (hard_ecc && regs) {
+ if (nand_chip->ecc.mode == NAND_ECC_HW) {
host->ecc = ioremap(regs->start, resource_size(regs));
if (host->ecc == NULL) {
printk(KERN_ERR "atmel_nand: ioremap failed\n");
res = -EIO;
goto err_ecc_ioremap;
}
- nand_chip->ecc.mode = NAND_ECC_HW;
nand_chip->ecc.calculate = atmel_nand_calculate;
nand_chip->ecc.correct = atmel_nand_correct;
nand_chip->ecc.hwctl = atmel_nand_hwctl;
@@ -558,7 +607,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
nand_chip->chip_delay = 20; /* 20us command delay time */
- if (host->board->bus_width_16) /* 16-bit bus width */
+ if (host->board.bus_width_16) /* 16-bit bus width */
nand_chip->options |= NAND_BUSWIDTH_16;
nand_chip->read_buf = atmel_read_buf;
@@ -567,15 +616,15 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, host);
atmel_nand_enable(host);
- if (gpio_is_valid(host->board->det_pin)) {
- if (gpio_get_value(host->board->det_pin)) {
+ if (gpio_is_valid(host->board.det_pin)) {
+ if (gpio_get_value(host->board.det_pin)) {
printk(KERN_INFO "No SmartMedia card inserted.\n");
res = -ENXIO;
goto err_no_card;
}
}
- if (on_flash_bbt) {
+ if (host->board.on_flash_bbt || on_flash_bbt) {
printk(KERN_INFO "atmel_nand: Use On Flash BBT\n");
nand_chip->bbt_options |= NAND_BBT_USE_FLASH;
}
@@ -650,8 +699,9 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
}
mtd->name = "atmel_nand";
- res = mtd_device_parse_register(mtd, NULL, 0,
- host->board->parts, host->board->num_parts);
+ ppdata.of_node = pdev->dev.of_node;
+ res = mtd_device_parse_register(mtd, NULL, &ppdata,
+ host->board.parts, host->board.num_parts);
if (!res)
return res;
@@ -695,11 +745,21 @@ static int __exit atmel_nand_remove(struct platform_device *pdev)
return 0;
}
+#if defined(CONFIG_OF)
+static const struct of_device_id atmel_nand_dt_ids[] = {
+ { .compatible = "atmel,at91rm9200-nand" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, atmel_nand_dt_ids);
+#endif
+
static struct platform_driver atmel_nand_driver = {
.remove = __exit_p(atmel_nand_remove),
.driver = {
.name = "atmel_nand",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(atmel_nand_dt_ids),
},
};
diff --git a/drivers/mtd/onenand/Kconfig b/drivers/mtd/onenand/Kconfig
index 772ad2966619..91467bb03634 100644
--- a/drivers/mtd/onenand/Kconfig
+++ b/drivers/mtd/onenand/Kconfig
@@ -1,6 +1,7 @@
menuconfig MTD_ONENAND
tristate "OneNAND Device Support"
depends on MTD
+ depends on HAS_IOMEM
help
This enables support for accessing all types of OneNAND flash
devices. For further information see
diff --git a/drivers/net/Space.c b/drivers/net/Space.c
index 068c3563e00f..88bbd8ffa7fe 100644
--- a/drivers/net/Space.c
+++ b/drivers/net/Space.c
@@ -190,8 +190,10 @@ static struct devprobe2 isa_probes[] __initdata = {
{seeq8005_probe, 0},
#endif
#ifdef CONFIG_CS89x0
+#ifndef CONFIG_CS89x0_PLATFORM
{cs89x0_probe, 0},
#endif
+#endif
#ifdef CONFIG_AT1700
{at1700_probe, 0},
#endif
diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig
index 1f8648f099c7..8388e36cf08f 100644
--- a/drivers/net/ethernet/cirrus/Kconfig
+++ b/drivers/net/ethernet/cirrus/Kconfig
@@ -5,8 +5,7 @@
config NET_VENDOR_CIRRUS
bool "Cirrus devices"
default y
- depends on ISA || EISA || MACH_IXDP2351 || ARCH_IXDP2X01 \
- || MACH_MX31ADS || MACH_QQ2440 || (ARM && ARCH_EP93XX) || MAC
+ depends on ISA || EISA || ARM || MAC
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
@@ -21,8 +20,7 @@ if NET_VENDOR_CIRRUS
config CS89x0
tristate "CS89x0 support"
- depends on (ISA || EISA || MACH_IXDP2351 \
- || ARCH_IXDP2X01 || MACH_MX31ADS || MACH_QQ2440)
+ depends on ISA || EISA || ARM
---help---
Support for CS89x0 chipset based Ethernet cards. If you have a
network (Ethernet) card of this type, say Y and read the
@@ -33,10 +31,15 @@ config CS89x0
To compile this driver as a module, choose M here. The module
will be called cs89x0.
-config CS89x0_NONISA_IRQ
- def_bool y
- depends on CS89x0 != n
- depends on MACH_IXDP2351 || ARCH_IXDP2X01 || MACH_MX31ADS || MACH_QQ2440
+config CS89x0_PLATFORM
+ bool "CS89x0 platform driver support"
+ depends on CS89x0
+ help
+ Say Y to compile the cs89x0 driver as a platform driver. This
+ makes this driver suitable for use on certain evaluation boards
+ such as the iMX21ADS.
+
+ If you are unsure, say N.
config EP93XX_ETH
tristate "EP93xx Ethernet support"
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index d5ff93653e4c..30fee428c489 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -100,9 +100,6 @@
*/
-/* Always include 'config.h' first in case the user wants to turn on
- or override something. */
-#include <linux/module.h>
/*
* Set this to zero to disable DMA code
@@ -131,9 +128,12 @@
*/
+#include <linux/module.h>
+#include <linux/printk.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
@@ -151,6 +151,7 @@
#include <asm/system.h>
#include <asm/io.h>
#include <asm/irq.h>
+#include <linux/atomic.h>
#if ALLOW_DMA
#include <asm/dma.h>
#endif
@@ -174,26 +175,20 @@ static char version[] __initdata =
them to system IRQ numbers. This mapping is card specific and is set to
the configuration of the Cirrus Eval board for this chip. */
#if defined(CONFIG_MACH_IXDP2351)
+#define CS89x0_NONISA_IRQ
static unsigned int netcard_portlist[] __used __initdata = {IXDP2351_VIRT_CS8900_BASE, 0};
static unsigned int cs8900_irq_map[] = {IRQ_IXDP2351_CS8900, 0, 0, 0};
#elif defined(CONFIG_ARCH_IXDP2X01)
+#define CS89x0_NONISA_IRQ
static unsigned int netcard_portlist[] __used __initdata = {IXDP2X01_CS8900_VIRT_BASE, 0};
static unsigned int cs8900_irq_map[] = {IRQ_IXDP2X01_CS8900, 0, 0, 0};
-#elif defined(CONFIG_MACH_QQ2440)
-#include <mach/qq2440.h>
-static unsigned int netcard_portlist[] __used __initdata = { QQ2440_CS8900_VIRT_BASE + 0x300, 0 };
-static unsigned int cs8900_irq_map[] = { QQ2440_CS8900_IRQ, 0, 0, 0 };
-#elif defined(CONFIG_MACH_MX31ADS)
-#include <mach/board-mx31ads.h>
-static unsigned int netcard_portlist[] __used __initdata = {
- PBC_BASE_ADDRESS + PBC_CS8900A_IOBASE + 0x300, 0
-};
-static unsigned cs8900_irq_map[] = {EXPIO_INT_ENET_INT, 0, 0, 0};
#else
+#ifndef CONFIG_CS89x0_PLATFORM
static unsigned int netcard_portlist[] __used __initdata =
{ 0x300, 0x320, 0x340, 0x360, 0x200, 0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x2e0, 0};
static unsigned int cs8900_irq_map[] = {10,11,12,5};
#endif
+#endif
#if DEBUGGING
static unsigned int net_debug = DEBUGGING;
@@ -236,11 +231,16 @@ struct net_local {
unsigned char *end_dma_buff; /* points to the end of the buffer */
unsigned char *rx_dma_ptr; /* points to the next packet */
#endif
+#ifdef CONFIG_CS89x0_PLATFORM
+ void __iomem *virt_addr;/* Virtual address for accessing the CS89x0. */
+ unsigned long phys_addr;/* Physical address for accessing the CS89x0. */
+ unsigned long size; /* Length of CS89x0 memory region. */
+#endif
};
/* Index to functions, as function prototypes. */
-static int cs89x0_probe1(struct net_device *dev, int ioaddr, int modular);
+static int cs89x0_probe1(struct net_device *dev, unsigned long ioaddr, int modular);
static int net_open(struct net_device *dev);
static netdev_tx_t net_send_packet(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t net_interrupt(int irq, void *dev_id);
@@ -294,6 +294,7 @@ static int __init media_fn(char *str)
__setup("cs89x0_media=", media_fn);
+#ifndef CONFIG_CS89x0_PLATFORM
/* Check for a network adaptor of this type, and return '0' iff one exists.
If dev->base_addr == 0, probe all likely locations.
If dev->base_addr == 1, always return failure.
@@ -343,6 +344,7 @@ out:
return ERR_PTR(err);
}
#endif
+#endif
#if defined(CONFIG_MACH_IXDP2351)
static u16
@@ -504,7 +506,7 @@ static const struct net_device_ops net_ops = {
*/
static int __init
-cs89x0_probe1(struct net_device *dev, int ioaddr, int modular)
+cs89x0_probe1(struct net_device *dev, unsigned long ioaddr, int modular)
{
struct net_local *lp = netdev_priv(dev);
static unsigned version_printed;
@@ -529,15 +531,12 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular)
lp->force = g_cs89x0_media__force;
#endif
-#if defined(CONFIG_MACH_QQ2440)
- lp->force |= FORCE_RJ45 | FORCE_FULL;
-#endif
}
/* Grab the region so we can find another board if autoIRQ fails. */
/* WTF is going on here? */
if (!request_region(ioaddr & ~3, NETCARD_IO_EXTENT, DRV_NAME)) {
- printk(KERN_ERR "%s: request_region(0x%x, 0x%x) failed\n",
+ printk(KERN_ERR "%s: request_region(0x%lx, 0x%x) failed\n",
DRV_NAME, ioaddr, NETCARD_IO_EXTENT);
retval = -EBUSY;
goto out1;
@@ -549,7 +548,7 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular)
will skip the test for the ADD_PORT. */
if (ioaddr & 1) {
if (net_debug > 1)
- printk(KERN_INFO "%s: odd ioaddr 0x%x\n", dev->name, ioaddr);
+ printk(KERN_INFO "%s: odd ioaddr 0x%lx\n", dev->name, ioaddr);
if ((ioaddr & 2) != 2)
if ((readword(ioaddr & ~3, ADD_PORT) & ADD_MASK) != ADD_SIG) {
printk(KERN_ERR "%s: bad signature 0x%x\n",
@@ -560,13 +559,13 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular)
}
ioaddr &= ~3;
- printk(KERN_DEBUG "PP_addr at %x[%x]: 0x%x\n",
+ printk(KERN_DEBUG "PP_addr at %lx[%x]: 0x%x\n",
ioaddr, ADD_PORT, readword(ioaddr, ADD_PORT));
writeword(ioaddr, ADD_PORT, PP_ChipID);
tmp = readword(ioaddr, DATA_PORT);
if (tmp != CHIP_EISA_ID_SIG) {
- printk(KERN_DEBUG "%s: incorrect signature at %x[%x]: 0x%x!="
+ printk(KERN_DEBUG "%s: incorrect signature at %lx[%x]: 0x%x!="
CHIP_EISA_ID_SIG_STR "\n",
dev->name, ioaddr, DATA_PORT, tmp);
retval = -ENODEV;
@@ -736,8 +735,9 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular)
dev->irq = i;
} else {
i = lp->isa_config & INT_NO_MASK;
+#ifndef CONFIG_CS89x0_PLATFORM
if (lp->chip_type == CS8900) {
-#ifdef CONFIG_CS89x0_NONISA_IRQ
+#ifdef CS89x0_NONISA_IRQ
i = cs8900_irq_map[0];
#else
/* Translate the IRQ using the IRQ mapping table. */
@@ -758,6 +758,7 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular)
}
#endif
}
+#endif
if (!dev->irq)
dev->irq = i;
}
@@ -1168,6 +1169,7 @@ write_irq(struct net_device *dev, int chip_type, int irq)
int i;
if (chip_type == CS8900) {
+#ifndef CONFIG_CS89x0_PLATFORM
/* Search the mapping table for the corresponding IRQ pin. */
for (i = 0; i != ARRAY_SIZE(cs8900_irq_map); i++)
if (cs8900_irq_map[i] == irq)
@@ -1175,6 +1177,10 @@ write_irq(struct net_device *dev, int chip_type, int irq)
/* Not found */
if (i == ARRAY_SIZE(cs8900_irq_map))
i = 3;
+#else
+ /* INTRQ0 pin is used for interrupt generation. */
+ i = 0;
+#endif
writereg(dev, PP_CS8900_ISAINT, i);
} else {
writereg(dev, PP_CS8920_ISAINT, irq);
@@ -1228,7 +1234,7 @@ net_open(struct net_device *dev)
}
else
{
-#ifndef CONFIG_CS89x0_NONISA_IRQ
+#if !defined(CS89x0_NONISA_IRQ) && !defined(CONFIG_CS89x0_PLATFORM)
if (((1 << dev->irq) & lp->irq_map) == 0) {
printk(KERN_ERR "%s: IRQ %d is not in our map of allowable IRQs, which is %x\n",
dev->name, dev->irq, lp->irq_map);
@@ -1746,7 +1752,7 @@ static int set_mac_address(struct net_device *dev, void *p)
return 0;
}
-#ifdef MODULE
+#if defined(MODULE) && !defined(CONFIG_CS89x0_PLATFORM)
static struct net_device *dev_cs89x0;
@@ -1900,7 +1906,97 @@ cleanup_module(void)
release_region(dev_cs89x0->base_addr, NETCARD_IO_EXTENT);
free_netdev(dev_cs89x0);
}
-#endif /* MODULE */
+#endif /* MODULE && !CONFIG_CS89x0_PLATFORM */
+
+#ifdef CONFIG_CS89x0_PLATFORM
+static int __init cs89x0_platform_probe(struct platform_device *pdev)
+{
+ struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
+ struct net_local *lp;
+ struct resource *mem_res;
+ int err;
+
+ if (!dev)
+ return -ENOMEM;
+
+ lp = netdev_priv(dev);
+
+ mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dev->irq = platform_get_irq(pdev, 0);
+ if (mem_res == NULL || dev->irq <= 0) {
+ dev_warn(&dev->dev, "memory/interrupt resource missing.\n");
+ err = -ENXIO;
+ goto free;
+ }
+
+ lp->phys_addr = mem_res->start;
+ lp->size = resource_size(mem_res);
+ if (!request_mem_region(lp->phys_addr, lp->size, DRV_NAME)) {
+ dev_warn(&dev->dev, "request_mem_region() failed.\n");
+ err = -EBUSY;
+ goto free;
+ }
+
+ lp->virt_addr = ioremap(lp->phys_addr, lp->size);
+ if (!lp->virt_addr) {
+ dev_warn(&dev->dev, "ioremap() failed.\n");
+ err = -ENOMEM;
+ goto release;
+ }
+
+ err = cs89x0_probe1(dev, (unsigned long)lp->virt_addr, 0);
+ if (err) {
+ dev_warn(&dev->dev, "no cs8900 or cs8920 detected.\n");
+ goto unmap;
+ }
+
+ platform_set_drvdata(pdev, dev);
+ return 0;
+
+unmap:
+ iounmap(lp->virt_addr);
+release:
+ release_mem_region(lp->phys_addr, lp->size);
+free:
+ free_netdev(dev);
+ return err;
+}
+
+static int cs89x0_platform_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct net_local *lp = netdev_priv(dev);
+
+ unregister_netdev(dev);
+ iounmap(lp->virt_addr);
+ release_mem_region(lp->phys_addr, lp->size);
+ free_netdev(dev);
+ return 0;
+}
+
+static struct platform_driver cs89x0_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .remove = cs89x0_platform_remove,
+};
+
+static int __init cs89x0_init(void)
+{
+ return platform_driver_probe(&cs89x0_driver, cs89x0_platform_probe);
+}
+
+module_init(cs89x0_init);
+
+static void __exit cs89x0_cleanup(void)
+{
+ platform_driver_unregister(&cs89x0_driver);
+}
+
+module_exit(cs89x0_cleanup);
+
+#endif /* CONFIG_CS89x0_PLATFORM */
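
platform_driver_probe() suits probe routines that live in __init memory: it registers the driver, binds whatever matching devices are already registered, and then drops the probe callback so nothing can call the discarded code later. That is why cs89x0_driver above deliberately carries no .probe member. A sketch restating the registration with the rationale in comments:

static int __init example_init(void)
{
	/*
	 * One-shot binding: after this returns, the __init probe text
	 * may be freed, so devices registered later cannot bind.
	 */
	return platform_driver_probe(&cs89x0_driver, cs89x0_platform_probe);
}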
/*
* Local variables:
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index d9428f0e738a..e7bed5303997 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -968,7 +968,6 @@ static int gfar_probe(struct platform_device *ofdev)
struct gfar_private *priv = NULL;
struct gfar __iomem *regs = NULL;
int err = 0, i, grp_idx = 0;
- int len_devname;
u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
u32 isrg = 0;
u32 __iomem *baddr;
@@ -1169,40 +1168,16 @@ static int gfar_probe(struct platform_device *ofdev)
priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
/* fill out IRQ number and name fields */
- len_devname = strlen(dev->name);
for (i = 0; i < priv->num_grps; i++) {
- strncpy(&priv->gfargrp[i].int_name_tx[0], dev->name,
- len_devname);
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
- strncpy(&priv->gfargrp[i].int_name_tx[len_devname],
- "_g", sizeof("_g"));
- priv->gfargrp[i].int_name_tx[
- strlen(priv->gfargrp[i].int_name_tx)] = i+48;
- strncpy(&priv->gfargrp[i].int_name_tx[strlen(
- priv->gfargrp[i].int_name_tx)],
- "_tx", sizeof("_tx") + 1);
-
- strncpy(&priv->gfargrp[i].int_name_rx[0], dev->name,
- len_devname);
- strncpy(&priv->gfargrp[i].int_name_rx[len_devname],
- "_g", sizeof("_g"));
- priv->gfargrp[i].int_name_rx[
- strlen(priv->gfargrp[i].int_name_rx)] = i+48;
- strncpy(&priv->gfargrp[i].int_name_rx[strlen(
- priv->gfargrp[i].int_name_rx)],
- "_rx", sizeof("_rx") + 1);
-
- strncpy(&priv->gfargrp[i].int_name_er[0], dev->name,
- len_devname);
- strncpy(&priv->gfargrp[i].int_name_er[len_devname],
- "_g", sizeof("_g"));
- priv->gfargrp[i].int_name_er[strlen(
- priv->gfargrp[i].int_name_er)] = i+48;
- strncpy(&priv->gfargrp[i].int_name_er[strlen(\
- priv->gfargrp[i].int_name_er)],
- "_er", sizeof("_er") + 1);
+ sprintf(priv->gfargrp[i].int_name_tx, "%s%s%c%s",
+ dev->name, "_g", '0' + i, "_tx");
+ sprintf(priv->gfargrp[i].int_name_rx, "%s%s%c%s",
+ dev->name, "_g", '0' + i, "_rx");
+ sprintf(priv->gfargrp[i].int_name_er, "%s%s%c%s",
+ dev->name, "_g", '0' + i, "_er");
} else
- priv->gfargrp[i].int_name_tx[len_devname] = '\0';
+ strcpy(priv->gfargrp[i].int_name_tx, dev->name);
}
/* Initialize the filer table */
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index fc2488adca36..4c9f8d487dbb 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -517,7 +517,7 @@ extern const char gfar_driver_version[];
#define RXFCB_PERR_MASK 0x000c
#define RXFCB_PERR_BADL3 0x0008
-#define GFAR_INT_NAME_MAX IFNAMSIZ + 4
+#define GFAR_INT_NAME_MAX (IFNAMSIZ + 6) /* '_g#_xx' */
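
The new bound works out as follows: dev->name holds at most IFNAMSIZ - 1 visible characters, a suffix such as "_g0_tx" adds six more, and the terminating NUL adds one, giving (IFNAMSIZ - 1) + 6 + 1 = IFNAMSIZ + 6 bytes, exactly GFAR_INT_NAME_MAX, so the sprintf() calls in gfar_probe() cannot overflow. A sketch with a hypothetical helper:

static void example_int_name(char buf[GFAR_INT_NAME_MAX],
			     const char *devname, int grp)
{
	/* worst case: 15 (name) + 6 ("_g0_tx") + 1 (NUL) = IFNAMSIZ + 6 */
	sprintf(buf, "%s_g%c_tx", devname, '0' + grp);
}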
struct txbd8
{
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 2b5af22419a5..385a4d5c7c25 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -36,8 +36,8 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 0
-#define _QLCNIC_LINUX_SUBVERSION 25
-#define QLCNIC_LINUX_VERSIONID "5.0.26"
+#define _QLCNIC_LINUX_SUBVERSION 27
+#define QLCNIC_LINUX_VERSIONID "5.0.27"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 81bb1a69e69f..75c32e875fef 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -1458,8 +1458,10 @@ qlcnic_reset_context(struct qlcnic_adapter *adapter)
if (netif_running(netdev)) {
err = qlcnic_attach(adapter);
- if (!err)
+ if (!err) {
__qlcnic_up(adapter, netdev);
+ qlcnic_restore_indev_addr(netdev, NETDEV_UP);
+ }
}
netif_device_attach(netdev);
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 1dc4fad593e7..fee449355014 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2280,7 +2280,7 @@ static int __devinit smc_drv_probe(struct platform_device *pdev)
if (ret)
goto out_release_io;
#if defined(CONFIG_SA1100_ASSABET)
- NCR_0 |= NCR_ENET_OSC_EN;
+ neponset_ncr_set(NCR_ENET_OSC_EN);
#endif
platform_set_drvdata(pdev, ndev);
ret = smc_enable_device(pdev);
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index e535137eb2d0..468047866c8c 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -356,7 +356,7 @@ config VLSI_FIR
config SA1100_FIR
tristate "SA1100 Internal IR"
- depends on ARCH_SA1100 && IRDA
+ depends on ARCH_SA1100 && IRDA && DMA_SA11X0
config VIA_FIR
tristate "VIA VT8231/VT1211 SIR/MIR/FIR"
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c
index da2705061a60..a0d1913a58d3 100644
--- a/drivers/net/irda/sa1100_ir.c
+++ b/drivers/net/irda/sa1100_ir.c
@@ -15,7 +15,7 @@
* This driver takes one kernel command line parameter, sa1100ir=, with
* the following options:
* max_rate:baudrate - set the maximum baud rate
- * power_leve:level - set the transmitter power level
+ * power_level:level - set the transmitter power level
* tx_lpm:0|1 - set transmit low power mode
*/
#include <linux/module.h>
@@ -30,13 +30,13 @@
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/sa11x0-dma.h>
#include <net/irda/irda.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>
-#include <asm/irq.h>
-#include <mach/dma.h>
#include <mach/hardware.h>
#include <asm/mach/irda.h>
@@ -44,8 +44,15 @@ static int power_level = 3;
static int tx_lpm;
static int max_rate = 4000000;
+struct sa1100_buf {
+ struct device *dev;
+ struct sk_buff *skb;
+ struct scatterlist sg;
+ struct dma_chan *chan;
+ dma_cookie_t cookie;
+};
+
struct sa1100_irda {
- unsigned char hscr0;
unsigned char utcr4;
unsigned char power;
unsigned char open;
@@ -53,12 +60,8 @@ struct sa1100_irda {
int speed;
int newspeed;
- struct sk_buff *txskb;
- struct sk_buff *rxskb;
- dma_addr_t txbuf_dma;
- dma_addr_t rxbuf_dma;
- dma_regs_t *txdma;
- dma_regs_t *rxdma;
+ struct sa1100_buf dma_rx;
+ struct sa1100_buf dma_tx;
struct device *dev;
struct irda_platform_data *pdata;
@@ -67,23 +70,103 @@ struct sa1100_irda {
iobuff_t tx_buff;
iobuff_t rx_buff;
+
+ int (*tx_start)(struct sk_buff *, struct net_device *, struct sa1100_irda *);
+ irqreturn_t (*irq)(struct net_device *, struct sa1100_irda *);
};
+static int sa1100_irda_set_speed(struct sa1100_irda *, int);
+
#define IS_FIR(si) ((si)->speed >= 4000000)
#define HPSIR_MAX_RXLEN 2047
+static struct dma_slave_config sa1100_irda_sir_tx = {
+ .direction = DMA_TO_DEVICE,
+ .dst_addr = __PREG(Ser2UTDR),
+ .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
+ .dst_maxburst = 4,
+};
+
+static struct dma_slave_config sa1100_irda_fir_rx = {
+ .direction = DMA_FROM_DEVICE,
+ .src_addr = __PREG(Ser2HSDR),
+ .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
+ .src_maxburst = 8,
+};
+
+static struct dma_slave_config sa1100_irda_fir_tx = {
+ .direction = DMA_TO_DEVICE,
+ .dst_addr = __PREG(Ser2HSDR),
+ .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
+ .dst_maxburst = 8,
+};
+
+static unsigned sa1100_irda_dma_xferred(struct sa1100_buf *buf)
+{
+ struct dma_chan *chan = buf->chan;
+ struct dma_tx_state state;
+ enum dma_status status;
+
+ status = chan->device->device_tx_status(chan, buf->cookie, &state);
+ if (status != DMA_PAUSED)
+ return 0;
+
+ return sg_dma_len(&buf->sg) - state.residue;
+}
+
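device_tx_status() reports the residue, the bytes the engine has not yet moved, so progress is the mapped length minus the residue; the helper above only trusts that number once the channel is paused. In generic form, as a sketch (example_bytes_done() is hypothetical):

static unsigned example_bytes_done(struct dma_chan *chan,
				   dma_cookie_t cookie, unsigned total)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = chan->device->device_tx_status(chan, cookie, &state);
	if (status != DMA_PAUSED)
		return 0;	/* residue only meaningful while paused */
	return total - state.residue;
}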
+static int sa1100_irda_dma_request(struct device *dev, struct sa1100_buf *buf,
+ const char *name, struct dma_slave_config *cfg)
+{
+ dma_cap_mask_t m;
+ int ret;
+
+ dma_cap_zero(m);
+ dma_cap_set(DMA_SLAVE, m);
+
+ buf->chan = dma_request_channel(m, sa11x0_dma_filter_fn, (void *)name);
+ if (!buf->chan) {
+ dev_err(dev, "unable to request DMA channel for %s\n",
+ name);
+ return -ENOENT;
+ }
+
+ ret = dmaengine_slave_config(buf->chan, cfg);
+ if (ret)
+ dev_warn(dev, "DMA slave_config for %s returned %d\n",
+ name, ret);
+
+ buf->dev = buf->chan->device->dev;
+
+ return 0;
+}
+
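dma_request_channel() walks the registered DMA devices and offers each candidate channel to the filter function; sa11x0_dma_filter_fn() matches on the request-line name passed as the opaque parameter, and dmaengine_slave_config() then describes the device-side FIFO before any transfer is prepared. The same request pattern in generic form (example_get_chan() is hypothetical):

#include <linux/dmaengine.h>
#include <linux/sa11x0-dma.h>

static struct dma_chan *example_get_chan(struct device *dev,
		const char *name, struct dma_slave_config *cfg)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	chan = dma_request_channel(mask, sa11x0_dma_filter_fn, (void *)name);
	if (!chan)
		return NULL;
	if (dmaengine_slave_config(chan, cfg))
		dev_warn(dev, "DMA slave config for %s failed\n", name);
	return chan;
}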
+static void sa1100_irda_dma_start(struct sa1100_buf *buf,
+ enum dma_transfer_direction dir, dma_async_tx_callback cb, void *cb_p)
+{
+ struct dma_async_tx_descriptor *desc;
+ struct dma_chan *chan = buf->chan;
+
+ desc = chan->device->device_prep_slave_sg(chan, &buf->sg, 1, dir,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (desc) {
+ desc->callback = cb;
+ desc->callback_param = cb_p;
+ buf->cookie = dmaengine_submit(desc);
+ dma_async_issue_pending(chan);
+ }
+}
+
/*
* Allocate and map the receive buffer, unless it is already allocated.
*/
static int sa1100_irda_rx_alloc(struct sa1100_irda *si)
{
- if (si->rxskb)
+ if (si->dma_rx.skb)
return 0;
- si->rxskb = alloc_skb(HPSIR_MAX_RXLEN + 1, GFP_ATOMIC);
-
- if (!si->rxskb) {
+ si->dma_rx.skb = alloc_skb(HPSIR_MAX_RXLEN + 1, GFP_ATOMIC);
+ if (!si->dma_rx.skb) {
printk(KERN_ERR "sa1100_ir: out of memory for RX SKB\n");
return -ENOMEM;
}
@@ -92,11 +175,14 @@ static int sa1100_irda_rx_alloc(struct sa1100_irda *si)
* Align any IP headers that may be contained
* within the frame.
*/
- skb_reserve(si->rxskb, 1);
+ skb_reserve(si->dma_rx.skb, 1);
+
+ sg_set_buf(&si->dma_rx.sg, si->dma_rx.skb->data, HPSIR_MAX_RXLEN);
+ if (dma_map_sg(si->dma_rx.dev, &si->dma_rx.sg, 1, DMA_FROM_DEVICE) == 0) {
+ dev_kfree_skb_any(si->dma_rx.skb);
+ return -ENOMEM;
+ }
- si->rxbuf_dma = dma_map_single(si->dev, si->rxskb->data,
- HPSIR_MAX_RXLEN,
- DMA_FROM_DEVICE);
return 0;
}
@@ -106,7 +192,7 @@ static int sa1100_irda_rx_alloc(struct sa1100_irda *si)
*/
static void sa1100_irda_rx_dma_start(struct sa1100_irda *si)
{
- if (!si->rxskb) {
+ if (!si->dma_rx.skb) {
printk(KERN_ERR "sa1100_ir: rx buffer went missing\n");
return;
}
@@ -114,254 +200,87 @@ static void sa1100_irda_rx_dma_start(struct sa1100_irda *si)
/*
* First empty receive FIFO
*/
- Ser2HSCR0 = si->hscr0 | HSCR0_HSSP;
+ Ser2HSCR0 = HSCR0_HSSP;
/*
* Enable the DMA, receiver and receive interrupt.
*/
- sa1100_clear_dma(si->rxdma);
- sa1100_start_dma(si->rxdma, si->rxbuf_dma, HPSIR_MAX_RXLEN);
- Ser2HSCR0 = si->hscr0 | HSCR0_HSSP | HSCR0_RXE;
+ dmaengine_terminate_all(si->dma_rx.chan);
+ sa1100_irda_dma_start(&si->dma_rx, DMA_DEV_TO_MEM, NULL, NULL);
+
+ Ser2HSCR0 = HSCR0_HSSP | HSCR0_RXE;
}
-/*
- * Set the IrDA communications speed.
- */
-static int sa1100_irda_set_speed(struct sa1100_irda *si, int speed)
+static void sa1100_irda_check_speed(struct sa1100_irda *si)
{
- unsigned long flags;
- int brd, ret = -EINVAL;
-
- switch (speed) {
- case 9600: case 19200: case 38400:
- case 57600: case 115200:
- brd = 3686400 / (16 * speed) - 1;
-
- /*
- * Stop the receive DMA.
- */
- if (IS_FIR(si))
- sa1100_stop_dma(si->rxdma);
-
- local_irq_save(flags);
-
- Ser2UTCR3 = 0;
- Ser2HSCR0 = HSCR0_UART;
-
- Ser2UTCR1 = brd >> 8;
- Ser2UTCR2 = brd;
-
- /*
- * Clear status register
- */
- Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID;
- Ser2UTCR3 = UTCR3_RIE | UTCR3_RXE | UTCR3_TXE;
-
- if (si->pdata->set_speed)
- si->pdata->set_speed(si->dev, speed);
-
- si->speed = speed;
-
- local_irq_restore(flags);
- ret = 0;
- break;
-
- case 4000000:
- local_irq_save(flags);
-
- si->hscr0 = 0;
-
- Ser2HSSR0 = 0xff;
- Ser2HSCR0 = si->hscr0 | HSCR0_HSSP;
- Ser2UTCR3 = 0;
-
- si->speed = speed;
-
- if (si->pdata->set_speed)
- si->pdata->set_speed(si->dev, speed);
-
- sa1100_irda_rx_alloc(si);
- sa1100_irda_rx_dma_start(si);
-
- local_irq_restore(flags);
-
- break;
-
- default:
- break;
+ if (si->newspeed) {
+ sa1100_irda_set_speed(si, si->newspeed);
+ si->newspeed = 0;
}
-
- return ret;
}
/*
- * Control the power state of the IrDA transmitter.
- * State:
- * 0 - off
- * 1 - short range, lowest power
- * 2 - medium range, medium power
- * 3 - maximum range, high power
- *
- * Currently, only assabet is known to support this.
+ * HP-SIR format support.
*/
-static int
-__sa1100_irda_set_power(struct sa1100_irda *si, unsigned int state)
+static void sa1100_irda_sirtxdma_irq(void *id)
{
- int ret = 0;
- if (si->pdata->set_power)
- ret = si->pdata->set_power(si->dev, state);
- return ret;
-}
-
-static inline int
-sa1100_set_power(struct sa1100_irda *si, unsigned int state)
-{
- int ret;
-
- ret = __sa1100_irda_set_power(si, state);
- if (ret == 0)
- si->power = state;
+ struct net_device *dev = id;
+ struct sa1100_irda *si = netdev_priv(dev);
- return ret;
-}
+ dma_unmap_sg(si->dma_tx.dev, &si->dma_tx.sg, 1, DMA_TO_DEVICE);
+ dev_kfree_skb(si->dma_tx.skb);
+ si->dma_tx.skb = NULL;
-static int sa1100_irda_startup(struct sa1100_irda *si)
-{
- int ret;
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += sg_dma_len(&si->dma_tx.sg);
- /*
- * Ensure that the ports for this device are setup correctly.
- */
- if (si->pdata->startup) {
- ret = si->pdata->startup(si->dev);
- if (ret)
- return ret;
- }
-
- /*
- * Configure PPC for IRDA - we want to drive TXD2 low.
- * We also want to drive this pin low during sleep.
- */
- PPSR &= ~PPC_TXD2;
- PSDR &= ~PPC_TXD2;
- PPDR |= PPC_TXD2;
-
- /*
- * Enable HP-SIR modulation, and ensure that the port is disabled.
- */
- Ser2UTCR3 = 0;
- Ser2HSCR0 = HSCR0_UART;
- Ser2UTCR4 = si->utcr4;
- Ser2UTCR0 = UTCR0_8BitData;
- Ser2HSCR2 = HSCR2_TrDataH | HSCR2_RcDataL;
+ /* We need to ensure that the transmitter has finished. */
+ do
+ rmb();
+ while (Ser2UTSR1 & UTSR1_TBY);
/*
- * Clear status register
+ * Ok, we've finished transmitting. Now enable the receiver.
+ * Sometimes we get a receive IRQ immediately after a transmit...
*/
Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID;
+ Ser2UTCR3 = UTCR3_RIE | UTCR3_RXE | UTCR3_TXE;
- ret = sa1100_irda_set_speed(si, si->speed = 9600);
- if (ret) {
- Ser2UTCR3 = 0;
- Ser2HSCR0 = 0;
-
- if (si->pdata->shutdown)
- si->pdata->shutdown(si->dev);
- }
-
- return ret;
-}
-
-static void sa1100_irda_shutdown(struct sa1100_irda *si)
-{
- /*
- * Stop all DMA activity.
- */
- sa1100_stop_dma(si->rxdma);
- sa1100_stop_dma(si->txdma);
-
- /* Disable the port. */
- Ser2UTCR3 = 0;
- Ser2HSCR0 = 0;
+ sa1100_irda_check_speed(si);
- if (si->pdata->shutdown)
- si->pdata->shutdown(si->dev);
+ /* I'm hungry! */
+ netif_wake_queue(dev);
}
-#ifdef CONFIG_PM
-/*
- * Suspend the IrDA interface.
- */
-static int sa1100_irda_suspend(struct platform_device *pdev, pm_message_t state)
+static int sa1100_irda_sir_tx_start(struct sk_buff *skb, struct net_device *dev,
+ struct sa1100_irda *si)
{
- struct net_device *dev = platform_get_drvdata(pdev);
- struct sa1100_irda *si;
-
- if (!dev)
- return 0;
-
- si = netdev_priv(dev);
- if (si->open) {
- /*
- * Stop the transmit queue
- */
- netif_device_detach(dev);
- disable_irq(dev->irq);
- sa1100_irda_shutdown(si);
- __sa1100_irda_set_power(si, 0);
+ si->tx_buff.data = si->tx_buff.head;
+ si->tx_buff.len = async_wrap_skb(skb, si->tx_buff.data,
+ si->tx_buff.truesize);
+
+ si->dma_tx.skb = skb;
+ sg_set_buf(&si->dma_tx.sg, si->tx_buff.data, si->tx_buff.len);
+ if (dma_map_sg(si->dma_tx.dev, &si->dma_tx.sg, 1, DMA_TO_DEVICE) == 0) {
+ si->dma_tx.skb = NULL;
+ netif_wake_queue(dev);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
}
- return 0;
-}
-
-/*
- * Resume the IrDA interface.
- */
-static int sa1100_irda_resume(struct platform_device *pdev)
-{
- struct net_device *dev = platform_get_drvdata(pdev);
- struct sa1100_irda *si;
-
- if (!dev)
- return 0;
+ sa1100_irda_dma_start(&si->dma_tx, DMA_MEM_TO_DEV, sa1100_irda_sirtxdma_irq, dev);
- si = netdev_priv(dev);
- if (si->open) {
- /*
- * If we missed a speed change, initialise at the new speed
- * directly. It is debatable whether this is actually
- * required, but in the interests of continuing from where
- * we left off it is desirable. The converse argument is
- * that we should re-negotiate at 9600 baud again.
- */
- if (si->newspeed) {
- si->speed = si->newspeed;
- si->newspeed = 0;
- }
-
- sa1100_irda_startup(si);
- __sa1100_irda_set_power(si, si->power);
- enable_irq(dev->irq);
-
- /*
- * This automatically wakes up the queue
- */
- netif_device_attach(dev);
- }
+ /*
+ * The mean turn-around time is enforced by XBOF padding,
+ * so we don't have to do anything special here.
+ */
+ Ser2UTCR3 = UTCR3_TXE;
- return 0;
+ return NETDEV_TX_OK;
}
-#else
-#define sa1100_irda_suspend NULL
-#define sa1100_irda_resume NULL
-#endif
-/*
- * HP-SIR format interrupt service routines.
- */
-static void sa1100_irda_hpsir_irq(struct net_device *dev)
+static irqreturn_t sa1100_irda_sir_irq(struct net_device *dev, struct sa1100_irda *si)
{
- struct sa1100_irda *si = netdev_priv(dev);
int status;
status = Ser2UTSR0;
@@ -414,51 +333,96 @@ static void sa1100_irda_hpsir_irq(struct net_device *dev)
}
- if (status & UTSR0_TFS && si->tx_buff.len) {
- /*
- * Transmitter FIFO is not full
- */
- do {
- Ser2UTDR = *si->tx_buff.data++;
- si->tx_buff.len -= 1;
- } while (Ser2UTSR1 & UTSR1_TNF && si->tx_buff.len);
+ return IRQ_HANDLED;
+}
- if (si->tx_buff.len == 0) {
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += si->tx_buff.data -
- si->tx_buff.head;
+/*
+ * FIR format support.
+ */
+static void sa1100_irda_firtxdma_irq(void *id)
+{
+ struct net_device *dev = id;
+ struct sa1100_irda *si = netdev_priv(dev);
+ struct sk_buff *skb;
- /*
- * We need to ensure that the transmitter has
- * finished.
- */
- do
- rmb();
- while (Ser2UTSR1 & UTSR1_TBY);
+ /*
+ * Wait for the transmission to complete. Unfortunately,
+ * the hardware doesn't give us an interrupt to indicate
+ * "end of frame".
+ */
+ do
+ rmb();
+ while (!(Ser2HSSR0 & HSSR0_TUR) || Ser2HSSR1 & HSSR1_TBY);
- /*
- * Ok, we've finished transmitting. Now enable
- * the receiver. Sometimes we get a receive IRQ
- * immediately after a transmit...
- */
- Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID;
- Ser2UTCR3 = UTCR3_RIE | UTCR3_RXE | UTCR3_TXE;
+ /*
+ * Clear the transmit underrun bit.
+ */
+ Ser2HSSR0 = HSSR0_TUR;
- if (si->newspeed) {
- sa1100_irda_set_speed(si, si->newspeed);
- si->newspeed = 0;
- }
+ /*
+ * Do we need to change speed? Note that we're lazy
+ * here - we don't free the old dma_rx.skb. We don't need
+ * to allocate a buffer either.
+ */
+ sa1100_irda_check_speed(si);
- /* I'm hungry! */
- netif_wake_queue(dev);
- }
+ /*
+ * Start reception. This disables the transmitter for
+ * us. This will be using the existing RX buffer.
+ */
+ sa1100_irda_rx_dma_start(si);
+
+ /* Account and free the packet. */
+ skb = si->dma_tx.skb;
+ if (skb) {
+ dma_unmap_sg(si->dma_tx.dev, &si->dma_tx.sg, 1,
+ DMA_TO_DEVICE);
+		dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+ dev_kfree_skb_irq(skb);
+ si->dma_tx.skb = NULL;
}
+
+ /*
+ * Make sure that the TX queue is available for sending
+ * (for retries). TX has priority over RX at all times.
+ */
+ netif_wake_queue(dev);
+}
+
+static int sa1100_irda_fir_tx_start(struct sk_buff *skb, struct net_device *dev,
+ struct sa1100_irda *si)
+{
+ int mtt = irda_get_mtt(skb);
+
+ si->dma_tx.skb = skb;
+ sg_set_buf(&si->dma_tx.sg, skb->data, skb->len);
+ if (dma_map_sg(si->dma_tx.dev, &si->dma_tx.sg, 1, DMA_TO_DEVICE) == 0) {
+ si->dma_tx.skb = NULL;
+ netif_wake_queue(dev);
+ dev->stats.tx_dropped++;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ sa1100_irda_dma_start(&si->dma_tx, DMA_MEM_TO_DEV, sa1100_irda_firtxdma_irq, dev);
+
+ /*
+	 * If we have a mean turn-around time, impose the specified
+	 * delay. We could shorten this by timing from
+ * the point we received the packet.
+ */
+ if (mtt)
+ udelay(mtt);
+
+ Ser2HSCR0 = HSCR0_HSSP | HSCR0_TXE;
+
+ return NETDEV_TX_OK;
}
static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev)
{
- struct sk_buff *skb = si->rxskb;
- dma_addr_t dma_addr;
+ struct sk_buff *skb = si->dma_rx.skb;
unsigned int len, stat, data;
if (!skb) {
@@ -469,11 +433,10 @@ static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev
/*
* Get the current data position.
*/
- dma_addr = sa1100_get_dma_pos(si->rxdma);
- len = dma_addr - si->rxbuf_dma;
+ len = sa1100_irda_dma_xferred(&si->dma_rx);
if (len > HPSIR_MAX_RXLEN)
len = HPSIR_MAX_RXLEN;
- dma_unmap_single(si->dev, si->rxbuf_dma, len, DMA_FROM_DEVICE);
+ dma_unmap_sg(si->dma_rx.dev, &si->dma_rx.sg, 1, DMA_FROM_DEVICE);
do {
/*
@@ -501,7 +464,7 @@ static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev
} while (Ser2HSSR0 & HSSR0_EIF);
if (stat & HSSR1_EOF) {
- si->rxskb = NULL;
+ si->dma_rx.skb = NULL;
skb_put(skb, len);
skb->dev = dev;
@@ -518,28 +481,23 @@ static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev
netif_rx(skb);
} else {
/*
- * Remap the buffer.
+ * Remap the buffer - it was previously mapped, and we
+ * hope that this succeeds.
*/
- si->rxbuf_dma = dma_map_single(si->dev, si->rxskb->data,
- HPSIR_MAX_RXLEN,
- DMA_FROM_DEVICE);
+ dma_map_sg(si->dma_rx.dev, &si->dma_rx.sg, 1, DMA_FROM_DEVICE);
}
}
/*
- * FIR format interrupt service routine. We only have to
- * handle RX events; transmit events go via the TX DMA handler.
- *
- * No matter what, we disable RX, process, and the restart RX.
+ * We only have to handle RX events here; transmit events go via the TX
+ * DMA handler. We disable RX, process, and then restart RX.
*/
-static void sa1100_irda_fir_irq(struct net_device *dev)
+static irqreturn_t sa1100_irda_fir_irq(struct net_device *dev, struct sa1100_irda *si)
{
- struct sa1100_irda *si = netdev_priv(dev);
-
/*
* Stop RX DMA
*/
- sa1100_stop_dma(si->rxdma);
+ dmaengine_pause(si->dma_rx.chan);
/*
* Framing error - we throw away the packet completely.
@@ -555,7 +513,7 @@ static void sa1100_irda_fir_irq(struct net_device *dev)
/*
* Clear out the DMA...
*/
- Ser2HSCR0 = si->hscr0 | HSCR0_HSSP;
+ Ser2HSCR0 = HSCR0_HSSP;
/*
* Clear selected status bits now, so we
@@ -577,74 +535,124 @@ static void sa1100_irda_fir_irq(struct net_device *dev)
* No matter what happens, we must restart reception.
*/
sa1100_irda_rx_dma_start(si);
-}
-static irqreturn_t sa1100_irda_irq(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- if (IS_FIR(((struct sa1100_irda *)netdev_priv(dev))))
- sa1100_irda_fir_irq(dev);
- else
- sa1100_irda_hpsir_irq(dev);
return IRQ_HANDLED;
}
/*
- * TX DMA completion handler.
+ * Set the IrDA communications speed.
*/
-static void sa1100_irda_txdma_irq(void *id)
+static int sa1100_irda_set_speed(struct sa1100_irda *si, int speed)
{
- struct net_device *dev = id;
- struct sa1100_irda *si = netdev_priv(dev);
- struct sk_buff *skb = si->txskb;
+ unsigned long flags;
+ int brd, ret = -EINVAL;
- si->txskb = NULL;
+ switch (speed) {
+ case 9600: case 19200: case 38400:
+ case 57600: case 115200:
+ brd = 3686400 / (16 * speed) - 1;
- /*
- * Wait for the transmission to complete. Unfortunately,
- * the hardware doesn't give us an interrupt to indicate
- * "end of frame".
- */
- do
- rmb();
- while (!(Ser2HSSR0 & HSSR0_TUR) || Ser2HSSR1 & HSSR1_TBY);
+ /* Stop the receive DMA, and configure transmit. */
+ if (IS_FIR(si)) {
+ dmaengine_terminate_all(si->dma_rx.chan);
+ dmaengine_slave_config(si->dma_tx.chan,
+ &sa1100_irda_sir_tx);
+ }
- /*
- * Clear the transmit underrun bit.
- */
- Ser2HSSR0 = HSSR0_TUR;
+ local_irq_save(flags);
- /*
- * Do we need to change speed? Note that we're lazy
- * here - we don't free the old rxskb. We don't need
- * to allocate a buffer either.
- */
- if (si->newspeed) {
- sa1100_irda_set_speed(si, si->newspeed);
- si->newspeed = 0;
- }
+ Ser2UTCR3 = 0;
+ Ser2HSCR0 = HSCR0_UART;
- /*
- * Start reception. This disables the transmitter for
- * us. This will be using the existing RX buffer.
- */
- sa1100_irda_rx_dma_start(si);
+ Ser2UTCR1 = brd >> 8;
+ Ser2UTCR2 = brd;
- /*
- * Account and free the packet.
- */
- if (skb) {
- dma_unmap_single(si->dev, si->txbuf_dma, skb->len, DMA_TO_DEVICE);
- dev->stats.tx_packets ++;
- dev->stats.tx_bytes += skb->len;
- dev_kfree_skb_irq(skb);
+ /*
+ * Clear status register
+ */
+ Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID;
+ Ser2UTCR3 = UTCR3_RIE | UTCR3_RXE | UTCR3_TXE;
+
+ if (si->pdata->set_speed)
+ si->pdata->set_speed(si->dev, speed);
+
+ si->speed = speed;
+ si->tx_start = sa1100_irda_sir_tx_start;
+ si->irq = sa1100_irda_sir_irq;
+
+ local_irq_restore(flags);
+ ret = 0;
+ break;
+
+ case 4000000:
+ if (!IS_FIR(si))
+ dmaengine_slave_config(si->dma_tx.chan,
+ &sa1100_irda_fir_tx);
+
+ local_irq_save(flags);
+
+ Ser2HSSR0 = 0xff;
+ Ser2HSCR0 = HSCR0_HSSP;
+ Ser2UTCR3 = 0;
+
+ si->speed = speed;
+ si->tx_start = sa1100_irda_fir_tx_start;
+ si->irq = sa1100_irda_fir_irq;
+
+ if (si->pdata->set_speed)
+ si->pdata->set_speed(si->dev, speed);
+
+ sa1100_irda_rx_alloc(si);
+ sa1100_irda_rx_dma_start(si);
+
+ local_irq_restore(flags);
+
+ break;
+
+ default:
+ break;
}
- /*
- * Make sure that the TX queue is available for sending
- * (for retries). TX has priority over RX at all times.
- */
- netif_wake_queue(dev);
+ return ret;
+}
+
+/*
+ * Control the power state of the IrDA transmitter.
+ * State:
+ * 0 - off
+ * 1 - short range, lowest power
+ * 2 - medium range, medium power
+ * 3 - maximum range, high power
+ *
+ * Currently, only assabet is known to support this.
+ */
+static int
+__sa1100_irda_set_power(struct sa1100_irda *si, unsigned int state)
+{
+ int ret = 0;
+ if (si->pdata->set_power)
+ ret = si->pdata->set_power(si->dev, state);
+ return ret;
+}
+
+static inline int
+sa1100_set_power(struct sa1100_irda *si, unsigned int state)
+{
+ int ret;
+
+ ret = __sa1100_irda_set_power(si, state);
+ if (ret == 0)
+ si->power = state;
+
+ return ret;
+}
+
+static irqreturn_t sa1100_irda_irq(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct sa1100_irda *si = netdev_priv(dev);
+
+ return si->irq(dev, si);
}
static int sa1100_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -660,62 +668,19 @@ static int sa1100_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
if (speed != si->speed && speed != -1)
si->newspeed = speed;
- /*
- * If this is an empty frame, we can bypass a lot.
- */
+ /* If this is an empty frame, we can bypass a lot. */
if (skb->len == 0) {
- if (si->newspeed) {
- si->newspeed = 0;
- sa1100_irda_set_speed(si, speed);
- }
+ sa1100_irda_check_speed(si);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
- if (!IS_FIR(si)) {
- netif_stop_queue(dev);
-
- si->tx_buff.data = si->tx_buff.head;
- si->tx_buff.len = async_wrap_skb(skb, si->tx_buff.data,
- si->tx_buff.truesize);
-
- /*
- * Set the transmit interrupt enable. This will fire
- * off an interrupt immediately. Note that we disable
- * the receiver so we won't get spurious characteres
- * received.
- */
- Ser2UTCR3 = UTCR3_TIE | UTCR3_TXE;
-
- dev_kfree_skb(skb);
- } else {
- int mtt = irda_get_mtt(skb);
-
- /*
- * We must not be transmitting...
- */
- BUG_ON(si->txskb);
-
- netif_stop_queue(dev);
-
- si->txskb = skb;
- si->txbuf_dma = dma_map_single(si->dev, skb->data,
- skb->len, DMA_TO_DEVICE);
-
- sa1100_start_dma(si->txdma, si->txbuf_dma, skb->len);
-
- /*
- * If we have a mean turn-around time, impose the specified
- * specified delay. We could shorten this by timing from
- * the point we received the packet.
- */
- if (mtt)
- udelay(mtt);
+ netif_stop_queue(dev);
- Ser2HSCR0 = si->hscr0 | HSCR0_HSSP | HSCR0_TXE;
- }
+ /* We must not already have a skb to transmit... */
+ BUG_ON(si->dma_tx.skb);
- return NETDEV_TX_OK;
+ return si->tx_start(skb, dev, si);
}
static int
@@ -762,6 +727,69 @@ sa1100_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
return ret;
}
+static int sa1100_irda_startup(struct sa1100_irda *si)
+{
+ int ret;
+
+ /*
+ * Ensure that the ports for this device are setup correctly.
+ */
+ if (si->pdata->startup) {
+ ret = si->pdata->startup(si->dev);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Configure PPC for IRDA - we want to drive TXD2 low.
+ * We also want to drive this pin low during sleep.
+ */
+ PPSR &= ~PPC_TXD2;
+ PSDR &= ~PPC_TXD2;
+ PPDR |= PPC_TXD2;
+
+ /*
+ * Enable HP-SIR modulation, and ensure that the port is disabled.
+ */
+ Ser2UTCR3 = 0;
+ Ser2HSCR0 = HSCR0_UART;
+ Ser2UTCR4 = si->utcr4;
+ Ser2UTCR0 = UTCR0_8BitData;
+ Ser2HSCR2 = HSCR2_TrDataH | HSCR2_RcDataL;
+
+ /*
+ * Clear status register
+ */
+ Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID;
+
+ ret = sa1100_irda_set_speed(si, si->speed = 9600);
+ if (ret) {
+ Ser2UTCR3 = 0;
+ Ser2HSCR0 = 0;
+
+ if (si->pdata->shutdown)
+ si->pdata->shutdown(si->dev);
+ }
+
+ return ret;
+}
+
+static void sa1100_irda_shutdown(struct sa1100_irda *si)
+{
+ /*
+ * Stop all DMA activity.
+ */
+ dmaengine_terminate_all(si->dma_rx.chan);
+ dmaengine_terminate_all(si->dma_tx.chan);
+
+ /* Disable the port. */
+ Ser2UTCR3 = 0;
+ Ser2HSCR0 = 0;
+
+ if (si->pdata->shutdown)
+ si->pdata->shutdown(si->dev);
+}
+
static int sa1100_irda_start(struct net_device *dev)
{
struct sa1100_irda *si = netdev_priv(dev);
@@ -769,26 +797,17 @@ static int sa1100_irda_start(struct net_device *dev)
si->speed = 9600;
- err = request_irq(dev->irq, sa1100_irda_irq, 0, dev->name, dev);
- if (err)
- goto err_irq;
-
- err = sa1100_request_dma(DMA_Ser2HSSPRd, "IrDA receive",
- NULL, NULL, &si->rxdma);
+ err = sa1100_irda_dma_request(si->dev, &si->dma_rx, "Ser2ICPRc",
+ &sa1100_irda_fir_rx);
if (err)
goto err_rx_dma;
- err = sa1100_request_dma(DMA_Ser2HSSPWr, "IrDA transmit",
- sa1100_irda_txdma_irq, dev, &si->txdma);
+ err = sa1100_irda_dma_request(si->dev, &si->dma_tx, "Ser2ICPTr",
+ &sa1100_irda_sir_tx);
if (err)
goto err_tx_dma;
/*
- * The interrupt must remain disabled for now.
- */
- disable_irq(dev->irq);
-
- /*
* Setup the serial port for the specified speed.
*/
err = sa1100_irda_startup(si);
@@ -803,44 +822,60 @@ static int sa1100_irda_start(struct net_device *dev)
if (!si->irlap)
goto err_irlap;
+ err = request_irq(dev->irq, sa1100_irda_irq, 0, dev->name, dev);
+ if (err)
+ goto err_irq;
+
/*
* Now enable the interrupt and start the queue
*/
si->open = 1;
sa1100_set_power(si, power_level); /* low power mode */
- enable_irq(dev->irq);
+
netif_start_queue(dev);
return 0;
+err_irq:
+ irlap_close(si->irlap);
err_irlap:
si->open = 0;
sa1100_irda_shutdown(si);
err_startup:
- sa1100_free_dma(si->txdma);
+ dma_release_channel(si->dma_tx.chan);
err_tx_dma:
- sa1100_free_dma(si->rxdma);
+ dma_release_channel(si->dma_rx.chan);
err_rx_dma:
- free_irq(dev->irq, dev);
-err_irq:
return err;
}
static int sa1100_irda_stop(struct net_device *dev)
{
struct sa1100_irda *si = netdev_priv(dev);
+ struct sk_buff *skb;
+
+ netif_stop_queue(dev);
- disable_irq(dev->irq);
+ si->open = 0;
sa1100_irda_shutdown(si);
/*
- * If we have been doing DMA receive, make sure we
+ * If we have been doing any DMA activity, make sure we
* tidy that up cleanly.
*/
- if (si->rxskb) {
- dma_unmap_single(si->dev, si->rxbuf_dma, HPSIR_MAX_RXLEN,
- DMA_FROM_DEVICE);
- dev_kfree_skb(si->rxskb);
- si->rxskb = NULL;
+ skb = si->dma_rx.skb;
+ if (skb) {
+ dma_unmap_sg(si->dma_rx.dev, &si->dma_rx.sg, 1,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb(skb);
+ si->dma_rx.skb = NULL;
+ }
+
+ skb = si->dma_tx.skb;
+ if (skb) {
+ dma_unmap_sg(si->dma_tx.dev, &si->dma_tx.sg, 1,
+ DMA_TO_DEVICE);
+ dev_kfree_skb(skb);
+ si->dma_tx.skb = NULL;
}
/* Stop IrLAP */
@@ -849,14 +884,11 @@ static int sa1100_irda_stop(struct net_device *dev)
si->irlap = NULL;
}
- netif_stop_queue(dev);
- si->open = 0;
-
/*
* Free resources
*/
- sa1100_free_dma(si->txdma);
- sa1100_free_dma(si->rxdma);
+ dma_release_channel(si->dma_tx.chan);
+ dma_release_channel(si->dma_rx.chan);
free_irq(dev->irq, dev);
sa1100_set_power(si, 0);
@@ -888,11 +920,15 @@ static int sa1100_irda_probe(struct platform_device *pdev)
struct net_device *dev;
struct sa1100_irda *si;
unsigned int baudrate_mask;
- int err;
+ int err, irq;
if (!pdev->dev.platform_data)
return -EINVAL;
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0)
+ return irq < 0 ? irq : -ENXIO;
+
err = request_mem_region(__PREG(Ser2UTCR0), 0x24, "IrDA") ? 0 : -EBUSY;
if (err)
goto err_mem_1;
@@ -907,22 +943,27 @@ static int sa1100_irda_probe(struct platform_device *pdev)
if (!dev)
goto err_mem_4;
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
si = netdev_priv(dev);
si->dev = &pdev->dev;
si->pdata = pdev->dev.platform_data;
+ sg_init_table(&si->dma_rx.sg, 1);
+ sg_init_table(&si->dma_tx.sg, 1);
+
/*
* Initialise the HP-SIR buffers
*/
err = sa1100_irda_init_iobuf(&si->rx_buff, 14384);
if (err)
goto err_mem_5;
- err = sa1100_irda_init_iobuf(&si->tx_buff, 4000);
+ err = sa1100_irda_init_iobuf(&si->tx_buff, IRDA_SIR_MAX_FRAME);
if (err)
goto err_mem_5;
dev->netdev_ops = &sa1100_irda_netdev_ops;
- dev->irq = IRQ_Ser2ICP;
+ dev->irq = irq;
irda_init_max_qos_capabilies(&si->qos);
@@ -996,6 +1037,74 @@ static int sa1100_irda_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
+/*
+ * Suspend the IrDA interface.
+ */
+static int sa1100_irda_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct sa1100_irda *si;
+
+ if (!dev)
+ return 0;
+
+ si = netdev_priv(dev);
+ if (si->open) {
+ /*
+ * Stop the transmit queue
+ */
+ netif_device_detach(dev);
+ disable_irq(dev->irq);
+ sa1100_irda_shutdown(si);
+ __sa1100_irda_set_power(si, 0);
+ }
+
+ return 0;
+}
+
+/*
+ * Resume the IrDA interface.
+ */
+static int sa1100_irda_resume(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct sa1100_irda *si;
+
+ if (!dev)
+ return 0;
+
+ si = netdev_priv(dev);
+ if (si->open) {
+ /*
+ * If we missed a speed change, initialise at the new speed
+ * directly. It is debatable whether this is actually
+ * required, but in the interests of continuing from where
+ * we left off it is desirable. The converse argument is
+ * that we should re-negotiate at 9600 baud again.
+ */
+ if (si->newspeed) {
+ si->speed = si->newspeed;
+ si->newspeed = 0;
+ }
+
+ sa1100_irda_startup(si);
+ __sa1100_irda_set_power(si, si->power);
+ enable_irq(dev->irq);
+
+ /*
+ * This automatically wakes up the queue
+ */
+ netif_device_attach(dev);
+ }
+
+ return 0;
+}
+#else
+#define sa1100_irda_suspend NULL
+#define sa1100_irda_resume NULL
+#endif
+
static struct platform_driver sa1100ir_driver = {
.probe = sa1100_irda_probe,
.remove = sa1100_irda_remove,
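
The conversion above leans on two dmaengine idioms worth calling out: each channel is configured with a struct dma_slave_config (the sa1100_irda_sir_tx and sa1100_irda_fir_rx/fir_tx objects passed to dmaengine_slave_config() and sa1100_irda_dma_request() are defined elsewhere in the patch), and every buffer is wrapped in a single-entry scatterlist before mapping. A minimal sketch of both follows; the FIFO address and burst size are illustrative, not taken from this patch.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

/*
 * Hypothetical slave config; the real sa1100_irda_sir_tx is defined
 * outside this excerpt.  A byte-wide destination width matches the
 * UART transmit FIFO.
 */
static struct dma_slave_config example_sir_tx_config = {
	.direction	= DMA_MEM_TO_DEV,
	.dst_addr	= 0x80030014,	/* illustrative FIFO bus address */
	.dst_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
	.dst_maxburst	= 4,		/* illustrative burst size */
};

/*
 * The single-entry scatterlist idiom used throughout the conversion:
 * wrap a linear buffer, then map it for the device.  dma_map_sg()
 * returns the number of mapped entries, so 0 means failure.
 */
static int example_map_one(struct device *dev, struct scatterlist *sg,
			   void *buf, size_t len)
{
	sg_init_table(sg, 1);
	sg_set_buf(sg, buf, len);
	return dma_map_sg(dev, sg, 1, DMA_TO_DEVICE) == 0 ? -ENOMEM : 0;
}

The sa1100_irda_dma_start() helper shown at the top of this diff then wraps dmaengine prep/submit/issue around such a mapped scatterlist.
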
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index 790cbdea7392..3886b30ed373 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -164,12 +164,14 @@ static void rx_complete(struct urb *req)
/* Can't use pskb_pull() on page in IRQ */
memcpy(skb_put(skb, 1), page_address(page), 1);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- page, 1, req->actual_length);
+ page, 1, req->actual_length,
+ req->actual_length);
page = NULL;
}
} else {
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- page, 0, req->actual_length);
+ page, 0, req->actual_length,
+ req->actual_length);
page = NULL;
}
if (req->actual_length < PAGE_SIZE)
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index aac68f5195c0..552d24bf862e 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -409,6 +409,42 @@ static const struct usb_device_id products[] = {
.bInterfaceProtocol = 0xff,
.driver_info = (unsigned long)&qmi_wwan_force_int4,
},
+ { /* ZTE (Vodafone) K3565-Z */
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x19d2,
+ .idProduct = 0x0063,
+ .bInterfaceClass = 0xff,
+ .bInterfaceSubClass = 0xff,
+ .bInterfaceProtocol = 0xff,
+ .driver_info = (unsigned long)&qmi_wwan_force_int4,
+ },
+ { /* ZTE (Vodafone) K3570-Z */
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x19d2,
+ .idProduct = 0x1008,
+ .bInterfaceClass = 0xff,
+ .bInterfaceSubClass = 0xff,
+ .bInterfaceProtocol = 0xff,
+ .driver_info = (unsigned long)&qmi_wwan_force_int4,
+ },
+ { /* ZTE (Vodafone) K3571-Z */
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x19d2,
+ .idProduct = 0x1010,
+ .bInterfaceClass = 0xff,
+ .bInterfaceSubClass = 0xff,
+ .bInterfaceProtocol = 0xff,
+ .driver_info = (unsigned long)&qmi_wwan_force_int4,
+ },
+ { /* ZTE (Vodafone) K4505-Z */
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x19d2,
+ .idProduct = 0x0104,
+ .bInterfaceClass = 0xff,
+ .bInterfaceSubClass = 0xff,
+ .bInterfaceProtocol = 0xff,
+ .driver_info = (unsigned long)&qmi_wwan_force_int4,
+ },
{QMI_GOBI_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
{QMI_GOBI_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */
{QMI_GOBI_DEVICE(0x03f0, 0x371d)}, /* HP un2430 Mobile Broadband Module */
diff --git a/drivers/net/wireless/iwlegacy/3945.c b/drivers/net/wireless/iwlegacy/3945.c
index c5b1d199e0bc..b25c01be0d90 100644
--- a/drivers/net/wireless/iwlegacy/3945.c
+++ b/drivers/net/wireless/iwlegacy/3945.c
@@ -499,7 +499,8 @@ il3945_pass_packet_to_mac80211(struct il_priv *il, struct il_rx_buf *rxb,
le32_to_cpu(rx_end->status), stats);
skb_add_rx_frag(skb, 0, rxb->page,
- (void *)rx_hdr->payload - (void *)pkt, len);
+ (void *)rx_hdr->payload - (void *)pkt, len,
+ len);
il_update_stats(il, false, fc, len);
memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 7b54dbb338be..17f1c6853182 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -596,7 +596,8 @@ il4965_pass_packet_to_mac80211(struct il_priv *il, struct ieee80211_hdr *hdr,
return;
}
- skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
+ skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len,
+ len);
il_update_stats(il, false, fc, len);
memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
index 44c6f712b77d..f4b84d1596e3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
@@ -796,7 +796,7 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
offset = (void *)hdr - rxb_addr(rxb);
p = rxb_steal_page(rxb);
- skb_add_rx_frag(skb, 0, p, offset, len);
+ skb_add_rx_frag(skb, 0, p, offset, len, len);
iwl_update_stats(priv, false, fc, len);
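
The cdc-phonet, iwlegacy and iwlwifi hunks above all adapt callers to a new skb_add_rx_frag() signature that takes a sixth 'truesize' argument, used by the networking core for socket memory accounting. A hedged sketch of the updated call; like the drivers above, it simply passes the fragment length itself as the truesize:

#include <linux/skbuff.h>

/*
 * Sketch of the extended call as used above: attach 'len' bytes of
 * 'page', starting at 'off', as the next fragment of 'skb', and
 * account 'len' bytes of true memory usage for it.
 */
static void example_attach_frag(struct sk_buff *skb, struct page *page,
				int off, int len)
{
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, off, len, len);
}
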
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index 6ea51dcbc728..8e84ce9765a9 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -91,4 +91,8 @@ config OF_PCI_IRQ
help
OpenFirmware PCI IRQ routing helpers
+config OF_MTD
+ depends on MTD
+ def_bool y
+
endmenu # OF
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index a73f5a51ff4c..aa90e602c8a7 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -12,3 +12,4 @@ obj-$(CONFIG_OF_SELFTEST) += selftest.o
obj-$(CONFIG_OF_MDIO) += of_mdio.o
obj-$(CONFIG_OF_PCI) += of_pci.o
obj-$(CONFIG_OF_PCI_IRQ) += of_pci_irq.o
+obj-$(CONFIG_OF_MTD) += of_mtd.o
diff --git a/drivers/of/of_mtd.c b/drivers/of/of_mtd.c
new file mode 100644
index 000000000000..e7cad627a5d1
--- /dev/null
+++ b/drivers/of/of_mtd.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2012 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
+ *
+ * OF helpers for mtd.
+ *
+ * This file is released under the GPLv2
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/of_mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/export.h>
+
+/*
+ * This table maps 'enum nand_ecc_modes_t' found in include/linux/mtd/nand.h
+ * onto the 'nand-ecc-mode' device tree binding, so that MTD
+ * device drivers can get the NAND ECC mode from the device tree.
+ */
+static const char *nand_ecc_modes[] = {
+ [NAND_ECC_NONE] = "none",
+ [NAND_ECC_SOFT] = "soft",
+ [NAND_ECC_HW] = "hw",
+ [NAND_ECC_HW_SYNDROME] = "hw_syndrome",
+ [NAND_ECC_HW_OOB_FIRST] = "hw_oob_first",
+ [NAND_ECC_SOFT_BCH] = "soft_bch",
+};
+
+/**
+ * of_get_nand_ecc_mode - Get nand ecc mode for given device_node
+ * @np: Pointer to the given device_node
+ *
+ * The function gets the ECC mode string from the 'nand-ecc-mode' property,
+ * and returns its index in the nand_ecc_modes table, or a negative errno on error.
+ */
+int of_get_nand_ecc_mode(struct device_node *np)
+{
+ const char *pm;
+ int err, i;
+
+ err = of_property_read_string(np, "nand-ecc-mode", &pm);
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(nand_ecc_modes); i++)
+ if (!strcasecmp(pm, nand_ecc_modes[i]))
+ return i;
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(of_get_nand_ecc_mode);
+
+/**
+ * of_get_nand_bus_width - Get nand bus width for given device_node
+ * @np: Pointer to the given device_node
+ *
+ * Return the bus width option, or a negative errno on error.
+ */
+int of_get_nand_bus_width(struct device_node *np)
+{
+ u32 val;
+
+ if (of_property_read_u32(np, "nand-bus-width", &val))
+ return 8;
+
+	switch (val) {
+ case 8:
+ case 16:
+ return val;
+ default:
+ return -EIO;
+ }
+}
+EXPORT_SYMBOL_GPL(of_get_nand_bus_width);
+
+/**
+ * of_get_nand_on_flash_bbt - Get nand on flash bbt for given device_node
+ * @np: Pointer to the given device_node
+ *
+ * Return true if present, false otherwise.
+ */
+bool of_get_nand_on_flash_bbt(struct device_node *np)
+{
+ return of_property_read_bool(np, "nand-on-flash-bbt");
+}
+EXPORT_SYMBOL_GPL(of_get_nand_on_flash_bbt);
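
A hypothetical consumer (not part of this patch) illustrates how a NAND controller driver could use the three helpers at probe time; the example_* name is invented, and the nand_chip fields touched here (ecc.mode, options, bbt_options) follow the conventions of drivers/mtd/nand of this era:

#include <linux/of.h>
#include <linux/of_mtd.h>
#include <linux/mtd/nand.h>

/*
 * Hypothetical probe-time helper: read the ECC mode, bus width and
 * on-flash-BBT policy from the controller's device_node.
 */
static int example_nand_of_setup(struct device_node *np,
				 struct nand_chip *chip)
{
	int ecc = of_get_nand_ecc_mode(np);
	int width = of_get_nand_bus_width(np);

	/* A missing "nand-ecc-mode" property is not fatal here. */
	if (ecc >= 0)
		chip->ecc.mode = ecc;

	if (width < 0)
		return width;	/* -EIO for an unsupported width */
	if (width == 16)
		chip->options |= NAND_BUSWIDTH_16;

	if (of_get_nand_on_flash_bbt(np))
		chip->bbt_options |= NAND_BBT_USE_FLASH;

	return 0;
}
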
diff --git a/drivers/pcmcia/at91_cf.c b/drivers/pcmcia/at91_cf.c
index 4902206f53d9..1dd68f502634 100644
--- a/drivers/pcmcia/at91_cf.c
+++ b/drivers/pcmcia/at91_cf.c
@@ -26,6 +26,7 @@
#include <mach/board.h>
#include <mach/at91rm9200_mc.h>
+#include <mach/at91_ramc.h>
/*
@@ -156,7 +157,7 @@ static int at91_cf_set_io_map(struct pcmcia_socket *s, struct pccard_io_map *io)
/*
* Use 16 bit accesses unless/until we need 8-bit i/o space.
*/
- csr = at91_sys_read(AT91_SMC_CSR(cf->board->chipselect)) & ~AT91_SMC_DBW;
+ csr = at91_ramc_read(0, AT91_SMC_CSR(cf->board->chipselect)) & ~AT91_SMC_DBW;
/*
* NOTE: this CF controller ignores IOIS16, so we can't really do
@@ -175,7 +176,7 @@ static int at91_cf_set_io_map(struct pcmcia_socket *s, struct pccard_io_map *io)
csr |= AT91_SMC_DBW_16;
pr_debug("%s: 16bit i/o bus\n", driver_name);
}
- at91_sys_write(AT91_SMC_CSR(cf->board->chipselect), csr);
+ at91_ramc_write(0, AT91_SMC_CSR(cf->board->chipselect), csr);
io->start = cf->socket.io_offset;
io->stop = io->start + SZ_2K - 1;
diff --git a/drivers/pcmcia/sa1111_generic.c b/drivers/pcmcia/sa1111_generic.c
index ef5848f65241..70f728ce1856 100644
--- a/drivers/pcmcia/sa1111_generic.c
+++ b/drivers/pcmcia/sa1111_generic.c
@@ -22,6 +22,40 @@
#include "sa1111_generic.h"
+/*
+ * These are offsets from the above base.
+ */
+#define PCCR 0x0000
+#define PCSSR 0x0004
+#define PCSR 0x0008
+
+#define PCSR_S0_READY (1<<0)
+#define PCSR_S1_READY (1<<1)
+#define PCSR_S0_DETECT (1<<2)
+#define PCSR_S1_DETECT (1<<3)
+#define PCSR_S0_VS1 (1<<4)
+#define PCSR_S0_VS2 (1<<5)
+#define PCSR_S1_VS1 (1<<6)
+#define PCSR_S1_VS2 (1<<7)
+#define PCSR_S0_WP (1<<8)
+#define PCSR_S1_WP (1<<9)
+#define PCSR_S0_BVD1 (1<<10)
+#define PCSR_S0_BVD2 (1<<11)
+#define PCSR_S1_BVD1 (1<<12)
+#define PCSR_S1_BVD2 (1<<13)
+
+#define PCCR_S0_RST (1<<0)
+#define PCCR_S1_RST (1<<1)
+#define PCCR_S0_FLT (1<<2)
+#define PCCR_S1_FLT (1<<3)
+#define PCCR_S0_PWAITEN (1<<4)
+#define PCCR_S1_PWAITEN (1<<5)
+#define PCCR_S0_PSE (1<<6)
+#define PCCR_S1_PSE (1<<7)
+
+#define PCSSR_S0_SLEEP (1<<0)
+#define PCSSR_S1_SLEEP (1<<1)
+
#define IDX_IRQ_S0_READY_NINT (0)
#define IDX_IRQ_S0_CD_VALID (1)
#define IDX_IRQ_S0_BVD1_STSCHG (2)
@@ -32,7 +66,7 @@
void sa1111_pcmcia_socket_state(struct soc_pcmcia_socket *skt, struct pcmcia_state *state)
{
struct sa1111_pcmcia_socket *s = to_skt(skt);
- unsigned long status = sa1111_readl(s->dev->mapbase + SA1111_PCSR);
+ unsigned long status = sa1111_readl(s->dev->mapbase + PCSR);
switch (skt->nr) {
case 0:
@@ -88,10 +122,10 @@ int sa1111_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_s
pccr_set_mask |= PCCR_S0_FLT|PCCR_S1_FLT;
local_irq_save(flags);
- val = sa1111_readl(s->dev->mapbase + SA1111_PCCR);
+ val = sa1111_readl(s->dev->mapbase + PCCR);
val &= ~pccr_skt_mask;
val |= pccr_set_mask & pccr_skt_mask;
- sa1111_writel(val, s->dev->mapbase + SA1111_PCCR);
+ sa1111_writel(val, s->dev->mapbase + PCCR);
local_irq_restore(flags);
return 0;
@@ -141,20 +175,26 @@ int sa1111_pcmcia_add(struct sa1111_dev *dev, struct pcmcia_low_level *ops,
static int pcmcia_probe(struct sa1111_dev *dev)
{
void __iomem *base;
+ int ret;
+
+ ret = sa1111_enable_device(dev);
+ if (ret)
+ return ret;
dev_set_drvdata(&dev->dev, NULL);
- if (!request_mem_region(dev->res.start, 512,
- SA1111_DRIVER_NAME(dev)))
+ if (!request_mem_region(dev->res.start, 512, SA1111_DRIVER_NAME(dev))) {
+ sa1111_disable_device(dev);
return -EBUSY;
+ }
base = dev->mapbase;
/*
* Initialise the suspend state.
*/
- sa1111_writel(PCSSR_S0_SLEEP | PCSSR_S1_SLEEP, base + SA1111_PCSSR);
- sa1111_writel(PCCR_S0_FLT | PCCR_S1_FLT, base + SA1111_PCCR);
+ sa1111_writel(PCSSR_S0_SLEEP | PCSSR_S1_SLEEP, base + PCSSR);
+ sa1111_writel(PCCR_S0_FLT | PCCR_S1_FLT, base + PCCR);
#ifdef CONFIG_SA1100_BADGE4
pcmcia_badge4_init(&dev->dev);
@@ -184,6 +224,7 @@ static int __devexit pcmcia_remove(struct sa1111_dev *dev)
}
release_mem_region(dev->res.start, 512);
+ sa1111_disable_device(dev);
return 0;
}
diff --git a/drivers/pcmcia/sa1111_neponset.c b/drivers/pcmcia/sa1111_neponset.c
index 50f297d850e7..1d78739c4c07 100644
--- a/drivers/pcmcia/sa1111_neponset.c
+++ b/drivers/pcmcia/sa1111_neponset.c
@@ -94,12 +94,7 @@ neponset_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_sta
ret = sa1111_pcmcia_configure_socket(skt, state);
if (ret == 0) {
- unsigned long flags;
-
- local_irq_save(flags);
- NCR_0 = (NCR_0 & ~ncr_mask) | ncr_set;
-
- local_irq_restore(flags);
+ neponset_ncr_frob(ncr_mask, ncr_set);
sa1111_set_io(s->dev, pa_dwr_mask, pa_dwr_set);
}
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index a229de98ae6f..36db5a441eba 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -258,14 +258,6 @@ config REGULATOR_DB8500_PRCMU
This driver supports the voltage domain regulators controlled by the
DB8500 PRCMU
-config REGULATOR_BQ24022
- tristate "TI bq24022 Dual Input 1-Cell Li-Ion Charger IC"
- help
- This driver controls a TI bq24022 Charger attached via
- GPIOs. The provided current regulator can enable/disable
- charging select between 100 mA and 500 mA charging current
- limit.
-
config REGULATOR_TPS6105X
tristate "TI TPS6105X Power regulators"
depends on TPS6105X
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index b5042c885d89..94b52745e957 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -16,7 +16,6 @@ obj-$(CONFIG_REGULATOR_AB3100) += ab3100.o
obj-$(CONFIG_REGULATOR_AB8500) += ab8500.o
obj-$(CONFIG_REGULATOR_AD5398) += ad5398.o
obj-$(CONFIG_REGULATOR_ANATOP) += anatop-regulator.o
-obj-$(CONFIG_REGULATOR_BQ24022) += bq24022.o
obj-$(CONFIG_REGULATOR_DA903X) += da903x.o
obj-$(CONFIG_REGULATOR_DA9052) += da9052-regulator.o
obj-$(CONFIG_REGULATOR_DBX500_PRCMU) += dbx500-prcmu.o
diff --git a/drivers/regulator/bq24022.c b/drivers/regulator/bq24022.c
deleted file mode 100644
index 9fab6d1bbe80..000000000000
--- a/drivers/regulator/bq24022.c
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * Support for TI bq24022 (bqTINY-II) Dual Input (USB/AC Adpater)
- * 1-Cell Li-Ion Charger connected via GPIOs.
- *
- * Copyright (c) 2008 Philipp Zabel
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/gpio.h>
-#include <linux/regulator/bq24022.h>
-#include <linux/regulator/driver.h>
-
-
-static int bq24022_set_current_limit(struct regulator_dev *rdev,
- int min_uA, int max_uA)
-{
- struct bq24022_mach_info *pdata = rdev_get_drvdata(rdev);
-
- dev_dbg(rdev_get_dev(rdev), "setting current limit to %s mA\n",
- max_uA >= 500000 ? "500" : "100");
-
- /* REVISIT: maybe return error if min_uA != 0 ? */
- gpio_set_value(pdata->gpio_iset2, max_uA >= 500000);
- return 0;
-}
-
-static int bq24022_get_current_limit(struct regulator_dev *rdev)
-{
- struct bq24022_mach_info *pdata = rdev_get_drvdata(rdev);
-
- return gpio_get_value(pdata->gpio_iset2) ? 500000 : 100000;
-}
-
-static int bq24022_enable(struct regulator_dev *rdev)
-{
- struct bq24022_mach_info *pdata = rdev_get_drvdata(rdev);
-
- dev_dbg(rdev_get_dev(rdev), "enabling charger\n");
-
- gpio_set_value(pdata->gpio_nce, 0);
- return 0;
-}
-
-static int bq24022_disable(struct regulator_dev *rdev)
-{
- struct bq24022_mach_info *pdata = rdev_get_drvdata(rdev);
-
- dev_dbg(rdev_get_dev(rdev), "disabling charger\n");
-
- gpio_set_value(pdata->gpio_nce, 1);
- return 0;
-}
-
-static int bq24022_is_enabled(struct regulator_dev *rdev)
-{
- struct bq24022_mach_info *pdata = rdev_get_drvdata(rdev);
-
- return !gpio_get_value(pdata->gpio_nce);
-}
-
-static struct regulator_ops bq24022_ops = {
- .set_current_limit = bq24022_set_current_limit,
- .get_current_limit = bq24022_get_current_limit,
- .enable = bq24022_enable,
- .disable = bq24022_disable,
- .is_enabled = bq24022_is_enabled,
-};
-
-static struct regulator_desc bq24022_desc = {
- .name = "bq24022",
- .ops = &bq24022_ops,
- .type = REGULATOR_CURRENT,
- .owner = THIS_MODULE,
-};
-
-static int __init bq24022_probe(struct platform_device *pdev)
-{
- struct bq24022_mach_info *pdata = pdev->dev.platform_data;
- struct regulator_dev *bq24022;
- int ret;
-
- if (!pdata || !pdata->gpio_nce || !pdata->gpio_iset2)
- return -EINVAL;
-
- ret = gpio_request(pdata->gpio_nce, "ncharge_en");
- if (ret) {
- dev_dbg(&pdev->dev, "couldn't request nCE GPIO: %d\n",
- pdata->gpio_nce);
- goto err_ce;
- }
- ret = gpio_request(pdata->gpio_iset2, "charge_mode");
- if (ret) {
- dev_dbg(&pdev->dev, "couldn't request ISET2 GPIO: %d\n",
- pdata->gpio_iset2);
- goto err_iset2;
- }
- ret = gpio_direction_output(pdata->gpio_iset2, 0);
- ret = gpio_direction_output(pdata->gpio_nce, 1);
-
- bq24022 = regulator_register(&bq24022_desc, &pdev->dev,
- pdata->init_data, pdata, NULL);
- if (IS_ERR(bq24022)) {
- dev_dbg(&pdev->dev, "couldn't register regulator\n");
- ret = PTR_ERR(bq24022);
- goto err_reg;
- }
- platform_set_drvdata(pdev, bq24022);
- dev_dbg(&pdev->dev, "registered regulator\n");
-
- return 0;
-err_reg:
- gpio_free(pdata->gpio_iset2);
-err_iset2:
- gpio_free(pdata->gpio_nce);
-err_ce:
- return ret;
-}
-
-static int __devexit bq24022_remove(struct platform_device *pdev)
-{
- struct bq24022_mach_info *pdata = pdev->dev.platform_data;
- struct regulator_dev *bq24022 = platform_get_drvdata(pdev);
-
- regulator_unregister(bq24022);
- gpio_free(pdata->gpio_iset2);
- gpio_free(pdata->gpio_nce);
-
- return 0;
-}
-
-static struct platform_driver bq24022_driver = {
- .driver = {
- .name = "bq24022",
- },
- .remove = __devexit_p(bq24022_remove),
-};
-
-static int __init bq24022_init(void)
-{
- return platform_driver_probe(&bq24022_driver, bq24022_probe);
-}
-
-static void __exit bq24022_exit(void)
-{
- platform_driver_unregister(&bq24022_driver);
-}
-
-module_init(bq24022_init);
-module_exit(bq24022_exit);
-
-MODULE_AUTHOR("Philipp Zabel");
-MODULE_DESCRIPTION("TI bq24022 Li-Ion Charger driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
new file mode 100644
index 000000000000..24d880e78ec6
--- /dev/null
+++ b/drivers/remoteproc/Kconfig
@@ -0,0 +1,28 @@
+menu "Remoteproc drivers (EXPERIMENTAL)"
+
+# REMOTEPROC gets selected by whoever wants it
+config REMOTEPROC
+ tristate
+ depends on EXPERIMENTAL
+
+config OMAP_REMOTEPROC
+ tristate "OMAP remoteproc support"
+ depends on ARCH_OMAP4
+ depends on OMAP_IOMMU
+ select REMOTEPROC
+ select OMAP_MBOX_FWK
+ select RPMSG
+ help
+ Say y here to support OMAP's remote processors (dual M3
+ and DSP on OMAP4) via the remote processor framework.
+
+ Currently only supported on OMAP4.
+
+ Usually you want to say y here, in order to enable multimedia
+ use-cases to run on your platform (multimedia codecs are
+ offloaded to remote DSP processors using this framework).
+
+ It's safe to say n here if you're not interested in multimedia
+ offloading or just want a bare minimum kernel.
+
+endmenu
diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile
new file mode 100644
index 000000000000..5445d9b23294
--- /dev/null
+++ b/drivers/remoteproc/Makefile
@@ -0,0 +1,9 @@
+#
+# Generic framework for controlling remote processors
+#
+
+obj-$(CONFIG_REMOTEPROC) += remoteproc.o
+remoteproc-y := remoteproc_core.o
+remoteproc-y += remoteproc_debugfs.o
+remoteproc-y += remoteproc_virtio.o
+obj-$(CONFIG_OMAP_REMOTEPROC) += omap_remoteproc.o
diff --git a/drivers/remoteproc/omap_remoteproc.c b/drivers/remoteproc/omap_remoteproc.c
new file mode 100644
index 000000000000..69425c4e86f3
--- /dev/null
+++ b/drivers/remoteproc/omap_remoteproc.c
@@ -0,0 +1,229 @@
+/*
+ * OMAP Remote Processor driver
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Ohad Ben-Cohen <ohad@wizery.com>
+ * Brian Swetland <swetland@google.com>
+ * Fernando Guzman Lugo <fernando.lugo@ti.com>
+ * Mark Grosen <mgrosen@ti.com>
+ * Suman Anna <s-anna@ti.com>
+ * Hari Kanigeri <h-kanigeri2@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/remoteproc.h>
+
+#include <plat/mailbox.h>
+#include <plat/remoteproc.h>
+
+#include "omap_remoteproc.h"
+#include "remoteproc_internal.h"
+
+/**
+ * struct omap_rproc - omap remote processor state
+ * @mbox: omap mailbox handle
+ * @nb: notifier block that will be invoked on inbound mailbox messages
+ * @rproc: rproc handle
+ */
+struct omap_rproc {
+ struct omap_mbox *mbox;
+ struct notifier_block nb;
+ struct rproc *rproc;
+};
+
+/**
+ * omap_rproc_mbox_callback() - inbound mailbox message handler
+ * @this: notifier block
+ * @index: unused
+ * @data: mailbox payload
+ *
+ * This handler is invoked by omap's mailbox driver whenever a mailbox
+ * message is received. Usually, the mailbox payload simply contains
+ * the index of the virtqueue that is kicked by the remote processor,
+ * and we let remoteproc core handle it.
+ *
+ * In addition to virtqueue indices, we also have some out-of-band values
+ * that indicate different events. Those values are deliberately very
+ * big so they don't coincide with virtqueue indices.
+ */
+static int omap_rproc_mbox_callback(struct notifier_block *this,
+ unsigned long index, void *data)
+{
+ mbox_msg_t msg = (mbox_msg_t) data;
+ struct omap_rproc *oproc = container_of(this, struct omap_rproc, nb);
+ struct device *dev = oproc->rproc->dev;
+ const char *name = oproc->rproc->name;
+
+ dev_dbg(dev, "mbox msg: 0x%x\n", msg);
+
+ switch (msg) {
+ case RP_MBOX_CRASH:
+ /* just log this for now. later, we'll also do recovery */
+ dev_err(dev, "omap rproc %s crashed\n", name);
+ break;
+ case RP_MBOX_ECHO_REPLY:
+ dev_info(dev, "received echo reply from %s\n", name);
+ break;
+ default:
+ /* msg contains the index of the triggered vring */
+ if (rproc_vq_interrupt(oproc->rproc, msg) == IRQ_NONE)
+ dev_dbg(dev, "no message was found in vqid %d\n", msg);
+ }
+
+ return NOTIFY_DONE;
+}
+
+/* kick a virtqueue */
+static void omap_rproc_kick(struct rproc *rproc, int vqid)
+{
+ struct omap_rproc *oproc = rproc->priv;
+ int ret;
+
+ /* send the index of the triggered virtqueue in the mailbox payload */
+ ret = omap_mbox_msg_send(oproc->mbox, vqid);
+ if (ret)
+ dev_err(rproc->dev, "omap_mbox_msg_send failed: %d\n", ret);
+}
+
+/*
+ * Power up the remote processor.
+ *
+ * This function will be invoked only after the firmware for this rproc
+ * was loaded, parsed successfully, and all of its resource requirements
+ * were met.
+ */
+static int omap_rproc_start(struct rproc *rproc)
+{
+ struct omap_rproc *oproc = rproc->priv;
+ struct platform_device *pdev = to_platform_device(rproc->dev);
+ struct omap_rproc_pdata *pdata = pdev->dev.platform_data;
+ int ret;
+
+ oproc->nb.notifier_call = omap_rproc_mbox_callback;
+
+ /* every omap rproc is assigned a mailbox instance for messaging */
+ oproc->mbox = omap_mbox_get(pdata->mbox_name, &oproc->nb);
+ if (IS_ERR(oproc->mbox)) {
+ ret = PTR_ERR(oproc->mbox);
+ dev_err(rproc->dev, "omap_mbox_get failed: %d\n", ret);
+ return ret;
+ }
+
+ /*
+	 * Ping the remote processor. This is only for sanity's sake;
+ * there is no functional effect whatsoever.
+ *
+ * Note that the reply will _not_ arrive immediately: this message
+ * will wait in the mailbox fifo until the remote processor is booted.
+ */
+ ret = omap_mbox_msg_send(oproc->mbox, RP_MBOX_ECHO_REQUEST);
+ if (ret) {
+		dev_err(rproc->dev, "omap_mbox_msg_send failed: %d\n", ret);
+ goto put_mbox;
+ }
+
+ ret = pdata->device_enable(pdev);
+ if (ret) {
+ dev_err(rproc->dev, "omap_device_enable failed: %d\n", ret);
+ goto put_mbox;
+ }
+
+ return 0;
+
+put_mbox:
+ omap_mbox_put(oproc->mbox, &oproc->nb);
+ return ret;
+}
+
+/* power off the remote processor */
+static int omap_rproc_stop(struct rproc *rproc)
+{
+ struct platform_device *pdev = to_platform_device(rproc->dev);
+ struct omap_rproc_pdata *pdata = pdev->dev.platform_data;
+ struct omap_rproc *oproc = rproc->priv;
+ int ret;
+
+ ret = pdata->device_shutdown(pdev);
+ if (ret)
+ return ret;
+
+ omap_mbox_put(oproc->mbox, &oproc->nb);
+
+ return 0;
+}
+
+static struct rproc_ops omap_rproc_ops = {
+ .start = omap_rproc_start,
+ .stop = omap_rproc_stop,
+ .kick = omap_rproc_kick,
+};
+
+static int __devinit omap_rproc_probe(struct platform_device *pdev)
+{
+ struct omap_rproc_pdata *pdata = pdev->dev.platform_data;
+ struct omap_rproc *oproc;
+ struct rproc *rproc;
+ int ret;
+
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(pdev->dev.parent, "dma_set_coherent_mask: %d\n", ret);
+ return ret;
+ }
+
+ rproc = rproc_alloc(&pdev->dev, pdata->name, &omap_rproc_ops,
+ pdata->firmware, sizeof(*oproc));
+ if (!rproc)
+ return -ENOMEM;
+
+ oproc = rproc->priv;
+ oproc->rproc = rproc;
+
+ platform_set_drvdata(pdev, rproc);
+
+ ret = rproc_register(rproc);
+ if (ret)
+ goto free_rproc;
+
+ return 0;
+
+free_rproc:
+ rproc_free(rproc);
+ return ret;
+}
+
+static int __devexit omap_rproc_remove(struct platform_device *pdev)
+{
+ struct rproc *rproc = platform_get_drvdata(pdev);
+
+ return rproc_unregister(rproc);
+}
+
+static struct platform_driver omap_rproc_driver = {
+ .probe = omap_rproc_probe,
+ .remove = __devexit_p(omap_rproc_remove),
+ .driver = {
+ .name = "omap-rproc",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(omap_rproc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("OMAP Remote Processor control driver");
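
The probe path above pulls everything it needs from platform data. A hypothetical board-side sketch of such an omap_rproc_pdata, limited to the fields this driver actually dereferences (name, firmware, mbox_name, device_enable and device_shutdown); all values and the example_* helpers are illustrative, not from this patch:

#include <linux/platform_device.h>
#include <plat/remoteproc.h>

/*
 * Assumed board helpers; in practice these would wrap the OMAP
 * hwmod/omap_device enable and shutdown paths.
 */
static int example_device_enable(struct platform_device *pdev);
static int example_device_shutdown(struct platform_device *pdev);

static struct omap_rproc_pdata example_ipu_pdata = {
	.name		 = "ipu",		  /* illustrative */
	.firmware	 = "example-ipu-fw.xem3", /* illustrative */
	.mbox_name	 = "mailbox-1",		  /* illustrative */
	.device_enable	 = example_device_enable,
	.device_shutdown = example_device_shutdown,
};
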
diff --git a/drivers/remoteproc/omap_remoteproc.h b/drivers/remoteproc/omap_remoteproc.h
new file mode 100644
index 000000000000..f6d2036d383d
--- /dev/null
+++ b/drivers/remoteproc/omap_remoteproc.h
@@ -0,0 +1,69 @@
+/*
+ * Remote processor messaging
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Copyright (C) 2011 Google, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Texas Instruments nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _OMAP_RPMSG_H
+#define _OMAP_RPMSG_H
+
+/*
+ * enum - Predefined Mailbox Messages
+ *
+ * @RP_MBOX_READY: informs the M3s that we're up and running. This is
+ * part of the init sequence that the M3 expects to see immediately
+ * after it is booted.
+ *
+ * @RP_MBOX_PENDING_MSG: informs the receiver that there is an inbound
+ * message waiting in its own receive-side vring. Please note that currently
+ * this message is optional: alternatively, one can explicitly send the index
+ * of the triggered virtqueue itself. The preferred approach will be decided
+ * as we progress and experiment with those two different approaches.
+ *
+ * @RP_MBOX_CRASH: this message is sent if BIOS crashes
+ *
+ * @RP_MBOX_ECHO_REQUEST: a mailbox-level "ping" message.
+ *
+ * @RP_MBOX_ECHO_REPLY: a mailbox-level reply to a "ping"
+ *
+ * @RP_MBOX_ABORT_REQUEST: a "please crash" request, used for testing the
+ * recovery mechanism (to some extent).
+ */
+enum omap_rp_mbox_messages {
+ RP_MBOX_READY = 0xFFFFFF00,
+ RP_MBOX_PENDING_MSG = 0xFFFFFF01,
+ RP_MBOX_CRASH = 0xFFFFFF02,
+ RP_MBOX_ECHO_REQUEST = 0xFFFFFF03,
+ RP_MBOX_ECHO_REPLY = 0xFFFFFF04,
+ RP_MBOX_ABORT_REQUEST = 0xFFFFFF05,
+};
+
+#endif /* _OMAP_RPMSG_H */
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
new file mode 100644
index 000000000000..ee15c68fb519
--- /dev/null
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -0,0 +1,1586 @@
+/*
+ * Remote Processor Framework
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Ohad Ben-Cohen <ohad@wizery.com>
+ * Brian Swetland <swetland@google.com>
+ * Mark Grosen <mgrosen@ti.com>
+ * Fernando Guzman Lugo <fernando.lugo@ti.com>
+ * Suman Anna <s-anna@ti.com>
+ * Robert Tivy <rtivy@ti.com>
+ * Armando Uribe De Leon <x0095078@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/string.h>
+#include <linux/debugfs.h>
+#include <linux/remoteproc.h>
+#include <linux/iommu.h>
+#include <linux/klist.h>
+#include <linux/elf.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_ring.h>
+#include <asm/byteorder.h>
+
+#include "remoteproc_internal.h"
+
+static void klist_rproc_get(struct klist_node *n);
+static void klist_rproc_put(struct klist_node *n);
+
+/*
+ * klist of the available remote processors.
+ *
+ * We need this in order to support name-based lookups (needed by
+ * rproc_get_by_name()).
+ *
+ * That said, we don't use rproc_get_by_name() at this point.
+ * The use cases that do require its existence should be
+ * scrutinized, and hopefully migrated to rproc_boot() using device-based
+ * binding.
+ *
+ * If/when this materializes, we could drop the klist (and the by_name
+ * API).
+ */
+static DEFINE_KLIST(rprocs, klist_rproc_get, klist_rproc_put);
+
+typedef int (*rproc_handle_resources_t)(struct rproc *rproc,
+ struct resource_table *table, int len);
+typedef int (*rproc_handle_resource_t)(struct rproc *rproc, void *, int avail);
+
+/*
+ * This is the IOMMU fault handler we register with the IOMMU API
+ * (when relevant; not all remote processors access memory through
+ * an IOMMU).
+ *
+ * IOMMU core will invoke this handler whenever the remote processor
+ * will try to access an unmapped device address.
+ *
+ * Currently this is mostly a stub, but it will later be used to trigger
+ * the recovery of the remote processor.
+ */
+static int rproc_iommu_fault(struct iommu_domain *domain, struct device *dev,
+ unsigned long iova, int flags)
+{
+ dev_err(dev, "iommu fault: da 0x%lx flags 0x%x\n", iova, flags);
+
+ /*
+ * Let the iommu core know we're not really handling this fault;
+ * we just plan to use this as a recovery trigger.
+ */
+ return -ENOSYS;
+}
+
+static int rproc_enable_iommu(struct rproc *rproc)
+{
+ struct iommu_domain *domain;
+ struct device *dev = rproc->dev;
+ int ret;
+
+ /*
+ * We currently use iommu_present() to decide if an IOMMU
+ * setup is needed.
+ *
+ * This works for simple cases, but will easily fail with
+ * platforms that do have an IOMMU, but not for this specific
+ * rproc.
+ *
+ * This will be easily solved by introducing hw capabilities
+ * that will be set by the remoteproc driver.
+ */
+ if (!iommu_present(dev->bus)) {
+ dev_dbg(dev, "iommu not found\n");
+ return 0;
+ }
+
+ domain = iommu_domain_alloc(dev->bus);
+ if (!domain) {
+ dev_err(dev, "can't alloc iommu domain\n");
+ return -ENOMEM;
+ }
+
+ iommu_set_fault_handler(domain, rproc_iommu_fault);
+
+ ret = iommu_attach_device(domain, dev);
+ if (ret) {
+ dev_err(dev, "can't attach iommu device: %d\n", ret);
+ goto free_domain;
+ }
+
+ rproc->domain = domain;
+
+ return 0;
+
+free_domain:
+ iommu_domain_free(domain);
+ return ret;
+}
+
+static void rproc_disable_iommu(struct rproc *rproc)
+{
+ struct iommu_domain *domain = rproc->domain;
+ struct device *dev = rproc->dev;
+
+ if (!domain)
+ return;
+
+ iommu_detach_device(domain, dev);
+ iommu_domain_free(domain);
+
+ return;
+}
+
+/*
+ * Some remote processors will ask us to allocate them physically contiguous
+ * memory regions (which we call "carveouts"), and map them to specific
+ * device addresses (which are hardcoded in the firmware).
+ *
+ * They may then ask us to copy objects into specific device addresses (e.g.
+ * code/data sections) or to expose certain symbols to us at other device
+ * addresses (e.g. their trace buffer).
+ *
+ * This function is an internal helper with which we can go over the allocated
+ * carveouts and translate specific device address to kernel virtual addresses
+ * so we can access the referenced memory.
+ *
+ * Note: phys_to_virt(iommu_iova_to_phys(rproc->domain, da)) will work too,
+ * but only on kernel direct mapped RAM memory. Instead, we're just using
+ * here the output of the DMA API, which should be more correct.
+ */
+static void *rproc_da_to_va(struct rproc *rproc, u64 da, int len)
+{
+ struct rproc_mem_entry *carveout;
+ void *ptr = NULL;
+
+ list_for_each_entry(carveout, &rproc->carveouts, node) {
+ int offset = da - carveout->da;
+
+ /* try next carveout if da is too small */
+ if (offset < 0)
+ continue;
+
+ /* try next carveout if da is too large */
+ if (offset + len > carveout->len)
+ continue;
+
+ ptr = carveout->va + offset;
+
+ break;
+ }
+
+ return ptr;
+}
+
+/**
+ * rproc_load_segments() - load firmware segments to memory
+ * @rproc: remote processor which will be booted using these fw segments
+ * @elf_data: the content of the ELF firmware image
+ * @len: firmware size (in bytes)
+ *
+ * This function loads the firmware segments to memory, where the remote
+ * processor expects them.
+ *
+ * Some remote processors will expect their code and data to be placed
+ * in specific device addresses, and can't have them dynamically assigned.
+ *
+ * We currently support only those kind of remote processors, and expect
+ * the program header's paddr member to contain those addresses. We then go
+ * through the physically contiguous "carveout" memory regions which we
+ * allocated (and mapped) earlier on behalf of the remote processor,
+ * and "translate" device address to kernel addresses, so we can copy the
+ * segments where they are expected.
+ *
+ * Currently we only support remote processors that require carveout
+ * allocations and got them mapped onto their iommus. Some processors
+ * might be different: they might not have iommus, and would prefer to
+ * directly allocate memory for every segment/resource. This is not yet
+ * supported, though.
+ */
+static int
+rproc_load_segments(struct rproc *rproc, const u8 *elf_data, size_t len)
+{
+ struct device *dev = rproc->dev;
+ struct elf32_hdr *ehdr;
+ struct elf32_phdr *phdr;
+ int i, ret = 0;
+
+ ehdr = (struct elf32_hdr *)elf_data;
+ phdr = (struct elf32_phdr *)(elf_data + ehdr->e_phoff);
+
+ /* go through the available ELF segments */
+ for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
+ u32 da = phdr->p_paddr;
+ u32 memsz = phdr->p_memsz;
+ u32 filesz = phdr->p_filesz;
+ u32 offset = phdr->p_offset;
+ void *ptr;
+
+ if (phdr->p_type != PT_LOAD)
+ continue;
+
+ dev_dbg(dev, "phdr: type %d da 0x%x memsz 0x%x filesz 0x%x\n",
+ phdr->p_type, da, memsz, filesz);
+
+ if (filesz > memsz) {
+ dev_err(dev, "bad phdr filesz 0x%x memsz 0x%x\n",
+ filesz, memsz);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (offset + filesz > len) {
+ dev_err(dev, "truncated fw: need 0x%x avail 0x%x\n",
+ offset + filesz, len);
+ ret = -EINVAL;
+ break;
+ }
+
+ /* grab the kernel address for this device address */
+ ptr = rproc_da_to_va(rproc, da, memsz);
+ if (!ptr) {
+ dev_err(dev, "bad phdr da 0x%x mem 0x%x\n", da, memsz);
+ ret = -EINVAL;
+ break;
+ }
+
+ /* put the segment where the remote processor expects it */
+ if (phdr->p_filesz)
+ memcpy(ptr, elf_data + phdr->p_offset, filesz);
+
+ /*
+ * Zero out remaining memory for this segment.
+ *
+ * This isn't strictly required, since dma_alloc_coherent already
+ * did this for us. Albeit harmless, we may consider removing it
+ * later.
+ */
+ if (memsz > filesz)
+ memset(ptr + filesz, 0, memsz - filesz);
+ }
+
+ return ret;
+}
+
+static int
+__rproc_handle_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i)
+{
+ struct rproc *rproc = rvdev->rproc;
+ struct device *dev = rproc->dev;
+ struct fw_rsc_vdev_vring *vring = &rsc->vring[i];
+ dma_addr_t dma;
+ void *va;
+ int ret, size, notifyid;
+
+ dev_dbg(dev, "vdev rsc: vring%d: da %x, qsz %d, align %d\n",
+ i, vring->da, vring->num, vring->align);
+
+ /* make sure reserved bytes are zeroes */
+ if (vring->reserved) {
+ dev_err(dev, "vring rsc has non zero reserved bytes\n");
+ return -EINVAL;
+ }
+
+ /* verify queue size and vring alignment are sane */
+ if (!vring->num || !vring->align) {
+ dev_err(dev, "invalid qsz (%d) or alignment (%d)\n",
+ vring->num, vring->align);
+ return -EINVAL;
+ }
+
+ /* actual size of vring (in bytes) */
+ size = PAGE_ALIGN(vring_size(vring->num, vring->align));
+
+ if (!idr_pre_get(&rproc->notifyids, GFP_KERNEL)) {
+ dev_err(dev, "idr_pre_get failed\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * Allocate non-cacheable memory for the vring. In the future
+ * this call will also configure the IOMMU for us
+ */
+ va = dma_alloc_coherent(dev, size, &dma, GFP_KERNEL);
+ if (!va) {
+ dev_err(dev, "dma_alloc_coherent failed\n");
+ return -EINVAL;
+ }
+
+ /* assign an rproc-wide unique index for this vring */
+ /* TODO: assign a notifyid for rvdev updates as well */
+ ret = idr_get_new(&rproc->notifyids, &rvdev->vring[i], &notifyid);
+ if (ret) {
+ dev_err(dev, "idr_get_new failed: %d\n", ret);
+ dma_free_coherent(dev, size, va, dma);
+ return ret;
+ }
+
+ /* let the rproc know the da and notifyid of this vring */
+ /* TODO: expose this to remote processor */
+ vring->da = dma;
+ vring->notifyid = notifyid;
+
+ dev_dbg(dev, "vring%d: va %p dma %x size %x idr %d\n", i, va,
+ dma, size, notifyid);
+
+ rvdev->vring[i].len = vring->num;
+ rvdev->vring[i].align = vring->align;
+ rvdev->vring[i].va = va;
+ rvdev->vring[i].dma = dma;
+ rvdev->vring[i].notifyid = notifyid;
+ rvdev->vring[i].rvdev = rvdev;
+
+ return 0;
+}
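+
+/*
+ * A sizing example for the allocation above (illustrative only),
+ * assuming the standard vring layout and 4 KiB pages: with
+ * vring->num = 256 and vring->align = 4096, vring_size() yields
+ * 10246 bytes (4096 bytes of descriptors plus a 518 byte avail ring,
+ * aligned up to 8192, plus a 2054 byte used ring), which PAGE_ALIGN()
+ * rounds up to 12288 bytes of coherent memory per vring.
+ */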
+
+static void __rproc_free_vrings(struct rproc_vdev *rvdev, int i)
+{
+ struct rproc *rproc = rvdev->rproc;
+
+ for (i--; i >= 0; i--) {
+ struct rproc_vring *rvring = &rvdev->vring[i];
+ int size = PAGE_ALIGN(vring_size(rvring->len, rvring->align));
+
+ dma_free_coherent(rproc->dev, size, rvring->va, rvring->dma);
+ idr_remove(&rproc->notifyids, rvring->notifyid);
+ }
+}
+
+/**
+ * rproc_handle_vdev() - handle a vdev fw resource
+ * @rproc: the remote processor
+ * @rsc: the vring resource descriptor
+ * @avail: size of available data (for sanity checking the image)
+ *
+ * This resource entry requests the host to statically register a virtio
+ * device (vdev), and setup everything needed to support it. It contains
+ * everything needed to make it possible: the virtio device id, virtio
+ * device features, vrings information, virtio config space, etc...
+ *
+ * Before registering the vdev, the vrings are allocated from non-cacheable
+ * physically contiguous memory. Currently we only support two vrings per
+ * remote processor (temporary limitation). We might also want to consider
+ * doing the vring allocation only later when ->find_vqs() is invoked, and
+ * then release them upon ->del_vqs().
+ *
+ * Note: @da is currently not really handled correctly: we dynamically
+ * allocate it using the DMA API, ignoring requested hard coded addresses,
+ * and we don't take care of any required IOMMU programming. This is all
+ * going to be taken care of when the generic iommu-based DMA API will be
+ * merged. Meanwhile, statically-addressed iommu-based firmware images should
+ * use RSC_DEVMEM resource entries to map their required @da to the physical
+ * address of their base CMA region (ouch, hacky!).
+ *
+ * Returns 0 on success, or an appropriate error code otherwise
+ */
+static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
+ int avail)
+{
+ struct device *dev = rproc->dev;
+ struct rproc_vdev *rvdev;
+ int i, ret;
+
+ /* make sure resource isn't truncated */
+ if (sizeof(*rsc) + rsc->num_of_vrings * sizeof(struct fw_rsc_vdev_vring)
+ + rsc->config_len > avail) {
+ dev_err(rproc->dev, "vdev rsc is truncated\n");
+ return -EINVAL;
+ }
+
+ /* make sure reserved bytes are zeroes */
+ if (rsc->reserved[0] || rsc->reserved[1]) {
+ dev_err(dev, "vdev rsc has non zero reserved bytes\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(dev, "vdev rsc: id %d, dfeatures %x, cfg len %d, %d vrings\n",
+ rsc->id, rsc->dfeatures, rsc->config_len, rsc->num_of_vrings);
+
+ /* we currently support only two vrings per rvdev */
+ if (rsc->num_of_vrings > ARRAY_SIZE(rvdev->vring)) {
+ dev_err(dev, "too many vrings: %d\n", rsc->num_of_vrings);
+ return -EINVAL;
+ }
+
+ rvdev = kzalloc(sizeof(struct rproc_vdev), GFP_KERNEL);
+ if (!rvdev)
+ return -ENOMEM;
+
+ rvdev->rproc = rproc;
+
+ /* allocate the vrings */
+ for (i = 0; i < rsc->num_of_vrings; i++) {
+ ret = __rproc_handle_vring(rvdev, rsc, i);
+ if (ret)
+ goto free_vrings;
+ }
+
+ /* remember the device features */
+ rvdev->dfeatures = rsc->dfeatures;
+
+ list_add_tail(&rvdev->node, &rproc->rvdevs);
+
+ /* it is now safe to add the virtio device */
+ ret = rproc_add_virtio_dev(rvdev, rsc->id);
+ if (ret)
+ goto remove_rvdev;
+
+ return 0;
+
+remove_rvdev:
+ list_del(&rvdev->node);
+free_vrings:
+ __rproc_free_vrings(rvdev, i);
+ kfree(rvdev);
+ return ret;
+}
+
+/**
+ * rproc_handle_trace() - handle a shared trace buffer resource
+ * @rproc: the remote processor
+ * @rsc: the trace resource descriptor
+ * @avail: size of available data (for sanity checking the image)
+ *
+ * In case the remote processor dumps trace logs into memory,
+ * export it via debugfs.
+ *
+ * Currently, the 'da' member of @rsc should contain the device address
+ * where the remote processor is dumping the traces. Later we could also
+ * support dynamically allocating this address using the generic
+ * DMA API (but currently there isn't a use case for that).
+ *
+ * Returns 0 on success, or an appropriate error code otherwise
+ */
+static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc,
+ int avail)
+{
+ struct rproc_mem_entry *trace;
+ struct device *dev = rproc->dev;
+ void *ptr;
+ char name[15];
+
+ if (sizeof(*rsc) > avail) {
+ dev_err(rproc->dev, "trace rsc is truncated\n");
+ return -EINVAL;
+ }
+
+ /* make sure reserved bytes are zeroes */
+ if (rsc->reserved) {
+ dev_err(dev, "trace rsc has non zero reserved bytes\n");
+ return -EINVAL;
+ }
+
+ /* what's the kernel address of this resource ? */
+ ptr = rproc_da_to_va(rproc, rsc->da, rsc->len);
+ if (!ptr) {
+ dev_err(dev, "erroneous trace resource entry\n");
+ return -EINVAL;
+ }
+
+ trace = kzalloc(sizeof(*trace), GFP_KERNEL);
+ if (!trace) {
+ dev_err(dev, "kzalloc trace failed\n");
+ return -ENOMEM;
+ }
+
+ /* set the trace buffer dma properties */
+ trace->len = rsc->len;
+ trace->va = ptr;
+
+ /* make sure snprintf always null terminates, even if truncating */
+ snprintf(name, sizeof(name), "trace%d", rproc->num_traces);
+
+ /* create the debugfs entry */
+ trace->priv = rproc_create_trace_file(name, rproc, trace);
+ if (!trace->priv) {
+ trace->va = NULL;
+ kfree(trace);
+ return -EINVAL;
+ }
+
+ list_add_tail(&trace->node, &rproc->traces);
+
+ rproc->num_traces++;
+
+ dev_dbg(dev, "%s added: va %p, da 0x%x, len 0x%x\n", name, ptr,
+ rsc->da, rsc->len);
+
+ return 0;
+}
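+
+/*
+ * Usage note (illustrative): with the debugfs hierarchy created by
+ * rproc_create_debug_dir() in remoteproc_debugfs.c, the trace buffer
+ * exposed here can typically be read from userspace at a path like
+ * /sys/kernel/debug/remoteproc/<device-name>/trace0, assuming debugfs
+ * is mounted at /sys/kernel/debug.
+ */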
+
+/**
+ * rproc_handle_devmem() - handle devmem resource entry
+ * @rproc: remote processor handle
+ * @rsc: the devmem resource entry
+ * @avail: size of available data (for sanity checking the image)
+ *
+ * Remote processors commonly need to access certain on-chip peripherals.
+ *
+ * Some of these remote processors access memory via an iommu device,
+ * and might require us to configure their iommu before they can access
+ * the on-chip peripherals they need.
+ *
+ * This resource entry is a request to map such a peripheral device.
+ *
+ * These devmem entries will contain the physical address of the device in
+ * the 'pa' member. If a specific device address is expected, then 'da' will
+ * contain it (currently this is the only use case supported). 'len' will
+ * contain the size of the physical region we need to map.
+ *
+ * Currently we just "trust" those devmem entries to contain valid physical
+ * addresses, but this is going to change: we want the implementations to
+ * tell us ranges of physical addresses the firmware is allowed to request,
+ * and not allow firmwares to request access to physical addresses that
+ * are outside those ranges.
+ */
+static int rproc_handle_devmem(struct rproc *rproc, struct fw_rsc_devmem *rsc,
+ int avail)
+{
+ struct rproc_mem_entry *mapping;
+ int ret;
+
+ /* no point in handling this resource without a valid iommu domain */
+ if (!rproc->domain)
+ return -EINVAL;
+
+ if (sizeof(*rsc) > avail) {
+ dev_err(rproc->dev, "devmem rsc is truncated\n");
+ return -EINVAL;
+ }
+
+ /* make sure reserved bytes are zeroes */
+ if (rsc->reserved) {
+ dev_err(rproc->dev, "devmem rsc has non zero reserved bytes\n");
+ return -EINVAL;
+ }
+
+ mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
+ if (!mapping) {
+ dev_err(rproc->dev, "kzalloc mapping failed\n");
+ return -ENOMEM;
+ }
+
+ ret = iommu_map(rproc->domain, rsc->da, rsc->pa, rsc->len, rsc->flags);
+ if (ret) {
+ dev_err(rproc->dev, "failed to map devmem: %d\n", ret);
+ goto out;
+ }
+
+ /*
+ * We'll need this info later when we'll want to unmap everything
+ * (e.g. on shutdown).
+ *
+ * We can't trust the remote processor not to change the resource
+ * table, so we must maintain this info independently.
+ */
+ mapping->da = rsc->da;
+ mapping->len = rsc->len;
+ list_add_tail(&mapping->node, &rproc->mappings);
+
+ dev_dbg(rproc->dev, "mapped devmem pa 0x%x, da 0x%x, len 0x%x\n",
+ rsc->pa, rsc->da, rsc->len);
+
+ return 0;
+
+out:
+ kfree(mapping);
+ return ret;
+}
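+
+/*
+ * For illustration only, a hypothetical devmem entry in a firmware's
+ * resource table might look like this (all values are made up):
+ *
+ *	struct fw_rsc_devmem rsc = {
+ *		.da = 0x4a000000,
+ *		.pa = 0x4a000000,
+ *		.len = 0x100000,
+ *		.flags = IOMMU_READ | IOMMU_WRITE,
+ *	};
+ *
+ * which asks us to map a 1 MiB peripheral region 1:1 into the rproc's
+ * iommu, with read/write permissions.
+ */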
+
+/**
+ * rproc_handle_carveout() - handle phys contig memory allocation requests
+ * @rproc: rproc handle
+ * @rsc: the resource entry
+ * @avail: size of available data (for image validation)
+ *
+ * This function will handle firmware requests for allocation of physically
+ * contiguous memory regions.
+ *
+ * These request entries should come first in the firmware's resource table,
+ * as other firmware entries might request placing other data objects inside
+ * these memory regions (e.g. data/code segments, trace resource entries, ...).
+ *
+ * Allocating memory this way helps utilizing the reserved physical memory
+ * (e.g. CMA) more efficiently, and also minimizes the number of TLB entries
+ * needed to map it (in case @rproc is using an IOMMU). Reducing the TLB
+ * pressure is important; it may have a substantial impact on performance.
+ */
+static int rproc_handle_carveout(struct rproc *rproc,
+ struct fw_rsc_carveout *rsc, int avail)
+{
+ struct rproc_mem_entry *carveout, *mapping;
+ struct device *dev = rproc->dev;
+ dma_addr_t dma;
+ void *va;
+ int ret;
+
+ if (sizeof(*rsc) > avail) {
+ dev_err(rproc->dev, "carveout rsc is truncated\n");
+ return -EINVAL;
+ }
+
+ /* make sure reserved bytes are zeroes */
+ if (rsc->reserved) {
+ dev_err(dev, "carveout rsc has non zero reserved bytes\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(dev, "carveout rsc: da %x, pa %x, len %x, flags %x\n",
+ rsc->da, rsc->pa, rsc->len, rsc->flags);
+
+ mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
+ if (!mapping) {
+ dev_err(dev, "kzalloc mapping failed\n");
+ return -ENOMEM;
+ }
+
+ carveout = kzalloc(sizeof(*carveout), GFP_KERNEL);
+ if (!carveout) {
+ dev_err(dev, "kzalloc carveout failed\n");
+ ret = -ENOMEM;
+ goto free_mapping;
+ }
+
+ va = dma_alloc_coherent(dev, rsc->len, &dma, GFP_KERNEL);
+ if (!va) {
+ dev_err(dev, "failed to dma alloc carveout: %d\n", rsc->len);
+ ret = -ENOMEM;
+ goto free_carv;
+ }
+
+ dev_dbg(dev, "carveout va %p, dma %x, len 0x%x\n", va, dma, rsc->len);
+
+ /*
+ * Ok, this is non-standard.
+ *
+ * Sometimes we can't rely on the generic iommu-based DMA API
+ * to dynamically allocate the device address and then set the IOMMU
+ * tables accordingly, because some remote processors might
+ * _require_ us to use hard coded device addresses that their
+ * firmware was compiled with.
+ *
+ * In this case, we must use the IOMMU API directly and map
+ * the memory to the device address as expected by the remote
+ * processor.
+ *
+ * Obviously such remote processor devices should not be configured
+ * to use the iommu-based DMA API: we expect 'dma' to contain the
+ * physical address in this case.
+ */
+ if (rproc->domain) {
+ ret = iommu_map(rproc->domain, rsc->da, dma, rsc->len,
+ rsc->flags);
+ if (ret) {
+ dev_err(dev, "iommu_map failed: %d\n", ret);
+ goto dma_free;
+ }
+
+ /*
+ * We'll need this info later when we'll want to unmap
+ * everything (e.g. on shutdown).
+ *
+ * We can't trust the remote processor not to change the
+ * resource table, so we must maintain this info independently.
+ */
+ mapping->da = rsc->da;
+ mapping->len = rsc->len;
+ list_add_tail(&mapping->node, &rproc->mappings);
+
+ dev_dbg(dev, "carveout mapped 0x%x to 0x%x\n", rsc->da, dma);
+
+ /*
+ * Some remote processors might need to know the pa
+ * even though they are behind an IOMMU. E.g., OMAP4's
+ * remote M3 processor needs this so it can control
+ * on-chip hardware accelerators that are not behind
+ * the IOMMU, and therefore must know the pa.
+ *
+ * Generally we don't want to expose physical addresses
+ * if we don't have to (remote processors are generally
+ * _not_ trusted), so we might want to do this only for
+ * remote processor that _must_ have this (e.g. OMAP4's
+ * dual M3 subsystem).
+ */
+ rsc->pa = dma;
+ }
+
+ carveout->va = va;
+ carveout->len = rsc->len;
+ carveout->dma = dma;
+ carveout->da = rsc->da;
+
+ list_add_tail(&carveout->node, &rproc->carveouts);
+
+ return 0;
+
+dma_free:
+ dma_free_coherent(dev, rsc->len, va, dma);
+free_carv:
+ kfree(carveout);
+free_mapping:
+ kfree(mapping);
+ return ret;
+}
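+
+/*
+ * Similarly, a hypothetical carveout request might look like this
+ * (values are made up, and fields not shown are omitted; real
+ * firmwares hardcode the da they were linked against):
+ *
+ *	struct fw_rsc_carveout rsc = {
+ *		.da = 0x78000000,
+ *		.len = 0x100000,
+ *	};
+ *
+ * When @rproc has an IOMMU, the handler above maps the bus address
+ * returned by dma_alloc_coherent() onto this da, and writes the
+ * resulting pa back into the entry for the firmware to consume.
+ */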
+
+/*
+ * A lookup table for resource handlers. The indices are defined in
+ * enum fw_resource_type.
+ */
+static rproc_handle_resource_t rproc_handle_rsc[] = {
+ [RSC_CARVEOUT] = (rproc_handle_resource_t)rproc_handle_carveout,
+ [RSC_DEVMEM] = (rproc_handle_resource_t)rproc_handle_devmem,
+ [RSC_TRACE] = (rproc_handle_resource_t)rproc_handle_trace,
+ [RSC_VDEV] = NULL, /* VDEVs were handled upon registration */
+};
+
+/* handle firmware resource entries before booting the remote processor */
+static int
+rproc_handle_boot_rsc(struct rproc *rproc, struct resource_table *table, int len)
+{
+ struct device *dev = rproc->dev;
+ rproc_handle_resource_t handler;
+ int ret = 0, i;
+
+ for (i = 0; i < table->num; i++) {
+ int offset = table->offset[i];
+ struct fw_rsc_hdr *hdr = (void *)table + offset;
+ int avail = len - offset - sizeof(*hdr);
+ void *rsc = (void *)hdr + sizeof(*hdr);
+
+ /* make sure table isn't truncated */
+ if (avail < 0) {
+ dev_err(dev, "rsc table is truncated\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(dev, "rsc: type %d\n", hdr->type);
+
+ if (hdr->type >= RSC_LAST) {
+ dev_warn(dev, "unsupported resource %d\n", hdr->type);
+ continue;
+ }
+
+ handler = rproc_handle_rsc[hdr->type];
+ if (!handler)
+ continue;
+
+ ret = handler(rproc, rsc, avail);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+/* handle firmware resource entries while registering the remote processor */
+static int
+rproc_handle_virtio_rsc(struct rproc *rproc, struct resource_table *table, int len)
+{
+ struct device *dev = rproc->dev;
+ int ret = 0, i;
+
+ for (i = 0; i < table->num; i++) {
+ int offset = table->offset[i];
+ struct fw_rsc_hdr *hdr = (void *)table + offset;
+ int avail = len - offset - sizeof(*hdr);
+ struct fw_rsc_vdev *vrsc;
+
+ /* make sure table isn't truncated */
+ if (avail < 0) {
+ dev_err(dev, "rsc table is truncated\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(dev, "%s: rsc type %d\n", __func__, hdr->type);
+
+ if (hdr->type != RSC_VDEV)
+ continue;
+
+ vrsc = (struct fw_rsc_vdev *)hdr->data;
+
+ ret = rproc_handle_vdev(rproc, vrsc, avail);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * rproc_find_rsc_table() - find the resource table
+ * @rproc: the rproc handle
+ * @elf_data: the content of the ELF firmware image
+ * @len: firmware size (in bytes)
+ * @tablesz: place holder for providing back the table size
+ *
+ * This function finds the resource table inside the remote processor's
+ * firmware. It is used both upon the registration of @rproc (in order
+ * to look for and register the supported virtio devices), and when the
+ * @rproc is booted.
+ *
+ * Returns the pointer to the resource table if it is found, and writes its
+ * size into @tablesz. If a valid table isn't found, NULL is returned
+ * (and @tablesz isn't set).
+ */
+static struct resource_table *
+rproc_find_rsc_table(struct rproc *rproc, const u8 *elf_data, size_t len,
+ int *tablesz)
+{
+ struct elf32_hdr *ehdr;
+ struct elf32_shdr *shdr;
+ const char *name_table;
+ struct device *dev = rproc->dev;
+ struct resource_table *table = NULL;
+ int i;
+
+ ehdr = (struct elf32_hdr *)elf_data;
+ shdr = (struct elf32_shdr *)(elf_data + ehdr->e_shoff);
+ name_table = elf_data + shdr[ehdr->e_shstrndx].sh_offset;
+
+ /* look for the resource table and handle it */
+ for (i = 0; i < ehdr->e_shnum; i++, shdr++) {
+ int size = shdr->sh_size;
+ int offset = shdr->sh_offset;
+
+ if (strcmp(name_table + shdr->sh_name, ".resource_table"))
+ continue;
+
+ table = (struct resource_table *)(elf_data + offset);
+
+ /* make sure we have the entire table */
+ if (offset + size > len) {
+ dev_err(dev, "resource table truncated\n");
+ return NULL;
+ }
+
+ /* make sure table has at least the header */
+ if (sizeof(struct resource_table) > size) {
+ dev_err(dev, "header-less resource table\n");
+ return NULL;
+ }
+
+ /* we don't support any version beyond the first */
+ if (table->ver != 1) {
+ dev_err(dev, "unsupported fw ver: %d\n", table->ver);
+ return NULL;
+ }
+
+ /* make sure reserved bytes are zeroes */
+ if (table->reserved[0] || table->reserved[1]) {
+ dev_err(dev, "non zero reserved bytes\n");
+ return NULL;
+ }
+
+ /* make sure the offsets array isn't truncated */
+ if (table->num * sizeof(table->offset[0]) +
+ sizeof(struct resource_table) > size) {
+ dev_err(dev, "resource table incomplete\n");
+ return NULL;
+ }
+
+ *tablesz = shdr->sh_size;
+ break;
+ }
+
+ return table;
+}
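+
+/*
+ * Firmware-side sketch (illustrative, not part of this kernel code):
+ * images typically place their table in a dedicated ELF section so
+ * that the loop above can find it by name, e.g.:
+ *
+ *	__attribute__((section(".resource_table")))
+ *	struct my_resource_table table = { .ver = 1, ... };
+ *
+ * where "struct my_resource_table" is a firmware-defined layout that
+ * begins with struct resource_table.
+ */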
+
+/**
+ * rproc_resource_cleanup() - clean up and free all acquired resources
+ * @rproc: rproc handle
+ *
+ * This function will free all resources acquired for @rproc, and it
+ * is called whenever @rproc either shuts down or fails to boot.
+ */
+static void rproc_resource_cleanup(struct rproc *rproc)
+{
+ struct rproc_mem_entry *entry, *tmp;
+ struct device *dev = rproc->dev;
+
+ /* clean up debugfs trace entries */
+ list_for_each_entry_safe(entry, tmp, &rproc->traces, node) {
+ rproc_remove_trace_file(entry->priv);
+ rproc->num_traces--;
+ list_del(&entry->node);
+ kfree(entry);
+ }
+
+ /* clean up carveout allocations */
+ list_for_each_entry_safe(entry, tmp, &rproc->carveouts, node) {
+ dma_free_coherent(dev, entry->len, entry->va, entry->dma);
+ list_del(&entry->node);
+ kfree(entry);
+ }
+
+ /* clean up iommu mapping entries */
+ list_for_each_entry_safe(entry, tmp, &rproc->mappings, node) {
+ size_t unmapped;
+
+ unmapped = iommu_unmap(rproc->domain, entry->da, entry->len);
+ if (unmapped != entry->len) {
+ /* nothing much to do besides complaining */
+ dev_err(dev, "failed to unmap %u/%u\n", entry->len,
+ unmapped);
+ }
+
+ list_del(&entry->node);
+ kfree(entry);
+ }
+}
+
+/* make sure this fw image is sane */
+static int rproc_fw_sanity_check(struct rproc *rproc, const struct firmware *fw)
+{
+ const char *name = rproc->firmware;
+ struct device *dev = rproc->dev;
+ struct elf32_hdr *ehdr;
+ char class;
+
+ if (!fw) {
+ dev_err(dev, "failed to load %s\n", name);
+ return -EINVAL;
+ }
+
+ if (fw->size < sizeof(struct elf32_hdr)) {
+ dev_err(dev, "Image is too small\n");
+ return -EINVAL;
+ }
+
+ ehdr = (struct elf32_hdr *)fw->data;
+
+ /* We only support ELF32 at this point */
+ class = ehdr->e_ident[EI_CLASS];
+ if (class != ELFCLASS32) {
+ dev_err(dev, "Unsupported class: %d\n", class);
+ return -EINVAL;
+ }
+
+ /* We assume the firmware has the same endianness as the host */
+# ifdef __LITTLE_ENDIAN
+ if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB) {
+# else /* BIG ENDIAN */
+ if (ehdr->e_ident[EI_DATA] != ELFDATA2MSB) {
+# endif
+ dev_err(dev, "Unsupported firmware endianess\n");
+ return -EINVAL;
+ }
+
+ if (fw->size < ehdr->e_shoff + sizeof(struct elf32_shdr)) {
+ dev_err(dev, "Image is too small\n");
+ return -EINVAL;
+ }
+
+ if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
+ dev_err(dev, "Image is corrupted (bad magic)\n");
+ return -EINVAL;
+ }
+
+ if (ehdr->e_phnum == 0) {
+ dev_err(dev, "No loadable segments\n");
+ return -EINVAL;
+ }
+
+ if (ehdr->e_phoff > fw->size) {
+ dev_err(dev, "Firmware size is too small\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * take a firmware and boot a remote processor with it.
+ */
+static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
+{
+ struct device *dev = rproc->dev;
+ const char *name = rproc->firmware;
+ struct elf32_hdr *ehdr;
+ struct resource_table *table;
+ int ret, tablesz;
+
+ ret = rproc_fw_sanity_check(rproc, fw);
+ if (ret)
+ return ret;
+
+ ehdr = (struct elf32_hdr *)fw->data;
+
+ dev_info(dev, "Booting fw image %s, size %d\n", name, fw->size);
+
+ /*
+ * if enabling an IOMMU isn't relevant for this rproc, this is
+ * just a nop
+ */
+ ret = rproc_enable_iommu(rproc);
+ if (ret) {
+ dev_err(dev, "can't enable iommu: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * The ELF entry point is the rproc's boot addr (though this is not
+ * a configurable property of all remote processors: some will always
+ * boot at a specific hardcoded address).
+ */
+ rproc->bootaddr = ehdr->e_entry;
+
+ /* look for the resource table */
+ table = rproc_find_rsc_table(rproc, fw->data, fw->size, &tablesz);
+ if (!table) {
+ ret = -EINVAL;
+ goto clean_up;
+ }
+
+ /* handle fw resources which are required to boot rproc */
+ ret = rproc_handle_boot_rsc(rproc, table, tablesz);
+ if (ret) {
+ dev_err(dev, "Failed to process resources: %d\n", ret);
+ goto clean_up;
+ }
+
+ /* load the ELF segments to memory */
+ ret = rproc_load_segments(rproc, fw->data, fw->size);
+ if (ret) {
+ dev_err(dev, "Failed to load program segments: %d\n", ret);
+ goto clean_up;
+ }
+
+ /* power up the remote processor */
+ ret = rproc->ops->start(rproc);
+ if (ret) {
+ dev_err(dev, "can't start rproc %s: %d\n", rproc->name, ret);
+ goto clean_up;
+ }
+
+ rproc->state = RPROC_RUNNING;
+
+ dev_info(dev, "remote processor %s is now up\n", rproc->name);
+
+ return 0;
+
+clean_up:
+ rproc_resource_cleanup(rproc);
+ rproc_disable_iommu(rproc);
+ return ret;
+}
+
+/*
+ * take a firmware and look for virtio devices to register.
+ *
+ * Note: this function is called asynchronously upon registration of the
+ * remote processor (so we must wait until it completes before we try
+ * to unregister the device). One other option is just to use kref here;
+ * that might be cleaner.
+ */
+static void rproc_fw_config_virtio(const struct firmware *fw, void *context)
+{
+ struct rproc *rproc = context;
+ struct resource_table *table;
+ int ret, tablesz;
+
+ if (rproc_fw_sanity_check(rproc, fw) < 0)
+ goto out;
+
+ /* look for the resource table */
+ table = rproc_find_rsc_table(rproc, fw->data, fw->size, &tablesz);
+ if (!table)
+ goto out;
+
+ /* look for virtio devices and register them */
+ ret = rproc_handle_virtio_rsc(rproc, table, tablesz);
+ if (ret)
+ goto out;
+
+out:
+ if (fw)
+ release_firmware(fw);
+ /* allow rproc_unregister() contexts, if any, to proceed */
+ complete_all(&rproc->firmware_loading_complete);
+}
+
+/**
+ * rproc_boot() - boot a remote processor
+ * @rproc: handle of a remote processor
+ *
+ * Boot a remote processor (i.e. load its firmware, power it on, ...).
+ *
+ * If the remote processor is already powered on, this function immediately
+ * returns (successfully).
+ *
+ * Returns 0 on success, and an appropriate error value otherwise.
+ */
+int rproc_boot(struct rproc *rproc)
+{
+ const struct firmware *firmware_p;
+ struct device *dev;
+ int ret;
+
+ if (!rproc) {
+ pr_err("invalid rproc handle\n");
+ return -EINVAL;
+ }
+
+ dev = rproc->dev;
+
+ ret = mutex_lock_interruptible(&rproc->lock);
+ if (ret) {
+ dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
+ return ret;
+ }
+
+ /* loading a firmware is required */
+ if (!rproc->firmware) {
+ dev_err(dev, "%s: no firmware to load\n", __func__);
+ ret = -EINVAL;
+ goto unlock_mutex;
+ }
+
+ /* prevent underlying implementation from being removed */
+ if (!try_module_get(dev->driver->owner)) {
+ dev_err(dev, "%s: can't get owner\n", __func__);
+ ret = -EINVAL;
+ goto unlock_mutex;
+ }
+
+ /* skip the boot process if rproc is already powered up */
+ if (atomic_inc_return(&rproc->power) > 1) {
+ ret = 0;
+ goto unlock_mutex;
+ }
+
+ dev_info(dev, "powering up %s\n", rproc->name);
+
+ /* load firmware */
+ ret = request_firmware(&firmware_p, rproc->firmware, dev);
+ if (ret < 0) {
+ dev_err(dev, "request_firmware failed: %d\n", ret);
+ goto downref_rproc;
+ }
+
+ ret = rproc_fw_boot(rproc, firmware_p);
+
+ release_firmware(firmware_p);
+
+downref_rproc:
+ if (ret) {
+ module_put(dev->driver->owner);
+ atomic_dec(&rproc->power);
+ }
+unlock_mutex:
+ mutex_unlock(&rproc->lock);
+ return ret;
+}
+EXPORT_SYMBOL(rproc_boot);
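+
+/*
+ * A minimal usage sketch (illustrative; "my_rproc" is a hypothetical
+ * handle obtained elsewhere, e.g. from a platform driver):
+ *
+ *	ret = rproc_boot(my_rproc);
+ *	if (ret)
+ *		return ret;
+ *	... communicate with the remote processor ...
+ *	rproc_shutdown(my_rproc);
+ *
+ * Every successful rproc_boot() must eventually be balanced by a
+ * matching rproc_shutdown(), as documented below.
+ */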
+
+/**
+ * rproc_shutdown() - power off the remote processor
+ * @rproc: the remote processor
+ *
+ * Power off a remote processor (previously booted with rproc_boot()).
+ *
+ * If @rproc is still being used by additional users, then this
+ * function will just decrement the power refcount and exit,
+ * without really powering off the device.
+ *
+ * Every call to rproc_boot() must (eventually) be accompanied by a call
+ * to rproc_shutdown(). Calling rproc_shutdown() redundantly is a bug.
+ *
+ * Notes:
+ * - we're not decrementing the rproc's refcount, only the power refcount,
+ * which means that the @rproc handle stays valid even after rproc_shutdown()
+ * returns, and users can still use it with a subsequent rproc_boot(), if
+ * needed.
+ * - don't call rproc_shutdown() to unroll rproc_get_by_name(), exactly
+ * because rproc_shutdown() _does not_ decrement the refcount of @rproc.
+ * To decrement the refcount of @rproc, use rproc_put() (but _only_ if
+ * you acquired @rproc using rproc_get_by_name()).
+ */
+void rproc_shutdown(struct rproc *rproc)
+{
+ struct device *dev = rproc->dev;
+ int ret;
+
+ ret = mutex_lock_interruptible(&rproc->lock);
+ if (ret) {
+ dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
+ return;
+ }
+
+ /* if the remote proc is still needed, bail out */
+ if (!atomic_dec_and_test(&rproc->power))
+ goto out;
+
+ /* power off the remote processor */
+ ret = rproc->ops->stop(rproc);
+ if (ret) {
+ atomic_inc(&rproc->power);
+ dev_err(dev, "can't stop rproc: %d\n", ret);
+ goto out;
+ }
+
+ /* clean up all acquired resources */
+ rproc_resource_cleanup(rproc);
+
+ rproc_disable_iommu(rproc);
+
+ rproc->state = RPROC_OFFLINE;
+
+ dev_info(dev, "stopped remote processor %s\n", rproc->name);
+
+out:
+ mutex_unlock(&rproc->lock);
+ if (!ret)
+ module_put(dev->driver->owner);
+}
+EXPORT_SYMBOL(rproc_shutdown);
+
+/**
+ * rproc_release() - completely deletes the existence of a remote processor
+ * @kref: the rproc's kref
+ *
+ * This function should _never_ be called directly.
+ *
+ * The only reasonable location to use it is as an argument when kref_put'ing
+ * @rproc's refcount.
+ *
+ * This way it will be called when no one holds a valid pointer to this @rproc
+ * anymore (and obviously after it is removed from the rprocs klist).
+ *
+ * Note: this function is not static because rproc_vdev_release() needs it when
+ * it decrements @rproc's refcount.
+ */
+void rproc_release(struct kref *kref)
+{
+ struct rproc *rproc = container_of(kref, struct rproc, refcount);
+ struct rproc_vdev *rvdev, *rvtmp;
+
+ dev_info(rproc->dev, "removing %s\n", rproc->name);
+
+ rproc_delete_debug_dir(rproc);
+
+ /* clean up remote vdev entries */
+ list_for_each_entry_safe(rvdev, rvtmp, &rproc->rvdevs, node) {
+ __rproc_free_vrings(rvdev, RVDEV_NUM_VRINGS);
+ list_del(&rvdev->node);
+ }
+
+ /*
+ * At this point no one holds a reference to rproc anymore,
+ * so we can directly unroll rproc_alloc()
+ */
+ rproc_free(rproc);
+}
+
+/* will be called when an rproc is added to the rprocs klist */
+static void klist_rproc_get(struct klist_node *n)
+{
+ struct rproc *rproc = container_of(n, struct rproc, node);
+
+ kref_get(&rproc->refcount);
+}
+
+/* will be called when an rproc is removed from the rprocs klist */
+static void klist_rproc_put(struct klist_node *n)
+{
+ struct rproc *rproc = container_of(n, struct rproc, node);
+
+ kref_put(&rproc->refcount, rproc_release);
+}
+
+static struct rproc *next_rproc(struct klist_iter *i)
+{
+ struct klist_node *n;
+
+ n = klist_next(i);
+ if (!n)
+ return NULL;
+
+ return container_of(n, struct rproc, node);
+}
+
+/**
+ * rproc_get_by_name() - find a remote processor by name and boot it
+ * @name: name of the remote processor
+ *
+ * Finds an rproc handle using the remote processor's name, and then
+ * boots it. If it's already powered on, this function immediately
+ * returns (successfully).
+ *
+ * Returns the rproc handle on success, and NULL on failure.
+ *
+ * This function increments the remote processor's refcount, so always
+ * use rproc_put() to decrement it back once rproc isn't needed anymore.
+ *
+ * Note: currently this function (and its counterpart rproc_put()) is not
+ * being used. We need to scrutinize the use cases
+ * that still need them, and see if we can migrate them to use the non
+ * name-based boot/shutdown interface.
+ */
+struct rproc *rproc_get_by_name(const char *name)
+{
+ struct rproc *rproc;
+ struct klist_iter i;
+ int ret;
+
+ /* find the remote processor, and upref its refcount */
+ klist_iter_init(&rprocs, &i);
+ while ((rproc = next_rproc(&i)) != NULL)
+ if (!strcmp(rproc->name, name)) {
+ kref_get(&rproc->refcount);
+ break;
+ }
+ klist_iter_exit(&i);
+
+ /* can't find this rproc ? */
+ if (!rproc) {
+ pr_err("can't find remote processor %s\n", name);
+ return NULL;
+ }
+
+ ret = rproc_boot(rproc);
+ if (ret < 0) {
+ kref_put(&rproc->refcount, rproc_release);
+ return NULL;
+ }
+
+ return rproc;
+}
+EXPORT_SYMBOL(rproc_get_by_name);
+
+/**
+ * rproc_put() - decrement the refcount of a remote processor, and shut it down
+ * @rproc: the remote processor
+ *
+ * This function tries to shutdown @rproc, and it then decrements its
+ * refcount.
+ *
+ * After this function returns, @rproc may _not_ be used anymore, and its
+ * handle should be considered invalid.
+ *
+ * This function should be called _iff_ the @rproc handle was grabbed by
+ * calling rproc_get_by_name().
+ */
+void rproc_put(struct rproc *rproc)
+{
+ /* try to power off the remote processor */
+ rproc_shutdown(rproc);
+
+ /* downref rproc's refcount */
+ kref_put(&rproc->refcount, rproc_release);
+}
+EXPORT_SYMBOL(rproc_put);
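+
+/*
+ * Illustrative pairing of the two name-based calls above (the name
+ * "ipu" is hypothetical):
+ *
+ *	struct rproc *rproc = rproc_get_by_name("ipu");
+ *	if (!rproc)
+ *		return -ENODEV;
+ *	... use the now-booted remote processor ...
+ *	rproc_put(rproc);
+ */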
+
+/**
+ * rproc_register() - register a remote processor
+ * @rproc: the remote processor handle to register
+ *
+ * Registers @rproc with the remoteproc framework, after it has been
+ * allocated with rproc_alloc().
+ *
+ * This is called by the platform-specific rproc implementation, whenever
+ * a new remote processor device is probed.
+ *
+ * Returns 0 on success and an appropriate error code otherwise.
+ *
+ * Note: this function initiates an asynchronous firmware loading
+ * context, which will look for virtio devices supported by the rproc's
+ * firmware.
+ *
+ * If found, those virtio devices will be created and added, so as a result
+ * of registering this remote processor, additional virtio drivers might be
+ * probed.
+ */
+int rproc_register(struct rproc *rproc)
+{
+ struct device *dev = rproc->dev;
+ int ret = 0;
+
+ /* expose to rproc_get_by_name users */
+ klist_add_tail(&rproc->node, &rprocs);
+
+ dev_info(rproc->dev, "%s is available\n", rproc->name);
+
+ dev_info(dev, "Note: remoteproc is still under development and considered experimental.\n");
+ dev_info(dev, "THE BINARY FORMAT IS NOT YET FINALIZED, and backward compatibility isn't yet guaranteed.\n");
+
+ /* create debugfs entries */
+ rproc_create_debug_dir(rproc);
+
+ /* rproc_unregister() calls must wait until async loader completes */
+ init_completion(&rproc->firmware_loading_complete);
+
+ /*
+ * We must retrieve early virtio configuration info from
+ * the firmware (e.g. whether to register a virtio device,
+ * what virtio features does it support, ...).
+ *
+ * We're initiating an asynchronous firmware loading, so we can
+ * be built-in kernel code, without hanging the boot process.
+ */
+ ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
+ rproc->firmware, dev, GFP_KERNEL,
+ rproc, rproc_fw_config_virtio);
+ if (ret < 0) {
+ dev_err(dev, "request_firmware_nowait failed: %d\n", ret);
+ complete_all(&rproc->firmware_loading_complete);
+ klist_remove(&rproc->node);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(rproc_register);
+
+/**
+ * rproc_alloc() - allocate a remote processor handle
+ * @dev: the underlying device
+ * @name: name of this remote processor
+ * @ops: platform-specific handlers (mainly start/stop)
+ * @firmware: name of firmware file to load
+ * @len: length of private data needed by the rproc driver (in bytes)
+ *
+ * Allocates a new remote processor handle, but does not register
+ * it yet.
+ *
+ * This function should be used by rproc implementations during initialization
+ * of the remote processor.
+ *
+ * After creating an rproc handle using this function, and when ready,
+ * implementations should then call rproc_register() to complete
+ * the registration of the remote processor.
+ *
+ * On success the new rproc is returned, and on failure, NULL.
+ *
+ * Note: _never_ directly deallocate @rproc, even if it was not registered
+ * yet. Instead, if you just need to unroll rproc_alloc(), use rproc_free().
+ */
+struct rproc *rproc_alloc(struct device *dev, const char *name,
+ const struct rproc_ops *ops,
+ const char *firmware, int len)
+{
+ struct rproc *rproc;
+
+ if (!dev || !name || !ops)
+ return NULL;
+
+ rproc = kzalloc(sizeof(struct rproc) + len, GFP_KERNEL);
+ if (!rproc) {
+ dev_err(dev, "%s: kzalloc failed\n", __func__);
+ return NULL;
+ }
+
+ rproc->dev = dev;
+ rproc->name = name;
+ rproc->ops = ops;
+ rproc->firmware = firmware;
+ rproc->priv = &rproc[1];
+
+ atomic_set(&rproc->power, 0);
+
+ kref_init(&rproc->refcount);
+
+ mutex_init(&rproc->lock);
+
+ idr_init(&rproc->notifyids);
+
+ INIT_LIST_HEAD(&rproc->carveouts);
+ INIT_LIST_HEAD(&rproc->mappings);
+ INIT_LIST_HEAD(&rproc->traces);
+ INIT_LIST_HEAD(&rproc->rvdevs);
+
+ rproc->state = RPROC_OFFLINE;
+
+ return rproc;
+}
+EXPORT_SYMBOL(rproc_alloc);
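+
+/*
+ * A platform driver probe sketch (illustrative only; all names are
+ * made up, and error handling is trimmed):
+ *
+ *	static int my_rproc_probe(struct platform_device *pdev)
+ *	{
+ *		struct rproc *rproc;
+ *
+ *		rproc = rproc_alloc(&pdev->dev, "my-rproc", &my_rproc_ops,
+ *				    "my-fw.elf", sizeof(struct my_priv));
+ *		if (!rproc)
+ *			return -ENOMEM;
+ *
+ *		platform_set_drvdata(pdev, rproc);
+ *		return rproc_register(rproc);
+ *	}
+ */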
+
+/**
+ * rproc_free() - free an rproc handle that was allocated by rproc_alloc
+ * @rproc: the remote processor handle
+ *
+ * This function should _only_ be used if @rproc was only allocated,
+ * but not registered yet.
+ *
+ * If @rproc was already successfully registered (by calling rproc_register()),
+ * then use rproc_unregister() instead.
+ */
+void rproc_free(struct rproc *rproc)
+{
+ idr_remove_all(&rproc->notifyids);
+ idr_destroy(&rproc->notifyids);
+
+ kfree(rproc);
+}
+EXPORT_SYMBOL(rproc_free);
+
+/**
+ * rproc_unregister() - unregister a remote processor
+ * @rproc: rproc handle to unregister
+ *
+ * Unregisters a remote processor, and decrements its refcount.
+ * If its refcount drops to zero, then @rproc will be freed. If not,
+ * it will be freed later once the last reference is dropped.
+ *
+ * This function should be called when the platform specific rproc
+ * implementation decides to remove the rproc device. It should
+ * _only_ be called if a previous invocation of rproc_register()
+ * has completed successfully.
+ *
+ * After rproc_unregister() returns, @rproc is _not_ valid anymore and
+ * it shouldn't be used. More specifically, don't call rproc_free()
+ * or try to directly free @rproc after rproc_unregister() returns;
+ * none of these are needed, and calling them is a bug.
+ *
+ * Returns 0 on success and -EINVAL if @rproc isn't valid.
+ */
+int rproc_unregister(struct rproc *rproc)
+{
+ struct rproc_vdev *rvdev;
+
+ if (!rproc)
+ return -EINVAL;
+
+ /* if rproc is just being registered, wait */
+ wait_for_completion(&rproc->firmware_loading_complete);
+
+ /* clean up remote vdev entries */
+ list_for_each_entry(rvdev, &rproc->rvdevs, node)
+ rproc_remove_virtio_dev(rvdev);
+
+ /* the rproc is downref'ed as soon as it's removed from the klist */
+ klist_del(&rproc->node);
+
+ /* the rproc will only be released after its refcount drops to zero */
+ kref_put(&rproc->refcount, rproc_release);
+
+ return 0;
+}
+EXPORT_SYMBOL(rproc_unregister);
+
+static int __init remoteproc_init(void)
+{
+ rproc_init_debugfs();
+ return 0;
+}
+module_init(remoteproc_init);
+
+static void __exit remoteproc_exit(void)
+{
+ rproc_exit_debugfs();
+}
+module_exit(remoteproc_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Generic Remote Processor Framework");
diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c
new file mode 100644
index 000000000000..70277a530133
--- /dev/null
+++ b/drivers/remoteproc/remoteproc_debugfs.c
@@ -0,0 +1,179 @@
+/*
+ * Remote Processor Framework
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Ohad Ben-Cohen <ohad@wizery.com>
+ * Mark Grosen <mgrosen@ti.com>
+ * Brian Swetland <swetland@google.com>
+ * Fernando Guzman Lugo <fernando.lugo@ti.com>
+ * Suman Anna <s-anna@ti.com>
+ * Robert Tivy <rtivy@ti.com>
+ * Armando Uribe De Leon <x0095078@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include <linux/remoteproc.h>
+#include <linux/device.h>
+
+/* remoteproc debugfs parent dir */
+static struct dentry *rproc_dbg;
+
+/*
+ * Some remote processors may support dumping trace logs into a shared
+ * memory buffer. We expose this trace buffer using debugfs, so users
+ * can easily tell what's going on remotely.
+ *
+ * We will most probably improve the rproc tracing facilities later on,
+ * but this kind of lightweight and simple mechanism is always good to have,
+ * as it provides very early tracing with little to no dependencies at all.
+ */
+static ssize_t rproc_trace_read(struct file *filp, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct rproc_mem_entry *trace = filp->private_data;
+ int len = strnlen(trace->va, trace->len);
+
+ return simple_read_from_buffer(userbuf, count, ppos, trace->va, len);
+}
+
+static int rproc_open_generic(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+
+ return 0;
+}
+
+static const struct file_operations trace_rproc_ops = {
+ .read = rproc_trace_read,
+ .open = rproc_open_generic,
+ .llseek = generic_file_llseek,
+};
+
+/*
+ * A state-to-string lookup table, for exposing a human readable state
+ * via debugfs. Always keep in sync with enum rproc_state
+ */
+static const char * const rproc_state_string[] = {
+ "offline",
+ "suspended",
+ "running",
+ "crashed",
+ "invalid",
+};
+
+/* expose the state of the remote processor via debugfs */
+static ssize_t rproc_state_read(struct file *filp, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct rproc *rproc = filp->private_data;
+ unsigned int state;
+ char buf[30];
+ int i;
+
+ state = rproc->state > RPROC_LAST ? RPROC_LAST : rproc->state;
+
+ i = snprintf(buf, 30, "%.28s (%d)\n", rproc_state_string[state],
+ rproc->state);
+
+ return simple_read_from_buffer(userbuf, count, ppos, buf, i);
+}
+
+static const struct file_operations rproc_state_ops = {
+ .read = rproc_state_read,
+ .open = rproc_open_generic,
+ .llseek = generic_file_llseek,
+};
+
+/* expose the name of the remote processor via debugfs */
+static ssize_t rproc_name_read(struct file *filp, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct rproc *rproc = filp->private_data;
+ /* need room for the name, a newline and a terminating null */
+ char buf[100];
+ int i;
+
+ i = snprintf(buf, sizeof(buf), "%.98s\n", rproc->name);
+
+ return simple_read_from_buffer(userbuf, count, ppos, buf, i);
+}
+
+static const struct file_operations rproc_name_ops = {
+ .read = rproc_name_read,
+ .open = rproc_open_generic,
+ .llseek = generic_file_llseek,
+};
+
+void rproc_remove_trace_file(struct dentry *tfile)
+{
+ debugfs_remove(tfile);
+}
+
+struct dentry *rproc_create_trace_file(const char *name, struct rproc *rproc,
+ struct rproc_mem_entry *trace)
+{
+ struct dentry *tfile;
+
+ tfile = debugfs_create_file(name, 0400, rproc->dbg_dir,
+ trace, &trace_rproc_ops);
+ if (!tfile) {
+ dev_err(rproc->dev, "failed to create debugfs trace entry\n");
+ return NULL;
+ }
+
+ return tfile;
+}
+
+void rproc_delete_debug_dir(struct rproc *rproc)
+{
+ if (!rproc->dbg_dir)
+ return;
+
+ debugfs_remove_recursive(rproc->dbg_dir);
+}
+
+void rproc_create_debug_dir(struct rproc *rproc)
+{
+ struct device *dev = rproc->dev;
+
+ if (!rproc_dbg)
+ return;
+
+ rproc->dbg_dir = debugfs_create_dir(dev_name(dev), rproc_dbg);
+ if (!rproc->dbg_dir)
+ return;
+
+ debugfs_create_file("name", 0400, rproc->dbg_dir,
+ rproc, &rproc_name_ops);
+ debugfs_create_file("state", 0400, rproc->dbg_dir,
+ rproc, &rproc_state_ops);
+}
+
+void __init rproc_init_debugfs(void)
+{
+ if (debugfs_initialized()) {
+ rproc_dbg = debugfs_create_dir(KBUILD_MODNAME, NULL);
+ if (!rproc_dbg)
+ pr_err("can't create debugfs dir\n");
+ }
+}
+
+void __exit rproc_exit_debugfs(void)
+{
+ if (rproc_dbg)
+ debugfs_remove(rproc_dbg);
+}
diff --git a/drivers/remoteproc/remoteproc_internal.h b/drivers/remoteproc/remoteproc_internal.h
new file mode 100644
index 000000000000..9f336d6bdef3
--- /dev/null
+++ b/drivers/remoteproc/remoteproc_internal.h
@@ -0,0 +1,44 @@
+/*
+ * Remote processor framework
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Ohad Ben-Cohen <ohad@wizery.com>
+ * Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef REMOTEPROC_INTERNAL_H
+#define REMOTEPROC_INTERNAL_H
+
+#include <linux/irqreturn.h>
+
+struct rproc;
+
+/* from remoteproc_core.c */
+void rproc_release(struct kref *kref);
+irqreturn_t rproc_vq_interrupt(struct rproc *rproc, int vq_id);
+
+/* from remoteproc_virtio.c */
+int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id);
+void rproc_remove_virtio_dev(struct rproc_vdev *rvdev);
+
+/* from remoteproc_debugfs.c */
+void rproc_remove_trace_file(struct dentry *tfile);
+struct dentry *rproc_create_trace_file(const char *name, struct rproc *rproc,
+ struct rproc_mem_entry *trace);
+void rproc_delete_debug_dir(struct rproc *rproc);
+void rproc_create_debug_dir(struct rproc *rproc);
+void rproc_init_debugfs(void);
+void rproc_exit_debugfs(void);
+
+#endif /* REMOTEPROC_INTERNAL_H */
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
new file mode 100644
index 000000000000..ecf612130750
--- /dev/null
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -0,0 +1,289 @@
+/*
+ * Remote processor messaging transport (OMAP platform-specific bits)
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Ohad Ben-Cohen <ohad@wizery.com>
+ * Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/export.h>
+#include <linux/remoteproc.h>
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_ring.h>
+#include <linux/err.h>
+#include <linux/kref.h>
+#include <linux/slab.h>
+
+#include "remoteproc_internal.h"
+
+/* kick the remote processor, and let it know which virtqueue to poke at */
+static void rproc_virtio_notify(struct virtqueue *vq)
+{
+ struct rproc_vring *rvring = vq->priv;
+ struct rproc *rproc = rvring->rvdev->rproc;
+ int notifyid = rvring->notifyid;
+
+ dev_dbg(rproc->dev, "kicking vq index: %d\n", notifyid);
+
+ rproc->ops->kick(rproc, notifyid);
+}
+
+/**
+ * rproc_vq_interrupt() - tell remoteproc that a virtqueue is interrupted
+ * @rproc: handle to the remote processor
+ * @notifyid: index of the signalled virtqueue (unique per this @rproc)
+ *
+ * This function should be called by the platform-specific rproc driver,
+ * when the remote processor signals that a specific virtqueue has pending
+ * messages available.
+ *
+ * Returns IRQ_NONE if no message was found in the @notifyid virtqueue,
+ * and otherwise returns IRQ_HANDLED.
+ */
+irqreturn_t rproc_vq_interrupt(struct rproc *rproc, int notifyid)
+{
+ struct rproc_vring *rvring;
+
+ dev_dbg(rproc->dev, "vq index %d is interrupted\n", notifyid);
+
+ rvring = idr_find(&rproc->notifyids, notifyid);
+ if (!rvring || !rvring->vq)
+ return IRQ_NONE;
+
+ return vring_interrupt(0, rvring->vq);
+}
+EXPORT_SYMBOL(rproc_vq_interrupt);
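+
+/*
+ * Illustrative platform-side sketch (names are hypothetical): a
+ * mailbox/IPC interrupt handler would typically decode the notifyid
+ * conveyed by its hardware and forward it here:
+ *
+ *	static irqreturn_t my_mbox_isr(int irq, void *data)
+ *	{
+ *		struct rproc *rproc = data;
+ *		int notifyid = my_mbox_read_msg();
+ *
+ *		return rproc_vq_interrupt(rproc, notifyid);
+ *	}
+ */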
+
+static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
+ unsigned id,
+ void (*callback)(struct virtqueue *vq),
+ const char *name)
+{
+ struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
+ struct rproc *rproc = vdev_to_rproc(vdev);
+ struct rproc_vring *rvring;
+ struct virtqueue *vq;
+ void *addr;
+ int len, size;
+
+ /* we're temporarily limited to two virtqueues per rvdev */
+ if (id >= ARRAY_SIZE(rvdev->vring))
+ return ERR_PTR(-EINVAL);
+
+ rvring = &rvdev->vring[id];
+
+ addr = rvring->va;
+ len = rvring->len;
+
+ /* zero vring */
+ size = vring_size(len, rvring->align);
+ memset(addr, 0, size);
+
+ dev_dbg(rproc->dev, "vring%d: va %p qsz %d notifyid %d\n",
+ id, addr, len, rvring->notifyid);
+
+ /*
+ * Create the new vq, and tell virtio we're not interested in
+ * the 'weak' smp barriers, since we're talking with a real device.
+ */
+ vq = vring_new_virtqueue(len, rvring->align, vdev, false, addr,
+ rproc_virtio_notify, callback, name);
+ if (!vq) {
+ dev_err(rproc->dev, "vring_new_virtqueue %s failed\n", name);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ rvring->vq = vq;
+ vq->priv = rvring;
+
+ return vq;
+}
+
+static void rproc_virtio_del_vqs(struct virtio_device *vdev)
+{
+ struct virtqueue *vq, *n;
+ struct rproc *rproc = vdev_to_rproc(vdev);
+ struct rproc_vring *rvring;
+
+ /* power down the remote processor before deleting vqs */
+ rproc_shutdown(rproc);
+
+ list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
+ rvring = vq->priv;
+ rvring->vq = NULL;
+ vring_del_virtqueue(vq);
+ }
+}
+
+static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+ struct virtqueue *vqs[],
+ vq_callback_t *callbacks[],
+ const char *names[])
+{
+ struct rproc *rproc = vdev_to_rproc(vdev);
+ int i, ret;
+
+ for (i = 0; i < nvqs; ++i) {
+ vqs[i] = rp_find_vq(vdev, i, callbacks[i], names[i]);
+ if (IS_ERR(vqs[i])) {
+ ret = PTR_ERR(vqs[i]);
+ goto error;
+ }
+ }
+
+ /* now that the vqs are all set, boot the remote processor */
+ ret = rproc_boot(rproc);
+ if (ret) {
+ dev_err(rproc->dev, "rproc_boot() failed %d\n", ret);
+ goto error;
+ }
+
+ return 0;
+
+error:
+ rproc_virtio_del_vqs(vdev);
+ return ret;
+}
+
+/*
+ * We don't yet support real virtio status semantics.
+ *
+ * The plan is to provide this via the VDEV resource entry
+ * which is part of the firmware: this way the remote processor
+ * will be able to access the status values as set by us.
+ */
+static u8 rproc_virtio_get_status(struct virtio_device *vdev)
+{
+ return 0;
+}
+
+static void rproc_virtio_set_status(struct virtio_device *vdev, u8 status)
+{
+ dev_dbg(&vdev->dev, "status: %d\n", status);
+}
+
+static void rproc_virtio_reset(struct virtio_device *vdev)
+{
+ dev_dbg(&vdev->dev, "reset !\n");
+}
+
+/* provide the vdev features as retrieved from the firmware */
+static u32 rproc_virtio_get_features(struct virtio_device *vdev)
+{
+ struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
+
+ return rvdev->dfeatures;
+}
+
+static void rproc_virtio_finalize_features(struct virtio_device *vdev)
+{
+ struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
+
+ /* Give virtio_ring a chance to accept features */
+ vring_transport_features(vdev);
+
+ /*
+ * Remember the finalized features of our vdev, and provide it
+ * to the remote processor once it is powered on.
+ *
+ * Similarly to the status field, we don't expose yet the negotiated
+ * features to the remote processors at this point. This will be
+ * fixed as part of a small resource table overhaul and then an
+ * extension of the virtio resource entries.
+ */
+ rvdev->gfeatures = vdev->features[0];
+}
+
+static struct virtio_config_ops rproc_virtio_config_ops = {
+ .get_features = rproc_virtio_get_features,
+ .finalize_features = rproc_virtio_finalize_features,
+ .find_vqs = rproc_virtio_find_vqs,
+ .del_vqs = rproc_virtio_del_vqs,
+ .reset = rproc_virtio_reset,
+ .set_status = rproc_virtio_set_status,
+ .get_status = rproc_virtio_get_status,
+};
+
+/*
+ * This function is called whenever vdev is released, and is responsible
+ * to decrement the remote processor's refcount taken when vdev was
+ * added.
+ *
+ * Never call this function directly; it will be called by the driver
+ * core when needed.
+ */
+static void rproc_vdev_release(struct device *dev)
+{
+ struct virtio_device *vdev = dev_to_virtio(dev);
+ struct rproc *rproc = vdev_to_rproc(vdev);
+
+ kref_put(&rproc->refcount, rproc_release);
+}
+
+/**
+ * rproc_add_virtio_dev() - register an rproc-induced virtio device
+ * @rvdev: the remote vdev
+ * @id: virtio device id, as passed in from the vdev resource entry
+ *
+ * This function registers a virtio device. This vdev's parent is
+ * the rproc device.
+ *
+ * Returns 0 on success or an appropriate error value otherwise.
+ */
+int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id)
+{
+ struct rproc *rproc = rvdev->rproc;
+ struct device *dev = rproc->dev;
+ struct virtio_device *vdev = &rvdev->vdev;
+ int ret;
+
+ vdev->id.device = id;
+ vdev->config = &rproc_virtio_config_ops;
+ vdev->dev.parent = dev;
+ vdev->dev.release = rproc_vdev_release;
+
+ /*
+ * We're indirectly making a non-temporary copy of the rproc pointer
+ * here, because drivers probed with this vdev will indirectly
+ * access the wrapping rproc.
+ *
+ * Therefore we must increment the rproc refcount here, and decrement
+ * it _only_ when the vdev is released.
+ */
+ kref_get(&rproc->refcount);
+
+ ret = register_virtio_device(vdev);
+ if (ret) {
+ kref_put(&rproc->refcount, rproc_release);
+ dev_err(dev, "failed to register vdev: %d\n", ret);
+ goto out;
+ }
+
+ dev_info(dev, "registered %s (type %d)\n", dev_name(&vdev->dev), id);
+
+out:
+ return ret;
+}
+
+/**
+ * rproc_remove_virtio_dev() - remove an rproc-induced virtio device
+ * @rvdev: the remote vdev
+ *
+ * This function unregisters an existing virtio device.
+ */
+void rproc_remove_virtio_dev(struct rproc_vdev *rvdev)
+{
+ unregister_virtio_device(&rvdev->vdev);
+}
diff --git a/drivers/rpmsg/Kconfig b/drivers/rpmsg/Kconfig
new file mode 100644
index 000000000000..32aead65735a
--- /dev/null
+++ b/drivers/rpmsg/Kconfig
@@ -0,0 +1,10 @@
+menu "Rpmsg drivers (EXPERIMENTAL)"
+
+# RPMSG always gets selected by whoever wants it
+config RPMSG
+ tristate
+ select VIRTIO
+ select VIRTIO_RING
+ depends on EXPERIMENTAL
+
+endmenu
diff --git a/drivers/rpmsg/Makefile b/drivers/rpmsg/Makefile
new file mode 100644
index 000000000000..7617fcb8259f
--- /dev/null
+++ b/drivers/rpmsg/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_RPMSG) += virtio_rpmsg_bus.o
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
new file mode 100644
index 000000000000..75506ec2840e
--- /dev/null
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -0,0 +1,1054 @@
+/*
+ * Virtio-based remote processor messaging bus
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Ohad Ben-Cohen <ohad@wizery.com>
+ * Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/virtio.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_config.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/idr.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/rpmsg.h>
+#include <linux/mutex.h>
+
+/**
+ * struct virtproc_info - virtual remote processor state
+ * @vdev: the virtio device
+ * @rvq: rx virtqueue
+ * @svq: tx virtqueue
+ * @rbufs: kernel address of rx buffers
+ * @sbufs: kernel address of tx buffers
+ * @last_sbuf: index of last tx buffer used
+ * @bufs_dma: dma base addr of the buffers
+ * @tx_lock: protects svq, sbufs and sleepers, to allow concurrent senders.
+ * sending a message might require waking up a dozing remote
+ * processor, which involves sleeping, hence the mutex.
+ * @endpoints: idr of local endpoints, allows fast retrieval
+ * @endpoints_lock: lock of the endpoints set
+ * @sendq: wait queue of sending contexts waiting for a tx buffer
+ * @sleepers: number of senders that are waiting for a tx buffer
+ * @ns_ept: the bus's name service endpoint
+ *
+ * This structure stores the rpmsg state of a given virtio remote processor
+ * device (there might be several virtio proc devices for each physical
+ * remote processor).
+ */
+struct virtproc_info {
+ struct virtio_device *vdev;
+ struct virtqueue *rvq, *svq;
+ void *rbufs, *sbufs;
+ int last_sbuf;
+ dma_addr_t bufs_dma;
+ struct mutex tx_lock;
+ struct idr endpoints;
+ struct mutex endpoints_lock;
+ wait_queue_head_t sendq;
+ atomic_t sleepers;
+ struct rpmsg_endpoint *ns_ept;
+};
+
+/**
+ * struct rpmsg_channel_info - internal channel info representation
+ * @name: name of service
+ * @src: local address
+ * @dst: destination address
+ */
+struct rpmsg_channel_info {
+ char name[RPMSG_NAME_SIZE];
+ u32 src;
+ u32 dst;
+};
+
+#define to_rpmsg_channel(d) container_of(d, struct rpmsg_channel, dev)
+#define to_rpmsg_driver(d) container_of(d, struct rpmsg_driver, drv)
+
+/*
+ * We're allocating 512 buffers of 512 bytes for communications, and then
+ * using the first 256 buffers for RX, and the last 256 buffers for TX.
+ *
+ * Each buffer will have 16 bytes for the msg header and 496 bytes for
+ * the payload.
+ *
+ * This will require a total space of 256KB for the buffers.
+ *
+ * We might also want to add support for user-provided buffers in time.
+ * This will allow more flexibility in buffer sizes, and can also be used
+ * to achieve zero-copy messaging.
+ *
+ * Note that these numbers are purely a decision of this driver - we
+ * can change this without changing anything in the firmware of the remote
+ * processor.
+ */
+#define RPMSG_NUM_BUFS (512)
+#define RPMSG_BUF_SIZE (512)
+#define RPMSG_TOTAL_BUF_SPACE (RPMSG_NUM_BUFS * RPMSG_BUF_SIZE)
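
As a quick check of the arithmetic above, a minimal sketch (assuming the
rpmsg_hdr layout this series adds to include/linux/rpmsg.h: u32 src, u32 dst,
u32 reserved, u16 len, u16 flags, u8 data[0], i.e. a 16-byte header):

	/* sketch: compile-time verification of the buffer math above */
	static inline void rpmsg_check_buf_math(void)
	{
		BUILD_BUG_ON(RPMSG_TOTAL_BUF_SPACE != 256 * 1024);	/* 512 * 512 */
		BUILD_BUG_ON(sizeof(struct rpmsg_hdr) != 16);		/* assumed layout */
		BUILD_BUG_ON(RPMSG_BUF_SIZE - sizeof(struct rpmsg_hdr) != 496);
	}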
+
+/*
+ * Local addresses are dynamically allocated on-demand.
+ * We do not dynamically assign addresses from the low 1024 range,
+ * in order to reserve that address range for predefined services.
+ */
+#define RPMSG_RESERVED_ADDRESSES (1024)
+
+/* Address 53 is reserved for advertising remote services */
+#define RPMSG_NS_ADDR (53)
+
+/* sysfs show configuration fields */
+#define rpmsg_show_attr(field, path, format_string) \
+static ssize_t \
+field##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct rpmsg_channel *rpdev = to_rpmsg_channel(dev); \
+ \
+ return sprintf(buf, format_string, rpdev->path); \
+}
+
+/* for more info, see Documentation/ABI/testing/sysfs-bus-rpmsg */
+rpmsg_show_attr(name, id.name, "%s\n");
+rpmsg_show_attr(src, src, "0x%x\n");
+rpmsg_show_attr(dst, dst, "0x%x\n");
+rpmsg_show_attr(announce, announce ? "true" : "false", "%s\n");
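
For reference, the first invocation above expands to roughly this show routine:

	static ssize_t name_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
	{
		struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);

		/* prints the channel's service name through the sysfs 'name' file */
		return sprintf(buf, "%s\n", rpdev->id.name);
	}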
+
+/*
+ * Unique (and free running) index for rpmsg devices.
+ *
+ * Yeah, we're not recycling those numbers (yet?). It will be easy
+ * to change if/when we want to.
+ */
+static unsigned int rpmsg_dev_index;
+
+static ssize_t modalias_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);
+
+ return sprintf(buf, RPMSG_DEVICE_MODALIAS_FMT "\n", rpdev->id.name);
+}
+
+static struct device_attribute rpmsg_dev_attrs[] = {
+ __ATTR_RO(name),
+ __ATTR_RO(modalias),
+ __ATTR_RO(dst),
+ __ATTR_RO(src),
+ __ATTR_RO(announce),
+ __ATTR_NULL
+};
+
+/* rpmsg devices and drivers are matched using the service name */
+static inline int rpmsg_id_match(const struct rpmsg_channel *rpdev,
+ const struct rpmsg_device_id *id)
+{
+ return strncmp(id->name, rpdev->id.name, RPMSG_NAME_SIZE) == 0;
+}
+
+/* match rpmsg channel and rpmsg driver */
+static int rpmsg_dev_match(struct device *dev, struct device_driver *drv)
+{
+ struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);
+ struct rpmsg_driver *rpdrv = to_rpmsg_driver(drv);
+ const struct rpmsg_device_id *ids = rpdrv->id_table;
+ unsigned int i;
+
+ for (i = 0; ids[i].name[0]; i++)
+ if (rpmsg_id_match(rpdev, &ids[i]))
+ return 1;
+
+ return 0;
+}
+
+static int rpmsg_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);
+
+ return add_uevent_var(env, "MODALIAS=" RPMSG_DEVICE_MODALIAS_FMT,
+ rpdev->id.name);
+}
+
+/* for more info, see the documentation of rpmsg_create_ept() below */
+static struct rpmsg_endpoint *__rpmsg_create_ept(struct virtproc_info *vrp,
+ struct rpmsg_channel *rpdev, rpmsg_rx_cb_t cb,
+ void *priv, u32 addr)
+{
+ int err, tmpaddr, request;
+ struct rpmsg_endpoint *ept;
+ struct device *dev = rpdev ? &rpdev->dev : &vrp->vdev->dev;
+
+ if (!idr_pre_get(&vrp->endpoints, GFP_KERNEL))
+ return NULL;
+
+ ept = kzalloc(sizeof(*ept), GFP_KERNEL);
+ if (!ept) {
+ dev_err(dev, "failed to kzalloc a new ept\n");
+ return NULL;
+ }
+
+ ept->rpdev = rpdev;
+ ept->cb = cb;
+ ept->priv = priv;
+
+ /* do we need to allocate a local address? */
+ request = addr == RPMSG_ADDR_ANY ? RPMSG_RESERVED_ADDRESSES : addr;
+
+ mutex_lock(&vrp->endpoints_lock);
+
+ /* bind the endpoint to an rpmsg address (and allocate one if needed) */
+ err = idr_get_new_above(&vrp->endpoints, ept, request, &tmpaddr);
+ if (err) {
+ dev_err(dev, "idr_get_new_above failed: %d\n", err);
+ goto free_ept;
+ }
+
+ /* make sure the user's address request is fulfilled, if relevant */
+ if (addr != RPMSG_ADDR_ANY && tmpaddr != addr) {
+ dev_err(dev, "address 0x%x already in use\n", addr);
+ goto rem_idr;
+ }
+
+ ept->addr = tmpaddr;
+
+ mutex_unlock(&vrp->endpoints_lock);
+
+ return ept;
+
+rem_idr:
+ idr_remove(&vrp->endpoints, tmpaddr);
+free_ept:
+ mutex_unlock(&vrp->endpoints_lock);
+ kfree(ept);
+ return NULL;
+}
+
+/**
+ * rpmsg_create_ept() - create a new rpmsg_endpoint
+ * @rpdev: rpmsg channel device
+ * @cb: rx callback handler
+ * @priv: private data for the driver's use
+ * @addr: local rpmsg address to bind with @cb
+ *
+ * Every rpmsg address in the system is bound to an rx callback (so when
+ * inbound messages arrive, they are dispatched by the rpmsg bus using the
+ * appropriate callback handler) by means of an rpmsg_endpoint struct.
+ *
+ * This function allows drivers to create such an endpoint, and by that,
+ * bind a callback, and possibly some private data too, to an rpmsg address
+ * (either one that is known in advance, or one that will be dynamically
+ * assigned for them).
+ *
+ * Simple rpmsg drivers need not call rpmsg_create_ept, because an endpoint
+ * is already created for them when they are probed by the rpmsg bus
+ * (using the rx callback provided when they registered to the rpmsg bus).
+ *
+ * So things should just work for simple drivers: they already have an
+ * endpoint, their rx callback is bound to their rpmsg address, and when
+ * relevant inbound messages arrive (i.e. messages whose dst address
+ * equals the src address of their rpmsg channel), the driver's handler
+ * is invoked to process them.
+ *
+ * That said, more complicated drivers might need to allocate
+ * additional rpmsg addresses, and bind them to different rx callbacks.
+ * To accomplish that, those drivers need to call this function.
+ *
+ * Drivers should provide their @rpdev channel (so the new endpoint will
+ * belong to the same remote processor their channel belongs to), an rx
+ * callback function, optional private data (which is provided back when
+ * the rx callback is invoked), and an address they want to bind with the
+ * callback. If @addr is RPMSG_ADDR_ANY, then rpmsg_create_ept will
+ * dynamically assign them an available rpmsg address (drivers should
+ * have a very good reason not to use RPMSG_ADDR_ANY here).
+ *
+ * Returns a pointer to the endpoint on success, or NULL on error.
+ */
+struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_channel *rpdev,
+ rpmsg_rx_cb_t cb, void *priv, u32 addr)
+{
+ return __rpmsg_create_ept(rpdev->vrp, rpdev, cb, priv, addr);
+}
+EXPORT_SYMBOL(rpmsg_create_ept);
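
A sketch of the usage described above, from a driver's probe path (my_probe,
my_extra_cb and the dynamic address choice are hypothetical, not part of this
patch):

	/* hypothetical rx handler for messages arriving on the extra address */
	static void my_extra_cb(struct rpmsg_channel *rpdev, void *data, int len,
				void *priv, u32 src)
	{
		dev_info(&rpdev->dev, "extra ept: %d bytes from 0x%x\n", len, src);
	}

	static int my_probe(struct rpmsg_channel *rpdev)
	{
		struct rpmsg_endpoint *ept;

		/* let the bus dynamically assign an available local address */
		ept = rpmsg_create_ept(rpdev, my_extra_cb, NULL, RPMSG_ADDR_ANY);
		if (!ept)
			return -ENOMEM;

		return 0;
	}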
+
+/**
+ * __rpmsg_destroy_ept() - destroy an existing rpmsg endpoint
+ * @vrp: virtproc which owns this ept
+ * @ept: endpoint to destroy
+ *
+ * An internal function which destroys an ept without assuming it is
+ * bound to an rpmsg channel. This is needed for handling the internal
+ * name service endpoint, which isn't bound to an rpmsg channel.
+ * See also __rpmsg_create_ept().
+ */
+static void
+__rpmsg_destroy_ept(struct virtproc_info *vrp, struct rpmsg_endpoint *ept)
+{
+ mutex_lock(&vrp->endpoints_lock);
+ idr_remove(&vrp->endpoints, ept->addr);
+ mutex_unlock(&vrp->endpoints_lock);
+
+ kfree(ept);
+}
+
+/**
+ * rpmsg_destroy_ept() - destroy an existing rpmsg endpoint
+ * @ept: endpoint to destroy
+ *
+ * Should be used by drivers to destroy an rpmsg endpoint previously
+ * created with rpmsg_create_ept().
+ */
+void rpmsg_destroy_ept(struct rpmsg_endpoint *ept)
+{
+ __rpmsg_destroy_ept(ept->rpdev->vrp, ept);
+}
+EXPORT_SYMBOL(rpmsg_destroy_ept);
+
+/*
+ * when an rpmsg driver is probed with a channel, we seamlessly create
+ * an endpoint for it, binding its rx callback to a unique local rpmsg
+ * address.
+ *
+ * if we need to, we also announce this channel to the remote
+ * processor (needed in case the driver is exposing an rpmsg service).
+ */
+static int rpmsg_dev_probe(struct device *dev)
+{
+ struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);
+ struct rpmsg_driver *rpdrv = to_rpmsg_driver(rpdev->dev.driver);
+ struct virtproc_info *vrp = rpdev->vrp;
+ struct rpmsg_endpoint *ept;
+ int err;
+
+ ept = rpmsg_create_ept(rpdev, rpdrv->callback, NULL, rpdev->src);
+ if (!ept) {
+ dev_err(dev, "failed to create endpoint\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
+ rpdev->ept = ept;
+ rpdev->src = ept->addr;
+
+ err = rpdrv->probe(rpdev);
+ if (err) {
+ dev_err(dev, "%s: failed: %d\n", __func__, err);
+ rpmsg_destroy_ept(ept);
+ goto out;
+ }
+
+ /* need to tell remote processor's name service about this channel? */
+ if (rpdev->announce &&
+ virtio_has_feature(vrp->vdev, VIRTIO_RPMSG_F_NS)) {
+ struct rpmsg_ns_msg nsm;
+
+ strncpy(nsm.name, rpdev->id.name, RPMSG_NAME_SIZE);
+ nsm.addr = rpdev->src;
+ nsm.flags = RPMSG_NS_CREATE;
+
+ err = rpmsg_sendto(rpdev, &nsm, sizeof(nsm), RPMSG_NS_ADDR);
+ if (err)
+ dev_err(dev, "failed to announce service %d\n", err);
+ }
+
+out:
+ return err;
+}
+
+static int rpmsg_dev_remove(struct device *dev)
+{
+ struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);
+ struct rpmsg_driver *rpdrv = to_rpmsg_driver(rpdev->dev.driver);
+ struct virtproc_info *vrp = rpdev->vrp;
+ int err = 0;
+
+ /* tell remote processor's name service we're removing this channel */
+ if (rpdev->announce &&
+ virtio_has_feature(vrp->vdev, VIRTIO_RPMSG_F_NS)) {
+ struct rpmsg_ns_msg nsm;
+
+ strncpy(nsm.name, rpdev->id.name, RPMSG_NAME_SIZE);
+ nsm.addr = rpdev->src;
+ nsm.flags = RPMSG_NS_DESTROY;
+
+ err = rpmsg_sendto(rpdev, &nsm, sizeof(nsm), RPMSG_NS_ADDR);
+ if (err)
+ dev_err(dev, "failed to announce service %d\n", err);
+ }
+
+ rpdrv->remove(rpdev);
+
+ rpmsg_destroy_ept(rpdev->ept);
+
+ return err;
+}
+
+static struct bus_type rpmsg_bus = {
+ .name = "rpmsg",
+ .match = rpmsg_dev_match,
+ .dev_attrs = rpmsg_dev_attrs,
+ .uevent = rpmsg_uevent,
+ .probe = rpmsg_dev_probe,
+ .remove = rpmsg_dev_remove,
+};
+
+/**
+ * register_rpmsg_driver() - register an rpmsg driver with the rpmsg bus
+ * @rpdrv: pointer to a struct rpmsg_driver
+ *
+ * Returns 0 on success, and an appropriate error value on failure.
+ */
+int register_rpmsg_driver(struct rpmsg_driver *rpdrv)
+{
+ rpdrv->drv.bus = &rpmsg_bus;
+ return driver_register(&rpdrv->drv);
+}
+EXPORT_SYMBOL(register_rpmsg_driver);
+
+/**
+ * unregister_rpmsg_driver() - unregister an rpmsg driver from the rpmsg bus
+ * @rpdrv: pointer to a struct rpmsg_driver
+ *
+ * Returns 0 on success, and an appropriate error value on failure.
+ */
+void unregister_rpmsg_driver(struct rpmsg_driver *rpdrv)
+{
+ driver_unregister(&rpdrv->drv);
+}
+EXPORT_SYMBOL(unregister_rpmsg_driver);
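
Tying the two registration calls together, a minimal driver skeleton might
look like this sketch (the service name and the my_* symbols are hypothetical;
my_probe and my_extra_cb are reused from the sketch above):

	static void my_remove(struct rpmsg_channel *rpdev)
	{
		/* hypothetical cleanup; the bus destroys the default ept itself */
	}

	static struct rpmsg_device_id my_id_table[] = {
		{ .name = "my-test-service" },	/* hypothetical service name */
		{ },
	};

	static struct rpmsg_driver my_rpmsg_driver = {
		.drv.name	= KBUILD_MODNAME,
		.drv.owner	= THIS_MODULE,
		.id_table	= my_id_table,
		.probe		= my_probe,
		.callback	= my_extra_cb,	/* rx callback of the default ept */
		.remove		= my_remove,
	};

	static int __init my_init(void)
	{
		return register_rpmsg_driver(&my_rpmsg_driver);
	}
	module_init(my_init);

	static void __exit my_exit(void)
	{
		unregister_rpmsg_driver(&my_rpmsg_driver);
	}
	module_exit(my_exit);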
+
+static void rpmsg_release_device(struct device *dev)
+{
+ struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);
+
+ kfree(rpdev);
+}
+
+/*
+ * match an rpmsg channel with a channel info struct.
+ * this is used to make sure we're not creating rpmsg devices for channels
+ * that already exist.
+ */
+static int rpmsg_channel_match(struct device *dev, void *data)
+{
+ struct rpmsg_channel_info *chinfo = data;
+ struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);
+
+ if (chinfo->src != RPMSG_ADDR_ANY && chinfo->src != rpdev->src)
+ return 0;
+
+ if (chinfo->dst != RPMSG_ADDR_ANY && chinfo->dst != rpdev->dst)
+ return 0;
+
+ if (strncmp(chinfo->name, rpdev->id.name, RPMSG_NAME_SIZE))
+ return 0;
+
+ /* found a match! */
+ return 1;
+}
+
+/*
+ * create an rpmsg channel using its name and address info.
+ * this function will be used to create both static and dynamic
+ * channels.
+ */
+static struct rpmsg_channel *rpmsg_create_channel(struct virtproc_info *vrp,
+ struct rpmsg_channel_info *chinfo)
+{
+ struct rpmsg_channel *rpdev;
+ struct device *tmp, *dev = &vrp->vdev->dev;
+ int ret;
+
+ /* make sure a similar channel doesn't already exist */
+ tmp = device_find_child(dev, chinfo, rpmsg_channel_match);
+ if (tmp) {
+ /* decrement the matched device's refcount back */
+ put_device(tmp);
+ dev_err(dev, "channel %s:%x:%x already exist\n",
+ chinfo->name, chinfo->src, chinfo->dst);
+ return NULL;
+ }
+
+ rpdev = kzalloc(sizeof(struct rpmsg_channel), GFP_KERNEL);
+ if (!rpdev) {
+ pr_err("kzalloc failed\n");
+ return NULL;
+ }
+
+ rpdev->vrp = vrp;
+ rpdev->src = chinfo->src;
+ rpdev->dst = chinfo->dst;
+
+ /*
+ * rpmsg server channels have a predefined local address (for now),
+ * and their existence needs to be announced remotely
+ */
+ rpdev->announce = rpdev->src != RPMSG_ADDR_ANY ? true : false;
+
+ strncpy(rpdev->id.name, chinfo->name, RPMSG_NAME_SIZE);
+
+ /* very simple device indexing plumbing which is enough for now */
+ dev_set_name(&rpdev->dev, "rpmsg%d", rpmsg_dev_index++);
+
+ rpdev->dev.parent = &vrp->vdev->dev;
+ rpdev->dev.bus = &rpmsg_bus;
+ rpdev->dev.release = rpmsg_release_device;
+
+ ret = device_register(&rpdev->dev);
+ if (ret) {
+ dev_err(dev, "device_register failed: %d\n", ret);
+ put_device(&rpdev->dev);
+ return NULL;
+ }
+
+ return rpdev;
+}
+
+/*
+ * find an existing channel using its name + address properties,
+ * and destroy it
+ */
+static int rpmsg_destroy_channel(struct virtproc_info *vrp,
+ struct rpmsg_channel_info *chinfo)
+{
+ struct virtio_device *vdev = vrp->vdev;
+ struct device *dev;
+
+ dev = device_find_child(&vdev->dev, chinfo, rpmsg_channel_match);
+ if (!dev)
+ return -EINVAL;
+
+ device_unregister(dev);
+
+ put_device(dev);
+
+ return 0;
+}
+
+/* super simple buffer "allocator" that is just enough for now */
+static void *get_a_tx_buf(struct virtproc_info *vrp)
+{
+ unsigned int len;
+ void *ret;
+
+ /* support multiple concurrent senders */
+ mutex_lock(&vrp->tx_lock);
+
+ /*
+ * either pick the next unused tx buffer
+ * (half of our buffers are used for sending messages)
+ */
+ if (vrp->last_sbuf < RPMSG_NUM_BUFS / 2)
+ ret = vrp->sbufs + RPMSG_BUF_SIZE * vrp->last_sbuf++;
+ /* or recycle a used one */
+ else
+ ret = virtqueue_get_buf(vrp->svq, &len);
+
+ mutex_unlock(&vrp->tx_lock);
+
+ return ret;
+}
+
+/**
+ * rpmsg_upref_sleepers() - enable "tx-complete" interrupts, if needed
+ * @vrp: virtual remote processor state
+ *
+ * This function is called before a sender is blocked, waiting for
+ * a tx buffer to become available.
+ *
+ * If we already have blocking senders, this function merely increases
+ * the "sleepers" reference count, and exits.
+ *
+ * Otherwise, if this is the first sender to block, we also enable
+ * virtio's tx callbacks, so we'd be immediately notified when a tx
+ * buffer is consumed (we rely on virtio's tx callback in order
+ * to wake up sleeping senders as soon as a tx buffer is used by the
+ * remote processor).
+ */
+static void rpmsg_upref_sleepers(struct virtproc_info *vrp)
+{
+ /* support multiple concurrent senders */
+ mutex_lock(&vrp->tx_lock);
+
+ /* are we the first sleeping context waiting for tx buffers? */
+ if (atomic_inc_return(&vrp->sleepers) == 1)
+ /* enable "tx-complete" interrupts before dozing off */
+ virtqueue_enable_cb(vrp->svq);
+
+ mutex_unlock(&vrp->tx_lock);
+}
+
+/**
+ * rpmsg_downref_sleepers() - disable "tx-complete" interrupts, if needed
+ * @vrp: virtual remote processor state
+ *
+ * This function is called after a sender, that waited for a tx buffer
+ * to become available, is unblocked.
+ *
+ * If we still have blocking senders, this function merely decreases
+ * the "sleepers" reference count, and exits.
+ *
+ * Otherwise, if there are no more blocking senders, we also disable
+ * virtio's tx callbacks, to avoid the overhead incurred with handling
+ * those (now redundant) interrupts.
+ */
+static void rpmsg_downref_sleepers(struct virtproc_info *vrp)
+{
+ /* support multiple concurrent senders */
+ mutex_lock(&vrp->tx_lock);
+
+ /* are we the last sleeping context waiting for tx buffers? */
+ if (atomic_dec_and_test(&vrp->sleepers))
+ /* disable "tx-complete" interrupts */
+ virtqueue_disable_cb(vrp->svq);
+
+ mutex_unlock(&vrp->tx_lock);
+}
+
+/**
+ * rpmsg_send_offchannel_raw() - send a message across to the remote processor
+ * @rpdev: the rpmsg channel
+ * @src: source address
+ * @dst: destination address
+ * @data: payload of message
+ * @len: length of payload
+ * @wait: indicates whether caller should block in case no TX buffers available
+ *
+ * This function is the base implementation for all of the rpmsg sending API.
+ *
+ * It will send @data of length @len to @dst, and say it's from @src. The
+ * message will be sent to the remote processor which the @rpdev channel
+ * belongs to.
+ *
+ * The message is sent using one of the TX buffers that are available for
+ * communication with this remote processor.
+ *
+ * If @wait is true, the caller will be blocked until either a TX buffer is
+ * available, or 15 seconds elapse (we don't want callers to
+ * sleep indefinitely due to misbehaving remote processors), in which
+ * case -ERESTARTSYS is returned. The number '15' itself was picked
+ * arbitrarily; there's little point in asking drivers to provide a timeout
+ * value themselves.
+ *
+ * Otherwise, if @wait is false, and there are no TX buffers available,
+ * the function will immediately fail, and -ENOMEM will be returned.
+ *
+ * Normally drivers shouldn't use this function directly; instead, drivers
+ * should use the appropriate rpmsg_{try}send{to, _offchannel} API
+ * (see include/linux/rpmsg.h).
+ *
+ * Returns 0 on success and an appropriate error value on failure.
+ */
+int rpmsg_send_offchannel_raw(struct rpmsg_channel *rpdev, u32 src, u32 dst,
+ void *data, int len, bool wait)
+{
+ struct virtproc_info *vrp = rpdev->vrp;
+ struct device *dev = &rpdev->dev;
+ struct scatterlist sg;
+ struct rpmsg_hdr *msg;
+ int err;
+
+ /* broadcasting isn't allowed */
+ if (src == RPMSG_ADDR_ANY || dst == RPMSG_ADDR_ANY) {
+ dev_err(dev, "invalid addr (src 0x%x, dst 0x%x)\n", src, dst);
+ return -EINVAL;
+ }
+
+ /*
+ * We currently use fixed-sized buffers, and therefore the payload
+ * length is limited.
+ *
+ * One of the possible improvements here is either to support
+ * user-provided buffers (and then we can also support zero-copy
+ * messaging), or to improve the buffer allocator, to support
+ * variable-length buffer sizes.
+ */
+ if (len > RPMSG_BUF_SIZE - sizeof(struct rpmsg_hdr)) {
+ dev_err(dev, "message is too big (%d)\n", len);
+ return -EMSGSIZE;
+ }
+
+ /* grab a buffer */
+ msg = get_a_tx_buf(vrp);
+ if (!msg && !wait)
+ return -ENOMEM;
+
+ /* no free buffer? wait for one (but bail after 15 seconds) */
+ while (!msg) {
+ /* enable "tx-complete" interrupts, if not already enabled */
+ rpmsg_upref_sleepers(vrp);
+
+ /*
+ * sleep until a free buffer is available or 15 secs elapse.
+ * the timeout period is not configurable because there's
+ * little point in asking drivers to specify that.
+ * if later this happens to be required, it'd be easy to add.
+ */
+ err = wait_event_interruptible_timeout(vrp->sendq,
+ (msg = get_a_tx_buf(vrp)),
+ msecs_to_jiffies(15000));
+
+ /* disable "tx-complete" interrupts if we're the last sleeper */
+ rpmsg_downref_sleepers(vrp);
+
+ /* timeout? */
+ if (!err) {
+ dev_err(dev, "timeout waiting for a tx buffer\n");
+ return -ERESTARTSYS;
+ }
+ }
+
+ msg->len = len;
+ msg->flags = 0;
+ msg->src = src;
+ msg->dst = dst;
+ msg->reserved = 0;
+ memcpy(msg->data, data, len);
+
+ dev_dbg(dev, "TX From 0x%x, To 0x%x, Len %d, Flags %d, Reserved %d\n",
+ msg->src, msg->dst, msg->len,
+ msg->flags, msg->reserved);
+ print_hex_dump(KERN_DEBUG, "rpmsg_virtio TX: ", DUMP_PREFIX_NONE, 16, 1,
+ msg, sizeof(*msg) + msg->len, true);
+
+ sg_init_one(&sg, msg, sizeof(*msg) + len);
+
+ mutex_lock(&vrp->tx_lock);
+
+ /* add message to the remote processor's virtqueue */
+ err = virtqueue_add_buf(vrp->svq, &sg, 1, 0, msg, GFP_KERNEL);
+ if (err < 0) {
+ /*
+ * need to reclaim the buffer here, otherwise it's lost
+ * (memory won't leak, but rpmsg won't use it again for TX).
+ * this will wait for a buffer management overhaul.
+ */
+ dev_err(dev, "virtqueue_add_buf failed: %d\n", err);
+ goto out;
+ }
+
+ /* tell the remote processor it has a pending message to read */
+ virtqueue_kick(vrp->svq);
+
+ err = 0;
+out:
+ mutex_unlock(&vrp->tx_lock);
+ return err;
+}
+EXPORT_SYMBOL(rpmsg_send_offchannel_raw);
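
For context, the driver-facing send API mentioned above boils down to thin
inline wrappers around this function; a sketch of two of them (the canonical
definitions live in include/linux/rpmsg.h):

	static inline int rpmsg_send(struct rpmsg_channel *rpdev, void *data, int len)
	{
		u32 src = rpdev->src, dst = rpdev->dst;

		/* block (for up to 15 seconds) until a tx buffer frees up */
		return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, true);
	}

	static inline int rpmsg_trysend(struct rpmsg_channel *rpdev, void *data, int len)
	{
		u32 src = rpdev->src, dst = rpdev->dst;

		/* fail with -ENOMEM instead of blocking */
		return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, false);
	}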
+
+/* called when an rx buffer is used, and it's time to digest a message */
+static void rpmsg_recv_done(struct virtqueue *rvq)
+{
+ struct rpmsg_hdr *msg;
+ unsigned int len;
+ struct rpmsg_endpoint *ept;
+ struct scatterlist sg;
+ struct virtproc_info *vrp = rvq->vdev->priv;
+ struct device *dev = &rvq->vdev->dev;
+ int err;
+
+ msg = virtqueue_get_buf(rvq, &len);
+ if (!msg) {
+ dev_err(dev, "uhm, incoming signal, but no used buffer ?\n");
+ return;
+ }
+
+ dev_dbg(dev, "From: 0x%x, To: 0x%x, Len: %d, Flags: %d, Reserved: %d\n",
+ msg->src, msg->dst, msg->len,
+ msg->flags, msg->reserved);
+ print_hex_dump(KERN_DEBUG, "rpmsg_virtio RX: ", DUMP_PREFIX_NONE, 16, 1,
+ msg, sizeof(*msg) + msg->len, true);
+
+ /*
+ * We currently use fixed-sized buffers, so trivially sanitize
+ * the reported payload length.
+ */
+ if (len > RPMSG_BUF_SIZE ||
+ msg->len > (len - sizeof(struct rpmsg_hdr))) {
+ dev_warn(dev, "inbound msg too big: (%d, %d)\n", len, msg->len);
+ return;
+ }
+
+ /* use the dst addr to fetch the callback of the appropriate user */
+ mutex_lock(&vrp->endpoints_lock);
+ ept = idr_find(&vrp->endpoints, msg->dst);
+ mutex_unlock(&vrp->endpoints_lock);
+
+ if (ept && ept->cb)
+ ept->cb(ept->rpdev, msg->data, msg->len, ept->priv, msg->src);
+ else
+ dev_warn(dev, "msg received with no recepient\n");
+
+ /* publish the real size of the buffer */
+ sg_init_one(&sg, msg, RPMSG_BUF_SIZE);
+
+ /* add the buffer back to the remote processor's virtqueue */
+ err = virtqueue_add_buf(vrp->rvq, &sg, 0, 1, msg, GFP_KERNEL);
+ if (err < 0) {
+ dev_err(dev, "failed to add a virtqueue buffer: %d\n", err);
+ return;
+ }
+
+ /* tell the remote processor we added another available rx buffer */
+ virtqueue_kick(vrp->rvq);
+}
+
+/*
+ * This is invoked whenever the remote processor has completed processing
+ * a TX msg we just sent it, and the buffer is put back on the used ring.
+ *
+ * Normally, though, we suppress this "tx complete" interrupt in order to
+ * avoid the incurred overhead.
+ */
+static void rpmsg_xmit_done(struct virtqueue *svq)
+{
+ struct virtproc_info *vrp = svq->vdev->priv;
+
+ dev_dbg(&svq->vdev->dev, "%s\n", __func__);
+
+ /* wake up potential senders that are waiting for a tx buffer */
+ wake_up_interruptible(&vrp->sendq);
+}
+
+/* invoked when a name service announcement arrives */
+static void rpmsg_ns_cb(struct rpmsg_channel *rpdev, void *data, int len,
+ void *priv, u32 src)
+{
+ struct rpmsg_ns_msg *msg = data;
+ struct rpmsg_channel *newch;
+ struct rpmsg_channel_info chinfo;
+ struct virtproc_info *vrp = priv;
+ struct device *dev = &vrp->vdev->dev;
+ int ret;
+
+ print_hex_dump(KERN_DEBUG, "NS announcement: ",
+ DUMP_PREFIX_NONE, 16, 1,
+ data, len, true);
+
+ if (len != sizeof(*msg)) {
+ dev_err(dev, "malformed ns msg (%d)\n", len);
+ return;
+ }
+
+ /*
+ * the name service ept does _not_ belong to a real rpmsg channel,
+ * and is handled by the rpmsg bus itself.
+ * for sanity reasons, make sure a valid rpdev has _not_ sneaked
+ * in somehow.
+ */
+ if (rpdev) {
+ dev_err(dev, "anomaly: ns ept has an rpdev handle\n");
+ return;
+ }
+
+ /* don't trust the remote processor to null-terminate the name */
+ msg->name[RPMSG_NAME_SIZE - 1] = '\0';
+
+ dev_info(dev, "%sing channel %s addr 0x%x\n",
+ msg->flags & RPMSG_NS_DESTROY ? "destroy" : "creat",
+ msg->name, msg->addr);
+
+ strncpy(chinfo.name, msg->name, sizeof(chinfo.name));
+ chinfo.src = RPMSG_ADDR_ANY;
+ chinfo.dst = msg->addr;
+
+ if (msg->flags & RPMSG_NS_DESTROY) {
+ ret = rpmsg_destroy_channel(vrp, &chinfo);
+ if (ret)
+ dev_err(dev, "rpmsg_destroy_channel failed: %d\n", ret);
+ } else {
+ newch = rpmsg_create_channel(vrp, &chinfo);
+ if (!newch)
+ dev_err(dev, "rpmsg_create_channel failed\n");
+ }
+}
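
For reference, a sketch of the name service message layout parsed above, as
declared by this series in include/linux/rpmsg.h:

	struct rpmsg_ns_msg {
		char name[RPMSG_NAME_SIZE];	/* name of the remote service */
		u32 addr;			/* address of the remote service */
		u32 flags;			/* RPMSG_NS_CREATE or RPMSG_NS_DESTROY */
	} __packed;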
+
+static int rpmsg_probe(struct virtio_device *vdev)
+{
+ vq_callback_t *vq_cbs[] = { rpmsg_recv_done, rpmsg_xmit_done };
+ const char *names[] = { "input", "output" };
+ struct virtqueue *vqs[2];
+ struct virtproc_info *vrp;
+ void *bufs_va;
+ int err = 0, i;
+
+ vrp = kzalloc(sizeof(*vrp), GFP_KERNEL);
+ if (!vrp)
+ return -ENOMEM;
+
+ vrp->vdev = vdev;
+
+ idr_init(&vrp->endpoints);
+ mutex_init(&vrp->endpoints_lock);
+ mutex_init(&vrp->tx_lock);
+ init_waitqueue_head(&vrp->sendq);
+
+ /* We expect two virtqueues, rx and tx (and in this order) */
+ err = vdev->config->find_vqs(vdev, 2, vqs, vq_cbs, names);
+ if (err)
+ goto free_vrp;
+
+ vrp->rvq = vqs[0];
+ vrp->svq = vqs[1];
+
+ /* allocate coherent memory for the buffers */
+ bufs_va = dma_alloc_coherent(vdev->dev.parent, RPMSG_TOTAL_BUF_SPACE,
+ &vrp->bufs_dma, GFP_KERNEL);
+ if (!bufs_va) {
+ err = -ENOMEM;
+ goto vqs_del;
+ }
+
+ dev_dbg(&vdev->dev, "buffers: va %p, dma 0x%llx\n", bufs_va,
+ (unsigned long long)vrp->bufs_dma);
+
+ /* half of the buffers is dedicated for RX */
+ vrp->rbufs = bufs_va;
+
+ /* and half is dedicated for TX */
+ vrp->sbufs = bufs_va + RPMSG_TOTAL_BUF_SPACE / 2;
+
+ /* set up the receive buffers */
+ for (i = 0; i < RPMSG_NUM_BUFS / 2; i++) {
+ struct scatterlist sg;
+ void *cpu_addr = vrp->rbufs + i * RPMSG_BUF_SIZE;
+
+ sg_init_one(&sg, cpu_addr, RPMSG_BUF_SIZE);
+
+ err = virtqueue_add_buf(vrp->rvq, &sg, 0, 1, cpu_addr,
+ GFP_KERNEL);
+ WARN_ON(err < 0); /* sanity check; this can't really happen */
+ }
+
+ /* suppress "tx-complete" interrupts */
+ virtqueue_disable_cb(vrp->svq);
+
+ vdev->priv = vrp;
+
+ /* if supported by the remote processor, enable the name service */
+ if (virtio_has_feature(vdev, VIRTIO_RPMSG_F_NS)) {
+ /* a dedicated endpoint handles the name service msgs */
+ vrp->ns_ept = __rpmsg_create_ept(vrp, NULL, rpmsg_ns_cb,
+ vrp, RPMSG_NS_ADDR);
+ if (!vrp->ns_ept) {
+ dev_err(&vdev->dev, "failed to create the ns ept\n");
+ err = -ENOMEM;
+ goto free_coherent;
+ }
+ }
+
+ /* tell the remote processor it can start sending messages */
+ virtqueue_kick(vrp->rvq);
+
+ dev_info(&vdev->dev, "rpmsg host is online\n");
+
+ return 0;
+
+free_coherent:
+ dma_free_coherent(vdev->dev.parent, RPMSG_TOTAL_BUF_SPACE, bufs_va,
+ vrp->bufs_dma);
+vqs_del:
+ vdev->config->del_vqs(vrp->vdev);
+free_vrp:
+ kfree(vrp);
+ return err;
+}
+
+static int rpmsg_remove_device(struct device *dev, void *data)
+{
+ device_unregister(dev);
+
+ return 0;
+}
+
+static void __devexit rpmsg_remove(struct virtio_device *vdev)
+{
+ struct virtproc_info *vrp = vdev->priv;
+ int ret;
+
+ vdev->config->reset(vdev);
+
+ ret = device_for_each_child(&vdev->dev, NULL, rpmsg_remove_device);
+ if (ret)
+ dev_warn(&vdev->dev, "can't remove rpmsg device: %d\n", ret);
+
+ if (vrp->ns_ept)
+ __rpmsg_destroy_ept(vrp, vrp->ns_ept);
+
+ idr_remove_all(&vrp->endpoints);
+ idr_destroy(&vrp->endpoints);
+
+ vdev->config->del_vqs(vrp->vdev);
+
+ dma_free_coherent(vdev->dev.parent, RPMSG_TOTAL_BUF_SPACE,
+ vrp->rbufs, vrp->bufs_dma);
+
+ kfree(vrp);
+}
+
+static struct virtio_device_id id_table[] = {
+ { VIRTIO_ID_RPMSG, VIRTIO_DEV_ANY_ID },
+ { 0 },
+};
+
+static unsigned int features[] = {
+ VIRTIO_RPMSG_F_NS,
+};
+
+static struct virtio_driver virtio_ipc_driver = {
+ .feature_table = features,
+ .feature_table_size = ARRAY_SIZE(features),
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .id_table = id_table,
+ .probe = rpmsg_probe,
+ .remove = __devexit_p(rpmsg_remove),
+};
+
+static int __init rpmsg_init(void)
+{
+ int ret;
+
+ ret = bus_register(&rpmsg_bus);
+ if (ret) {
+ pr_err("failed to register rpmsg bus: %d\n", ret);
+ return ret;
+ }
+
+ ret = register_virtio_driver(&virtio_ipc_driver);
+ if (ret) {
+ pr_err("failed to register virtio driver: %d\n", ret);
+ bus_unregister(&rpmsg_bus);
+ }
+
+ return ret;
+}
+module_init(rpmsg_init);
+
+static void __exit rpmsg_fini(void)
+{
+ unregister_virtio_driver(&virtio_ipc_driver);
+ bus_unregister(&rpmsg_bus);
+}
+module_exit(rpmsg_fini);
+
+MODULE_DEVICE_TABLE(virtio, id_table);
+MODULE_DESCRIPTION("Virtio-based remote processor messaging bus");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 4f9fb25f945b..8c8377d50c4c 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -755,7 +755,7 @@ config HAVE_S3C_RTC
config RTC_DRV_S3C
tristate "Samsung S3C series SoC RTC"
- depends on ARCH_S3C2410 || ARCH_S3C64XX || HAVE_S3C_RTC
+ depends on ARCH_S3C64XX || HAVE_S3C_RTC
help
RTC (Realtime Clock) driver for the clock inbuilt into the
Samsung S3C24XX series of SoCs. This can provide periodic
@@ -780,8 +780,8 @@ config RTC_DRV_EP93XX
will be called rtc-ep93xx.
config RTC_DRV_SA1100
- tristate "SA11x0/PXA2xx"
- depends on ARCH_SA1100 || ARCH_PXA
+ tristate "SA11x0/PXA2xx/PXA910"
+ depends on ARCH_SA1100 || ARCH_PXA || ARCH_MMP
help
If you say Y here you will get access to the real time clock
built into your SA11x0 or PXA2xx CPU.
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index 274a0aafe42b..831868904e02 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -57,6 +57,7 @@ struct sam9_rtc {
void __iomem *rtt;
struct rtc_device *rtcdev;
u32 imr;
+ void __iomem *gpbr;
};
#define rtt_readl(rtc, field) \
@@ -65,9 +66,9 @@ struct sam9_rtc {
__raw_writel((val), (rtc)->rtt + AT91_RTT_ ## field)
#define gpbr_readl(rtc) \
- at91_sys_read(AT91_GPBR + 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR)
+ __raw_readl((rtc)->gpbr)
#define gpbr_writel(rtc, val) \
- at91_sys_write(AT91_GPBR + 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR, (val))
+ __raw_writel((val), (rtc)->gpbr)
/*
* Read current time and date in RTC
@@ -287,16 +288,19 @@ static const struct rtc_class_ops at91_rtc_ops = {
/*
* Initialize and install RTC driver
*/
-static int __init at91_rtc_probe(struct platform_device *pdev)
+static int __devinit at91_rtc_probe(struct platform_device *pdev)
{
- struct resource *r;
+ struct resource *r, *r_gpbr;
struct sam9_rtc *rtc;
int ret;
u32 mr;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r)
+ r_gpbr = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!r || !r_gpbr) {
+ dev_err(&pdev->dev, "need 2 ressources\n");
return -ENODEV;
+ }
rtc = kzalloc(sizeof *rtc, GFP_KERNEL);
if (!rtc)
@@ -314,6 +318,13 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
goto fail;
}
+ rtc->gpbr = ioremap(r_gpbr->start, resource_size(r_gpbr));
+ if (!rtc->gpbr) {
+ dev_err(&pdev->dev, "failed to map gpbr registers, aborting.\n");
+ ret = -ENOMEM;
+ goto fail_gpbr;
+ }
+
mr = rtt_readl(rtc, MR);
/* unless RTT is counting at 1 Hz, re-initialize it */
@@ -340,7 +351,7 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
if (ret) {
dev_dbg(&pdev->dev, "can't share IRQ %d?\n", AT91_ID_SYS);
rtc_device_unregister(rtc->rtcdev);
- goto fail;
+ goto fail_register;
}
/* NOTE: sam9260 rev A silicon has a ROM bug which resets the
@@ -356,6 +367,8 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
return 0;
fail_register:
+ iounmap(rtc->gpbr);
+fail_gpbr:
iounmap(rtc->rtt);
fail:
platform_set_drvdata(pdev, NULL);
@@ -366,7 +379,7 @@ fail:
/*
* Disable and remove the RTC driver
*/
-static int __exit at91_rtc_remove(struct platform_device *pdev)
+static int __devexit at91_rtc_remove(struct platform_device *pdev)
{
struct sam9_rtc *rtc = platform_get_drvdata(pdev);
u32 mr = rtt_readl(rtc, MR);
@@ -377,6 +390,7 @@ static int __exit at91_rtc_remove(struct platform_device *pdev)
rtc_device_unregister(rtc->rtcdev);
+ iounmap(rtc->gpbr);
iounmap(rtc->rtt);
platform_set_drvdata(pdev, NULL);
kfree(rtc);
@@ -440,63 +454,20 @@ static int at91_rtc_resume(struct platform_device *pdev)
#endif
static struct platform_driver at91_rtc_driver = {
- .driver.name = "rtc-at91sam9",
- .driver.owner = THIS_MODULE,
- .remove = __exit_p(at91_rtc_remove),
+ .probe = at91_rtc_probe,
+ .remove = __devexit_p(at91_rtc_remove),
.shutdown = at91_rtc_shutdown,
.suspend = at91_rtc_suspend,
.resume = at91_rtc_resume,
+ .driver = {
+ .name = "rtc-at91sam9",
+ .owner = THIS_MODULE,
+ },
};
-/* Chips can have more than one RTT module, and they can be used for more
- * than just RTCs. So we can't just register as "the" RTT driver.
- *
- * A normal approach in such cases is to create a library to allocate and
- * free the modules. Here we just use bus_find_device() as like such a
- * library, binding directly ... no runtime "library" footprint is needed.
- */
-static int __init at91_rtc_match(struct device *dev, void *v)
-{
- struct platform_device *pdev = to_platform_device(dev);
- int ret;
-
- /* continue searching if this isn't the RTT we need */
- if (strcmp("at91_rtt", pdev->name) != 0
- || pdev->id != CONFIG_RTC_DRV_AT91SAM9_RTT)
- goto fail;
-
- /* else we found it ... but fail unless we can bind to the RTC driver */
- if (dev->driver) {
- dev_dbg(dev, "busy, can't use as RTC!\n");
- goto fail;
- }
- dev->driver = &at91_rtc_driver.driver;
- if (device_attach(dev) == 0) {
- dev_dbg(dev, "can't attach RTC!\n");
- goto fail;
- }
- ret = at91_rtc_probe(pdev);
- if (ret == 0)
- return true;
-
- dev_dbg(dev, "RTC probe err %d!\n", ret);
-fail:
- return false;
-}
-
static int __init at91_rtc_init(void)
{
- int status;
- struct device *rtc;
-
- status = platform_driver_register(&at91_rtc_driver);
- if (status)
- return status;
- rtc = bus_find_device(&platform_bus_type, NULL,
- NULL, at91_rtc_match);
- if (!rtc)
- platform_driver_unregister(&at91_rtc_driver);
- return rtc ? 0 : -ENODEV;
+ return platform_driver_register(&at91_rtc_driver);
}
module_init(at91_rtc_init);
diff --git a/drivers/rtc/rtc-mv.c b/drivers/rtc/rtc-mv.c
index 1300962486d1..b2185f4255aa 100644
--- a/drivers/rtc/rtc-mv.c
+++ b/drivers/rtc/rtc-mv.c
@@ -12,6 +12,7 @@
#include <linux/bcd.h>
#include <linux/io.h>
#include <linux/platform_device.h>
+#include <linux/of.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/module.h>
@@ -294,11 +295,19 @@ static int __exit mv_rtc_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_OF
+static struct of_device_id rtc_mv_of_match_table[] = {
+ { .compatible = "mrvl,orion-rtc", },
+ {}
+};
+#endif
+
static struct platform_driver mv_rtc_driver = {
.remove = __exit_p(mv_rtc_remove),
.driver = {
.name = "rtc-mv",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(rtc_mv_of_match_table),
},
};
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index c543f6f1eec2..9ccea134a996 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -35,6 +35,8 @@
enum s3c_cpu_type {
TYPE_S3C2410,
+ TYPE_S3C2416,
+ TYPE_S3C2443,
TYPE_S3C64XX,
};
@@ -132,6 +134,7 @@ static int s3c_rtc_setfreq(struct device *dev, int freq)
struct platform_device *pdev = to_platform_device(dev);
struct rtc_device *rtc_dev = platform_get_drvdata(pdev);
unsigned int tmp = 0;
+ int val;
if (!is_power_of_2(freq))
return -EINVAL;
@@ -139,12 +142,22 @@ static int s3c_rtc_setfreq(struct device *dev, int freq)
clk_enable(rtc_clk);
spin_lock_irq(&s3c_rtc_pie_lock);
- if (s3c_rtc_cpu_type == TYPE_S3C2410) {
+ if (s3c_rtc_cpu_type != TYPE_S3C64XX) {
tmp = readb(s3c_rtc_base + S3C2410_TICNT);
tmp &= S3C2410_TICNT_ENABLE;
}
- tmp |= (rtc_dev->max_user_freq / freq)-1;
+ val = (rtc_dev->max_user_freq / freq) - 1;
+
+ if (s3c_rtc_cpu_type == TYPE_S3C2416 || s3c_rtc_cpu_type == TYPE_S3C2443) {
+ tmp |= S3C2443_TICNT_PART(val);
+ writel(S3C2443_TICNT1_PART(val), s3c_rtc_base + S3C2443_TICNT1);
+
+ if (s3c_rtc_cpu_type == TYPE_S3C2416)
+ writel(S3C2416_TICNT2_PART(val), s3c_rtc_base + S3C2416_TICNT2);
+ } else {
+ tmp |= val;
+ }
writel(tmp, s3c_rtc_base + S3C2410_TICNT);
spin_unlock_irq(&s3c_rtc_pie_lock);
@@ -371,7 +384,7 @@ static void s3c_rtc_enable(struct platform_device *pdev, int en)
tmp &= ~S3C2410_RTCCON_RTCEN;
writew(tmp, base + S3C2410_RTCCON);
- if (s3c_rtc_cpu_type == TYPE_S3C2410) {
+ if (s3c_rtc_cpu_type != TYPE_S3C64XX) {
tmp = readb(base + S3C2410_TICNT);
tmp &= ~S3C2410_TICNT_ENABLE;
writeb(tmp, base + S3C2410_TICNT);
@@ -428,12 +441,27 @@ static int __devexit s3c_rtc_remove(struct platform_device *dev)
return 0;
}
+static const struct of_device_id s3c_rtc_dt_match[];
+
+static inline int s3c_rtc_get_driver_data(struct platform_device *pdev)
+{
+#ifdef CONFIG_OF
+ if (pdev->dev.of_node) {
+ const struct of_device_id *match;
+ match = of_match_node(s3c_rtc_dt_match, pdev->dev.of_node);
+ return (int)match->data;
+ }
+#endif
+ return platform_get_device_id(pdev)->driver_data;
+}
+
static int __devinit s3c_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtc;
struct rtc_time rtc_tm;
struct resource *res;
int ret;
+ int tmp;
pr_debug("%s: probe=%p\n", __func__, pdev);
@@ -508,13 +536,7 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
goto err_nortc;
}
-#ifdef CONFIG_OF
- if (pdev->dev.of_node)
- s3c_rtc_cpu_type = of_device_is_compatible(pdev->dev.of_node,
- "samsung,s3c6410-rtc") ? TYPE_S3C64XX : TYPE_S3C2410;
- else
-#endif
- s3c_rtc_cpu_type = platform_get_device_id(pdev)->driver_data;
+ s3c_rtc_cpu_type = s3c_rtc_get_driver_data(pdev);
/* Check RTC Time */
@@ -533,11 +555,17 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "warning: invalid RTC value so initializing it\n");
}
- if (s3c_rtc_cpu_type == TYPE_S3C64XX)
+ if (s3c_rtc_cpu_type != TYPE_S3C2410)
rtc->max_user_freq = 32768;
else
rtc->max_user_freq = 128;
+ if (s3c_rtc_cpu_type == TYPE_S3C2416 || s3c_rtc_cpu_type == TYPE_S3C2443) {
+ tmp = readw(s3c_rtc_base + S3C2410_RTCCON);
+ tmp |= S3C2443_RTCCON_TICSEL;
+ writew(tmp, s3c_rtc_base + S3C2410_RTCCON);
+ }
+
platform_set_drvdata(pdev, rtc);
s3c_rtc_setfreq(&pdev->dev, 1);
@@ -638,8 +666,19 @@ static int s3c_rtc_resume(struct platform_device *pdev)
#ifdef CONFIG_OF
static const struct of_device_id s3c_rtc_dt_match[] = {
- { .compatible = "samsung,s3c2410-rtc" },
- { .compatible = "samsung,s3c6410-rtc" },
+ {
+ .compatible = "samsung,s3c2410-rtc"
+ .data = TYPE_S3C2410,
+ }, {
+ .compatible = "samsung,s3c2416-rtc"
+ .data = TYPE_S3C2416,
+ }, {
+ .compatible = "samsung,s3c2443-rtc"
+ .data = TYPE_S3C2443,
+ }, {
+ .compatible = "samsung,s3c6410-rtc"
+ .data = TYPE_S3C64XX,
+ },
{},
};
MODULE_DEVICE_TABLE(of, s3c_rtc_dt_match);
@@ -652,6 +691,12 @@ static struct platform_device_id s3c_rtc_driver_ids[] = {
.name = "s3c2410-rtc",
.driver_data = TYPE_S3C2410,
}, {
+ .name = "s3c2416-rtc",
+ .driver_data = TYPE_S3C2416,
+ }, {
+ .name = "s3c2443-rtc",
+ .driver_data = TYPE_S3C2443,
+ }, {
.name = "s3c64xx-rtc",
.driver_data = TYPE_S3C64XX,
},
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index fb758db9d0f4..4940fa8c4e10 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -23,94 +23,44 @@
#include <linux/platform_device.h>
#include <linux/module.h>
+#include <linux/clk.h>
#include <linux/rtc.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
+#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/of.h>
#include <linux/pm.h>
#include <linux/bitops.h>
#include <mach/hardware.h>
-#include <asm/irq.h>
+#include <mach/irqs.h>
-#ifdef CONFIG_ARCH_PXA
+#if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP)
#include <mach/regs-rtc.h>
#endif
#define RTC_DEF_DIVIDER (32768 - 1)
#define RTC_DEF_TRIM 0
-
-static const unsigned long RTC_FREQ = 1024;
-static struct rtc_time rtc_alarm;
-static DEFINE_SPINLOCK(sa1100_rtc_lock);
-
-static inline int rtc_periodic_alarm(struct rtc_time *tm)
-{
- return (tm->tm_year == -1) ||
- ((unsigned)tm->tm_mon >= 12) ||
- ((unsigned)(tm->tm_mday - 1) >= 31) ||
- ((unsigned)tm->tm_hour > 23) ||
- ((unsigned)tm->tm_min > 59) ||
- ((unsigned)tm->tm_sec > 59);
-}
-
-/*
- * Calculate the next alarm time given the requested alarm time mask
- * and the current time.
- */
-static void rtc_next_alarm_time(struct rtc_time *next, struct rtc_time *now,
- struct rtc_time *alrm)
-{
- unsigned long next_time;
- unsigned long now_time;
-
- next->tm_year = now->tm_year;
- next->tm_mon = now->tm_mon;
- next->tm_mday = now->tm_mday;
- next->tm_hour = alrm->tm_hour;
- next->tm_min = alrm->tm_min;
- next->tm_sec = alrm->tm_sec;
-
- rtc_tm_to_time(now, &now_time);
- rtc_tm_to_time(next, &next_time);
-
- if (next_time < now_time) {
- /* Advance one day */
- next_time += 60 * 60 * 24;
- rtc_time_to_tm(next_time, next);
- }
-}
-
-static int rtc_update_alarm(struct rtc_time *alrm)
-{
- struct rtc_time alarm_tm, now_tm;
- unsigned long now, time;
- int ret;
-
- do {
- now = RCNR;
- rtc_time_to_tm(now, &now_tm);
- rtc_next_alarm_time(&alarm_tm, &now_tm, alrm);
- ret = rtc_tm_to_time(&alarm_tm, &time);
- if (ret != 0)
- break;
-
- RTSR = RTSR & (RTSR_HZE|RTSR_ALE|RTSR_AL);
- RTAR = time;
- } while (now != RCNR);
-
- return ret;
-}
+#define RTC_FREQ 1024
+
+struct sa1100_rtc {
+ spinlock_t lock;
+ int irq_1hz;
+ int irq_alarm;
+ struct rtc_device *rtc;
+ struct clk *clk;
+};
static irqreturn_t sa1100_rtc_interrupt(int irq, void *dev_id)
{
- struct platform_device *pdev = to_platform_device(dev_id);
- struct rtc_device *rtc = platform_get_drvdata(pdev);
+ struct sa1100_rtc *info = dev_get_drvdata(dev_id);
+ struct rtc_device *rtc = info->rtc;
unsigned int rtsr;
unsigned long events = 0;
- spin_lock(&sa1100_rtc_lock);
+ spin_lock(&info->lock);
rtsr = RTSR;
/* clear interrupt sources */
@@ -146,29 +96,28 @@ static irqreturn_t sa1100_rtc_interrupt(int irq, void *dev_id)
rtc_update_irq(rtc, 1, events);
- if (rtsr & RTSR_AL && rtc_periodic_alarm(&rtc_alarm))
- rtc_update_alarm(&rtc_alarm);
-
- spin_unlock(&sa1100_rtc_lock);
+ spin_unlock(&info->lock);
return IRQ_HANDLED;
}
static int sa1100_rtc_open(struct device *dev)
{
+ struct sa1100_rtc *info = dev_get_drvdata(dev);
+ struct rtc_device *rtc = info->rtc;
int ret;
- struct platform_device *plat_dev = to_platform_device(dev);
- struct rtc_device *rtc = platform_get_drvdata(plat_dev);
- ret = request_irq(IRQ_RTC1Hz, sa1100_rtc_interrupt, 0, "rtc 1Hz", dev);
+ ret = clk_prepare_enable(info->clk);
+ if (ret)
+ goto fail_clk;
+ ret = request_irq(info->irq_1hz, sa1100_rtc_interrupt, 0, "rtc 1Hz", dev);
if (ret) {
- dev_err(dev, "IRQ %d already in use.\n", IRQ_RTC1Hz);
+ dev_err(dev, "IRQ %d already in use.\n", info->irq_1hz);
goto fail_ui;
}
- ret = request_irq(IRQ_RTCAlrm, sa1100_rtc_interrupt, 0,
- "rtc Alrm", dev);
+ ret = request_irq(info->irq_alarm, sa1100_rtc_interrupt, 0, "rtc Alrm", dev);
if (ret) {
- dev_err(dev, "IRQ %d already in use.\n", IRQ_RTCAlrm);
+ dev_err(dev, "IRQ %d already in use.\n", info->irq_alarm);
goto fail_ai;
}
rtc->max_user_freq = RTC_FREQ;
@@ -177,29 +126,36 @@ static int sa1100_rtc_open(struct device *dev)
return 0;
fail_ai:
- free_irq(IRQ_RTC1Hz, dev);
+ free_irq(info->irq_1hz, dev);
fail_ui:
+ clk_disable_unprepare(info->clk);
+ fail_clk:
return ret;
}
static void sa1100_rtc_release(struct device *dev)
{
- spin_lock_irq(&sa1100_rtc_lock);
+ struct sa1100_rtc *info = dev_get_drvdata(dev);
+
+ spin_lock_irq(&info->lock);
RTSR = 0;
- spin_unlock_irq(&sa1100_rtc_lock);
+ spin_unlock_irq(&info->lock);
- free_irq(IRQ_RTCAlrm, dev);
- free_irq(IRQ_RTC1Hz, dev);
+ free_irq(info->irq_alarm, dev);
+ free_irq(info->irq_1hz, dev);
+ clk_disable_unprepare(info->clk);
}
static int sa1100_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
- spin_lock_irq(&sa1100_rtc_lock);
+ struct sa1100_rtc *info = dev_get_drvdata(dev);
+
+ spin_lock_irq(&info->lock);
if (enabled)
RTSR |= RTSR_ALE;
else
RTSR &= ~RTSR_ALE;
- spin_unlock_irq(&sa1100_rtc_lock);
+ spin_unlock_irq(&info->lock);
return 0;
}
@@ -224,7 +180,6 @@ static int sa1100_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
u32 rtsr;
- memcpy(&alrm->time, &rtc_alarm, sizeof(struct rtc_time));
rtsr = RTSR;
alrm->enabled = (rtsr & RTSR_ALE) ? 1 : 0;
alrm->pending = (rtsr & RTSR_AL) ? 1 : 0;
@@ -233,17 +188,22 @@ static int sa1100_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
static int sa1100_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
+ struct sa1100_rtc *info = dev_get_drvdata(dev);
+ unsigned long time;
int ret;
- spin_lock_irq(&sa1100_rtc_lock);
- ret = rtc_update_alarm(&alrm->time);
- if (ret == 0) {
- if (alrm->enabled)
- RTSR |= RTSR_ALE;
- else
- RTSR &= ~RTSR_ALE;
- }
- spin_unlock_irq(&sa1100_rtc_lock);
+ spin_lock_irq(&info->lock);
+ ret = rtc_tm_to_time(&alrm->time, &time);
+ if (ret != 0)
+ goto out;
+ RTSR = RTSR & (RTSR_HZE|RTSR_ALE|RTSR_AL);
+ RTAR = time;
+ if (alrm->enabled)
+ RTSR |= RTSR_ALE;
+ else
+ RTSR &= ~RTSR_ALE;
+out:
+ spin_unlock_irq(&info->lock);
return ret;
}
@@ -270,6 +230,27 @@ static const struct rtc_class_ops sa1100_rtc_ops = {
static int sa1100_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtc;
+ struct sa1100_rtc *info;
+ int irq_1hz, irq_alarm, ret = 0;
+
+ irq_1hz = platform_get_irq_byname(pdev, "rtc 1Hz");
+ irq_alarm = platform_get_irq_byname(pdev, "rtc alarm");
+ if (irq_1hz < 0 || irq_alarm < 0)
+ return -ENODEV;
+
+ info = kzalloc(sizeof(struct sa1100_rtc), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+ info->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(info->clk)) {
+ dev_err(&pdev->dev, "failed to find rtc clock source\n");
+ ret = PTR_ERR(info->clk);
+ goto err_clk;
+ }
+ info->irq_1hz = irq_1hz;
+ info->irq_alarm = irq_alarm;
+ spin_lock_init(&info->lock);
+ platform_set_drvdata(pdev, info);
/*
* According to the manual we should be able to let RTTR be zero
@@ -291,10 +272,11 @@ static int sa1100_rtc_probe(struct platform_device *pdev)
rtc = rtc_device_register(pdev->name, &pdev->dev, &sa1100_rtc_ops,
THIS_MODULE);
- if (IS_ERR(rtc))
- return PTR_ERR(rtc);
-
- platform_set_drvdata(pdev, rtc);
+ if (IS_ERR(rtc)) {
+ ret = PTR_ERR(rtc);
+ goto err_dev;
+ }
+ info->rtc = rtc;
/* Fix for a nasty initialization problem in the SA11xx RTSR register.
* See also the comments in sa1100_rtc_interrupt().
@@ -321,14 +303,24 @@ static int sa1100_rtc_probe(struct platform_device *pdev)
RTSR = RTSR_AL | RTSR_HZ;
return 0;
+err_dev:
+ platform_set_drvdata(pdev, NULL);
+ clk_put(info->clk);
+err_clk:
+ kfree(info);
+ return ret;
}
static int sa1100_rtc_remove(struct platform_device *pdev)
{
- struct rtc_device *rtc = platform_get_drvdata(pdev);
+ struct sa1100_rtc *info = platform_get_drvdata(pdev);
- if (rtc)
- rtc_device_unregister(rtc);
+ if (info) {
+ rtc_device_unregister(info->rtc);
+ clk_put(info->clk);
+ platform_set_drvdata(pdev, NULL);
+ kfree(info);
+ }
return 0;
}
@@ -336,15 +328,17 @@ static int sa1100_rtc_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static int sa1100_rtc_suspend(struct device *dev)
{
+ struct sa1100_rtc *info = dev_get_drvdata(dev);
if (device_may_wakeup(dev))
- enable_irq_wake(IRQ_RTCAlrm);
+ enable_irq_wake(info->irq_alarm);
return 0;
}
static int sa1100_rtc_resume(struct device *dev)
{
+ struct sa1100_rtc *info = dev_get_drvdata(dev);
if (device_may_wakeup(dev))
- disable_irq_wake(IRQ_RTCAlrm);
+ disable_irq_wake(info->irq_alarm);
return 0;
}
@@ -354,6 +348,13 @@ static const struct dev_pm_ops sa1100_rtc_pm_ops = {
};
#endif
+static struct of_device_id sa1100_rtc_dt_ids[] = {
+ { .compatible = "mrvl,sa1100-rtc", },
+ { .compatible = "mrvl,mmp-rtc", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sa1100_rtc_dt_ids);
+
static struct platform_driver sa1100_rtc_driver = {
.probe = sa1100_rtc_probe,
.remove = sa1100_rtc_remove,
@@ -362,6 +363,7 @@ static struct platform_driver sa1100_rtc_driver = {
#ifdef CONFIG_PM
.pm = &sa1100_rtc_pm_ops,
#endif
+ .of_match_table = sa1100_rtc_dt_ids,
},
};
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 2a0dfcb0bc42..35c685c374e9 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -1458,7 +1458,6 @@ int qdio_establish(struct qdio_initialize *init_data)
}
qdio_setup_ssqd_info(irq_ptr);
- DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac);
qdio_detect_hsicq(irq_ptr);
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 452989a7ec13..ecf12f0aca7b 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -311,7 +311,8 @@ void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr)
check_and_setup_qebsm(irq_ptr, qdioac, irq_ptr->ssqd_desc.sch_token);
process_ac_flags(irq_ptr, qdioac);
- DBF_EVENT("qdioac:%4x", qdioac);
+ DBF_EVENT("ac 1:%2x 2:%4x", qdioac, irq_ptr->ssqd_desc.qdioac2);
+ DBF_EVENT("3:%4x qib:%4x", irq_ptr->ssqd_desc.qdioac3, irq_ptr->qib.ac);
}
void qdio_release_memory(struct qdio_irq *irq_ptr)
diff --git a/drivers/scsi/arm/arxescsi.c b/drivers/scsi/arm/arxescsi.c
index a750aa72b8ef..2a28b4ad1975 100644
--- a/drivers/scsi/arm/arxescsi.c
+++ b/drivers/scsi/arm/arxescsi.c
@@ -305,7 +305,7 @@ arxescsi_probe(struct expansion_card *ec, const struct ecard_id *id)
info->base = base;
info->info.scsi.io_base = base + 0x2000;
- info->info.scsi.irq = NO_IRQ;
+ info->info.scsi.irq = 0;
info->info.scsi.dma = NO_DMA;
info->info.scsi.io_shift = 5;
info->info.ifcfg.clockrate = 24; /* MHz */
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index e85c40b6e19b..6206a666a8ec 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -2176,7 +2176,7 @@ static void fas216_done(FAS216_Info *info, unsigned int result)
fn = (void (*)(FAS216_Info *, struct scsi_cmnd *, unsigned int))SCpnt->host_scribble;
fn(info, SCpnt, result);
- if (info->scsi.irq != NO_IRQ) {
+ if (info->scsi.irq) {
spin_lock_irqsave(&info->host_lock, flags);
if (info->scsi.phase == PHASE_IDLE)
fas216_kick(info);
@@ -2276,7 +2276,7 @@ static int fas216_noqueue_command_lck(struct scsi_cmnd *SCpnt,
* We should only be using this if we don't have an interrupt.
* Provide some "incentive" to use the queueing code.
*/
- BUG_ON(info->scsi.irq != NO_IRQ);
+ BUG_ON(info->scsi.irq);
info->internal_done = 0;
fas216_queue_command_lck(SCpnt, fas216_internal_done);
diff --git a/drivers/scsi/arm/fas216.h b/drivers/scsi/arm/fas216.h
index 84b7127c0121..df2e1b3ddfe2 100644
--- a/drivers/scsi/arm/fas216.h
+++ b/drivers/scsi/arm/fas216.h
@@ -12,10 +12,6 @@
#ifndef FAS216_H
#define FAS216_H
-#ifndef NO_IRQ
-#define NO_IRQ 255
-#endif
-
#include <scsi/scsi_eh.h>
#include "queue.h"
diff --git a/drivers/sh/clk/cpg.c b/drivers/sh/clk/cpg.c
index 92d314a73f69..91b6d52f74eb 100644
--- a/drivers/sh/clk/cpg.c
+++ b/drivers/sh/clk/cpg.c
@@ -26,7 +26,7 @@ static void sh_clk_mstp32_disable(struct clk *clk)
clk->mapped_reg);
}
-static struct clk_ops sh_clk_mstp32_clk_ops = {
+static struct sh_clk_ops sh_clk_mstp32_clk_ops = {
.enable = sh_clk_mstp32_enable,
.disable = sh_clk_mstp32_disable,
.recalc = followparent_recalc,
@@ -150,7 +150,7 @@ static void sh_clk_div6_disable(struct clk *clk)
iowrite32(value, clk->mapped_reg);
}
-static struct clk_ops sh_clk_div6_clk_ops = {
+static struct sh_clk_ops sh_clk_div6_clk_ops = {
.recalc = sh_clk_div6_recalc,
.round_rate = sh_clk_div_round_rate,
.set_rate = sh_clk_div6_set_rate,
@@ -158,7 +158,7 @@ static struct clk_ops sh_clk_div6_clk_ops = {
.disable = sh_clk_div6_disable,
};
-static struct clk_ops sh_clk_div6_reparent_clk_ops = {
+static struct sh_clk_ops sh_clk_div6_reparent_clk_ops = {
.recalc = sh_clk_div6_recalc,
.round_rate = sh_clk_div_round_rate,
.set_rate = sh_clk_div6_set_rate,
@@ -200,7 +200,7 @@ static int __init sh_clk_init_parent(struct clk *clk)
}
static int __init sh_clk_div6_register_ops(struct clk *clks, int nr,
- struct clk_ops *ops)
+ struct sh_clk_ops *ops)
{
struct clk *clkp;
void *freq_table;
@@ -317,13 +317,13 @@ static void sh_clk_div4_disable(struct clk *clk)
iowrite32(ioread32(clk->mapped_reg) | (1 << 8), clk->mapped_reg);
}
-static struct clk_ops sh_clk_div4_clk_ops = {
+static struct sh_clk_ops sh_clk_div4_clk_ops = {
.recalc = sh_clk_div4_recalc,
.set_rate = sh_clk_div4_set_rate,
.round_rate = sh_clk_div_round_rate,
};
-static struct clk_ops sh_clk_div4_enable_clk_ops = {
+static struct sh_clk_ops sh_clk_div4_enable_clk_ops = {
.recalc = sh_clk_div4_recalc,
.set_rate = sh_clk_div4_set_rate,
.round_rate = sh_clk_div_round_rate,
@@ -331,7 +331,7 @@ static struct clk_ops sh_clk_div4_enable_clk_ops = {
.disable = sh_clk_div4_disable,
};
-static struct clk_ops sh_clk_div4_reparent_clk_ops = {
+static struct sh_clk_ops sh_clk_div4_reparent_clk_ops = {
.recalc = sh_clk_div4_recalc,
.set_rate = sh_clk_div4_set_rate,
.round_rate = sh_clk_div_round_rate,
@@ -341,7 +341,7 @@ static struct clk_ops sh_clk_div4_reparent_clk_ops = {
};
static int __init sh_clk_div4_register_ops(struct clk *clks, int nr,
- struct clk_div4_table *table, struct clk_ops *ops)
+ struct clk_div4_table *table, struct sh_clk_ops *ops)
{
struct clk *clkp;
void *freq_table;
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 0b06e360628a..3ed748355b98 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -293,7 +293,7 @@ config SPI_RSPI
config SPI_S3C24XX
tristate "Samsung S3C24XX series SPI"
- depends on ARCH_S3C2410 && EXPERIMENTAL
+ depends on ARCH_S3C24XX && EXPERIMENTAL
select SPI_BITBANG
help
SPI driver for Samsung S3C24XX series ARM SoCs
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index 13448c832c44..e496f799b7a9 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -359,11 +359,6 @@ static int orion_spi_setup(struct spi_device *spi)
orion_spi = spi_master_get_devdata(spi->master);
- /* Fix ac timing if required. */
- if (orion_spi->spi_info->enable_clock_fix)
- orion_spi_setbits(orion_spi, ORION_SPI_IF_CONFIG_REG,
- (1 << 14));
-
if ((spi->max_speed_hz == 0)
|| (spi->max_speed_hz > orion_spi->max_speed))
spi->max_speed_hz = orion_spi->max_speed;
diff --git a/drivers/spi/spi-s3c24xx.c b/drivers/spi/spi-s3c24xx.c
index fc064535f4fc..8ee7d790ce49 100644
--- a/drivers/spi/spi-s3c24xx.c
+++ b/drivers/spi/spi-s3c24xx.c
@@ -24,10 +24,10 @@
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
+#include <linux/spi/s3c24xx.h>
#include <linux/module.h>
#include <plat/regs-spi.h>
-#include <mach/spi.h>
#include <plat/fiq.h>
#include <asm/fiq.h>
diff --git a/drivers/staging/ste_rmi4/Makefile b/drivers/staging/ste_rmi4/Makefile
index 176f46900571..e4c03351420f 100644
--- a/drivers/staging/ste_rmi4/Makefile
+++ b/drivers/staging/ste_rmi4/Makefile
@@ -2,4 +2,4 @@
# Makefile for the RMI4 touchscreen driver.
#
obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4) += synaptics_i2c_rmi4.o
-obj-$(CONFIG_MACH_U8500) += board-mop500-u8500uib-rmi4.o
+obj-$(CONFIG_MACH_MOP500) += board-mop500-u8500uib-rmi4.o
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 10605ecc99ab..f9a6be7a9bed 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1526,6 +1526,8 @@ void __init atmel_register_uart_fns(struct atmel_port_fns *fns)
atmel_pops.set_wake = fns->set_wake;
}
+struct platform_device *atmel_default_console_device; /* the serial console device */
+
#ifdef CONFIG_SERIAL_ATMEL_CONSOLE
static void atmel_console_putchar(struct uart_port *port, int ch)
{
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 0b7fed746b27..e7feceeebc2f 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -1508,7 +1508,7 @@ static int serial_imx_probe(struct platform_device *pdev)
ret = PTR_ERR(sport->clk);
goto unmap;
}
- clk_enable(sport->clk);
+ clk_prepare_enable(sport->clk);
sport->port.uartclk = clk_get_rate(sport->clk);
@@ -1531,8 +1531,8 @@ deinit:
if (pdata && pdata->exit)
pdata->exit(pdev);
clkput:
+ clk_disable_unprepare(sport->clk);
clk_put(sport->clk);
- clk_disable(sport->clk);
unmap:
iounmap(sport->port.membase);
free:
@@ -1552,11 +1552,10 @@ static int serial_imx_remove(struct platform_device *pdev)
if (sport) {
uart_remove_one_port(&imx_reg, &sport->port);
+ clk_disable_unprepare(sport->clk);
clk_put(sport->clk);
}
- clk_disable(sport->clk);
-
if (pdata && pdata->exit)
pdata->exit(pdev);
diff --git a/drivers/tty/serial/pxa.c b/drivers/tty/serial/pxa.c
index e2fd3d8e0ab4..5847a4b855f7 100644
--- a/drivers/tty/serial/pxa.c
+++ b/drivers/tty/serial/pxa.c
@@ -36,6 +36,7 @@
#include <linux/circ_buf.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
@@ -44,6 +45,8 @@
#include <linux/io.h>
#include <linux/slab.h>
+#define PXA_NAME_LEN 8
+
struct uart_pxa_port {
struct uart_port port;
unsigned char ier;
@@ -51,7 +54,7 @@ struct uart_pxa_port {
unsigned char mcr;
unsigned int lsr_break_flag;
struct clk *clk;
- char *name;
+ char name[PXA_NAME_LEN];
};
static inline unsigned int serial_in(struct uart_pxa_port *up, int offset)
@@ -781,6 +784,31 @@ static const struct dev_pm_ops serial_pxa_pm_ops = {
};
#endif
+static struct of_device_id serial_pxa_dt_ids[] = {
+ { .compatible = "mrvl,pxa-uart", },
+ { .compatible = "mrvl,mmp-uart", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, serial_pxa_dt_ids);
+
+static int serial_pxa_probe_dt(struct platform_device *pdev,
+ struct uart_pxa_port *sport)
+{
+ struct device_node *np = pdev->dev.of_node;
+ int ret;
+
+ if (!np)
+ return 1;
+
+ ret = of_alias_get_id(np, "serial");
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret);
+ return ret;
+ }
+ sport->port.line = ret;
+ return 0;
+}
+
static int serial_pxa_probe(struct platform_device *dev)
{
struct uart_pxa_port *sport;
@@ -808,20 +836,16 @@ static int serial_pxa_probe(struct platform_device *dev)
sport->port.irq = irqres->start;
sport->port.fifosize = 64;
sport->port.ops = &serial_pxa_pops;
- sport->port.line = dev->id;
sport->port.dev = &dev->dev;
sport->port.flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF;
sport->port.uartclk = clk_get_rate(sport->clk);
- switch (dev->id) {
- case 0: sport->name = "FFUART"; break;
- case 1: sport->name = "BTUART"; break;
- case 2: sport->name = "STUART"; break;
- case 3: sport->name = "HWUART"; break;
- default:
- sport->name = "???";
- break;
- }
+ ret = serial_pxa_probe_dt(dev, sport);
+ if (ret > 0)
+ sport->port.line = dev->id;
+ else if (ret < 0)
+ goto err_clk;
+ snprintf(sport->name, PXA_NAME_LEN - 1, "UART%d", sport->port.line + 1);
sport->port.membase = ioremap(mmres->start, resource_size(mmres));
if (!sport->port.membase) {
@@ -829,7 +853,7 @@ static int serial_pxa_probe(struct platform_device *dev)
goto err_clk;
}
- serial_pxa_ports[dev->id] = sport;
+ serial_pxa_ports[sport->port.line] = sport;
uart_add_one_port(&serial_pxa_reg, &sport->port);
platform_set_drvdata(dev, sport);
@@ -866,6 +890,7 @@ static struct platform_driver serial_pxa_driver = {
#ifdef CONFIG_PM
.pm = &serial_pxa_pm_ops,
#endif
+ .of_match_table = serial_pxa_dt_ids,
},
};
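
serial_pxa_probe_dt() above encodes a common convention: a positive return means "no DT node, fall back to legacy numbering", zero means the line was set from the "serial" alias, and a negative value is an error. The same logic condensed into one hypothetical helper:

#include <linux/of.h>
#include <linux/platform_device.h>

/* Sketch only: pick a port number from the DT "serial" alias, falling
 * back to the platform device id on non-DT boards. May return a
 * negative errno if a node exists but carries no usable alias. */
static int pick_line(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;

	if (!np)
		return pdev->id;	/* legacy board-file numbering */

	return of_alias_get_id(np, "serial");
}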
diff --git a/drivers/tty/serial/sa1100.c b/drivers/tty/serial/sa1100.c
index ef7a21a6a01b..2ca5959ec3fa 100644
--- a/drivers/tty/serial/sa1100.c
+++ b/drivers/tty/serial/sa1100.c
@@ -38,6 +38,7 @@
#include <asm/irq.h>
#include <mach/hardware.h>
+#include <mach/irqs.h>
#include <asm/mach/serial_sa1100.h>
/* We've been assigned a range on the "Low-density serial ports" major */
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index e4405e088589..cbd8f5f80596 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -16,7 +16,7 @@ config USB_ARCH_HAS_OHCI
# ARM:
default y if SA1111
default y if ARCH_OMAP
- default y if ARCH_S3C2410
+ default y if ARCH_S3C24XX
default y if PXA27x
default y if PXA3xx
default y if ARCH_EP93XX
@@ -44,7 +44,7 @@ config USB_ARCH_HAS_EHCI
default y if PPC_MPC512x
default y if ARCH_IXP4XX
default y if ARCH_W90X900
- default y if ARCH_AT91SAM9G45
+ default y if ARCH_AT91
default y if ARCH_MXC
default y if ARCH_OMAP3
default y if ARCH_CNS3XXX
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index c14a3972953a..2633f7595116 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -137,7 +137,7 @@ choice
config USB_AT91
tristate "Atmel AT91 USB Device Port"
- depends on ARCH_AT91 && !ARCH_AT91SAM9RL && !ARCH_AT91CAP9 && !ARCH_AT91SAM9G45
+ depends on ARCH_AT91
help
Many Atmel AT91 processors (such as the AT91RM9200) have a
full speed USB Device Port with support for five configurable
@@ -150,7 +150,7 @@ config USB_AT91
config USB_ATMEL_USBA
tristate "Atmel USBA"
select USB_GADGET_DUALSPEED
- depends on AVR32 || ARCH_AT91CAP9 || ARCH_AT91SAM9RL || ARCH_AT91SAM9G45
+ depends on AVR32 || ARCH_AT91SAM9RL || ARCH_AT91SAM9G45
help
USBA is the integrated high-speed USB Device controller on
the AT32AP700x, some AT91SAM9 and AT91CAP9 processors from Atmel.
@@ -284,7 +284,7 @@ config USB_IMX
config USB_S3C2410
tristate "S3C2410 USB Device Controller"
- depends on ARCH_S3C2410
+ depends on ARCH_S3C24XX
help
Samsung's S3C2410 is an ARMv4 processor with an integrated
full speed USB 1.1 device controller. It has 4 configurable
@@ -299,7 +299,7 @@ config USB_S3C2410_DEBUG
config USB_S3C_HSUDC
tristate "S3C2416, S3C2443 and S3C2450 USB Device Controller"
- depends on ARCH_S3C2410
+ depends on ARCH_S3C24XX
select USB_GADGET_DUALSPEED
help
Samsung's S3C2416, S3C2443 and S3C2450 are ARM9-based SoCs
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index 15a8cdb2ded5..36fd2b4b49a2 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -29,6 +29,8 @@
#include <linux/clk.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
#include <asm/byteorder.h>
#include <mach/hardware.h>
@@ -40,6 +42,7 @@
#include <mach/board.h>
#include <mach/cpu.h>
#include <mach/at91sam9261_matrix.h>
+#include <mach/at91_matrix.h>
#include "at91_udc.h"
@@ -910,9 +913,9 @@ static void pullup(struct at91_udc *udc, int is_on)
} else if (cpu_is_at91sam9261() || cpu_is_at91sam9g10()) {
u32 usbpucr;
- usbpucr = at91_sys_read(AT91_MATRIX_USBPUCR);
+ usbpucr = at91_matrix_read(AT91_MATRIX_USBPUCR);
usbpucr |= AT91_MATRIX_USBPUCR_PUON;
- at91_sys_write(AT91_MATRIX_USBPUCR, usbpucr);
+ at91_matrix_write(AT91_MATRIX_USBPUCR, usbpucr);
}
} else {
stop_activity(udc);
@@ -928,9 +931,9 @@ static void pullup(struct at91_udc *udc, int is_on)
} else if (cpu_is_at91sam9261() || cpu_is_at91sam9g10()) {
u32 usbpucr;
- usbpucr = at91_sys_read(AT91_MATRIX_USBPUCR);
+ usbpucr = at91_matrix_read(AT91_MATRIX_USBPUCR);
usbpucr &= ~AT91_MATRIX_USBPUCR_PUON;
- at91_sys_write(AT91_MATRIX_USBPUCR, usbpucr);
+ at91_matrix_write(AT91_MATRIX_USBPUCR, usbpucr);
}
clk_off(udc);
}
@@ -1706,7 +1709,27 @@ static void at91udc_shutdown(struct platform_device *dev)
spin_unlock_irqrestore(&udc->lock, flags);
}
-static int __init at91udc_probe(struct platform_device *pdev)
+static void __devinit at91udc_of_init(struct at91_udc *udc,
+ struct device_node *np)
+{
+ struct at91_udc_data *board = &udc->board;
+ u32 val;
+ enum of_gpio_flags flags;
+
+ if (of_property_read_u32(np, "atmel,vbus-polled", &val) == 0)
+ board->vbus_polled = 1;
+
+ board->vbus_pin = of_get_named_gpio_flags(np, "atmel,vbus-gpio", 0,
+ &flags);
+ board->vbus_active_low = (flags & OF_GPIO_ACTIVE_LOW) ? 1 : 0;
+
+ board->pullup_pin = of_get_named_gpio_flags(np, "atmel,pullup-gpio", 0,
+ &flags);
+
+ board->pullup_active_low = (flags & OF_GPIO_ACTIVE_LOW) ? 1 : 0;
+}
+
+static int __devinit at91udc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct at91_udc *udc;
@@ -1741,7 +1764,11 @@ static int __init at91udc_probe(struct platform_device *pdev)
/* init software state */
udc = &controller;
udc->gadget.dev.parent = dev;
- udc->board = *(struct at91_udc_data *) dev->platform_data;
+ if (pdev->dev.of_node)
+ at91udc_of_init(udc, pdev->dev.of_node);
+ else
+ memcpy(&udc->board, dev->platform_data,
+ sizeof(struct at91_udc_data));
udc->pdev = pdev;
udc->enabled = 0;
spin_lock_init(&udc->lock);
@@ -1970,6 +1997,15 @@ static int at91udc_resume(struct platform_device *pdev)
#define at91udc_resume NULL
#endif
+#if defined(CONFIG_OF)
+static const struct of_device_id at91_udc_dt_ids[] = {
+ { .compatible = "atmel,at91rm9200-udc" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, at91_udc_dt_ids);
+#endif
+
static struct platform_driver at91_udc_driver = {
.remove = __exit_p(at91udc_remove),
.shutdown = at91udc_shutdown,
@@ -1978,6 +2014,7 @@ static struct platform_driver at91_udc_driver = {
.driver = {
.name = (char *) driver_name,
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(at91_udc_dt_ids),
},
};
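
at91udc_of_init() above reads each GPIO together with its polarity and stores the active-low bit separately in the board data. The idiom in isolation, using an invented property name:

#include <linux/gpio.h>
#include <linux/of_gpio.h>

/* Sketch: fetch "acme,detect-gpio" plus its polarity; the property
 * name and helper are illustrative, not part of this binding. */
static int read_detect_gpio(struct device_node *np, int *pin, bool *low)
{
	enum of_gpio_flags flags;
	int gpio = of_get_named_gpio_flags(np, "acme,detect-gpio", 0, &flags);

	if (!gpio_is_valid(gpio))
		return gpio;		/* propagates the lookup error */

	*pin = gpio;
	*low = !!(flags & OF_GPIO_ACTIVE_LOW);
	return 0;
}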
diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c
index 5e10f651ad63..9f98508966d1 100644
--- a/drivers/usb/gadget/atmel_usba_udc.c
+++ b/drivers/usb/gadget/atmel_usba_udc.c
@@ -332,12 +332,12 @@ static int vbus_is_present(struct usba_udc *udc)
static void toggle_bias(int is_on)
{
- unsigned int uckr = at91_sys_read(AT91_CKGR_UCKR);
+ unsigned int uckr = at91_pmc_read(AT91_CKGR_UCKR);
if (is_on)
- at91_sys_write(AT91_CKGR_UCKR, uckr | AT91_PMC_BIASEN);
+ at91_pmc_write(AT91_CKGR_UCKR, uckr | AT91_PMC_BIASEN);
else
- at91_sys_write(AT91_CKGR_UCKR, uckr & ~(AT91_PMC_BIASEN));
+ at91_pmc_write(AT91_CKGR_UCKR, uckr & ~(AT91_PMC_BIASEN));
}
#else
diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c
index 7cdcb63b21ff..85a5cebe96b3 100644
--- a/drivers/usb/gadget/f_phonet.c
+++ b/drivers/usb/gadget/f_phonet.c
@@ -345,7 +345,7 @@ static void pn_rx_complete(struct usb_ep *ep, struct usb_request *req)
}
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- skb->len <= 1, req->actual);
+ skb->len <= 1, req->actual, req->actual);
page = NULL;
if (req->actual < req->length) { /* Last fragment */
diff --git a/drivers/usb/host/ehci-atmel.c b/drivers/usb/host/ehci-atmel.c
index a5a3ef1f0096..19f318ababa2 100644
--- a/drivers/usb/host/ehci-atmel.c
+++ b/drivers/usb/host/ehci-atmel.c
@@ -13,6 +13,7 @@
#include <linux/clk.h>
#include <linux/platform_device.h>
+#include <linux/of_platform.h>
/* interface and function clocks */
static struct clk *iclk, *fclk;
@@ -115,6 +116,8 @@ static const struct hc_driver ehci_atmel_hc_driver = {
.clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
};
+static u64 at91_ehci_dma_mask = DMA_BIT_MASK(32);
+
static int __devinit ehci_atmel_drv_probe(struct platform_device *pdev)
{
struct usb_hcd *hcd;
@@ -137,6 +140,13 @@ static int __devinit ehci_atmel_drv_probe(struct platform_device *pdev)
goto fail_create_hcd;
}
+ /* Right now device-tree probed devices don't get dma_mask set.
+ * Since shared usb code relies on it, set it here for now.
+ * Once we have dma capability bindings this can go away.
+ */
+ if (!pdev->dev.dma_mask)
+ pdev->dev.dma_mask = &at91_ehci_dma_mask;
+
hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
if (!hcd) {
retval = -ENOMEM;
@@ -225,9 +235,21 @@ static int __devexit ehci_atmel_drv_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id atmel_ehci_dt_ids[] = {
+ { .compatible = "atmel,at91sam9g45-ehci" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, atmel_ehci_dt_ids);
+#endif
+
static struct platform_driver ehci_atmel_driver = {
.probe = ehci_atmel_drv_probe,
.remove = __devexit_p(ehci_atmel_drv_remove),
.shutdown = usb_hcd_platform_shutdown,
- .driver.name = "atmel-ehci",
+ .driver = {
+ .name = "atmel-ehci",
+ .of_match_table = of_match_ptr(atmel_ehci_dt_ids),
+ },
};
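
The dma_mask workaround in ehci_atmel_drv_probe() stands alone: DT-probed platform devices do not get a dma_mask yet, while the shared USB core expects one, so the driver points the device at a static 32-bit mask. Stripped to its essentials (hypothetical driver):

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

static u64 my_dma_mask = DMA_BIT_MASK(32);	/* driver-local, illustrative */

static int my_probe(struct platform_device *pdev)
{
	/* DT probing leaves dma_mask NULL for now; supply our own */
	if (!pdev->dev.dma_mask)
		pdev->dev.dma_mask = &my_dma_mask;

	return 0;	/* continue with hcd creation etc. */
}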
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 77afabc77f9b..db8963f5fbce 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -14,6 +14,8 @@
#include <linux/clk.h>
#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
#include <mach/hardware.h>
#include <asm/gpio.h>
@@ -448,10 +450,11 @@ static irqreturn_t ohci_hcd_at91_overcurrent_irq(int irq, void *data)
/* From the GPIO notifying the over-current situation, find
* out the corresponding port */
- gpio = irq_to_gpio(irq);
for (port = 0; port < ARRAY_SIZE(pdata->overcurrent_pin); port++) {
- if (pdata->overcurrent_pin[port] == gpio)
+ if (gpio_to_irq(pdata->overcurrent_pin[port]) == irq) {
+ gpio = pdata->overcurrent_pin[port];
break;
+ }
}
if (port == ARRAY_SIZE(pdata->overcurrent_pin)) {
@@ -476,13 +479,109 @@ static irqreturn_t ohci_hcd_at91_overcurrent_irq(int irq, void *data)
return IRQ_HANDLED;
}
+#ifdef CONFIG_OF
+static const struct of_device_id at91_ohci_dt_ids[] = {
+ { .compatible = "atmel,at91rm9200-ohci" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, at91_ohci_dt_ids);
+
+static u64 at91_ohci_dma_mask = DMA_BIT_MASK(32);
+
+static int __devinit ohci_at91_of_init(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ int i, ret, gpio;
+ enum of_gpio_flags flags;
+ struct at91_usbh_data *pdata;
+ u32 ports;
+
+ if (!np)
+ return 0;
+
+ /* Right now device-tree probed devices don't get dma_mask set.
+ * Since shared usb code relies on it, set it here for now.
+ * Once we have dma capability bindings this can go away.
+ */
+ if (!pdev->dev.dma_mask)
+ pdev->dev.dma_mask = &at91_ohci_dma_mask;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ if (!of_property_read_u32(np, "num-ports", &ports))
+ pdata->ports = ports;
+
+ for (i = 0; i < 2; i++) {
+ gpio = of_get_named_gpio_flags(np, "atmel,vbus-gpio", i, &flags);
+ pdata->vbus_pin[i] = gpio;
+ if (!gpio_is_valid(gpio))
+ continue;
+ pdata->vbus_pin_active_low[i] = flags & OF_GPIO_ACTIVE_LOW;
+ ret = gpio_request(gpio, "ohci_vbus");
+ if (ret) {
+ dev_warn(&pdev->dev, "can't request vbus gpio %d", gpio);
+ continue;
+ }
+ ret = gpio_direction_output(gpio, !(flags & OF_GPIO_ACTIVE_LOW) ^ 1);
+ if (ret)
+ dev_warn(&pdev->dev, "can't put vbus gpio %d as output %d",
+ !(flags & OF_GPIO_ACTIVE_LOW) ^ 1, gpio);
+ }
+
+ for (i = 0; i < 2; i++) {
+ gpio = of_get_named_gpio_flags(np, "atmel,oc-gpio", i, &flags);
+ pdata->overcurrent_pin[i] = gpio;
+ if (!gpio_is_valid(gpio))
+ continue;
+ ret = gpio_request(gpio, "ohci_overcurrent");
+ if (ret) {
+ dev_err(&pdev->dev, "can't request overcurrent gpio %d", gpio);
+ continue;
+ }
+
+ ret = gpio_direction_input(gpio);
+ if (ret) {
+ dev_err(&pdev->dev, "can't configure overcurrent gpio %d as input", gpio);
+ continue;
+ }
+
+ ret = request_irq(gpio_to_irq(gpio),
+ ohci_hcd_at91_overcurrent_irq,
+ IRQF_SHARED, "ohci_overcurrent", pdev);
+ if (ret) {
+ gpio_free(gpio);
+ dev_warn(&pdev->dev, "cannot get GPIO IRQ for overcurrent\n");
+ }
+ }
+
+ pdev->dev.platform_data = pdata;
+
+ return 0;
+}
+#else
+static int __devinit ohci_at91_of_init(struct platform_device *pdev)
+{
+ return 0;
+}
+#endif
+
/*-------------------------------------------------------------------------*/
static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
{
- struct at91_usbh_data *pdata = pdev->dev.platform_data;
+ struct at91_usbh_data *pdata;
int i;
+ i = ohci_at91_of_init(pdev);
+
+ if (i)
+ return i;
+
+ pdata = pdev->dev.platform_data;
+
if (pdata) {
for (i = 0; i < ARRAY_SIZE(pdata->vbus_pin); i++) {
if (!gpio_is_valid(pdata->vbus_pin[i]))
@@ -595,5 +694,6 @@ static struct platform_driver ohci_hcd_at91_driver = {
.driver = {
.name = "at91_ohci",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(at91_ohci_dt_ids),
},
};
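
With irq_to_gpio() gone, the overcurrent handler above walks the configured pins and compares gpio_to_irq() of each against the incoming irq. The same reverse lookup as a self-contained, hypothetical helper:

#include <linux/errno.h>
#include <linux/gpio.h>

/* Sketch: map a fired (possibly shared) IRQ back to one of our GPIO
 * pins; pins[] holds the candidate GPIO numbers. */
static int irq_to_pin(int irq, const int *pins, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		if (gpio_to_irq(pins[i]) == irq)
			return pins[i];

	return -EINVAL;		/* not one of ours */
}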
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index cd5e382db89c..543e90e336b8 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -1000,7 +1000,7 @@ MODULE_LICENSE ("GPL");
#define SA1111_DRIVER ohci_hcd_sa1111_driver
#endif
-#if defined(CONFIG_ARCH_S3C2410) || defined(CONFIG_ARCH_S3C64XX)
+#if defined(CONFIG_ARCH_S3C24XX) || defined(CONFIG_ARCH_S3C64XX)
#include "ohci-s3c2410.c"
#define PLATFORM_DRIVER ohci_hcd_s3c2410_driver
#endif
diff --git a/drivers/usb/host/ohci-sa1111.c b/drivers/usb/host/ohci-sa1111.c
index 4bde4f9821ba..e1004fb37bd9 100644
--- a/drivers/usb/host/ohci-sa1111.c
+++ b/drivers/usb/host/ohci-sa1111.c
@@ -16,29 +16,115 @@
#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <mach/assabet.h>
-#include <mach/badge4.h>
#include <asm/hardware/sa1111.h>
#ifndef CONFIG_SA1111
#error "This file is SA-1111 bus glue. CONFIG_SA1111 must be defined."
#endif
-extern int usb_disabled(void);
+#define USB_STATUS 0x0118
+#define USB_RESET 0x011c
+#define USB_IRQTEST 0x0120
+
+#define USB_RESET_FORCEIFRESET (1 << 0)
+#define USB_RESET_FORCEHCRESET (1 << 1)
+#define USB_RESET_CLKGENRESET (1 << 2)
+#define USB_RESET_SIMSCALEDOWN (1 << 3)
+#define USB_RESET_USBINTTEST (1 << 4)
+#define USB_RESET_SLEEPSTBYEN (1 << 5)
+#define USB_RESET_PWRSENSELOW (1 << 6)
+#define USB_RESET_PWRCTRLLOW (1 << 7)
+
+#define USB_STATUS_IRQHCIRMTWKUP (1 << 7)
+#define USB_STATUS_IRQHCIBUFFACC (1 << 8)
+#define USB_STATUS_NIRQHCIM (1 << 9)
+#define USB_STATUS_NHCIMFCLR (1 << 10)
+#define USB_STATUS_USBPWRSENSE (1 << 11)
-/*-------------------------------------------------------------------------*/
+#if 0
+static void dump_hci_status(struct usb_hcd *hcd, const char *label)
+{
+ unsigned long status = sa1111_readl(hcd->regs + USB_STATUS);
+
+ dbg("%s USB_STATUS = { %s%s%s%s%s}", label,
+ ((status & USB_STATUS_IRQHCIRMTWKUP) ? "IRQHCIRMTWKUP " : ""),
+ ((status & USB_STATUS_IRQHCIBUFFACC) ? "IRQHCIBUFFACC " : ""),
+ ((status & USB_STATUS_NIRQHCIM) ? "" : "IRQHCIM "),
+ ((status & USB_STATUS_NHCIMFCLR) ? "" : "HCIMFCLR "),
+ ((status & USB_STATUS_USBPWRSENSE) ? "USBPWRSENSE " : ""));
+}
+#endif
-static void sa1111_start_hc(struct sa1111_dev *dev)
+static int ohci_sa1111_reset(struct usb_hcd *hcd)
{
- unsigned int usb_rst = 0;
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+
+ ohci_hcd_init(ohci);
+ return ohci_init(ohci);
+}
- printk(KERN_DEBUG "%s: starting SA-1111 OHCI USB Controller\n",
- __FILE__);
+static int __devinit ohci_sa1111_start(struct usb_hcd *hcd)
+{
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ int ret;
-#ifdef CONFIG_SA1100_BADGE4
- if (machine_is_badge4()) {
- badge4_set_5V(BADGE4_5V_USB, 1);
+ ret = ohci_run(ohci);
+ if (ret < 0) {
+ ohci_err(ohci, "can't start\n");
+ ohci_stop(hcd);
}
+ return ret;
+}
+
+static const struct hc_driver ohci_sa1111_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "SA-1111 OHCI",
+ .hcd_priv_size = sizeof(struct ohci_hcd),
+
+ /*
+ * generic hardware linkage
+ */
+ .irq = ohci_irq,
+ .flags = HCD_USB11 | HCD_MEMORY,
+
+ /*
+ * basic lifecycle operations
+ */
+ .reset = ohci_sa1111_reset,
+ .start = ohci_sa1111_start,
+ .stop = ohci_stop,
+ .shutdown = ohci_shutdown,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = ohci_urb_enqueue,
+ .urb_dequeue = ohci_urb_dequeue,
+ .endpoint_disable = ohci_endpoint_disable,
+
+ /*
+ * scheduling support
+ */
+ .get_frame_number = ohci_get_frame,
+
+ /*
+ * root hub support
+ */
+ .hub_status_data = ohci_hub_status_data,
+ .hub_control = ohci_hub_control,
+#ifdef CONFIG_PM
+ .bus_suspend = ohci_bus_suspend,
+ .bus_resume = ohci_bus_resume,
#endif
+ .start_port_reset = ohci_start_port_reset,
+};
+
+static int sa1111_start_hc(struct sa1111_dev *dev)
+{
+ unsigned int usb_rst = 0;
+ int ret;
+
+ dev_dbg(&dev->dev, "starting SA-1111 OHCI USB Controller\n");
if (machine_is_xp860() ||
machine_has_neponset() ||
@@ -51,220 +137,121 @@ static void sa1111_start_hc(struct sa1111_dev *dev)
* host controller in reset.
*/
sa1111_writel(usb_rst | USB_RESET_FORCEIFRESET | USB_RESET_FORCEHCRESET,
- dev->mapbase + SA1111_USB_RESET);
+ dev->mapbase + USB_RESET);
/*
* Now, carefully enable the USB clock, and take
* the USB host controller out of reset.
*/
- sa1111_enable_device(dev);
- udelay(11);
- sa1111_writel(usb_rst, dev->mapbase + SA1111_USB_RESET);
+ ret = sa1111_enable_device(dev);
+ if (ret == 0) {
+ udelay(11);
+ sa1111_writel(usb_rst, dev->mapbase + USB_RESET);
+ }
+
+ return ret;
}
static void sa1111_stop_hc(struct sa1111_dev *dev)
{
unsigned int usb_rst;
- printk(KERN_DEBUG "%s: stopping SA-1111 OHCI USB Controller\n",
- __FILE__);
+
+ dev_dbg(&dev->dev, "stopping SA-1111 OHCI USB Controller\n");
/*
* Put the USB host controller into reset.
*/
- usb_rst = sa1111_readl(dev->mapbase + SA1111_USB_RESET);
+ usb_rst = sa1111_readl(dev->mapbase + USB_RESET);
sa1111_writel(usb_rst | USB_RESET_FORCEIFRESET | USB_RESET_FORCEHCRESET,
- dev->mapbase + SA1111_USB_RESET);
+ dev->mapbase + USB_RESET);
/*
* Stop the USB clock.
*/
sa1111_disable_device(dev);
-
-#ifdef CONFIG_SA1100_BADGE4
- if (machine_is_badge4()) {
- /* Disable power to the USB bus */
- badge4_set_5V(BADGE4_5V_USB, 0);
- }
-#endif
-}
-
-
-/*-------------------------------------------------------------------------*/
-
-#if 0
-static void dump_hci_status(struct usb_hcd *hcd, const char *label)
-{
- unsigned long status = sa1111_readl(hcd->regs + SA1111_USB_STATUS);
-
- dbg ("%s USB_STATUS = { %s%s%s%s%s}", label,
- ((status & USB_STATUS_IRQHCIRMTWKUP) ? "IRQHCIRMTWKUP " : ""),
- ((status & USB_STATUS_IRQHCIBUFFACC) ? "IRQHCIBUFFACC " : ""),
- ((status & USB_STATUS_NIRQHCIM) ? "" : "IRQHCIM "),
- ((status & USB_STATUS_NHCIMFCLR) ? "" : "HCIMFCLR "),
- ((status & USB_STATUS_USBPWRSENSE) ? "USBPWRSENSE " : ""));
}
-#endif
-
-/*-------------------------------------------------------------------------*/
-
-/* configure so an HC device and id are always provided */
-/* always called with process context; sleeping is OK */
-
/**
- * usb_hcd_sa1111_probe - initialize SA-1111-based HCDs
- * Context: !in_interrupt()
+ * ohci_hcd_sa1111_probe - initialize SA-1111-based HCDs
*
* Allocates basic resources for this USB host controller, and
- * then invokes the start() method for the HCD associated with it
- * through the hotplug entry's driver_data.
- *
- * Store this function in the HCD's struct pci_driver as probe().
+ * then invokes the start() method for the HCD associated with it.
*/
-int usb_hcd_sa1111_probe (const struct hc_driver *driver,
- struct sa1111_dev *dev)
+static int ohci_hcd_sa1111_probe(struct sa1111_dev *dev)
{
struct usb_hcd *hcd;
- int retval;
+ int ret;
- hcd = usb_create_hcd (driver, &dev->dev, "sa1111");
+ if (usb_disabled())
+ return -ENODEV;
+
+ hcd = usb_create_hcd(&ohci_sa1111_hc_driver, &dev->dev, "sa1111");
if (!hcd)
return -ENOMEM;
+
hcd->rsrc_start = dev->res.start;
hcd->rsrc_len = resource_size(&dev->res);
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
dbg("request_mem_region failed");
- retval = -EBUSY;
+ ret = -EBUSY;
goto err1;
}
+
hcd->regs = dev->mapbase;
- sa1111_start_hc(dev);
- ohci_hcd_init(hcd_to_ohci(hcd));
+ ret = sa1111_start_hc(dev);
+ if (ret)
+ goto err2;
- retval = usb_add_hcd(hcd, dev->irq[1], 0);
- if (retval == 0)
- return retval;
+ ret = usb_add_hcd(hcd, dev->irq[1], 0);
+ if (ret == 0)
+ return ret;
sa1111_stop_hc(dev);
+ err2:
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
err1:
usb_put_hcd(hcd);
- return retval;
+ return ret;
}
-
-/* may be called without controller electrically present */
-/* may be called with controller, bus, and devices active */
-
/**
- * usb_hcd_sa1111_remove - shutdown processing for SA-1111-based HCDs
+ * ohci_hcd_sa1111_remove - shutdown processing for SA-1111-based HCDs
* @dev: USB Host Controller being removed
- * Context: !in_interrupt()
- *
- * Reverses the effect of usb_hcd_sa1111_probe(), first invoking
- * the HCD's stop() method. It is always called from a thread
- * context, normally "rmmod", "apmd", or something similar.
*
+ * Reverses the effect of ohci_hcd_sa1111_probe(), first invoking
+ * the HCD's stop() method.
*/
-void usb_hcd_sa1111_remove (struct usb_hcd *hcd, struct sa1111_dev *dev)
+static int ohci_hcd_sa1111_remove(struct sa1111_dev *dev)
{
+ struct usb_hcd *hcd = sa1111_get_drvdata(dev);
+
usb_remove_hcd(hcd);
sa1111_stop_hc(dev);
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
-}
-
-/*-------------------------------------------------------------------------*/
-static int __devinit
-ohci_sa1111_start (struct usb_hcd *hcd)
-{
- struct ohci_hcd *ohci = hcd_to_ohci (hcd);
- int ret;
-
- if ((ret = ohci_init(ohci)) < 0)
- return ret;
-
- if ((ret = ohci_run (ohci)) < 0) {
- err ("can't start %s", hcd->self.bus_name);
- ohci_stop (hcd);
- return ret;
- }
return 0;
}
-/*-------------------------------------------------------------------------*/
-
-static const struct hc_driver ohci_sa1111_hc_driver = {
- .description = hcd_name,
- .product_desc = "SA-1111 OHCI",
- .hcd_priv_size = sizeof(struct ohci_hcd),
-
- /*
- * generic hardware linkage
- */
- .irq = ohci_irq,
- .flags = HCD_USB11 | HCD_MEMORY,
-
- /*
- * basic lifecycle operations
- */
- .start = ohci_sa1111_start,
- .stop = ohci_stop,
-
- /*
- * managing i/o requests and associated device resources
- */
- .urb_enqueue = ohci_urb_enqueue,
- .urb_dequeue = ohci_urb_dequeue,
- .endpoint_disable = ohci_endpoint_disable,
-
- /*
- * scheduling support
- */
- .get_frame_number = ohci_get_frame,
-
- /*
- * root hub support
- */
- .hub_status_data = ohci_hub_status_data,
- .hub_control = ohci_hub_control,
-#ifdef CONFIG_PM
- .bus_suspend = ohci_bus_suspend,
- .bus_resume = ohci_bus_resume,
-#endif
- .start_port_reset = ohci_start_port_reset,
-};
-
-/*-------------------------------------------------------------------------*/
-
-static int ohci_hcd_sa1111_drv_probe(struct sa1111_dev *dev)
-{
- int ret;
-
- if (usb_disabled())
- return -ENODEV;
-
- ret = usb_hcd_sa1111_probe(&ohci_sa1111_hc_driver, dev);
- return ret;
-}
-
-static int ohci_hcd_sa1111_drv_remove(struct sa1111_dev *dev)
+static void ohci_hcd_sa1111_shutdown(struct sa1111_dev *dev)
{
struct usb_hcd *hcd = sa1111_get_drvdata(dev);
- usb_hcd_sa1111_remove(hcd, dev);
- return 0;
+ if (test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
+ hcd->driver->shutdown(hcd);
+ sa1111_stop_hc(dev);
+ }
}
static struct sa1111_driver ohci_hcd_sa1111_driver = {
.drv = {
.name = "sa1111-ohci",
+ .owner = THIS_MODULE,
},
.devid = SA1111_DEVID_USB,
- .probe = ohci_hcd_sa1111_drv_probe,
- .remove = ohci_hcd_sa1111_drv_remove,
+ .probe = ohci_hcd_sa1111_probe,
+ .remove = ohci_hcd_sa1111_remove,
+ .shutdown = ohci_hcd_sa1111_shutdown,
};
-
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 6815701cf656..836cfa9a515f 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -903,8 +903,10 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) },
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 9dab1f51dd43..f0da2c32fbde 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -588,7 +588,7 @@ static int vhost_net_release(struct inode *inode, struct file *f)
vhost_net_stop(n, &tx_sock, &rx_sock);
vhost_net_flush(n);
- vhost_dev_cleanup(&n->dev);
+ vhost_dev_cleanup(&n->dev, false);
if (tx_sock)
fput(tx_sock->file);
if (rx_sock)
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index bdb2d6436b2b..947f00d8e091 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -222,6 +222,8 @@ static int vhost_worker(void *data)
if (work) {
__set_current_state(TASK_RUNNING);
work->fn(work);
+ if (need_resched())
+ schedule();
} else
schedule();
@@ -403,7 +405,7 @@ long vhost_dev_reset_owner(struct vhost_dev *dev)
if (!memory)
return -ENOMEM;
- vhost_dev_cleanup(dev);
+ vhost_dev_cleanup(dev, true);
memory->nregions = 0;
RCU_INIT_POINTER(dev->memory, memory);
@@ -434,8 +436,8 @@ int vhost_zerocopy_signal_used(struct vhost_virtqueue *vq)
return j;
}
-/* Caller should have device mutex */
-void vhost_dev_cleanup(struct vhost_dev *dev)
+/* Caller should have device mutex if and only if locked is set */
+void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
{
int i;
@@ -472,7 +474,8 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
dev->log_file = NULL;
/* No one will access memory at this point */
kfree(rcu_dereference_protected(dev->memory,
- lockdep_is_held(&dev->mutex)));
+ locked ==
+ lockdep_is_held(&dev->mutex)));
RCU_INIT_POINTER(dev->memory, NULL);
WARN_ON(!list_empty(&dev->work_list));
if (dev->worker) {
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index a801e2821d03..8dcf4cca6bf2 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -163,7 +163,7 @@ struct vhost_dev {
long vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue *vqs, int nvqs);
long vhost_dev_check_owner(struct vhost_dev *);
long vhost_dev_reset_owner(struct vhost_dev *);
-void vhost_dev_cleanup(struct vhost_dev *);
+void vhost_dev_cleanup(struct vhost_dev *, bool locked);
long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, unsigned long arg);
int vhost_vq_access_ok(struct vhost_virtqueue *vq);
int vhost_log_access_ok(struct vhost_dev *);
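
The new bool tells vhost_dev_cleanup() whether the caller holds dev->mutex, so the lockdep_is_held() check guarding the RCU-protected memory free stays accurate on both paths. The caller-side contract, sketched with illustrative wrappers:

#include <linux/mutex.h>
#include "vhost.h"	/* struct vhost_dev, vhost_dev_cleanup() */

static void reset_path(struct vhost_dev *dev)
{
	mutex_lock(&dev->mutex);
	vhost_dev_cleanup(dev, true);	/* mutex held, as in reset_owner */
	mutex_unlock(&dev->mutex);
}

static void release_path(struct vhost_dev *dev)
{
	/* final release: no concurrent users, so no mutex is taken */
	vhost_dev_cleanup(dev, false);
}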
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index a8a897ac5446..a290be51a1f4 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -2061,7 +2061,7 @@ config FB_S3C_DEBUG_REGWRITE
config FB_S3C2410
tristate "S3C2410 LCD framebuffer support"
- depends on FB && ARCH_S3C2410
+ depends on FB && ARCH_S3C24XX
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
diff --git a/drivers/video/backlight/ep93xx_bl.c b/drivers/video/backlight/ep93xx_bl.c
index b62b8b9063b5..08214e1f0958 100644
--- a/drivers/video/backlight/ep93xx_bl.c
+++ b/drivers/video/backlight/ep93xx_bl.c
@@ -17,11 +17,6 @@
#include <linux/fb.h>
#include <linux/backlight.h>
-#include <mach/hardware.h>
-
-#define EP93XX_RASTER_REG(x) (EP93XX_RASTER_BASE + (x))
-#define EP93XX_RASTER_BRIGHTNESS EP93XX_RASTER_REG(0x20)
-
#define EP93XX_MAX_COUNT 255
#define EP93XX_MAX_BRIGHT 255
#define EP93XX_DEF_BRIGHT 128
@@ -35,7 +30,7 @@ static int ep93xxbl_set(struct backlight_device *bl, int brightness)
{
struct ep93xxbl *ep93xxbl = bl_get_data(bl);
- __raw_writel((brightness << 8) | EP93XX_MAX_COUNT, ep93xxbl->mmio);
+ writel((brightness << 8) | EP93XX_MAX_COUNT, ep93xxbl->mmio);
ep93xxbl->brightness = brightness;
@@ -70,21 +65,29 @@ static int __init ep93xxbl_probe(struct platform_device *dev)
struct ep93xxbl *ep93xxbl;
struct backlight_device *bl;
struct backlight_properties props;
+ struct resource *res;
ep93xxbl = devm_kzalloc(&dev->dev, sizeof(*ep93xxbl), GFP_KERNEL);
if (!ep93xxbl)
return -ENOMEM;
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENXIO;
+
/*
- * This register is located in the range already ioremap'ed by
- * the framebuffer driver. A MFD driver seems a bit of overkill
- * to handle this so use the static I/O mapping; this address
- * is already virtual.
+ * FIXME - We don't do a request_mem_region here because we are
+ * sharing the register space with the framebuffer driver (see
+ * drivers/video/ep93xx-fb.c) and doing so will cause the second
+ * loaded driver to return -EBUSY.
*
* NOTE: No locking is required; the framebuffer does not touch
* this register.
*/
- ep93xxbl->mmio = EP93XX_RASTER_BRIGHTNESS;
+ ep93xxbl->mmio = devm_ioremap(&dev->dev, res->start,
+ resource_size(res));
+ if (!ep93xxbl->mmio)
+ return -ENXIO;
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
diff --git a/drivers/video/ep93xx-fb.c b/drivers/video/ep93xx-fb.c
index 2e830ec52a5a..f8babbeee275 100644
--- a/drivers/video/ep93xx-fb.c
+++ b/drivers/video/ep93xx-fb.c
@@ -519,12 +519,15 @@ static int __devinit ep93xxfb_probe(struct platform_device *pdev)
goto failed;
}
- res = request_mem_region(res->start, resource_size(res), pdev->name);
- if (!res) {
- err = -EBUSY;
- goto failed;
- }
-
+ /*
+ * FIXME - We don't do a request_mem_region here because we are
+ * sharing the register space with the backlight driver (see
+ * drivers/video/backlight/ep93xx_bl.c) and doing so will cause
+ * the second loaded driver to return -EBUSY.
+ *
+ * NOTE: No locking is required; the backlight does not touch
+ * any of the framebuffer registers.
+ */
fbi->res = res;
fbi->mmio_base = ioremap(res->start, resource_size(res));
if (!fbi->mmio_base) {
@@ -586,8 +589,6 @@ failed:
clk_put(fbi->clk);
if (fbi->mmio_base)
iounmap(fbi->mmio_base);
- if (fbi->res)
- release_mem_region(fbi->res->start, resource_size(fbi->res));
ep93xxfb_dealloc_videomem(info);
if (&info->cmap)
fb_dealloc_cmap(&info->cmap);
@@ -608,7 +609,6 @@ static int __devexit ep93xxfb_remove(struct platform_device *pdev)
clk_disable(fbi->clk);
clk_put(fbi->clk);
iounmap(fbi->mmio_base);
- release_mem_region(fbi->res->start, resource_size(fbi->res));
ep93xxfb_dealloc_videomem(info);
fb_dealloc_cmap(&info->cmap);
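
Both ep93xx hunks deliberately skip request_mem_region() because the backlight and framebuffer drivers share one register window; the backlight side maps it with devm_ioremap(), which also removes any need for a manual unmap. The mapping idiom alone (hypothetical driver):

#include <linux/io.h>
#include <linux/platform_device.h>

/* Sketch: map a register window that another driver also uses; no
 * request_mem_region(), or the second driver to load gets -EBUSY. */
static void __iomem *map_shared_regs(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!res)
		return NULL;

	return devm_ioremap(&pdev->dev, res->start, resource_size(res));
}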
diff --git a/drivers/video/omap/lcd_ams_delta.c b/drivers/video/omap/lcd_ams_delta.c
index 0fdd6f6873bf..d3a311327227 100644
--- a/drivers/video/omap/lcd_ams_delta.c
+++ b/drivers/video/omap/lcd_ams_delta.c
@@ -25,6 +25,7 @@
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/lcd.h>
+#include <linux/gpio.h>
#include <plat/board-ams-delta.h>
#include <mach/hardware.h>
@@ -98,29 +99,41 @@ static struct lcd_ops ams_delta_lcd_ops = {
/* omapfb panel section */
+static const struct gpio _gpios[] = {
+ {
+ .gpio = AMS_DELTA_GPIO_PIN_LCD_VBLEN,
+ .flags = GPIOF_OUT_INIT_LOW,
+ .label = "lcd_vblen",
+ },
+ {
+ .gpio = AMS_DELTA_GPIO_PIN_LCD_NDISP,
+ .flags = GPIOF_OUT_INIT_LOW,
+ .label = "lcd_ndisp",
+ },
+};
+
static int ams_delta_panel_init(struct lcd_panel *panel,
struct omapfb_device *fbdev)
{
- return 0;
+ return gpio_request_array(_gpios, ARRAY_SIZE(_gpios));
}
static void ams_delta_panel_cleanup(struct lcd_panel *panel)
{
+ gpio_free_array(_gpios, ARRAY_SIZE(_gpios));
}
static int ams_delta_panel_enable(struct lcd_panel *panel)
{
- ams_delta_latch2_write(AMS_DELTA_LATCH2_LCD_NDISP,
- AMS_DELTA_LATCH2_LCD_NDISP);
- ams_delta_latch2_write(AMS_DELTA_LATCH2_LCD_VBLEN,
- AMS_DELTA_LATCH2_LCD_VBLEN);
+ gpio_set_value(AMS_DELTA_GPIO_PIN_LCD_NDISP, 1);
+ gpio_set_value(AMS_DELTA_GPIO_PIN_LCD_VBLEN, 1);
return 0;
}
static void ams_delta_panel_disable(struct lcd_panel *panel)
{
- ams_delta_latch2_write(AMS_DELTA_LATCH2_LCD_VBLEN, 0);
- ams_delta_latch2_write(AMS_DELTA_LATCH2_LCD_NDISP, 0);
+ gpio_set_value(AMS_DELTA_GPIO_PIN_LCD_VBLEN, 0);
+ gpio_set_value(AMS_DELTA_GPIO_PIN_LCD_NDISP, 0);
}
static unsigned long ams_delta_panel_get_caps(struct lcd_panel *panel)
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index bddd64b435b9..ee30937482e1 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -3318,11 +3318,6 @@ static void _omap_dispc_initial_config(void)
if (dss_has_feature(FEAT_FUNCGATED))
REG_FLD_MOD(DISPC_CONFIG, 1, 9, 9);
- /* L3 firewall setting: enable access to OCM RAM */
- /* XXX this should be somewhere in plat-omap */
- if (cpu_is_omap24xx())
- __raw_writel(0x402000b0, OMAP2_L3_IO_ADDRESS(0x680050a0));
-
_dispc_setup_color_conv_coef();
dispc_set_loadmode(OMAP_DSS_LOAD_FRAME_ONLY);
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c
index 4a6b5eeef6a7..bd2d5e159463 100644
--- a/drivers/video/omap2/dss/dss.c
+++ b/drivers/video/omap2/dss/dss.c
@@ -33,7 +33,10 @@
#include <linux/pm_runtime.h>
#include <video/omapdss.h>
+
+#include <plat/cpu.h>
#include <plat/clock.h>
+
#include "dss.h"
#include "dss_features.h"
diff --git a/drivers/video/sa1100fb.c b/drivers/video/sa1100fb.c
index 98d55d0e2da5..b6325848ad61 100644
--- a/drivers/video/sa1100fb.c
+++ b/drivers/video/sa1100fb.c
@@ -173,282 +173,48 @@
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/cpufreq.h>
+#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/mutex.h>
#include <linux/io.h>
+#include <video/sa1100fb.h>
+
#include <mach/hardware.h>
#include <asm/mach-types.h>
-#include <mach/assabet.h>
#include <mach/shannon.h>
/*
- * debugging?
- */
-#define DEBUG 0
-/*
* Complain if VAR is out of range.
*/
#define DEBUG_VAR 1
-#undef ASSABET_PAL_VIDEO
-
#include "sa1100fb.h"
-extern void (*sa1100fb_backlight_power)(int on);
-extern void (*sa1100fb_lcd_power)(int on);
-
-static struct sa1100fb_rgb rgb_4 = {
+static const struct sa1100fb_rgb rgb_4 = {
.red = { .offset = 0, .length = 4, },
.green = { .offset = 0, .length = 4, },
.blue = { .offset = 0, .length = 4, },
.transp = { .offset = 0, .length = 0, },
};
-static struct sa1100fb_rgb rgb_8 = {
+static const struct sa1100fb_rgb rgb_8 = {
.red = { .offset = 0, .length = 8, },
.green = { .offset = 0, .length = 8, },
.blue = { .offset = 0, .length = 8, },
.transp = { .offset = 0, .length = 0, },
};
-static struct sa1100fb_rgb def_rgb_16 = {
+static const struct sa1100fb_rgb def_rgb_16 = {
.red = { .offset = 11, .length = 5, },
.green = { .offset = 5, .length = 6, },
.blue = { .offset = 0, .length = 5, },
.transp = { .offset = 0, .length = 0, },
};
-#ifdef CONFIG_SA1100_ASSABET
-#ifndef ASSABET_PAL_VIDEO
-/*
- * The assabet uses a sharp LQ039Q2DS54 LCD module. It is actually
- * takes an RGB666 signal, but we provide it with an RGB565 signal
- * instead (def_rgb_16).
- */
-static struct sa1100fb_mach_info lq039q2ds54_info __initdata = {
- .pixclock = 171521, .bpp = 16,
- .xres = 320, .yres = 240,
-
- .hsync_len = 5, .vsync_len = 1,
- .left_margin = 61, .upper_margin = 3,
- .right_margin = 9, .lower_margin = 0,
-
- .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
-
- .lccr0 = LCCR0_Color | LCCR0_Sngl | LCCR0_Act,
- .lccr3 = LCCR3_OutEnH | LCCR3_PixRsEdg | LCCR3_ACBsDiv(2),
-};
-#else
-static struct sa1100fb_mach_info pal_info __initdata = {
- .pixclock = 67797, .bpp = 16,
- .xres = 640, .yres = 512,
-
- .hsync_len = 64, .vsync_len = 6,
- .left_margin = 125, .upper_margin = 70,
- .right_margin = 115, .lower_margin = 36,
-
- .lccr0 = LCCR0_Color | LCCR0_Sngl | LCCR0_Act,
- .lccr3 = LCCR3_OutEnH | LCCR3_PixRsEdg | LCCR3_ACBsDiv(512),
-};
-#endif
-#endif
-
-#ifdef CONFIG_SA1100_H3600
-static struct sa1100fb_mach_info h3600_info __initdata = {
- .pixclock = 174757, .bpp = 16,
- .xres = 320, .yres = 240,
-
- .hsync_len = 3, .vsync_len = 3,
- .left_margin = 12, .upper_margin = 10,
- .right_margin = 17, .lower_margin = 1,
-
- .cmap_static = 1,
-
- .lccr0 = LCCR0_Color | LCCR0_Sngl | LCCR0_Act,
- .lccr3 = LCCR3_OutEnH | LCCR3_PixRsEdg | LCCR3_ACBsDiv(2),
-};
-
-static struct sa1100fb_rgb h3600_rgb_16 = {
- .red = { .offset = 12, .length = 4, },
- .green = { .offset = 7, .length = 4, },
- .blue = { .offset = 1, .length = 4, },
- .transp = { .offset = 0, .length = 0, },
-};
-#endif
-
-#ifdef CONFIG_SA1100_H3100
-static struct sa1100fb_mach_info h3100_info __initdata = {
- .pixclock = 406977, .bpp = 4,
- .xres = 320, .yres = 240,
-
- .hsync_len = 26, .vsync_len = 41,
- .left_margin = 4, .upper_margin = 0,
- .right_margin = 4, .lower_margin = 0,
-
- .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
- .cmap_greyscale = 1,
- .cmap_inverse = 1,
-
- .lccr0 = LCCR0_Mono | LCCR0_4PixMono | LCCR0_Sngl | LCCR0_Pas,
- .lccr3 = LCCR3_OutEnH | LCCR3_PixRsEdg | LCCR3_ACBsDiv(2),
-};
-#endif
-
-#ifdef CONFIG_SA1100_COLLIE
-static struct sa1100fb_mach_info collie_info __initdata = {
- .pixclock = 171521, .bpp = 16,
- .xres = 320, .yres = 240,
-
- .hsync_len = 5, .vsync_len = 1,
- .left_margin = 11, .upper_margin = 2,
- .right_margin = 30, .lower_margin = 0,
-
- .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
-
- .lccr0 = LCCR0_Color | LCCR0_Sngl | LCCR0_Act,
- .lccr3 = LCCR3_OutEnH | LCCR3_PixRsEdg | LCCR3_ACBsDiv(2),
-};
-#endif
-
-#ifdef LART_GREY_LCD
-static struct sa1100fb_mach_info lart_grey_info __initdata = {
- .pixclock = 150000, .bpp = 4,
- .xres = 320, .yres = 240,
-
- .hsync_len = 1, .vsync_len = 1,
- .left_margin = 4, .upper_margin = 0,
- .right_margin = 2, .lower_margin = 0,
-
- .cmap_greyscale = 1,
- .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
-
- .lccr0 = LCCR0_Mono | LCCR0_Sngl | LCCR0_Pas | LCCR0_4PixMono,
- .lccr3 = LCCR3_OutEnH | LCCR3_PixRsEdg | LCCR3_ACBsDiv(512),
-};
-#endif
-#ifdef LART_COLOR_LCD
-static struct sa1100fb_mach_info lart_color_info __initdata = {
- .pixclock = 150000, .bpp = 16,
- .xres = 320, .yres = 240,
-
- .hsync_len = 2, .vsync_len = 3,
- .left_margin = 69, .upper_margin = 14,
- .right_margin = 8, .lower_margin = 4,
-
- .lccr0 = LCCR0_Color | LCCR0_Sngl | LCCR0_Act,
- .lccr3 = LCCR3_OutEnH | LCCR3_PixFlEdg | LCCR3_ACBsDiv(512),
-};
-#endif
-#ifdef LART_VIDEO_OUT
-static struct sa1100fb_mach_info lart_video_info __initdata = {
- .pixclock = 39721, .bpp = 16,
- .xres = 640, .yres = 480,
-
- .hsync_len = 95, .vsync_len = 2,
- .left_margin = 40, .upper_margin = 32,
- .right_margin = 24, .lower_margin = 11,
-
- .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
-
- .lccr0 = LCCR0_Color | LCCR0_Sngl | LCCR0_Act,
- .lccr3 = LCCR3_OutEnL | LCCR3_PixFlEdg | LCCR3_ACBsDiv(512),
-};
-#endif
-
-#ifdef LART_KIT01_LCD
-static struct sa1100fb_mach_info lart_kit01_info __initdata = {
- .pixclock = 63291, .bpp = 16,
- .xres = 640, .yres = 480,
-
- .hsync_len = 64, .vsync_len = 3,
- .left_margin = 122, .upper_margin = 45,
- .right_margin = 10, .lower_margin = 10,
-
- .lccr0 = LCCR0_Color | LCCR0_Sngl | LCCR0_Act,
- .lccr3 = LCCR3_OutEnH | LCCR3_PixFlEdg
-};
-#endif
-
-#ifdef CONFIG_SA1100_SHANNON
-static struct sa1100fb_mach_info shannon_info __initdata = {
- .pixclock = 152500, .bpp = 8,
- .xres = 640, .yres = 480,
-
- .hsync_len = 4, .vsync_len = 3,
- .left_margin = 2, .upper_margin = 0,
- .right_margin = 1, .lower_margin = 0,
-
- .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
-
- .lccr0 = LCCR0_Color | LCCR0_Dual | LCCR0_Pas,
- .lccr3 = LCCR3_ACBsDiv(512),
-};
-#endif
-
-static struct sa1100fb_mach_info * __init
-sa1100fb_get_machine_info(struct sa1100fb_info *fbi)
-{
- struct sa1100fb_mach_info *inf = NULL;
-
- /*
- * R G B T
- * default {11,5}, { 5,6}, { 0,5}, { 0,0}
- * h3600 {12,4}, { 7,4}, { 1,4}, { 0,0}
- * freebird { 8,4}, { 4,4}, { 0,4}, {12,4}
- */
-#ifdef CONFIG_SA1100_ASSABET
- if (machine_is_assabet()) {
-#ifndef ASSABET_PAL_VIDEO
- inf = &lq039q2ds54_info;
-#else
- inf = &pal_info;
-#endif
- }
-#endif
-#ifdef CONFIG_SA1100_H3100
- if (machine_is_h3100()) {
- inf = &h3100_info;
- }
-#endif
-#ifdef CONFIG_SA1100_H3600
- if (machine_is_h3600()) {
- inf = &h3600_info;
- fbi->rgb[RGB_16] = &h3600_rgb_16;
- }
-#endif
-#ifdef CONFIG_SA1100_COLLIE
- if (machine_is_collie()) {
- inf = &collie_info;
- }
-#endif
-#ifdef CONFIG_SA1100_LART
- if (machine_is_lart()) {
-#ifdef LART_GREY_LCD
- inf = &lart_grey_info;
-#endif
-#ifdef LART_COLOR_LCD
- inf = &lart_color_info;
-#endif
-#ifdef LART_VIDEO_OUT
- inf = &lart_video_info;
-#endif
-#ifdef LART_KIT01_LCD
- inf = &lart_kit01_info;
-#endif
- }
-#endif
-#ifdef CONFIG_SA1100_SHANNON
- if (machine_is_shannon()) {
- inf = &shannon_info;
- }
-#endif
- return inf;
-}
-
static int sa1100fb_activate_var(struct fb_var_screeninfo *var, struct sa1100fb_info *);
static void set_ctrlr_state(struct sa1100fb_info *fbi, u_int state);
@@ -533,7 +299,7 @@ sa1100fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
* is what you poke into the framebuffer to produce the
* colour you requested.
*/
- if (fbi->cmap_inverse) {
+ if (fbi->inf->cmap_inverse) {
red = 0xffff - red;
green = 0xffff - green;
blue = 0xffff - blue;
@@ -607,14 +373,14 @@ sa1100fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
var->xres = MIN_XRES;
if (var->yres < MIN_YRES)
var->yres = MIN_YRES;
- if (var->xres > fbi->max_xres)
- var->xres = fbi->max_xres;
- if (var->yres > fbi->max_yres)
- var->yres = fbi->max_yres;
+ if (var->xres > fbi->inf->xres)
+ var->xres = fbi->inf->xres;
+ if (var->yres > fbi->inf->yres)
+ var->yres = fbi->inf->yres;
var->xres_virtual = max(var->xres_virtual, var->xres);
var->yres_virtual = max(var->yres_virtual, var->yres);
- DPRINTK("var->bits_per_pixel=%d\n", var->bits_per_pixel);
+ dev_dbg(fbi->dev, "var->bits_per_pixel=%d\n", var->bits_per_pixel);
switch (var->bits_per_pixel) {
case 4:
rgbidx = RGB_4;
@@ -638,16 +404,16 @@ sa1100fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
var->blue = fbi->rgb[rgbidx]->blue;
var->transp = fbi->rgb[rgbidx]->transp;
- DPRINTK("RGBT length = %d:%d:%d:%d\n",
+ dev_dbg(fbi->dev, "RGBT length = %d:%d:%d:%d\n",
var->red.length, var->green.length, var->blue.length,
var->transp.length);
- DPRINTK("RGBT offset = %d:%d:%d:%d\n",
+ dev_dbg(fbi->dev, "RGBT offset = %d:%d:%d:%d\n",
var->red.offset, var->green.offset, var->blue.offset,
var->transp.offset);
#ifdef CONFIG_CPU_FREQ
- printk(KERN_DEBUG "dma period = %d ps, clock = %d kHz\n",
+ dev_dbg(fbi->dev, "dma period = %d ps, clock = %d kHz\n",
sa1100fb_display_dma_period(var),
cpufreq_get(smp_processor_id()));
#endif
@@ -655,22 +421,10 @@ sa1100fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
return 0;
}
-static inline void sa1100fb_set_truecolor(u_int is_true_color)
+static void sa1100fb_set_visual(struct sa1100fb_info *fbi, u32 visual)
{
- if (machine_is_assabet()) {
-#if 1 // phase 4 or newer Assabet's
- if (is_true_color)
- ASSABET_BCR_set(ASSABET_BCR_LCD_12RGB);
- else
- ASSABET_BCR_clear(ASSABET_BCR_LCD_12RGB);
-#else
- // older Assabet's
- if (is_true_color)
- ASSABET_BCR_clear(ASSABET_BCR_LCD_12RGB);
- else
- ASSABET_BCR_set(ASSABET_BCR_LCD_12RGB);
-#endif
- }
+ if (fbi->inf->set_visual)
+ fbi->inf->set_visual(visual);
}
/*
@@ -683,11 +437,11 @@ static int sa1100fb_set_par(struct fb_info *info)
struct fb_var_screeninfo *var = &info->var;
unsigned long palette_mem_size;
- DPRINTK("set_par\n");
+ dev_dbg(fbi->dev, "set_par\n");
if (var->bits_per_pixel == 16)
fbi->fb.fix.visual = FB_VISUAL_TRUECOLOR;
- else if (!fbi->cmap_static)
+ else if (!fbi->inf->cmap_static)
fbi->fb.fix.visual = FB_VISUAL_PSEUDOCOLOR;
else {
/*
@@ -704,7 +458,7 @@ static int sa1100fb_set_par(struct fb_info *info)
palette_mem_size = fbi->palette_size * sizeof(u16);
- DPRINTK("palette_mem_size = 0x%08lx\n", (u_long) palette_mem_size);
+ dev_dbg(fbi->dev, "palette_mem_size = 0x%08lx\n", palette_mem_size);
fbi->palette_cpu = (u16 *)(fbi->map_cpu + PAGE_SIZE - palette_mem_size);
fbi->palette_dma = fbi->map_dma + PAGE_SIZE - palette_mem_size;
@@ -712,7 +466,7 @@ static int sa1100fb_set_par(struct fb_info *info)
/*
* Set (any) board control register to handle new color depth
*/
- sa1100fb_set_truecolor(fbi->fb.fix.visual == FB_VISUAL_TRUECOLOR);
+ sa1100fb_set_visual(fbi, fbi->fb.fix.visual);
sa1100fb_activate_var(var, fbi);
return 0;
@@ -728,7 +482,7 @@ sa1100fb_set_cmap(struct fb_cmap *cmap, int kspc, int con,
/*
* Make sure the user isn't doing something stupid.
*/
- if (!kspc && (fbi->fb.var.bits_per_pixel == 16 || fbi->cmap_static))
+ if (!kspc && (fbi->fb.var.bits_per_pixel == 16 || fbi->inf->cmap_static))
return -EINVAL;
return gen_set_cmap(cmap, kspc, con, info);
@@ -775,7 +529,7 @@ static int sa1100fb_blank(int blank, struct fb_info *info)
struct sa1100fb_info *fbi = (struct sa1100fb_info *)info;
int i;
- DPRINTK("sa1100fb_blank: blank=%d\n", blank);
+ dev_dbg(fbi->dev, "sa1100fb_blank: blank=%d\n", blank);
switch (blank) {
case FB_BLANK_POWERDOWN:
@@ -863,43 +617,43 @@ static int sa1100fb_activate_var(struct fb_var_screeninfo *var, struct sa1100fb_
u_int half_screen_size, yres, pcd;
u_long flags;
- DPRINTK("Configuring SA1100 LCD\n");
+ dev_dbg(fbi->dev, "Configuring SA1100 LCD\n");
- DPRINTK("var: xres=%d hslen=%d lm=%d rm=%d\n",
+ dev_dbg(fbi->dev, "var: xres=%d hslen=%d lm=%d rm=%d\n",
var->xres, var->hsync_len,
var->left_margin, var->right_margin);
- DPRINTK("var: yres=%d vslen=%d um=%d bm=%d\n",
+ dev_dbg(fbi->dev, "var: yres=%d vslen=%d um=%d bm=%d\n",
var->yres, var->vsync_len,
var->upper_margin, var->lower_margin);
#if DEBUG_VAR
if (var->xres < 16 || var->xres > 1024)
- printk(KERN_ERR "%s: invalid xres %d\n",
+ dev_err(fbi->dev, "%s: invalid xres %d\n",
fbi->fb.fix.id, var->xres);
if (var->hsync_len < 1 || var->hsync_len > 64)
- printk(KERN_ERR "%s: invalid hsync_len %d\n",
+ dev_err(fbi->dev, "%s: invalid hsync_len %d\n",
fbi->fb.fix.id, var->hsync_len);
if (var->left_margin < 1 || var->left_margin > 255)
- printk(KERN_ERR "%s: invalid left_margin %d\n",
+ dev_err(fbi->dev, "%s: invalid left_margin %d\n",
fbi->fb.fix.id, var->left_margin);
if (var->right_margin < 1 || var->right_margin > 255)
- printk(KERN_ERR "%s: invalid right_margin %d\n",
+ dev_err(fbi->dev, "%s: invalid right_margin %d\n",
fbi->fb.fix.id, var->right_margin);
if (var->yres < 1 || var->yres > 1024)
- printk(KERN_ERR "%s: invalid yres %d\n",
+ dev_err(fbi->dev, "%s: invalid yres %d\n",
fbi->fb.fix.id, var->yres);
if (var->vsync_len < 1 || var->vsync_len > 64)
- printk(KERN_ERR "%s: invalid vsync_len %d\n",
+ dev_err(fbi->dev, "%s: invalid vsync_len %d\n",
fbi->fb.fix.id, var->vsync_len);
if (var->upper_margin < 0 || var->upper_margin > 255)
- printk(KERN_ERR "%s: invalid upper_margin %d\n",
+ dev_err(fbi->dev, "%s: invalid upper_margin %d\n",
fbi->fb.fix.id, var->upper_margin);
if (var->lower_margin < 0 || var->lower_margin > 255)
- printk(KERN_ERR "%s: invalid lower_margin %d\n",
+ dev_err(fbi->dev, "%s: invalid lower_margin %d\n",
fbi->fb.fix.id, var->lower_margin);
#endif
- new_regs.lccr0 = fbi->lccr0 |
+ new_regs.lccr0 = fbi->inf->lccr0 |
LCCR0_LEN | LCCR0_LDM | LCCR0_BAM |
LCCR0_ERM | LCCR0_LtlEnd | LCCR0_DMADel(0);
@@ -914,7 +668,7 @@ static int sa1100fb_activate_var(struct fb_var_screeninfo *var, struct sa1100fb_
* the YRES parameter.
*/
yres = var->yres;
- if (fbi->lccr0 & LCCR0_Dual)
+ if (fbi->inf->lccr0 & LCCR0_Dual)
yres /= 2;
new_regs.lccr2 =
@@ -924,14 +678,14 @@ static int sa1100fb_activate_var(struct fb_var_screeninfo *var, struct sa1100fb_
LCCR2_EndFrmDel(var->lower_margin);
pcd = get_pcd(var->pixclock, cpufreq_get(0));
- new_regs.lccr3 = LCCR3_PixClkDiv(pcd) | fbi->lccr3 |
+ new_regs.lccr3 = LCCR3_PixClkDiv(pcd) | fbi->inf->lccr3 |
(var->sync & FB_SYNC_HOR_HIGH_ACT ? LCCR3_HorSnchH : LCCR3_HorSnchL) |
(var->sync & FB_SYNC_VERT_HIGH_ACT ? LCCR3_VrtSnchH : LCCR3_VrtSnchL);
- DPRINTK("nlccr0 = 0x%08lx\n", new_regs.lccr0);
- DPRINTK("nlccr1 = 0x%08lx\n", new_regs.lccr1);
- DPRINTK("nlccr2 = 0x%08lx\n", new_regs.lccr2);
- DPRINTK("nlccr3 = 0x%08lx\n", new_regs.lccr3);
+ dev_dbg(fbi->dev, "nlccr0 = 0x%08lx\n", new_regs.lccr0);
+ dev_dbg(fbi->dev, "nlccr1 = 0x%08lx\n", new_regs.lccr1);
+ dev_dbg(fbi->dev, "nlccr2 = 0x%08lx\n", new_regs.lccr2);
+ dev_dbg(fbi->dev, "nlccr3 = 0x%08lx\n", new_regs.lccr3);
half_screen_size = var->bits_per_pixel;
half_screen_size = half_screen_size * var->xres * var->yres / 16;
@@ -951,9 +705,12 @@ static int sa1100fb_activate_var(struct fb_var_screeninfo *var, struct sa1100fb_
* Only update the registers if the controller is enabled
* and something has changed.
*/
- if ((LCCR0 != fbi->reg_lccr0) || (LCCR1 != fbi->reg_lccr1) ||
- (LCCR2 != fbi->reg_lccr2) || (LCCR3 != fbi->reg_lccr3) ||
- (DBAR1 != fbi->dbar1) || (DBAR2 != fbi->dbar2))
+ if (readl_relaxed(fbi->base + LCCR0) != fbi->reg_lccr0 ||
+ readl_relaxed(fbi->base + LCCR1) != fbi->reg_lccr1 ||
+ readl_relaxed(fbi->base + LCCR2) != fbi->reg_lccr2 ||
+ readl_relaxed(fbi->base + LCCR3) != fbi->reg_lccr3 ||
+ readl_relaxed(fbi->base + DBAR1) != fbi->dbar1 ||
+ readl_relaxed(fbi->base + DBAR2) != fbi->dbar2)
sa1100fb_schedule_work(fbi, C_REENABLE);
return 0;
@@ -967,18 +724,18 @@ static int sa1100fb_activate_var(struct fb_var_screeninfo *var, struct sa1100fb_
*/
static inline void __sa1100fb_backlight_power(struct sa1100fb_info *fbi, int on)
{
- DPRINTK("backlight o%s\n", on ? "n" : "ff");
+ dev_dbg(fbi->dev, "backlight o%s\n", on ? "n" : "ff");
- if (sa1100fb_backlight_power)
- sa1100fb_backlight_power(on);
+ if (fbi->inf->backlight_power)
+ fbi->inf->backlight_power(on);
}
static inline void __sa1100fb_lcd_power(struct sa1100fb_info *fbi, int on)
{
- DPRINTK("LCD power o%s\n", on ? "n" : "ff");
+ dev_dbg(fbi->dev, "LCD power o%s\n", on ? "n" : "ff");
- if (sa1100fb_lcd_power)
- sa1100fb_lcd_power(on);
+ if (fbi->inf->lcd_power)
+ fbi->inf->lcd_power(on);
}
static void sa1100fb_setup_gpio(struct sa1100fb_info *fbi)
@@ -1008,14 +765,25 @@ static void sa1100fb_setup_gpio(struct sa1100fb_info *fbi)
}
if (mask) {
+ unsigned long flags;
+
+ /*
+ * SA-1100 requires the GPIO direction register set
+ * appropriately for the alternate function. Hence
+ * we set it here via bitmask rather than excessive
+ * fiddling via the GPIO subsystem - and even then
+ * we'll still have to deal with GAFR.
+ */
+ local_irq_save(flags);
GPDR |= mask;
GAFR |= mask;
+ local_irq_restore(flags);
}
}
static void sa1100fb_enable_controller(struct sa1100fb_info *fbi)
{
- DPRINTK("Enabling LCD controller\n");
+ dev_dbg(fbi->dev, "Enabling LCD controller\n");
/*
* Make sure the mode bits are present in the first palette entry
@@ -1024,43 +792,46 @@ static void sa1100fb_enable_controller(struct sa1100fb_info *fbi)
fbi->palette_cpu[0] |= palette_pbs(&fbi->fb.var);
/* Sequence from 11.7.10 */
- LCCR3 = fbi->reg_lccr3;
- LCCR2 = fbi->reg_lccr2;
- LCCR1 = fbi->reg_lccr1;
- LCCR0 = fbi->reg_lccr0 & ~LCCR0_LEN;
- DBAR1 = fbi->dbar1;
- DBAR2 = fbi->dbar2;
- LCCR0 |= LCCR0_LEN;
-
- if (machine_is_shannon()) {
- GPDR |= SHANNON_GPIO_DISP_EN;
- GPSR |= SHANNON_GPIO_DISP_EN;
- }
-
- DPRINTK("DBAR1 = 0x%08x\n", DBAR1);
- DPRINTK("DBAR2 = 0x%08x\n", DBAR2);
- DPRINTK("LCCR0 = 0x%08x\n", LCCR0);
- DPRINTK("LCCR1 = 0x%08x\n", LCCR1);
- DPRINTK("LCCR2 = 0x%08x\n", LCCR2);
- DPRINTK("LCCR3 = 0x%08x\n", LCCR3);
+ writel_relaxed(fbi->reg_lccr3, fbi->base + LCCR3);
+ writel_relaxed(fbi->reg_lccr2, fbi->base + LCCR2);
+ writel_relaxed(fbi->reg_lccr1, fbi->base + LCCR1);
+ writel_relaxed(fbi->reg_lccr0 & ~LCCR0_LEN, fbi->base + LCCR0);
+ writel_relaxed(fbi->dbar1, fbi->base + DBAR1);
+ writel_relaxed(fbi->dbar2, fbi->base + DBAR2);
+ writel_relaxed(fbi->reg_lccr0 | LCCR0_LEN, fbi->base + LCCR0);
+
+ if (machine_is_shannon())
+ gpio_set_value(SHANNON_GPIO_DISP_EN, 1);
+
+ dev_dbg(fbi->dev, "DBAR1: 0x%08x\n", readl_relaxed(fbi->base + DBAR1));
+ dev_dbg(fbi->dev, "DBAR2: 0x%08x\n", readl_relaxed(fbi->base + DBAR2));
+ dev_dbg(fbi->dev, "LCCR0: 0x%08x\n", readl_relaxed(fbi->base + LCCR0));
+ dev_dbg(fbi->dev, "LCCR1: 0x%08x\n", readl_relaxed(fbi->base + LCCR1));
+ dev_dbg(fbi->dev, "LCCR2: 0x%08x\n", readl_relaxed(fbi->base + LCCR2));
+ dev_dbg(fbi->dev, "LCCR3: 0x%08x\n", readl_relaxed(fbi->base + LCCR3));
}
static void sa1100fb_disable_controller(struct sa1100fb_info *fbi)
{
DECLARE_WAITQUEUE(wait, current);
+ u32 lccr0;
- DPRINTK("Disabling LCD controller\n");
+ dev_dbg(fbi->dev, "Disabling LCD controller\n");
- if (machine_is_shannon()) {
- GPCR |= SHANNON_GPIO_DISP_EN;
- }
+ if (machine_is_shannon())
+ gpio_set_value(SHANNON_GPIO_DISP_EN, 0);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&fbi->ctrlr_wait, &wait);
- LCSR = 0xffffffff; /* Clear LCD Status Register */
- LCCR0 &= ~LCCR0_LDM; /* Enable LCD Disable Done Interrupt */
- LCCR0 &= ~LCCR0_LEN; /* Disable LCD Controller */
+ /* Clear LCD Status Register */
+ writel_relaxed(~0, fbi->base + LCSR);
+
+ lccr0 = readl_relaxed(fbi->base + LCCR0);
+ lccr0 &= ~LCCR0_LDM; /* Enable LCD Disable Done Interrupt */
+ writel_relaxed(lccr0, fbi->base + LCCR0);
+ lccr0 &= ~LCCR0_LEN; /* Disable LCD Controller */
+ writel_relaxed(lccr0, fbi->base + LCCR0);
schedule_timeout(20 * HZ / 1000);
remove_wait_queue(&fbi->ctrlr_wait, &wait);
@@ -1072,14 +843,15 @@ static void sa1100fb_disable_controller(struct sa1100fb_info *fbi)
static irqreturn_t sa1100fb_handle_irq(int irq, void *dev_id)
{
struct sa1100fb_info *fbi = dev_id;
- unsigned int lcsr = LCSR;
+ unsigned int lcsr = readl_relaxed(fbi->base + LCSR);
if (lcsr & LCSR_LDD) {
- LCCR0 |= LCCR0_LDM;
+ u32 lccr0 = readl_relaxed(fbi->base + LCCR0) | LCCR0_LDM;
+ writel_relaxed(lccr0, fbi->base + LCCR0);
wake_up(&fbi->ctrlr_wait);
}
- LCSR = lcsr;
+ writel_relaxed(lcsr, fbi->base + LCSR);
return IRQ_HANDLED;
}
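
The sa1100fb conversion replaces the direct register macros (LCCR0, LCSR and friends) with readl_relaxed()/writel_relaxed() accesses against the ioremapped fbi->base. The access pattern in miniature, with invented offsets:

#include <linux/io.h>

#define MY_CTRL	0x00	/* illustrative offsets, not SA-1100 registers */
#define MY_STAT	0x04

/* Sketch: relaxed MMIO read-modify-write against a mapped base */
static void my_ack_irq(void __iomem *base)
{
	u32 stat = readl_relaxed(base + MY_STAT);

	writel_relaxed(stat, base + MY_STAT);	/* write-1-to-clear ack */
	writel_relaxed(readl_relaxed(base + MY_CTRL) | 1, base + MY_CTRL);
}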
@@ -1268,7 +1040,7 @@ sa1100fb_freq_policy(struct notifier_block *nb, unsigned long val,
switch (val) {
case CPUFREQ_ADJUST:
case CPUFREQ_INCOMPATIBLE:
- printk(KERN_DEBUG "min dma period: %d ps, "
+ dev_dbg(fbi->dev, "min dma period: %d ps, "
"new clock %d kHz\n", sa1100fb_min_dma_period(fbi),
policy->max);
/* todo: fill in min/max values */
@@ -1318,7 +1090,7 @@ static int sa1100fb_resume(struct platform_device *dev)
* cache. Once this area is remapped, all virtual memory
* access to the video memory should occur at the new region.
*/
-static int __init sa1100fb_map_video_memory(struct sa1100fb_info *fbi)
+static int __devinit sa1100fb_map_video_memory(struct sa1100fb_info *fbi)
{
/*
* We reserve one page for the palette, plus the size
@@ -1344,7 +1116,7 @@ static int __init sa1100fb_map_video_memory(struct sa1100fb_info *fbi)
}
/* Fake monspecs to fill in fbinfo structure */
-static struct fb_monspecs monspecs __initdata = {
+static struct fb_monspecs monspecs __devinitdata = {
.hfmin = 30000,
.hfmax = 70000,
.vfmin = 50,
@@ -1352,10 +1124,11 @@ static struct fb_monspecs monspecs __initdata = {
};

-static struct sa1100fb_info * __init sa1100fb_init_fbinfo(struct device *dev)
+static struct sa1100fb_info * __devinit sa1100fb_init_fbinfo(struct device *dev)
{
- struct sa1100fb_mach_info *inf;
+ struct sa1100fb_mach_info *inf = dev->platform_data;
struct sa1100fb_info *fbi;
+ unsigned i;

fbi = kmalloc(sizeof(struct sa1100fb_info) + sizeof(u32) * 16,
GFP_KERNEL);
@@ -1390,8 +1163,6 @@ static struct sa1100fb_info * __init sa1100fb_init_fbinfo(struct device *dev)
fbi->rgb[RGB_8] = &rgb_8;
fbi->rgb[RGB_16] = &def_rgb_16;
- inf = sa1100fb_get_machine_info(fbi);
-
/*
* People just don't seem to get this. We don't support
* anything but correct entries now, so panic if someone
@@ -1402,13 +1173,10 @@ static struct sa1100fb_info * __init sa1100fb_init_fbinfo(struct device *dev)
panic("sa1100fb error: invalid LCCR3 fields set or zero "
"pixclock.");
- fbi->max_xres = inf->xres;
fbi->fb.var.xres = inf->xres;
fbi->fb.var.xres_virtual = inf->xres;
- fbi->max_yres = inf->yres;
fbi->fb.var.yres = inf->yres;
fbi->fb.var.yres_virtual = inf->yres;
- fbi->max_bpp = inf->bpp;
fbi->fb.var.bits_per_pixel = inf->bpp;
fbi->fb.var.pixclock = inf->pixclock;
fbi->fb.var.hsync_len = inf->hsync_len;
@@ -1419,14 +1187,16 @@ static struct sa1100fb_info * __init sa1100fb_init_fbinfo(struct device *dev)
fbi->fb.var.lower_margin = inf->lower_margin;
fbi->fb.var.sync = inf->sync;
fbi->fb.var.grayscale = inf->cmap_greyscale;
- fbi->cmap_inverse = inf->cmap_inverse;
- fbi->cmap_static = inf->cmap_static;
- fbi->lccr0 = inf->lccr0;
- fbi->lccr3 = inf->lccr3;
fbi->state = C_STARTUP;
fbi->task_state = (u_char)-1;
- fbi->fb.fix.smem_len = fbi->max_xres * fbi->max_yres *
- fbi->max_bpp / 8;
+ fbi->fb.fix.smem_len = inf->xres * inf->yres *
+ inf->bpp / 8;
+ fbi->inf = inf;
+
+ /* Copy the RGB bitfield overrides */
+ for (i = 0; i < NR_RGB; i++)
+ if (inf->rgb[i])
+ fbi->rgb[i] = inf->rgb[i];

init_waitqueue_head(&fbi->ctrlr_wait);
INIT_WORK(&fbi->task, sa1100fb_task);
@@ -1438,13 +1208,20 @@ static struct sa1100fb_info * __init sa1100fb_init_fbinfo(struct device *dev)
static int __devinit sa1100fb_probe(struct platform_device *pdev)
{
struct sa1100fb_info *fbi;
+ struct resource *res;
int ret, irq;

+ if (!pdev->dev.platform_data) {
+ dev_err(&pdev->dev, "no platform LCD data\n");
+ return -EINVAL;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
- if (irq < 0)
+ if (irq < 0 || !res)
return -EINVAL;

- if (!request_mem_region(0xb0100000, 0x10000, "LCD"))
+ if (!request_mem_region(res->start, resource_size(res), "LCD"))
return -EBUSY;

fbi = sa1100fb_init_fbinfo(&pdev->dev);
@@ -1452,6 +1229,10 @@ static int __devinit sa1100fb_probe(struct platform_device *pdev)
if (!fbi)
goto failed;
+ fbi->base = ioremap(res->start, resource_size(res));
+ if (!fbi->base)
+ goto failed;
+
/* Initialize video memory */
ret = sa1100fb_map_video_memory(fbi);
if (ret)
@@ -1459,14 +1240,16 @@ static int __devinit sa1100fb_probe(struct platform_device *pdev)
ret = request_irq(irq, sa1100fb_handle_irq, 0, "LCD", fbi);
if (ret) {
- printk(KERN_ERR "sa1100fb: request_irq failed: %d\n", ret);
+ dev_err(&pdev->dev, "request_irq failed: %d\n", ret);
goto failed;
}

-#ifdef ASSABET_PAL_VIDEO
- if (machine_is_assabet())
- ASSABET_BCR_clear(ASSABET_BCR_LCD_ON);
-#endif
+ if (machine_is_shannon()) {
+ ret = gpio_request_one(SHANNON_GPIO_DISP_EN,
+ GPIOF_OUT_INIT_LOW, "display enable");
+ if (ret)
+ goto err_free_irq;
+ }
/*
* This makes sure that our colour bitfield
@@ -1478,7 +1261,7 @@ static int __devinit sa1100fb_probe(struct platform_device *pdev)
ret = register_framebuffer(&fbi->fb);
if (ret < 0)
- goto err_free_irq;
+ goto err_reg_fb;
#ifdef CONFIG_CPU_FREQ
fbi->freq_transition.notifier_call = sa1100fb_freq_transition;
@@ -1490,12 +1273,17 @@ static int __devinit sa1100fb_probe(struct platform_device *pdev)
/* This driver cannot be unloaded at the moment */
return 0;
+ err_reg_fb:
+ if (machine_is_shannon())
+ gpio_free(SHANNON_GPIO_DISP_EN);
err_free_irq:
free_irq(irq, fbi);
failed:
+ if (fbi)
+ iounmap(fbi->base);
platform_set_drvdata(pdev, NULL);
kfree(fbi);
- release_mem_region(0xb0100000, 0x10000);
+ release_mem_region(res->start, resource_size(res));
return ret;
}
@@ -1505,6 +1293,7 @@ static struct platform_driver sa1100fb_driver = {
.resume = sa1100fb_resume,
.driver = {
.name = "sa11x0-fb",
+ .owner = THIS_MODULE,
},
};
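With sa1100fb_get_machine_info() gone, the probe above refuses to bind unless the board supplies a struct sa1100fb_mach_info plus the memory and IRQ resources it now looks up. A hedged sketch of what a board file might register: the timing values are purely illustrative, the LCCR flag names follow existing SA-1100 board files, IRQ_LCD is a hypothetical IRQ define, and only the "sa11x0-fb" device name and the 0xb0100000/0x10000 register window are taken from the patch itself.

/* Sketch only: illustrative board-side LCD description. */
static struct sa1100fb_mach_info example_lcd_info = {
        .pixclock       = 171521,       /* illustrative timings */
        .xres           = 320,
        .yres           = 240,
        .bpp            = 16,
        .hsync_len      = 5,
        .left_margin    = 61,
        .right_margin   = 9,
        .vsync_len      = 1,
        .upper_margin   = 3,
        .lower_margin   = 0,
        .lccr0          = LCCR0_Color | LCCR0_Sngl | LCCR0_Act,
        .lccr3          = LCCR3_OutEnH | LCCR3_PixRsEdg,
};

static struct resource example_lcd_resources[] = {
        DEFINE_RES_MEM(0xb0100000, 0x10000),    /* window the probe used to hardcode */
        DEFINE_RES_IRQ(IRQ_LCD),                /* hypothetical IRQ define */
};

static struct platform_device example_lcd_device = {
        .name           = "sa11x0-fb",
        .id             = -1,
        .resource       = example_lcd_resources,
        .num_resources  = ARRAY_SIZE(example_lcd_resources),
        .dev            = {
                .platform_data = &example_lcd_info,
        },
};

Registering this with platform_device_register() in the board init satisfies every check the new probe makes before touching the hardware.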
diff --git a/drivers/video/sa1100fb.h b/drivers/video/sa1100fb.h
index 1c3b459865d8..fc5d4292fad6 100644
--- a/drivers/video/sa1100fb.h
+++ b/drivers/video/sa1100fb.h
@@ -10,44 +10,15 @@
* for more details.
*/
-/*
- * These are the bitfields for each
- * display depth that we support.
- */
-struct sa1100fb_rgb {
- struct fb_bitfield red;
- struct fb_bitfield green;
- struct fb_bitfield blue;
- struct fb_bitfield transp;
-};
-
-/*
- * This structure describes the machine which we are running on.
- */
-struct sa1100fb_mach_info {
- u_long pixclock;
-
- u_short xres;
- u_short yres;
-
- u_char bpp;
- u_char hsync_len;
- u_char left_margin;
- u_char right_margin;
-
- u_char vsync_len;
- u_char upper_margin;
- u_char lower_margin;
- u_char sync;
-
- u_int cmap_greyscale:1,
- cmap_inverse:1,
- cmap_static:1,
- unused:29;
-
- u_int lccr0;
- u_int lccr3;
-};
+#define LCCR0 0x0000 /* LCD Control Reg. 0 */
+#define LCSR 0x0004 /* LCD Status Reg. */
+#define DBAR1 0x0010 /* LCD DMA Base Address Reg. channel 1 */
+#define DCAR1 0x0014 /* LCD DMA Current Address Reg. channel 1 */
+#define DBAR2 0x0018 /* LCD DMA Base Address Reg. channel 2 */
+#define DCAR2 0x001C /* LCD DMA Current Address Reg. channel 2 */
+#define LCCR1 0x0020 /* LCD Control Reg. 1 */
+#define LCCR2 0x0024 /* LCD Control Reg. 2 */
+#define LCCR3 0x0028 /* LCD Control Reg. 3 */
/* Shadows for LCD controller registers */
struct sa1100fb_lcd_reg {
@@ -57,19 +28,11 @@ struct sa1100fb_lcd_reg {
unsigned long lccr3;
};
-#define RGB_4 (0)
-#define RGB_8 (1)
-#define RGB_16 (2)
-#define NR_RGB 3
-
struct sa1100fb_info {
struct fb_info fb;
struct device *dev;
- struct sa1100fb_rgb *rgb[NR_RGB];
-
- u_int max_bpp;
- u_int max_xres;
- u_int max_yres;
+ const struct sa1100fb_rgb *rgb[NR_RGB];
+ void __iomem *base;
/*
* These are the addresses we mapped
@@ -88,12 +51,6 @@ struct sa1100fb_info {
dma_addr_t dbar1;
dma_addr_t dbar2;
- u_int lccr0;
- u_int lccr3;
- u_int cmap_inverse:1,
- cmap_static:1,
- unused:30;
-
u_int reg_lccr0;
u_int reg_lccr1;
u_int reg_lccr2;
@@ -109,6 +66,8 @@ struct sa1100fb_info {
struct notifier_block freq_transition;
struct notifier_block freq_policy;
#endif
+
+ const struct sa1100fb_mach_info *inf;
};
#define TO_INF(ptr,member) container_of(ptr,struct sa1100fb_info,member)
@@ -130,15 +89,6 @@ struct sa1100fb_info {
#define SA1100_NAME "SA1100"
/*
- * Debug macros
- */
-#if DEBUG
-# define DPRINTK(fmt, args...) printk("%s: " fmt, __func__ , ## args)
-#else
-# define DPRINTK(fmt, args...)
-#endif
-
-/*
* Minimum X and Y resolutions
*/
#define MIN_XRES 64
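Because every register is now an offset from the ioremapped base rather than an absolute virtual address, ad-hoc debugging reads turn into plain accessor calls. A small sketch (the helper is ours) using the DCAR1/DCAR2 offsets defined above:

/*
 * Sketch only, not from the patch: dump the LCD DMA current-address
 * registers through the ioremapped base.
 */
static void sa1100fb_dump_dma(struct sa1100fb_info *fbi)
{
        dev_dbg(fbi->dev, "DCAR1: 0x%08x, DCAR2: 0x%08x\n",
                readl_relaxed(fbi->base + DCAR1),
                readl_relaxed(fbi->base + DCAR2));
}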
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 5d8cd69e191a..37096246c937 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -172,7 +172,7 @@ config HAVE_S3C2410_WATCHDOG
config S3C2410_WATCHDOG
tristate "S3C2410 Watchdog"
- depends on ARCH_S3C2410 || HAVE_S3C2410_WATCHDOG
+ depends on HAVE_S3C2410_WATCHDOG
select WATCHDOG_CORE
help
Watchdog timer block in the Samsung SoCs. This will reboot
diff --git a/drivers/watchdog/at91rm9200_wdt.c b/drivers/watchdog/at91rm9200_wdt.c
index ad12c2030441..7ef99a169e3b 100644
--- a/drivers/watchdog/at91rm9200_wdt.c
+++ b/drivers/watchdog/at91rm9200_wdt.c
@@ -53,7 +53,7 @@ static unsigned long at91wdt_busy;
*/
static inline void at91_wdt_stop(void)
{
- at91_sys_write(AT91_ST_WDMR, AT91_ST_EXTEN);
+ at91_st_write(AT91_ST_WDMR, AT91_ST_EXTEN);
}
/*
@@ -61,9 +61,9 @@ static inline void at91_wdt_stop(void)
*/
static inline void at91_wdt_start(void)
{
- at91_sys_write(AT91_ST_WDMR, AT91_ST_EXTEN | AT91_ST_RSTEN |
+ at91_st_write(AT91_ST_WDMR, AT91_ST_EXTEN | AT91_ST_RSTEN |
(((65536 * wdt_time) >> 8) & AT91_ST_WDV));
- at91_sys_write(AT91_ST_CR, AT91_ST_WDRST);
+ at91_st_write(AT91_ST_CR, AT91_ST_WDRST);
}
/*
@@ -71,7 +71,7 @@ static inline void at91_wdt_start(void)
*/
static inline void at91_wdt_reload(void)
{
- at91_sys_write(AT91_ST_CR, AT91_ST_WDRST);
+ at91_st_write(AT91_ST_CR, AT91_ST_WDRST);
}
/* ......................................................................... */
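The reload value in at91_wdt_start() is easier to read with the clocking spelled out. Assuming the AT91RM9200 system timer clocks its watchdog at slow clock / 128, i.e. 32768 / 128 = 256 Hz, the expression (65536 * wdt_time) >> 8 reduces to 256 * wdt_time, which counts out wdt_time seconds; the AT91_ST_WDV mask keeps the result inside the 16-bit counter, capping the timeout at 65536 / 256 = 256 seconds. A sketch of the conversion (names ours):

/*
 * Sketch only, not from the patch: the seconds-to-counts conversion
 * used by at91_wdt_start(), assuming the 256 Hz watchdog clock.
 */
#define AT91_WDT_HZ     256     /* slow clock 32768 Hz / 128 prescaler */

static inline unsigned long at91_wdt_seconds_to_counts(unsigned int secs)
{
        return (65536UL * secs) >> 8;   /* == AT91_WDT_HZ * secs */
}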
diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c
index 173ddf1ba004..788aa158e78c 100644
--- a/drivers/watchdog/orion_wdt.c
+++ b/drivers/watchdog/orion_wdt.c
@@ -30,9 +30,9 @@
/*
* Watchdog timer block registers.
*/
-#define TIMER_CTRL (TIMER_VIRT_BASE + 0x0000)
+#define TIMER_CTRL 0x0000
#define WDT_EN 0x0010
-#define WDT_VAL (TIMER_VIRT_BASE + 0x0024)
+#define WDT_VAL 0x0024
#define WDT_MAX_CYCLE_COUNT 0xffffffff
#define WDT_IN_USE 0
@@ -42,6 +42,7 @@ static bool nowayout = WATCHDOG_NOWAYOUT;
static int heartbeat = -1; /* module parameter (seconds) */
static unsigned int wdt_max_duration; /* (seconds) */
static unsigned int wdt_tclk;
+static void __iomem *wdt_reg;
static unsigned long wdt_status;
static DEFINE_SPINLOCK(wdt_lock);
@@ -50,7 +51,7 @@ static void orion_wdt_ping(void)
spin_lock(&wdt_lock);
/* Reload watchdog duration */
- writel(wdt_tclk * heartbeat, WDT_VAL);
+ writel(wdt_tclk * heartbeat, wdt_reg + WDT_VAL);
spin_unlock(&wdt_lock);
}
@@ -62,7 +63,7 @@ static void orion_wdt_enable(void)
spin_lock(&wdt_lock);
/* Set watchdog duration */
- writel(wdt_tclk * heartbeat, WDT_VAL);
+ writel(wdt_tclk * heartbeat, wdt_reg + WDT_VAL);
/* Clear watchdog timer interrupt */
reg = readl(BRIDGE_CAUSE);
@@ -70,9 +71,9 @@ static void orion_wdt_enable(void)
writel(reg, BRIDGE_CAUSE);
/* Enable watchdog timer */
- reg = readl(TIMER_CTRL);
+ reg = readl(wdt_reg + TIMER_CTRL);
reg |= WDT_EN;
- writel(reg, TIMER_CTRL);
+ writel(reg, wdt_reg + TIMER_CTRL);
/* Enable reset on watchdog */
reg = readl(RSTOUTn_MASK);
@@ -94,9 +95,9 @@ static void orion_wdt_disable(void)
writel(reg, RSTOUTn_MASK);
/* Disable watchdog timer */
- reg = readl(TIMER_CTRL);
+ reg = readl(wdt_reg + TIMER_CTRL);
reg &= ~WDT_EN;
- writel(reg, TIMER_CTRL);
+ writel(reg, wdt_reg + TIMER_CTRL);
spin_unlock(&wdt_lock);
}
@@ -104,7 +105,7 @@ static void orion_wdt_disable(void)
static int orion_wdt_get_timeleft(int *time_left)
{
spin_lock(&wdt_lock);
- *time_left = readl(WDT_VAL) / wdt_tclk;
+ *time_left = readl(wdt_reg + WDT_VAL) / wdt_tclk;
spin_unlock(&wdt_lock);
return 0;
}
@@ -237,6 +238,7 @@ static struct miscdevice orion_wdt_miscdev = {
static int __devinit orion_wdt_probe(struct platform_device *pdev)
{
struct orion_wdt_platform_data *pdata = pdev->dev.platform_data;
+ struct resource *res;
int ret;
if (pdata) {
@@ -246,6 +248,10 @@ static int __devinit orion_wdt_probe(struct platform_device *pdev)
return -ENODEV;
}
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ wdt_reg = ioremap(res->start, resource_size(res));
+
if (orion_wdt_miscdev.parent)
return -EBUSY;
orion_wdt_miscdev.parent = &pdev->dev;
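Note that this hunk dereferences res and stores the ioremap() return value without checking either lookup; a more defensive sketch of the same two steps (ours, not part of the patch) would bail out early:

        /* Sketch only: the same lookups with error handling added. */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return -ENODEV;

        wdt_reg = ioremap(res->start, resource_size(res));
        if (!wdt_reg)
                return -ENOMEM;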