-rw-r--r--  arch/x86/xen/mmu.c                        9
-rw-r--r--  arch/x86/xen/suspend.c                   20
-rw-r--r--  drivers/block/xen-blkback/blkback.c      15
-rw-r--r--  drivers/block/xen-blkback/common.h        8
-rw-r--r--  drivers/net/xen-netback/netback.c        34
-rw-r--r--  drivers/xen/events/events_fifo.c         23
-rw-r--r--  drivers/xen/xen-pciback/pciback.h         1
-rw-r--r--  drivers/xen/xen-pciback/pciback_ops.c    75
-rw-r--r--  drivers/xen/xen-pciback/xenbus.c          4
-rw-r--r--  drivers/xen/xen-scsiback.c                2
-rw-r--r--  include/xen/interface/io/ring.h          14
11 files changed, 138 insertions, 67 deletions
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index ac161db63388..cb5e266a8bf7 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -2495,14 +2495,9 @@ void __init xen_init_mmu_ops(void)
{
x86_init.paging.pagetable_init = xen_pagetable_init;
- /* Optimization - we can use the HVM one but it has no idea which
- * VCPUs are descheduled - which means that it will needlessly IPI
- * them. Xen knows so let it do the job.
- */
- if (xen_feature(XENFEAT_auto_translated_physmap)) {
- pv_mmu_ops.flush_tlb_others = xen_flush_tlb_others;
+ if (xen_feature(XENFEAT_auto_translated_physmap))
return;
- }
+
pv_mmu_ops = xen_mmu_ops;
memset(dummy_mapping, 0xff, PAGE_SIZE);
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index feddabdab448..3705eabd7e22 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -68,26 +68,16 @@ static void xen_pv_post_suspend(int suspend_cancelled)
void xen_arch_pre_suspend(void)
{
- int cpu;
-
- for_each_online_cpu(cpu)
- xen_pmu_finish(cpu);
-
if (xen_pv_domain())
xen_pv_pre_suspend();
}
void xen_arch_post_suspend(int cancelled)
{
- int cpu;
-
if (xen_pv_domain())
xen_pv_post_suspend(cancelled);
else
xen_hvm_post_suspend(cancelled);
-
- for_each_online_cpu(cpu)
- xen_pmu_init(cpu);
}
static void xen_vcpu_notify_restore(void *data)
@@ -106,10 +96,20 @@ static void xen_vcpu_notify_suspend(void *data)
void xen_arch_resume(void)
{
+ int cpu;
+
on_each_cpu(xen_vcpu_notify_restore, NULL, 1);
+
+ for_each_online_cpu(cpu)
+ xen_pmu_init(cpu);
}
void xen_arch_suspend(void)
{
+ int cpu;
+
+ for_each_online_cpu(cpu)
+ xen_pmu_finish(cpu);
+
on_each_cpu(xen_vcpu_notify_suspend, NULL, 1);
}
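
The suspend.c hunks move the per-CPU PMU calls out of xen_arch_pre_suspend()/xen_arch_post_suspend() and into xen_arch_suspend()/xen_arch_resume(): xen_pmu_finish() now runs before the vCPUs are notified of the suspend, and xen_pmu_init() only after they have been notified of the restore. A rough outline of the resulting ordering, assuming the usual sequencing in which the pre/post-suspend callbacks run in the atomic suspend path while xen_arch_suspend()/xen_arch_resume() run from ordinary process context (the real caller in drivers/xen/manage.c is not part of this diff):

/*
 * Hypothetical outline only -- not the real drivers/xen/manage.c code.
 * PMU teardown/re-init now sits in the non-atomic arch hooks rather
 * than in the pre/post-suspend callbacks.
 */
static void suspend_flow_sketch(int cancelled)
{
	xen_arch_suspend();	/* xen_pmu_finish() per CPU, then notify
				 * each vCPU of the suspend */

	/* ... atomic section: xen_arch_pre_suspend(), the actual
	 * hypervisor suspend, xen_arch_post_suspend(cancelled) ... */

	xen_arch_resume();	/* notify each vCPU of the restore, then
				 * xen_pmu_init() per CPU */
}
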
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index f9099940c272..41fb1a917b17 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -950,6 +950,8 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
goto unmap;
for (n = 0, i = 0; n < nseg; n++) {
+ uint8_t first_sect, last_sect;
+
if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
/* Map indirect segments */
if (segments)
@@ -957,15 +959,18 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
}
i = n % SEGS_PER_INDIRECT_FRAME;
+
pending_req->segments[n]->gref = segments[i].gref;
- seg[n].nsec = segments[i].last_sect -
- segments[i].first_sect + 1;
- seg[n].offset = (segments[i].first_sect << 9);
- if ((segments[i].last_sect >= (XEN_PAGE_SIZE >> 9)) ||
- (segments[i].last_sect < segments[i].first_sect)) {
+
+ first_sect = READ_ONCE(segments[i].first_sect);
+ last_sect = READ_ONCE(segments[i].last_sect);
+ if (last_sect >= (XEN_PAGE_SIZE >> 9) || last_sect < first_sect) {
rc = -EINVAL;
goto unmap;
}
+
+ seg[n].nsec = last_sect - first_sect + 1;
+ seg[n].offset = first_sect << 9;
preq->nr_sects += seg[n].nsec;
}
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 68e87a037b99..c929ae22764c 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -408,8 +408,8 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
struct blkif_x86_32_request *src)
{
int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
- dst->operation = src->operation;
- switch (src->operation) {
+ dst->operation = READ_ONCE(src->operation);
+ switch (dst->operation) {
case BLKIF_OP_READ:
case BLKIF_OP_WRITE:
case BLKIF_OP_WRITE_BARRIER:
@@ -456,8 +456,8 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
struct blkif_x86_64_request *src)
{
int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
- dst->operation = src->operation;
- switch (src->operation) {
+ dst->operation = READ_ONCE(src->operation);
+ switch (dst->operation) {
case BLKIF_OP_READ:
case BLKIF_OP_WRITE:
case BLKIF_OP_WRITE_BARRIER:
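
Both blkback hunks close the same class of hole: the request lives in a shared ring page that the frontend can keep writing while the backend processes it, so each field must be fetched exactly once (READ_ONCE), validated, and then used only via the local copy. blkback.c does this for first_sect/last_sect before computing nsec and offset, and common.h switches on dst->operation rather than re-reading src->operation. A stand-alone sketch of the pattern, with hypothetical names (PAGE_SECTORS, struct shared_seg, parse_seg are illustrations, not kernel code):

/*
 * Fetch once, validate the copy, compute only from the copy.
 * READ_ONCE_U8 mimics the kernel's READ_ONCE() for a one-byte field.
 */
#include <stdint.h>

#define PAGE_SECTORS 8	/* assumption: 4 KiB page, 512-byte sectors */
#define READ_ONCE_U8(x) (*(volatile const uint8_t *)&(x))

struct shared_seg {	/* resides in guest-writable shared memory */
	uint8_t first_sect;
	uint8_t last_sect;
};

static int parse_seg(const struct shared_seg *shared,
		     unsigned int *nsec, unsigned int *offset)
{
	uint8_t first = READ_ONCE_U8(shared->first_sect);	/* single fetch */
	uint8_t last  = READ_ONCE_U8(shared->last_sect);	/* single fetch */

	if (last >= PAGE_SECTORS || last < first)
		return -1;	/* reject before any use */

	*nsec   = last - first + 1;	/* derived from the validated copies */
	*offset = first << 9;
	return 0;
}
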
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index e481f3710bd3..1049c34e7d43 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -258,18 +258,18 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
struct netrx_pending_operations *npo)
{
struct xenvif_rx_meta *meta;
- struct xen_netif_rx_request *req;
+ struct xen_netif_rx_request req;
- req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
+ RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
meta = npo->meta + npo->meta_prod++;
meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
meta->gso_size = 0;
meta->size = 0;
- meta->id = req->id;
+ meta->id = req.id;
npo->copy_off = 0;
- npo->copy_gref = req->gref;
+ npo->copy_gref = req.gref;
return meta;
}
@@ -424,7 +424,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
struct xenvif *vif = netdev_priv(skb->dev);
int nr_frags = skb_shinfo(skb)->nr_frags;
int i;
- struct xen_netif_rx_request *req;
+ struct xen_netif_rx_request req;
struct xenvif_rx_meta *meta;
unsigned char *data;
int head = 1;
@@ -443,15 +443,15 @@ static int xenvif_gop_skb(struct sk_buff *skb,
/* Set up a GSO prefix descriptor, if necessary */
if ((1 << gso_type) & vif->gso_prefix_mask) {
- req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
+ RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
meta = npo->meta + npo->meta_prod++;
meta->gso_type = gso_type;
meta->gso_size = skb_shinfo(skb)->gso_size;
meta->size = 0;
- meta->id = req->id;
+ meta->id = req.id;
}
- req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
+ RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
meta = npo->meta + npo->meta_prod++;
if ((1 << gso_type) & vif->gso_mask) {
@@ -463,9 +463,9 @@ static int xenvif_gop_skb(struct sk_buff *skb,
}
meta->size = 0;
- meta->id = req->id;
+ meta->id = req.id;
npo->copy_off = 0;
- npo->copy_gref = req->gref;
+ npo->copy_gref = req.gref;
data = skb->data;
while (data < skb_tail_pointer(skb)) {
@@ -679,9 +679,7 @@ static void tx_add_credit(struct xenvif_queue *queue)
* Allow a burst big enough to transmit a jumbo packet of up to 128kB.
* Otherwise the interface can seize up due to insufficient credit.
*/
- max_burst = RING_GET_REQUEST(&queue->tx, queue->tx.req_cons)->size;
- max_burst = min(max_burst, 131072UL);
- max_burst = max(max_burst, queue->credit_bytes);
+ max_burst = max(131072UL, queue->credit_bytes);
/* Take care that adding a new chunk of credit doesn't wrap to zero. */
max_credit = queue->remaining_credit + queue->credit_bytes;
@@ -711,7 +709,7 @@ static void xenvif_tx_err(struct xenvif_queue *queue,
spin_unlock_irqrestore(&queue->response_lock, flags);
if (cons == end)
break;
- txp = RING_GET_REQUEST(&queue->tx, cons++);
+ RING_COPY_REQUEST(&queue->tx, cons++, txp);
} while (1);
queue->tx.req_cons = cons;
}
@@ -778,8 +776,7 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
if (drop_err)
txp = &dropped_tx;
- memcpy(txp, RING_GET_REQUEST(&queue->tx, cons + slots),
- sizeof(*txp));
+ RING_COPY_REQUEST(&queue->tx, cons + slots, txp);
/* If the guest submitted a frame >= 64 KiB then
* first->size overflowed and following slots will
@@ -1112,8 +1109,7 @@ static int xenvif_get_extras(struct xenvif_queue *queue,
return -EBADR;
}
- memcpy(&extra, RING_GET_REQUEST(&queue->tx, cons),
- sizeof(extra));
+ RING_COPY_REQUEST(&queue->tx, cons, &extra);
if (unlikely(!extra.type ||
extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
queue->tx.req_cons = ++cons;
@@ -1322,7 +1318,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
idx = queue->tx.req_cons;
rmb(); /* Ensure that we see the request before we copy it. */
- memcpy(&txreq, RING_GET_REQUEST(&queue->tx, idx), sizeof(txreq));
+ RING_COPY_REQUEST(&queue->tx, idx, &txreq);
/* Credit-based scheduling. */
if (txreq.size > queue->remaining_credit &&
diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c
index e3e9e3d46d1b..96a1b8da5371 100644
--- a/drivers/xen/events/events_fifo.c
+++ b/drivers/xen/events/events_fifo.c
@@ -281,7 +281,8 @@ static void handle_irq_for_port(unsigned port)
static void consume_one_event(unsigned cpu,
struct evtchn_fifo_control_block *control_block,
- unsigned priority, unsigned long *ready)
+ unsigned priority, unsigned long *ready,
+ bool drop)
{
struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
uint32_t head;
@@ -313,13 +314,17 @@ static void consume_one_event(unsigned cpu,
if (head == 0)
clear_bit(priority, ready);
- if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port))
- handle_irq_for_port(port);
+ if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port)) {
+ if (unlikely(drop))
+ pr_warn("Dropping pending event for port %u\n", port);
+ else
+ handle_irq_for_port(port);
+ }
q->head[priority] = head;
}
-static void evtchn_fifo_handle_events(unsigned cpu)
+static void __evtchn_fifo_handle_events(unsigned cpu, bool drop)
{
struct evtchn_fifo_control_block *control_block;
unsigned long ready;
@@ -331,11 +336,16 @@ static void evtchn_fifo_handle_events(unsigned cpu)
while (ready) {
q = find_first_bit(&ready, EVTCHN_FIFO_MAX_QUEUES);
- consume_one_event(cpu, control_block, q, &ready);
+ consume_one_event(cpu, control_block, q, &ready, drop);
ready |= xchg(&control_block->ready, 0);
}
}
+static void evtchn_fifo_handle_events(unsigned cpu)
+{
+ __evtchn_fifo_handle_events(cpu, false);
+}
+
static void evtchn_fifo_resume(void)
{
unsigned cpu;
@@ -420,6 +430,9 @@ static int evtchn_fifo_cpu_notification(struct notifier_block *self,
if (!per_cpu(cpu_control_block, cpu))
ret = evtchn_fifo_alloc_control_block(cpu);
break;
+ case CPU_DEAD:
+ __evtchn_fifo_handle_events(cpu, true);
+ break;
default:
break;
}
diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h
index 58e38d586f52..4d529f3e40df 100644
--- a/drivers/xen/xen-pciback/pciback.h
+++ b/drivers/xen/xen-pciback/pciback.h
@@ -37,6 +37,7 @@ struct xen_pcibk_device {
struct xen_pci_sharedinfo *sh_info;
unsigned long flags;
struct work_struct op_work;
+ struct xen_pci_op op;
};
struct xen_pcibk_dev_data {
diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
index c4a0666de6f5..73dafdc494aa 100644
--- a/drivers/xen/xen-pciback/pciback_ops.c
+++ b/drivers/xen/xen-pciback/pciback_ops.c
@@ -70,6 +70,13 @@ static void xen_pcibk_control_isr(struct pci_dev *dev, int reset)
enable ? "enable" : "disable");
if (enable) {
+ /*
+ * The MSI or MSI-X should not have an IRQ handler. Otherwise
+ * if the guest terminates we BUG_ON in free_msi_irqs.
+ */
+ if (dev->msi_enabled || dev->msix_enabled)
+ goto out;
+
rc = request_irq(dev_data->irq,
xen_pcibk_guest_interrupt, IRQF_SHARED,
dev_data->irq_name, dev);
@@ -144,7 +151,12 @@ int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev,
if (unlikely(verbose_request))
printk(KERN_DEBUG DRV_NAME ": %s: enable MSI\n", pci_name(dev));
- status = pci_enable_msi(dev);
+ if (dev->msi_enabled)
+ status = -EALREADY;
+ else if (dev->msix_enabled)
+ status = -ENXIO;
+ else
+ status = pci_enable_msi(dev);
if (status) {
pr_warn_ratelimited("%s: error enabling MSI for guest %u: err %d\n",
@@ -173,20 +185,23 @@ static
int xen_pcibk_disable_msi(struct xen_pcibk_device *pdev,
struct pci_dev *dev, struct xen_pci_op *op)
{
- struct xen_pcibk_dev_data *dev_data;
-
if (unlikely(verbose_request))
printk(KERN_DEBUG DRV_NAME ": %s: disable MSI\n",
pci_name(dev));
- pci_disable_msi(dev);
+ if (dev->msi_enabled) {
+ struct xen_pcibk_dev_data *dev_data;
+
+ pci_disable_msi(dev);
+
+ dev_data = pci_get_drvdata(dev);
+ if (dev_data)
+ dev_data->ack_intr = 1;
+ }
op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
if (unlikely(verbose_request))
printk(KERN_DEBUG DRV_NAME ": %s: MSI: %d\n", pci_name(dev),
op->value);
- dev_data = pci_get_drvdata(dev);
- if (dev_data)
- dev_data->ack_intr = 1;
return 0;
}
@@ -197,13 +212,26 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
struct xen_pcibk_dev_data *dev_data;
int i, result;
struct msix_entry *entries;
+ u16 cmd;
if (unlikely(verbose_request))
printk(KERN_DEBUG DRV_NAME ": %s: enable MSI-X\n",
pci_name(dev));
+
if (op->value > SH_INFO_MAX_VEC)
return -EINVAL;
+ if (dev->msix_enabled)
+ return -EALREADY;
+
+ /*
+ * PCI_COMMAND_MEMORY must be enabled, otherwise we may not be able
+ * to access the BARs where the MSI-X entries reside.
+ */
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ if (dev->msi_enabled || !(cmd & PCI_COMMAND_MEMORY))
+ return -ENXIO;
+
entries = kmalloc(op->value * sizeof(*entries), GFP_KERNEL);
if (entries == NULL)
return -ENOMEM;
@@ -245,23 +273,27 @@ static
int xen_pcibk_disable_msix(struct xen_pcibk_device *pdev,
struct pci_dev *dev, struct xen_pci_op *op)
{
- struct xen_pcibk_dev_data *dev_data;
if (unlikely(verbose_request))
printk(KERN_DEBUG DRV_NAME ": %s: disable MSI-X\n",
pci_name(dev));
- pci_disable_msix(dev);
+ if (dev->msix_enabled) {
+ struct xen_pcibk_dev_data *dev_data;
+
+ pci_disable_msix(dev);
+
+ dev_data = pci_get_drvdata(dev);
+ if (dev_data)
+ dev_data->ack_intr = 1;
+ }
/*
* SR-IOV devices (which don't have any legacy IRQ) have
* an undefined IRQ value of zero.
*/
op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
if (unlikely(verbose_request))
- printk(KERN_DEBUG DRV_NAME ": %s: MSI-X: %d\n", pci_name(dev),
- op->value);
- dev_data = pci_get_drvdata(dev);
- if (dev_data)
- dev_data->ack_intr = 1;
+ printk(KERN_DEBUG DRV_NAME ": %s: MSI-X: %d\n",
+ pci_name(dev), op->value);
return 0;
}
#endif
@@ -298,9 +330,11 @@ void xen_pcibk_do_op(struct work_struct *data)
container_of(data, struct xen_pcibk_device, op_work);
struct pci_dev *dev;
struct xen_pcibk_dev_data *dev_data = NULL;
- struct xen_pci_op *op = &pdev->sh_info->op;
+ struct xen_pci_op *op = &pdev->op;
int test_intx = 0;
+ *op = pdev->sh_info->op;
+ barrier();
dev = xen_pcibk_get_pci_dev(pdev, op->domain, op->bus, op->devfn);
if (dev == NULL)
@@ -342,6 +376,17 @@ void xen_pcibk_do_op(struct work_struct *data)
if ((dev_data->enable_intx != test_intx))
xen_pcibk_control_isr(dev, 0 /* no reset */);
}
+ pdev->sh_info->op.err = op->err;
+ pdev->sh_info->op.value = op->value;
+#ifdef CONFIG_PCI_MSI
+ if (op->cmd == XEN_PCI_OP_enable_msix && op->err == 0) {
+ unsigned int i;
+
+ for (i = 0; i < op->value; i++)
+ pdev->sh_info->op.msix_entries[i].vector =
+ op->msix_entries[i].vector;
+ }
+#endif
/* Tell the driver domain that we're done. */
wmb();
clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
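
The pciback changes combine two guards. The MSI/MSI-X paths now check dev->msi_enabled/dev->msix_enabled (and PCI_COMMAND_MEMORY) before acting, so a misbehaving guest can no longer drive the device into a state that trips the BUG_ON in free_msi_irqs. Separately, xen_pcibk_do_op() now takes a private snapshot of the shared xen_pci_op into pdev->op, operates only on that snapshot, and copies back just the result fields (err, value and, for a successful enable_msix, the vectors). A stand-alone sketch of that copy-in/copy-out pattern, with hypothetical names (struct op, struct shared_page, do_op_sketch):

/*
 * Hypothetical illustration, not pciback code: snapshot the shared
 * request once, act only on the snapshot, publish only the results.
 */
struct op {
	int cmd;	/* request fields, written by the other end */
	int err;	/* result fields, written back by us */
	int value;
};

struct shared_page {
	struct op op;	/* mapped from the other domain */
};

static void do_op_sketch(struct shared_page *sh, struct op *local)
{
	/* volatile forces a real copy and stops the compiler from
	 * re-reading sh->op later (same idea as RING_COPY_REQUEST). */
	*local = *(volatile struct op *)&sh->op;
	__asm__ __volatile__("" ::: "memory");	/* compiler barrier, as barrier() */

	/* ... dispatch on local->cmd; sh->op may change underneath us ... */
	local->err = 0;
	local->value = 42;

	sh->op.err = local->err;	/* publish only the result fields */
	sh->op.value = local->value;
}
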
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index 98bc345f296e..4843741e703a 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -44,7 +44,6 @@ static struct xen_pcibk_device *alloc_pdev(struct xenbus_device *xdev)
dev_dbg(&xdev->dev, "allocated pdev @ 0x%p\n", pdev);
pdev->xdev = xdev;
- dev_set_drvdata(&xdev->dev, pdev);
mutex_init(&pdev->dev_lock);
@@ -58,6 +57,9 @@ static struct xen_pcibk_device *alloc_pdev(struct xenbus_device *xdev)
kfree(pdev);
pdev = NULL;
}
+
+ dev_set_drvdata(&xdev->dev, pdev);
+
out:
return pdev;
}
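
The xenbus.c hunk defers dev_set_drvdata() until alloc_pdev() has either fully initialised the pdev or freed it and reset the pointer to NULL, so drvdata never dangles on the error path and nothing that looks the device up can see a half-constructed pdev. A minimal stand-alone sketch of that publish-last rule (registry, struct obj and alloc_obj_sketch are hypothetical names):

#include <stdlib.h>

struct obj {
	int ready;
	/* ... locks, work items, mappings ... */
};

static struct obj *registry;	/* stands in for drvdata here */

static struct obj *alloc_obj_sketch(void)
{
	struct obj *p = calloc(1, sizeof(*p));

	if (p)
		p->ready = 1;	/* finish every bit of initialisation first */

	/* Publish only once the object is complete (or NULL on failure),
	 * mirroring the moved dev_set_drvdata() call above. */
	registry = p;
	return p;
}
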
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index 43bcae852546..ad4eb1024d1f 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -726,7 +726,7 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
if (!pending_req)
return 1;
- ring_req = *RING_GET_REQUEST(ring, rc);
+ RING_COPY_REQUEST(ring, rc, &ring_req);
ring->req_cons = ++rc;
err = prepare_pending_reqs(info, &ring_req, pending_req);
diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h
index 7d28aff605c7..7dc685b4057d 100644
--- a/include/xen/interface/io/ring.h
+++ b/include/xen/interface/io/ring.h
@@ -181,6 +181,20 @@ struct __name##_back_ring { \
#define RING_GET_REQUEST(_r, _idx) \
(&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))
+/*
+ * Get a local copy of a request.
+ *
+ * Use this in preference to RING_GET_REQUEST() so all processing is
+ * done on a local copy that cannot be modified by the other end.
+ *
+ * Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this
+ * to be ineffective where _req is a struct which consists of only bitfields.
+ */
+#define RING_COPY_REQUEST(_r, _idx, _req) do { \
+ /* Use volatile to force the copy into _req. */ \
+ *(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx); \
+} while (0)
+
#define RING_GET_RESPONSE(_r, _idx) \
(&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
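
The new macro pairs with the call-site changes above. A hypothetical backend fragment showing the intended usage (struct my_back_ring, struct my_request, request_is_sane() and process_request() are placeholders, not real kernel symbols):

static void handle_one_request(struct my_back_ring *ring)
{
	struct my_request req;			/* private stack copy */
	RING_IDX cons = ring->req_cons;

	/* Copy instead of RING_GET_REQUEST(): the frontend cannot
	 * rewrite req between validation and use. */
	RING_COPY_REQUEST(ring, cons, &req);
	ring->req_cons = cons + 1;

	if (!request_is_sane(&req))		/* validate the copy ... */
		return;
	process_request(&req);			/* ... and act only on the copy */
}
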