Diffstat (limited to 'drivers/misc/mic/vop/vop_vringh.c')
-rw-r--r--  drivers/misc/mic/vop/vop_vringh.c | 168
1 file changed, 57 insertions(+), 111 deletions(-)
diff --git a/drivers/misc/mic/vop/vop_vringh.c b/drivers/misc/mic/vop/vop_vringh.c
index 7014ffe88632..7cf9e1d034eb 100644
--- a/drivers/misc/mic/vop/vop_vringh.c
+++ b/drivers/misc/mic/vop/vop_vringh.c
@@ -53,33 +53,6 @@ static void _vop_notify(struct vringh *vrh)
vpdev->hw_ops->send_intr(vpdev, db);
}
-static void vop_virtio_init_post(struct vop_vdev *vdev)
-{
- struct mic_vqconfig *vqconfig = mic_vq_config(vdev->dd);
- struct vop_device *vpdev = vdev->vpdev;
- int i, used_size;
-
- for (i = 0; i < vdev->dd->num_vq; i++) {
- used_size = PAGE_ALIGN(sizeof(u16) * 3 +
- sizeof(struct vring_used_elem) *
- le16_to_cpu(vqconfig->num));
- if (!le64_to_cpu(vqconfig[i].used_address)) {
- dev_warn(vop_dev(vdev), "used_address zero??\n");
- continue;
- }
- vdev->vvr[i].vrh.vring.used =
- (void __force *)vpdev->hw_ops->remap(
- vpdev,
- le64_to_cpu(vqconfig[i].used_address),
- used_size);
- }
-
- vdev->dc->used_address_updated = 0;
-
- dev_info(vop_dev(vdev), "%s: device type %d LINKUP\n",
- __func__, vdev->virtio_id);
-}
-
static inline void vop_virtio_device_reset(struct vop_vdev *vdev)
{
int i;
@@ -130,9 +103,6 @@ static void vop_bh_handler(struct work_struct *work)
struct vop_vdev *vdev = container_of(work, struct vop_vdev,
virtio_bh_work);
- if (vdev->dc->used_address_updated)
- vop_virtio_init_post(vdev);
-
if (vdev->dc->vdev_reset)
vop_virtio_device_reset(vdev);
@@ -250,7 +220,6 @@ static void vop_init_device_ctrl(struct vop_vdev *vdev,
dc->guest_ack = 0;
dc->vdev_reset = 0;
dc->host_ack = 0;
- dc->used_address_updated = 0;
dc->c2h_vdev_db = -1;
dc->h2c_vdev_db = -1;
vdev->dc = dc;
@@ -298,9 +267,9 @@ static int vop_virtio_add_device(struct vop_vdev *vdev,
mutex_init(&vvr->vr_mutex);
vr_size = PAGE_ALIGN(round_up(vring_size(num, MIC_VIRTIO_RING_ALIGN), 4) +
sizeof(struct _mic_vring_info));
- vr->va = (void *)
- __get_free_pages(GFP_KERNEL | __GFP_ZERO,
- get_order(vr_size));
+
+ vr->va = dma_alloc_coherent(vop_dev(vdev), vr_size,
+ &vr_addr, GFP_KERNEL);
if (!vr->va) {
ret = -ENOMEM;
dev_err(vop_dev(vdev), "%s %d err %d\n",
@@ -310,15 +279,7 @@ static int vop_virtio_add_device(struct vop_vdev *vdev,
vr->len = vr_size;
vr->info = vr->va + round_up(vring_size(num, MIC_VIRTIO_RING_ALIGN), 4);
vr->info->magic = cpu_to_le32(MIC_MAGIC + vdev->virtio_id + i);
- vr_addr = dma_map_single(&vpdev->dev, vr->va, vr_size,
- DMA_BIDIRECTIONAL);
- if (dma_mapping_error(&vpdev->dev, vr_addr)) {
- free_pages((unsigned long)vr->va, get_order(vr_size));
- ret = -ENOMEM;
- dev_err(vop_dev(vdev), "%s %d err %d\n",
- __func__, __LINE__, ret);
- goto err;
- }
+
vqconfig[i].address = cpu_to_le64(vr_addr);
vring_init(&vr->vr, num, vr->va, MIC_VIRTIO_RING_ALIGN);
@@ -339,11 +300,9 @@ static int vop_virtio_add_device(struct vop_vdev *vdev,
dev_dbg(&vpdev->dev,
"%s %d index %d va %p info %p vr_size 0x%x\n",
__func__, __LINE__, i, vr->va, vr->info, vr_size);
- vvr->buf = (void *)__get_free_pages(GFP_KERNEL,
- get_order(VOP_INT_DMA_BUF_SIZE));
- vvr->buf_da = dma_map_single(&vpdev->dev,
- vvr->buf, VOP_INT_DMA_BUF_SIZE,
- DMA_BIDIRECTIONAL);
+
+ vvr->buf = dma_alloc_coherent(vop_dev(vdev), VOP_INT_DMA_BUF_SIZE,
+ &vvr->buf_da, GFP_KERNEL);
}
snprintf(irqname, sizeof(irqname), "vop%dvirtio%d", vpdev->index,
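
The allocation change above swaps the two-step __get_free_pages() + dma_map_single() sequence for a single dma_alloc_coherent() call, which returns the CPU virtual address, fills in the DMA address, and hands back zeroed memory, so the dma_mapping_error() unwind is no longer needed. A minimal sketch of the pattern; 'dev', 'vr_size' and the helper names are illustrative placeholders, not the driver's own:

#include <linux/dma-mapping.h>

static void *vring_buf_alloc(struct device *dev, size_t vr_size, dma_addr_t *da)
{
	/*
	 * Coherent allocation: one call yields both the CPU virtual
	 * address (return value) and the device-visible DMA address
	 * (*da), and the memory comes back zeroed.
	 */
	return dma_alloc_coherent(dev, vr_size, da, GFP_KERNEL);
}

static void vring_buf_free(struct device *dev, size_t vr_size, void *va, dma_addr_t da)
{
	/* Pair every dma_alloc_coherent() with dma_free_coherent(). */
	dma_free_coherent(dev, vr_size, va, da);
}
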
@@ -382,10 +341,8 @@ err:
for (j = 0; j < i; j++) {
struct vop_vringh *vvr = &vdev->vvr[j];
- dma_unmap_single(&vpdev->dev, le64_to_cpu(vqconfig[j].address),
- vvr->vring.len, DMA_BIDIRECTIONAL);
- free_pages((unsigned long)vvr->vring.va,
- get_order(vvr->vring.len));
+ dma_free_coherent(vop_dev(vdev), vvr->vring.len, vvr->vring.va,
+ le64_to_cpu(vqconfig[j].address));
}
return ret;
}
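
The error-unwind loop above works because the DMA address of each successfully allocated ring was recorded in the shared device page as vqconfig[j].address; the unwind only walks the rings set up before the failing iteration. A hedged sketch of that pattern, with the helper name and parameters as illustrative assumptions (types come from the driver's own headers):

static void unwind_vrings(struct device *dev, struct mic_vqconfig *vqconfig,
			  struct vop_vringh *vvr, int allocated)
{
	int j;

	/* Free only the rings allocated before the failure. */
	for (j = 0; j < allocated; j++)
		dma_free_coherent(dev, vvr[j].vring.len, vvr[j].vring.va,
				  le64_to_cpu(vqconfig[j].address));
}
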
@@ -433,17 +390,12 @@ skip_hot_remove:
for (i = 0; i < vdev->dd->num_vq; i++) {
struct vop_vringh *vvr = &vdev->vvr[i];
- dma_unmap_single(&vpdev->dev,
- vvr->buf_da, VOP_INT_DMA_BUF_SIZE,
- DMA_BIDIRECTIONAL);
- free_pages((unsigned long)vvr->buf,
- get_order(VOP_INT_DMA_BUF_SIZE));
+ dma_free_coherent(vop_dev(vdev), VOP_INT_DMA_BUF_SIZE,
+ vvr->buf, vvr->buf_da);
vringh_kiov_cleanup(&vvr->riov);
vringh_kiov_cleanup(&vvr->wiov);
- dma_unmap_single(&vpdev->dev, le64_to_cpu(vqconfig[i].address),
- vvr->vring.len, DMA_BIDIRECTIONAL);
- free_pages((unsigned long)vvr->vring.va,
- get_order(vvr->vring.len));
+ dma_free_coherent(vop_dev(vdev), vvr->vring.len, vvr->vring.va,
+ le64_to_cpu(vqconfig[i].address));
}
/*
* Order the type update with previous stores. This write barrier
@@ -1042,13 +994,27 @@ done:
return mask;
}
-static inline int
-vop_query_offset(struct vop_vdev *vdev, unsigned long offset,
- unsigned long *size, unsigned long *pa)
+/*
+ * Maps the device page and virtio rings to user space for readonly access.
+ */
+static int vop_mmap(struct file *f, struct vm_area_struct *vma)
{
- struct vop_device *vpdev = vdev->vpdev;
- unsigned long start = MIC_DP_SIZE;
- int i;
+ struct vop_vdev *vdev = f->private_data;
+ struct mic_vqconfig *vqconfig = mic_vq_config(vdev->dd);
+ unsigned long orig_start = vma->vm_start;
+ unsigned long orig_end = vma->vm_end;
+ int err, i;
+
+ if (!vdev->vpdev->hw_ops->dp_mmap)
+ return -EINVAL;
+ if (vma->vm_pgoff)
+ return -EINVAL;
+ if (vma->vm_flags & VM_WRITE)
+ return -EACCES;
+
+ err = vop_vdev_inited(vdev);
+ if (err)
+ return err;
/*
* MMAP interface is as follows:
@@ -1057,58 +1023,38 @@ vop_query_offset(struct vop_vdev *vdev, unsigned long offset,
* 0x1000 first vring
* 0x1000 + size of 1st vring second vring
* ....
+ *
+ * We manipulate vm_start/vm_end to trick dma_mmap_coherent into
+ * performing partial mappings, which is a bit of a hack, but safe
+ * while we are under mmap_lock(). Eventually this needs to be
+ * replaced by a proper DMA layer API.
*/
- if (!offset) {
- *pa = virt_to_phys(vpdev->hw_ops->get_dp(vpdev));
- *size = MIC_DP_SIZE;
- return 0;
- }
+ vma->vm_end = vma->vm_start + MIC_DP_SIZE;
+ err = vdev->vpdev->hw_ops->dp_mmap(vdev->vpdev, vma);
+ if (err)
+ goto out;
for (i = 0; i < vdev->dd->num_vq; i++) {
struct vop_vringh *vvr = &vdev->vvr[i];
- if (offset == start) {
- *pa = virt_to_phys(vvr->vring.va);
- *size = vvr->vring.len;
- return 0;
- }
- start += vvr->vring.len;
- }
- return -1;
-}
-
-/*
- * Maps the device page and virtio rings to user space for readonly access.
- */
-static int vop_mmap(struct file *f, struct vm_area_struct *vma)
-{
- struct vop_vdev *vdev = f->private_data;
- unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
- unsigned long pa, size = vma->vm_end - vma->vm_start, size_rem = size;
- int i, err;
+ vma->vm_start = vma->vm_end;
+ vma->vm_end += vvr->vring.len;
- err = vop_vdev_inited(vdev);
- if (err)
- goto ret;
- if (vma->vm_flags & VM_WRITE) {
- err = -EACCES;
- goto ret;
- }
- while (size_rem) {
- i = vop_query_offset(vdev, offset, &size, &pa);
- if (i < 0) {
- err = -EINVAL;
- goto ret;
- }
- err = remap_pfn_range(vma, vma->vm_start + offset,
- pa >> PAGE_SHIFT, size,
- vma->vm_page_prot);
+ err = -EINVAL;
+ if (vma->vm_end > orig_end)
+ goto out;
+ err = dma_mmap_coherent(vop_dev(vdev), vma, vvr->vring.va,
+ le64_to_cpu(vqconfig[i].address),
+ vvr->vring.len);
if (err)
- goto ret;
- size_rem -= size;
- offset += size;
+ goto out;
}
-ret:
+out:
+ /*
+ * Restore the original vma parameters.
+ */
+ vma->vm_start = orig_start;
+ vma->vm_end = orig_end;
return err;
}
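
The reworked vop_mmap() above lays several coherent buffers out back to back in a single read-only VMA. Because dma_mmap_coherent() always maps its buffer starting at vma->vm_start, the function temporarily narrows the [vm_start, vm_end) window around each call and restores the original bounds before returning, as the in-code comment notes. A simplified, hedged sketch of the same trick; the structure and helper names are illustrative and not part of the driver:

#include <linux/dma-mapping.h>
#include <linux/mm.h>

struct coherent_buf {
	void *va;		/* CPU address from dma_alloc_coherent() */
	dma_addr_t da;		/* matching DMA address */
	size_t len;		/* page-aligned length */
};

static int mmap_coherent_bufs(struct device *dev, struct vm_area_struct *vma,
			      struct coherent_buf *bufs, int n)
{
	unsigned long orig_start = vma->vm_start;
	unsigned long orig_end = vma->vm_end;
	int i, err = 0;

	vma->vm_end = vma->vm_start;
	for (i = 0; i < n; i++) {
		/* Slide the window over the slot for the next buffer. */
		vma->vm_start = vma->vm_end;
		vma->vm_end += bufs[i].len;

		err = -EINVAL;
		if (vma->vm_end > orig_end)	/* caller's mapping too small */
			break;
		err = dma_mmap_coherent(dev, vma, bufs[i].va, bufs[i].da,
					bufs[i].len);
		if (err)
			break;
	}

	/* Restore the caller-visible VMA bounds regardless of outcome. */
	vma->vm_start = orig_start;
	vma->vm_end = orig_end;
	return err;
}

In the driver itself the first window is the device page mapped through hw_ops->dp_mmap() and the following windows are the per-virtqueue rings, which is why vm_pgoff and VM_WRITE are rejected up front.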