author		Aaron Williams <awilliams@marvell.com>	2019-08-22 20:37:26 -0700
committer	Tom Rini <trini@konsulko.com>		2019-08-26 11:46:28 -0400
commit		b21dcebfa6b372cd91bf42a30f1d8a1a525f329b
tree		b569c1f257b8ccda9ea47714414147ce7adbfc4d
parent		4ebeb4c559f3604169a54f3a318bdabcc6047320
nvme: Fix PRP Offset Invalid
During large writes, I saw a Samsung EVO 970+ return a status value of 0x13 (PRP Offset Invalid). I tracked this down to improper handling of PRP entries: the blocks the PRP entries are placed in cannot cross a page boundary and thus should be allocated on page boundaries. This is how the Linux kernel driver works.

With this patch, the PRP pool is allocated on a page boundary and, other than the very first allocation, the pool size is a multiple of the page size. Each page can hold (4096 / 8) - 1 entries, since the last entry must point to the next page in the pool.

Signed-off-by: Aaron Williams <awilliams@marvell.com>
Reviewed-by: Bin Meng <bmeng.cn@gmail.com>
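As a reading aid before the diff, the sizing arithmetic described above works out as follows. This is a minimal standalone sketch assuming a 4 KiB page and 8-byte PRP entries; prp_pool_pages is a hypothetical helper name used only for illustration, not a function in the driver:

#include <stdint.h>

#define PAGE_SIZE	4096	/* assumed NVMe/host page size */
#define PRP_ENT_SIZE	8	/* each PRP entry is a 64-bit address */

/*
 * Entries usable per pool page: the last slot is reserved to chain
 * to the next page, hence (4096 / 8) - 1 = 511.
 */
static inline uint32_t prps_per_page(void)
{
	return PAGE_SIZE / PRP_ENT_SIZE - 1;
}

/* Whole pool pages needed to hold nprps entries, rounding up. */
static inline uint32_t prp_pool_pages(uint32_t nprps)
{
	return (nprps + prps_per_page() - 1) / prps_per_page();
}

For example, a 1 MiB transfer needs at most 256 PRP entries and therefore a single pool page; only transfers needing more than 511 entries chain into a second page.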
Diffstat (limited to 'drivers/nvme')
-rw-r--r--	drivers/nvme/nvme.c	| 29
1 file changed, 19 insertions(+), 10 deletions(-)
diff --git a/drivers/nvme/nvme.c b/drivers/nvme/nvme.c
index d4965e2ef6..47f101e280 100644
--- a/drivers/nvme/nvme.c
+++ b/drivers/nvme/nvme.c
@@ -73,6 +73,9 @@ static int nvme_setup_prps(struct nvme_dev *dev, u64 *prp2,
 	u64 *prp_pool;
 	int length = total_len;
 	int i, nprps;
+	u32 prps_per_page = (page_size >> 3) - 1;
+	u32 num_pages;
+
 	length -= (page_size - offset);
 
 	if (length <= 0) {
@@ -89,15 +92,20 @@ static int nvme_setup_prps(struct nvme_dev *dev, u64 *prp2,
}
nprps = DIV_ROUND_UP(length, page_size);
+ num_pages = DIV_ROUND_UP(nprps, prps_per_page);
if (nprps > dev->prp_entry_num) {
free(dev->prp_pool);
- dev->prp_pool = malloc(nprps << 3);
+ /*
+ * Always increase in increments of pages. It doesn't waste
+ * much memory and reduces the number of allocations.
+ */
+ dev->prp_pool = memalign(page_size, num_pages * page_size);
if (!dev->prp_pool) {
printf("Error: malloc prp_pool fail\n");
return -ENOMEM;
}
- dev->prp_entry_num = nprps;
+ dev->prp_entry_num = prps_per_page * num_pages;
}
prp_pool = dev->prp_pool;
@@ -788,14 +796,6 @@ static int nvme_probe(struct udevice *udev)
 	}
 	memset(ndev->queues, 0, NVME_Q_NUM * sizeof(struct nvme_queue *));
 
-	ndev->prp_pool = malloc(MAX_PRP_POOL);
-	if (!ndev->prp_pool) {
-		ret = -ENOMEM;
-		printf("Error: %s: Out of memory!\n", udev->name);
-		goto free_nvme;
-	}
-	ndev->prp_entry_num = MAX_PRP_POOL >> 3;
-
 	ndev->cap = nvme_readq(&ndev->bar->cap);
 	ndev->q_depth = min_t(int, NVME_CAP_MQES(ndev->cap) + 1, NVME_Q_DEPTH);
 	ndev->db_stride = 1 << NVME_CAP_STRIDE(ndev->cap);
@@ -805,6 +805,15 @@ static int nvme_probe(struct udevice *udev)
 	if (ret)
 		goto free_queue;
 
+	/* Allocate after the page size is known */
+	ndev->prp_pool = memalign(ndev->page_size, MAX_PRP_POOL);
+	if (!ndev->prp_pool) {
+		ret = -ENOMEM;
+		printf("Error: %s: Out of memory!\n", udev->name);
+		goto free_nvme;
+	}
+	ndev->prp_entry_num = MAX_PRP_POOL >> 3;
+
 	ret = nvme_setup_io_queues(ndev);
 	if (ret)
 		goto free_queue;
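For context, the sketch below shows how a chained PRP list of the shape this patch allocates is typically filled. It is an illustration of the scheme the commit message describes, not code from nvme.c; fill_prp_list is a hypothetical name, and buffer addresses are assumed identity-mapped as in U-Boot:

#include <stdint.h>

/*
 * Fill a chained PRP list: each 4 KiB pool page holds 511 data
 * pointers, and its 512th (final) slot holds the address of the
 * next pool page.
 */
static void fill_prp_list(uint64_t *pool, uint64_t first_buf, uint32_t nprps)
{
	uint64_t *page = pool;
	uint64_t buf = first_buf;
	uint32_t slot = 0;

	while (nprps--) {
		if (slot == 511) {
			/* Chain: the last entry points at the next pool page */
			page[slot] = (uint64_t)(uintptr_t)(page + 512);
			page += 512;
			slot = 0;
		}
		page[slot++] = buf;
		buf += 4096;
	}
}

Because memalign() returns page-aligned pool pages, the 511-entry stride keeps every list inside a single page, which is exactly the constraint the controller was flagging with PRP Offset Invalid.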