Diffstat (limited to 'drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c')
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c | 696
1 file changed, 293 insertions, 403 deletions
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
index 455da298227f..ee11ccaf0563 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
@@ -21,480 +21,367 @@
  *
  * Authors: Ben Skeggs
  */
-#include "priv.h"
+#include "ummu.h"
+#include "vmm.h"
 
-#include <core/gpuobj.h>
+#include <subdev/bar.h>
 #include <subdev/fb.h>
 
-void
-nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
-{
-        struct nvkm_vm *vm = vma->vm;
-        struct nvkm_mmu *mmu = vm->mmu;
-        struct nvkm_mm_node *r = node->mem;
-        int big = vma->node->type != mmu->func->spg_shift;
-        u32 offset = vma->node->offset + (delta >> 12);
-        u32 bits = vma->node->type - 12;
-        u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde;
-        u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
-        u32 max = 1 << (mmu->func->pgt_bits - bits);
-        u32 end, len;
-
-        delta = 0;
-        while (r) {
-                u64 phys = (u64)r->offset << 12;
-                u32 num = r->length >> bits;
-
-                while (num) {
-                        struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
-
-                        end = (pte + num);
-                        if (unlikely(end >= max))
-                                end = max;
-                        len = end - pte;
-
-                        mmu->func->map(vma, pgt, node, pte, len, phys, delta);
-
-                        num -= len;
-                        pte += len;
-                        if (unlikely(end >= max)) {
-                                phys += len << (bits + 12);
-                                pde++;
-                                pte = 0;
-                        }
-
-                        delta += (u64)len << vma->node->type;
-                }
-                r = r->next;
-        };
-
-        mmu->func->flush(vm);
-}
+#include <nvif/if500d.h>
+#include <nvif/if900d.h>
 
-static void
-nvkm_vm_map_sg_table(struct nvkm_vma *vma, u64 delta, u64 length,
-                     struct nvkm_mem *mem)
-{
-        struct nvkm_vm *vm = vma->vm;
-        struct nvkm_mmu *mmu = vm->mmu;
-        int big = vma->node->type != mmu->func->spg_shift;
-        u32 offset = vma->node->offset + (delta >> 12);
-        u32 bits = vma->node->type - 12;
-        u32 num = length >> vma->node->type;
-        u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde;
-        u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
-        u32 max = 1 << (mmu->func->pgt_bits - bits);
-        unsigned m, sglen;
-        u32 end, len;
-        int i;
-        struct scatterlist *sg;
-
-        for_each_sg(mem->sg->sgl, sg, mem->sg->nents, i) {
-                struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
-                sglen = sg_dma_len(sg) >> PAGE_SHIFT;
-
-                end = pte + sglen;
-                if (unlikely(end >= max))
-                        end = max;
-                len = end - pte;
-
-                for (m = 0; m < len; m++) {
-                        dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
-
-                        mmu->func->map_sg(vma, pgt, mem, pte, 1, &addr);
-                        num--;
-                        pte++;
-
-                        if (num == 0)
-                                goto finish;
-                }
-                if (unlikely(end >= max)) {
-                        pde++;
-                        pte = 0;
-                }
-                if (m < sglen) {
-                        for (; m < sglen; m++) {
-                                dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
-
-                                mmu->func->map_sg(vma, pgt, mem, pte, 1, &addr);
-                                num--;
-                                pte++;
-                                if (num == 0)
-                                        goto finish;
-                        }
-                }
-
-        }
-finish:
-        mmu->func->flush(vm);
-}
+struct nvkm_mmu_ptp {
+        struct nvkm_mmu_pt *pt;
+        struct list_head head;
+        u8  shift;
+        u16 mask;
+        u16 free;
+};
 
 static void
-nvkm_vm_map_sg(struct nvkm_vma *vma, u64 delta, u64 length,
-               struct nvkm_mem *mem)
+nvkm_mmu_ptp_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt *pt)
 {
-        struct nvkm_vm *vm = vma->vm;
-        struct nvkm_mmu *mmu = vm->mmu;
-        dma_addr_t *list = mem->pages;
-        int big = vma->node->type != mmu->func->spg_shift;
-        u32 offset = vma->node->offset + (delta >> 12);
-        u32 bits = vma->node->type - 12;
-        u32 num = length >> vma->node->type;
-        u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde;
-        u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
-        u32 max = 1 << (mmu->func->pgt_bits - bits);
-        u32 end, len;
-
-        while (num) {
-                struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
-
-                end = (pte + num);
-                if (unlikely(end >= max))
-                        end = max;
-                len = end - pte;
-
-                mmu->func->map_sg(vma, pgt, mem, pte, len, list);
-
-                num -= len;
-                pte += len;
-                list += len;
-                if (unlikely(end >= max)) {
-                        pde++;
-                        pte = 0;
-                }
+        const int slot = pt->base >> pt->ptp->shift;
+        struct nvkm_mmu_ptp *ptp = pt->ptp;
+
+        /* If there were no free slots in the parent allocation before,
+         * there will be now, so return PTP to the cache.
+         */
+        if (!ptp->free)
+                list_add(&ptp->head, &mmu->ptp.list);
+        ptp->free |= BIT(slot);
+
+        /* If there's no more sub-allocations, destroy PTP. */
+        if (ptp->free == ptp->mask) {
+                nvkm_mmu_ptc_put(mmu, force, &ptp->pt);
+                list_del(&ptp->head);
+                kfree(ptp);
         }
 
-        mmu->func->flush(vm);
+        kfree(pt);
 }
 
-void
-nvkm_vm_map(struct nvkm_vma *vma, struct nvkm_mem *node)
+struct nvkm_mmu_pt *
+nvkm_mmu_ptp_get(struct nvkm_mmu *mmu, u32 size, bool zero)
 {
-        if (node->sg)
-                nvkm_vm_map_sg_table(vma, 0, node->size << 12, node);
-        else
-        if (node->pages)
-                nvkm_vm_map_sg(vma, 0, node->size << 12, node);
-        else
-                nvkm_vm_map_at(vma, 0, node);
-}
+        struct nvkm_mmu_pt *pt;
+        struct nvkm_mmu_ptp *ptp;
+        int slot;
+
+        if (!(pt = kzalloc(sizeof(*pt), GFP_KERNEL)))
+                return NULL;
+
+        ptp = list_first_entry_or_null(&mmu->ptp.list, typeof(*ptp), head);
+        if (!ptp) {
+                /* Need to allocate a new parent to sub-allocate from. */
+                if (!(ptp = kmalloc(sizeof(*ptp), GFP_KERNEL))) {
+                        kfree(pt);
+                        return NULL;
+                }
 
-void
-nvkm_vm_unmap_at(struct nvkm_vma *vma, u64 delta, u64 length)
-{
-        struct nvkm_vm *vm = vma->vm;
-        struct nvkm_mmu *mmu = vm->mmu;
-        int big = vma->node->type != mmu->func->spg_shift;
-        u32 offset = vma->node->offset + (delta >> 12);
-        u32 bits = vma->node->type - 12;
-        u32 num = length >> vma->node->type;
-        u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde;
-        u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
-        u32 max = 1 << (mmu->func->pgt_bits - bits);
-        u32 end, len;
-
-        while (num) {
-                struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
-
-                end = (pte + num);
-                if (unlikely(end >= max))
-                        end = max;
-                len = end - pte;
-
-                mmu->func->unmap(vma, pgt, pte, len);
-
-                num -= len;
-                pte += len;
-                if (unlikely(end >= max)) {
-                        pde++;
-                        pte = 0;
+                ptp->pt = nvkm_mmu_ptc_get(mmu, 0x1000, 0x1000, false);
+                if (!ptp->pt) {
+                        kfree(ptp);
+                        kfree(pt);
+                        return NULL;
                 }
-        }
 
-        mmu->func->flush(vm);
+                ptp->shift = order_base_2(size);
+                slot = nvkm_memory_size(ptp->pt->memory) >> ptp->shift;
+                ptp->mask = (1 << slot) - 1;
+                ptp->free = ptp->mask;
+                list_add(&ptp->head, &mmu->ptp.list);
+        }
+        pt->ptp = ptp;
+        pt->sub = true;
+
+        /* Sub-allocate from parent object, removing PTP from cache
+         * if there's no more free slots left.
+         */
+        slot = __ffs(ptp->free);
+        ptp->free &= ~BIT(slot);
+        if (!ptp->free)
+                list_del(&ptp->head);
+
+        pt->memory = pt->ptp->pt->memory;
+        pt->base = slot << ptp->shift;
+        pt->addr = pt->ptp->pt->addr + pt->base;
+        return pt;
 }
 
-void
-nvkm_vm_unmap(struct nvkm_vma *vma)
-{
-        nvkm_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
-}
+struct nvkm_mmu_ptc {
+        struct list_head head;
+        struct list_head item;
+        u32 size;
+        u32 refs;
+};
 
-static void
-nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde)
+static inline struct nvkm_mmu_ptc *
+nvkm_mmu_ptc_find(struct nvkm_mmu *mmu, u32 size)
 {
-        struct nvkm_mmu *mmu = vm->mmu;
-        struct nvkm_vm_pgd *vpgd;
-        struct nvkm_vm_pgt *vpgt;
-        struct nvkm_memory *pgt;
-        u32 pde;
-
-        for (pde = fpde; pde <= lpde; pde++) {
-                vpgt = &vm->pgt[pde - vm->fpde];
-                if (--vpgt->refcount[big])
-                        continue;
-
-                pgt = vpgt->mem[big];
-                vpgt->mem[big] = NULL;
-
-                list_for_each_entry(vpgd, &vm->pgd_list, head) {
-                        mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
-                }
+        struct nvkm_mmu_ptc *ptc;
 
-                mmu->func->flush(vm);
+        list_for_each_entry(ptc, &mmu->ptc.list, head) {
+                if (ptc->size == size)
+                        return ptc;
+        }
 
-                nvkm_memory_del(&pgt);
+        ptc = kmalloc(sizeof(*ptc), GFP_KERNEL);
+        if (ptc) {
+                INIT_LIST_HEAD(&ptc->item);
+                ptc->size = size;
+                ptc->refs = 0;
+                list_add(&ptc->head, &mmu->ptc.list);
         }
+
+        return ptc;
 }
 
-static int
-nvkm_vm_map_pgt(struct nvkm_vm *vm, u32 pde, u32 type)
+void
+nvkm_mmu_ptc_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt **ppt)
 {
-        struct nvkm_mmu *mmu = vm->mmu;
-        struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
-        struct nvkm_vm_pgd *vpgd;
-        int big = (type != mmu->func->spg_shift);
-        u32 pgt_size;
-        int ret;
-
-        pgt_size  = (1 << (mmu->func->pgt_bits + 12)) >> type;
-        pgt_size *= 8;
-
-        ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
-                              pgt_size, 0x1000, true, &vpgt->mem[big]);
-        if (unlikely(ret))
-                return ret;
+        struct nvkm_mmu_pt *pt = *ppt;
+        if (pt) {
+                /* Handle sub-allocated page tables. */
+                if (pt->sub) {
+                        mutex_lock(&mmu->ptp.mutex);
+                        nvkm_mmu_ptp_put(mmu, force, pt);
+                        mutex_unlock(&mmu->ptp.mutex);
+                        return;
+                }
 
-        list_for_each_entry(vpgd, &vm->pgd_list, head) {
-                mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
+                /* Either cache or free the object. */
+                mutex_lock(&mmu->ptc.mutex);
+                if (pt->ptc->refs < 8 /* Heuristic. */ && !force) {
+                        list_add_tail(&pt->head, &pt->ptc->item);
+                        pt->ptc->refs++;
+                } else {
+                        nvkm_memory_unref(&pt->memory);
+                        kfree(pt);
+                }
+                mutex_unlock(&mmu->ptc.mutex);
         }
-
-        vpgt->refcount[big]++;
-        return 0;
 }
 
-int
-nvkm_vm_get(struct nvkm_vm *vm, u64 size, u32 page_shift, u32 access,
-            struct nvkm_vma *vma)
+struct nvkm_mmu_pt *
+nvkm_mmu_ptc_get(struct nvkm_mmu *mmu, u32 size, u32 align, bool zero)
 {
-        struct nvkm_mmu *mmu = vm->mmu;
-        u32 align = (1 << page_shift) >> 12;
-        u32 msize = size >> 12;
-        u32 fpde, lpde, pde;
+        struct nvkm_mmu_ptc *ptc;
+        struct nvkm_mmu_pt *pt;
         int ret;
 
-        mutex_lock(&vm->mutex);
-        ret = nvkm_mm_head(&vm->mm, 0, page_shift, msize, msize, align,
-                           &vma->node);
-        if (unlikely(ret != 0)) {
-                mutex_unlock(&vm->mutex);
-                return ret;
+        /* Sub-allocated page table (ie. GP100 LPT). */
+        if (align < 0x1000) {
+                mutex_lock(&mmu->ptp.mutex);
+                pt = nvkm_mmu_ptp_get(mmu, align, zero);
+                mutex_unlock(&mmu->ptp.mutex);
+                return pt;
         }
 
-        fpde = (vma->node->offset >> mmu->func->pgt_bits);
-        lpde = (vma->node->offset + vma->node->length - 1) >> mmu->func->pgt_bits;
+        /* Lookup cache for this page table size. */
+        mutex_lock(&mmu->ptc.mutex);
+        ptc = nvkm_mmu_ptc_find(mmu, size);
+        if (!ptc) {
+                mutex_unlock(&mmu->ptc.mutex);
+                return NULL;
+        }
 
-        for (pde = fpde; pde <= lpde; pde++) {
-                struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
-                int big = (vma->node->type != mmu->func->spg_shift);
+        /* If there's a free PT in the cache, reuse it. */
+        pt = list_first_entry_or_null(&ptc->item, typeof(*pt), head);
+        if (pt) {
+                if (zero)
+                        nvkm_fo64(pt->memory, 0, 0, size >> 3);
+                list_del(&pt->head);
+                ptc->refs--;
+                mutex_unlock(&mmu->ptc.mutex);
+                return pt;
+        }
+        mutex_unlock(&mmu->ptc.mutex);
 
-                if (likely(vpgt->refcount[big])) {
-                        vpgt->refcount[big]++;
-                        continue;
-                }
+        /* No such luck, we need to allocate. */
+        if (!(pt = kmalloc(sizeof(*pt), GFP_KERNEL)))
+                return NULL;
+        pt->ptc = ptc;
+        pt->sub = false;
 
-                ret = nvkm_vm_map_pgt(vm, pde, vma->node->type);
-                if (ret) {
-                        if (pde != fpde)
-                                nvkm_vm_unmap_pgt(vm, big, fpde, pde - 1);
-                        nvkm_mm_free(&vm->mm, &vma->node);
-                        mutex_unlock(&vm->mutex);
-                        return ret;
-                }
+        ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
+                              size, align, zero, &pt->memory);
+        if (ret) {
+                kfree(pt);
+                return NULL;
         }
-        mutex_unlock(&vm->mutex);
 
-        vma->vm = NULL;
-        nvkm_vm_ref(vm, &vma->vm, NULL);
-        vma->offset = (u64)vma->node->offset << 12;
-        vma->access = access;
-        return 0;
+        pt->base = 0;
+        pt->addr = nvkm_memory_addr(pt->memory);
+        return pt;
 }
 
 void
-nvkm_vm_put(struct nvkm_vma *vma)
-{
-        struct nvkm_mmu *mmu;
-        struct nvkm_vm *vm;
-        u32 fpde, lpde;
-
-        if (unlikely(vma->node == NULL))
-                return;
-        vm = vma->vm;
-        mmu = vm->mmu;
-
-        fpde = (vma->node->offset >> mmu->func->pgt_bits);
-        lpde = (vma->node->offset + vma->node->length - 1) >> mmu->func->pgt_bits;
-
-        mutex_lock(&vm->mutex);
-        nvkm_vm_unmap_pgt(vm, vma->node->type != mmu->func->spg_shift, fpde, lpde);
-        nvkm_mm_free(&vm->mm, &vma->node);
-        mutex_unlock(&vm->mutex);
-
-        nvkm_vm_ref(NULL, &vma->vm, NULL);
-}
-
-int
-nvkm_vm_boot(struct nvkm_vm *vm, u64 size)
+nvkm_mmu_ptc_dump(struct nvkm_mmu *mmu)
 {
-        struct nvkm_mmu *mmu = vm->mmu;
-        struct nvkm_memory *pgt;
-        int ret;
-
-        ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
-                              (size >> mmu->func->spg_shift) * 8, 0x1000, true, &pgt);
-        if (ret == 0) {
-                vm->pgt[0].refcount[0] = 1;
-                vm->pgt[0].mem[0] = pgt;
-                nvkm_memory_boot(pgt, vm);
+        struct nvkm_mmu_ptc *ptc;
+        list_for_each_entry(ptc, &mmu->ptc.list, head) {
+                struct nvkm_mmu_pt *pt, *tt;
+                list_for_each_entry_safe(pt, tt, &ptc->item, head) {
+                        nvkm_memory_unref(&pt->memory);
+                        list_del(&pt->head);
+                        kfree(pt);
+                }
         }
-
-        return ret;
 }
 
-int
-nvkm_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
-               u32 block, struct lock_class_key *key, struct nvkm_vm **pvm)
+static void
+nvkm_mmu_ptc_fini(struct nvkm_mmu *mmu)
{
-        static struct lock_class_key _key;
-        struct nvkm_vm *vm;
-        u64 mm_length = (offset + length) - mm_offset;
-        int ret;
-
-        vm = kzalloc(sizeof(*vm), GFP_KERNEL);
-        if (!vm)
-                return -ENOMEM;
+        struct nvkm_mmu_ptc *ptc, *ptct;
 
-        __mutex_init(&vm->mutex, "&vm->mutex", key ? key : &_key);
-        INIT_LIST_HEAD(&vm->pgd_list);
-        vm->mmu = mmu;
-        kref_init(&vm->refcount);
-        vm->fpde = offset >> (mmu->func->pgt_bits + 12);
-        vm->lpde = (offset + length - 1) >> (mmu->func->pgt_bits + 12);
-
-        vm->pgt = vzalloc((vm->lpde - vm->fpde + 1) * sizeof(*vm->pgt));
-        if (!vm->pgt) {
-                kfree(vm);
-                return -ENOMEM;
+        list_for_each_entry_safe(ptc, ptct, &mmu->ptc.list, head) {
+                WARN_ON(!list_empty(&ptc->item));
+                list_del(&ptc->head);
+                kfree(ptc);
         }
-
-        ret = nvkm_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
-                           block >> 12);
-        if (ret) {
-                vfree(vm->pgt);
-                kfree(vm);
-                return ret;
-        }
-
-        *pvm = vm;
-
-        return 0;
 }
 
-int
-nvkm_vm_new(struct nvkm_device *device, u64 offset, u64 length, u64 mm_offset,
-            struct lock_class_key *key, struct nvkm_vm **pvm)
+static void
+nvkm_mmu_ptc_init(struct nvkm_mmu *mmu)
 {
-        struct nvkm_mmu *mmu = device->mmu;
-        if (!mmu->func->create)
-                return -EINVAL;
-        return mmu->func->create(mmu, offset, length, mm_offset, key, pvm);
+        mutex_init(&mmu->ptc.mutex);
+        INIT_LIST_HEAD(&mmu->ptc.list);
+        mutex_init(&mmu->ptp.mutex);
+        INIT_LIST_HEAD(&mmu->ptp.list);
 }
 
-static int
-nvkm_vm_link(struct nvkm_vm *vm, struct nvkm_gpuobj *pgd)
+static void
+nvkm_mmu_type(struct nvkm_mmu *mmu, int heap, u8 type)
 {
-        struct nvkm_mmu *mmu = vm->mmu;
-        struct nvkm_vm_pgd *vpgd;
-        int i;
-
-        if (!pgd)
-                return 0;
-
-        vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
-        if (!vpgd)
-                return -ENOMEM;
-
-        vpgd->obj = pgd;
-
-        mutex_lock(&vm->mutex);
-        for (i = vm->fpde; i <= vm->lpde; i++)
-                mmu->func->map_pgt(pgd, i, vm->pgt[i - vm->fpde].mem);
-        list_add(&vpgd->head, &vm->pgd_list);
-        mutex_unlock(&vm->mutex);
-        return 0;
+        if (heap >= 0 && !WARN_ON(mmu->type_nr == ARRAY_SIZE(mmu->type))) {
+                mmu->type[mmu->type_nr].type = type | mmu->heap[heap].type;
+                mmu->type[mmu->type_nr].heap = heap;
+                mmu->type_nr++;
+        }
 }
 
-static void
-nvkm_vm_unlink(struct nvkm_vm *vm, struct nvkm_gpuobj *mpgd)
+static int
+nvkm_mmu_heap(struct nvkm_mmu *mmu, u8 type, u64 size)
 {
-        struct nvkm_vm_pgd *vpgd, *tmp;
-
-        if (!mpgd)
-                return;
-
-        mutex_lock(&vm->mutex);
-        list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
-                if (vpgd->obj == mpgd) {
-                        list_del(&vpgd->head);
-                        kfree(vpgd);
-                        break;
+        if (size) {
+                if (!WARN_ON(mmu->heap_nr == ARRAY_SIZE(mmu->heap))) {
+                        mmu->heap[mmu->heap_nr].type = type;
+                        mmu->heap[mmu->heap_nr].size = size;
+                        return mmu->heap_nr++;
                 }
         }
-        mutex_unlock(&vm->mutex);
+        return -EINVAL;
 }
 
 static void
-nvkm_vm_del(struct kref *kref)
+nvkm_mmu_host(struct nvkm_mmu *mmu)
 {
-        struct nvkm_vm *vm = container_of(kref, typeof(*vm), refcount);
-        struct nvkm_vm_pgd *vpgd, *tmp;
-
-        list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
-                nvkm_vm_unlink(vm, vpgd->obj);
-        }
-
-        nvkm_mm_fini(&vm->mm);
-        vfree(vm->pgt);
-        kfree(vm);
+        struct nvkm_device *device = mmu->subdev.device;
+        u8 type = NVKM_MEM_KIND * !!mmu->func->kind_sys;
+        int heap;
+
+        /* Non-mappable system memory. */
+        heap = nvkm_mmu_heap(mmu, NVKM_MEM_HOST, ~0ULL);
+        nvkm_mmu_type(mmu, heap, type);
+
+        /* Non-coherent, cached, system memory.
+         *
+         * Block-linear mappings of system memory must be done through
+         * BAR1, and cannot be supported on systems where we're unable
+         * to map BAR1 with write-combining.
+         */
+        type |= NVKM_MEM_MAPPABLE;
+        if (!device->bar || device->bar->iomap_uncached)
+                nvkm_mmu_type(mmu, heap, type & ~NVKM_MEM_KIND);
+        else
+                nvkm_mmu_type(mmu, heap, type);
+
+        /* Coherent, cached, system memory.
+         *
+         * Unsupported on systems that aren't able to support snooped
+         * mappings, and also for block-linear mappings which must be
+         * done through BAR1.
+         */
+        type |= NVKM_MEM_COHERENT;
+        if (device->func->cpu_coherent)
+                nvkm_mmu_type(mmu, heap, type & ~NVKM_MEM_KIND);
+
+        /* Uncached system memory. */
+        nvkm_mmu_type(mmu, heap, type |= NVKM_MEM_UNCACHED);
 }
 
-int
-nvkm_vm_ref(struct nvkm_vm *ref, struct nvkm_vm **ptr, struct nvkm_gpuobj *pgd)
+static void
+nvkm_mmu_vram(struct nvkm_mmu *mmu)
 {
-        if (ref) {
-                int ret = nvkm_vm_link(ref, pgd);
-                if (ret)
-                        return ret;
-
-                kref_get(&ref->refcount);
-        }
+        struct nvkm_device *device = mmu->subdev.device;
+        struct nvkm_mm *mm = &device->fb->ram->vram;
+        const u32 sizeN = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NORMAL);
+        const u32 sizeU = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NOMAP);
+        const u32 sizeM = nvkm_mm_heap_size(mm, NVKM_RAM_MM_MIXED);
+        u8 type = NVKM_MEM_KIND * !!mmu->func->kind;
+        u8 heap = NVKM_MEM_VRAM;
+        int heapM, heapN, heapU;
+
+        /* Mixed-memory doesn't support compression or display. */
+        heapM = nvkm_mmu_heap(mmu, heap, sizeM << NVKM_RAM_MM_SHIFT);
+
+        heap |= NVKM_MEM_COMP;
+        heap |= NVKM_MEM_DISP;
+        heapN = nvkm_mmu_heap(mmu, heap, sizeN << NVKM_RAM_MM_SHIFT);
+        heapU = nvkm_mmu_heap(mmu, heap, sizeU << NVKM_RAM_MM_SHIFT);
+
+        /* Add non-mappable VRAM types first so that they're preferred
+         * over anything else.  Mixed-memory will be slower than other
+         * heaps, it's prioritised last.
+         */
+        nvkm_mmu_type(mmu, heapU, type);
+        nvkm_mmu_type(mmu, heapN, type);
+        nvkm_mmu_type(mmu, heapM, type);
+
+        /* Add host memory types next, under the assumption that users
+         * wanting mappable memory want to use them as staging buffers
+         * or the like.
+         */
+        nvkm_mmu_host(mmu);
+
+        /* Mappable VRAM types go last, as they're basically the worst
+         * possible type to ask for unless there's no other choice.
+         */
+        if (device->bar) {
+                /* Write-combined BAR1 access. */
+                type |= NVKM_MEM_MAPPABLE;
+                if (!device->bar->iomap_uncached) {
+                        nvkm_mmu_type(mmu, heapN, type);
+                        nvkm_mmu_type(mmu, heapM, type);
+                }
 
-        if (*ptr) {
-                nvkm_vm_unlink(*ptr, pgd);
-                kref_put(&(*ptr)->refcount, nvkm_vm_del);
+                /* Uncached BAR1 access. */
+                type |= NVKM_MEM_COHERENT;
+                type |= NVKM_MEM_UNCACHED;
+                nvkm_mmu_type(mmu, heapN, type);
+                nvkm_mmu_type(mmu, heapM, type);
         }
-
-        *ptr = ref;
-        return 0;
 }
 
 static int
 nvkm_mmu_oneinit(struct nvkm_subdev *subdev)
 {
         struct nvkm_mmu *mmu = nvkm_mmu(subdev);
-        if (mmu->func->oneinit)
-                return mmu->func->oneinit(mmu);
+
+        /* Determine available memory types. */
+        if (mmu->subdev.device->fb && mmu->subdev.device->fb->ram)
+                nvkm_mmu_vram(mmu);
+        else
+                nvkm_mmu_host(mmu);
+
+        if (mmu->func->vmm.global) {
+                int ret = nvkm_vmm_new(subdev->device, 0, 0, NULL, 0, NULL,
+                                       "gart", &mmu->vmm);
+                if (ret)
+                        return ret;
+        }
+
         return 0;
 }
 
@@ -511,8 +398,10 @@ static void *
 nvkm_mmu_dtor(struct nvkm_subdev *subdev)
 {
         struct nvkm_mmu *mmu = nvkm_mmu(subdev);
-        if (mmu->func->dtor)
-                return mmu->func->dtor(mmu);
+
+        nvkm_vmm_unref(&mmu->vmm);
+
+        nvkm_mmu_ptc_fini(mmu);
         return mmu;
 }
 
@@ -529,9 +418,10 @@ nvkm_mmu_ctor(const struct nvkm_mmu_func *func, struct nvkm_device *device,
 {
         nvkm_subdev_ctor(&nvkm_mmu, device, index, &mmu->subdev);
         mmu->func = func;
-        mmu->limit = func->limit;
         mmu->dma_bits = func->dma_bits;
-        mmu->lpg_shift = func->lpg_shift;
+        nvkm_mmu_ptc_init(mmu);
+        mmu->user.ctor = nvkm_ummu_new;
+        mmu->user.base = func->mmu.user;
 }
 
 int
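
Editorial note: the intended calling pattern of the new page-table allocator is sketched below. The sketch is not part of the commit; example_alloc_pt() and its error handling are hypothetical, while nvkm_mmu_ptc_get(), nvkm_mmu_ptc_put() and the struct nvkm_mmu_pt fields are taken from the hunks above.

/* Editorial sketch -- assumes an initialised struct nvkm_mmu whose
 * ptc/ptp lists have been set up by nvkm_mmu_ptc_init().
 */
static int
example_alloc_pt(struct nvkm_mmu *mmu)
{
        struct nvkm_mmu_pt *pt;

        /* Request a 4KiB page table, 4KiB-aligned and zeroed.  With
         * align >= 0x1000 this takes the nvkm_mmu_ptc path: a cached
         * PT of the same size is reused (cleared via nvkm_fo64() when
         * zero is set) if available, otherwise fresh instance memory
         * is allocated.  An align below 0x1000 would instead be
         * sub-allocated from a 4KiB parent through nvkm_mmu_ptp_get()
         * (ie. GP100 LPT).
         */
        pt = nvkm_mmu_ptc_get(mmu, 0x1000, 0x1000, true);
        if (!pt)
                return -ENOMEM;

        /* pt->memory, pt->addr and pt->base describe the backing
         * allocation; a real backend would write PTEs through it here.
         */

        /* Release the PT.  With force == false it is parked on the
         * per-size free list (bounded by the refs < 8 heuristic) so a
         * later nvkm_mmu_ptc_get() of the same size can reuse it.
         */
        nvkm_mmu_ptc_put(mmu, false, &pt);
        return 0;
}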