-rw-r--r--  CREDITS | 20
-rw-r--r--  Documentation/admin-guide/README.rst | 32
-rw-r--r--  Documentation/devicetree/bindings/display/msm/gmu.txt | 59
-rw-r--r--  Documentation/devicetree/bindings/display/msm/gpu.txt | 42
-rw-r--r--  Documentation/networking/dsa/dsa.txt | 10
-rw-r--r--  Documentation/networking/msg_zerocopy.rst | 2
-rw-r--r--  Documentation/networking/switchdev.txt | 10
-rw-r--r--  Documentation/process/applying-patches.rst | 117
-rw-r--r--  Documentation/translations/it_IT/admin-guide/README.rst | 2
-rw-r--r--  MAINTAINERS | 25
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arc/Kconfig | 20
-rw-r--r--  arch/arc/configs/nps_defconfig | 1
-rw-r--r--  arch/arc/configs/vdk_hs38_defconfig | 1
-rw-r--r--  arch/arc/configs/vdk_hs38_smp_defconfig | 2
-rw-r--r--  arch/arc/include/asm/arcregs.h | 8
-rw-r--r--  arch/arc/include/asm/cache.h | 11
-rw-r--r--  arch/arc/include/asm/entry-arcv2.h | 54
-rw-r--r--  arch/arc/include/asm/uaccess.h | 8
-rw-r--r--  arch/arc/kernel/entry-arcv2.S | 4
-rw-r--r--  arch/arc/kernel/head.S | 16
-rw-r--r--  arch/arc/kernel/intc-arcv2.c | 2
-rw-r--r--  arch/arc/kernel/setup.c | 119
-rw-r--r--  arch/arc/lib/memcpy-archs.S | 14
-rw-r--r--  arch/arc/plat-hsdk/Kconfig | 1
-rw-r--r--  arch/arm/Kconfig | 1
-rw-r--r--  arch/arm/boot/dts/am335x-evm.dts | 2
-rw-r--r--  arch/arm/boot/dts/am335x-evmsk.dts | 4
-rw-r--r--  arch/arm/boot/dts/armada-xp-db.dts | 46
-rw-r--r--  arch/arm/boot/dts/armada-xp-gp.dts | 13
-rw-r--r--  arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts | 85
-rw-r--r--  arch/arm/boot/dts/gemini-dlink-dir-685.dts | 2
-rw-r--r--  arch/arm/boot/dts/tegra124-nyan.dtsi | 17
-rw-r--r--  arch/arm/crypto/sha256-armv4.pl | 3
-rw-r--r--  arch/arm/crypto/sha256-core.S_shipped | 3
-rw-r--r--  arch/arm/crypto/sha512-armv4.pl | 3
-rw-r--r--  arch/arm/crypto/sha512-core.S_shipped | 3
-rw-r--r--  arch/arm/include/asm/irq.h | 1
-rw-r--r--  arch/arm/kernel/irq.c | 62
-rw-r--r--  arch/arm/kernel/smp.c | 2
-rw-r--r--  arch/arm/mm/dma-mapping.c | 2
-rw-r--r--  arch/arm/probes/kprobes/opt-arm.c | 2
-rw-r--r--  arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts | 2
-rw-r--r--  arch/arm64/boot/dts/qcom/msm8998.dtsi | 2
-rw-r--r--  arch/arm64/crypto/chacha-neon-core.S | 20
-rw-r--r--  arch/arm64/include/asm/neon-intrinsics.h | 4
-rw-r--r--  arch/arm64/kernel/head.S | 3
-rw-r--r--  arch/arm64/kernel/ptrace.c | 15
-rw-r--r--  arch/arm64/kernel/setup.c | 3
-rw-r--r--  arch/arm64/mm/kasan_init.c | 2
-rw-r--r--  arch/mips/bcm63xx/dev-enet.c | 8
-rw-r--r--  arch/mips/kernel/cmpxchg.c | 3
-rw-r--r--  arch/mips/kernel/setup.c | 3
-rw-r--r--  arch/mips/lantiq/xway/vmmc.c | 4
-rw-r--r--  arch/mips/net/ebpf_jit.c | 26
-rw-r--r--  arch/parisc/kernel/ptrace.c | 29
-rw-r--r--  arch/powerpc/platforms/powernv/pci-ioda.c | 2
-rw-r--r--  arch/powerpc/platforms/powernv/pci.c | 2
-rw-r--r--  arch/s390/kvm/vsie.c | 2
-rw-r--r--  arch/sh/boot/dts/Makefile | 2
-rw-r--r--  arch/x86/include/asm/hyperv-tlfs.h | 2
-rw-r--r--  arch/x86/include/asm/kvm_host.h | 2
-rw-r--r--  arch/x86/include/asm/uaccess.h | 6
-rw-r--r--  arch/x86/kvm/cpuid.c | 4
-rw-r--r--  arch/x86/kvm/mmu.c | 18
-rw-r--r--  arch/x86/mm/extable.c | 58
-rw-r--r--  crypto/af_alg.c | 4
-rw-r--r--  drivers/base/power/runtime.c | 2
-rw-r--r--  drivers/clk/at91/at91sam9x5.c | 5
-rw-r--r--  drivers/clk/at91/sama5d2.c | 4
-rw-r--r--  drivers/clk/at91/sama5d4.c | 2
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun6i-a31.c | 4
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun8i-v3s.c | 2
-rw-r--r--  drivers/cpufreq/scmi-cpufreq.c | 2
-rw-r--r--  drivers/crypto/ccree/cc_pm.h | 2
-rw-r--r--  drivers/gpio/gpio-mt7621.c | 20
-rw-r--r--  drivers/gpio/gpio-pxa.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 47
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 138
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c | 88
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 48
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c | 51
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 40
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 22
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c | 14
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 119
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_stream.h | 24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_abm.c | 45
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 22
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c | 19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 189
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | 133
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/abm.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h | 23
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 12
-rw-r--r--  drivers/gpu/drm/amd/display/include/dal_asic_id.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/modules/power/power_helpers.c | 23
-rw-r--r--  drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 19
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 8
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c | 8
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c | 26
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.h | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c | 28
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.h | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | 4
-rw-r--r--  drivers/gpu/drm/bochs/bochs_drv.c | 4
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 9
-rw-r--r--  drivers/gpu/drm/drm_file.c | 22
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 22
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c | 12
-rw-r--r--  drivers/gpu/drm/imx/Kconfig | 3
-rw-r--r--  drivers/gpu/drm/imx/imx-drm-core.c | 7
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-crtc.c | 30
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-plane.c | 76
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-plane.h | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c | 9
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 32
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h | 13
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h | 3
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c | 5
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c | 5
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c | 37
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h | 14
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c | 4
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h | 19
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h | 220
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c | 44
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h | 44
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h | 7
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h | 1
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c | 65
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c | 36
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c | 51
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h | 27
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c | 325
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h | 28
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h | 28
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 126
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h | 10
-rw-r--r--  drivers/gpu/drm/nouveau/Kbuild | 2
-rw-r--r--  drivers/gpu/drm/nouveau/Kconfig | 12
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/crtc.c | 214
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/disp.c | 216
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/disp.h | 5
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/core.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/curs.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/disp.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/wimm.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/wndw.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvif/class.h | 13
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvif/clb069.h | 12
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvif/if000c.h | 30
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvif/ifc00d.h | 21
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvif/vmm.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/device.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h | 14
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h | 6
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_abi16.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_chan.c | 32
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_chan.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 306
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.h | 21
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dmem.c | 887
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dmem.h | 60
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 248
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 43
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_svm.c | 835
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_svm.h | 48
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vmm.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vmm.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nv84_fence.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvif/disp.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvif/vmm.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/subdev.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/ce/tu104.c) | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 86
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/user.c | 15
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c | 16
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/roottu102.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/disp/roottu104.c) | 20
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu104.c) | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/disp/tu104.c) | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu102.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu104.c) | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu104.c) | 30
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/user.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/usertu102.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/fifo/usertu104.c) | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c | 27
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c | 331
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h | 16
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c | 23
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c | 33
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/falcon/base.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu102.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu104.c) | 30
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu104.c) | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c | 16
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu104.c) | 39
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c | 106
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c | 62
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu104.c) | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gv100.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu104.c) | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c | 83
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c | 382
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h | 82
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c | 56
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk104.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk20a.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm200.c | 22
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm20b.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c | 210
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp10b.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgv100.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmmcp77.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c | 15
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu104.c) | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c | 16
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf117.c | 60
-rw-r--r--  drivers/gpu/drm/radeon/ci_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_cs.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/si_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/scheduler/sched_entity.c | 39
-rw-r--r--  drivers/gpu/ipu-v3/ipu-pre.c | 6
-rw-r--r--  drivers/gpu/ipu-v3/ipu-prg.c | 16
-rw-r--r--  drivers/gpu/ipu-v3/ipu-prv.h | 1
-rw-r--r--  drivers/infiniband/hw/cxgb4/device.c | 15
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c | 10
-rw-r--r--  drivers/iommu/dmar.c | 2
-rw-r--r--  drivers/mailbox/bcm-flexrm-mailbox.c | 4
-rw-r--r--  drivers/mailbox/mailbox.c | 1
-rw-r--r--  drivers/mmc/core/block.c | 6
-rw-r--r--  drivers/mmc/core/core.c | 2
-rw-r--r--  drivers/mmc/core/queue.c | 9
-rw-r--r--  drivers/mmc/host/cqhci.c | 13
-rw-r--r--  drivers/mmc/host/mmc_spi.c | 1
-rw-r--r--  drivers/mmc/host/renesas_sdhi_sys_dmac.c | 1
-rw-r--r--  drivers/mmc/host/sdhci-esdhc-imx.c | 9
-rw-r--r--  drivers/mmc/host/tmio_mmc.h | 5
-rw-r--r--  drivers/mmc/host/tmio_mmc_core.c | 17
-rw-r--r--  drivers/mtd/devices/powernv_flash.c | 2
-rw-r--r--  drivers/mtd/mtdcore.c | 1
-rw-r--r--  drivers/net/bonding/bond_main.c | 35
-rw-r--r--  drivers/net/dsa/b53/b53_common.c | 90
-rw-r--r--  drivers/net/dsa/b53/b53_priv.h | 3
-rw-r--r--  drivers/net/dsa/bcm_sf2.c | 10
-rw-r--r--  drivers/net/dsa/lantiq_gswip.c | 6
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 14
-rw-r--r--  drivers/net/dsa/mv88e6xxx/port.c | 8
-rw-r--r--  drivers/net/dsa/mv88e6xxx/port.h | 1
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | 3
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c | 9
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h | 4
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h | 13
-rw-r--r--  drivers/net/ethernet/atheros/atlx/atl2.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 10
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 2
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic.h | 14
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic_main.c | 149
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c | 128
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 2
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | 3
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 27
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_xsk.c | 5
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 19
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 15
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c | 7
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/sky2.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 12
-rw-r--r--  drivers/net/ethernet/microchip/enc28j60.c | 2
-rw-r--r--  drivers/net/ethernet/microchip/lan743x_main.c | 16
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/jit.c | 17
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 21
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_iwarp.h | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | 9
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 22
-rw-r--r--  drivers/net/ethernet/ti/netcp_core.c | 2
-rw-r--r--  drivers/net/geneve.c | 11
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 22
-rw-r--r--  drivers/net/ipvlan/ipvlan_main.c | 4
-rw-r--r--  drivers/net/phy/dp83867.c | 3
-rw-r--r--  drivers/net/phy/marvell10g.c | 6
-rw-r--r--  drivers/net/phy/mdio_bus.c | 1
-rw-r--r--  drivers/net/phy/micrel.c | 13
-rw-r--r--  drivers/net/phy/phylink.c | 4
-rw-r--r--  drivers/net/phy/realtek.c | 7
-rw-r--r--  drivers/net/phy/xilinx_gmii2rgmii.c | 5
-rw-r--r--  drivers/net/team/team.c | 4
-rw-r--r--  drivers/net/tun.c | 4
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 4
-rw-r--r--  drivers/net/usb/r8152.c | 5
-rw-r--r--  drivers/net/vrf.c | 3
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c | 2
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/usb.c | 46
-rw-r--r--  drivers/net/xen-netback/hash.c | 2
-rw-r--r--  drivers/net/xen-netback/interface.c | 7
-rw-r--r--  drivers/net/xen-netback/netback.c | 10
-rw-r--r--  drivers/pinctrl/meson/pinctrl-meson8b.c | 2
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-qcs404.c | 2
-rw-r--r--  drivers/scsi/3w-9xxx.c | 14
-rw-r--r--  drivers/scsi/3w-sas.c | 12
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_init.c | 8
-rw-r--r--  drivers/scsi/bfa/bfad.c | 18
-rw-r--r--  drivers/scsi/csiostor/csio_init.c | 7
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_main.c | 8
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 8
-rw-r--r--  drivers/scsi/hptiop.c | 10
-rw-r--r--  drivers/scsi/libiscsi.c | 6
-rw-r--r--  drivers/scsi/libsas/sas_expander.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 19
-rw-r--r--  drivers/scsi/scsi_lib.c | 2
-rw-r--r--  drivers/scsi/sd_zbc.c | 8
-rw-r--r--  drivers/tee/optee/core.c | 4
-rw-r--r--  drivers/vhost/vhost.c | 2
-rw-r--r--  fs/afs/cell.c | 1
-rw-r--r--  fs/binfmt_script.c | 57
-rw-r--r--  fs/ceph/snap.c | 3
-rw-r--r--  fs/hugetlbfs/inode.c | 12
-rw-r--r--  fs/namespace.c | 2
-rw-r--r--  fs/nfs/nfs4idmap.c | 31
-rw-r--r--  fs/orangefs/file.c | 4
-rw-r--r--  fs/proc/base.c | 4
-rw-r--r--  include/drm/drm_dp_helper.h | 13
-rw-r--r--  include/drm/drm_drv.h | 2
-rw-r--r--  include/keys/request_key_auth-type.h | 36
-rw-r--r--  include/keys/user-type.h | 2
-rw-r--r--  include/linux/key-type.h | 22
-rw-r--r--  include/linux/netdev_features.h | 24
-rw-r--r--  include/linux/netdevice.h | 2
-rw-r--r--  include/linux/phy.h | 8
-rw-r--r--  include/linux/sched.h | 6
-rw-r--r--  include/linux/skbuff.h | 8
-rw-r--r--  include/linux/virtio_net.h | 19
-rw-r--r--  include/net/icmp.h | 9
-rw-r--r--  include/net/ip.h | 4
-rw-r--r--  include/net/phonet/pep.h | 5
-rw-r--r--  include/net/xfrm.h | 12
-rw-r--r--  include/uapi/drm/amdgpu_drm.h | 3
-rw-r--r--  include/uapi/drm/nouveau_drm.h | 51
-rw-r--r--  include/video/imx-ipu-v3.h | 1
-rw-r--r--  init/initramfs.c | 6
-rw-r--r--  kernel/bpf/lpm_trie.c | 1
-rw-r--r--  kernel/bpf/stackmap.c | 8
-rw-r--r--  kernel/bpf/syscall.c | 6
-rw-r--r--  kernel/bpf/verifier.c | 14
-rw-r--r--  kernel/sched/psi.c | 2
-rw-r--r--  kernel/trace/trace.c | 2
-rw-r--r--  kernel/trace/trace_kprobe.c | 10
-rw-r--r--  lib/Kconfig.kasan | 22
-rw-r--r--  lib/assoc_array.c | 8
-rw-r--r--  mm/debug.c | 4
-rw-r--r--  mm/hugetlb.c | 16
-rw-r--r--  mm/kasan/Makefile | 2
-rw-r--r--  mm/kasan/common.c | 29
-rw-r--r--  mm/kasan/tags.c | 2
-rw-r--r--  mm/kmemleak.c | 10
-rw-r--r--  mm/maccess.c | 6
-rw-r--r--  mm/memory_hotplug.c | 27
-rw-r--r--  mm/mempolicy.c | 6
-rw-r--r--  mm/migrate.c | 11
-rw-r--r--  mm/mmap.c | 7
-rw-r--r--  mm/page_alloc.c | 20
-rw-r--r--  mm/shmem.c | 12
-rw-r--r--  mm/slab.c | 15
-rw-r--r--  mm/slab.h | 7
-rw-r--r--  mm/slab_common.c | 3
-rw-r--r--  mm/slub.c | 59
-rw-r--r--  mm/swap.c | 17
-rw-r--r--  mm/util.c | 2
-rw-r--r--  net/bpf/test_run.c | 45
-rw-r--r--  net/bridge/br_multicast.c | 9
-rw-r--r--  net/ceph/messenger.c | 15
-rw-r--r--  net/compat.c | 6
-rw-r--r--  net/core/dev.c | 4
-rw-r--r--  net/core/filter.c | 12
-rw-r--r--  net/core/skbuff.c | 4
-rw-r--r--  net/dsa/dsa2.c | 16
-rw-r--r--  net/dsa/port.c | 8
-rw-r--r--  net/ipv4/cipso_ipv4.c | 20
-rw-r--r--  net/ipv4/esp4.c | 2
-rw-r--r--  net/ipv4/fib_frontend.c | 4
-rw-r--r--  net/ipv4/icmp.c | 7
-rw-r--r--  net/ipv4/ip_gre.c | 33
-rw-r--r--  net/ipv4/ip_input.c | 9
-rw-r--r--  net/ipv4/ip_options.c | 22
-rw-r--r--  net/ipv4/netlink.c | 17
-rw-r--r--  net/ipv4/route.c | 2
-rw-r--r--  net/ipv4/tcp.c | 2
-rw-r--r--  net/ipv4/tcp_ipv4.c | 5
-rw-r--r--  net/ipv4/tcp_output.c | 1
-rw-r--r--  net/ipv4/udp.c | 6
-rw-r--r--  net/ipv6/esp6.c | 2
-rw-r--r--  net/ipv6/fou6.c | 2
-rw-r--r--  net/ipv6/ip6_gre.c | 73
-rw-r--r--  net/ipv6/route.c | 39
-rw-r--r--  net/ipv6/sit.c | 1
-rw-r--r--  net/ipv6/udp.c | 12
-rw-r--r--  net/ipv6/xfrm6_tunnel.c | 2
-rw-r--r--  net/key/af_key.c | 42
-rw-r--r--  net/mac80211/cfg.c | 6
-rw-r--r--  net/mac80211/main.c | 4
-rw-r--r--  net/mac80211/mesh.h | 6
-rw-r--r--  net/mac80211/mesh_pathtbl.c | 157
-rw-r--r--  net/mac80211/rx.c | 7
-rw-r--r--  net/mpls/af_mpls.c | 3
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c | 3
-rw-r--r--  net/netfilter/nf_tables_api.c | 3
-rw-r--r--  net/netlabel/netlabel_kapi.c | 3
-rw-r--r--  net/nfc/llcp_commands.c | 20
-rw-r--r--  net/nfc/llcp_core.c | 24
-rw-r--r--  net/phonet/pep.c | 32
-rw-r--r--  net/sched/act_ipt.c | 3
-rw-r--r--  net/sched/act_skbedit.c | 3
-rw-r--r--  net/sched/act_tunnel_key.c | 3
-rw-r--r--  net/sched/sch_netem.c | 10
-rw-r--r--  net/sctp/chunk.c | 2
-rw-r--r--  net/sctp/transport.c | 3
-rw-r--r--  net/smc/smc.h | 6
-rw-r--r--  net/socket.c | 1
-rw-r--r--  net/tipc/socket.c | 17
-rw-r--r--  net/unix/af_unix.c | 57
-rw-r--r--  net/unix/diag.c | 3
-rw-r--r--  net/x25/af_x25.c | 13
-rw-r--r--  net/xdp/xdp_umem.c | 11
-rw-r--r--  net/xdp/xsk.c | 20
-rw-r--r--  net/xfrm/xfrm_interface.c | 4
-rw-r--r--  net/xfrm/xfrm_policy.c | 4
-rw-r--r--  net/xfrm/xfrm_state.c | 30
-rw-r--r--  net/xfrm/xfrm_user.c | 2
-rw-r--r--  scripts/Makefile.kasan | 2
-rw-r--r--  scripts/kallsyms.c | 4
-rw-r--r--  security/keys/internal.h | 13
-rw-r--r--  security/keys/key.c | 5
-rw-r--r--  security/keys/keyctl.c | 1
-rw-r--r--  security/keys/keyring.c | 4
-rw-r--r--  security/keys/proc.c | 3
-rw-r--r--  security/keys/process_keys.c | 1
-rw-r--r--  security/keys/request_key.c | 73
-rw-r--r--  security/keys/request_key_auth.c | 18
-rw-r--r--  security/lsm_audit.c | 10
-rw-r--r--  sound/pci/hda/patch_realtek.c | 42
-rw-r--r--  sound/soc/generic/simple-card.c | 2
-rw-r--r--  sound/soc/samsung/i2s.c | 8
-rw-r--r--  sound/soc/soc-topology.c | 8
-rw-r--r--  tools/testing/selftests/bpf/test_lpm_map.c | 10
-rwxr-xr-x  tools/testing/selftests/net/fib_tests.sh | 1
-rwxr-xr-x  tools/testing/selftests/net/pmtu.sh | 96
-rwxr-xr-x  tools/testing/selftests/net/udpgro.sh | 8
-rw-r--r--  tools/testing/selftests/net/udpgso_bench_rx.c | 42
-rw-r--r--  virt/kvm/kvm_main.c | 2
545 files changed, 8028 insertions, 4060 deletions
diff --git a/CREDITS b/CREDITS
index e818eb6a3e71..0175098d4776 100644
--- a/CREDITS
+++ b/CREDITS
@@ -842,10 +842,9 @@ D: ax25-utils maintainer.
N: Helge Deller
E: deller@gmx.de
-E: hdeller@redhat.de
-D: PA-RISC Linux hacker, LASI-, ASP-, WAX-, LCD/LED-driver
-S: Schimmelsrain 1
-S: D-69231 Rauenberg
+W: http://www.parisc-linux.org/
+D: PA-RISC Linux architecture maintainer
+D: LASI-, ASP-, WAX-, LCD/LED-driver
S: Germany
N: Jean Delvare
@@ -1361,7 +1360,7 @@ S: Stellenbosch, Western Cape
S: South Africa
N: Grant Grundler
-E: grundler@parisc-linux.org
+E: grantgrundler@gmail.com
W: http://obmouse.sourceforge.net/
W: http://www.parisc-linux.org/
D: obmouse - rewrote Olivier Florent's Omnibook 600 "pop-up" mouse driver
@@ -2492,7 +2491,7 @@ S: Syracuse, New York 13206
S: USA
N: Kyle McMartin
-E: kyle@parisc-linux.org
+E: kyle@mcmartin.ca
D: Linux/PARISC hacker
D: AD1889 sound driver
S: Ottawa, Canada
@@ -3780,14 +3779,13 @@ S: 21513 Conradia Ct
S: Cupertino, CA 95014
S: USA
-N: Thibaut Varene
-E: T-Bone@parisc-linux.org
-W: http://www.parisc-linux.org/~varenet/
-P: 1024D/B7D2F063 E67C 0D43 A75E 12A5 BB1C FA2F 1E32 C3DA B7D2 F063
+N: Thibaut Varène
+E: hacks+kernel@slashdirt.org
+W: http://hacks.slashdirt.org/
D: PA-RISC port minion, PDC and GSCPS2 drivers, debuglocks and other bits
D: Some ARM at91rm9200 bits, S1D13XXX FB driver, random patches here and there
D: AD1889 sound driver
-S: Paris, France
+S: France
N: Heikki Vatiainen
E: hessu@cs.tut.fi
diff --git a/Documentation/admin-guide/README.rst b/Documentation/admin-guide/README.rst
index 0797eec76be1..47e577264198 100644
--- a/Documentation/admin-guide/README.rst
+++ b/Documentation/admin-guide/README.rst
@@ -1,9 +1,9 @@
.. _readme:
-Linux kernel release 4.x <http://kernel.org/>
+Linux kernel release 5.x <http://kernel.org/>
=============================================
-These are the release notes for Linux version 4. Read them carefully,
+These are the release notes for Linux version 5. Read them carefully,
as they tell you what this is all about, explain how to install the
kernel, and what to do if something goes wrong.
@@ -63,7 +63,7 @@ Installing the kernel source
directory where you have permissions (e.g. your home directory) and
unpack it::
- xz -cd linux-4.X.tar.xz | tar xvf -
+ xz -cd linux-5.x.tar.xz | tar xvf -
Replace "X" with the version number of the latest kernel.
@@ -72,26 +72,26 @@ Installing the kernel source
files. They should match the library, and not get messed up by
whatever the kernel-du-jour happens to be.
- - You can also upgrade between 4.x releases by patching. Patches are
+ - You can also upgrade between 5.x releases by patching. Patches are
distributed in the xz format. To install by patching, get all the
newer patch files, enter the top level directory of the kernel source
- (linux-4.X) and execute::
+ (linux-5.x) and execute::
- xz -cd ../patch-4.x.xz | patch -p1
+ xz -cd ../patch-5.x.xz | patch -p1
- Replace "x" for all versions bigger than the version "X" of your current
+ Replace "x" for all versions bigger than the version "x" of your current
source tree, **in_order**, and you should be ok. You may want to remove
the backup files (some-file-name~ or some-file-name.orig), and make sure
that there are no failed patches (some-file-name# or some-file-name.rej).
If there are, either you or I have made a mistake.
- Unlike patches for the 4.x kernels, patches for the 4.x.y kernels
+ Unlike patches for the 5.x kernels, patches for the 5.x.y kernels
(also known as the -stable kernels) are not incremental but instead apply
- directly to the base 4.x kernel. For example, if your base kernel is 4.0
- and you want to apply the 4.0.3 patch, you must not first apply the 4.0.1
- and 4.0.2 patches. Similarly, if you are running kernel version 4.0.2 and
- want to jump to 4.0.3, you must first reverse the 4.0.2 patch (that is,
- patch -R) **before** applying the 4.0.3 patch. You can read more on this in
+ directly to the base 5.x kernel. For example, if your base kernel is 5.0
+ and you want to apply the 5.0.3 patch, you must not first apply the 5.0.1
+ and 5.0.2 patches. Similarly, if you are running kernel version 5.0.2 and
+ want to jump to 5.0.3, you must first reverse the 5.0.2 patch (that is,
+ patch -R) **before** applying the 5.0.3 patch. You can read more on this in
:ref:`Documentation/process/applying-patches.rst <applying_patches>`.
Alternatively, the script patch-kernel can be used to automate this
@@ -114,7 +114,7 @@ Installing the kernel source
Software requirements
---------------------
- Compiling and running the 4.x kernels requires up-to-date
+ Compiling and running the 5.x kernels requires up-to-date
versions of various software packages. Consult
:ref:`Documentation/process/changes.rst <changes>` for the minimum version numbers
required and how to get updates for these packages. Beware that using
@@ -132,12 +132,12 @@ Build directory for the kernel
place for the output files (including .config).
Example::
- kernel source code: /usr/src/linux-4.X
+ kernel source code: /usr/src/linux-5.x
build directory: /home/name/build/kernel
To configure and build the kernel, use::
- cd /usr/src/linux-4.X
+ cd /usr/src/linux-5.x
make O=/home/name/build/kernel menuconfig
make O=/home/name/build/kernel
sudo make O=/home/name/build/kernel modules_install install
diff --git a/Documentation/devicetree/bindings/display/msm/gmu.txt b/Documentation/devicetree/bindings/display/msm/gmu.txt
new file mode 100644
index 000000000000..3439b38e60f2
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/msm/gmu.txt
@@ -0,0 +1,59 @@
+Qualcomm adreno/snapdragon GMU (Graphics management unit)
+
+The GMU is a programmable power controller for the GPU. The CPU controls the
+GMU, which in turn handles power controls for the GPU.
+
+Required properties:
+- compatible: "qcom,adreno-gmu-XYZ.W", "qcom,adreno-gmu"
+ for example: "qcom,adreno-gmu-630.2", "qcom,adreno-gmu"
+ Note that you need to list the less specific "qcom,adreno-gmu"
+ for generic matches and the more specific identifier to match
+ the exact device.
+- reg: Physical base address and length of the GMU registers.
+- reg-names: Matching names for the register regions
+ * "gmu"
+ * "gmu_pdc"
+ * "gmu_pdc_seg"
+- interrupts: The interrupt signals from the GMU.
+- interrupt-names: Matching names for the interrupts
+ * "hfi"
+ * "gmu"
+- clocks: phandles to the device clocks
+- clock-names: Matching names for the clocks
+ * "gmu"
+ * "cxo"
+ * "axi"
+ * "mnoc"
+- power-domains: should be <&clock_gpucc GPU_CX_GDSC>
+- iommus: phandle to the adreno iommu
+- operating-points-v2: phandle to the OPP operating points
+
+Example:
+
+/ {
+ ...
+
+ gmu: gmu@506a000 {
+ compatible="qcom,adreno-gmu-630.2", "qcom,adreno-gmu";
+
+ reg = <0x506a000 0x30000>,
+ <0xb280000 0x10000>,
+ <0xb480000 0x10000>;
+ reg-names = "gmu", "gmu_pdc", "gmu_pdc_seq";
+
+ interrupts = <GIC_SPI 304 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "hfi", "gmu";
+
+ clocks = <&gpucc GPU_CC_CX_GMU_CLK>,
+ <&gpucc GPU_CC_CXO_CLK>,
+ <&gcc GCC_DDRSS_GPU_AXI_CLK>,
+ <&gcc GCC_GPU_MEMNOC_GFX_CLK>;
+ clock-names = "gmu", "cxo", "axi", "memnoc";
+
+ power-domains = <&gpucc GPU_CX_GDSC>;
+ iommus = <&adreno_smmu 5>;
+
+ operating-points-v2 = <&gmu_opp_table>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/display/msm/gpu.txt b/Documentation/devicetree/bindings/display/msm/gpu.txt
index f8759145ce1a..aad1aef682f7 100644
--- a/Documentation/devicetree/bindings/display/msm/gpu.txt
+++ b/Documentation/devicetree/bindings/display/msm/gpu.txt
@@ -10,14 +10,23 @@ Required properties:
If "amd,imageon" is used, there should be no top level msm device.
- reg: Physical base address and length of the controller's registers.
- interrupts: The interrupt signal from the gpu.
-- clocks: device clocks
+- clocks: device clocks (if applicable)
See ../clocks/clock-bindings.txt for details.
-- clock-names: the following clocks are required:
+- clock-names: the following clocks are required by a3xx, a4xx and a5xx
+ cores:
* "core"
* "iface"
* "mem_iface"
+ For GMU attached devices the GPU clocks are not used and are not required. The
+ following devices should not list clocks:
+ - qcom,adreno-630.2
+- iommus: optional phandle to an adreno iommu instance
+- operating-points-v2: optional phandle to the OPP operating points
+- qcom,gmu: For GMU attached devices a phandle to the GMU device that will
+ control the power for the GPU. Applicable targets:
+ - qcom,adreno-630.2
-Example:
+Example a3xx/a4xx/a5xx:
/ {
...
@@ -37,3 +46,30 @@ Example:
<&mmcc MMSS_IMEM_AHB_CLK>;
};
};
+
+Example a6xx (with GMU):
+
+/ {
+ ...
+
+ gpu@5000000 {
+ compatible = "qcom,adreno-630.2", "qcom,adreno";
+ #stream-id-cells = <16>;
+
+ reg = <0x5000000 0x40000>, <0x509e000 0x10>;
+ reg-names = "kgsl_3d0_reg_memory", "cx_mem";
+
+ /*
+ * Look ma, no clocks! The GPU clocks and power are
+ * controlled entirely by the GMU
+ */
+
+ interrupts = <GIC_SPI 300 IRQ_TYPE_LEVEL_HIGH>;
+
+ iommus = <&adreno_smmu 0>;
+
+ operating-points-v2 = <&gpu_opp_table>;
+
+ qcom,gmu = <&gmu>;
+ };
+};
diff --git a/Documentation/networking/dsa/dsa.txt b/Documentation/networking/dsa/dsa.txt
index 25170ad7d25b..101f2b2c69ad 100644
--- a/Documentation/networking/dsa/dsa.txt
+++ b/Documentation/networking/dsa/dsa.txt
@@ -533,16 +533,12 @@ Bridge VLAN filtering
function that the driver has to call for each VLAN the given port is a member
of. A switchdev object is used to carry the VID and bridge flags.
-- port_fdb_prepare: bridge layer function invoked when the bridge prepares the
- installation of a Forwarding Database entry. If the operation is not
- supported, this function should return -EOPNOTSUPP to inform the bridge code
- to fallback to a software implementation. No hardware setup must be done in
- this function. See port_fdb_add for this and details.
-
- port_fdb_add: bridge layer function invoked when the bridge wants to install a
Forwarding Database entry, the switch hardware should be programmed with the
specified address in the specified VLAN Id in the forwarding database
- associated with this VLAN ID
+ associated with this VLAN ID. If the operation is not supported, this
+ function should return -EOPNOTSUPP to inform the bridge code to fall back
+ to a software implementation.
Note: VLAN ID 0 corresponds to the port private database, which, in the context
of DSA, would be its port-based VLAN, used by the associated bridge device.
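
Editor's note, not part of the patch: a minimal sketch of the port_fdb_add
contract described above, against the 5.0-era dsa_switch_ops signature. The
"my_" names and the hardware helper are made-up stand-ins::

	#include <linux/errno.h>
	#include <net/dsa.h>

	/* made-up register poke; a real driver programs its ATU/ARL here */
	static int my_hw_fdb_write(void *priv, int port,
				   const unsigned char *addr, u16 vid, bool add)
	{
		return 0;
	}

	static int my_port_fdb_add(struct dsa_switch *ds, int port,
				   const unsigned char *addr, u16 vid)
	{
		if (!ds->priv)			/* stand-in for "no hardware FDB" */
			return -EOPNOTSUPP;	/* bridge falls back to software */

		return my_hw_fdb_write(ds->priv, port, addr, vid, true);
	}

	static const struct dsa_switch_ops my_switch_ops = {
		.port_fdb_add	= my_port_fdb_add,
	};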
diff --git a/Documentation/networking/msg_zerocopy.rst b/Documentation/networking/msg_zerocopy.rst
index fe46d4867e2d..18c1415e7bfa 100644
--- a/Documentation/networking/msg_zerocopy.rst
+++ b/Documentation/networking/msg_zerocopy.rst
@@ -7,7 +7,7 @@ Intro
=====
The MSG_ZEROCOPY flag enables copy avoidance for socket send calls.
-The feature is currently implemented for TCP sockets.
+The feature is currently implemented for TCP and UDP sockets.
Opportunity and Caveats
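
Editor's note, not part of the patch: a userspace sketch of the MSG_ZEROCOPY
flow that the hunk above extends to UDP. The port, buffer size and busy-wait
are illustrative only; real code should poll() for POLLERR instead of
spinning, and should parse the error-queue cmsg for the completion range::

    #include <errno.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <sys/socket.h>

    #ifndef SO_ZEROCOPY
    #define SO_ZEROCOPY 60              /* kernel uapi value, for older libcs */
    #endif
    #ifndef MSG_ZEROCOPY
    #define MSG_ZEROCOPY 0x4000000
    #endif

    int main(void)
    {
        static char buf[16 * 1024];     /* large sends amortize page pinning */
        char ctrl[256];
        struct msghdr msg = {
            .msg_control = ctrl,
            .msg_controllen = sizeof(ctrl),
        };
        struct sockaddr_in dst = {
            .sin_family = AF_INET,
            .sin_port = htons(9),       /* discard service, illustrative */
            .sin_addr.s_addr = htonl(INADDR_LOOPBACK),
        };
        int fd = socket(AF_INET, SOCK_DGRAM, 0); /* UDP now works too */
        int one = 1;

        if (setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one)) < 0)
            return 1;                   /* opt in once per socket */
        if (sendto(fd, buf, sizeof(buf), MSG_ZEROCOPY,
                   (struct sockaddr *)&dst, sizeof(dst)) < 0)
            return 1;                   /* pages stay pinned until completion */

        /* completions arrive on the error queue, not the data path */
        while (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0 && errno == EAGAIN)
            ;
        printf("zerocopy completion received\n");
        return 0;
    }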
diff --git a/Documentation/networking/switchdev.txt b/Documentation/networking/switchdev.txt
index 82236a17b5e6..97b7ca8b9b86 100644
--- a/Documentation/networking/switchdev.txt
+++ b/Documentation/networking/switchdev.txt
@@ -92,11 +92,11 @@ device.
Switch ID
^^^^^^^^^
-The switchdev driver must implement the switchdev op switchdev_port_attr_get
-for SWITCHDEV_ATTR_ID_PORT_PARENT_ID for each port netdev, returning the same
-physical ID for each port of a switch. The ID must be unique between switches
-on the same system. The ID does not need to be unique between switches on
-different systems.
+The switchdev driver must implement the net_device operation
+ndo_get_port_parent_id for each port netdev, returning the same physical ID for
+each port of a switch. The ID must be unique between switches on the same
+system. The ID does not need to be unique between switches on different
+systems.
The switch ID is used to locate ports on a switch and to know if aggregated
ports belong to the same switch.
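
Editor's note, not part of the patch: a sketch of how a driver might satisfy
the reworded requirement, against the 5.0 ndo_get_port_parent_id signature.
The "my_" structures are hypothetical; only the ppid handling is the point::

	#include <linux/etherdevice.h>
	#include <linux/netdevice.h>
	#include <linux/string.h>

	struct my_switch {
		u8 hw_id[ETH_ALEN];	/* one ID shared by every port */
	};

	struct my_port {
		struct my_switch *parent;
	};

	static int my_get_port_parent_id(struct net_device *dev,
					 struct netdev_phys_item_id *ppid)
	{
		struct my_port *port = netdev_priv(dev);

		/* same physical ID for every port of this switch */
		ppid->id_len = sizeof(port->parent->hw_id);
		memcpy(ppid->id, port->parent->hw_id, ppid->id_len);
		return 0;
	}

	static const struct net_device_ops my_netdev_ops = {
		.ndo_get_port_parent_id	= my_get_port_parent_id,
	};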
diff --git a/Documentation/process/applying-patches.rst b/Documentation/process/applying-patches.rst
index dc2ddc345044..fbb9297e6360 100644
--- a/Documentation/process/applying-patches.rst
+++ b/Documentation/process/applying-patches.rst
@@ -216,14 +216,14 @@ You can use the ``interdiff`` program (http://cyberelk.net/tim/patchutils/) to
generate a patch representing the differences between two patches and then
apply the result.
-This will let you move from something like 4.7.2 to 4.7.3 in a single
+This will let you move from something like 5.7.2 to 5.7.3 in a single
step. The -z flag to interdiff will even let you feed it patches in gzip or
bzip2 compressed form directly without the use of zcat or bzcat or manual
decompression.
-Here's how you'd go from 4.7.2 to 4.7.3 in a single step::
+Here's how you'd go from 5.7.2 to 5.7.3 in a single step::
- interdiff -z ../patch-4.7.2.gz ../patch-4.7.3.gz | patch -p1
+ interdiff -z ../patch-5.7.2.gz ../patch-5.7.3.gz | patch -p1
Although interdiff may save you a step or two you are generally advised to
do the additional steps since interdiff can get things wrong in some cases.
@@ -245,62 +245,67 @@ The patches are available at http://kernel.org/
Most recent patches are linked from the front page, but they also have
specific homes.
-The 4.x.y (-stable) and 4.x patches live at
+The 5.x.y (-stable) and 5.x patches live at
- https://www.kernel.org/pub/linux/kernel/v4.x/
+ https://www.kernel.org/pub/linux/kernel/v5.x/
-The -rc patches live at
+The -rc patches are not stored on the webserver but are generated on
+demand from git tags such as
- https://www.kernel.org/pub/linux/kernel/v4.x/testing/
+ https://git.kernel.org/torvalds/p/v5.1-rc1/v5.0
+The stable -rc patches live at
-The 4.x kernels
+ https://www.kernel.org/pub/linux/kernel/v5.x/stable-review/
+
+
+The 5.x kernels
===============
These are the base stable releases released by Linus. The highest numbered
release is the most recent.
If regressions or other serious flaws are found, then a -stable fix patch
-will be released (see below) on top of this base. Once a new 4.x base
+will be released (see below) on top of this base. Once a new 5.x base
kernel is released, a patch is made available that is a delta between the
-previous 4.x kernel and the new one.
+previous 5.x kernel and the new one.
-To apply a patch moving from 4.6 to 4.7, you'd do the following (note
-that such patches do **NOT** apply on top of 4.x.y kernels but on top of the
-base 4.x kernel -- if you need to move from 4.x.y to 4.x+1 you need to
-first revert the 4.x.y patch).
+To apply a patch moving from 5.6 to 5.7, you'd do the following (note
+that such patches do **NOT** apply on top of 5.x.y kernels but on top of the
+base 5.x kernel -- if you need to move from 5.x.y to 5.x+1 you need to
+first revert the 5.x.y patch).
Here are some examples::
- # moving from 4.6 to 4.7
+ # moving from 5.6 to 5.7
- $ cd ~/linux-4.6 # change to kernel source dir
- $ patch -p1 < ../patch-4.7 # apply the 4.7 patch
+ $ cd ~/linux-5.6 # change to kernel source dir
+ $ patch -p1 < ../patch-5.7 # apply the 5.7 patch
$ cd ..
- $ mv linux-4.6 linux-4.7 # rename source dir
+ $ mv linux-5.6 linux-5.7 # rename source dir
- # moving from 4.6.1 to 4.7
+ # moving from 5.6.1 to 5.7
- $ cd ~/linux-4.6.1 # change to kernel source dir
- $ patch -p1 -R < ../patch-4.6.1 # revert the 4.6.1 patch
- # source dir is now 4.6
- $ patch -p1 < ../patch-4.7 # apply new 4.7 patch
+ $ cd ~/linux-5.6.1 # change to kernel source dir
+ $ patch -p1 -R < ../patch-5.6.1 # revert the 5.6.1 patch
+ # source dir is now 5.6
+ $ patch -p1 < ../patch-5.7 # apply new 5.7 patch
$ cd ..
- $ mv linux-4.6.1 linux-4.7 # rename source dir
+ $ mv linux-5.6.1 linux-5.7 # rename source dir
-The 4.x.y kernels
+The 5.x.y kernels
=================
Kernels with 3-digit versions are -stable kernels. They contain small(ish)
critical fixes for security problems or significant regressions discovered
-in a given 4.x kernel.
+in a given 5.x kernel.
This is the recommended branch for users who want the most recent stable
kernel and are not interested in helping test development/experimental
versions.
-If no 4.x.y kernel is available, then the highest numbered 4.x kernel is
+If no 5.x.y kernel is available, then the highest numbered 5.x kernel is
the current stable kernel.
.. note::
@@ -308,23 +313,23 @@ the current stable kernel.
The -stable team usually do make incremental patches available as well
as patches against the latest mainline release, but I only cover the
non-incremental ones below. The incremental ones can be found at
- https://www.kernel.org/pub/linux/kernel/v4.x/incr/
+ https://www.kernel.org/pub/linux/kernel/v5.x/incr/
-These patches are not incremental, meaning that for example the 4.7.3
-patch does not apply on top of the 4.7.2 kernel source, but rather on top
-of the base 4.7 kernel source.
+These patches are not incremental, meaning that for example the 5.7.3
+patch does not apply on top of the 5.7.2 kernel source, but rather on top
+of the base 5.7 kernel source.
-So, in order to apply the 4.7.3 patch to your existing 4.7.2 kernel
-source you have to first back out the 4.7.2 patch (so you are left with a
-base 4.7 kernel source) and then apply the new 4.7.3 patch.
+So, in order to apply the 5.7.3 patch to your existing 5.7.2 kernel
+source you have to first back out the 5.7.2 patch (so you are left with a
+base 5.7 kernel source) and then apply the new 5.7.3 patch.
Here's a small example::
- $ cd ~/linux-4.7.2 # change to the kernel source dir
- $ patch -p1 -R < ../patch-4.7.2 # revert the 4.7.2 patch
- $ patch -p1 < ../patch-4.7.3 # apply the new 4.7.3 patch
+ $ cd ~/linux-5.7.2 # change to the kernel source dir
+ $ patch -p1 -R < ../patch-5.7.2 # revert the 5.7.2 patch
+ $ patch -p1 < ../patch-5.7.3 # apply the new 5.7.3 patch
$ cd ..
- $ mv linux-4.7.2 linux-4.7.3 # rename the kernel source dir
+ $ mv linux-5.7.2 linux-5.7.3 # rename the kernel source dir
The -rc kernels
===============
@@ -343,38 +348,38 @@ This is a good branch to run for people who want to help out testing
development kernels but do not want to run some of the really experimental
stuff (such people should see the sections about -next and -mm kernels below).
-The -rc patches are not incremental, they apply to a base 4.x kernel, just
-like the 4.x.y patches described above. The kernel version before the -rcN
+The -rc patches are not incremental, they apply to a base 5.x kernel, just
+like the 5.x.y patches described above. The kernel version before the -rcN
suffix denotes the version of the kernel that this -rc kernel will eventually
turn into.
-So, 4.8-rc5 means that this is the fifth release candidate for the 4.8
-kernel and the patch should be applied on top of the 4.7 kernel source.
+So, 5.8-rc5 means that this is the fifth release candidate for the 5.8
+kernel and the patch should be applied on top of the 5.7 kernel source.
Here are 3 examples of how to apply these patches::
- # first an example of moving from 4.7 to 4.8-rc3
+ # first an example of moving from 5.7 to 5.8-rc3
- $ cd ~/linux-4.7 # change to the 4.7 source dir
- $ patch -p1 < ../patch-4.8-rc3 # apply the 4.8-rc3 patch
+ $ cd ~/linux-5.7 # change to the 5.7 source dir
+ $ patch -p1 < ../patch-5.8-rc3 # apply the 5.8-rc3 patch
$ cd ..
- $ mv linux-4.7 linux-4.8-rc3 # rename the source dir
+ $ mv linux-5.7 linux-5.8-rc3 # rename the source dir
- # now let's move from 4.8-rc3 to 4.8-rc5
+ # now let's move from 5.8-rc3 to 5.8-rc5
- $ cd ~/linux-4.8-rc3 # change to the 4.8-rc3 dir
- $ patch -p1 -R < ../patch-4.8-rc3 # revert the 4.8-rc3 patch
- $ patch -p1 < ../patch-4.8-rc5 # apply the new 4.8-rc5 patch
+ $ cd ~/linux-5.8-rc3 # change to the 5.8-rc3 dir
+ $ patch -p1 -R < ../patch-5.8-rc3 # revert the 5.8-rc3 patch
+ $ patch -p1 < ../patch-5.8-rc5 # apply the new 5.8-rc5 patch
$ cd ..
- $ mv linux-4.8-rc3 linux-4.8-rc5 # rename the source dir
+ $ mv linux-5.8-rc3 linux-5.8-rc5 # rename the source dir
- # finally let's try and move from 4.7.3 to 4.8-rc5
+ # finally let's try and move from 5.7.3 to 5.8-rc5
- $ cd ~/linux-4.7.3 # change to the kernel source dir
- $ patch -p1 -R < ../patch-4.7.3 # revert the 4.7.3 patch
- $ patch -p1 < ../patch-4.8-rc5 # apply new 4.8-rc5 patch
+ $ cd ~/linux-5.7.3 # change to the kernel source dir
+ $ patch -p1 -R < ../patch-5.7.3 # revert the 5.7.3 patch
+ $ patch -p1 < ../patch-5.8-rc5 # apply new 5.8-rc5 patch
$ cd ..
- $ mv linux-4.7.3 linux-4.8-rc5 # rename the kernel source dir
+ $ mv linux-5.7.3 linux-5.8-rc5 # rename the kernel source dir
The -mm patches and the linux-next tree
diff --git a/Documentation/translations/it_IT/admin-guide/README.rst b/Documentation/translations/it_IT/admin-guide/README.rst
index 80f5ffc94a9e..b37166817842 100644
--- a/Documentation/translations/it_IT/admin-guide/README.rst
+++ b/Documentation/translations/it_IT/admin-guide/README.rst
@@ -4,7 +4,7 @@
.. _it_readme:
-Rilascio del kernel Linux 4.x <http://kernel.org/>
+Rilascio del kernel Linux 5.x <http://kernel.org/>
===================================================
.. warning::
diff --git a/MAINTAINERS b/MAINTAINERS
index a4a4bf563f94..4f463de6e721 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -409,8 +409,7 @@ F: drivers/platform/x86/wmi.c
F: include/uapi/linux/wmi.h
AD1889 ALSA SOUND DRIVER
-M: Thibaut Varene <T-Bone@parisc-linux.org>
-W: http://wiki.parisc-linux.org/AD1889
+W: https://parisc.wiki.kernel.org/index.php/AD1889
L: linux-parisc@vger.kernel.org
S: Maintained
F: sound/pci/ad1889.*
@@ -2865,7 +2864,7 @@ R: Martin KaFai Lau <kafai@fb.com>
R: Song Liu <songliubraving@fb.com>
R: Yonghong Song <yhs@fb.com>
L: netdev@vger.kernel.org
-L: linux-kernel@vger.kernel.org
+L: bpf@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git
Q: https://patchwork.ozlabs.org/project/netdev/list/?delegate=77147
@@ -2895,6 +2894,7 @@ N: bpf
BPF JIT for ARM
M: Shubham Bansal <illusionist.neo@gmail.com>
L: netdev@vger.kernel.org
+L: bpf@vger.kernel.org
S: Maintained
F: arch/arm/net/
@@ -2903,18 +2903,21 @@ M: Daniel Borkmann <daniel@iogearbox.net>
M: Alexei Starovoitov <ast@kernel.org>
M: Zi Shen Lim <zlim.lnx@gmail.com>
L: netdev@vger.kernel.org
+L: bpf@vger.kernel.org
S: Supported
F: arch/arm64/net/
BPF JIT for MIPS (32-BIT AND 64-BIT)
M: Paul Burton <paul.burton@mips.com>
L: netdev@vger.kernel.org
+L: bpf@vger.kernel.org
S: Maintained
F: arch/mips/net/
BPF JIT for NFP NICs
M: Jakub Kicinski <jakub.kicinski@netronome.com>
L: netdev@vger.kernel.org
+L: bpf@vger.kernel.org
S: Supported
F: drivers/net/ethernet/netronome/nfp/bpf/
@@ -2922,6 +2925,7 @@ BPF JIT for POWERPC (32-BIT AND 64-BIT)
M: Naveen N. Rao <naveen.n.rao@linux.ibm.com>
M: Sandipan Das <sandipan@linux.ibm.com>
L: netdev@vger.kernel.org
+L: bpf@vger.kernel.org
S: Maintained
F: arch/powerpc/net/
@@ -2929,6 +2933,7 @@ BPF JIT for S390
M: Martin Schwidefsky <schwidefsky@de.ibm.com>
M: Heiko Carstens <heiko.carstens@de.ibm.com>
L: netdev@vger.kernel.org
+L: bpf@vger.kernel.org
S: Maintained
F: arch/s390/net/
X: arch/s390/net/pnet.c
@@ -2936,12 +2941,14 @@ X: arch/s390/net/pnet.c
BPF JIT for SPARC (32-BIT AND 64-BIT)
M: David S. Miller <davem@davemloft.net>
L: netdev@vger.kernel.org
+L: bpf@vger.kernel.org
S: Maintained
F: arch/sparc/net/
BPF JIT for X86 32-BIT
M: Wang YanQing <udknight@gmail.com>
L: netdev@vger.kernel.org
+L: bpf@vger.kernel.org
S: Maintained
F: arch/x86/net/bpf_jit_comp32.c
@@ -2949,6 +2956,7 @@ BPF JIT for X86 64-BIT
M: Alexei Starovoitov <ast@kernel.org>
M: Daniel Borkmann <daniel@iogearbox.net>
L: netdev@vger.kernel.org
+L: bpf@vger.kernel.org
S: Supported
F: arch/x86/net/
X: arch/x86/net/bpf_jit_comp32.c
@@ -3403,9 +3411,8 @@ F: Documentation/media/v4l-drivers/cafe_ccic*
F: drivers/media/platform/marvell-ccic/
CAIF NETWORK LAYER
-M: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
L: netdev@vger.kernel.org
-S: Supported
+S: Orphan
F: Documentation/networking/caif/
F: drivers/net/caif/
F: include/uapi/linux/caif/
@@ -4851,10 +4858,11 @@ F: Documentation/devicetree/bindings/display/multi-inno,mi0283qt.txt
DRM DRIVER FOR MSM ADRENO GPU
M: Rob Clark <robdclark@gmail.com>
+M: Sean Paul <sean@poorly.run>
L: linux-arm-msm@vger.kernel.org
L: dri-devel@lists.freedesktop.org
L: freedreno@lists.freedesktop.org
-T: git git://people.freedesktop.org/~robclark/linux
+T: git https://gitlab.freedesktop.org/drm/msm.git
S: Maintained
F: drivers/gpu/drm/msm/
F: include/uapi/drm/msm_drm.h
@@ -8523,6 +8531,7 @@ L7 BPF FRAMEWORK
M: John Fastabend <john.fastabend@gmail.com>
M: Daniel Borkmann <daniel@iogearbox.net>
L: netdev@vger.kernel.org
+L: bpf@vger.kernel.org
S: Maintained
F: include/linux/skmsg.h
F: net/core/skmsg.c
@@ -11524,7 +11533,7 @@ F: Documentation/blockdev/paride.txt
F: drivers/block/paride/
PARISC ARCHITECTURE
-M: "James E.J. Bottomley" <jejb@parisc-linux.org>
+M: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
M: Helge Deller <deller@gmx.de>
L: linux-parisc@vger.kernel.org
W: http://www.parisc-linux.org/
@@ -16750,6 +16759,7 @@ M: Jesper Dangaard Brouer <hawk@kernel.org>
M: John Fastabend <john.fastabend@gmail.com>
L: netdev@vger.kernel.org
L: xdp-newbies@vger.kernel.org
+L: bpf@vger.kernel.org
S: Supported
F: net/core/xdp.c
F: include/net/xdp.h
@@ -16763,6 +16773,7 @@ XDP SOCKETS (AF_XDP)
M: Björn Töpel <bjorn.topel@intel.com>
M: Magnus Karlsson <magnus.karlsson@intel.com>
L: netdev@vger.kernel.org
+L: bpf@vger.kernel.org
S: Maintained
F: kernel/bpf/xskmap.c
F: net/xdp/
diff --git a/Makefile b/Makefile
index 96c5335e7ee4..d5713e7b1e50 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 0
SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION =
NAME = Shy Crocodile
# *DOCUMENTATION*
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 376366a7db81..d750b302d5ab 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -191,7 +191,6 @@ config NR_CPUS
config ARC_SMP_HALT_ON_RESET
bool "Enable Halt-on-reset boot mode"
- default y if ARC_UBOOT_SUPPORT
help
In SMP configuration cores can be configured as Halt-on-reset
or they could all start at same time. For Halt-on-reset, non
@@ -407,6 +406,14 @@ config ARC_HAS_ACCL_REGS
(also referred to as r58:r59). These can also be used by gcc as GPR so
kernel needs to save/restore per process
+config ARC_IRQ_NO_AUTOSAVE
+ bool "Disable hardware autosave regfile on interrupts"
+ default n
+ help
+ On HS cores, a taken interrupt auto-saves the regfile on the stack.
+ This is programmable and can be optionally disabled, in which case
+ the software INTERRUPT_PROLOGUE/EPILOGUE do the needed work.
+
endif # ISA_ARCV2
endmenu # "ARC CPU Configuration"
@@ -515,17 +522,6 @@ config ARC_DBG_TLB_PARANOIA
endif
-config ARC_UBOOT_SUPPORT
- bool "Support uboot arg Handling"
- help
- ARC Linux by default checks for uboot provided args as pointers to
- external cmdline or DTB. This however breaks in absence of uboot,
- when booting from Metaware debugger directly, as the registers are
- not zeroed out on reset by mdb and/or ARCv2 based cores. The bogus
- registers look like uboot args to kernel which then chokes.
- So only enable the uboot arg checking/processing if users are sure
- of uboot being in play.
-
config ARC_BUILTIN_DTB_NAME
string "Built in DTB"
help
diff --git a/arch/arc/configs/nps_defconfig b/arch/arc/configs/nps_defconfig
index 6e84060e7c90..621f59407d76 100644
--- a/arch/arc/configs/nps_defconfig
+++ b/arch/arc/configs/nps_defconfig
@@ -31,7 +31,6 @@ CONFIG_ARC_CACHE_LINE_SHIFT=5
# CONFIG_ARC_HAS_LLSC is not set
CONFIG_ARC_KVADDR_SIZE=402
CONFIG_ARC_EMUL_UNALIGNED=y
-CONFIG_ARC_UBOOT_SUPPORT=y
CONFIG_PREEMPT=y
CONFIG_NET=y
CONFIG_UNIX=y
diff --git a/arch/arc/configs/vdk_hs38_defconfig b/arch/arc/configs/vdk_hs38_defconfig
index 1e59a2e9c602..e447ace6fa1c 100644
--- a/arch/arc/configs/vdk_hs38_defconfig
+++ b/arch/arc/configs/vdk_hs38_defconfig
@@ -13,7 +13,6 @@ CONFIG_PARTITION_ADVANCED=y
CONFIG_ARC_PLAT_AXS10X=y
CONFIG_AXS103=y
CONFIG_ISA_ARCV2=y
-CONFIG_ARC_UBOOT_SUPPORT=y
CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38"
CONFIG_PREEMPT=y
CONFIG_NET=y
diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig b/arch/arc/configs/vdk_hs38_smp_defconfig
index b5c3f6c54b03..c82cdb10aaf4 100644
--- a/arch/arc/configs/vdk_hs38_smp_defconfig
+++ b/arch/arc/configs/vdk_hs38_smp_defconfig
@@ -15,8 +15,6 @@ CONFIG_AXS103=y
CONFIG_ISA_ARCV2=y
CONFIG_SMP=y
# CONFIG_ARC_TIMERS_64BIT is not set
-# CONFIG_ARC_SMP_HALT_ON_RESET is not set
-CONFIG_ARC_UBOOT_SUPPORT=y
CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38_smp"
CONFIG_PREEMPT=y
CONFIG_NET=y
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index f1b86cef0905..a27eafdc8260 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -151,6 +151,14 @@ struct bcr_isa_arcv2 {
#endif
};
+struct bcr_uarch_build_arcv2 {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad:8, prod:8, maj:8, min:8;
+#else
+ unsigned int min:8, maj:8, prod:8, pad:8;
+#endif
+};
+
struct bcr_mpy {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int pad:8, x1616:8, dsp:4, cycles:2, type:2, ver:8;
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index f393b663413e..2ad77fb43639 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -52,6 +52,17 @@
#define cache_line_size() SMP_CACHE_BYTES
#define ARCH_DMA_MINALIGN SMP_CACHE_BYTES
+/*
+ * Make sure slab-allocated buffers are 64-bit aligned when atomic64_t uses
+ * ARCv2 64-bit atomics (LLOCKD/SCONDD). This guarantess runtime 64-bit
+ * alignment for any atomic64_t embedded in buffer.
+ * Default ARCH_SLAB_MINALIGN is __alignof__(long long) which has a relaxed
+ * value of 4 (and not 8) in ARC ABI.
+ */
+#if defined(CONFIG_ARC_HAS_LL64) && defined(CONFIG_ARC_HAS_LLSC)
+#define ARCH_SLAB_MINALIGN 8
+#endif
+
extern void arc_cache_init(void);
extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
extern void read_decode_cache_bcr(void);
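
Editor's note, not part of the patch: a sketch of the situation the new
comment describes. "my_counter" is hypothetical; the point is that with
ARCH_SLAB_MINALIGN == 8 the slab pointer itself is 64-bit aligned, so an
atomic64_t placed at the start of the object is safe for LLOCKD/SCONDD::

	#include <linux/atomic.h>
	#include <linux/slab.h>

	struct my_counter {
		atomic64_t value;	/* alignment comes from the buffer start */
		int flags;
	};

	static struct my_counter *my_counter_alloc(void)
	{
		/* without the #define above, kmalloc() could legally return
		 * a 4-byte aligned buffer on ARC, misaligning 'value' */
		return kzalloc(sizeof(struct my_counter), GFP_KERNEL);
	}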
diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h
index 309f4e6721b3..225e7df2d8ed 100644
--- a/arch/arc/include/asm/entry-arcv2.h
+++ b/arch/arc/include/asm/entry-arcv2.h
@@ -17,6 +17,33 @@
;
; Now manually save: r12, sp, fp, gp, r25
+#ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE
+.ifnc \called_from, exception
st.as r9, [sp, -10] ; save r9 in its final stack slot
+ sub sp, sp, 12 ; skip JLI, LDI, EI
+
+ PUSH lp_count
+ PUSHAX lp_start
+ PUSHAX lp_end
+ PUSH blink
+
+ PUSH r11
+ PUSH r10
+
+ sub sp, sp, 4 ; skip r9
+
+ PUSH r8
+ PUSH r7
+ PUSH r6
+ PUSH r5
+ PUSH r4
+ PUSH r3
+ PUSH r2
+ PUSH r1
+ PUSH r0
+.endif
+#endif
+
#ifdef CONFIG_ARC_HAS_ACCL_REGS
PUSH r59
PUSH r58
@@ -86,6 +113,33 @@
POP r59
#endif
+#ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE
+.ifnc \called_from, exception
+ POP r0
+ POP r1
+ POP r2
+ POP r3
+ POP r4
+ POP r5
+ POP r6
+ POP r7
+ POP r8
+ POP r9
+ POP r10
+ POP r11
+
+ POP blink
+ POPAX lp_end
+ POPAX lp_start
+
+ POP r9
+ mov lp_count, r9
+
+ add sp, sp, 12 ; skip JLI, LDI, EI
+ ld.as r9, [sp, -10] ; reload r9 which got clobbered
+.endif
+#endif
+
.endm
/*------------------------------------------------------------------------*/
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
index c9173c02081c..eabc3efa6c6d 100644
--- a/arch/arc/include/asm/uaccess.h
+++ b/arch/arc/include/asm/uaccess.h
@@ -207,7 +207,7 @@ raw_copy_from_user(void *to, const void __user *from, unsigned long n)
*/
"=&r" (tmp), "+r" (to), "+r" (from)
:
- : "lp_count", "lp_start", "lp_end", "memory");
+ : "lp_count", "memory");
return n;
}
@@ -433,7 +433,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
*/
"=&r" (tmp), "+r" (to), "+r" (from)
:
- : "lp_count", "lp_start", "lp_end", "memory");
+ : "lp_count", "memory");
return n;
}
@@ -653,7 +653,7 @@ static inline unsigned long __arc_clear_user(void __user *to, unsigned long n)
" .previous \n"
: "+r"(d_char), "+r"(res)
: "i"(0)
- : "lp_count", "lp_start", "lp_end", "memory");
+ : "lp_count", "memory");
return res;
}
@@ -686,7 +686,7 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count)
" .previous \n"
: "+r"(res), "+r"(dst), "+r"(src), "=r"(val)
: "g"(-EFAULT), "r"(count)
- : "lp_count", "lp_start", "lp_end", "memory");
+ : "lp_count", "memory");
return res;
}
diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S
index cc558a25b8fa..562089d62d9d 100644
--- a/arch/arc/kernel/entry-arcv2.S
+++ b/arch/arc/kernel/entry-arcv2.S
@@ -209,7 +209,9 @@ restore_regs:
;####### Return from Intr #######
debug_marker_l1:
- bbit1.nt r0, STATUS_DE_BIT, .Lintr_ret_to_delay_slot
+ ; bbit1.nt r0, STATUS_DE_BIT, .Lintr_ret_to_delay_slot
+ btst r0, STATUS_DE_BIT ; Z flag set if bit clear
+ bnz .Lintr_ret_to_delay_slot ; branch if STATUS_DE_BIT set
.Lisr_ret_fast_path:
; Handle special case #1: (Entry via Exception, Return via IRQ)
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 8b90d25a15cc..30e090625916 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -17,6 +17,7 @@
#include <asm/entry.h>
#include <asm/arcregs.h>
#include <asm/cache.h>
+#include <asm/irqflags.h>
.macro CPU_EARLY_SETUP
@@ -47,6 +48,15 @@
sr r5, [ARC_REG_DC_CTRL]
1:
+
+#ifdef CONFIG_ISA_ARCV2
+	; Unaligned access is disabled at reset, so enable it early, as
+	; gcc 7.3.1 (ARC GNU 2018.03) onwards generates unaligned accesses
+	; by default
+ lr r5, [status32]
+ bset r5, r5, STATUS_AD_BIT
+ kflag r5
+#endif
.endm
.section .init.text, "ax",@progbits
@@ -90,15 +100,13 @@ ENTRY(stext)
st.ab 0, [r5, 4]
1:
-#ifdef CONFIG_ARC_UBOOT_SUPPORT
; Uboot - kernel ABI
; r0 = [0] No uboot interaction, [1] cmdline in r2, [2] DTB in r2
- ; r1 = magic number (board identity, unused as of now
+ ; r1 = magic number (always zero as of now)
; r2 = pointer to uboot provided cmdline or external DTB in mem
- ; These are handled later in setup_arch()
+ ; These are handled later in handle_uboot_args()
st r0, [@uboot_tag]
st r2, [@uboot_arg]
-#endif
; setup "current" tsk and optionally cache it in dedicated r25
mov r9, @init_task
diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c
index 067ea362fb3e..cf18b3e5a934 100644
--- a/arch/arc/kernel/intc-arcv2.c
+++ b/arch/arc/kernel/intc-arcv2.c
@@ -49,11 +49,13 @@ void arc_init_IRQ(void)
*(unsigned int *)&ictrl = 0;
+#ifndef CONFIG_ARC_IRQ_NO_AUTOSAVE
ictrl.save_nr_gpr_pairs = 6; /* r0 to r11 (r12 saved manually) */
ictrl.save_blink = 1;
ictrl.save_lp_regs = 1; /* LP_COUNT, LP_START, LP_END */
ictrl.save_u_to_u = 0; /* user ctxt saved on kernel stack */
ictrl.save_idx_regs = 1; /* JLI, LDI, EI */
+#endif
WRITE_AUX(AUX_IRQ_CTRL, ictrl);
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index feb90093e6b1..7b2340996cf8 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -199,20 +199,36 @@ static void read_arc_build_cfg_regs(void)
cpu->bpu.ret_stk = 4 << bpu.rse;
if (cpu->core.family >= 0x54) {
- unsigned int exec_ctrl;
- READ_BCR(AUX_EXEC_CTRL, exec_ctrl);
- cpu->extn.dual_enb = !(exec_ctrl & 1);
+ struct bcr_uarch_build_arcv2 uarch;
- /* dual issue always present for this core */
- cpu->extn.dual = 1;
+ /*
+ * The first 0x54 core (uarch maj:min 0:1 or 0:2) was
+		 * dual issue only (HS4x). But the next uarch rev (1:0)
+		 * allows it to be configured for single issue (HS3x).
+		 * Ensure we fiddle with dual issue only on HS4x.
+ */
+ READ_BCR(ARC_REG_MICRO_ARCH_BCR, uarch);
+
+ if (uarch.prod == 4) {
+ unsigned int exec_ctrl;
+
+ /* dual issue hardware always present */
+ cpu->extn.dual = 1;
+
+ READ_BCR(AUX_EXEC_CTRL, exec_ctrl);
+
+ /* dual issue hardware enabled ? */
+ cpu->extn.dual_enb = !(exec_ctrl & 1);
+
+ }
}
}
READ_BCR(ARC_REG_AP_BCR, ap);
if (ap.ver) {
cpu->extn.ap_num = 2 << ap.num;
- cpu->extn.ap_full = !!ap.min;
+ cpu->extn.ap_full = !ap.min;
}
READ_BCR(ARC_REG_SMART_BCR, bcr);
@@ -462,43 +478,78 @@ void setup_processor(void)
arc_chk_core_config();
}
-static inline int is_kernel(unsigned long addr)
+static inline bool uboot_arg_invalid(unsigned long addr)
{
- if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
- return 1;
- return 0;
+ /*
+	 * Check that it is an untranslated address (although the MMU is not
+	 * enabled yet, it being a high address ensures this is not a fluke)
+ */
+ if (addr < PAGE_OFFSET)
+ return true;
+
+ /* Check that address doesn't clobber resident kernel image */
+ return addr >= (unsigned long)_stext && addr <= (unsigned long)_end;
}
-void __init setup_arch(char **cmdline_p)
+#define IGNORE_ARGS "Ignore U-boot args: "
+
+/* uboot_tag values for U-boot - kernel ABI revision 0; see head.S */
+#define UBOOT_TAG_NONE 0
+#define UBOOT_TAG_CMDLINE 1
+#define UBOOT_TAG_DTB 2
+
+void __init handle_uboot_args(void)
{
-#ifdef CONFIG_ARC_UBOOT_SUPPORT
- /* make sure that uboot passed pointer to cmdline/dtb is valid */
- if (uboot_tag && is_kernel((unsigned long)uboot_arg))
- panic("Invalid uboot arg\n");
-
- /* See if u-boot passed an external Device Tree blob */
- machine_desc = setup_machine_fdt(uboot_arg); /* uboot_tag == 2 */
- if (!machine_desc)
-#endif
- {
- /* No, so try the embedded one */
+ bool use_embedded_dtb = true;
+ bool append_cmdline = false;
+
+ /* check that we know this tag */
+ if (uboot_tag != UBOOT_TAG_NONE &&
+ uboot_tag != UBOOT_TAG_CMDLINE &&
+ uboot_tag != UBOOT_TAG_DTB) {
+ pr_warn(IGNORE_ARGS "invalid uboot tag: '%08x'\n", uboot_tag);
+ goto ignore_uboot_args;
+ }
+
+ if (uboot_tag != UBOOT_TAG_NONE &&
+ uboot_arg_invalid((unsigned long)uboot_arg)) {
+ pr_warn(IGNORE_ARGS "invalid uboot arg: '%px'\n", uboot_arg);
+ goto ignore_uboot_args;
+ }
+
+ /* see if U-boot passed an external Device Tree blob */
+ if (uboot_tag == UBOOT_TAG_DTB) {
+ machine_desc = setup_machine_fdt((void *)uboot_arg);
+
+ /* external Device Tree blob is invalid - use embedded one */
+ use_embedded_dtb = !machine_desc;
+ }
+
+ if (uboot_tag == UBOOT_TAG_CMDLINE)
+ append_cmdline = true;
+
+ignore_uboot_args:
+
+ if (use_embedded_dtb) {
machine_desc = setup_machine_fdt(__dtb_start);
if (!machine_desc)
panic("Embedded DT invalid\n");
+ }
- /*
- * If we are here, it is established that @uboot_arg didn't
- * point to DT blob. Instead if u-boot says it is cmdline,
- * append to embedded DT cmdline.
- * setup_machine_fdt() would have populated @boot_command_line
- */
- if (uboot_tag == 1) {
- /* Ensure a whitespace between the 2 cmdlines */
- strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
- strlcat(boot_command_line, uboot_arg,
- COMMAND_LINE_SIZE);
- }
+ /*
+ * NOTE: @boot_command_line is populated by setup_machine_fdt() so this
+	 * append processing can only happen after that call.
+ */
+ if (append_cmdline) {
+		/* Ensure whitespace between the two cmdlines */
+ strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
+ strlcat(boot_command_line, uboot_arg, COMMAND_LINE_SIZE);
}
+}
+
+void __init setup_arch(char **cmdline_p)
+{
+ handle_uboot_args();
/* Save unparsed command line copy for /proc/cmdline */
*cmdline_p = boot_command_line;
diff --git a/arch/arc/lib/memcpy-archs.S b/arch/arc/lib/memcpy-archs.S
index d61044dd8b58..ea14b0bf3116 100644
--- a/arch/arc/lib/memcpy-archs.S
+++ b/arch/arc/lib/memcpy-archs.S
@@ -25,15 +25,11 @@
#endif
#ifdef CONFIG_ARC_HAS_LL64
-# define PREFETCH_READ(RX) prefetch [RX, 56]
-# define PREFETCH_WRITE(RX) prefetchw [RX, 64]
# define LOADX(DST,RX) ldd.ab DST, [RX, 8]
# define STOREX(SRC,RX) std.ab SRC, [RX, 8]
# define ZOLSHFT 5
# define ZOLAND 0x1F
#else
-# define PREFETCH_READ(RX) prefetch [RX, 28]
-# define PREFETCH_WRITE(RX) prefetchw [RX, 32]
# define LOADX(DST,RX) ld.ab DST, [RX, 4]
# define STOREX(SRC,RX) st.ab SRC, [RX, 4]
# define ZOLSHFT 4
@@ -41,8 +37,6 @@
#endif
ENTRY_CFI(memcpy)
- prefetch [r1] ; Prefetch the read location
- prefetchw [r0] ; Prefetch the write location
mov.f 0, r2
;;; if size is zero
jz.d [blink]
@@ -72,8 +66,6 @@ ENTRY_CFI(memcpy)
lpnz @.Lcopy32_64bytes
;; LOOP START
LOADX (r6, r1)
- PREFETCH_READ (r1)
- PREFETCH_WRITE (r3)
LOADX (r8, r1)
LOADX (r10, r1)
LOADX (r4, r1)
@@ -117,9 +109,7 @@ ENTRY_CFI(memcpy)
lpnz @.Lcopy8bytes_1
;; LOOP START
ld.ab r6, [r1, 4]
- prefetch [r1, 28] ;Prefetch the next read location
ld.ab r8, [r1,4]
- prefetchw [r3, 32] ;Prefetch the next write location
SHIFT_1 (r7, r6, 24)
or r7, r7, r5
@@ -162,9 +152,7 @@ ENTRY_CFI(memcpy)
lpnz @.Lcopy8bytes_2
;; LOOP START
ld.ab r6, [r1, 4]
- prefetch [r1, 28] ;Prefetch the next read location
ld.ab r8, [r1,4]
- prefetchw [r3, 32] ;Prefetch the next write location
SHIFT_1 (r7, r6, 16)
or r7, r7, r5
@@ -204,9 +192,7 @@ ENTRY_CFI(memcpy)
lpnz @.Lcopy8bytes_3
;; LOOP START
ld.ab r6, [r1, 4]
- prefetch [r1, 28] ;Prefetch the next read location
ld.ab r8, [r1,4]
- prefetchw [r3, 32] ;Prefetch the next write location
SHIFT_1 (r7, r6, 8)
or r7, r7, r5
diff --git a/arch/arc/plat-hsdk/Kconfig b/arch/arc/plat-hsdk/Kconfig
index f25c085b9874..23e00216e5a5 100644
--- a/arch/arc/plat-hsdk/Kconfig
+++ b/arch/arc/plat-hsdk/Kconfig
@@ -9,6 +9,7 @@ menuconfig ARC_SOC_HSDK
bool "ARC HS Development Kit SOC"
depends on ISA_ARCV2
select ARC_HAS_ACCL_REGS
+ select ARC_IRQ_NO_AUTOSAVE
select CLK_HSDK
select RESET_HSDK
select HAVE_PCI
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 664e918e2624..26524b75970a 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1400,6 +1400,7 @@ config NR_CPUS
config HOTPLUG_CPU
bool "Support for hot-pluggable CPUs"
depends on SMP
+ select GENERIC_IRQ_MIGRATION
help
Say Y here to experiment with turning CPUs off and on. CPUs
can be controlled through /sys/devices/system/cpu.
diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts
index b67f5fee1469..dce5be5df97b 100644
--- a/arch/arm/boot/dts/am335x-evm.dts
+++ b/arch/arm/boot/dts/am335x-evm.dts
@@ -729,7 +729,7 @@
&cpsw_emac0 {
phy-handle = <&ethphy0>;
- phy-mode = "rgmii-txid";
+ phy-mode = "rgmii-id";
};
&tscadc {
diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts
index 172c0224e7f6..b128998097ce 100644
--- a/arch/arm/boot/dts/am335x-evmsk.dts
+++ b/arch/arm/boot/dts/am335x-evmsk.dts
@@ -651,13 +651,13 @@
&cpsw_emac0 {
phy-handle = <&ethphy0>;
- phy-mode = "rgmii-txid";
+ phy-mode = "rgmii-id";
dual_emac_res_vlan = <1>;
};
&cpsw_emac1 {
phy-handle = <&ethphy1>;
- phy-mode = "rgmii-txid";
+ phy-mode = "rgmii-id";
dual_emac_res_vlan = <2>;
};
diff --git a/arch/arm/boot/dts/armada-xp-db.dts b/arch/arm/boot/dts/armada-xp-db.dts
index f3ac7483afed..5d04dc68cf57 100644
--- a/arch/arm/boot/dts/armada-xp-db.dts
+++ b/arch/arm/boot/dts/armada-xp-db.dts
@@ -144,30 +144,32 @@
status = "okay";
};
- nand@d0000 {
+ nand-controller@d0000 {
status = "okay";
- label = "pxa3xx_nand-0";
- num-cs = <1>;
- marvell,nand-keep-config;
- nand-on-flash-bbt;
-
- partitions {
- compatible = "fixed-partitions";
- #address-cells = <1>;
- #size-cells = <1>;
-
- partition@0 {
- label = "U-Boot";
- reg = <0 0x800000>;
- };
- partition@800000 {
- label = "Linux";
- reg = <0x800000 0x800000>;
- };
- partition@1000000 {
- label = "Filesystem";
- reg = <0x1000000 0x3f000000>;
+ nand@0 {
+ reg = <0>;
+ label = "pxa3xx_nand-0";
+ nand-rb = <0>;
+ nand-on-flash-bbt;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "U-Boot";
+ reg = <0 0x800000>;
+ };
+ partition@800000 {
+ label = "Linux";
+ reg = <0x800000 0x800000>;
+ };
+ partition@1000000 {
+ label = "Filesystem";
+ reg = <0x1000000 0x3f000000>;
+ };
};
};
};
diff --git a/arch/arm/boot/dts/armada-xp-gp.dts b/arch/arm/boot/dts/armada-xp-gp.dts
index 1139e9469a83..b4cca507cf13 100644
--- a/arch/arm/boot/dts/armada-xp-gp.dts
+++ b/arch/arm/boot/dts/armada-xp-gp.dts
@@ -160,12 +160,15 @@
status = "okay";
};
- nand@d0000 {
+ nand-controller@d0000 {
status = "okay";
- label = "pxa3xx_nand-0";
- num-cs = <1>;
- marvell,nand-keep-config;
- nand-on-flash-bbt;
+
+ nand@0 {
+ reg = <0>;
+ label = "pxa3xx_nand-0";
+ nand-rb = <0>;
+ nand-on-flash-bbt;
+ };
};
};
diff --git a/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts b/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts
index bbbb38888bb8..87dcb502f72d 100644
--- a/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts
+++ b/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts
@@ -81,49 +81,52 @@
};
- nand@d0000 {
+ nand-controller@d0000 {
status = "okay";
- label = "pxa3xx_nand-0";
- num-cs = <1>;
- marvell,nand-keep-config;
- nand-on-flash-bbt;
-
- partitions {
- compatible = "fixed-partitions";
- #address-cells = <1>;
- #size-cells = <1>;
-
- partition@0 {
- label = "u-boot";
- reg = <0x00000000 0x000e0000>;
- read-only;
- };
-
- partition@e0000 {
- label = "u-boot-env";
- reg = <0x000e0000 0x00020000>;
- read-only;
- };
-
- partition@100000 {
- label = "u-boot-env2";
- reg = <0x00100000 0x00020000>;
- read-only;
- };
-
- partition@120000 {
- label = "zImage";
- reg = <0x00120000 0x00400000>;
- };
-
- partition@520000 {
- label = "initrd";
- reg = <0x00520000 0x00400000>;
- };
- partition@e00000 {
- label = "boot";
- reg = <0x00e00000 0x3f200000>;
+ nand@0 {
+ reg = <0>;
+ label = "pxa3xx_nand-0";
+ nand-rb = <0>;
+ nand-on-flash-bbt;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "u-boot";
+ reg = <0x00000000 0x000e0000>;
+ read-only;
+ };
+
+ partition@e0000 {
+ label = "u-boot-env";
+ reg = <0x000e0000 0x00020000>;
+ read-only;
+ };
+
+ partition@100000 {
+ label = "u-boot-env2";
+ reg = <0x00100000 0x00020000>;
+ read-only;
+ };
+
+ partition@120000 {
+ label = "zImage";
+ reg = <0x00120000 0x00400000>;
+ };
+
+ partition@520000 {
+ label = "initrd";
+ reg = <0x00520000 0x00400000>;
+ };
+
+ partition@e00000 {
+ label = "boot";
+ reg = <0x00e00000 0x3f200000>;
+ };
};
};
};
diff --git a/arch/arm/boot/dts/gemini-dlink-dir-685.dts b/arch/arm/boot/dts/gemini-dlink-dir-685.dts
index cc0c3cf89eaa..592111c8d6fd 100644
--- a/arch/arm/boot/dts/gemini-dlink-dir-685.dts
+++ b/arch/arm/boot/dts/gemini-dlink-dir-685.dts
@@ -443,7 +443,7 @@
};
display-controller@6a000000 {
- status = "disabled";
+ status = "okay";
port@0 {
reg = <0>;
diff --git a/arch/arm/boot/dts/tegra124-nyan.dtsi b/arch/arm/boot/dts/tegra124-nyan.dtsi
index d5f11d6d987e..bc85b6a166c7 100644
--- a/arch/arm/boot/dts/tegra124-nyan.dtsi
+++ b/arch/arm/boot/dts/tegra124-nyan.dtsi
@@ -13,10 +13,25 @@
stdout-path = "serial0:115200n8";
};
- memory@80000000 {
+ /*
+	 * Note that recent versions of the device tree compiler (starting with
+ * version 1.4.2) warn about this node containing a reg property, but
+ * missing a unit-address. However, the bootloader on these Chromebook
+ * devices relies on the full name of this node to be exactly /memory.
+ * Adding the unit-address causes the bootloader to create a /memory
+ * node and write the memory bank configuration to that node, which in
+ * turn leads the kernel to believe that the device has 2 GiB of
+ * memory instead of the amount detected by the bootloader.
+ *
+ * The name of this node is effectively ABI and must not be changed.
+ */
+ memory {
+ device_type = "memory";
reg = <0x0 0x80000000 0x0 0x80000000>;
};
+ /delete-node/ memory@80000000;
+
host1x@50000000 {
hdmi@54280000 {
status = "okay";
diff --git a/arch/arm/crypto/sha256-armv4.pl b/arch/arm/crypto/sha256-armv4.pl
index b9ec44060ed3..a03cf4dfb781 100644
--- a/arch/arm/crypto/sha256-armv4.pl
+++ b/arch/arm/crypto/sha256-armv4.pl
@@ -212,10 +212,11 @@ K256:
.global sha256_block_data_order
.type sha256_block_data_order,%function
sha256_block_data_order:
+.Lsha256_block_data_order:
#if __ARM_ARCH__<7
sub r3,pc,#8 @ sha256_block_data_order
#else
- adr r3,sha256_block_data_order
+ adr r3,.Lsha256_block_data_order
#endif
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
ldr r12,.LOPENSSL_armcap
diff --git a/arch/arm/crypto/sha256-core.S_shipped b/arch/arm/crypto/sha256-core.S_shipped
index 3b58300d611c..054aae0edfce 100644
--- a/arch/arm/crypto/sha256-core.S_shipped
+++ b/arch/arm/crypto/sha256-core.S_shipped
@@ -93,10 +93,11 @@ K256:
.global sha256_block_data_order
.type sha256_block_data_order,%function
sha256_block_data_order:
+.Lsha256_block_data_order:
#if __ARM_ARCH__<7
sub r3,pc,#8 @ sha256_block_data_order
#else
- adr r3,sha256_block_data_order
+ adr r3,.Lsha256_block_data_order
#endif
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
ldr r12,.LOPENSSL_armcap
diff --git a/arch/arm/crypto/sha512-armv4.pl b/arch/arm/crypto/sha512-armv4.pl
index fb5d15048c0b..788c17b56ecc 100644
--- a/arch/arm/crypto/sha512-armv4.pl
+++ b/arch/arm/crypto/sha512-armv4.pl
@@ -274,10 +274,11 @@ WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
.global sha512_block_data_order
.type sha512_block_data_order,%function
sha512_block_data_order:
+.Lsha512_block_data_order:
#if __ARM_ARCH__<7
sub r3,pc,#8 @ sha512_block_data_order
#else
- adr r3,sha512_block_data_order
+ adr r3,.Lsha512_block_data_order
#endif
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
ldr r12,.LOPENSSL_armcap
diff --git a/arch/arm/crypto/sha512-core.S_shipped b/arch/arm/crypto/sha512-core.S_shipped
index b1c334a49cda..710ea309769e 100644
--- a/arch/arm/crypto/sha512-core.S_shipped
+++ b/arch/arm/crypto/sha512-core.S_shipped
@@ -141,10 +141,11 @@ WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
.global sha512_block_data_order
.type sha512_block_data_order,%function
sha512_block_data_order:
+.Lsha512_block_data_order:
#if __ARM_ARCH__<7
sub r3,pc,#8 @ sha512_block_data_order
#else
- adr r3,sha512_block_data_order
+ adr r3,.Lsha512_block_data_order
#endif
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
ldr r12,.LOPENSSL_armcap
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index c883fcbe93b6..46d41140df27 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -25,7 +25,6 @@
#ifndef __ASSEMBLY__
struct irqaction;
struct pt_regs;
-extern void migrate_irqs(void);
extern void asm_do_IRQ(unsigned int, struct pt_regs *);
void handle_IRQ(unsigned int, struct pt_regs *);
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 9908dacf9229..844861368cd5 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -31,7 +31,6 @@
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/seq_file.h>
-#include <linux/ratelimit.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
@@ -109,64 +108,3 @@ int __init arch_probe_nr_irqs(void)
return nr_irqs;
}
#endif
-
-#ifdef CONFIG_HOTPLUG_CPU
-static bool migrate_one_irq(struct irq_desc *desc)
-{
- struct irq_data *d = irq_desc_get_irq_data(desc);
- const struct cpumask *affinity = irq_data_get_affinity_mask(d);
- struct irq_chip *c;
- bool ret = false;
-
- /*
- * If this is a per-CPU interrupt, or the affinity does not
- * include this CPU, then we have nothing to do.
- */
- if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
- return false;
-
- if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
- affinity = cpu_online_mask;
- ret = true;
- }
-
- c = irq_data_get_irq_chip(d);
- if (!c->irq_set_affinity)
- pr_debug("IRQ%u: unable to set affinity\n", d->irq);
- else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
- cpumask_copy(irq_data_get_affinity_mask(d), affinity);
-
- return ret;
-}
-
-/*
- * The current CPU has been marked offline. Migrate IRQs off this CPU.
- * If the affinity settings do not allow other CPUs, force them onto any
- * available CPU.
- *
- * Note: we must iterate over all IRQs, whether they have an attached
- * action structure or not, as we need to get chained interrupts too.
- */
-void migrate_irqs(void)
-{
- unsigned int i;
- struct irq_desc *desc;
- unsigned long flags;
-
- local_irq_save(flags);
-
- for_each_irq_desc(i, desc) {
- bool affinity_broken;
-
- raw_spin_lock(&desc->lock);
- affinity_broken = migrate_one_irq(desc);
- raw_spin_unlock(&desc->lock);
-
- if (affinity_broken)
- pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
- i, smp_processor_id());
- }
-
- local_irq_restore(flags);
-}
-#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 3bf82232b1be..1d6f5ea522f4 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -254,7 +254,7 @@ int __cpu_disable(void)
/*
* OK - migrate IRQs away from this CPU
*/
- migrate_irqs();
+ irq_migrate_all_off_this_cpu();
/*
* Flush user cache and TLB mappings, and then remove this CPU
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index f1e2922e447c..1e3e08a1c456 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -2390,4 +2390,6 @@ void arch_teardown_dma_ops(struct device *dev)
return;
arm_teardown_iommu_dma_ops(dev);
+ /* Let arch_setup_dma_ops() start again from scratch upon re-probe */
+ set_dma_ops(dev, NULL);
}
diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c
index 2c118a6ab358..0dc23fc227ed 100644
--- a/arch/arm/probes/kprobes/opt-arm.c
+++ b/arch/arm/probes/kprobes/opt-arm.c
@@ -247,7 +247,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *or
}
/* Copy arch-dep-instance from template. */
- memcpy(code, (unsigned char *)optprobe_template_entry,
+ memcpy(code, (unsigned long *)&optprobe_template_entry,
TMPL_END_IDX * sizeof(kprobe_opcode_t));
/* Adjust buffer according to instruction. */
diff --git a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
index 5b4a9609e31f..2468762283a5 100644
--- a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
+++ b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
@@ -351,7 +351,7 @@
reg = <0>;
pinctrl-names = "default";
pinctrl-0 = <&cp0_copper_eth_phy_reset>;
- reset-gpios = <&cp1_gpio1 11 GPIO_ACTIVE_LOW>;
+ reset-gpios = <&cp0_gpio2 11 GPIO_ACTIVE_LOW>;
reset-assert-us = <10000>;
};
diff --git a/arch/arm64/boot/dts/qcom/msm8998.dtsi b/arch/arm64/boot/dts/qcom/msm8998.dtsi
index 8d41b69ec2da..99bccaac31ad 100644
--- a/arch/arm64/boot/dts/qcom/msm8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8998.dtsi
@@ -37,7 +37,7 @@
};
memory@86200000 {
- reg = <0x0 0x86200000 0x0 0x2600000>;
+ reg = <0x0 0x86200000 0x0 0x2d00000>;
no-map;
};
diff --git a/arch/arm64/crypto/chacha-neon-core.S b/arch/arm64/crypto/chacha-neon-core.S
index 021bb9e9784b..706c4e10e9e2 100644
--- a/arch/arm64/crypto/chacha-neon-core.S
+++ b/arch/arm64/crypto/chacha-neon-core.S
@@ -158,8 +158,8 @@ ENTRY(hchacha_block_neon)
mov w3, w2
bl chacha_permute
- st1 {v0.16b}, [x1], #16
- st1 {v3.16b}, [x1]
+ st1 {v0.4s}, [x1], #16
+ st1 {v3.4s}, [x1]
ldp x29, x30, [sp], #16
ret
@@ -532,6 +532,10 @@ ENTRY(chacha_4block_xor_neon)
add v3.4s, v3.4s, v19.4s
add a2, a2, w8
add a3, a3, w9
+CPU_BE( rev a0, a0 )
+CPU_BE( rev a1, a1 )
+CPU_BE( rev a2, a2 )
+CPU_BE( rev a3, a3 )
ld4r {v24.4s-v27.4s}, [x0], #16
ld4r {v28.4s-v31.4s}, [x0]
@@ -552,6 +556,10 @@ ENTRY(chacha_4block_xor_neon)
add v7.4s, v7.4s, v23.4s
add a6, a6, w8
add a7, a7, w9
+CPU_BE( rev a4, a4 )
+CPU_BE( rev a5, a5 )
+CPU_BE( rev a6, a6 )
+CPU_BE( rev a7, a7 )
// x8[0-3] += s2[0]
// x9[0-3] += s2[1]
@@ -569,6 +577,10 @@ ENTRY(chacha_4block_xor_neon)
add v11.4s, v11.4s, v27.4s
add a10, a10, w8
add a11, a11, w9
+CPU_BE( rev a8, a8 )
+CPU_BE( rev a9, a9 )
+CPU_BE( rev a10, a10 )
+CPU_BE( rev a11, a11 )
// x12[0-3] += s3[0]
// x13[0-3] += s3[1]
@@ -586,6 +598,10 @@ ENTRY(chacha_4block_xor_neon)
add v15.4s, v15.4s, v31.4s
add a14, a14, w8
add a15, a15, w9
+CPU_BE( rev a12, a12 )
+CPU_BE( rev a13, a13 )
+CPU_BE( rev a14, a14 )
+CPU_BE( rev a15, a15 )
// interleave 32-bit words in state n, n+1
ldp w6, w7, [x2], #64
diff --git a/arch/arm64/include/asm/neon-intrinsics.h b/arch/arm64/include/asm/neon-intrinsics.h
index 2ba6c6b9541f..71abfc7612b2 100644
--- a/arch/arm64/include/asm/neon-intrinsics.h
+++ b/arch/arm64/include/asm/neon-intrinsics.h
@@ -36,4 +36,8 @@
#include <arm_neon.h>
#endif
+#ifdef CONFIG_CC_IS_CLANG
+#pragma clang diagnostic ignored "-Wincompatible-pointer-types"
+#endif
+
#endif /* __ASM_NEON_INTRINSICS_H */
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 15d79a8e5e5e..eecf7927dab0 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -539,8 +539,7 @@ set_hcr:
/* GICv3 system register access */
mrs x0, id_aa64pfr0_el1
ubfx x0, x0, #24, #4
- cmp x0, #1
- b.ne 3f
+ cbz x0, 3f
mrs_s x0, SYS_ICC_SRE_EL2
orr x0, x0, #ICC_SRE_EL2_SRE // Set ICC_SRE_EL2.SRE==1
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 9dce33b0e260..ddaea0fd2fa4 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -1702,19 +1702,20 @@ void syscall_trace_exit(struct pt_regs *regs)
}
/*
- * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487C.a
- * We also take into account DIT (bit 24), which is not yet documented, and
- * treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may be
- * allocated an EL0 meaning in future.
+ * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a.
+ * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is
+ * not described in ARM DDI 0487D.a.
+ * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may
+ * be allocated an EL0 meaning in future.
* Userspace cannot use these until they have an architectural meaning.
* Note that this follows the SPSR_ELx format, not the AArch32 PSR format.
* We also reserve IL for the kernel; SS is handled dynamically.
*/
#define SPSR_EL1_AARCH64_RES0_BITS \
- (GENMASK_ULL(63,32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \
- GENMASK_ULL(20, 10) | GENMASK_ULL(5, 5))
+ (GENMASK_ULL(63, 32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \
+ GENMASK_ULL(20, 13) | GENMASK_ULL(11, 10) | GENMASK_ULL(5, 5))
#define SPSR_EL1_AARCH32_RES0_BITS \
- (GENMASK_ULL(63,32) | GENMASK_ULL(23, 22) | GENMASK_ULL(20,20))
+ (GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20))
static int valid_compat_regs(struct user_pt_regs *regs)
{
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index d09ec76f08cf..009849328289 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -339,6 +339,9 @@ void __init setup_arch(char **cmdline_p)
smp_init_cpus();
smp_build_mpidr_hash();
+ /* Init percpu seeds for random tags after cpus are set up. */
+ kasan_init_tags();
+
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
/*
* Make sure init_thread_info.ttbr0 always generates translation
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index 4b55b15707a3..f37a86d2a69d 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -252,8 +252,6 @@ void __init kasan_init(void)
memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
- kasan_init_tags();
-
/* At this point kasan is fully initialized. Enable error messages */
init_task.kasan_depth = 0;
pr_info("KernelAddressSanitizer initialized\n");
diff --git a/arch/mips/bcm63xx/dev-enet.c b/arch/mips/bcm63xx/dev-enet.c
index 07b4c65a88a4..8e73d65f3480 100644
--- a/arch/mips/bcm63xx/dev-enet.c
+++ b/arch/mips/bcm63xx/dev-enet.c
@@ -70,6 +70,8 @@ static struct platform_device bcm63xx_enet_shared_device = {
static int shared_device_registered;
+static u64 enet_dmamask = DMA_BIT_MASK(32);
+
static struct resource enet0_res[] = {
{
.start = -1, /* filled at runtime */
@@ -99,6 +101,8 @@ static struct platform_device bcm63xx_enet0_device = {
.resource = enet0_res,
.dev = {
.platform_data = &enet0_pd,
+ .dma_mask = &enet_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
@@ -131,6 +135,8 @@ static struct platform_device bcm63xx_enet1_device = {
.resource = enet1_res,
.dev = {
.platform_data = &enet1_pd,
+ .dma_mask = &enet_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
@@ -157,6 +163,8 @@ static struct platform_device bcm63xx_enetsw_device = {
.resource = enetsw_res,
.dev = {
.platform_data = &enetsw_pd,
+ .dma_mask = &enet_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
diff --git a/arch/mips/kernel/cmpxchg.c b/arch/mips/kernel/cmpxchg.c
index 0b9535bc2c53..6b2a4a902a98 100644
--- a/arch/mips/kernel/cmpxchg.c
+++ b/arch/mips/kernel/cmpxchg.c
@@ -54,10 +54,9 @@ unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int s
unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
unsigned long new, unsigned int size)
{
- u32 mask, old32, new32, load32;
+ u32 mask, old32, new32, load32, load;
volatile u32 *ptr32;
unsigned int shift;
- u8 load;
/* Check that ptr is naturally aligned */
WARN_ON((unsigned long)ptr & (size - 1));
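The cmpxchg hunk above widens 'load' from u8 to u32. Below is a quick user-space model of the truncation it fixes, with values invented for the demo: for a 16-bit cmpxchg the loaded halfword does not fit in a u8, so the comparison against the expected old value goes wrong.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t word = 0x56781234; /* aligned word containing the halfword */
	uint32_t mask = 0xffff;     /* 16-bit cmpxchg on the low halfword */
	uint32_t old  = 0x1234;     /* value the caller expects to find */

	uint8_t  load8  = word & mask; /* old code: 0x1234 truncates to 0x34 */
	uint32_t load32 = word & mask; /* fixed: all 16 bits survive */

	/* the truncated compare spuriously fails, so cmpxchg never succeeds */
	printf("u8 compare:  %s\n", load8  == old ? "match" : "mismatch");
	printf("u32 compare: %s\n", load32 == old ? "match" : "mismatch");
	return 0;
}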
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 8c6c48ed786a..d2e5a5ad0e6f 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -384,7 +384,8 @@ static void __init bootmem_init(void)
init_initrd();
reserved_end = (unsigned long) PFN_UP(__pa_symbol(&_end));
- memblock_reserve(PHYS_OFFSET, reserved_end << PAGE_SHIFT);
+ memblock_reserve(PHYS_OFFSET,
+ (reserved_end << PAGE_SHIFT) - PHYS_OFFSET);
/*
* max_low_pfn is not a number of pages. The number of pages
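The memblock hunk above is a base/size mix-up: memblock_reserve() takes a base and a size, but the old code passed an end address where the size belongs, over-reserving by PHYS_OFFSET whenever that offset is non-zero. Illustrative arithmetic with made-up addresses:

#include <stdio.h>

int main(void)
{
	unsigned long phys_offset  = 0x20000000; /* hypothetical PHYS_OFFSET */
	unsigned long reserved_end = 0x20400000; /* end of kernel image, bytes */

	unsigned long bad_size  = reserved_end;               /* old argument */
	unsigned long good_size = reserved_end - phys_offset; /* fixed size */

	/* reserving 'bad_size' bytes from 'phys_offset' runs far past _end */
	printf("old: reserve 0x%lx bytes, fixed: 0x%lx bytes\n",
	       bad_size, good_size);
	return 0;
}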
diff --git a/arch/mips/lantiq/xway/vmmc.c b/arch/mips/lantiq/xway/vmmc.c
index 577ec81b557d..3deab9a77718 100644
--- a/arch/mips/lantiq/xway/vmmc.c
+++ b/arch/mips/lantiq/xway/vmmc.c
@@ -31,8 +31,8 @@ static int vmmc_probe(struct platform_device *pdev)
dma_addr_t dma;
cp1_base =
- (void *) CPHYSADDR(dma_alloc_coherent(NULL, CP1_SIZE,
- &dma, GFP_ATOMIC));
+ (void *) CPHYSADDR(dma_alloc_coherent(&pdev->dev, CP1_SIZE,
+ &dma, GFP_KERNEL));
gpio_count = of_gpio_count(pdev->dev.of_node);
while (gpio_count > 0) {
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
index b16710a8a9e7..0effd3cba9a7 100644
--- a/arch/mips/net/ebpf_jit.c
+++ b/arch/mips/net/ebpf_jit.c
@@ -79,8 +79,6 @@ enum reg_val_type {
REG_64BIT_32BIT,
/* 32-bit compatible, need truncation for 64-bit ops. */
REG_32BIT,
- /* 32-bit zero extended. */
- REG_32BIT_ZERO_EX,
/* 32-bit no sign/zero extension needed. */
REG_32BIT_POS
};
@@ -343,12 +341,15 @@ static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg)
const struct bpf_prog *prog = ctx->skf;
int stack_adjust = ctx->stack_size;
int store_offset = stack_adjust - 8;
+ enum reg_val_type td;
int r0 = MIPS_R_V0;
- if (dest_reg == MIPS_R_RA &&
- get_reg_val_type(ctx, prog->len, BPF_REG_0) == REG_32BIT_ZERO_EX)
+ if (dest_reg == MIPS_R_RA) {
/* Don't let zero extended value escape. */
- emit_instr(ctx, sll, r0, r0, 0);
+ td = get_reg_val_type(ctx, prog->len, BPF_REG_0);
+ if (td == REG_64BIT)
+ emit_instr(ctx, sll, r0, r0, 0);
+ }
if (ctx->flags & EBPF_SAVE_RA) {
emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP);
@@ -692,7 +693,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
if (dst < 0)
return dst;
td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
- if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
+ if (td == REG_64BIT) {
/* sign extend */
emit_instr(ctx, sll, dst, dst, 0);
}
@@ -707,7 +708,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
if (dst < 0)
return dst;
td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
- if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
+ if (td == REG_64BIT) {
/* sign extend */
emit_instr(ctx, sll, dst, dst, 0);
}
@@ -721,7 +722,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
if (dst < 0)
return dst;
td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
- if (td == REG_64BIT || td == REG_32BIT_ZERO_EX)
+ if (td == REG_64BIT)
/* sign extend */
emit_instr(ctx, sll, dst, dst, 0);
if (insn->imm == 1) {
@@ -860,13 +861,13 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
if (src < 0 || dst < 0)
return -EINVAL;
td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
- if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
+ if (td == REG_64BIT) {
/* sign extend */
emit_instr(ctx, sll, dst, dst, 0);
}
did_move = false;
ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
- if (ts == REG_64BIT || ts == REG_32BIT_ZERO_EX) {
+ if (ts == REG_64BIT) {
int tmp_reg = MIPS_R_AT;
if (bpf_op == BPF_MOV) {
@@ -1254,8 +1255,7 @@ jeq_common:
if (insn->imm == 64 && td == REG_32BIT)
emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
- if (insn->imm != 64 &&
- (td == REG_64BIT || td == REG_32BIT_ZERO_EX)) {
+ if (insn->imm != 64 && td == REG_64BIT) {
/* sign extend */
emit_instr(ctx, sll, dst, dst, 0);
}
@@ -1819,7 +1819,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
/* Update the icache */
flush_icache_range((unsigned long)ctx.target,
- (unsigned long)(ctx.target + ctx.idx * sizeof(u32)));
+ (unsigned long)&ctx.target[ctx.idx]);
if (bpf_jit_enable > 1)
/* Dump JIT code */
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index 2582df1c529b..0964c236e3e5 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -308,15 +308,29 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
long do_syscall_trace_enter(struct pt_regs *regs)
{
- if (test_thread_flag(TIF_SYSCALL_TRACE) &&
- tracehook_report_syscall_entry(regs)) {
+ if (test_thread_flag(TIF_SYSCALL_TRACE)) {
+ int rc = tracehook_report_syscall_entry(regs);
+
/*
- * Tracing decided this syscall should not happen or the
- * debugger stored an invalid system call number. Skip
- * the system call and the system call restart handling.
+ * As tracesys_next does not set %r28 to -ENOSYS
+ * when %r20 is set to -1, initialize it here.
*/
- regs->gr[20] = -1UL;
- goto out;
+ regs->gr[28] = -ENOSYS;
+
+ if (rc) {
+ /*
+ * A nonzero return code from
+ * tracehook_report_syscall_entry() tells us
+ * to prevent the syscall execution. Skip
+			 * the syscall and the syscall restart handling.
+ *
+ * Note that the tracer may also just change
+ * regs->gr[20] to an invalid syscall number,
+ * that is handled by tracesys_next.
+ */
+ regs->gr[20] = -1UL;
+ return -1;
+ }
}
/* Do the secure computing check after ptrace. */
@@ -340,7 +354,6 @@ long do_syscall_trace_enter(struct pt_regs *regs)
regs->gr[24] & 0xffffffff,
regs->gr[23] & 0xffffffff);
-out:
/*
* Sign extend the syscall number to 64bit since it may have been
* modified by a compat ptrace call
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 7db3119f8a5b..145373f0e5dc 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -1593,6 +1593,8 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
pnv_pci_ioda2_setup_dma_pe(phb, pe);
#ifdef CONFIG_IOMMU_API
+ iommu_register_group(&pe->table_group,
+ pe->phb->hose->global_number, pe->pe_number);
pnv_ioda_setup_bus_iommu_group(pe, &pe->table_group, NULL);
#endif
}
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index 45fb70b4bfa7..ef9448a907c6 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -1147,6 +1147,8 @@ static int pnv_tce_iommu_bus_notifier(struct notifier_block *nb,
return 0;
pe = &phb->ioda.pe_array[pdn->pe_number];
+ if (!pe->table_group.group)
+ return 0;
iommu_add_device(&pe->table_group, dev);
return 0;
case BUS_NOTIFY_DEL_DEVICE:
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index a153257bf7d9..d62fa148558b 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -297,7 +297,7 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
scb_s->crycbd = 0;
apie_h = vcpu->arch.sie_block->eca & ECA_APIE;
- if (!apie_h && !key_msk)
+ if (!apie_h && (!key_msk || fmt_o == CRYCB_FORMAT0))
return 0;
if (!crycb_addr)
diff --git a/arch/sh/boot/dts/Makefile b/arch/sh/boot/dts/Makefile
index 01d0f7fb14cc..2563d1e532e2 100644
--- a/arch/sh/boot/dts/Makefile
+++ b/arch/sh/boot/dts/Makefile
@@ -1,3 +1,3 @@
ifneq ($(CONFIG_BUILTIN_DTB_SOURCE),"")
-obj-y += $(patsubst "%",%,$(CONFIG_BUILTIN_DTB_SOURCE)).dtb.o
+obj-$(CONFIG_USE_BUILTIN_DTB) += $(patsubst "%",%,$(CONFIG_BUILTIN_DTB_SOURCE)).dtb.o
endif
diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h
index 705dafc2d11a..2bdbbbcfa393 100644
--- a/arch/x86/include/asm/hyperv-tlfs.h
+++ b/arch/x86/include/asm/hyperv-tlfs.h
@@ -841,7 +841,7 @@ union hv_gpa_page_range {
 * count equals how many entries of union hv_gpa_page_range can
* be populated into the input parameter page.
*/
-#define HV_MAX_FLUSH_REP_COUNT (PAGE_SIZE - 2 * sizeof(u64) / \
+#define HV_MAX_FLUSH_REP_COUNT ((PAGE_SIZE - 2 * sizeof(u64)) / \
sizeof(union hv_gpa_page_range))
struct hv_guest_mapping_flush_list {
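The one-character Hyper-V fix above is pure operator precedence: division binds tighter than subtraction, so without the added parentheses only 2 * sizeof(u64) was divided by the entry size. A self-contained check, where the 8-byte typedef stands in for union hv_gpa_page_range:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096
typedef uint64_t gpa_range_t; /* stand-in: the real union is 8 bytes */

#define BAD_COUNT  (PAGE_SIZE - 2 * sizeof(uint64_t) /  sizeof(gpa_range_t))
#define GOOD_COUNT ((PAGE_SIZE - 2 * sizeof(uint64_t)) / sizeof(gpa_range_t))

int main(void)
{
	/* without parens: 4096 - (16 / 8) = 4094 bogus "entries" */
	printf("bad:  %zu\n", BAD_COUNT);
	/* with parens: (4096 - 16) / 8 = 510 entries actually fit */
	printf("good: %zu\n", GOOD_COUNT);
	return 0;
}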
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4660ce90de7f..180373360e34 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -299,6 +299,7 @@ union kvm_mmu_extended_role {
unsigned int cr4_smap:1;
unsigned int cr4_smep:1;
unsigned int cr4_la57:1;
+ unsigned int maxphyaddr:6;
};
};
@@ -397,6 +398,7 @@ struct kvm_mmu {
void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
u64 *spte, const void *pte);
hpa_t root_hpa;
+ gpa_t root_cr3;
union kvm_mmu_role mmu_role;
u8 root_level;
u8 shadow_root_level;
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 780f2b42c8ef..c1334aaaa78d 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -284,7 +284,7 @@ do { \
__put_user_goto(x, ptr, "l", "k", "ir", label); \
break; \
case 8: \
- __put_user_goto_u64((__typeof__(*ptr))(x), ptr, label); \
+ __put_user_goto_u64(x, ptr, label); \
break; \
default: \
__put_user_bad(); \
@@ -431,8 +431,10 @@ do { \
({ \
__label__ __pu_label; \
int __pu_err = -EFAULT; \
+ __typeof__(*(ptr)) __pu_val; \
+ __pu_val = x; \
__uaccess_begin(); \
- __put_user_size((x), (ptr), (size), __pu_label); \
+ __put_user_size(__pu_val, (ptr), (size), __pu_label); \
__pu_err = 0; \
__pu_label: \
__uaccess_end(); \
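The put_user hunk above makes the macro evaluate its argument exactly once, into a temporary of the destination's type, before __uaccess_begin(). Here is a user-space sketch of the evaluate-once half of that; the macro names are invented, and __typeof__ is the GCC/Clang extension the kernel itself relies on:

#include <stdio.h>

/* old shape: the 'x' argument is expanded, and evaluated, more than once */
#define PUT_TWICE(x, out) do { *(out) = (x); (void)(x); } while (0)

/* fixed shape: evaluate once into a temp typed like the destination */
#define PUT_ONCE(x, out)                      \
	do {                                  \
		__typeof__(*(out)) val = (x); \
		*(out) = val;                 \
	} while (0)

int main(void)
{
	int calls = 0, dst = 0;

	PUT_TWICE(++calls, &dst); /* side effect runs twice */
	printf("twice: calls=%d dst=%d\n", calls, dst);

	calls = 0;
	PUT_ONCE(++calls, &dst);  /* side effect runs once */
	printf("once:  calls=%d dst=%d\n", calls, dst);
	return 0;
}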
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index bbffa6c54697..c07958b59f50 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -335,6 +335,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;
unsigned f_umip = kvm_x86_ops->umip_emulated() ? F(UMIP) : 0;
unsigned f_intel_pt = kvm_x86_ops->pt_supported() ? F(INTEL_PT) : 0;
+ unsigned f_la57 = 0;
/* cpuid 1.edx */
const u32 kvm_cpuid_1_edx_x86_features =
@@ -489,7 +490,10 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
// TSC_ADJUST is emulated
entry->ebx |= F(TSC_ADJUST);
entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
+ f_la57 = entry->ecx & F(LA57);
cpuid_mask(&entry->ecx, CPUID_7_ECX);
+ /* Set LA57 based on hardware capability. */
+ entry->ecx |= f_la57;
entry->ecx |= f_umip;
/* PKU is not yet implemented for shadow paging. */
if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index da9c42349b1f..f2d1d230d5b8 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3555,6 +3555,7 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
&invalid_list);
mmu->root_hpa = INVALID_PAGE;
}
+ mmu->root_cr3 = 0;
}
kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
@@ -3610,6 +3611,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
} else
BUG();
+ vcpu->arch.mmu->root_cr3 = vcpu->arch.mmu->get_cr3(vcpu);
return 0;
}
@@ -3618,10 +3620,11 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
{
struct kvm_mmu_page *sp;
u64 pdptr, pm_mask;
- gfn_t root_gfn;
+ gfn_t root_gfn, root_cr3;
int i;
- root_gfn = vcpu->arch.mmu->get_cr3(vcpu) >> PAGE_SHIFT;
+ root_cr3 = vcpu->arch.mmu->get_cr3(vcpu);
+ root_gfn = root_cr3 >> PAGE_SHIFT;
if (mmu_check_root(vcpu, root_gfn))
return 1;
@@ -3646,7 +3649,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
++sp->root_count;
spin_unlock(&vcpu->kvm->mmu_lock);
vcpu->arch.mmu->root_hpa = root;
- return 0;
+ goto set_root_cr3;
}
/*
@@ -3712,6 +3715,9 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root);
}
+set_root_cr3:
+ vcpu->arch.mmu->root_cr3 = root_cr3;
+
return 0;
}
@@ -4163,7 +4169,7 @@ static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3,
struct kvm_mmu_root_info root;
struct kvm_mmu *mmu = vcpu->arch.mmu;
- root.cr3 = mmu->get_cr3(vcpu);
+ root.cr3 = mmu->root_cr3;
root.hpa = mmu->root_hpa;
for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
@@ -4176,6 +4182,7 @@ static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3,
}
mmu->root_hpa = root.hpa;
+ mmu->root_cr3 = root.cr3;
return i < KVM_MMU_NUM_PREV_ROOTS;
}
@@ -4770,6 +4777,7 @@ static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu)
ext.cr4_pse = !!is_pse(vcpu);
ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE);
ext.cr4_la57 = !!kvm_read_cr4_bits(vcpu, X86_CR4_LA57);
+ ext.maxphyaddr = cpuid_maxphyaddr(vcpu);
ext.valid = 1;
@@ -5516,11 +5524,13 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
vcpu->arch.root_mmu.root_hpa = INVALID_PAGE;
+ vcpu->arch.root_mmu.root_cr3 = 0;
vcpu->arch.root_mmu.translate_gpa = translate_gpa;
for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
vcpu->arch.root_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
vcpu->arch.guest_mmu.root_hpa = INVALID_PAGE;
+ vcpu->arch.guest_mmu.root_cr3 = 0;
vcpu->arch.guest_mmu.translate_gpa = translate_gpa;
for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
vcpu->arch.guest_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
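The root_cr3 additions above all serve one pattern: a cache entry must record the key it was built for, not re-derive it later. Previously the cached-root check compared against mmu->get_cr3(vcpu), i.e. the guest's current CR3, which may have changed since the root was built. A generic sketch of the pattern, with types and values invented:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* stand-in for a cached shadow page-table root */
struct root_cache {
	uint64_t hpa; /* cached value: host address of the root table */
	uint64_t cr3; /* key recorded when the root was built */
};

static bool root_usable(const struct root_cache *rc, uint64_t new_cr3)
{
	/* compare against the recorded key, not the live guest register */
	return rc->cr3 == new_cr3;
}

int main(void)
{
	struct root_cache rc = { .hpa = 0x1000, .cr3 = 0xabcd000 };

	printf("reuse for 0xabcd000: %d\n", root_usable(&rc, 0xabcd000));
	printf("reuse for 0x1234000: %d\n", root_usable(&rc, 0x1234000));
	return 0;
}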
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index 6521134057e8..856fa409c536 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -117,67 +117,11 @@ __visible bool ex_handler_fprestore(const struct exception_table_entry *fixup,
}
EXPORT_SYMBOL_GPL(ex_handler_fprestore);
-/* Helper to check whether a uaccess fault indicates a kernel bug. */
-static bool bogus_uaccess(struct pt_regs *regs, int trapnr,
- unsigned long fault_addr)
-{
- /* This is the normal case: #PF with a fault address in userspace. */
- if (trapnr == X86_TRAP_PF && fault_addr < TASK_SIZE_MAX)
- return false;
-
- /*
- * This code can be reached for machine checks, but only if the #MC
- * handler has already decided that it looks like a candidate for fixup.
- * This e.g. happens when attempting to access userspace memory which
- * the CPU can't access because of uncorrectable bad memory.
- */
- if (trapnr == X86_TRAP_MC)
- return false;
-
- /*
- * There are two remaining exception types we might encounter here:
- * - #PF for faulting accesses to kernel addresses
- * - #GP for faulting accesses to noncanonical addresses
- * Complain about anything else.
- */
- if (trapnr != X86_TRAP_PF && trapnr != X86_TRAP_GP) {
- WARN(1, "unexpected trap %d in uaccess\n", trapnr);
- return false;
- }
-
- /*
- * This is a faulting memory access in kernel space, on a kernel
- * address, in a usercopy function. This can e.g. be caused by improper
- * use of helpers like __put_user and by improper attempts to access
- * userspace addresses in KERNEL_DS regions.
- * The one (semi-)legitimate exception are probe_kernel_{read,write}(),
- * which can be invoked from places like kgdb, /dev/mem (for reading)
- * and privileged BPF code (for reading).
- * The probe_kernel_*() functions set the kernel_uaccess_faults_ok flag
- * to tell us that faulting on kernel addresses, and even noncanonical
- * addresses, in a userspace accessor does not necessarily imply a
- * kernel bug, root might just be doing weird stuff.
- */
- if (current->kernel_uaccess_faults_ok)
- return false;
-
- /* This is bad. Refuse the fixup so that we go into die(). */
- if (trapnr == X86_TRAP_PF) {
- pr_emerg("BUG: pagefault on kernel address 0x%lx in non-whitelisted uaccess\n",
- fault_addr);
- } else {
- pr_emerg("BUG: GPF in non-whitelisted uaccess (non-canonical address?)\n");
- }
- return true;
-}
-
__visible bool ex_handler_uaccess(const struct exception_table_entry *fixup,
struct pt_regs *regs, int trapnr,
unsigned long error_code,
unsigned long fault_addr)
{
- if (bogus_uaccess(regs, trapnr, fault_addr))
- return false;
regs->ip = ex_fixup_addr(fixup);
return true;
}
@@ -188,8 +132,6 @@ __visible bool ex_handler_ext(const struct exception_table_entry *fixup,
unsigned long error_code,
unsigned long fault_addr)
{
- if (bogus_uaccess(regs, trapnr, fault_addr))
- return false;
/* Special hack for uaccess_err */
current->thread.uaccess_err = 1;
regs->ip = ex_fixup_addr(fixup);
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 17eb09d222ff..ec78a04eb136 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -122,8 +122,10 @@ static void alg_do_release(const struct af_alg_type *type, void *private)
int af_alg_release(struct socket *sock)
{
- if (sock->sk)
+ if (sock->sk) {
sock_put(sock->sk);
+ sock->sk = NULL;
+ }
return 0;
}
EXPORT_SYMBOL_GPL(af_alg_release);
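Clearing sock->sk right after the final sock_put() is the standard guard against a second release path dropping the same reference. A toy refcount model of the before/after behaviour, with all names invented:

#include <stdio.h>

struct sk { int refcnt; };

static void sk_put(struct sk *s)
{
	if (--s->refcnt == 0)
		printf("freed\n");           /* the kernel would free here */
	else if (s->refcnt < 0)
		printf("BUG: double put\n"); /* refcount underflow */
}

struct socket { struct sk *sk; };

static void release(struct socket *sock)
{
	if (sock->sk) {
		sk_put(sock->sk);
		sock->sk = NULL; /* the fix: later calls see NULL and skip */
	}
}

int main(void)
{
	struct sk s = { .refcnt = 1 };
	struct socket sock = { .sk = &s };

	release(&sock); /* drops the last reference */
	release(&sock); /* now a harmless no-op, not a double put */
	return 0;
}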
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 0ea2139c50d8..ccd296dbb95c 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -95,7 +95,7 @@ static void __update_runtime_status(struct device *dev, enum rpm_status status)
static void pm_runtime_deactivate_timer(struct device *dev)
{
if (dev->power.timer_expires > 0) {
- hrtimer_cancel(&dev->power.suspend_timer);
+ hrtimer_try_to_cancel(&dev->power.suspend_timer);
dev->power.timer_expires = 0;
}
}
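The runtime-PM hunk swaps hrtimer_cancel() for hrtimer_try_to_cancel(): the former waits for a running callback to finish, which can deadlock if the caller holds a lock the callback needs, while the latter just reports -1 and returns. A stubbed sketch of the two semantics; the stubs only mimic the real hrtimer API's return behaviour:

#include <stdio.h>

static int callback_running = 1; /* pretend the timer callback is mid-flight */

/* mimics hrtimer_cancel(): waits until no callback is running */
static int cancel_and_wait(void)
{
	while (callback_running)
		; /* spins forever if the callback needs a lock we hold */
	return 1;
}

/* mimics hrtimer_try_to_cancel(): never waits, -1 means "still running" */
static int try_to_cancel(void)
{
	return callback_running ? -1 : 1;
}

int main(void)
{
	/* safe under a lock: we may fail to cancel, but we cannot deadlock */
	printf("try_to_cancel -> %d\n", try_to_cancel());
	(void)cancel_and_wait; /* deliberately not called: it would hang */
	return 0;
}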
diff --git a/drivers/clk/at91/at91sam9x5.c b/drivers/clk/at91/at91sam9x5.c
index 2fe225a697df..3487e03d4bc6 100644
--- a/drivers/clk/at91/at91sam9x5.c
+++ b/drivers/clk/at91/at91sam9x5.c
@@ -144,8 +144,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
return;
at91sam9x5_pmc = pmc_data_allocate(PMC_MAIN + 1,
- nck(at91sam9x5_systemck),
- nck(at91sam9x35_periphck), 0);
+ nck(at91sam9x5_systemck), 31, 0);
if (!at91sam9x5_pmc)
return;
@@ -210,7 +209,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
parent_names[1] = "mainck";
parent_names[2] = "plladivck";
parent_names[3] = "utmick";
- parent_names[4] = "mck";
+ parent_names[4] = "masterck";
for (i = 0; i < 2; i++) {
char name[6];
diff --git a/drivers/clk/at91/sama5d2.c b/drivers/clk/at91/sama5d2.c
index d69ad96fe988..cd0ef7274fdb 100644
--- a/drivers/clk/at91/sama5d2.c
+++ b/drivers/clk/at91/sama5d2.c
@@ -240,7 +240,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
parent_names[1] = "mainck";
parent_names[2] = "plladivck";
parent_names[3] = "utmick";
- parent_names[4] = "mck";
+ parent_names[4] = "masterck";
for (i = 0; i < 3; i++) {
char name[6];
@@ -291,7 +291,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
parent_names[1] = "mainck";
parent_names[2] = "plladivck";
parent_names[3] = "utmick";
- parent_names[4] = "mck";
+ parent_names[4] = "masterck";
parent_names[5] = "audiopll_pmcck";
for (i = 0; i < ARRAY_SIZE(sama5d2_gck); i++) {
hw = at91_clk_register_generated(regmap, &pmc_pcr_lock,
diff --git a/drivers/clk/at91/sama5d4.c b/drivers/clk/at91/sama5d4.c
index e358be7f6c8d..b645a9d59cdb 100644
--- a/drivers/clk/at91/sama5d4.c
+++ b/drivers/clk/at91/sama5d4.c
@@ -207,7 +207,7 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
parent_names[1] = "mainck";
parent_names[2] = "plladivck";
parent_names[3] = "utmick";
- parent_names[4] = "mck";
+ parent_names[4] = "masterck";
for (i = 0; i < 3; i++) {
char name[6];
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
index 3b97f60540ad..609970c0b666 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
@@ -264,9 +264,9 @@ static SUNXI_CCU_GATE(ahb1_mmc1_clk, "ahb1-mmc1", "ahb1",
static SUNXI_CCU_GATE(ahb1_mmc2_clk, "ahb1-mmc2", "ahb1",
0x060, BIT(10), 0);
static SUNXI_CCU_GATE(ahb1_mmc3_clk, "ahb1-mmc3", "ahb1",
- 0x060, BIT(12), 0);
+ 0x060, BIT(11), 0);
static SUNXI_CCU_GATE(ahb1_nand1_clk, "ahb1-nand1", "ahb1",
- 0x060, BIT(13), 0);
+ 0x060, BIT(12), 0);
static SUNXI_CCU_GATE(ahb1_nand0_clk, "ahb1-nand0", "ahb1",
0x060, BIT(13), 0);
static SUNXI_CCU_GATE(ahb1_sdram_clk, "ahb1-sdram", "ahb1",
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
index 621b1cd996db..ac12f261f8ca 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
@@ -542,7 +542,7 @@ static struct ccu_reset_map sun8i_v3s_ccu_resets[] = {
[RST_BUS_OHCI0] = { 0x2c0, BIT(29) },
[RST_BUS_VE] = { 0x2c4, BIT(0) },
- [RST_BUS_TCON0] = { 0x2c4, BIT(3) },
+ [RST_BUS_TCON0] = { 0x2c4, BIT(4) },
[RST_BUS_CSI] = { 0x2c4, BIT(8) },
[RST_BUS_DE] = { 0x2c4, BIT(12) },
[RST_BUS_DBG] = { 0x2c4, BIT(31) },
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index 242c3370544e..9ed46d188cb5 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -187,8 +187,8 @@ static int scmi_cpufreq_exit(struct cpufreq_policy *policy)
cpufreq_cooling_unregister(priv->cdev);
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
- kfree(priv);
dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
+ kfree(priv);
return 0;
}
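The two-line swap in scmi_cpufreq_exit() is a textbook use-after-free: priv was freed and then priv->cpu_dev dereferenced on the next line. A minimal reproduction of the ordering rule with stand-in types:

#include <stdio.h>
#include <stdlib.h>

struct priv { int cpu_dev; };

static void remove_opps(int cpu_dev)
{
	printf("removing OPPs for dev %d\n", cpu_dev);
}

static void exit_path(struct priv *priv)
{
	/* the bug, in order: free(priv); remove_opps(priv->cpu_dev); */

	/* the fix: finish every use of priv before freeing it */
	remove_opps(priv->cpu_dev);
	free(priv);
}

int main(void)
{
	struct priv *p = malloc(sizeof(*p));

	if (!p)
		return 1;
	p->cpu_dev = 3;
	exit_path(p);
	return 0;
}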
diff --git a/drivers/crypto/ccree/cc_pm.h b/drivers/crypto/ccree/cc_pm.h
index f62624357020..907a6db4d6c0 100644
--- a/drivers/crypto/ccree/cc_pm.h
+++ b/drivers/crypto/ccree/cc_pm.h
@@ -30,7 +30,7 @@ static inline int cc_pm_init(struct cc_drvdata *drvdata)
return 0;
}
-static void cc_pm_go(struct cc_drvdata *drvdata) {}
+static inline void cc_pm_go(struct cc_drvdata *drvdata) {}
static inline void cc_pm_fini(struct cc_drvdata *drvdata) {}
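The ccree stub needed `inline` because it lives in a header: a plain `static` function definition included into several translation units emits an unused private copy (and a -Wunused-function warning) in each one, whereas `static inline` is silently discarded when unused. A single-file sketch of the two forms:

#include <stdio.h>

/* as if in a header: plain static stub -> "defined but not used"
 * warning in every .c file that includes it without calling it */
static void stub_plain(void) { }

/* static inline stub -> no warning, no code emitted when unused */
static inline void stub_inline(void) { }

int main(void)
{
	stub_inline();
	(void)stub_plain; /* referenced only to keep this demo warning-clean */
	printf("ok\n");
	return 0;
}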
diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c
index 00e954f22bc9..74401e0adb29 100644
--- a/drivers/gpio/gpio-mt7621.c
+++ b/drivers/gpio/gpio-mt7621.c
@@ -30,6 +30,7 @@
#define GPIO_REG_EDGE 0xA0
struct mtk_gc {
+ struct irq_chip irq_chip;
struct gpio_chip chip;
spinlock_t lock;
int bank;
@@ -189,13 +190,6 @@ mediatek_gpio_irq_type(struct irq_data *d, unsigned int type)
return 0;
}
-static struct irq_chip mediatek_gpio_irq_chip = {
- .irq_unmask = mediatek_gpio_irq_unmask,
- .irq_mask = mediatek_gpio_irq_mask,
- .irq_mask_ack = mediatek_gpio_irq_mask,
- .irq_set_type = mediatek_gpio_irq_type,
-};
-
static int
mediatek_gpio_xlate(struct gpio_chip *chip,
const struct of_phandle_args *spec, u32 *flags)
@@ -254,6 +248,13 @@ mediatek_gpio_bank_probe(struct device *dev,
return ret;
}
+ rg->irq_chip.name = dev_name(dev);
+ rg->irq_chip.parent_device = dev;
+ rg->irq_chip.irq_unmask = mediatek_gpio_irq_unmask;
+ rg->irq_chip.irq_mask = mediatek_gpio_irq_mask;
+ rg->irq_chip.irq_mask_ack = mediatek_gpio_irq_mask;
+ rg->irq_chip.irq_set_type = mediatek_gpio_irq_type;
+
if (mtk->gpio_irq) {
/*
* Manually request the irq here instead of passing
@@ -270,14 +271,14 @@ mediatek_gpio_bank_probe(struct device *dev,
return ret;
}
- ret = gpiochip_irqchip_add(&rg->chip, &mediatek_gpio_irq_chip,
+ ret = gpiochip_irqchip_add(&rg->chip, &rg->irq_chip,
0, handle_simple_irq, IRQ_TYPE_NONE);
if (ret) {
dev_err(dev, "failed to add gpiochip_irqchip\n");
return ret;
}
- gpiochip_set_chained_irqchip(&rg->chip, &mediatek_gpio_irq_chip,
+ gpiochip_set_chained_irqchip(&rg->chip, &rg->irq_chip,
mtk->gpio_irq, NULL);
}
@@ -310,7 +311,6 @@ mediatek_gpio_probe(struct platform_device *pdev)
mtk->gpio_irq = irq_of_parse_and_map(np, 0);
mtk->dev = dev;
platform_set_drvdata(pdev, mtk);
- mediatek_gpio_irq_chip.name = dev_name(dev);
for (i = 0; i < MTK_BANK_CNT; i++) {
ret = mediatek_gpio_bank_probe(dev, np, i);
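Embedding the irq_chip in struct mtk_gc turns one shared, file-scope chip into a per-bank instance, so per-device fields such as the name no longer get clobbered: with the old static chip, the last probed device's dev_name() overwrote everyone's. A stand-alone sketch of the per-instance pattern, with types simplified:

#include <stdio.h>

struct irq_chip { char name[16]; };

/* per-bank state embeds its own chip instead of sharing one static copy */
struct bank {
	struct irq_chip irq_chip;
	int id;
};

static void bank_probe(struct bank *b, int id)
{
	b->id = id;
	/* each instance carries its own name; no cross-device clobbering */
	snprintf(b->irq_chip.name, sizeof(b->irq_chip.name), "gpio%d", id);
}

int main(void)
{
	struct bank banks[3];
	int i;

	for (i = 0; i < 3; i++)
		bank_probe(&banks[i], i);
	for (i = 0; i < 3; i++)
		printf("bank %d chip: %s\n", banks[i].id, banks[i].irq_chip.name);
	return 0;
}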
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index e9600b556f39..bcc6be4a5cb2 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -245,6 +245,7 @@ static bool pxa_gpio_has_pinctrl(void)
{
switch (gpio_type) {
case PXA3XX_GPIO:
+ case MMP2_GPIO:
return false;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 9efa681d0878..8d0d7f3dd5fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -411,6 +411,8 @@ struct amdgpu_fpriv {
struct amdgpu_ctx_mgr ctx_mgr;
};
+int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv);
+
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned size, struct amdgpu_ib *ib);
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index e957e42c539a..fe1d7368c1e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -131,7 +131,7 @@ static void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
{
- int i, n;
+ int i;
int last_valid_bit;
if (adev->kfd.dev) {
@@ -142,7 +142,9 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
.gpuvm_size = min(adev->vm_manager.max_pfn
<< AMDGPU_GPU_PAGE_SHIFT,
AMDGPU_GMC_HOLE_START),
- .drm_render_minor = adev->ddev->render->index
+ .drm_render_minor = adev->ddev->render->index,
+ .sdma_doorbell_idx = adev->doorbell_index.sdma_engine,
+
};
/* this is going to have a few of the MSBs set that we need to
@@ -172,35 +174,20 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
&gpu_resources.doorbell_aperture_size,
&gpu_resources.doorbell_start_offset);
- if (adev->asic_type < CHIP_VEGA10) {
- kgd2kfd_device_init(adev->kfd.dev, &gpu_resources);
- return;
- }
-
- n = (adev->asic_type < CHIP_VEGA20) ? 2 : 8;
-
- for (i = 0; i < n; i += 2) {
- /* On SOC15 the BIF is involved in routing
- * doorbells using the low 12 bits of the
- * address. Communicate the assignments to
- * KFD. KFD uses two doorbell pages per
- * process in case of 64-bit doorbells so we
- * can use each doorbell assignment twice.
- */
- gpu_resources.sdma_doorbell[0][i] =
- adev->doorbell_index.sdma_engine[0] + (i >> 1);
- gpu_resources.sdma_doorbell[0][i+1] =
- adev->doorbell_index.sdma_engine[0] + 0x200 + (i >> 1);
- gpu_resources.sdma_doorbell[1][i] =
- adev->doorbell_index.sdma_engine[1] + (i >> 1);
- gpu_resources.sdma_doorbell[1][i+1] =
- adev->doorbell_index.sdma_engine[1] + 0x200 + (i >> 1);
- }
- /* Doorbells 0x0e0-0ff and 0x2e0-2ff are reserved for
- * SDMA, IH and VCN. So don't use them for the CP.
+	/* Since SOC15, the BIF statically uses the
+	 * lower 12 bits of doorbell addresses for routing,
+	 * based on settings in registers like
+	 * SDMA0_DOORBELL_RANGE etc.
+	 * In order to route a doorbell to the CP engine, the lower
+	 * 12 bits of its address have to be outside the range
+ * set for SDMA, VCN, and IH blocks.
*/
- gpu_resources.reserved_doorbell_mask = 0x1e0;
- gpu_resources.reserved_doorbell_val = 0x0e0;
+ if (adev->asic_type >= CHIP_VEGA10) {
+ gpu_resources.non_cp_doorbells_start =
+ adev->doorbell_index.first_non_cp;
+ gpu_resources.non_cp_doorbells_end =
+ adev->doorbell_index.last_non_cp;
+ }
kgd2kfd_device_init(adev->kfd.dev, &gpu_resources);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index d7b10d79f1de..1921dec3df7a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -204,38 +204,25 @@ void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
}
-/* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence(s) from BO's
+/* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
* reservation object.
*
* @bo: [IN] Remove eviction fence(s) from this BO
- * @ef: [IN] If ef is specified, then this eviction fence is removed if it
+ * @ef: [IN] This eviction fence is removed if it
* is present in the shared list.
- * @ef_list: [OUT] Returns list of eviction fences. These fences are removed
- * from BO's reservation object shared list.
- * @ef_count: [OUT] Number of fences in ef_list.
*
- * NOTE: If called with ef_list, then amdgpu_amdkfd_add_eviction_fence must be
- * called to restore the eviction fences and to avoid memory leak. This is
- * useful for shared BOs.
* NOTE: Must be called with BO reserved i.e. bo->tbo.resv->lock held.
*/
static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
- struct amdgpu_amdkfd_fence *ef,
- struct amdgpu_amdkfd_fence ***ef_list,
- unsigned int *ef_count)
+ struct amdgpu_amdkfd_fence *ef)
{
struct reservation_object *resv = bo->tbo.resv;
struct reservation_object_list *old, *new;
unsigned int i, j, k;
- if (!ef && !ef_list)
+ if (!ef)
return -EINVAL;
- if (ef_list) {
- *ef_list = NULL;
- *ef_count = 0;
- }
-
old = reservation_object_get_list(resv);
if (!old)
return 0;
@@ -254,8 +241,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
f = rcu_dereference_protected(old->shared[i],
reservation_object_held(resv));
- if ((ef && f->context == ef->base.context) ||
- (!ef && to_amdgpu_amdkfd_fence(f)))
+ if (f->context == ef->base.context)
RCU_INIT_POINTER(new->shared[--j], f);
else
RCU_INIT_POINTER(new->shared[k++], f);
@@ -263,21 +249,6 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
new->shared_max = old->shared_max;
new->shared_count = k;
- if (!ef) {
- unsigned int count = old->shared_count - j;
-
- /* Alloc memory for count number of eviction fence pointers.
- * Fill the ef_list array and ef_count
- */
- *ef_list = kcalloc(count, sizeof(**ef_list), GFP_KERNEL);
- *ef_count = count;
-
- if (!*ef_list) {
- kfree(new);
- return -ENOMEM;
- }
- }
-
/* Install the new fence list, seqcount provides the barriers */
preempt_disable();
write_seqcount_begin(&resv->seq);
@@ -291,46 +262,13 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
f = rcu_dereference_protected(new->shared[i],
reservation_object_held(resv));
- if (!ef)
- (*ef_list)[k++] = to_amdgpu_amdkfd_fence(f);
- else
- dma_fence_put(f);
+ dma_fence_put(f);
}
kfree_rcu(old, rcu);
return 0;
}
-/* amdgpu_amdkfd_add_eviction_fence - Adds eviction fence(s) back into BO's
- * reservation object.
- *
- * @bo: [IN] Add eviction fences to this BO
- * @ef_list: [IN] List of eviction fences to be added
- * @ef_count: [IN] Number of fences in ef_list.
- *
- * NOTE: Must call amdgpu_amdkfd_remove_eviction_fence before calling this
- * function.
- */
-static void amdgpu_amdkfd_add_eviction_fence(struct amdgpu_bo *bo,
- struct amdgpu_amdkfd_fence **ef_list,
- unsigned int ef_count)
-{
- int i;
-
- if (!ef_list || !ef_count)
- return;
-
- for (i = 0; i < ef_count; i++) {
- amdgpu_bo_fence(bo, &ef_list[i]->base, true);
- /* Re-adding the fence takes an additional reference. Drop that
- * reference.
- */
- dma_fence_put(&ef_list[i]->base);
- }
-
- kfree(ef_list);
-}
-
static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
bool wait)
{
@@ -346,18 +284,8 @@ static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (ret)
goto validate_fail;
- if (wait) {
- struct amdgpu_amdkfd_fence **ef_list;
- unsigned int ef_count;
-
- ret = amdgpu_amdkfd_remove_eviction_fence(bo, NULL, &ef_list,
- &ef_count);
- if (ret)
- goto validate_fail;
-
- ttm_bo_wait(&bo->tbo, false, false);
- amdgpu_amdkfd_add_eviction_fence(bo, ef_list, ef_count);
- }
+ if (wait)
+ amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
validate_fail:
return ret;
@@ -444,7 +372,6 @@ static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
{
int ret;
struct kfd_bo_va_list *bo_va_entry;
- struct amdgpu_bo *pd = vm->root.base.bo;
struct amdgpu_bo *bo = mem->bo;
uint64_t va = mem->va;
struct list_head *list_bo_va = &mem->bo_va_list;
@@ -484,14 +411,8 @@ static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
*p_bo_va_entry = bo_va_entry;
/* Allocate new page tables if needed and validate
- * them. Clearing of new page tables and validate need to wait
- * on move fences. We don't want that to trigger the eviction
- * fence, so remove it temporarily.
+ * them.
*/
- amdgpu_amdkfd_remove_eviction_fence(pd,
- vm->process_info->eviction_fence,
- NULL, NULL);
-
ret = amdgpu_vm_alloc_pts(adev, vm, va, amdgpu_bo_size(bo));
if (ret) {
pr_err("Failed to allocate pts, err=%d\n", ret);
@@ -504,13 +425,9 @@ static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
goto err_alloc_pts;
}
- /* Add the eviction fence back */
- amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);
-
return 0;
err_alloc_pts:
- amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);
amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va);
list_del(&bo_va_entry->bo_list);
err_vmadd:
@@ -809,24 +726,11 @@ static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
{
struct amdgpu_bo_va *bo_va = entry->bo_va;
struct amdgpu_vm *vm = bo_va->base.vm;
- struct amdgpu_bo *pd = vm->root.base.bo;
- /* Remove eviction fence from PD (and thereby from PTs too as
- * they share the resv. object). Otherwise during PT update
- * job (see amdgpu_vm_bo_update_mapping), eviction fence would
- * get added to job->sync object and job execution would
- * trigger the eviction fence.
- */
- amdgpu_amdkfd_remove_eviction_fence(pd,
- vm->process_info->eviction_fence,
- NULL, NULL);
amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
- /* Add the eviction fence back */
- amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);
-
amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);
return 0;
@@ -1002,7 +906,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
pr_err("validate_pt_pd_bos() failed\n");
goto validate_pd_fail;
}
- ret = ttm_bo_wait(&vm->root.base.bo->tbo, false, false);
+	ret = amdgpu_bo_sync_wait(vm->root.base.bo, AMDGPU_FENCE_OWNER_KFD, false);
if (ret)
goto wait_pd_fail;
amdgpu_bo_fence(vm->root.base.bo,
@@ -1389,8 +1293,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
* attached
*/
amdgpu_amdkfd_remove_eviction_fence(mem->bo,
- process_info->eviction_fence,
- NULL, NULL);
+ process_info->eviction_fence);
pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
mem->va + bo_size * (1 + mem->aql_queue));
@@ -1617,8 +1520,7 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
if (mem->mapped_to_gpu_memory == 0 &&
!amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && !mem->bo->pin_count)
amdgpu_amdkfd_remove_eviction_fence(mem->bo,
- process_info->eviction_fence,
- NULL, NULL);
+ process_info->eviction_fence);
unreserve_out:
unreserve_bo_and_vms(&ctx, false, false);
@@ -1679,7 +1581,7 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
}
amdgpu_amdkfd_remove_eviction_fence(
- bo, mem->process_info->eviction_fence, NULL, NULL);
+ bo, mem->process_info->eviction_fence);
list_del_init(&mem->validate_list.head);
if (size)
@@ -1945,16 +1847,6 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
amdgpu_sync_create(&sync);
- /* Avoid triggering eviction fences when unmapping invalid
- * userptr BOs (waits for all fences, doesn't use
- * FENCE_OWNER_VM)
- */
- list_for_each_entry(peer_vm, &process_info->vm_list_head,
- vm_list_node)
- amdgpu_amdkfd_remove_eviction_fence(peer_vm->root.base.bo,
- process_info->eviction_fence,
- NULL, NULL);
-
ret = process_validate_vms(process_info);
if (ret)
goto unreserve_out;
@@ -2015,10 +1907,6 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
ret = process_update_pds(process_info, &sync);
unreserve_out:
- list_for_each_entry(peer_vm, &process_info->vm_list_head,
- vm_list_node)
- amdgpu_bo_fence(peer_vm->root.base.bo,
- &process_info->eviction_fence->base, true);
ttm_eu_backoff_reservation(&ticket, &resv_list);
amdgpu_sync_wait(&sync, false);
amdgpu_sync_free(&sync);
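The rewritten amdgpu_amdkfd_remove_eviction_fence() above keeps only the single-context filtering path. A standalone model of its two-pointer partition over the shared-fence list, using plain integers for fence contexts:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* Model: keep every fence whose context differs from ef_context,
 * mirroring the j/k walk in amdgpu_amdkfd_remove_eviction_fence().
 * Matching fences go to the tail and are dropped from the kept list. */
static size_t remove_context(uint64_t *ctx, size_t n, uint64_t ef_context)
{
	size_t j = n, k = 0;
	uint64_t *tmp = malloc(n * sizeof(*tmp));

	if (!tmp)
		return n;
	for (size_t i = 0; i < n; i++) {
		if (ctx[i] == ef_context)
			tmp[--j] = ctx[i];	/* matching fence: removed */
		else
			tmp[k++] = ctx[i];	/* kept */
	}
	for (size_t i = 0; i < k; i++)
		ctx[i] = tmp[i];
	free(tmp);
	return k;				/* new shared_count */
}

int main(void)
{
	uint64_t fences[] = { 1, 7, 3, 7, 5 };
	size_t n = remove_context(fences, 5, 7);

	for (size_t i = 0; i < n; i++)
		printf("%llu ", (unsigned long long)fences[i]); /* 1 3 5 */
	printf("\n");
	return 0;
}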
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index d85184b5b35c..7b526593eb77 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -124,6 +124,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
struct drm_sched_rq *rqs[AMDGPU_MAX_RINGS];
unsigned num_rings;
+ unsigned num_rqs = 0;
switch (i) {
case AMDGPU_HW_IP_GFX:
@@ -166,12 +167,16 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
break;
}
- for (j = 0; j < num_rings; ++j)
- rqs[j] = &rings[j]->sched.sched_rq[priority];
+ for (j = 0; j < num_rings; ++j) {
+ if (!rings[j]->adev)
+ continue;
+
+ rqs[num_rqs++] = &rings[j]->sched.sched_rq[priority];
+ }
for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j)
r = drm_sched_entity_init(&ctx->entities[i][j].entity,
- rqs, num_rings, &ctx->guilty);
+ rqs, num_rqs, &ctx->guilty);
if (r)
goto error_cleanup_entities;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index dd9a4fb9ce39..4ae3ff9a1d4c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -158,9 +158,6 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
while (size) {
uint32_t value;
- if (*pos > adev->rmmio_size)
- goto end;
-
if (read) {
value = RREG32(*pos >> 2);
r = put_user(value, (uint32_t *)buf);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h
index 1cfec06f81d4..68959b923f89 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h
@@ -71,6 +71,8 @@ struct amdgpu_doorbell_index {
uint32_t vce_ring6_7;
} uvd_vce;
};
+ uint32_t first_non_cp;
+ uint32_t last_non_cp;
uint32_t max_assignment;
/* Per engine SDMA doorbell size in dword */
uint32_t sdma_doorbell_range;
@@ -143,6 +145,10 @@ typedef enum _AMDGPU_VEGA20_DOORBELL_ASSIGNMENT
AMDGPU_VEGA20_DOORBELL64_VCE_RING2_3 = 0x18D,
AMDGPU_VEGA20_DOORBELL64_VCE_RING4_5 = 0x18E,
AMDGPU_VEGA20_DOORBELL64_VCE_RING6_7 = 0x18F,
+
+ AMDGPU_VEGA20_DOORBELL64_FIRST_NON_CP = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE0,
+ AMDGPU_VEGA20_DOORBELL64_LAST_NON_CP = AMDGPU_VEGA20_DOORBELL64_VCE_RING6_7,
+
AMDGPU_VEGA20_DOORBELL_MAX_ASSIGNMENT = 0x18F,
AMDGPU_VEGA20_DOORBELL_INVALID = 0xFFFF
} AMDGPU_VEGA20_DOORBELL_ASSIGNMENT;
@@ -222,6 +228,9 @@ typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT
AMDGPU_DOORBELL64_VCE_RING4_5 = 0xFE,
AMDGPU_DOORBELL64_VCE_RING6_7 = 0xFF,
+ AMDGPU_DOORBELL64_FIRST_NON_CP = AMDGPU_DOORBELL64_sDMA_ENGINE0,
+ AMDGPU_DOORBELL64_LAST_NON_CP = AMDGPU_DOORBELL64_VCE_RING6_7,
+
AMDGPU_DOORBELL64_MAX_ASSIGNMENT = 0xFF,
AMDGPU_DOORBELL64_INVALID = 0xFFFF
} AMDGPU_DOORBELL64_ASSIGNMENT;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index 1c4595562f8f..344967df3137 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -184,61 +184,6 @@ u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
return vrefresh;
}
-void amdgpu_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
- u32 *p, u32 *u)
-{
- u32 b_c = 0;
- u32 i_c;
- u32 tmp;
-
- i_c = (i * r_c) / 100;
- tmp = i_c >> p_b;
-
- while (tmp) {
- b_c++;
- tmp >>= 1;
- }
-
- *u = (b_c + 1) / 2;
- *p = i_c / (1 << (2 * (*u)));
-}
-
-int amdgpu_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
-{
- u32 k, a, ah, al;
- u32 t1;
-
- if ((fl == 0) || (fh == 0) || (fl > fh))
- return -EINVAL;
-
- k = (100 * fh) / fl;
- t1 = (t * (k - 100));
- a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
- a = (a + 5) / 10;
- ah = ((a * t) + 5000) / 10000;
- al = a - ah;
-
- *th = t - ah;
- *tl = t + al;
-
- return 0;
-}
-
-bool amdgpu_is_uvd_state(u32 class, u32 class2)
-{
- if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
- return true;
- if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
- return true;
- if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
- return true;
- if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
- return true;
- if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
- return true;
- return false;
-}
-
bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
{
switch (sensor) {
@@ -949,39 +894,6 @@ enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
return AMDGPU_PCIE_GEN1;
}
-u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev,
- u16 asic_lanes,
- u16 default_lanes)
-{
- switch (asic_lanes) {
- case 0:
- default:
- return default_lanes;
- case 1:
- return 1;
- case 2:
- return 2;
- case 4:
- return 4;
- case 8:
- return 8;
- case 12:
- return 12;
- case 16:
- return 16;
- }
-}
-
-u8 amdgpu_encode_pci_lane_width(u32 lanes)
-{
- u8 encoded_lanes[] = { 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 };
-
- if (lanes > 16)
- return 0;
-
- return encoded_lanes[lanes];
-}
-
struct amd_vce_state*
amdgpu_get_vce_clock_state(void *handle, u32 idx)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index 2f61e9edb1c1..e871e022c129 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -486,10 +486,6 @@ void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev);
u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev);
void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev);
-bool amdgpu_is_uvd_state(u32 class, u32 class2);
-void amdgpu_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
- u32 *p, u32 *u);
-int amdgpu_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th);
bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor);
@@ -505,11 +501,6 @@ enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
enum amdgpu_pcie_gen asic_gen,
enum amdgpu_pcie_gen default_gen);
-u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev,
- u16 asic_lanes,
- u16 default_lanes);
-u8 amdgpu_encode_pci_lane_width(u32 lanes);
-
struct amd_vce_state*
amdgpu_get_vce_clock_state(void *handle, u32 idx);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index a9b3a4a9726a..8a0732088640 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -73,9 +73,10 @@
* - 3.27.0 - Add new chunk to to AMDGPU_CS to enable BO_LIST creation.
* - 3.28.0 - Add AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES
* - 3.29.0 - Add AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID
+ * - 3.30.0 - Add AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE.
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 29
+#define KMS_DRIVER_MINOR 30
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit = 0;
@@ -1179,6 +1180,22 @@ static const struct file_operations amdgpu_driver_kms_fops = {
#endif
};
+int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv)
+{
+ struct drm_file *file;
+
+ if (!filp)
+ return -EINVAL;
+
+	if (filp->f_op != &amdgpu_driver_kms_fops)
+		return -EINVAL;
+
+ file = filp->private_data;
+ *fpriv = file->driver_priv;
+ return 0;
+}
+
static bool
amdgpu_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe,
bool in_vblank_irq, int *vpos, int *hpos,
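amdgpu_file_to_fpriv() refuses files whose f_op table is not the driver's own before touching private_data. A standalone model of that identity check, with hypothetical types standing in for struct file:

#include <stdio.h>

/* Model of the f_op identity check: a file is "ours" only if its ops
 * table pointer matches the driver's known table. */
struct file_ops { const char *name; };
struct file { const struct file_ops *f_op; void *private_data; };

static const struct file_ops amdgpu_fops = { "amdgpu" };
static const struct file_ops other_fops  = { "other"  };

static int file_to_priv(struct file *filp, void **priv)
{
	if (!filp || filp->f_op != &amdgpu_fops)
		return -1;		/* -EINVAL in the kernel */
	*priv = filp->private_data;
	return 0;
}

int main(void)
{
	int data = 42;
	struct file ours = { &amdgpu_fops, &data };
	struct file theirs = { &other_fops, &data };
	void *p;

	printf("ours:   %d\n", file_to_priv(&ours, &p));   /* 0  */
	printf("theirs: %d\n", file_to_priv(&theirs, &p)); /* -1 */
	return 0;
}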
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index d0a5db777b6d..1c50be3ab8a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -140,9 +140,7 @@ void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
 * Interrupt handler (VI), walk the IH ring.
* Returns irq process return code.
*/
-int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
- void (*callback)(struct amdgpu_device *adev,
- struct amdgpu_ih_ring *ih))
+int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
{
u32 wptr;
@@ -162,7 +160,7 @@ restart_ih:
rmb();
while (ih->rptr != wptr) {
- callback(adev, ih);
+ amdgpu_irq_dispatch(adev, ih);
ih->rptr &= ih->ptr_mask;
}
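With the callback parameter folded away, amdgpu_ih_process() walks the ring and dispatches directly. A runnable model of that read-pointer walk (the stand-in dispatch() advances rptr, as the real decode does):

#include <stdio.h>
#include <stdint.h>

#define RING_SIZE 8			/* power of two, like the IH ring */

struct ih_ring {
	uint32_t ring[RING_SIZE];
	uint32_t rptr, wptr;
	uint32_t ptr_mask;		/* RING_SIZE - 1 */
};

/* Stand-in for amdgpu_irq_dispatch(): handle one entry and advance
 * the read pointer (the real IV decode advances it too). */
static void dispatch(struct ih_ring *ih)
{
	printf("IV entry %u\n", ih->ring[ih->rptr]);
	ih->rptr++;
}

/* Model of the rewritten amdgpu_ih_process() loop: drain entries
 * between rptr and wptr, masking rptr so it wraps with the ring. */
static void ih_process(struct ih_ring *ih)
{
	while (ih->rptr != ih->wptr) {
		dispatch(ih);
		ih->rptr &= ih->ptr_mask;
	}
}

int main(void)
{
	struct ih_ring ih = { .ring = { 10, 11, 12 }, .rptr = 0,
			      .wptr = 3, .ptr_mask = RING_SIZE - 1 };

	ih_process(&ih);
	return 0;
}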
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index 1ccb1831382a..113a1ba13d4a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -69,8 +69,6 @@ struct amdgpu_ih_funcs {
int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
unsigned ring_size, bool use_bus_addr);
void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
-int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
- void (*callback)(struct amdgpu_device *adev,
- struct amdgpu_ih_ring *ih));
+int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 8bfb3dab46f7..af4c3b1af322 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -131,29 +131,6 @@ void amdgpu_irq_disable_all(struct amdgpu_device *adev)
}
/**
- * amdgpu_irq_callback - callback from the IH ring
- *
- * @adev: amdgpu device pointer
- * @ih: amdgpu ih ring
- *
- * Callback from IH ring processing to handle the entry at the current position
- * and advance the read pointer.
- */
-static void amdgpu_irq_callback(struct amdgpu_device *adev,
- struct amdgpu_ih_ring *ih)
-{
- u32 ring_index = ih->rptr >> 2;
- struct amdgpu_iv_entry entry;
-
- entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
- amdgpu_ih_decode_iv(adev, &entry);
-
- trace_amdgpu_iv(ih - &adev->irq.ih, &entry);
-
- amdgpu_irq_dispatch(adev, &entry);
-}
-
-/**
* amdgpu_irq_handler - IRQ handler
*
* @irq: IRQ number (unused)
@@ -170,7 +147,7 @@ irqreturn_t amdgpu_irq_handler(int irq, void *arg)
struct amdgpu_device *adev = dev->dev_private;
irqreturn_t ret;
- ret = amdgpu_ih_process(adev, &adev->irq.ih, amdgpu_irq_callback);
+ ret = amdgpu_ih_process(adev, &adev->irq.ih);
if (ret == IRQ_HANDLED)
pm_runtime_mark_last_busy(dev->dev);
return ret;
@@ -188,7 +165,7 @@ static void amdgpu_irq_handle_ih1(struct work_struct *work)
struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
irq.ih1_work);
- amdgpu_ih_process(adev, &adev->irq.ih1, amdgpu_irq_callback);
+ amdgpu_ih_process(adev, &adev->irq.ih1);
}
/**
@@ -203,7 +180,7 @@ static void amdgpu_irq_handle_ih2(struct work_struct *work)
struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
irq.ih2_work);
- amdgpu_ih_process(adev, &adev->irq.ih2, amdgpu_irq_callback);
+ amdgpu_ih_process(adev, &adev->irq.ih2);
}
/**
@@ -394,14 +371,23 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev,
* Dispatches IRQ to IP blocks.
*/
void amdgpu_irq_dispatch(struct amdgpu_device *adev,
- struct amdgpu_iv_entry *entry)
+ struct amdgpu_ih_ring *ih)
{
- unsigned client_id = entry->client_id;
- unsigned src_id = entry->src_id;
+ u32 ring_index = ih->rptr >> 2;
+ struct amdgpu_iv_entry entry;
+ unsigned client_id, src_id;
struct amdgpu_irq_src *src;
bool handled = false;
int r;
+ entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
+ amdgpu_ih_decode_iv(adev, &entry);
+
+ trace_amdgpu_iv(ih - &adev->irq.ih, &entry);
+
+ client_id = entry.client_id;
+ src_id = entry.src_id;
+
if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) {
DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);
@@ -416,7 +402,7 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
client_id, src_id);
} else if ((src = adev->irq.client[client_id].sources[src_id])) {
- r = src->funcs->process(adev, src, entry);
+ r = src->funcs->process(adev, src, &entry);
if (r < 0)
DRM_ERROR("error processing interrupt (%d)\n", r);
else if (r)
@@ -428,7 +414,7 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
/* Send it to amdkfd as well if it isn't already handled */
if (!handled)
- amdgpu_amdkfd_interrupt(adev, entry->iv_entry);
+ amdgpu_amdkfd_interrupt(adev, entry.iv_entry);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
index c27decfda494..c718e94a55c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
@@ -108,7 +108,7 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev,
unsigned client_id, unsigned src_id,
struct amdgpu_irq_src *source);
void amdgpu_irq_dispatch(struct amdgpu_device *adev,
- struct amdgpu_iv_entry *entry);
+ struct amdgpu_ih_ring *ih);
int amdgpu_irq_update(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned type);
int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index bc62bf41b7e9..e860412043bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -207,11 +207,12 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
if (!r) {
acpi_status = amdgpu_acpi_init(adev);
if (acpi_status)
- dev_dbg(&dev->pdev->dev,
+ dev_dbg(&dev->pdev->dev,
"Error during ACPI methods call\n");
}
if (amdgpu_device_is_px(dev)) {
+ dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
pm_runtime_use_autosuspend(dev->dev);
pm_runtime_set_autosuspend_delay(dev->dev, 5000);
pm_runtime_set_active(dev->dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 698fd8a2f775..889e443eeee7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -406,6 +406,7 @@ struct amdgpu_crtc {
struct amdgpu_flip_work *pflip_works;
enum amdgpu_flip_status pflip_status;
int deferred_flip_completion;
+ u64 last_flip_vblank;
/* pll sharing */
struct amdgpu_atom_ss ss;
bool ss_enabled;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index fd9c4beeaaa4..ec9e45004bff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1285,6 +1285,30 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
}
/**
+ * amdgpu_bo_sync_wait - Wait for BO reservation fences
+ *
+ * @bo: buffer object
+ * @owner: fence owner
+ * @intr: Whether the wait is interruptible
+ *
+ * Returns:
+ * 0 on success, errno otherwise.
+ */
+int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct amdgpu_sync sync;
+ int r;
+
+ amdgpu_sync_create(&sync);
+ amdgpu_sync_resv(adev, &sync, bo->tbo.resv, owner, false);
+ r = amdgpu_sync_wait(&sync, intr);
+ amdgpu_sync_free(&sync);
+
+ return r;
+}
+
+/**
* amdgpu_bo_gpu_offset - return GPU offset of bo
* @bo: amdgpu object for which we query the offset
*
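amdgpu_bo_sync_wait() exists so callers can wait on a BO's fences while skipping those tagged with a given owner. A loose standalone model of that filtering (the kernel's rules in amdgpu_sync_resv() are more involved; this only shows why waiting with AMDGPU_FENCE_OWNER_KFD cannot trip the KFD eviction fence):

#include <stdio.h>

enum owner { OWNER_UNDEFINED, OWNER_VM, OWNER_KFD };

struct fence { enum owner owner; int signaled; };

/* Model of amdgpu_bo_sync_wait(): wait on every reservation fence
 * except those tagged with the given owner; the KFD eviction fence
 * stays untouched, so waiting cannot trigger an eviction. */
static int sync_wait(const struct fence *fences, int n, enum owner skip)
{
	for (int i = 0; i < n; i++) {
		if (fences[i].owner == skip)
			continue;	/* filtered out, never waited on */
		if (!fences[i].signaled)
			return -1;	/* would block here in the kernel */
	}
	return 0;
}

int main(void)
{
	struct fence resv[] = {
		{ OWNER_UNDEFINED, 1 },	/* move fence, already signaled */
		{ OWNER_KFD, 0 },	/* eviction fence, skipped */
	};

	printf("wait: %d\n", sync_wait(resv, 2, OWNER_KFD)); /* 0 */
	return 0;
}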
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 9291c2f837e9..220a6a7b1bc1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -266,6 +266,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
bool shared);
+int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
int amdgpu_bo_validate(struct amdgpu_bo *bo);
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index 1cafe8d83a4d..0767a93e4d91 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -54,16 +54,20 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
enum drm_sched_priority priority)
{
struct file *filp = fget(fd);
- struct drm_file *file;
struct amdgpu_fpriv *fpriv;
struct amdgpu_ctx *ctx;
uint32_t id;
+ int r;
if (!filp)
return -EINVAL;
- file = filp->private_data;
- fpriv = file->driver_priv;
+ r = amdgpu_file_to_fpriv(filp, &fpriv);
+ if (r) {
+ fput(filp);
+ return r;
+ }
+
idr_for_each_entry(&fpriv->ctx_mgr.ctx_handles, ctx, id)
amdgpu_ctx_priority_override(ctx, priority);
@@ -72,6 +76,39 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
return 0;
}
+static int amdgpu_sched_context_priority_override(struct amdgpu_device *adev,
+ int fd,
+ unsigned ctx_id,
+ enum drm_sched_priority priority)
+{
+ struct file *filp = fget(fd);
+ struct amdgpu_fpriv *fpriv;
+ struct amdgpu_ctx *ctx;
+ int r;
+
+ if (!filp)
+ return -EINVAL;
+
+ r = amdgpu_file_to_fpriv(filp, &fpriv);
+ if (r) {
+ fput(filp);
+ return r;
+ }
+
+ ctx = amdgpu_ctx_get(fpriv, ctx_id);
+
+ if (!ctx) {
+ fput(filp);
+ return -EINVAL;
+ }
+
+ amdgpu_ctx_priority_override(ctx, priority);
+ amdgpu_ctx_put(ctx);
+ fput(filp);
+
+ return 0;
+}
+
int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
@@ -81,7 +118,7 @@ int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
int r;
priority = amdgpu_to_sched_priority(args->in.priority);
- if (args->in.flags || priority == DRM_SCHED_PRIORITY_INVALID)
+ if (priority == DRM_SCHED_PRIORITY_INVALID)
return -EINVAL;
switch (args->in.op) {
@@ -90,6 +127,12 @@ int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
args->in.fd,
priority);
break;
+ case AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE:
+ r = amdgpu_sched_context_priority_override(adev,
+ args->in.fd,
+ args->in.ctx_id,
+ priority);
+ break;
default:
DRM_ERROR("Invalid sched op specified: %d\n", args->in.op);
r = -EINVAL;
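A hedged userspace sketch of driving the new op; the union drm_amdgpu_sched field names (op, fd, ctx_id, priority) are assumed from the matching uapi change, which is not shown in these hunks:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <amdgpu_drm.h>		/* assumed: libdrm's uapi header */

/* Sketch only: override the priority of a single context (ctx_id) that
 * lives on another process's DRM fd (victim_fd), issued through a
 * privileged master fd. */
static int override_ctx_priority(int master_fd, int victim_fd,
				 uint32_t ctx_id, int32_t priority)
{
	union drm_amdgpu_sched args;

	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE;
	args.in.fd = victim_fd;		/* DRM fd owning the context */
	args.in.ctx_id = ctx_id;	/* single context to override */
	args.in.priority = priority;	/* e.g. AMDGPU_CTX_PRIORITY_HIGH */

	return ioctl(master_fd, DRM_IOCTL_AMDGPU_SCHED, &args);
}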
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 7cd2336e29ff..ead851413c0a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -652,12 +652,14 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
struct ttm_bo_global *glob = adev->mman.bdev.glob;
struct amdgpu_vm_bo_base *bo_base;
+#if 0
if (vm->bulk_moveable) {
spin_lock(&glob->lru_lock);
ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
spin_unlock(&glob->lru_lock);
return;
}
+#endif
memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
@@ -698,8 +700,6 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_vm_bo_base *bo_base, *tmp;
int r = 0;
- vm->bulk_moveable &= list_empty(&vm->evicted);
-
list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
struct amdgpu_bo *bo = bo_base->bo;
@@ -828,7 +828,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
WARN_ON(job->ibs[0].length_dw > 64);
r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
- AMDGPU_FENCE_OWNER_UNDEFINED, false);
+ AMDGPU_FENCE_OWNER_KFD, false);
if (r)
goto error_free;
@@ -1332,31 +1332,6 @@ static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
}
}
-
-/**
- * amdgpu_vm_wait_pd - Wait for PT BOs to be free.
- *
- * @adev: amdgpu_device pointer
- * @vm: related vm
- * @owner: fence owner
- *
- * Returns:
- * 0 on success, errno otherwise.
- */
-static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- void *owner)
-{
- struct amdgpu_sync sync;
- int r;
-
- amdgpu_sync_create(&sync);
- amdgpu_sync_resv(adev, &sync, vm->root.base.bo->tbo.resv, owner, false);
- r = amdgpu_sync_wait(&sync, true);
- amdgpu_sync_free(&sync);
-
- return r;
-}
-
/**
* amdgpu_vm_update_func - helper to call update function
*
@@ -1451,7 +1426,8 @@ restart:
params.adev = adev;
if (vm->use_cpu_for_update) {
- r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
+ r = amdgpu_bo_sync_wait(vm->root.base.bo,
+ AMDGPU_FENCE_OWNER_VM, true);
if (unlikely(r))
return r;
@@ -1772,9 +1748,9 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
params.adev = adev;
params.vm = vm;
- /* sync to everything on unmapping */
+ /* sync to everything except eviction fences on unmapping */
if (!(flags & AMDGPU_PTE_VALID))
- owner = AMDGPU_FENCE_OWNER_UNDEFINED;
+ owner = AMDGPU_FENCE_OWNER_KFD;
if (vm->use_cpu_for_update) {
/* params.src is used as flag to indicate system Memory */
@@ -1784,7 +1760,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
/* Wait for PT BOs to be idle. PTs share the same resv. object
* as the root PD BO
*/
- r = amdgpu_vm_wait_pd(adev, vm, owner);
+ r = amdgpu_bo_sync_wait(vm->root.base.bo, owner, true);
if (unlikely(r))
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index db443ec53d3a..bea32f076b91 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -2980,7 +2980,7 @@ static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- unsigned long flags;
+ unsigned long flags;
unsigned crtc_id;
struct amdgpu_crtc *amdgpu_crtc;
struct amdgpu_flip_work *works;
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index b11a1c17a7f2..73851ebb3833 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -266,7 +266,8 @@ flr_done:
}
/* Trigger recovery for world switch failure if no TDR */
- if (amdgpu_device_should_recover_gpu(adev))
+	if (amdgpu_device_should_recover_gpu(adev) &&
+	    amdgpu_lockup_timeout == MAX_SCHEDULE_TIMEOUT)
amdgpu_device_gpu_recover(adev, NULL);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index 221f26e50322..c69d51598cfe 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -32,7 +32,7 @@
static u32 nbio_v7_4_get_rev_id(struct amdgpu_device *adev)
{
- u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
+ u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
tmp &= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
tmp >>= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 127b85983e8f..c816e55d43a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -128,7 +128,7 @@ static const struct soc15_reg_golden golden_settings_sdma0_4_2_init[] = {
static const struct soc15_reg_golden golden_settings_sdma0_4_2[] =
{
- SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
@@ -158,7 +158,7 @@ static const struct soc15_reg_golden golden_settings_sdma0_4_2[] =
};
static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = {
- SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 79c1a9bbcc21..9d8df68893b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -1436,7 +1436,7 @@ static int si_common_early_init(void *handle)
AMD_CG_SUPPORT_UVD_MGCG |
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_HDP_MGCG;
- adev->pg_flags = 0;
+ adev->pg_flags = 0;
adev->external_rev_id = (adev->rev_id == 0) ? 1 :
(adev->rev_id == 1) ? 5 : 6;
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index da58040fdbdc..41e01a7f57a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -6216,10 +6216,12 @@ static void si_request_link_speed_change_before_state_change(struct amdgpu_devic
si_pi->force_pcie_gen = AMDGPU_PCIE_GEN2;
if (current_link_speed == AMDGPU_PCIE_GEN2)
break;
+ /* fall through */
case AMDGPU_PCIE_GEN2:
if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
break;
#endif
+ /* fall through */
default:
si_pi->force_pcie_gen = si_get_current_pcie_speed(adev);
break;
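These comments are the convention GCC's -Wimplicit-fallthrough recognizes as deliberate fall-through. A minimal standalone illustration:

#include <stdio.h>

/* With -Wimplicit-fallthrough, GCC accepts a "fall through" comment
 * (or, in newer kernels, the fallthrough pseudo-keyword) as intent and
 * warns on any unannotated case that drops into the next one. */
static int gens_at_or_below(int gen)
{
	int n = 0;

	switch (gen) {
	case 3:
		n++;
		/* fall through */
	case 2:
		n++;
		/* fall through */
	case 1:
		n++;
		break;
	default:
		break;
	}
	return n;
}

int main(void)
{
	printf("%d\n", gens_at_or_below(3)); /* 3 */
	return 0;
}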
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
index 4b5d60ea3e78..a8e92638a2e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
@@ -81,6 +81,10 @@ void vega10_doorbell_index_init(struct amdgpu_device *adev)
adev->doorbell_index.uvd_vce.vce_ring2_3 = AMDGPU_DOORBELL64_VCE_RING2_3;
adev->doorbell_index.uvd_vce.vce_ring4_5 = AMDGPU_DOORBELL64_VCE_RING4_5;
adev->doorbell_index.uvd_vce.vce_ring6_7 = AMDGPU_DOORBELL64_VCE_RING6_7;
+
+ adev->doorbell_index.first_non_cp = AMDGPU_DOORBELL64_FIRST_NON_CP;
+ adev->doorbell_index.last_non_cp = AMDGPU_DOORBELL64_LAST_NON_CP;
+
/* In unit of dword doorbell */
adev->doorbell_index.max_assignment = AMDGPU_DOORBELL64_MAX_ASSIGNMENT << 1;
adev->doorbell_index.sdma_doorbell_range = 4;
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
index 53716c593b2b..0db84386252a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
@@ -85,6 +85,10 @@ void vega20_doorbell_index_init(struct amdgpu_device *adev)
adev->doorbell_index.uvd_vce.vce_ring2_3 = AMDGPU_VEGA20_DOORBELL64_VCE_RING2_3;
adev->doorbell_index.uvd_vce.vce_ring4_5 = AMDGPU_VEGA20_DOORBELL64_VCE_RING4_5;
adev->doorbell_index.uvd_vce.vce_ring6_7 = AMDGPU_VEGA20_DOORBELL64_VCE_RING6_7;
+
+ adev->doorbell_index.first_non_cp = AMDGPU_VEGA20_DOORBELL64_FIRST_NON_CP;
+ adev->doorbell_index.last_non_cp = AMDGPU_VEGA20_DOORBELL64_LAST_NON_CP;
+
adev->doorbell_index.max_assignment = AMDGPU_VEGA20_DOORBELL_MAX_ASSIGNMENT << 1;
adev->doorbell_index.sdma_doorbell_range = 20;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 8372556b52eb..c6c9530e704e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -134,12 +134,18 @@ static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
*/
q->doorbell_id = q->properties.queue_id;
} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
- /* For SDMA queues on SOC15, use static doorbell
- * assignments based on the engine and queue.
+		/* For SDMA queues on SOC15 with 8-byte doorbells, use static
+		 * doorbell assignments based on the engine and queue id.
+		 * The doorbell index distance between RLC (2*i) and (2*i+1)
+		 * for an SDMA engine is 512.
*/
- q->doorbell_id = dev->shared_resources.sdma_doorbell
- [q->properties.sdma_engine_id]
- [q->properties.sdma_queue_id];
+ uint32_t *idx_offset =
+ dev->shared_resources.sdma_doorbell_idx;
+
+ q->doorbell_id = idx_offset[q->properties.sdma_engine_id]
+ + (q->properties.sdma_queue_id & 1)
+ * KFD_QUEUE_DOORBELL_MIRROR_OFFSET
+ + (q->properties.sdma_queue_id >> 1);
} else {
/* For CP queues on SOC15 reserve a free doorbell ID */
unsigned int found;
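A runnable model of the static assignment above, with hypothetical per-engine base indices; the arithmetic is the point: odd queue ids land KFD_QUEUE_DOORBELL_MIRROR_OFFSET (one doorbell page) away, and even/odd pairs share (id >> 1):

#include <stdio.h>
#include <stdint.h>

#define KFD_QUEUE_DOORBELL_MIRROR_OFFSET 512

/* Model of the static SDMA doorbell assignment: engine base from
 * idx_offset[], odd queues offset by one page of 8-byte doorbells. */
static uint32_t sdma_doorbell_id(const uint32_t *idx_offset,
				 uint32_t engine_id, uint32_t queue_id)
{
	return idx_offset[engine_id]
	     + (queue_id & 1) * KFD_QUEUE_DOORBELL_MIRROR_OFFSET
	     + (queue_id >> 1);
}

int main(void)
{
	/* Hypothetical per-engine base indices, just for illustration. */
	uint32_t idx_offset[2] = { 0xF0, 0xF8 };

	for (uint32_t q = 0; q < 4; q++)
		printf("engine 0 queue %u -> 0x%03x\n", q,
		       sdma_doorbell_id(idx_offset, 0, q));
	/* 0xF0, 0x2F0, 0xF1, 0x2F1 */
	return 0;
}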
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 12b66330fc6d..0eeee3c6d6dc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -97,17 +97,29 @@
#define KFD_CWSR_TBA_TMA_SIZE (PAGE_SIZE * 2)
#define KFD_CWSR_TMA_OFFSET PAGE_SIZE
+#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE \
+ (KFD_MAX_NUM_OF_PROCESSES * \
+ KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
+
+#define KFD_KERNEL_QUEUE_SIZE 2048
+
+/*
+ * 512 = 0x200
+ * The doorbell index distance between SDMA RLC (2*i) and (2*i+1) in the
+ * same SDMA engine on SOC15, which has 8-byte doorbells for SDMA.
+ * A distance of 512 8-byte doorbells (i.e. one page away) ensures that the
+ * SDMA RLC (2*i+1) doorbells (in terms of the lower 12-bit address) lie
+ * exactly within the OFFSET and SIZE set in registers like
+ * BIF_SDMA0_DOORBELL_RANGE.
+ */
+#define KFD_QUEUE_DOORBELL_MIRROR_OFFSET 512
+
/*
* Kernel module parameter to specify maximum number of supported queues per
* device
*/
extern int max_num_of_queues_per_device;
-#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE \
- (KFD_MAX_NUM_OF_PROCESSES * \
- KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
-
-#define KFD_KERNEL_QUEUE_SIZE 2048
/* Kernel module parameter to specify the scheduling policy */
extern int sched_policy;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 80b36e860a0a..4bdae78bab8e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -607,13 +607,17 @@ static int init_doorbell_bitmap(struct qcm_process_device *qpd,
if (!qpd->doorbell_bitmap)
return -ENOMEM;
- /* Mask out any reserved doorbells */
- for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS; i++)
- if ((dev->shared_resources.reserved_doorbell_mask & i) ==
- dev->shared_resources.reserved_doorbell_val) {
+ /* Mask out doorbells reserved for SDMA, IH, and VCN on SOC15. */
+ for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS / 2; i++) {
+		if (i >= dev->shared_resources.non_cp_doorbells_start &&
+		    i <= dev->shared_resources.non_cp_doorbells_end) {
set_bit(i, qpd->doorbell_bitmap);
- pr_debug("reserved doorbell 0x%03x\n", i);
+ set_bit(i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
+ qpd->doorbell_bitmap);
+ pr_debug("reserved doorbell 0x%03x and 0x%03x\n", i,
+ i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET);
}
+ }
return 0;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 3a6f595f295e..2f26581b93ff 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -303,12 +303,11 @@ static void dm_pflip_high_irq(void *interrupt_params)
return;
}
+ /* Update to correct count(s) if racing with vblank irq */
+ amdgpu_crtc->last_flip_vblank = drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
/* wake up userspace */
if (amdgpu_crtc->event) {
- /* Update to correct count(s) if racing with vblank irq */
- drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
-
drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event);
/* page flip completed. clean up */
@@ -786,12 +785,13 @@ static int dm_suspend(void *handle)
struct amdgpu_display_manager *dm = &adev->dm;
int ret = 0;
+ WARN_ON(adev->dm.cached_state);
+ adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
+
s3_handle_mst(adev->ddev, true);
amdgpu_dm_irq_suspend(adev);
- WARN_ON(adev->dm.cached_state);
- adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
@@ -3790,7 +3790,6 @@ static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
* check will succeed, and let DC implement proper check
*/
static const uint32_t rgb_formats[] = {
- DRM_FORMAT_RGB888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_RGBA8888,
@@ -4646,6 +4645,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
struct amdgpu_bo *abo;
uint64_t tiling_flags, dcc_address;
uint32_t target, target_vblank;
+ uint64_t last_flip_vblank;
+ bool vrr_active = acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE;
struct {
struct dc_surface_update surface_updates[MAX_SURFACES];
@@ -4678,10 +4679,9 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
struct dc_plane_state *dc_plane;
struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
- if (plane->type == DRM_PLANE_TYPE_CURSOR) {
- handle_cursor_update(plane, old_plane_state);
+ /* Cursor plane is handled after stream updates */
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
continue;
- }
if (!fb || !crtc || pcrtc != crtc)
continue;
@@ -4712,14 +4712,21 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
*/
abo = gem_to_amdgpu_bo(fb->obj[0]);
r = amdgpu_bo_reserve(abo, true);
- if (unlikely(r != 0)) {
+ if (unlikely(r != 0))
DRM_ERROR("failed to reserve buffer before flip\n");
- WARN_ON(1);
- }
- /* Wait for all fences on this FB */
- WARN_ON(reservation_object_wait_timeout_rcu(abo->tbo.resv, true, false,
- MAX_SCHEDULE_TIMEOUT) < 0);
+ /*
+ * Wait for all fences on this FB. Do limited wait to avoid
+ * deadlock during GPU reset when this fence will not signal
+ * but we hold reservation lock for the BO.
+ */
+ r = reservation_object_wait_timeout_rcu(abo->tbo.resv,
+ true, false,
+ msecs_to_jiffies(5000));
+ if (unlikely(r == 0))
+			DRM_ERROR("Waiting for fences timed out.\n");
+
amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
@@ -4799,7 +4806,31 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
* hopefully eliminating dc_*_update structs in their entirety.
*/
if (flip_count) {
- target = (uint32_t)drm_crtc_vblank_count(pcrtc) + *wait_for_vblank;
+ if (!vrr_active) {
+ /* Use old throttling in non-vrr fixed refresh rate mode
+ * to keep flip scheduling based on target vblank counts
+ * working in a backwards compatible way, e.g., for
+ * clients using the GLX_OML_sync_control extension or
+ * DRI3/Present extension with defined target_msc.
+ */
+ last_flip_vblank = drm_crtc_vblank_count(pcrtc);
+			} else {
+ /* For variable refresh rate mode only:
+ * Get vblank of last completed flip to avoid > 1 vrr
+ * flips per video frame by use of throttling, but allow
+ * flip programming anywhere in the possibly large
+ * variable vrr vblank interval for fine-grained flip
+ * timing control and more opportunity to avoid stutter
+ * on late submission of flips.
+ */
+ spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
+ last_flip_vblank = acrtc_attach->last_flip_vblank;
+ spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
+ }
+
+ target = (uint32_t)last_flip_vblank + *wait_for_vblank;
+
/* Prepare wait for target vblank early - before the fence-waits */
target_vblank = target - (uint32_t)drm_crtc_vblank_count(pcrtc) +
amdgpu_get_vblank_counter_kms(pcrtc->dev, acrtc_attach->crtc_id);
@@ -4874,6 +4905,10 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
mutex_unlock(&dm->dc_lock);
}
+ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ handle_cursor_update(plane, old_plane_state);
+
cleanup:
kfree(flip);
kfree(full);
@@ -5799,14 +5834,13 @@ dm_determine_update_type_for_commit(struct dc *dc,
old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
num_plane = 0;
- if (!new_dm_crtc_state->stream) {
- if (!new_dm_crtc_state->stream && old_dm_crtc_state->stream) {
- update_type = UPDATE_TYPE_FULL;
- goto cleanup;
- }
+ if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
+ update_type = UPDATE_TYPE_FULL;
+ goto cleanup;
+ }
+ if (!new_dm_crtc_state->stream)
continue;
- }
for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
new_plane_crtc = new_plane_state->crtc;
@@ -5817,6 +5851,11 @@ dm_determine_update_type_for_commit(struct dc *dc,
if (plane->type == DRM_PLANE_TYPE_CURSOR)
continue;
+ if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
+ update_type = UPDATE_TYPE_FULL;
+ goto cleanup;
+ }
+
if (!state->allow_modeset)
continue;
@@ -5955,6 +5994,42 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
goto fail;
}
+ /*
+ * Add all primary and overlay planes on the CRTC to the state
+ * whenever a plane is enabled to maintain correct z-ordering
+ * and to enable fast surface updates.
+ */
+ drm_for_each_crtc(crtc, dev) {
+ bool modified = false;
+
+ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ continue;
+
+ if (new_plane_state->crtc == crtc ||
+ old_plane_state->crtc == crtc) {
+ modified = true;
+ break;
+ }
+ }
+
+ if (!modified)
+ continue;
+
+ drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ continue;
+
+ new_plane_state =
+ drm_atomic_get_plane_state(state, plane);
+
+ if (IS_ERR(new_plane_state)) {
+ ret = PTR_ERR(new_plane_state);
+ goto fail;
+ }
+ }
+ }
+
/* Remove exiting planes if they are modified */
for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
ret = dm_update_plane_state(dc, state, plane,
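A small standalone model of the flip-throttling change in this file: the target vblank is a baseline plus wait_for_vblank, and only the baseline differs between the fixed-refresh and VRR paths:

#include <stdio.h>
#include <stdint.h>

/* Fixed refresh keeps the old behavior (baseline = current vblank);
 * VRR uses the vblank of the last completed flip, limiting flips to
 * one per (variable-length) frame while allowing programming anywhere
 * in the long vblank interval. */
static uint32_t flip_target(uint64_t baseline, uint32_t wait_for_vblank)
{
	return (uint32_t)baseline + wait_for_vblank;
}

int main(void)
{
	uint64_t current_vblank = 1000;
	uint64_t last_flip_vblank = 998; /* flip completed 2 frames ago */

	printf("fixed refresh target: %u\n", flip_target(current_vblank, 1));
	printf("vrr target:           %u\n", flip_target(last_flip_vblank, 1));
	return 0;
}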
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index a1c56f29cfeb..fd5266a58297 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -265,6 +265,7 @@ static struct atom_display_object_path_v2 *get_bios_object(
&& id.enum_id == obj_id.enum_id)
return &bp->object_info_tbl.v1_4->display_path[i];
}
+ /* fall through */
case OBJECT_TYPE_CONNECTOR:
case OBJECT_TYPE_GENERIC:
/* Both Generic and Connector Object ID
@@ -277,6 +278,7 @@ static struct atom_display_object_path_v2 *get_bios_object(
&& id.enum_id == obj_id.enum_id)
return &bp->object_info_tbl.v1_4->display_path[i];
}
+ /* fall through */
default:
return NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 52f838442e21..c68fbd55db3c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1138,6 +1138,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
/* pplib is notified if disp_num changed */
dc->hwss.optimize_bandwidth(dc, context);
+ for (i = 0; i < context->stream_count; i++)
+ context->streams[i]->mode_changed = false;
+
dc_release_state(dc->current_state);
dc->current_state = context;
@@ -1623,13 +1626,13 @@ static void commit_planes_do_stream_update(struct dc *dc,
stream_update->adjust->v_total_min,
stream_update->adjust->v_total_max);
- if (stream_update->periodic_vsync_config && pipe_ctx->stream_res.tg->funcs->program_vline_interrupt)
- pipe_ctx->stream_res.tg->funcs->program_vline_interrupt(
- pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, VLINE0, &stream->periodic_vsync_config);
+ if (stream_update->periodic_interrupt0 &&
+ dc->hwss.setup_periodic_interrupt)
+ dc->hwss.setup_periodic_interrupt(pipe_ctx, VLINE0);
- if (stream_update->enhanced_sync_config && pipe_ctx->stream_res.tg->funcs->program_vline_interrupt)
- pipe_ctx->stream_res.tg->funcs->program_vline_interrupt(
- pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, VLINE1, &stream->enhanced_sync_config);
+ if (stream_update->periodic_interrupt1 &&
+ dc->hwss.setup_periodic_interrupt)
+ dc->hwss.setup_periodic_interrupt(pipe_ctx, VLINE1);
if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
stream_update->vrr_infopacket ||
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index a798694992b9..5657cb3a2ad3 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -51,9 +51,19 @@ struct freesync_context {
bool dummy;
};
-union vline_config {
- unsigned int line_number;
- unsigned long long delta_in_ns;
+enum vertical_interrupt_ref_point {
+ START_V_UPDATE = 0,
+ START_V_SYNC,
+ INVALID_POINT
+
+	// For now, only the v_update interrupt is used.
+ //START_V_BLANK,
+ //START_V_ACTIVE
+};
+
+struct periodic_interrupt_config {
+ enum vertical_interrupt_ref_point ref_point;
+ int lines_offset;
};
@@ -106,8 +116,8 @@ struct dc_stream_state {
/* DMCU info */
unsigned int abm_level;
- union vline_config periodic_vsync_config;
- union vline_config enhanced_sync_config;
+ struct periodic_interrupt_config periodic_interrupt0;
+ struct periodic_interrupt_config periodic_interrupt1;
/* from core_stream struct */
struct dc_context *ctx;
@@ -158,8 +168,8 @@ struct dc_stream_update {
struct dc_info_packet *hdr_static_metadata;
unsigned int *abm_level;
- union vline_config *periodic_vsync_config;
- union vline_config *enhanced_sync_config;
+ struct periodic_interrupt_config *periodic_interrupt0;
+ struct periodic_interrupt_config *periodic_interrupt1;
struct dc_crtc_timing_adjust *adjust;
struct dc_info_packet *vrr_infopacket;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
index 01e56f1a9f34..da96229db53a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
@@ -53,6 +53,27 @@
#define MCP_DISABLE_ABM_IMMEDIATELY 255
+static bool dce_abm_set_pipe(struct abm *abm, uint32_t controller_id)
+{
+ struct dce_abm *abm_dce = TO_DCE_ABM(abm);
+ uint32_t rampingBoundary = 0xFFFF;
+
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
+ 1, 80000);
+
+ /* set ramping boundary */
+ REG_WRITE(MASTER_COMM_DATA_REG1, rampingBoundary);
+
+ /* setDMCUParam_Pipe */
+ REG_UPDATE_2(MASTER_COMM_CMD_REG,
+ MASTER_COMM_CMD_REG_BYTE0, MCP_ABM_PIPE_SET,
+ MASTER_COMM_CMD_REG_BYTE1, controller_id);
+
+ /* notifyDMCUMsg */
+ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
+ return true;
+}
static unsigned int calculate_16_bit_backlight_from_pwm(struct dce_abm *abm_dce)
{
@@ -175,7 +196,6 @@ static void dmcu_set_backlight_level(
uint32_t controller_id)
{
unsigned int backlight_8_bit = 0;
- uint32_t rampingBoundary = 0xFFFF;
uint32_t s2;
if (backlight_pwm_u16_16 & 0x10000)
@@ -185,16 +205,7 @@ static void dmcu_set_backlight_level(
// Take MSB of fractional part since backlight is not max
backlight_8_bit = (backlight_pwm_u16_16 >> 8) & 0xFF;
- /* set ramping boundary */
- REG_WRITE(MASTER_COMM_DATA_REG1, rampingBoundary);
-
- /* setDMCUParam_Pipe */
- REG_UPDATE_2(MASTER_COMM_CMD_REG,
- MASTER_COMM_CMD_REG_BYTE0, MCP_ABM_PIPE_SET,
- MASTER_COMM_CMD_REG_BYTE1, controller_id);
-
- /* notifyDMCUMsg */
- REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+ dce_abm_set_pipe(&abm_dce->base, controller_id);
/* waitDMCUReadyForCmd */
REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT,
@@ -309,16 +320,7 @@ static bool dce_abm_immediate_disable(struct abm *abm)
{
struct dce_abm *abm_dce = TO_DCE_ABM(abm);
- REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
- 1, 80000);
-
- /* setDMCUParam_ABMLevel */
- REG_UPDATE_2(MASTER_COMM_CMD_REG,
- MASTER_COMM_CMD_REG_BYTE0, MCP_ABM_PIPE_SET,
- MASTER_COMM_CMD_REG_BYTE1, MCP_DISABLE_ABM_IMMEDIATELY);
-
- /* notifyDMCUMsg */
- REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+ dce_abm_set_pipe(abm, MCP_DISABLE_ABM_IMMEDIATELY);
abm->stored_backlight_registers.BL_PWM_CNTL =
REG_READ(BL_PWM_CNTL);
@@ -419,6 +421,7 @@ static const struct abm_funcs dce_funcs = {
.abm_init = dce_abm_init,
.set_abm_level = dce_abm_set_level,
.init_backlight = dce_abm_init_backlight,
+ .set_pipe = dce_abm_set_pipe,
.set_backlight_level_pwm = dce_abm_set_backlight_level_pwm,
.get_current_backlight = dce_abm_get_current_backlight,
.get_target_backlight = dce_abm_get_target_backlight,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
index bbe051736a18..6e142c2db986 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
@@ -696,6 +696,11 @@ static void dce11_update_clocks(struct clk_mgr *clk_mgr,
{
struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
struct dm_pp_power_level_change_request level_change_req;
+ int patched_disp_clk = context->bw.dce.dispclk_khz;
+
+	/* TODO: W/A for dal3 Linux; investigate why this works */
+ if (!clk_mgr_dce->dfs_bypass_active)
+ patched_disp_clk = patched_disp_clk * 115 / 100;
level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context);
/* get max clock state from PPLIB */
@@ -705,9 +710,9 @@ static void dce11_update_clocks(struct clk_mgr *clk_mgr,
clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
}
- if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) {
- context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, context->bw.dce.dispclk_khz);
- clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz;
+ if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr->clks.dispclk_khz)) {
+ context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, patched_disp_clk);
+ clk_mgr->clks.dispclk_khz = patched_disp_clk;
}
dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
index 85686d917636..a24a2bda8656 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
@@ -479,7 +479,7 @@ static void program_grph_pixel_format(
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
sign = 1;
floating = 1;
- /* no break */
+ /* fall through */
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: /* shouldn't this get float too? */
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
grph_depth = 3;
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
index acd418515346..a6b80fdaa666 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
@@ -37,6 +37,10 @@ void dce100_prepare_bandwidth(
struct dc *dc,
struct dc_state *context);
+void dce100_optimize_bandwidth(
+ struct dc *dc,
+ struct dc_state *context);
+
bool dce100_enable_display_power_gating(struct dc *dc, uint8_t controller_id,
struct dc_bios *dcb,
enum pipe_gating_control power_gating);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index e1b285ea01ac..5e4db3712eef 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -1300,6 +1300,10 @@ static enum dc_status apply_single_controller_ctx_to_hw(
struct drr_params params = {0};
unsigned int event_triggers = 0;
+	if (dc->hwss.disable_stream_gating)
+		dc->hwss.disable_stream_gating(dc, pipe_ctx);
+
if (pipe_ctx->stream_res.audio != NULL) {
struct audio_output audio_output;
@@ -1329,10 +1333,8 @@ static enum dc_status apply_single_controller_ctx_to_hw(
if (!pipe_ctx->stream->apply_seamless_boot_optimization)
dc->hwss.enable_stream_timing(pipe_ctx, context, dc);
- if (pipe_ctx->stream_res.tg->funcs->program_vupdate_interrupt)
- pipe_ctx->stream_res.tg->funcs->program_vupdate_interrupt(
- pipe_ctx->stream_res.tg,
- &stream->timing);
+ if (dc->hwss.setup_vupdate_interrupt)
+ dc->hwss.setup_vupdate_interrupt(pipe_ctx);
params.vertical_total_min = stream->adjust.v_total_min;
params.vertical_total_max = stream->adjust.v_total_max;
@@ -1521,6 +1523,14 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
struct dc_link *edp_link = get_link_for_edp(dc);
bool can_edp_fast_boot_optimize = false;
bool apply_edp_fast_boot_optimization = false;
+ bool can_apply_seamless_boot = false;
+
+ for (i = 0; i < context->stream_count; i++) {
+ if (context->streams[i]->apply_seamless_boot_optimization) {
+ can_apply_seamless_boot = true;
+ break;
+ }
+ }
if (edp_link) {
/* this seems to cause blank screens on DCE8 */
@@ -1549,7 +1559,7 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
}
}
- if (!apply_edp_fast_boot_optimization) {
+ if (!apply_edp_fast_boot_optimization && !can_apply_seamless_boot) {
if (edp_link_to_turnoff) {
/*turn off backlight before DP_blank and encoder powered down*/
dc->hwss.edp_backlight_control(edp_link_to_turnoff, false);
@@ -2676,6 +2686,8 @@ static const struct hw_sequencer_funcs dce110_funcs = {
.set_static_screen_control = set_static_screen_control,
.reset_hw_ctx_wrap = dce110_reset_hw_ctx_wrap,
.enable_stream_timing = dce110_enable_stream_timing,
+ .disable_stream_gating = NULL,
+ .enable_stream_gating = NULL,
.setup_stereo = NULL,
.set_avmute = dce110_set_avmute,
.wait_for_mpcc_disconnect = dce110_wait_for_mpcc_disconnect,
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
index a60a90e68d91..c4543178ba20 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
@@ -77,6 +77,6 @@ void dce80_hw_sequencer_construct(struct dc *dc)
dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating;
dc->hwss.pipe_control_lock = dce_pipe_control_lock;
dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth;
- dc->hwss.optimize_bandwidth = dce100_prepare_bandwidth;
+ dc->hwss.optimize_bandwidth = dce100_optimize_bandwidth;
}
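
This one-liner fixes a pasted assignment: prepare_bandwidth raises clocks before a mode is programmed, while optimize_bandwidth is meant to trim them afterwards, so with both slots pointing at the prepare variant the clocks were never lowered again. A hedged sketch of the pairing (stand-in bodies, not the real dce100 code):

static void raise_clocks(void) { /* pre-modeset: clocks up for worst case */ }
static void lower_clocks(void) { /* post-modeset: trim to actual need */ }

struct hwss_sketch {
	void (*prepare_bandwidth)(void);
	void (*optimize_bandwidth)(void);
};

static void construct_sketch(struct hwss_sketch *h)
{
	h->prepare_bandwidth = raise_clocks;	/* before programming a mode */
	h->optimize_bandwidth = lower_clocks;	/* after; this slot was the bug */
}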
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index 2eca81b5cf2f..c109ace96be9 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -792,9 +792,22 @@ bool dce80_validate_bandwidth(
struct dc *dc,
struct dc_state *context)
{
- /* TODO implement when needed but for now hardcode max value*/
- context->bw.dce.dispclk_khz = 681000;
- context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
+ int i;
+ bool at_least_one_pipe = false;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (context->res_ctx.pipe_ctx[i].stream)
+ at_least_one_pipe = true;
+ }
+
+ if (at_least_one_pipe) {
+ /* TODO implement when needed but for now hardcode max value*/
+ context->bw.dce.dispclk_khz = 681000;
+ context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
+ } else {
+ context->bw.dce.dispclk_khz = 0;
+ context->bw.dce.yclk_khz = 0;
+ }
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 117d9d8227f7..d1a8f1c302a9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -959,9 +959,25 @@ static void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
static void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
{
int i;
+ bool can_apply_seamless_boot = false;
+
+ for (i = 0; i < context->stream_count; i++) {
+ if (context->streams[i]->apply_seamless_boot_optimization) {
+ can_apply_seamless_boot = true;
+ break;
+ }
+ }
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct timing_generator *tg = dc->res_pool->timing_generators[i];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+	/* The assumption is that pipe_ctx is not mapped irregularly to a
+	 * non-preferred front end. If pipe_ctx->stream is not NULL, the
+	 * pipe will be used, so don't disable it.
+	 */
+ if (pipe_ctx->stream != NULL)
+ continue;
if (tg->funcs->is_tg_enabled(tg))
tg->funcs->lock(tg);
@@ -975,7 +991,9 @@ static void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
}
}
- dc->res_pool->mpc->funcs->mpc_init(dc->res_pool->mpc);
+	/* Cannot reset the MPC mux during seamless boot */
+ if (!can_apply_seamless_boot)
+ dc->res_pool->mpc->funcs->mpc_init(dc->res_pool->mpc);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct timing_generator *tg = dc->res_pool->timing_generators[i];
@@ -983,6 +1001,16 @@ static void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
struct dpp *dpp = dc->res_pool->dpps[i];
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+	// Workaround for an issue with dc_post_update_surfaces_to_stream
+ hubp->power_gated = true;
+
+	/* The assumption is that pipe_ctx is not mapped irregularly to a
+	 * non-preferred front end. If pipe_ctx->stream is not NULL, the
+	 * pipe will be used, so don't disable it.
+	 */
+ if (pipe_ctx->stream != NULL)
+ continue;
+
dpp->funcs->dpp_reset(dpp);
pipe_ctx->stream_res.tg = tg;
@@ -1137,11 +1165,13 @@ static void reset_hw_ctx_wrap(
struct clock_source *old_clk = pipe_ctx_old->clock_source;
reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
+ if (dc->hwss.enable_stream_gating) {
+ dc->hwss.enable_stream_gating(dc, pipe_ctx);
+ }
if (old_clk)
old_clk->funcs->cs_power_down(old_clk);
}
}
-
}
static bool patch_address_for_sbs_tb_stereo(
@@ -2162,8 +2192,10 @@ static void dcn10_blank_pixel_data(
if (!blank) {
if (stream_res->tg->funcs->set_blank)
stream_res->tg->funcs->set_blank(stream_res->tg, blank);
- if (stream_res->abm)
+ if (stream_res->abm) {
+ stream_res->abm->funcs->set_pipe(stream_res->abm, stream_res->tg->inst + 1);
stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
+ }
} else if (blank) {
if (stream_res->abm)
stream_res->abm->funcs->set_abm_immediate_disable(stream_res->abm);
@@ -2661,8 +2693,8 @@ static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
.mirror = pipe_ctx->plane_state->horizontal_mirror
};
- pos_cpy.x -= pipe_ctx->plane_state->dst_rect.x;
- pos_cpy.y -= pipe_ctx->plane_state->dst_rect.y;
+ pos_cpy.x_hotspot += pipe_ctx->plane_state->dst_rect.x;
+ pos_cpy.y_hotspot += pipe_ctx->plane_state->dst_rect.y;
if (pipe_ctx->plane_state->address.type
== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
@@ -2709,6 +2741,147 @@ static void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.dpp, &opt_attr);
}
+/**
+ * apply_front_porch_workaround - TODO: is this still needed on FPGA?
+ *
+ * This is a workaround for a bug that has existed since R5xx and has not been
+ * fixed: keep the front porch at a minimum of 2 for interlaced mode, or 1 for
+ * progressive.
+ */
+static void apply_front_porch_workaround(
+ struct dc_crtc_timing *timing)
+{
+ if (timing->flags.INTERLACE == 1) {
+ if (timing->v_front_porch < 2)
+ timing->v_front_porch = 2;
+ } else {
+ if (timing->v_front_porch < 1)
+ timing->v_front_porch = 1;
+ }
+}
+
+int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
+{
+ struct timing_generator *optc = pipe_ctx->stream_res.tg;
+ const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
+ struct dc_crtc_timing patched_crtc_timing;
+ int vesa_sync_start;
+ int asic_blank_end;
+ int interlace_factor;
+ int vertical_line_start;
+
+ patched_crtc_timing = *dc_crtc_timing;
+ apply_front_porch_workaround(&patched_crtc_timing);
+
+ interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
+
+ vesa_sync_start = patched_crtc_timing.v_addressable +
+ patched_crtc_timing.v_border_bottom +
+ patched_crtc_timing.v_front_porch;
+
+ asic_blank_end = (patched_crtc_timing.v_total -
+ vesa_sync_start -
+ patched_crtc_timing.v_border_top)
+ * interlace_factor;
+
+ vertical_line_start = asic_blank_end -
+ optc->dlg_otg_param.vstartup_start + 1;
+
+ return vertical_line_start;
+}
+
+static void calc_vupdate_position(
+ struct pipe_ctx *pipe_ctx,
+ uint32_t *start_line,
+ uint32_t *end_line)
+{
+ const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
+ int vline_int_offset_from_vupdate =
+ pipe_ctx->stream->periodic_interrupt0.lines_offset;
+ int vupdate_offset_from_vsync = get_vupdate_offset_from_vsync(pipe_ctx);
+ int start_position;
+
+ if (vline_int_offset_from_vupdate > 0)
+ vline_int_offset_from_vupdate--;
+ else if (vline_int_offset_from_vupdate < 0)
+ vline_int_offset_from_vupdate++;
+
+ start_position = vline_int_offset_from_vupdate + vupdate_offset_from_vsync;
+
+ if (start_position >= 0)
+ *start_line = start_position;
+ else
+ *start_line = dc_crtc_timing->v_total + start_position - 1;
+
+ *end_line = *start_line + 2;
+
+ if (*end_line >= dc_crtc_timing->v_total)
+ *end_line = 2;
+}
+
+static void cal_vline_position(
+ struct pipe_ctx *pipe_ctx,
+ enum vline_select vline,
+ uint32_t *start_line,
+ uint32_t *end_line)
+{
+ enum vertical_interrupt_ref_point ref_point = INVALID_POINT;
+
+ if (vline == VLINE0)
+ ref_point = pipe_ctx->stream->periodic_interrupt0.ref_point;
+ else if (vline == VLINE1)
+ ref_point = pipe_ctx->stream->periodic_interrupt1.ref_point;
+
+ switch (ref_point) {
+ case START_V_UPDATE:
+ calc_vupdate_position(
+ pipe_ctx,
+ start_line,
+ end_line);
+ break;
+ case START_V_SYNC:
+		// Nothing to do: the reference point is vsync, which is line 0.
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+}
+
+static void dcn10_setup_periodic_interrupt(
+ struct pipe_ctx *pipe_ctx,
+ enum vline_select vline)
+{
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
+
+ if (vline == VLINE0) {
+ uint32_t start_line = 0;
+ uint32_t end_line = 0;
+
+ cal_vline_position(pipe_ctx, vline, &start_line, &end_line);
+
+ tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
+
+ } else if (vline == VLINE1) {
+ pipe_ctx->stream_res.tg->funcs->setup_vertical_interrupt1(
+ tg,
+ pipe_ctx->stream->periodic_interrupt1.lines_offset);
+ }
+}
+
+static void dcn10_setup_vupdate_interrupt(struct pipe_ctx *pipe_ctx)
+{
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
+ int start_line = get_vupdate_offset_from_vsync(pipe_ctx);
+
+ if (start_line < 0) {
+ ASSERT(0);
+ start_line = 0;
+ }
+
+ if (tg->funcs->setup_vertical_interrupt2)
+ tg->funcs->setup_vertical_interrupt2(tg, start_line);
+}
+
static const struct hw_sequencer_funcs dcn10_funcs = {
.program_gamut_remap = program_gamut_remap,
.init_hw = dcn10_init_hw,
@@ -2756,7 +2929,11 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready,
.set_cursor_position = dcn10_set_cursor_position,
.set_cursor_attribute = dcn10_set_cursor_attribute,
- .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level
+ .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
+ .disable_stream_gating = NULL,
+ .enable_stream_gating = NULL,
+ .setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
+ .setup_vupdate_interrupt = dcn10_setup_vupdate_interrupt
};
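
A worked example may help with get_vupdate_offset_from_vsync() above. With hypothetical 1080p-style timing values (vstartup_start assumed), blank end lands on line 41 and VUPDATE fires 16 lines after vsync:

#include <stdio.h>

/* Mirrors the arithmetic in get_vupdate_offset_from_vsync(); all timing
 * values here are illustrative, not from real hardware. */
int main(void)
{
	int v_total = 1125, v_addressable = 1080;
	int v_front_porch = 4, v_border_top = 0, v_border_bottom = 0;
	int interlace_factor = 1;	/* progressive */
	int vstartup_start = 26;	/* assumed dlg_otg_param.vstartup_start */

	int vesa_sync_start = v_addressable + v_border_bottom + v_front_porch;
	int asic_blank_end = (v_total - vesa_sync_start - v_border_top)
			* interlace_factor;
	int vertical_line_start = asic_blank_end - vstartup_start + 1;

	/* prints 16: VUPDATE fires 16 lines after vsync */
	printf("offset from vsync: %d lines\n", vertical_line_start);
	return 0;
}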
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
index f8eea10e4c64..6d66084df55f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
@@ -81,4 +81,6 @@ struct pipe_ctx *find_top_pipe_for_stream(
struct dc_state *context,
const struct dc_stream_state *stream);
+int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx);
+
#endif /* __DC_HWSS_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index 2f78a84f0dcb..0345d51e9d6f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -92,134 +92,36 @@ static void optc1_disable_stereo(struct timing_generator *optc)
OTG_3D_STRUCTURE_STEREO_SEL_OVR, 0);
}
-static uint32_t get_start_vline(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing)
-{
- struct dc_crtc_timing patched_crtc_timing;
- int vesa_sync_start;
- int asic_blank_end;
- int interlace_factor;
- int vertical_line_start;
-
- patched_crtc_timing = *dc_crtc_timing;
- optc1_apply_front_porch_workaround(optc, &patched_crtc_timing);
-
- vesa_sync_start = patched_crtc_timing.h_addressable +
- patched_crtc_timing.h_border_right +
- patched_crtc_timing.h_front_porch;
-
- asic_blank_end = patched_crtc_timing.h_total -
- vesa_sync_start -
- patched_crtc_timing.h_border_left;
-
- interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
-
- vesa_sync_start = patched_crtc_timing.v_addressable +
- patched_crtc_timing.v_border_bottom +
- patched_crtc_timing.v_front_porch;
-
- asic_blank_end = (patched_crtc_timing.v_total -
- vesa_sync_start -
- patched_crtc_timing.v_border_top)
- * interlace_factor;
-
- vertical_line_start = asic_blank_end - optc->dlg_otg_param.vstartup_start + 1;
- if (vertical_line_start < 0) {
- ASSERT(0);
- vertical_line_start = 0;
- }
-
- return vertical_line_start;
-}
-
-static void calc_vline_position(
+void optc1_setup_vertical_interrupt0(
struct timing_generator *optc,
- const struct dc_crtc_timing *dc_crtc_timing,
- unsigned long long vsync_delta,
- uint32_t *start_line,
- uint32_t *end_line)
+ uint32_t start_line,
+ uint32_t end_line)
{
- unsigned long long req_delta_tens_of_usec = div64_u64((vsync_delta + 9999), 10000);
- unsigned long long pix_clk_hundreds_khz = div64_u64((dc_crtc_timing->pix_clk_100hz + 999), 1000);
- uint32_t req_delta_lines = (uint32_t) div64_u64(
- (req_delta_tens_of_usec * pix_clk_hundreds_khz + dc_crtc_timing->h_total - 1),
- dc_crtc_timing->h_total);
-
- uint32_t vsync_line = get_start_vline(optc, dc_crtc_timing);
-
- if (req_delta_lines != 0)
- req_delta_lines--;
-
- if (req_delta_lines > vsync_line)
- *start_line = dc_crtc_timing->v_total - (req_delta_lines - vsync_line) - 1;
- else
- *start_line = vsync_line - req_delta_lines;
-
- *end_line = *start_line + 2;
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
- if (*end_line >= dc_crtc_timing->v_total)
- *end_line = 2;
+ REG_SET_2(OTG_VERTICAL_INTERRUPT0_POSITION, 0,
+ OTG_VERTICAL_INTERRUPT0_LINE_START, start_line,
+ OTG_VERTICAL_INTERRUPT0_LINE_END, end_line);
}
-void optc1_program_vline_interrupt(
+void optc1_setup_vertical_interrupt1(
struct timing_generator *optc,
- const struct dc_crtc_timing *dc_crtc_timing,
- enum vline_select vline,
- const union vline_config *vline_config)
+ uint32_t start_line)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
- uint32_t start_line = 0;
- uint32_t end_line = 0;
-
- switch (vline) {
- case VLINE0:
- calc_vline_position(optc, dc_crtc_timing, vline_config->delta_in_ns, &start_line, &end_line);
- REG_SET_2(OTG_VERTICAL_INTERRUPT0_POSITION, 0,
- OTG_VERTICAL_INTERRUPT0_LINE_START, start_line,
- OTG_VERTICAL_INTERRUPT0_LINE_END, end_line);
- break;
- case VLINE1:
- REG_SET(OTG_VERTICAL_INTERRUPT1_POSITION, 0,
- OTG_VERTICAL_INTERRUPT1_LINE_START, vline_config->line_number);
- break;
- default:
- break;
- }
+
+ REG_SET(OTG_VERTICAL_INTERRUPT1_POSITION, 0,
+ OTG_VERTICAL_INTERRUPT1_LINE_START, start_line);
}
-void optc1_program_vupdate_interrupt(
+void optc1_setup_vertical_interrupt2(
struct timing_generator *optc,
- const struct dc_crtc_timing *dc_crtc_timing)
+ uint32_t start_line)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
- int32_t vertical_line_start;
- uint32_t asic_blank_end;
- uint32_t vesa_sync_start;
- struct dc_crtc_timing patched_crtc_timing;
-
- patched_crtc_timing = *dc_crtc_timing;
- optc1_apply_front_porch_workaround(optc, &patched_crtc_timing);
-
- /* asic_h_blank_end = HsyncWidth + HbackPorch =
- * vesa. usHorizontalTotal - vesa. usHorizontalSyncStart -
- * vesa.h_left_border
- */
- vesa_sync_start = patched_crtc_timing.h_addressable +
- patched_crtc_timing.h_border_right +
- patched_crtc_timing.h_front_porch;
-
- asic_blank_end = patched_crtc_timing.h_total -
- vesa_sync_start -
- patched_crtc_timing.h_border_left;
-
- /* Use OTG_VERTICAL_INTERRUPT2 replace VUPDATE interrupt,
- * program the reg for interrupt postition.
- */
- vertical_line_start = asic_blank_end - optc->dlg_otg_param.vstartup_start + 1;
- if (vertical_line_start < 0)
- vertical_line_start = 0;
REG_SET(OTG_VERTICAL_INTERRUPT2_POSITION, 0,
- OTG_VERTICAL_INTERRUPT2_LINE_START, vertical_line_start);
+ OTG_VERTICAL_INTERRUPT2_LINE_START, start_line);
}
/**
@@ -1480,8 +1382,9 @@ bool optc1_get_crc(struct timing_generator *optc,
static const struct timing_generator_funcs dcn10_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
- .program_vline_interrupt = optc1_program_vline_interrupt,
- .program_vupdate_interrupt = optc1_program_vupdate_interrupt,
+ .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
+ .setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1,
+ .setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2,
.program_global_sync = optc1_program_global_sync,
.enable_crtc = optc1_enable_crtc,
.disable_crtc = optc1_disable_crtc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index 24452f11c598..4eb9a898c237 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -483,11 +483,16 @@ void optc1_program_timing(
const struct dc_crtc_timing *dc_crtc_timing,
bool use_vbios);
-void optc1_program_vline_interrupt(
+void optc1_setup_vertical_interrupt0(
struct timing_generator *optc,
- const struct dc_crtc_timing *dc_crtc_timing,
- enum vline_select vline,
- const union vline_config *vline_config);
+ uint32_t start_line,
+ uint32_t end_line);
+void optc1_setup_vertical_interrupt1(
+ struct timing_generator *optc,
+ uint32_t start_line);
+void optc1_setup_vertical_interrupt2(
+ struct timing_generator *optc,
+ uint32_t start_line);
void optc1_program_global_sync(
struct timing_generator *optc);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
index abc961c0906e..86dc39a02408 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
@@ -46,6 +46,7 @@ struct abm_funcs {
void (*abm_init)(struct abm *abm);
bool (*set_abm_level)(struct abm *abm, unsigned int abm_level);
bool (*set_abm_immediate_disable)(struct abm *abm);
+ bool (*set_pipe)(struct abm *abm, unsigned int controller_id);
bool (*init_backlight)(struct abm *abm);
/* backlight_pwm_u16_16 is unsigned 32 bit,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 03ae941895f3..c25f7df7b5e3 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -134,14 +134,6 @@ struct dc_crtc_timing;
struct drr_params;
-union vline_config;
-
-
-enum vline_select {
- VLINE0,
- VLINE1,
- VLINE2
-};
struct timing_generator_funcs {
bool (*validate_timing)(struct timing_generator *tg,
@@ -149,14 +141,17 @@ struct timing_generator_funcs {
void (*program_timing)(struct timing_generator *tg,
const struct dc_crtc_timing *timing,
bool use_vbios);
- void (*program_vline_interrupt)(
+ void (*setup_vertical_interrupt0)(
+ struct timing_generator *optc,
+ uint32_t start_line,
+ uint32_t end_line);
+ void (*setup_vertical_interrupt1)(
+ struct timing_generator *optc,
+ uint32_t start_line);
+ void (*setup_vertical_interrupt2)(
struct timing_generator *optc,
- const struct dc_crtc_timing *dc_crtc_timing,
- enum vline_select vline,
- const union vline_config *vline_config);
+ uint32_t start_line);
- void (*program_vupdate_interrupt)(struct timing_generator *optc,
- const struct dc_crtc_timing *dc_crtc_timing);
bool (*enable_crtc)(struct timing_generator *tg);
bool (*disable_crtc)(struct timing_generator *tg);
bool (*is_counter_moving)(struct timing_generator *tg);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 341b4810288c..7676f25216b1 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -38,6 +38,11 @@ enum pipe_gating_control {
PIPE_GATING_CONTROL_INIT
};
+enum vline_select {
+ VLINE0,
+ VLINE1
+};
+
struct dce_hwseq_wa {
bool blnd_crtc_trigger;
bool DEGVIDCN10_253;
@@ -68,6 +73,10 @@ struct stream_resource;
struct hw_sequencer_funcs {
+ void (*disable_stream_gating)(struct dc *dc, struct pipe_ctx *pipe_ctx);
+
+ void (*enable_stream_gating)(struct dc *dc, struct pipe_ctx *pipe_ctx);
+
void (*init_hw)(struct dc *dc);
void (*init_pipes)(struct dc *dc, struct dc_state *context);
@@ -220,6 +229,9 @@ struct hw_sequencer_funcs {
void (*set_cursor_attribute)(struct pipe_ctx *pipe);
void (*set_cursor_sdr_white_level)(struct pipe_ctx *pipe);
+ void (*setup_periodic_interrupt)(struct pipe_ctx *pipe_ctx, enum vline_select vline);
+ void (*setup_vupdate_interrupt)(struct pipe_ctx *pipe_ctx);
+
};
void color_space_to_black_color(
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index 4f501ddcfb8d..34d6fdcb32e2 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -131,6 +131,7 @@
#define INTERNAL_REV_RAVEN_A0 0x00 /* First spin of Raven */
#define RAVEN_A0 0x01
#define RAVEN_B0 0x21
+#define PICASSO_A0 0x41
#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
/* DCN1_01 */
#define RAVEN2_A0 0x81
@@ -165,4 +166,6 @@
#define FAMILY_UNKNOWN 0xFF
+
+
#endif /* __DAL_ASIC_ID_H__ */
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index 3ba87b076287..038b88221c5f 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -165,18 +165,11 @@ struct iram_table_v_2_2 {
};
#pragma pack(pop)
-static uint16_t backlight_8_to_16(unsigned int backlight_8bit)
-{
- return (uint16_t)(backlight_8bit * 0x101);
-}
-
static void fill_backlight_transform_table(struct dmcu_iram_parameters params,
struct iram_table_v_2 *table)
{
unsigned int i;
unsigned int num_entries = NUM_BL_CURVE_SEGS;
- unsigned int query_input_8bit;
- unsigned int query_output_8bit;
unsigned int lut_index;
table->backlight_thresholds[0] = 0;
@@ -194,16 +187,13 @@ static void fill_backlight_transform_table(struct dmcu_iram_parameters params,
* format U4.10.
*/
for (i = 1; i+1 < num_entries; i++) {
- query_input_8bit = DIV_ROUNDUP((i * 256), num_entries);
-
lut_index = (params.backlight_lut_array_size - 1) * i / (num_entries - 1);
ASSERT(lut_index < params.backlight_lut_array_size);
- query_output_8bit = params.backlight_lut_array[lut_index] >> 8;
table->backlight_thresholds[i] =
- backlight_8_to_16(query_input_8bit);
+ cpu_to_be16(DIV_ROUNDUP((i * 65536), num_entries));
table->backlight_offsets[i] =
- backlight_8_to_16(query_output_8bit);
+ cpu_to_be16(params.backlight_lut_array[lut_index]);
}
}
@@ -212,8 +202,6 @@ static void fill_backlight_transform_table_v_2_2(struct dmcu_iram_parameters par
{
unsigned int i;
unsigned int num_entries = NUM_BL_CURVE_SEGS;
- unsigned int query_input_8bit;
- unsigned int query_output_8bit;
unsigned int lut_index;
table->backlight_thresholds[0] = 0;
@@ -231,16 +219,13 @@ static void fill_backlight_transform_table_v_2_2(struct dmcu_iram_parameters par
* format U4.10.
*/
for (i = 1; i+1 < num_entries; i++) {
- query_input_8bit = DIV_ROUNDUP((i * 256), num_entries);
-
lut_index = (params.backlight_lut_array_size - 1) * i / (num_entries - 1);
ASSERT(lut_index < params.backlight_lut_array_size);
- query_output_8bit = params.backlight_lut_array[lut_index] >> 8;
table->backlight_thresholds[i] =
- backlight_8_to_16(query_input_8bit);
+ cpu_to_be16(DIV_ROUNDUP((i * 65536), num_entries));
table->backlight_offsets[i] =
- backlight_8_to_16(query_output_8bit);
+ cpu_to_be16(params.backlight_lut_array[lut_index]);
}
}
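
The old path quantized each threshold to 8 bits (DIV_ROUNDUP(i * 256, n)) and stretched it with * 0x101, and likewise truncated the LUT output to its high byte; the new path computes the full 16-bit threshold directly and stores the raw LUT value, with cpu_to_be16() supplying the byte order the DMCU IRAM table expects. A small before/after, assuming 16 curve segments:

#include <stdio.h>
#include <stdint.h>

#define DIV_ROUNDUP(a, b) (((a) + (b) - 1) / (b))

int main(void)
{
	unsigned int num_entries = 16, i = 5;	/* assumed segment count/index */

	/* Old path: round-trip through 8 bits, then scale by 0x101. */
	uint16_t old_thresh =
		(uint16_t)(DIV_ROUNDUP(i * 256, num_entries) * 0x101);

	/* New path: full-precision 16-bit threshold (byte swap omitted). */
	uint16_t new_thresh = (uint16_t)DIV_ROUNDUP(i * 65536, num_entries);

	printf("old=%u new=%u\n", old_thresh, new_thresh); /* 20560 vs 20480 */
	return 0;
}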
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 83d960110d23..5f3c10ebff08 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -137,20 +137,17 @@ struct kgd2kfd_shared_resources {
/* Bit n == 1 means Queue n is available for KFD */
DECLARE_BITMAP(queue_bitmap, KGD_MAX_QUEUES);
- /* Doorbell assignments (SOC15 and later chips only). Only
+ /* SDMA doorbell assignments (SOC15 and later chips only). Only
* specific doorbells are routed to each SDMA engine. Others
* are routed to IH and VCN. They are not usable by the CP.
- *
- * Any doorbell number D that satisfies the following condition
- * is reserved: (D & reserved_doorbell_mask) == reserved_doorbell_val
- *
- * KFD currently uses 1024 (= 0x3ff) doorbells per process. If
- * doorbells 0x0e0-0x0ff and 0x2e0-0x2ff are reserved, that means
- * mask would be set to 0x1e0 and val set to 0x0e0.
*/
- unsigned int sdma_doorbell[2][8];
- unsigned int reserved_doorbell_mask;
- unsigned int reserved_doorbell_val;
+ uint32_t *sdma_doorbell_idx;
+
+	/* From SOC15 onward, the doorbell index range that is not usable
+	 * for CP queues.
+	 */
+ uint32_t non_cp_doorbells_start;
+ uint32_t non_cp_doorbells_end;
/* Base address of doorbell aperture. */
phys_addr_t doorbell_physical_address;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index 5273de3c5b98..0ad8fe4a6277 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -139,12 +139,10 @@ static int smu10_construct_max_power_limits_table(struct pp_hwmgr *hwmgr,
static int smu10_init_dynamic_state_adjustment_rule_settings(
struct pp_hwmgr *hwmgr)
{
- uint32_t table_size =
- sizeof(struct phm_clock_voltage_dependency_table) +
- (7 * sizeof(struct phm_clock_voltage_dependency_record));
+ struct phm_clock_voltage_dependency_table *table_clk_vlt;
- struct phm_clock_voltage_dependency_table *table_clk_vlt =
- kzalloc(table_size, GFP_KERNEL);
+ table_clk_vlt = kzalloc(struct_size(table_clk_vlt, entries, 7),
+ GFP_KERNEL);
if (NULL == table_clk_vlt) {
pr_err("Can not allocate memory!\n");
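
struct_size() from <linux/overflow.h> computes the size of a header struct plus n trailing flexible-array elements, saturating on overflow, and replaces the open-coded sum above. A hedged userspace sketch of what it evaluates to (stand-in types, no overflow checking):

#include <stdlib.h>

/* Stand-ins for the powerplay types; layout mirrors the real table. */
struct record { unsigned int clk, v; };
struct dep_table {
	unsigned int count;
	struct record entries[];	/* flexible array member */
};

int main(void)
{
	/* Equivalent of struct_size(table_clk_vlt, entries, 7), minus the
	 * kernel macro's overflow saturation. */
	struct dep_table *t = calloc(1, sizeof(struct dep_table) +
					7 * sizeof(struct record));
	if (!t)
		return 1;
	t->count = 7;
	free(t);
	return 0;
}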
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index c8f5c00dd1e7..48187acac59e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -3681,10 +3681,12 @@ static int smu7_request_link_speed_change_before_state_change(
data->force_pcie_gen = PP_PCIEGen2;
if (current_link_speed == PP_PCIEGen2)
break;
+ /* fall through */
case PP_PCIEGen2:
if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN2, false))
break;
#endif
+ /* fall through */
default:
data->force_pcie_gen = smu7_get_current_pcie_speed(hwmgr);
break;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
index d138ddae563d..58f5589aaf12 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
@@ -1211,7 +1211,7 @@ int smu7_power_control_set_level(struct pp_hwmgr *hwmgr)
hwmgr->platform_descriptor.TDPAdjustment :
(-1 * hwmgr->platform_descriptor.TDPAdjustment);
- if (hwmgr->chip_id > CHIP_TONGA)
+ if (hwmgr->chip_id > CHIP_TONGA)
target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100;
else
target_tdp = ((100 + adjust_percent) * (int)(cac_table->usConfigurableTDP * 256)) / 100;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
index 553a203ac47c..019d6a206492 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
@@ -272,12 +272,10 @@ static int smu8_init_dynamic_state_adjustment_rule_settings(
struct pp_hwmgr *hwmgr,
ATOM_CLK_VOLT_CAPABILITY *disp_voltage_table)
{
- uint32_t table_size =
- sizeof(struct phm_clock_voltage_dependency_table) +
- (7 * sizeof(struct phm_clock_voltage_dependency_record));
+ struct phm_clock_voltage_dependency_table *table_clk_vlt;
- struct phm_clock_voltage_dependency_table *table_clk_vlt =
- kzalloc(table_size, GFP_KERNEL);
+ table_clk_vlt = kzalloc(struct_size(table_clk_vlt, entries, 7),
+ GFP_KERNEL);
if (NULL == table_clk_vlt) {
pr_err("Can not allocate memory!\n");
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c
index f94dab27f486..7337be5602e4 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c
@@ -1,3 +1,25 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
#include "amdgpu.h"
#include "soc15.h"
#include "soc15_hw_ip.h"
@@ -114,7 +136,7 @@ int vega10_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
if (soc15_baco_program_registers(hwmgr, pre_baco_tbl,
ARRAY_SIZE(pre_baco_tbl))) {
if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnterBaco))
- return -1;
+ return -EINVAL;
if (soc15_baco_program_registers(hwmgr, enter_baco_tbl,
ARRAY_SIZE(enter_baco_tbl)))
@@ -132,5 +154,5 @@ int vega10_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
}
}
- return -1;
+ return -EINVAL;
}
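
Returning -EINVAL rather than a bare -1 follows the kernel's 0-or-negative-errno convention, so callers can decode the failure instead of seeing an anonymous error. A small userspace analogue of the pattern:

#include <errno.h>
#include <stdio.h>

/* Userspace sketch of the 0-or-negative-errno convention. */
static int enter_baco(int ok)
{
	return ok ? 0 : -EINVAL;
}

int main(void)
{
	int ret = enter_baco(0);

	if (ret)
		fprintf(stderr, "BACO entry failed: %d\n", ret);	/* -22 */
	return 0;
}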
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.h
index a93b1e6d1c66..f7a3ffa744b3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.h
@@ -20,8 +20,8 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#ifndef __VEGA10_BOCO_H__
-#define __VEGA10_BOCO_H__
+#ifndef __VEGA10_BACO_H__
+#define __VEGA10_BACO_H__
#include "hwmgr.h"
#include "common_baco.h"
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
index 0d883b358df2..5e8602a79b1c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
@@ -1,3 +1,25 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
#include "amdgpu.h"
#include "soc15.h"
#include "soc15_hw_ip.h"
@@ -67,14 +89,14 @@ int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
if(smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnterBaco, 0))
- return -1;
+ return -EINVAL;
} else if (state == BACO_STATE_OUT) {
if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ExitBaco))
- return -1;
+ return -EINVAL;
if (!soc15_baco_program_registers(hwmgr, clean_baco_tbl,
ARRAY_SIZE(clean_baco_tbl)))
- return -1;
+ return -EINVAL;
}
return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.h
index c51988a9ed77..51c7f8392925 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.h
@@ -20,8 +20,8 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#ifndef __VEGA20_BOCO_H__
-#define __VEGA20_BOCO_H__
+#ifndef __VEGA20_BACO_H__
+#define __VEGA20_BACO_H__
#include "hwmgr.h"
#include "common_baco.h"
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 0769b1ec562b..aad79affb081 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -3456,7 +3456,7 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
!hwmgr->display_config->multi_monitor_in_sync) ||
vblank_too_short;
- latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
+ latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
/* gfxclk */
dpm_table = &(data->dpm_table.gfx_table);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index a6edd5df33b0..4240aeec9000 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -29,6 +29,10 @@
#include <drm/amdgpu_drm.h>
#include "smumgr.h"
+MODULE_FIRMWARE("amdgpu/bonaire_smc.bin");
+MODULE_FIRMWARE("amdgpu/bonaire_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/hawaii_smc.bin");
+MODULE_FIRMWARE("amdgpu/hawaii_k_smc.bin");
MODULE_FIRMWARE("amdgpu/topaz_smc.bin");
MODULE_FIRMWARE("amdgpu/topaz_k_smc.bin");
MODULE_FIRMWARE("amdgpu/tonga_smc.bin");
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index cb55bdc36f3f..6b6e037258c3 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -145,6 +145,10 @@ static int bochs_pci_probe(struct pci_dev *pdev,
if (IS_ERR(dev))
return PTR_ERR(dev);
+ ret = pci_enable_device(pdev);
+ if (ret)
+ goto err_free_dev;
+
dev->pdev = pdev;
pci_set_drvdata(pdev, dev);
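
pci_enable_device() wakes the device and assigns its BARs, so it has to run before any MMIO setup; the probe previously skipped it entirely. A hedged skeleton of the corrected ordering (the label and bochs_driver object are illustrative, not the exact driver code):

static int probe_sketch(struct pci_dev *pdev)
{
	struct drm_device *dev = drm_dev_alloc(&bochs_driver, &pdev->dev);
	int ret;

	if (IS_ERR(dev))
		return PTR_ERR(dev);

	ret = pci_enable_device(pdev);	/* wake device, assign BARs */
	if (ret)
		goto err_free_dev;

	/* ... MMIO mapping and modeset init go here ... */
	return 0;

err_free_dev:
	drm_dev_put(dev);
	return ret;
}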
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index c53ecbd9abdd..540a77a2ade9 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1608,6 +1608,15 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
old_plane_state->crtc != new_plane_state->crtc)
return -EINVAL;
+ /*
+ * FIXME: Since prepare_fb and cleanup_fb are always called on
+ * the new_plane_state for async updates we need to block framebuffer
+ * changes. This prevents use of a fb that's been cleaned up and
+	 * double cleanups from occurring.
+ */
+ if (old_plane_state->fb != new_plane_state->fb)
+ return -EINVAL;
+
funcs = plane->helper_private;
if (!funcs->atomic_async_update)
return -EINVAL;
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index f782d3103d29..9701469a6e93 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -262,6 +262,18 @@ void drm_file_free(struct drm_file *file)
kfree(file);
}
+static void drm_close_helper(struct file *filp)
+{
+ struct drm_file *file_priv = filp->private_data;
+ struct drm_device *dev = file_priv->minor->dev;
+
+ mutex_lock(&dev->filelist_mutex);
+ list_del(&file_priv->lhead);
+ mutex_unlock(&dev->filelist_mutex);
+
+ drm_file_free(file_priv);
+}
+
static int drm_setup(struct drm_device * dev)
{
int ret;
@@ -318,8 +330,10 @@ int drm_open(struct inode *inode, struct file *filp)
goto err_undo;
if (need_setup) {
retcode = drm_setup(dev);
- if (retcode)
+ if (retcode) {
+ drm_close_helper(filp);
goto err_undo;
+ }
}
return 0;
@@ -473,11 +487,7 @@ int drm_release(struct inode *inode, struct file *filp)
DRM_DEBUG("open_count = %d\n", dev->open_count);
- mutex_lock(&dev->filelist_mutex);
- list_del(&file_priv->lhead);
- mutex_unlock(&dev->filelist_mutex);
-
- drm_file_free(file_priv);
+ drm_close_helper(filp);
if (!--dev->open_count)
drm_lastclose(dev);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 7e6746b2d704..687943df58e1 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -508,6 +508,13 @@ int drm_version(struct drm_device *dev, void *data,
return err;
}
+static inline bool
+drm_render_driver_and_ioctl(const struct drm_device *dev, u32 flags)
+{
+ return drm_core_check_feature(dev, DRIVER_RENDER) &&
+ (flags & DRM_RENDER_ALLOW);
+}
+
/**
* drm_ioctl_permit - Check ioctl permissions against caller
*
@@ -522,14 +529,19 @@ int drm_version(struct drm_device *dev, void *data,
*/
int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
{
+ const struct drm_device *dev = file_priv->minor->dev;
+
/* ROOT_ONLY is only for CAP_SYS_ADMIN */
if (unlikely((flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)))
return -EACCES;
- /* AUTH is only for authenticated or render client */
- if (unlikely((flags & DRM_AUTH) && !drm_is_render_client(file_priv) &&
- !file_priv->authenticated))
- return -EACCES;
+ /* AUTH is only for master ... */
+ if (unlikely((flags & DRM_AUTH) && drm_is_primary_client(file_priv))) {
+ /* authenticated ones, or render capable on DRM_RENDER_ALLOW. */
+ if (!file_priv->authenticated &&
+ !drm_render_driver_and_ioctl(dev, flags))
+ return -EACCES;
+ }
/* MASTER is only for master or control clients */
if (unlikely((flags & DRM_MASTER) &&
@@ -570,7 +582,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_UNLOCKED|DRM_MASTER),
+ DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_UNLOCKED|DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_legacy_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_legacy_rmmap_ioctl, DRM_AUTH),
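
The reworked DRM_AUTH branch above reads: render clients skip authentication entirely, and a primary client passes either by being authenticated or, on render-capable drivers, by invoking an ioctl flagged DRM_RENDER_ALLOW. A compact restatement of the predicate (simplified types, illustrative only):

#include <stdbool.h>

/* Simplified restatement of the new DRM_AUTH decision. */
static bool auth_denied(bool is_primary, bool authenticated,
			bool driver_render, bool render_allow)
{
	if (!is_primary)
		return false;		/* render clients: no AUTH check */
	if (authenticated)
		return false;
	return !(driver_render && render_allow);
}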
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 376ffe842e26..e8f694b57b8a 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -338,8 +338,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
bool *enabled, int width, int height)
{
struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
- unsigned long conn_configured, conn_seq, mask;
unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
+ unsigned long conn_configured, conn_seq;
int i, j;
bool *save_enabled;
bool fallback = true, ret = true;
@@ -357,10 +357,9 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
drm_modeset_backoff(&ctx);
memcpy(save_enabled, enabled, count);
- mask = GENMASK(count - 1, 0);
+ conn_seq = GENMASK(count - 1, 0);
conn_configured = 0;
retry:
- conn_seq = conn_configured;
for (i = 0; i < count; i++) {
struct drm_fb_helper_connector *fb_conn;
struct drm_connector *connector;
@@ -373,7 +372,8 @@ retry:
if (conn_configured & BIT(i))
continue;
- if (conn_seq == 0 && !connector->has_tile)
+ /* First pass, only consider tiled connectors */
+ if (conn_seq == GENMASK(count - 1, 0) && !connector->has_tile)
continue;
if (connector->status == connector_status_connected)
@@ -477,8 +477,10 @@ retry:
conn_configured |= BIT(i);
}
- if ((conn_configured & mask) != mask && conn_configured != conn_seq)
+ if (conn_configured != conn_seq) { /* repeat until no more are found */
+ conn_seq = conn_configured;
goto retry;
+ }
/*
* If the BIOS didn't enable everything it could, fall back to have the
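
The conn_seq rework above keeps the loop's two properties while dropping the extra mask variable: conn_seq starts as the full connector mask, so the tiled-only gate holds only on the first pass, and it is resynced to conn_configured before retrying, so the loop runs until a pass configures nothing new. A hedged sketch of the control flow (try_one is a stand-in for the per-connector body):

#include <stdbool.h>

#define GENMASK_UL(h, l) \
	(((~0UL) << (l)) & (~0UL >> (8 * sizeof(long) - 1 - (h))))

static void configure_all(bool (*try_one)(int idx, bool first_pass), int count)
{
	unsigned long conn_seq = GENMASK_UL(count - 1, 0);
	unsigned long conn_configured = 0;

retry:
	for (int i = 0; i < count; i++) {
		if (conn_configured & (1UL << i))
			continue;
		/* first pass only considers tiled connectors */
		if (try_one(i, conn_seq == GENMASK_UL(count - 1, 0)))
			conn_configured |= 1UL << i;
	}
	if (conn_configured != conn_seq) {	/* progress was made: go again */
		conn_seq = conn_configured;
		goto retry;
	}
}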
diff --git a/drivers/gpu/drm/imx/Kconfig b/drivers/gpu/drm/imx/Kconfig
index c9e439c82241..c3c84a09e628 100644
--- a/drivers/gpu/drm/imx/Kconfig
+++ b/drivers/gpu/drm/imx/Kconfig
@@ -4,7 +4,7 @@ config DRM_IMX
select VIDEOMODE_HELPERS
select DRM_GEM_CMA_HELPER
select DRM_KMS_CMA_HELPER
- depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM)
+ depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM || COMPILE_TEST)
depends on IMX_IPUV3_CORE
help
enable i.MX graphics support
@@ -18,6 +18,7 @@ config DRM_IMX_PARALLEL_DISPLAY
config DRM_IMX_TVE
tristate "Support for TV and VGA displays"
depends on DRM_IMX
+ depends on COMMON_CLK
select REGMAP_MMIO
help
Choose this to enable the internal Television Encoder (TVe)
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 4d8e7685706e..3e8bece620df 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -49,11 +49,7 @@ static int imx_drm_atomic_check(struct drm_device *dev,
{
int ret;
- ret = drm_atomic_helper_check_modeset(dev, state);
- if (ret)
- return ret;
-
- ret = drm_atomic_helper_check_planes(dev, state);
+ ret = drm_atomic_helper_check(dev, state);
if (ret)
return ret;
@@ -229,6 +225,7 @@ static int imx_drm_bind(struct device *dev)
drm->mode_config.funcs = &imx_drm_mode_config_funcs;
drm->mode_config.helper_private = &imx_drm_mode_config_helpers;
drm->mode_config.allow_fb_modifiers = true;
+ drm->mode_config.normalize_zpos = true;
drm_mode_config_init(drm);
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 3c62167a9251..ec3602ebbc1c 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -34,6 +34,7 @@ struct ipu_crtc {
struct ipu_dc *dc;
struct ipu_di *di;
int irq;
+ struct drm_pending_vblank_event *event;
};
static inline struct ipu_crtc *to_ipu_crtc(struct drm_crtc *crtc)
@@ -173,8 +174,31 @@ static const struct drm_crtc_funcs ipu_crtc_funcs = {
static irqreturn_t ipu_irq_handler(int irq, void *dev_id)
{
struct ipu_crtc *ipu_crtc = dev_id;
+ struct drm_crtc *crtc = &ipu_crtc->base;
+ unsigned long flags;
+ int i;
+
+ drm_crtc_handle_vblank(crtc);
+
+ if (ipu_crtc->event) {
+ for (i = 0; i < ARRAY_SIZE(ipu_crtc->plane); i++) {
+ struct ipu_plane *plane = ipu_crtc->plane[i];
- drm_crtc_handle_vblank(&ipu_crtc->base);
+ if (!plane)
+ continue;
+
+ if (ipu_plane_atomic_update_pending(&plane->base))
+ break;
+ }
+
+ if (i == ARRAY_SIZE(ipu_crtc->plane)) {
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ drm_crtc_send_vblank_event(crtc, ipu_crtc->event);
+ ipu_crtc->event = NULL;
+ drm_crtc_vblank_put(crtc);
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ }
+ }
return IRQ_HANDLED;
}
@@ -223,8 +247,10 @@ static void ipu_crtc_atomic_flush(struct drm_crtc *crtc,
{
spin_lock_irq(&crtc->dev->event_lock);
if (crtc->state->event) {
+ struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
+
WARN_ON(drm_crtc_vblank_get(crtc));
- drm_crtc_arm_vblank_event(crtc, crtc->state->event);
+ ipu_crtc->event = crtc->state->event;
crtc->state->event = NULL;
}
spin_unlock_irq(&crtc->dev->event_lock);
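
Instead of arming the event for the next vblank unconditionally, the crtc now stashes it and the irq handler completes it only once no plane reports a pending update, so the flip event can no longer signal before the IDMAC has actually switched buffers. Reduced to a sketch with stand-in types:

#include <stddef.h>
#include <stdbool.h>

struct event;			/* stand-in for drm_pending_vblank_event */
struct crtc_sketch {
	struct event *event;	/* latched in atomic_flush */
	int nplanes;
	bool (*update_pending)(int plane);
};

/* Called from the vblank irq: complete the flip only when every plane
 * has really latched its new buffer. */
static void irq_sketch(struct crtc_sketch *c, void (*send)(struct event *))
{
	for (int i = 0; i < c->nplanes; i++)
		if (c->update_pending(i))
			return;		/* try again on the next vblank */

	if (c->event) {
		send(c->event);
		c->event = NULL;
	}
}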
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 21e964f6ab5c..d7a727a6e3d7 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -273,6 +273,7 @@ static void ipu_plane_destroy(struct drm_plane *plane)
static void ipu_plane_state_reset(struct drm_plane *plane)
{
+ unsigned int zpos = (plane->type == DRM_PLANE_TYPE_PRIMARY) ? 0 : 1;
struct ipu_plane_state *ipu_state;
if (plane->state) {
@@ -284,8 +285,11 @@ static void ipu_plane_state_reset(struct drm_plane *plane)
ipu_state = kzalloc(sizeof(*ipu_state), GFP_KERNEL);
- if (ipu_state)
+ if (ipu_state) {
__drm_atomic_helper_plane_reset(plane, &ipu_state->base);
+ ipu_state->base.zpos = zpos;
+ ipu_state->base.normalized_zpos = zpos;
+ }
}
static struct drm_plane_state *
@@ -560,6 +564,25 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
if (ipu_plane->dp_flow == IPU_DP_FLOW_SYNC_FG)
ipu_dp_set_window_pos(ipu_plane->dp, dst->x1, dst->y1);
+ switch (ipu_plane->dp_flow) {
+ case IPU_DP_FLOW_SYNC_BG:
+ if (state->normalized_zpos == 1) {
+ ipu_dp_set_global_alpha(ipu_plane->dp,
+ !fb->format->has_alpha, 0xff,
+ true);
+ } else {
+ ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true);
+ }
+ break;
+ case IPU_DP_FLOW_SYNC_FG:
+ if (state->normalized_zpos == 1) {
+ ipu_dp_set_global_alpha(ipu_plane->dp,
+ !fb->format->has_alpha, 0xff,
+ false);
+ }
+ break;
+ }
+
eba = drm_plane_state_to_eba(state, 0);
/*
@@ -582,6 +605,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch);
ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba);
ipu_idmac_select_buffer(ipu_plane->ipu_ch, !active);
+ ipu_plane->next_buf = !active;
if (ipu_plane_separate_alpha(ipu_plane)) {
active = ipu_idmac_get_current_buffer(ipu_plane->alpha_ch);
ipu_cpmem_set_buffer(ipu_plane->alpha_ch, !active,
@@ -595,34 +619,11 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
switch (ipu_plane->dp_flow) {
case IPU_DP_FLOW_SYNC_BG:
ipu_dp_setup_channel(ipu_plane->dp, ics, IPUV3_COLORSPACE_RGB);
- ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true);
break;
case IPU_DP_FLOW_SYNC_FG:
ipu_dp_setup_channel(ipu_plane->dp, ics,
IPUV3_COLORSPACE_UNKNOWN);
- /* Enable local alpha on partial plane */
- switch (fb->format->format) {
- case DRM_FORMAT_ARGB1555:
- case DRM_FORMAT_ABGR1555:
- case DRM_FORMAT_RGBA5551:
- case DRM_FORMAT_BGRA5551:
- case DRM_FORMAT_ARGB4444:
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_ABGR8888:
- case DRM_FORMAT_RGBA8888:
- case DRM_FORMAT_BGRA8888:
- case DRM_FORMAT_RGB565_A8:
- case DRM_FORMAT_BGR565_A8:
- case DRM_FORMAT_RGB888_A8:
- case DRM_FORMAT_BGR888_A8:
- case DRM_FORMAT_RGBX8888_A8:
- case DRM_FORMAT_BGRX8888_A8:
- ipu_dp_set_global_alpha(ipu_plane->dp, false, 0, false);
- break;
- default:
- ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true);
- break;
- }
+ break;
}
ipu_dmfc_config_wait4eot(ipu_plane->dmfc, drm_rect_width(dst));
@@ -709,6 +710,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 1, eba);
ipu_idmac_lock_enable(ipu_plane->ipu_ch, num_bursts);
ipu_plane_enable(ipu_plane);
+ ipu_plane->next_buf = -1;
}
static const struct drm_plane_helper_funcs ipu_plane_helper_funcs = {
@@ -718,6 +720,24 @@ static const struct drm_plane_helper_funcs ipu_plane_helper_funcs = {
.atomic_update = ipu_plane_atomic_update,
};
+bool ipu_plane_atomic_update_pending(struct drm_plane *plane)
+{
+ struct ipu_plane *ipu_plane = to_ipu_plane(plane);
+ struct drm_plane_state *state = plane->state;
+ struct ipu_plane_state *ipu_state = to_ipu_plane_state(state);
+
+ /* disabled crtcs must not block the update */
+ if (!state->crtc)
+ return false;
+
+ if (ipu_state->use_pre)
+ return ipu_prg_channel_configure_pending(ipu_plane->ipu_ch);
+ else if (ipu_plane->next_buf >= 0)
+ return ipu_idmac_get_current_buffer(ipu_plane->ipu_ch) !=
+ ipu_plane->next_buf;
+
+ return false;
+}
int ipu_planes_assign_pre(struct drm_device *dev,
struct drm_atomic_state *state)
{
@@ -806,6 +826,7 @@ struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
{
struct ipu_plane *ipu_plane;
const uint64_t *modifiers = ipu_format_modifiers;
+ unsigned int zpos = (type == DRM_PLANE_TYPE_PRIMARY) ? 0 : 1;
int ret;
DRM_DEBUG_KMS("channel %d, dp flow %d, possible_crtcs=0x%x\n",
@@ -836,5 +857,10 @@ struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
drm_plane_helper_add(&ipu_plane->base, &ipu_plane_helper_funcs);
+ if (dp == IPU_DP_FLOW_SYNC_BG || dp == IPU_DP_FLOW_SYNC_FG)
+ drm_plane_create_zpos_property(&ipu_plane->base, zpos, 0, 1);
+ else
+ drm_plane_create_zpos_immutable_property(&ipu_plane->base, 0);
+
return ipu_plane;
}
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.h b/drivers/gpu/drm/imx/ipuv3-plane.h
index e563ea17a827..15e85e15d35c 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.h
+++ b/drivers/gpu/drm/imx/ipuv3-plane.h
@@ -27,6 +27,7 @@ struct ipu_plane {
int dp_flow;
bool disabling;
+ int next_buf;
};
struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
@@ -48,5 +49,6 @@ int ipu_plane_irq(struct ipu_plane *plane);
void ipu_plane_disable(struct ipu_plane *ipu_plane, bool disable_dp_channel);
void ipu_plane_disable_deferred(struct drm_plane *plane);
+bool ipu_plane_atomic_update_pending(struct drm_plane *plane);
#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index d130825e2c75..b776fca571f3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -465,8 +465,6 @@ static void _dpu_crtc_setup_mixer_for_encoder(
return;
}
- mixer->encoder = enc;
-
cstate->num_mixers++;
DPU_DEBUG("setup mixer %d: lm %d\n",
i, mixer->hw_lm->idx - LM_0);
@@ -718,11 +716,8 @@ void dpu_crtc_commit_kickoff(struct drm_crtc *crtc, bool async)
* may delay and flush at an irq event (e.g. ppdone)
*/
drm_for_each_encoder_mask(encoder, crtc->dev,
- crtc->state->encoder_mask) {
- struct dpu_encoder_kickoff_params params = { 0 };
- dpu_encoder_prepare_for_kickoff(encoder, &params, async);
- }
-
+ crtc->state->encoder_mask)
+ dpu_encoder_prepare_for_kickoff(encoder, async);
if (!async) {
/* wait for frame_event_done completion */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
index dbfb38a1986c..e59d62be4980 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
@@ -84,14 +84,12 @@ struct dpu_crtc_smmu_state_data {
* struct dpu_crtc_mixer: stores the map for each virtual pipeline in the CRTC
* @hw_lm: LM HW Driver context
* @lm_ctl: CTL Path HW driver context
- * @encoder: Encoder attached to this lm & ctl
* @mixer_op_mode: mixer blending operation mode
* @flush_mask: mixer flush mask for ctl, mixer and pipe
*/
struct dpu_crtc_mixer {
struct dpu_hw_mixer *hw_lm;
struct dpu_hw_ctl *lm_ctl;
- struct drm_encoder *encoder;
u32 mixer_op_mode;
u32 flush_mask;
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 36af231bb73f..5aa3307f3f0c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -205,7 +205,7 @@ struct dpu_encoder_virt {
bool idle_pc_supported;
struct mutex rc_lock;
enum dpu_enc_rc_states rc_state;
- struct kthread_delayed_work delayed_off_work;
+ struct delayed_work delayed_off_work;
struct kthread_work vsync_event_work;
struct msm_display_topology topology;
bool mode_set_complete;
@@ -742,7 +742,6 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
{
struct dpu_encoder_virt *dpu_enc;
struct msm_drm_private *priv;
- struct msm_drm_thread *disp_thread;
bool is_vid_mode = false;
if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private ||
@@ -755,12 +754,6 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
is_vid_mode = dpu_enc->disp_info.capabilities &
MSM_DISPLAY_CAP_VID_MODE;
- if (drm_enc->crtc->index >= ARRAY_SIZE(priv->disp_thread)) {
- DPU_ERROR("invalid crtc index\n");
- return -EINVAL;
- }
- disp_thread = &priv->disp_thread[drm_enc->crtc->index];
-
/*
* when idle_pc is not supported, process only KICKOFF, STOP and MODESET
* events and return early for other events (ie wb display).
@@ -777,8 +770,7 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
switch (sw_event) {
case DPU_ENC_RC_EVENT_KICKOFF:
/* cancel delayed off work, if any */
- if (kthread_cancel_delayed_work_sync(
- &dpu_enc->delayed_off_work))
+ if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
sw_event);
@@ -837,10 +829,8 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
return 0;
}
- kthread_queue_delayed_work(
- &disp_thread->worker,
- &dpu_enc->delayed_off_work,
- msecs_to_jiffies(dpu_enc->idle_timeout));
+ queue_delayed_work(priv->wq, &dpu_enc->delayed_off_work,
+ msecs_to_jiffies(dpu_enc->idle_timeout));
trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
dpu_enc->idle_pc_supported, dpu_enc->rc_state,
@@ -849,8 +839,7 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
case DPU_ENC_RC_EVENT_PRE_STOP:
/* cancel delayed off work, if any */
- if (kthread_cancel_delayed_work_sync(
- &dpu_enc->delayed_off_work))
+ if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
sw_event);
@@ -1368,7 +1357,7 @@ static void dpu_encoder_frame_done_callback(
}
}
-static void dpu_encoder_off_work(struct kthread_work *work)
+static void dpu_encoder_off_work(struct work_struct *work)
{
struct dpu_encoder_virt *dpu_enc = container_of(work,
struct dpu_encoder_virt, delayed_off_work.work);
@@ -1756,15 +1745,14 @@ static void dpu_encoder_vsync_event_work_handler(struct kthread_work *work)
nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
}
-void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
- struct dpu_encoder_kickoff_params *params, bool async)
+void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc, bool async)
{
struct dpu_encoder_virt *dpu_enc;
struct dpu_encoder_phys *phys;
bool needs_hw_reset = false;
unsigned int i;
- if (!drm_enc || !params) {
+ if (!drm_enc) {
DPU_ERROR("invalid args\n");
return;
}
@@ -1778,7 +1766,7 @@ void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
phys = dpu_enc->phys_encs[i];
if (phys) {
if (phys->ops.prepare_for_kickoff)
- phys->ops.prepare_for_kickoff(phys, params);
+ phys->ops.prepare_for_kickoff(phys);
if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET)
needs_hw_reset = true;
}
@@ -2193,7 +2181,7 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
mutex_init(&dpu_enc->rc_lock);
- kthread_init_delayed_work(&dpu_enc->delayed_off_work,
+ INIT_DELAYED_WORK(&dpu_enc->delayed_off_work,
dpu_encoder_off_work);
dpu_enc->idle_timeout = IDLE_TIMEOUT;
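
The conversion from kthread_delayed_work to a regular delayed work item drops the dependency on a valid drm_enc->crtc->index into priv->disp_thread. These are the stock kernel workqueue calls now carrying the idle timer (a fragment from the surrounding diff, not standalone code):

/* at setup time */
INIT_DELAYED_WORK(&dpu_enc->delayed_off_work, dpu_encoder_off_work);

/* arm or re-arm the idle timer */
queue_delayed_work(priv->wq, &dpu_enc->delayed_off_work,
		   msecs_to_jiffies(dpu_enc->idle_timeout));

/* cancel, waiting for an already-running handler to finish */
cancel_delayed_work_sync(&dpu_enc->delayed_off_work);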
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
index 3f5dafe00580..d77f74fb26d4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
@@ -38,15 +38,6 @@ struct dpu_encoder_hw_resources {
};
/**
- * dpu_encoder_kickoff_params - info encoder requires at kickoff
- * @affected_displays: bitmask, bit set means the ROI of the commit lies within
- * the bounds of the physical display at the bit index
- */
-struct dpu_encoder_kickoff_params {
- unsigned long affected_displays;
-};
-
-/**
* dpu_encoder_get_hw_resources - Populate table of required hardware resources
* @encoder: encoder pointer
* @hw_res: resource table to populate with encoder required resources
@@ -88,11 +79,9 @@ void dpu_encoder_register_frame_event_callback(struct drm_encoder *encoder,
* Immediately: if no previous commit is outstanding.
* Delayed: Block until next trigger can be issued.
* @encoder: encoder pointer
- * @params: kickoff time parameters
* @async: true if this is an asynchronous commit
*/
-void dpu_encoder_prepare_for_kickoff(struct drm_encoder *encoder,
- struct dpu_encoder_kickoff_params *params, bool async);
+void dpu_encoder_prepare_for_kickoff(struct drm_encoder *encoder, bool async);
/**
* dpu_encoder_trigger_kickoff_pending - Clear the flush bits from previous
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
index 44e6f8b68e70..db94f3d3bea3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
@@ -144,8 +144,7 @@ struct dpu_encoder_phys_ops {
int (*wait_for_commit_done)(struct dpu_encoder_phys *phys_enc);
int (*wait_for_tx_complete)(struct dpu_encoder_phys *phys_enc);
int (*wait_for_vblank)(struct dpu_encoder_phys *phys_enc);
- void (*prepare_for_kickoff)(struct dpu_encoder_phys *phys_enc,
- struct dpu_encoder_kickoff_params *params);
+ void (*prepare_for_kickoff)(struct dpu_encoder_phys *phys_enc);
void (*handle_post_kickoff)(struct dpu_encoder_phys *phys_enc);
void (*trigger_start)(struct dpu_encoder_phys *phys_enc);
bool (*needs_single_flush)(struct dpu_encoder_phys *phys_enc);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index 99ab5ca9bed3..a399e1edd313 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -594,8 +594,7 @@ static void dpu_encoder_phys_cmd_get_hw_resources(
}
static void dpu_encoder_phys_cmd_prepare_for_kickoff(
- struct dpu_encoder_phys *phys_enc,
- struct dpu_encoder_kickoff_params *params)
+ struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
@@ -693,7 +692,7 @@ static int dpu_encoder_phys_cmd_wait_for_commit_done(
/* required for both controllers */
if (!rc && cmd_enc->serialize_wait4pp)
- dpu_encoder_phys_cmd_prepare_for_kickoff(phys_enc, NULL);
+ dpu_encoder_phys_cmd_prepare_for_kickoff(phys_enc);
return rc;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index acdab5b0db18..3c4eb470a82c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -587,14 +587,13 @@ static int dpu_encoder_phys_vid_wait_for_vblank(
}
static void dpu_encoder_phys_vid_prepare_for_kickoff(
- struct dpu_encoder_phys *phys_enc,
- struct dpu_encoder_kickoff_params *params)
+ struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_phys_vid *vid_enc;
struct dpu_hw_ctl *ctl;
int rc;
- if (!phys_enc || !params) {
+ if (!phys_enc) {
DPU_ERROR("invalid encoder/parameters\n");
return;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
index 0874f0a53bf9..f59fe1a9f4b9 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
@@ -263,13 +263,13 @@ static const struct dpu_format dpu_format_map[] = {
INTERLEAVED_RGB_FMT(RGB565,
0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
- C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
false, 2, 0,
DPU_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(BGR565,
0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
false, 2, 0,
DPU_FETCH_LINEAR, 1),
@@ -1137,36 +1137,3 @@ const struct msm_format *dpu_get_msm_format(
return &fmt->base;
return NULL;
}
-
-uint32_t dpu_populate_formats(
- const struct dpu_format_extended *format_list,
- uint32_t *pixel_formats,
- uint64_t *pixel_modifiers,
- uint32_t pixel_formats_max)
-{
- uint32_t i, fourcc_format;
-
- if (!format_list || !pixel_formats)
- return 0;
-
- for (i = 0, fourcc_format = 0;
- format_list->fourcc_format && i < pixel_formats_max;
- ++format_list) {
- /* verify if listed format is in dpu_format_map? */
-
- /* optionally return modified formats */
- if (pixel_modifiers) {
- /* assume same modifier for all fb planes */
- pixel_formats[i] = format_list->fourcc_format;
- pixel_modifiers[i++] = format_list->modifier;
- } else {
- /* assume base formats grouped together */
- if (fourcc_format != format_list->fourcc_format) {
- fourcc_format = format_list->fourcc_format;
- pixel_formats[i++] = fourcc_format;
- }
- }
- }
-
- return i;
-}
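
The RGB565/BGR565 fix above is easiest to verify against the fourcc bit layout: drm_fourcc.h defines DRM_FORMAT_RGB565 as little-endian [15:0] R:G:B 5:6:5, i.e. red in the most-significant bits, which is presumably why the red component now leads the RGB565 entry and trails the BGR565 one. A standalone userspace sketch (illustrative only, not driver code):

#include <stdint.h>
#include <stdio.h>

/* DRM_FORMAT_RGB565: [15:0] R:G:B 5:6:5 little endian */
static uint16_t pack_rgb565(uint8_t r, uint8_t g, uint8_t b)
{
	return ((uint16_t)(r >> 3) << 11) |
	       ((uint16_t)(g >> 2) << 5) |
	       (uint16_t)(b >> 3);
}

int main(void)
{
	printf("red   -> 0x%04x\n", pack_rgb565(0xff, 0x00, 0x00)); /* 0xf800 */
	printf("green -> 0x%04x\n", pack_rgb565(0x00, 0xff, 0x00)); /* 0x07e0 */
	printf("blue  -> 0x%04x\n", pack_rgb565(0x00, 0x00, 0xff)); /* 0x001f */
	return 0;
}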
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
index a54451d8d011..c02c81e7a667 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
@@ -41,20 +41,6 @@ const struct msm_format *dpu_get_msm_format(
const uint64_t modifiers);
/**
- * dpu_populate_formats - populate the given array with fourcc codes supported
- * @format_list: pointer to list of possible formats
- * @pixel_formats: array to populate with fourcc codes
- * @pixel_modifiers: array to populate with drm modifiers, can be NULL
- * @pixel_formats_max: length of pixel formats array
- * Return: number of elements populated
- */
-uint32_t dpu_populate_formats(
- const struct dpu_format_extended *format_list,
- uint32_t *pixel_formats,
- uint64_t *pixel_modifiers,
- uint32_t pixel_formats_max);
-
-/**
* dpu_format_check_modified_format - validate format and buffers for
* dpu non-standard, i.e. modified format
* @kms: kms driver
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index 512ac0834d2b..df6852cc98b9 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -151,7 +151,9 @@ static const struct dpu_sspp_blks_common sdm845_sspp_common = {
.id = DPU_SSPP_CSC_10BIT, \
.base = 0x1a00, .len = 0x100,}, \
.format_list = plane_formats_yuv, \
+ .num_formats = ARRAY_SIZE(plane_formats_yuv), \
.virt_format_list = plane_formats, \
+ .virt_num_formats = ARRAY_SIZE(plane_formats), \
}
#define _DMA_SBLK(num, sdma_pri) \
@@ -163,7 +165,9 @@ static const struct dpu_sspp_blks_common sdm845_sspp_common = {
.src_blk = {.name = STRCAT("sspp_src_", num), \
.id = DPU_SSPP_SRC, .base = 0x00, .len = 0x150,}, \
.format_list = plane_formats, \
+ .num_formats = ARRAY_SIZE(plane_formats), \
.virt_format_list = plane_formats, \
+ .virt_num_formats = ARRAY_SIZE(plane_formats), \
}
static const struct dpu_sspp_sub_blks sdm845_vig_sblk_0 = _VIG_SBLK("0", 5);
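
One reason the counts are recorded inside the macros above: once format_list decays to a plain 'const u32 *' in struct dpu_sspp_sub_blks, the element count is unrecoverable, and the kernel's ARRAY_SIZE() deliberately refuses to build when handed a pointer (via __must_be_array()). A minimal sketch of the idiom, with hypothetical names:

#include <stddef.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))	/* plain-C stand-in */

/* 'XR24', 'XB24', 'RG16' fourcc values */
static const unsigned int demo_formats[] = { 0x34325258, 0x34324258, 0x36314752 };

struct demo_sblk {
	const unsigned int *format_list;
	unsigned int num_formats;
};

/* The count must be captured here, while 'demo_formats' is still an
 * array expression; after the assignment only the pointer survives. */
static const struct demo_sblk demo = {
	.format_list = demo_formats,
	.num_formats = ARRAY_SIZE(demo_formats),
};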
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index 144358a3d0fb..a55653b2e466 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -252,17 +252,6 @@ struct dpu_pp_blk {
};
/**
- * struct dpu_format_extended - define dpu specific pixel format+modifier
- * @fourcc_format: Base FOURCC pixel format code
- * @modifier: 64-bit drm format modifier, same modifier must be applied to all
- * framebuffer planes
- */
-struct dpu_format_extended {
- uint32_t fourcc_format;
- uint64_t modifier;
-};
-
-/**
* enum dpu_qos_lut_usage - define QoS LUT use cases
*/
enum dpu_qos_lut_usage {
@@ -348,7 +337,9 @@ struct dpu_sspp_blks_common {
* @pcc_blk:
* @igc_blk:
* @format_list: Pointer to list of supported formats
+ * @num_formats: Number of supported formats
* @virt_format_list: Pointer to list of supported formats for virtual planes
+ * @virt_num_formats: Number of supported formats for virtual planes
*/
struct dpu_sspp_sub_blks {
const struct dpu_sspp_blks_common *common;
@@ -366,8 +357,10 @@ struct dpu_sspp_sub_blks {
struct dpu_pp_blk pcc_blk;
struct dpu_pp_blk igc_blk;
- const struct dpu_format_extended *format_list;
- const struct dpu_format_extended *virt_format_list;
+ const u32 *format_list;
+ u32 num_formats;
+ const u32 *virt_format_list;
+ u32 virt_num_formats;
};
/**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h
index 3c9f028628ef..d09730985951 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h
@@ -12,157 +12,81 @@
#include "dpu_hw_mdss.h"
-static const struct dpu_format_extended plane_formats[] = {
- {DRM_FORMAT_ARGB8888, 0},
- {DRM_FORMAT_ABGR8888, 0},
- {DRM_FORMAT_RGBA8888, 0},
- {DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
- {DRM_FORMAT_BGRA8888, 0},
- {DRM_FORMAT_XRGB8888, 0},
- {DRM_FORMAT_RGBX8888, 0},
- {DRM_FORMAT_BGRX8888, 0},
- {DRM_FORMAT_XBGR8888, 0},
- {DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
- {DRM_FORMAT_RGB888, 0},
- {DRM_FORMAT_BGR888, 0},
- {DRM_FORMAT_RGB565, 0},
- {DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
- {DRM_FORMAT_BGR565, 0},
- {DRM_FORMAT_ARGB1555, 0},
- {DRM_FORMAT_ABGR1555, 0},
- {DRM_FORMAT_RGBA5551, 0},
- {DRM_FORMAT_BGRA5551, 0},
- {DRM_FORMAT_XRGB1555, 0},
- {DRM_FORMAT_XBGR1555, 0},
- {DRM_FORMAT_RGBX5551, 0},
- {DRM_FORMAT_BGRX5551, 0},
- {DRM_FORMAT_ARGB4444, 0},
- {DRM_FORMAT_ABGR4444, 0},
- {DRM_FORMAT_RGBA4444, 0},
- {DRM_FORMAT_BGRA4444, 0},
- {DRM_FORMAT_XRGB4444, 0},
- {DRM_FORMAT_XBGR4444, 0},
- {DRM_FORMAT_RGBX4444, 0},
- {DRM_FORMAT_BGRX4444, 0},
- {0, 0},
+static const uint32_t qcom_compressed_supported_formats[] = {
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_BGR565,
};
-static const struct dpu_format_extended plane_formats_yuv[] = {
- {DRM_FORMAT_ARGB8888, 0},
- {DRM_FORMAT_ABGR8888, 0},
- {DRM_FORMAT_RGBA8888, 0},
- {DRM_FORMAT_BGRX8888, 0},
- {DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
- {DRM_FORMAT_BGRA8888, 0},
- {DRM_FORMAT_XRGB8888, 0},
- {DRM_FORMAT_XBGR8888, 0},
- {DRM_FORMAT_RGBX8888, 0},
- {DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
- {DRM_FORMAT_RGB888, 0},
- {DRM_FORMAT_BGR888, 0},
- {DRM_FORMAT_RGB565, 0},
- {DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
- {DRM_FORMAT_BGR565, 0},
- {DRM_FORMAT_ARGB1555, 0},
- {DRM_FORMAT_ABGR1555, 0},
- {DRM_FORMAT_RGBA5551, 0},
- {DRM_FORMAT_BGRA5551, 0},
- {DRM_FORMAT_XRGB1555, 0},
- {DRM_FORMAT_XBGR1555, 0},
- {DRM_FORMAT_RGBX5551, 0},
- {DRM_FORMAT_BGRX5551, 0},
- {DRM_FORMAT_ARGB4444, 0},
- {DRM_FORMAT_ABGR4444, 0},
- {DRM_FORMAT_RGBA4444, 0},
- {DRM_FORMAT_BGRA4444, 0},
- {DRM_FORMAT_XRGB4444, 0},
- {DRM_FORMAT_XBGR4444, 0},
- {DRM_FORMAT_RGBX4444, 0},
- {DRM_FORMAT_BGRX4444, 0},
-
- {DRM_FORMAT_NV12, 0},
- {DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED},
- {DRM_FORMAT_NV21, 0},
- {DRM_FORMAT_NV16, 0},
- {DRM_FORMAT_NV61, 0},
- {DRM_FORMAT_VYUY, 0},
- {DRM_FORMAT_UYVY, 0},
- {DRM_FORMAT_YUYV, 0},
- {DRM_FORMAT_YVYU, 0},
- {DRM_FORMAT_YUV420, 0},
- {DRM_FORMAT_YVU420, 0},
- {0, 0},
-};
-
-static const struct dpu_format_extended cursor_formats[] = {
- {DRM_FORMAT_ARGB8888, 0},
- {DRM_FORMAT_ABGR8888, 0},
- {DRM_FORMAT_RGBA8888, 0},
- {DRM_FORMAT_BGRA8888, 0},
- {DRM_FORMAT_XRGB8888, 0},
- {DRM_FORMAT_ARGB1555, 0},
- {DRM_FORMAT_ABGR1555, 0},
- {DRM_FORMAT_RGBA5551, 0},
- {DRM_FORMAT_BGRA5551, 0},
- {DRM_FORMAT_ARGB4444, 0},
- {DRM_FORMAT_ABGR4444, 0},
- {DRM_FORMAT_RGBA4444, 0},
- {DRM_FORMAT_BGRA4444, 0},
- {0, 0},
+static const uint32_t plane_formats[] = {
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_BGRA5551,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_RGBX5551,
+ DRM_FORMAT_BGRX5551,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_ABGR4444,
+ DRM_FORMAT_RGBA4444,
+ DRM_FORMAT_BGRA4444,
+ DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_XBGR4444,
+ DRM_FORMAT_RGBX4444,
+ DRM_FORMAT_BGRX4444,
};
-static const struct dpu_format_extended wb2_formats[] = {
- {DRM_FORMAT_RGB565, 0},
- {DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
- {DRM_FORMAT_RGB888, 0},
- {DRM_FORMAT_ARGB8888, 0},
- {DRM_FORMAT_RGBA8888, 0},
- {DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
- {DRM_FORMAT_XRGB8888, 0},
- {DRM_FORMAT_RGBX8888, 0},
- {DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
- {DRM_FORMAT_ARGB1555, 0},
- {DRM_FORMAT_RGBA5551, 0},
- {DRM_FORMAT_XRGB1555, 0},
- {DRM_FORMAT_RGBX5551, 0},
- {DRM_FORMAT_ARGB4444, 0},
- {DRM_FORMAT_RGBA4444, 0},
- {DRM_FORMAT_RGBX4444, 0},
- {DRM_FORMAT_XRGB4444, 0},
-
- {DRM_FORMAT_BGR565, 0},
- {DRM_FORMAT_BGR888, 0},
- {DRM_FORMAT_ABGR8888, 0},
- {DRM_FORMAT_BGRA8888, 0},
- {DRM_FORMAT_BGRX8888, 0},
- {DRM_FORMAT_XBGR8888, 0},
- {DRM_FORMAT_ABGR1555, 0},
- {DRM_FORMAT_BGRA5551, 0},
- {DRM_FORMAT_XBGR1555, 0},
- {DRM_FORMAT_BGRX5551, 0},
- {DRM_FORMAT_ABGR4444, 0},
- {DRM_FORMAT_BGRA4444, 0},
- {DRM_FORMAT_BGRX4444, 0},
- {DRM_FORMAT_XBGR4444, 0},
-
- {DRM_FORMAT_YUV420, 0},
- {DRM_FORMAT_NV12, 0},
- {DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED},
- {DRM_FORMAT_NV16, 0},
- {DRM_FORMAT_YUYV, 0},
-
- {0, 0},
-};
+static const uint32_t plane_formats_yuv[] = {
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_BGRA5551,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_RGBX5551,
+ DRM_FORMAT_BGRX5551,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_ABGR4444,
+ DRM_FORMAT_RGBA4444,
+ DRM_FORMAT_BGRA4444,
+ DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_XBGR4444,
+ DRM_FORMAT_RGBX4444,
+ DRM_FORMAT_BGRX4444,
-static const struct dpu_format_extended rgb_10bit_formats[] = {
- {DRM_FORMAT_BGRA1010102, 0},
- {DRM_FORMAT_BGRX1010102, 0},
- {DRM_FORMAT_RGBA1010102, 0},
- {DRM_FORMAT_RGBX1010102, 0},
- {DRM_FORMAT_ABGR2101010, 0},
- {DRM_FORMAT_ABGR2101010, DRM_FORMAT_MOD_QCOM_COMPRESSED},
- {DRM_FORMAT_XBGR2101010, 0},
- {DRM_FORMAT_XBGR2101010, DRM_FORMAT_MOD_QCOM_COMPRESSED},
- {DRM_FORMAT_ARGB2101010, 0},
- {DRM_FORMAT_XRGB2101010, 0},
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV21,
+ DRM_FORMAT_NV16,
+ DRM_FORMAT_NV61,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_YUV420,
+ DRM_FORMAT_YVU420,
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
index c0b7f0049365..8a28a03ac6a9 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
@@ -170,10 +170,6 @@
/**
* AD4 interrupt status bit definitions
*/
-#define DPU_INTR_BRIGHTPR_UPDATED BIT(4)
-#define DPU_INTR_DARKENH_UPDATED BIT(3)
-#define DPU_INTR_STREN_OUTROI_UPDATED BIT(2)
-#define DPU_INTR_STREN_INROI_UPDATED BIT(1)
#define DPU_INTR_BACKLIGHT_UPDATED BIT(0)
/**
* struct dpu_intr_reg - array of DPU register sets
@@ -782,18 +778,6 @@ static int dpu_hw_intr_irqidx_lookup(enum dpu_intr_type intr_type,
return -EINVAL;
}
-static void dpu_hw_intr_set_mask(struct dpu_hw_intr *intr, uint32_t reg_off,
- uint32_t mask)
-{
- if (!intr)
- return;
-
- DPU_REG_WRITE(&intr->hw, reg_off, mask);
-
- /* ensure register writes go through */
- wmb();
-}
-
static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr,
void (*cbfunc)(void *, int),
void *arg)
@@ -1004,18 +988,6 @@ static int dpu_hw_intr_disable_irqs(struct dpu_hw_intr *intr)
return 0;
}
-static int dpu_hw_intr_get_valid_interrupts(struct dpu_hw_intr *intr,
- uint32_t *mask)
-{
- if (!intr || !mask)
- return -EINVAL;
-
- *mask = IRQ_SOURCE_MDP | IRQ_SOURCE_DSI0 | IRQ_SOURCE_DSI1
- | IRQ_SOURCE_HDMI | IRQ_SOURCE_EDP;
-
- return 0;
-}
-
static void dpu_hw_intr_get_interrupt_statuses(struct dpu_hw_intr *intr)
{
int i;
@@ -1065,19 +1037,6 @@ static void dpu_hw_intr_clear_intr_status_nolock(struct dpu_hw_intr *intr,
wmb();
}
-static void dpu_hw_intr_clear_interrupt_status(struct dpu_hw_intr *intr,
- int irq_idx)
-{
- unsigned long irq_flags;
-
- if (!intr)
- return;
-
- spin_lock_irqsave(&intr->irq_lock, irq_flags);
- dpu_hw_intr_clear_intr_status_nolock(intr, irq_idx);
- spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
-}
-
static u32 dpu_hw_intr_get_interrupt_status(struct dpu_hw_intr *intr,
int irq_idx, bool clear)
{
@@ -1113,16 +1072,13 @@ static u32 dpu_hw_intr_get_interrupt_status(struct dpu_hw_intr *intr,
static void __setup_intr_ops(struct dpu_hw_intr_ops *ops)
{
- ops->set_mask = dpu_hw_intr_set_mask;
ops->irq_idx_lookup = dpu_hw_intr_irqidx_lookup;
ops->enable_irq = dpu_hw_intr_enable_irq;
ops->disable_irq = dpu_hw_intr_disable_irq;
ops->dispatch_irqs = dpu_hw_intr_dispatch_irq;
ops->clear_all_irqs = dpu_hw_intr_clear_irqs;
ops->disable_all_irqs = dpu_hw_intr_disable_irqs;
- ops->get_valid_interrupts = dpu_hw_intr_get_valid_interrupts;
ops->get_interrupt_statuses = dpu_hw_intr_get_interrupt_statuses;
- ops->clear_interrupt_status = dpu_hw_intr_clear_interrupt_status;
ops->clear_intr_status_nolock = dpu_hw_intr_clear_intr_status_nolock;
ops->get_interrupt_status = dpu_hw_intr_get_interrupt_status;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
index 61e4cba36562..4d7a1c727ce2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
@@ -20,13 +20,6 @@
#include "dpu_hw_util.h"
#include "dpu_hw_mdss.h"
-#define IRQ_SOURCE_MDP BIT(0)
-#define IRQ_SOURCE_DSI0 BIT(4)
-#define IRQ_SOURCE_DSI1 BIT(5)
-#define IRQ_SOURCE_HDMI BIT(8)
-#define IRQ_SOURCE_EDP BIT(12)
-#define IRQ_SOURCE_MHL BIT(16)
-
/**
* dpu_intr_type - HW Interrupt Type
* @DPU_IRQ_TYPE_WB_ROT_COMP: WB rotator done
@@ -96,18 +89,6 @@ struct dpu_hw_intr;
*/
struct dpu_hw_intr_ops {
/**
- * set_mask - Programs the given interrupt register with the
- * given interrupt mask. Register value will get overwritten.
- * @intr: HW interrupt handle
- * @reg_off: MDSS HW register offset
- * @irqmask: IRQ mask value
- */
- void (*set_mask)(
- struct dpu_hw_intr *intr,
- uint32_t reg,
- uint32_t irqmask);
-
- /**
* irq_idx_lookup - Lookup IRQ index on the HW interrupt type
* Used for all irq related ops
* @intr_type: Interrupt type defined in dpu_intr_type
@@ -177,16 +158,6 @@ struct dpu_hw_intr_ops {
struct dpu_hw_intr *intr);
/**
- * clear_interrupt_status - Clears HW interrupt status based on given
- * lookup IRQ index.
- * @intr: HW interrupt handle
- * @irq_idx: Lookup irq index return from irq_idx_lookup
- */
- void (*clear_interrupt_status)(
- struct dpu_hw_intr *intr,
- int irq_idx);
-
- /**
* clear_intr_status_nolock() - clears the HW interrupts without lock
* @intr: HW interrupt handle
* @irq_idx: Lookup irq index returned from irq_idx_lookup
@@ -206,21 +177,6 @@ struct dpu_hw_intr_ops {
struct dpu_hw_intr *intr,
int irq_idx,
bool clear);
-
- /**
- * get_valid_interrupts - Gets a mask of all valid interrupt sources
- * within DPU. These are actually status bits
- * within interrupt registers that specify the
- * source of the interrupt in IRQs. For example,
- * valid interrupt sources can be MDP, DSI,
- * HDMI etc.
- * @intr: HW interrupt handle
- * @mask: Returning the interrupt source MASK
- * @return: 0 for success, otherwise failure
- */
- int (*get_valid_interrupts)(
- struct dpu_hw_intr *intr,
- uint32_t *mask);
};
/**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
index 68c54d2c9677..1ab8d4a889f7 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
@@ -258,12 +258,6 @@ enum dpu_vbif {
VBIF_NRT = VBIF_1
};
-enum dpu_iommu_domain {
- DPU_IOMMU_DOMAIN_UNSECURE,
- DPU_IOMMU_DOMAIN_SECURE,
- DPU_IOMMU_DOMAIN_MAX
-};
-
/**
* DPU HW,Component order color map
*/
@@ -358,7 +352,6 @@ enum dpu_3d_blend_mode {
* @alpha_enable: whether the format has an alpha channel
* @num_planes: number of planes (including meta data planes)
* @fetch_mode: linear, tiled, or ubwc hw fetch behavior
- * @is_yuv: is format a yuv variant
* @flag: usage bit flags
* @tile_width: format tile width
* @tile_height: format tile height
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
index 321fc64ddd0e..efe70c508ee0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
@@ -18,7 +18,6 @@
#include "dpu_hw_mdss.h"
#define REG_MASK(n) ((BIT(n)) - 1)
-struct dpu_format_extended;
/*
* This is the common struct maintained by each sub block
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index 4d67b3c96702..885bf88afa3e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -405,35 +405,38 @@ static void dpu_kms_wait_for_commit_done(struct msm_kms *kms,
}
}
-static void _dpu_kms_initialize_dsi(struct drm_device *dev,
+static int _dpu_kms_initialize_dsi(struct drm_device *dev,
struct msm_drm_private *priv,
struct dpu_kms *dpu_kms)
{
struct drm_encoder *encoder = NULL;
- int i, rc;
+ int i, rc = 0;
+
+ if (!(priv->dsi[0] || priv->dsi[1]))
+ return rc;
/*TODO: Support two independent DSI connectors */
encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI);
- if (IS_ERR_OR_NULL(encoder)) {
+ if (IS_ERR(encoder)) {
DPU_ERROR("encoder init failed for dsi display\n");
- return;
+ return PTR_ERR(encoder);
}
priv->encoders[priv->num_encoders++] = encoder;
for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
- if (!priv->dsi[i]) {
- DPU_DEBUG("invalid msm_dsi for ctrl %d\n", i);
- return;
- }
+ if (!priv->dsi[i])
+ continue;
rc = msm_dsi_modeset_init(priv->dsi[i], dev, encoder);
if (rc) {
DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
i, rc);
- continue;
+ break;
}
}
+
+ return rc;
}
/**
@@ -444,16 +447,16 @@ static void _dpu_kms_initialize_dsi(struct drm_device *dev,
* @dpu_kms: Pointer to dpu kms structure
* Returns: Zero on success
*/
-static void _dpu_kms_setup_displays(struct drm_device *dev,
+static int _dpu_kms_setup_displays(struct drm_device *dev,
struct msm_drm_private *priv,
struct dpu_kms *dpu_kms)
{
- _dpu_kms_initialize_dsi(dev, priv, dpu_kms);
-
/**
* Extend this function to initialize other
* types of displays
*/
+
+ return _dpu_kms_initialize_dsi(dev, priv, dpu_kms);
}
static void _dpu_kms_drm_obj_destroy(struct dpu_kms *dpu_kms)
@@ -516,7 +519,9 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
* Create encoder and query display drivers to create
* bridges and connectors
*/
- _dpu_kms_setup_displays(dev, priv, dpu_kms);
+ ret = _dpu_kms_setup_displays(dev, priv, dpu_kms);
+ if (ret)
+ goto fail;
max_crtc_count = min(catalog->mixer_count, priv->num_encoders);
@@ -627,6 +632,10 @@ static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_RT]);
dpu_kms->vbif[VBIF_RT] = NULL;
+ if (dpu_kms->hw_mdp)
+ dpu_hw_mdp_destroy(dpu_kms->hw_mdp);
+ dpu_kms->hw_mdp = NULL;
+
if (dpu_kms->mmio)
devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->mmio);
dpu_kms->mmio = NULL;
@@ -877,8 +886,7 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
goto power_error;
}
- rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mmio,
- dpu_kms->dev);
+ rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mmio);
if (rc) {
DPU_ERROR("rm init failed: %d\n", rc);
goto power_error;
@@ -886,11 +894,10 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
dpu_kms->rm_init = true;
- dpu_kms->hw_mdp = dpu_rm_get_mdp(&dpu_kms->rm);
- if (IS_ERR_OR_NULL(dpu_kms->hw_mdp)) {
+ dpu_kms->hw_mdp = dpu_hw_mdptop_init(MDP_TOP, dpu_kms->mmio,
+ dpu_kms->catalog);
+ if (IS_ERR(dpu_kms->hw_mdp)) {
rc = PTR_ERR(dpu_kms->hw_mdp);
- if (!dpu_kms->hw_mdp)
- rc = -EINVAL;
DPU_ERROR("failed to get hw_mdp: %d\n", rc);
dpu_kms->hw_mdp = NULL;
goto power_error;
@@ -926,16 +933,6 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
goto hw_intr_init_err;
}
- /*
- * _dpu_kms_drm_obj_init should create the DRM related objects
- * i.e. CRTCs, planes, encoders, connectors and so forth
- */
- rc = _dpu_kms_drm_obj_init(dpu_kms);
- if (rc) {
- DPU_ERROR("modeset init failed: %d\n", rc);
- goto drm_obj_init_err;
- }
-
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
@@ -952,6 +949,16 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
*/
dev->mode_config.allow_fb_modifiers = true;
+ /*
+ * _dpu_kms_drm_obj_init should create the DRM related objects
+ * i.e. CRTCs, planes, encoders, connectors and so forth
+ */
+ rc = _dpu_kms_drm_obj_init(dpu_kms);
+ if (rc) {
+ DPU_ERROR("modeset init failed: %d\n", rc);
+ goto drm_obj_init_err;
+ }
+
dpu_vbif_init_memtypes(dpu_kms);
pm_runtime_put_sync(&dpu_kms->pdev->dev);
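
A pattern worth calling out in the hunks above: the IS_ERR_OR_NULL() checks become plain IS_ERR() because dpu_encoder_init() and dpu_hw_mdptop_init() follow the usual constructor convention of returning either a valid pointer or an ERR_PTR()-encoded errno, never NULL. A reduced sketch of that convention (hypothetical constructor, not the driver's):

#include <linux/err.h>
#include <linux/slab.h>

struct thing { int dummy; };	/* hypothetical */

static struct thing *thing_init(void)
{
	struct thing *t = kzalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return ERR_PTR(-ENOMEM);	/* error pointer, not NULL */
	return t;
}

static int thing_user(void)
{
	struct thing *t = thing_init();

	/* IS_ERR() is sufficient; an extra NULL check would mask real
	 * errors with a made-up errno, which is what the hunk removes. */
	if (IS_ERR(t))
		return PTR_ERR(t);
	kfree(t);
	return 0;
}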
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
index cb307a2abf06..7316b4ab1b85 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
@@ -23,11 +23,14 @@ struct dpu_mdss {
struct dpu_irq_controller irq_controller;
};
-static irqreturn_t dpu_mdss_irq(int irq, void *arg)
+static void dpu_mdss_irq(struct irq_desc *desc)
{
- struct dpu_mdss *dpu_mdss = arg;
+ struct dpu_mdss *dpu_mdss = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
u32 interrupts;
+ chained_irq_enter(chip, desc);
+
interrupts = readl_relaxed(dpu_mdss->mmio + HW_INTR_STATUS);
while (interrupts) {
@@ -39,20 +42,20 @@ static irqreturn_t dpu_mdss_irq(int irq, void *arg)
hwirq);
if (mapping == 0) {
DRM_ERROR("couldn't find irq mapping for %lu\n", hwirq);
- return IRQ_NONE;
+ break;
}
rc = generic_handle_irq(mapping);
if (rc < 0) {
DRM_ERROR("handle irq fail: irq=%lu mapping=%u rc=%d\n",
hwirq, mapping, rc);
- return IRQ_NONE;
+ break;
}
interrupts &= ~(1 << hwirq);
}
- return IRQ_HANDLED;
+ chained_irq_exit(chip, desc);
}
static void dpu_mdss_irq_mask(struct irq_data *irqd)
@@ -83,16 +86,16 @@ static struct irq_chip dpu_mdss_irq_chip = {
.irq_unmask = dpu_mdss_irq_unmask,
};
+static struct lock_class_key dpu_mdss_lock_key, dpu_mdss_request_key;
+
static int dpu_mdss_irqdomain_map(struct irq_domain *domain,
unsigned int irq, irq_hw_number_t hwirq)
{
struct dpu_mdss *dpu_mdss = domain->host_data;
- int ret;
+ irq_set_lockdep_class(irq, &dpu_mdss_lock_key, &dpu_mdss_request_key);
irq_set_chip_and_handler(irq, &dpu_mdss_irq_chip, handle_level_irq);
- ret = irq_set_chip_data(irq, dpu_mdss);
-
- return ret;
+ return irq_set_chip_data(irq, dpu_mdss);
}
static const struct irq_domain_ops dpu_mdss_irqdomain_ops = {
@@ -159,11 +162,13 @@ static void dpu_mdss_destroy(struct drm_device *dev)
struct msm_drm_private *priv = dev->dev_private;
struct dpu_mdss *dpu_mdss = to_dpu_mdss(priv->mdss);
struct dss_module_power *mp = &dpu_mdss->mp;
+ int irq;
pm_runtime_suspend(dev->dev);
pm_runtime_disable(dev->dev);
_dpu_mdss_irq_domain_fini(dpu_mdss);
- free_irq(platform_get_irq(pdev, 0), dpu_mdss);
+ irq = platform_get_irq(pdev, 0);
+ irq_set_chained_handler_and_data(irq, NULL, NULL);
msm_dss_put_clk(mp->clk_config, mp->num_clk);
devm_kfree(&pdev->dev, mp->clk_config);
@@ -187,6 +192,7 @@ int dpu_mdss_init(struct drm_device *dev)
struct dpu_mdss *dpu_mdss;
struct dss_module_power *mp;
int ret = 0;
+ int irq;
dpu_mdss = devm_kzalloc(dev->dev, sizeof(*dpu_mdss), GFP_KERNEL);
if (!dpu_mdss)
@@ -219,12 +225,12 @@ int dpu_mdss_init(struct drm_device *dev)
if (ret)
goto irq_domain_error;
- ret = request_irq(platform_get_irq(pdev, 0),
- dpu_mdss_irq, 0, "dpu_mdss_isr", dpu_mdss);
- if (ret) {
- DPU_ERROR("failed to init irq: %d\n", ret);
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
goto irq_error;
}
+
+ irq_set_chained_handler_and_data(irq, dpu_mdss_irq,
+ dpu_mdss);
pm_runtime_enable(dev->dev);
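
Two details of the dpu_mdss conversion are easy to miss. A chained handler runs straight out of the parent interrupt's context with no irqaction of its own, so the demux loop must be bracketed by chained_irq_enter()/chained_irq_exit(); and the irq_set_lockdep_class() call gives the demuxed descriptors their own lock classes, which keeps lockdep from reporting false recursion when a child desc->lock is taken while the parent's is held. A reduced skeleton of the pattern (container type and register offset hypothetical):

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>

#define DEMO_STATUS_REG 0x10		/* hypothetical */

struct demo_intc {			/* hypothetical */
	void __iomem *mmio;
	struct irq_domain *domain;
};

static void demo_demux_irq(struct irq_desc *desc)
{
	struct demo_intc *intc = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long status;

	chained_irq_enter(chip, desc);

	status = readl_relaxed(intc->mmio + DEMO_STATUS_REG);
	while (status) {
		unsigned long hwirq = __ffs(status);
		unsigned int virq = irq_find_mapping(intc->domain, hwirq);

		if (virq)	/* the real code logs a missing mapping */
			generic_handle_irq(virq);
		status &= ~BIT(hwirq);
	}

	chained_irq_exit(chip, desc);
}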
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index 6aefcd6db46b..b01183b309b9 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -95,8 +95,6 @@ struct dpu_plane {
enum dpu_sspp pipe;
uint32_t features; /* capabilities from catalog */
- uint32_t nformats;
- uint32_t formats[64];
struct dpu_hw_pipe *pipe_hw;
struct dpu_hw_pipe_cfg pipe_cfg;
@@ -121,6 +119,12 @@ struct dpu_plane {
bool debugfs_default_scale;
};
+static const uint64_t supported_format_modifiers[] = {
+ DRM_FORMAT_MOD_QCOM_COMPRESSED,
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
#define to_dpu_plane(x) container_of(x, struct dpu_plane, base)
static struct dpu_kms *_dpu_plane_get_kms(struct drm_plane *plane)
@@ -1410,6 +1414,23 @@ static void dpu_plane_early_unregister(struct drm_plane *plane)
debugfs_remove_recursive(pdpu->debugfs_root);
}
+static bool dpu_plane_format_mod_supported(struct drm_plane *plane,
+ uint32_t format, uint64_t modifier)
+{
+ if (modifier == DRM_FORMAT_MOD_LINEAR)
+ return true;
+
+ if (modifier == DRM_FORMAT_MOD_QCOM_COMPRESSED) {
+ int i;
+ for (i = 0; i < ARRAY_SIZE(qcom_compressed_supported_formats); i++) {
+ if (format == qcom_compressed_supported_formats[i])
+ return true;
+ }
+ }
+
+ return false;
+}
+
static const struct drm_plane_funcs dpu_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
@@ -1419,6 +1440,7 @@ static const struct drm_plane_funcs dpu_plane_funcs = {
.atomic_destroy_state = dpu_plane_destroy_state,
.late_register = dpu_plane_late_register,
.early_unregister = dpu_plane_early_unregister,
+ .format_mod_supported = dpu_plane_format_mod_supported,
};
static const struct drm_plane_helper_funcs dpu_plane_helper_funcs = {
@@ -1444,11 +1466,12 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
unsigned long possible_crtcs, u32 master_plane_id)
{
struct drm_plane *plane = NULL, *master_plane = NULL;
- const struct dpu_format_extended *format_list;
+ const uint32_t *format_list;
struct dpu_plane *pdpu;
struct msm_drm_private *priv = dev->dev_private;
struct dpu_kms *kms = to_dpu_kms(priv->kms);
int zpos_max = DPU_ZPOS_MAX;
+ uint32_t num_formats;
int ret = -EINVAL;
/* create and zero local structure */
@@ -1491,24 +1514,18 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
goto clean_sspp;
}
- if (!master_plane_id)
- format_list = pdpu->pipe_sblk->format_list;
- else
+ if (pdpu->is_virtual) {
format_list = pdpu->pipe_sblk->virt_format_list;
-
- pdpu->nformats = dpu_populate_formats(format_list,
- pdpu->formats,
- 0,
- ARRAY_SIZE(pdpu->formats));
-
- if (!pdpu->nformats) {
- DPU_ERROR("[%u]no valid formats for plane\n", pipe);
- goto clean_sspp;
+ num_formats = pdpu->pipe_sblk->virt_num_formats;
+ } else {
+ format_list = pdpu->pipe_sblk->format_list;
+ num_formats = pdpu->pipe_sblk->num_formats;
}
ret = drm_universal_plane_init(dev, plane, 0xff, &dpu_plane_funcs,
- pdpu->formats, pdpu->nformats,
- NULL, type, NULL);
+ format_list, num_formats,
+ supported_format_modifiers, type, NULL);
if (ret)
goto clean_sspp;
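
With supported_format_modifiers handed to drm_universal_plane_init(), the drm core (given mode_config.allow_fb_modifiers, which dpu_kms sets) walks the modifier array up to the DRM_FORMAT_MOD_INVALID sentinel and filters every (format, modifier) pair through .format_mod_supported when building the IN_FORMATS property for userspace. A compact sketch of that pairing, with the core's role reduced to a loop (illustrative, not the drm implementation):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_MOD_LINEAR  0ULL
#define DEMO_MOD_INVALID 0x00ffffffffffffffULL	/* sentinel, mirroring drm_fourcc.h */

static bool demo_mod_supported(uint32_t fmt, uint64_t mod)
{
	/* stand-in for dpu_plane_format_mod_supported() */
	return mod == DEMO_MOD_LINEAR;
}

static void demo_build_in_formats(const uint32_t *fmts, unsigned int nfmts,
				  const uint64_t *mods)
{
	for (; *mods != DEMO_MOD_INVALID; mods++)
		for (unsigned int i = 0; i < nfmts; i++)
			if (demo_mod_supported(fmts[i], *mods))
				printf("fmt %#x mod %#llx\n", fmts[i],
				       (unsigned long long)*mods);
}

int main(void)
{
	static const uint32_t fmts[] = { 0x34325241 };	/* 'AR24' */
	static const uint64_t mods[] = { DEMO_MOD_LINEAR, DEMO_MOD_INVALID };

	demo_build_in_formats(fmts, 1, mods);
	return 0;
}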
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
index 7fed0b627708..0e6063acd041 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
@@ -28,23 +28,18 @@
/**
* struct dpu_plane_state: Define dpu extension of drm plane state object
* @base: base drm plane state object
- * @property_state: Local storage for msm_prop properties
- * @property_values: cached plane property values
* @aspace: pointer to address space for input/output buffers
- * @input_fence: dereferenced input fence pointer
* @stage: assigned by crtc blender
* @multirect_index: index of the rectangle of SSPP
* @multirect_mode: parallel or time multiplex multirect mode
* @pending: whether the current update is still pending
* @scaler3_cfg: configuration data for scaler3
* @pixel_ext: configuration data for pixel extensions
- * @scaler_check_state: indicates status of user provided pixel extension data
* @cdp_cfg: CDP configuration
*/
struct dpu_plane_state {
struct drm_plane_state base;
struct msm_gem_address_space *aspace;
- void *input_fence;
enum dpu_stage stage;
uint32_t multirect_index;
uint32_t multirect_mode;
@@ -107,12 +102,6 @@ void dpu_plane_restore(struct drm_plane *plane);
void dpu_plane_flush(struct drm_plane *plane);
/**
- * dpu_plane_kickoff - final plane operations before commit kickoff
- * @plane: Pointer to drm plane structure
- */
-void dpu_plane_kickoff(struct drm_plane *plane);
-
-/**
* dpu_plane_set_error: enable/disable error condition
* @plane: pointer to drm_plane structure
*/
@@ -147,14 +136,6 @@ int dpu_plane_validate_multirect_v2(struct dpu_multirect_plane_states *plane);
void dpu_plane_clear_multirect(const struct drm_plane_state *drm_state);
/**
- * dpu_plane_wait_input_fence - wait for input fence object
- * @plane: Pointer to DRM plane object
- * @wait_ms: Wait timeout value
- * Returns: Zero on success
- */
-int dpu_plane_wait_input_fence(struct drm_plane *plane, uint32_t wait_ms);
-
-/**
* dpu_plane_color_fill - enables color fill on plane
* @plane: Pointer to DRM plane object
* @color: RGB fill color value, [23..16] Blue, [15..8] Green, [7..0] Red
@@ -164,12 +145,4 @@ int dpu_plane_wait_input_fence(struct drm_plane *plane, uint32_t wait_ms);
int dpu_plane_color_fill(struct drm_plane *plane,
uint32_t color, uint32_t alpha);
-/**
- * dpu_plane_set_revalidate - sets revalidate flag which forces a full
- * validation of the plane properties in the next atomic check
- * @plane: Pointer to DRM plane object
- * @enable: Boolean to set/unset the flag
- */
-void dpu_plane_set_revalidate(struct drm_plane *plane, bool enable);
-
#endif /* _DPU_PLANE_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index bdb117709674..037d9f4187f9 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -21,8 +21,8 @@
#include "dpu_encoder.h"
#include "dpu_trace.h"
-#define RESERVED_BY_OTHER(h, r) \
- ((h)->rsvp && ((h)->rsvp->enc_id != (r)->enc_id))
+#define RESERVED_BY_OTHER(h, r) \
+ ((h)->enc_id && (h)->enc_id != r)
/**
* struct dpu_rm_requirements - Reservation requirements parameter bundle
@@ -34,90 +34,21 @@ struct dpu_rm_requirements {
struct dpu_encoder_hw_resources hw_res;
};
-/**
- * struct dpu_rm_rsvp - Use Case Reservation tagging structure
- * Used to tag HW blocks as reserved by a CRTC->Encoder->Connector chain
- * By using as a tag, rather than lists of pointers to HW blocks used
- * we can avoid some list management since we don't know how many blocks
- * of each type a given use case may require.
- * @list: List head for list of all reservations
- * @seq: Global RSVP sequence number for debugging, especially for
- * differentiating different allocations for the same encoder.
- * @enc_id: Reservations are tracked by Encoder DRM object ID.
- * CRTCs may be connected to multiple Encoders.
- * An encoder or connector id identifies the display path.
- */
-struct dpu_rm_rsvp {
- struct list_head list;
- uint32_t seq;
- uint32_t enc_id;
-};
/**
* struct dpu_rm_hw_blk - hardware block tracking list member
* @list: List head for list of all hardware blocks tracking items
- * @rsvp: Pointer to use case reservation if reserved by a client
- * @rsvp_nxt: Temporary pointer used during reservation to the incoming
- * request. Will be swapped into rsvp if proposal is accepted
- * @type: Type of hardware block this structure tracks
* @id: Hardware ID number, within its own space, i.e. LM_X
- * @catalog: Pointer to the hardware catalog entry for this block
+ * @enc_id: Encoder id to which this block is bound
* @hw: Pointer to the hardware register access object for this block
*/
struct dpu_rm_hw_blk {
struct list_head list;
- struct dpu_rm_rsvp *rsvp;
- struct dpu_rm_rsvp *rsvp_nxt;
- enum dpu_hw_blk_type type;
uint32_t id;
+ uint32_t enc_id;
struct dpu_hw_blk *hw;
};
-/**
- * dpu_rm_dbg_rsvp_stage - enum of steps in making reservation for event logging
- */
-enum dpu_rm_dbg_rsvp_stage {
- DPU_RM_STAGE_BEGIN,
- DPU_RM_STAGE_AFTER_CLEAR,
- DPU_RM_STAGE_AFTER_RSVPNEXT,
- DPU_RM_STAGE_FINAL
-};
-
-static void _dpu_rm_print_rsvps(
- struct dpu_rm *rm,
- enum dpu_rm_dbg_rsvp_stage stage)
-{
- struct dpu_rm_rsvp *rsvp;
- struct dpu_rm_hw_blk *blk;
- enum dpu_hw_blk_type type;
-
- DPU_DEBUG("%d\n", stage);
-
- list_for_each_entry(rsvp, &rm->rsvps, list) {
- DRM_DEBUG_KMS("%d rsvp[s%ue%u]\n", stage, rsvp->seq,
- rsvp->enc_id);
- }
-
- for (type = 0; type < DPU_HW_BLK_MAX; type++) {
- list_for_each_entry(blk, &rm->hw_blks[type], list) {
- if (!blk->rsvp && !blk->rsvp_nxt)
- continue;
-
- DRM_DEBUG_KMS("%d rsvp[s%ue%u->s%ue%u] %d %d\n", stage,
- (blk->rsvp) ? blk->rsvp->seq : 0,
- (blk->rsvp) ? blk->rsvp->enc_id : 0,
- (blk->rsvp_nxt) ? blk->rsvp_nxt->seq : 0,
- (blk->rsvp_nxt) ? blk->rsvp_nxt->enc_id : 0,
- blk->type, blk->id);
- }
- }
-}
-
-struct dpu_hw_mdp *dpu_rm_get_mdp(struct dpu_rm *rm)
-{
- return rm->hw_mdp;
-}
-
void dpu_rm_init_hw_iter(
struct dpu_rm_hw_iter *iter,
uint32_t enc_id,
@@ -148,15 +79,7 @@ static bool _dpu_rm_get_hw_locked(struct dpu_rm *rm, struct dpu_rm_hw_iter *i)
i->blk = list_prepare_entry(i->blk, blk_list, list);
list_for_each_entry_continue(i->blk, blk_list, list) {
- struct dpu_rm_rsvp *rsvp = i->blk->rsvp;
-
- if (i->blk->type != i->type) {
- DPU_ERROR("found incorrect block type %d on %d list\n",
- i->blk->type, i->type);
- return false;
- }
-
- if ((i->enc_id == 0) || (rsvp && rsvp->enc_id == i->enc_id)) {
+ if (i->enc_id == i->blk->enc_id) {
i->hw = i->blk->hw;
DPU_DEBUG("found type %d id %d for enc %d\n",
i->type, i->blk->id, i->enc_id);
@@ -208,34 +131,18 @@ static void _dpu_rm_hw_destroy(enum dpu_hw_blk_type type, void *hw)
int dpu_rm_destroy(struct dpu_rm *rm)
{
-
- struct dpu_rm_rsvp *rsvp_cur, *rsvp_nxt;
struct dpu_rm_hw_blk *hw_cur, *hw_nxt;
enum dpu_hw_blk_type type;
- if (!rm) {
- DPU_ERROR("invalid rm\n");
- return -EINVAL;
- }
-
- list_for_each_entry_safe(rsvp_cur, rsvp_nxt, &rm->rsvps, list) {
- list_del(&rsvp_cur->list);
- kfree(rsvp_cur);
- }
-
-
for (type = 0; type < DPU_HW_BLK_MAX; type++) {
list_for_each_entry_safe(hw_cur, hw_nxt, &rm->hw_blks[type],
list) {
list_del(&hw_cur->list);
- _dpu_rm_hw_destroy(hw_cur->type, hw_cur->hw);
+ _dpu_rm_hw_destroy(type, hw_cur->hw);
kfree(hw_cur);
}
}
- dpu_hw_mdp_destroy(rm->hw_mdp);
- rm->hw_mdp = NULL;
-
mutex_destroy(&rm->rm_lock);
return 0;
@@ -250,11 +157,8 @@ static int _dpu_rm_hw_blk_create(
void *hw_catalog_info)
{
struct dpu_rm_hw_blk *blk;
- struct dpu_hw_mdp *hw_mdp;
void *hw;
- hw_mdp = rm->hw_mdp;
-
switch (type) {
case DPU_HW_BLK_LM:
hw = dpu_hw_lm_init(id, mmio, cat);
@@ -290,9 +194,9 @@ static int _dpu_rm_hw_blk_create(
return -ENOMEM;
}
- blk->type = type;
blk->id = id;
blk->hw = hw;
+ blk->enc_id = 0;
list_add_tail(&blk->list, &rm->hw_blks[type]);
return 0;
@@ -300,13 +204,12 @@ static int _dpu_rm_hw_blk_create(
int dpu_rm_init(struct dpu_rm *rm,
struct dpu_mdss_cfg *cat,
- void __iomem *mmio,
- struct drm_device *dev)
+ void __iomem *mmio)
{
int rc, i;
enum dpu_hw_blk_type type;
- if (!rm || !cat || !mmio || !dev) {
+ if (!rm || !cat || !mmio) {
DPU_ERROR("invalid kms\n");
return -EINVAL;
}
@@ -316,21 +219,9 @@ int dpu_rm_init(struct dpu_rm *rm,
mutex_init(&rm->rm_lock);
- INIT_LIST_HEAD(&rm->rsvps);
for (type = 0; type < DPU_HW_BLK_MAX; type++)
INIT_LIST_HEAD(&rm->hw_blks[type]);
- rm->dev = dev;
-
- /* Some of the sub-blocks require an mdptop to be created */
- rm->hw_mdp = dpu_hw_mdptop_init(MDP_TOP, mmio, cat);
- if (IS_ERR_OR_NULL(rm->hw_mdp)) {
- rc = PTR_ERR(rm->hw_mdp);
- rm->hw_mdp = NULL;
- DPU_ERROR("failed: mdp hw not available\n");
- goto fail;
- }
-
/* Interrogate HW catalog and create tracking items for hw blocks */
for (i = 0; i < cat->mixer_count; i++) {
struct dpu_lm_cfg *lm = &cat->mixer[i];
@@ -410,7 +301,7 @@ static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
* proposed use case requirements, incl. hardwired dependent blocks like
* pingpong
* @rm: dpu resource manager handle
- * @rsvp: reservation currently being created
+ * @enc_id: encoder id requesting the allocation
* @reqs: proposed use case requirements
* @lm: proposed layer mixer, function checks if lm, and all other hardwired
* blocks connected to the lm (pp) are available and appropriate
@@ -422,7 +313,7 @@ static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
*/
static bool _dpu_rm_check_lm_and_get_connected_blks(
struct dpu_rm *rm,
- struct dpu_rm_rsvp *rsvp,
+ uint32_t enc_id,
struct dpu_rm_requirements *reqs,
struct dpu_rm_hw_blk *lm,
struct dpu_rm_hw_blk **pp,
@@ -449,7 +340,7 @@ static bool _dpu_rm_check_lm_and_get_connected_blks(
}
/* Already reserved? */
- if (RESERVED_BY_OTHER(lm, rsvp)) {
+ if (RESERVED_BY_OTHER(lm, enc_id)) {
DPU_DEBUG("lm %d already reserved\n", lm_cfg->id);
return false;
}
@@ -467,7 +358,7 @@ static bool _dpu_rm_check_lm_and_get_connected_blks(
return false;
}
- if (RESERVED_BY_OTHER(*pp, rsvp)) {
+ if (RESERVED_BY_OTHER(*pp, enc_id)) {
DPU_DEBUG("lm %d pp %d already reserved\n", lm->id,
(*pp)->id);
return false;
@@ -476,10 +367,8 @@ static bool _dpu_rm_check_lm_and_get_connected_blks(
return true;
}
-static int _dpu_rm_reserve_lms(
- struct dpu_rm *rm,
- struct dpu_rm_rsvp *rsvp,
- struct dpu_rm_requirements *reqs)
+static int _dpu_rm_reserve_lms(struct dpu_rm *rm, uint32_t enc_id,
+ struct dpu_rm_requirements *reqs)
{
struct dpu_rm_hw_blk *lm[MAX_BLOCKS];
@@ -504,7 +393,7 @@ static int _dpu_rm_reserve_lms(
lm[lm_count] = iter_i.blk;
if (!_dpu_rm_check_lm_and_get_connected_blks(
- rm, rsvp, reqs, lm[lm_count],
+ rm, enc_id, reqs, lm[lm_count],
&pp[lm_count], NULL))
continue;
@@ -519,7 +408,7 @@ static int _dpu_rm_reserve_lms(
continue;
if (!_dpu_rm_check_lm_and_get_connected_blks(
- rm, rsvp, reqs, iter_j.blk,
+ rm, enc_id, reqs, iter_j.blk,
&pp[lm_count], iter_i.blk))
continue;
@@ -537,11 +426,10 @@ static int _dpu_rm_reserve_lms(
if (!lm[i])
break;
- lm[i]->rsvp_nxt = rsvp;
- pp[i]->rsvp_nxt = rsvp;
+ lm[i]->enc_id = enc_id;
+ pp[i]->enc_id = enc_id;
- trace_dpu_rm_reserve_lms(lm[i]->id, lm[i]->type, rsvp->enc_id,
- pp[i]->id);
+ trace_dpu_rm_reserve_lms(lm[i]->id, enc_id, pp[i]->id);
}
return rc;
@@ -549,7 +437,7 @@ static int _dpu_rm_reserve_lms(
static int _dpu_rm_reserve_ctls(
struct dpu_rm *rm,
- struct dpu_rm_rsvp *rsvp,
+ uint32_t enc_id,
const struct msm_display_topology *top)
{
struct dpu_rm_hw_blk *ctls[MAX_BLOCKS];
@@ -570,7 +458,7 @@ static int _dpu_rm_reserve_ctls(
unsigned long features = ctl->caps->features;
bool has_split_display;
- if (RESERVED_BY_OTHER(iter.blk, rsvp))
+ if (RESERVED_BY_OTHER(iter.blk, enc_id))
continue;
has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features;
@@ -591,9 +479,8 @@ static int _dpu_rm_reserve_ctls(
return -ENAVAIL;
for (i = 0; i < ARRAY_SIZE(ctls) && i < num_ctls; i++) {
- ctls[i]->rsvp_nxt = rsvp;
- trace_dpu_rm_reserve_ctls(ctls[i]->id, ctls[i]->type,
- rsvp->enc_id);
+ ctls[i]->enc_id = enc_id;
+ trace_dpu_rm_reserve_ctls(ctls[i]->id, enc_id);
}
return 0;
@@ -601,7 +488,7 @@ static int _dpu_rm_reserve_ctls(
static int _dpu_rm_reserve_intf(
struct dpu_rm *rm,
- struct dpu_rm_rsvp *rsvp,
+ uint32_t enc_id,
uint32_t id,
enum dpu_hw_blk_type type)
{
@@ -614,14 +501,13 @@ static int _dpu_rm_reserve_intf(
if (iter.blk->id != id)
continue;
- if (RESERVED_BY_OTHER(iter.blk, rsvp)) {
+ if (RESERVED_BY_OTHER(iter.blk, enc_id)) {
DPU_ERROR("type %d id %d already reserved\n", type, id);
return -ENAVAIL;
}
- iter.blk->rsvp_nxt = rsvp;
- trace_dpu_rm_reserve_intf(iter.blk->id, iter.blk->type,
- rsvp->enc_id);
+ iter.blk->enc_id = enc_id;
+ trace_dpu_rm_reserve_intf(iter.blk->id, enc_id);
break;
}
@@ -636,7 +522,7 @@ static int _dpu_rm_reserve_intf(
static int _dpu_rm_reserve_intf_related_hw(
struct dpu_rm *rm,
- struct dpu_rm_rsvp *rsvp,
+ uint32_t enc_id,
struct dpu_encoder_hw_resources *hw_res)
{
int i, ret = 0;
@@ -646,7 +532,7 @@ static int _dpu_rm_reserve_intf_related_hw(
if (hw_res->intfs[i] == INTF_MODE_NONE)
continue;
id = i + INTF_0;
- ret = _dpu_rm_reserve_intf(rm, rsvp, id,
+ ret = _dpu_rm_reserve_intf(rm, enc_id, id,
DPU_HW_BLK_INTF);
if (ret)
return ret;
@@ -655,33 +541,27 @@ static int _dpu_rm_reserve_intf_related_hw(
return ret;
}
-static int _dpu_rm_make_next_rsvp(
+static int _dpu_rm_make_reservation(
struct dpu_rm *rm,
struct drm_encoder *enc,
struct drm_crtc_state *crtc_state,
- struct dpu_rm_rsvp *rsvp,
struct dpu_rm_requirements *reqs)
{
int ret;
- /* Create reservation info, tag reserved blocks with it as we go */
- rsvp->seq = ++rm->rsvp_next_seq;
- rsvp->enc_id = enc->base.id;
- list_add_tail(&rsvp->list, &rm->rsvps);
-
- ret = _dpu_rm_reserve_lms(rm, rsvp, reqs);
+ ret = _dpu_rm_reserve_lms(rm, enc->base.id, reqs);
if (ret) {
DPU_ERROR("unable to find appropriate mixers\n");
return ret;
}
- ret = _dpu_rm_reserve_ctls(rm, rsvp, &reqs->topology);
+ ret = _dpu_rm_reserve_ctls(rm, enc->base.id, &reqs->topology);
if (ret) {
DPU_ERROR("unable to find appropriate CTL\n");
return ret;
}
- ret = _dpu_rm_reserve_intf_related_hw(rm, rsvp, &reqs->hw_res);
+ ret = _dpu_rm_reserve_intf_related_hw(rm, enc->base.id, &reqs->hw_res);
if (ret)
return ret;
@@ -706,108 +586,31 @@ static int _dpu_rm_populate_requirements(
return 0;
}
-static struct dpu_rm_rsvp *_dpu_rm_get_rsvp(
- struct dpu_rm *rm,
- struct drm_encoder *enc)
+static void _dpu_rm_release_reservation(struct dpu_rm *rm, uint32_t enc_id)
{
- struct dpu_rm_rsvp *i;
-
- if (!rm || !enc) {
- DPU_ERROR("invalid params\n");
- return NULL;
- }
-
- if (list_empty(&rm->rsvps))
- return NULL;
-
- list_for_each_entry(i, &rm->rsvps, list)
- if (i->enc_id == enc->base.id)
- return i;
-
- return NULL;
-}
-
-/**
- * _dpu_rm_release_rsvp - release resources and release a reservation
- * @rm: KMS handle
- * @rsvp: RSVP pointer to release and release resources for
- */
-static void _dpu_rm_release_rsvp(struct dpu_rm *rm, struct dpu_rm_rsvp *rsvp)
-{
- struct dpu_rm_rsvp *rsvp_c, *rsvp_n;
struct dpu_rm_hw_blk *blk;
enum dpu_hw_blk_type type;
- if (!rsvp)
- return;
-
- DPU_DEBUG("rel rsvp %d enc %d\n", rsvp->seq, rsvp->enc_id);
-
- list_for_each_entry_safe(rsvp_c, rsvp_n, &rm->rsvps, list) {
- if (rsvp == rsvp_c) {
- list_del(&rsvp_c->list);
- break;
- }
- }
-
for (type = 0; type < DPU_HW_BLK_MAX; type++) {
list_for_each_entry(blk, &rm->hw_blks[type], list) {
- if (blk->rsvp == rsvp) {
- blk->rsvp = NULL;
- DPU_DEBUG("rel rsvp %d enc %d %d %d\n",
- rsvp->seq, rsvp->enc_id,
- blk->type, blk->id);
- }
- if (blk->rsvp_nxt == rsvp) {
- blk->rsvp_nxt = NULL;
- DPU_DEBUG("rel rsvp_nxt %d enc %d %d %d\n",
- rsvp->seq, rsvp->enc_id,
- blk->type, blk->id);
+ if (blk->enc_id == enc_id) {
+ blk->enc_id = 0;
+ DPU_DEBUG("rel enc %d %d %d\n", enc_id,
+ type, blk->id);
}
}
}
-
- kfree(rsvp);
}
void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc)
{
- struct dpu_rm_rsvp *rsvp;
-
- if (!rm || !enc) {
- DPU_ERROR("invalid params\n");
- return;
- }
-
mutex_lock(&rm->rm_lock);
- rsvp = _dpu_rm_get_rsvp(rm, enc);
- if (!rsvp) {
- DPU_ERROR("failed to find rsvp for enc %d\n", enc->base.id);
- goto end;
- }
+ _dpu_rm_release_reservation(rm, enc->base.id);
- _dpu_rm_release_rsvp(rm, rsvp);
-end:
mutex_unlock(&rm->rm_lock);
}
-static void _dpu_rm_commit_rsvp(struct dpu_rm *rm, struct dpu_rm_rsvp *rsvp)
-{
- struct dpu_rm_hw_blk *blk;
- enum dpu_hw_blk_type type;
-
- /* Swap next rsvp to be the active */
- for (type = 0; type < DPU_HW_BLK_MAX; type++) {
- list_for_each_entry(blk, &rm->hw_blks[type], list) {
- if (blk->rsvp_nxt) {
- blk->rsvp = blk->rsvp_nxt;
- blk->rsvp_nxt = NULL;
- }
- }
- }
-}
-
int dpu_rm_reserve(
struct dpu_rm *rm,
struct drm_encoder *enc,
@@ -815,7 +618,6 @@ int dpu_rm_reserve(
struct msm_display_topology topology,
bool test_only)
{
- struct dpu_rm_rsvp *rsvp_cur, *rsvp_nxt;
struct dpu_rm_requirements reqs;
int ret;
@@ -828,8 +630,6 @@ int dpu_rm_reserve(
mutex_lock(&rm->rm_lock);
- _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_BEGIN);
-
ret = _dpu_rm_populate_requirements(rm, enc, crtc_state, &reqs,
topology);
if (ret) {
@@ -837,50 +637,17 @@ int dpu_rm_reserve(
goto end;
}
- /*
- * We only support one active reservation per-hw-block. But to implement
- * transactional semantics for test-only, and for allowing failure while
- * modifying your existing reservation, over the course of this
- * function we can have two reservations:
- * Current: Existing reservation
- * Next: Proposed reservation. The proposed reservation may fail, or may
- * be discarded if in test-only mode.
- * If reservation is successful, and we're not in test-only, then we
- * replace the current with the next.
- */
- rsvp_nxt = kzalloc(sizeof(*rsvp_nxt), GFP_KERNEL);
- if (!rsvp_nxt) {
- ret = -ENOMEM;
- goto end;
- }
-
- rsvp_cur = _dpu_rm_get_rsvp(rm, enc);
-
- /* Check the proposed reservation, store it in hw's "next" field */
- ret = _dpu_rm_make_next_rsvp(rm, enc, crtc_state, rsvp_nxt, &reqs);
-
- _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_AFTER_RSVPNEXT);
-
+ ret = _dpu_rm_make_reservation(rm, enc, crtc_state, &reqs);
if (ret) {
DPU_ERROR("failed to reserve hw resources: %d\n", ret);
- _dpu_rm_release_rsvp(rm, rsvp_nxt);
+ _dpu_rm_release_reservation(rm, enc->base.id);
} else if (test_only) {
- /*
- * Normally, if test_only, test the reservation and then undo
- * However, if the user requests LOCK, then keep the reservation
- * made during the atomic_check phase.
- */
- DPU_DEBUG("test_only: discard test rsvp[s%de%d]\n",
- rsvp_nxt->seq, rsvp_nxt->enc_id);
- _dpu_rm_release_rsvp(rm, rsvp_nxt);
- } else {
- _dpu_rm_release_rsvp(rm, rsvp_cur);
-
- _dpu_rm_commit_rsvp(rm, rsvp_nxt);
+ /* test_only: test the reservation and then undo */
+ DPU_DEBUG("test_only: discard test [enc: %d]\n",
+ enc->base.id);
+ _dpu_rm_release_reservation(rm, enc->base.id);
}
- _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_FINAL);
-
end:
mutex_unlock(&rm->rm_lock);
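
The net effect of the dpu_rm rewrite is that the per-chain rsvp objects collapse into one tag per hardware block: enc_id == 0 means free, non-zero means owned by that encoder, and both failure cleanup and test-only mode reduce to clearing the tag again. A reduced model of the scheme (types simplified to the bare minimum, not the driver's structures):

#include <stdint.h>

struct demo_blk {
	uint32_t id;
	uint32_t enc_id;	/* 0 == free */
};

/* Mirrors the reworked RESERVED_BY_OTHER(): busy only if some other
 * encoder owns the block. */
static int demo_reserved_by_other(const struct demo_blk *b, uint32_t enc_id)
{
	return b->enc_id && b->enc_id != enc_id;
}

static int demo_claim(struct demo_blk *b, uint32_t enc_id)
{
	if (demo_reserved_by_other(b, enc_id))
		return -1;
	b->enc_id = enc_id;
	return 0;
}

/* Release doubles as the test-only rollback. */
static void demo_release(struct demo_blk *b, uint32_t enc_id)
{
	if (b->enc_id == enc_id)
		b->enc_id = 0;
}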
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
index b8273bd23801..381611fc5877 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
@@ -22,22 +22,14 @@
/**
* struct dpu_rm - DPU dynamic hardware resource manager
- * @dev: device handle for event logging purposes
- * @rsvps: list of hardware reservations by each crtc->encoder->connector
* @hw_blks: array of lists of hardware resources present in the system, one
* list per type of hardware block
- * @hw_mdp: hardware object for mdp_top
* @lm_max_width: cached layer mixer maximum width
- * @rsvp_next_seq: sequence number for next reservation for debugging purposes
* @rm_lock: resource manager mutex
*/
struct dpu_rm {
- struct drm_device *dev;
- struct list_head rsvps;
struct list_head hw_blks[DPU_HW_BLK_MAX];
- struct dpu_hw_mdp *hw_mdp;
uint32_t lm_max_width;
- uint32_t rsvp_next_seq;
struct mutex rm_lock;
};
@@ -67,13 +59,11 @@ struct dpu_rm_hw_iter {
* @rm: DPU Resource Manager handle
* @cat: Pointer to hardware catalog
* @mmio: mapped register io address of MDP
- * @dev: device handle for event logging purposes
* @Return: 0 on Success otherwise -ERROR
*/
int dpu_rm_init(struct dpu_rm *rm,
struct dpu_mdss_cfg *cat,
- void __iomem *mmio,
- struct drm_device *dev);
+ void __iomem *mmio);
/**
* dpu_rm_destroy - Free all memory allocated by dpu_rm_init
@@ -112,14 +102,6 @@ int dpu_rm_reserve(struct dpu_rm *rm,
void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc);
/**
- * dpu_rm_get_mdp - Retrieve HW block for MDP TOP.
- * This is never reserved, and is usable by any display.
- * @rm: DPU Resource Manager handle
- * @Return: Pointer to hw block or NULL
- */
-struct dpu_hw_mdp *dpu_rm_get_mdp(struct dpu_rm *rm);
-
-/**
* dpu_rm_init_hw_iter - setup given iterator for new iteration over hw list
* using dpu_rm_get_hw
* @iter: iter object to initialize
@@ -144,12 +126,4 @@ void dpu_rm_init_hw_iter(
* @Return: true on match found, false on no match found
*/
bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *iter);
-
-/**
- * dpu_rm_check_property_topctl - validate property bitmask before it is set
- * @val: user's proposed topology control bitmask
- * @Return: 0 on success or error
- */
-int dpu_rm_check_property_topctl(uint64_t val);
-
#endif /* __DPU_RM_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
index c78b521ceda1..8bb46090bd16 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
@@ -831,48 +831,42 @@ TRACE_EVENT(dpu_plane_disable,
);
DECLARE_EVENT_CLASS(dpu_rm_iter_template,
- TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
- TP_ARGS(id, type, enc_id),
+ TP_PROTO(uint32_t id, uint32_t enc_id),
+ TP_ARGS(id, enc_id),
TP_STRUCT__entry(
__field( uint32_t, id )
- __field( enum dpu_hw_blk_type, type )
__field( uint32_t, enc_id )
),
TP_fast_assign(
__entry->id = id;
- __entry->type = type;
__entry->enc_id = enc_id;
),
- TP_printk("id:%d type:%d enc_id:%u", __entry->id, __entry->type,
- __entry->enc_id)
+ TP_printk("id:%d enc_id:%u", __entry->id, __entry->enc_id)
);
DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_intf,
- TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
- TP_ARGS(id, type, enc_id)
+ TP_PROTO(uint32_t id, uint32_t enc_id),
+ TP_ARGS(id, enc_id)
);
DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_ctls,
- TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
- TP_ARGS(id, type, enc_id)
+ TP_PROTO(uint32_t id, uint32_t enc_id),
+ TP_ARGS(id, enc_id)
);
TRACE_EVENT(dpu_rm_reserve_lms,
- TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id,
- uint32_t pp_id),
- TP_ARGS(id, type, enc_id, pp_id),
+ TP_PROTO(uint32_t id, uint32_t enc_id, uint32_t pp_id),
+ TP_ARGS(id, enc_id, pp_id),
TP_STRUCT__entry(
__field( uint32_t, id )
- __field( enum dpu_hw_blk_type, type )
__field( uint32_t, enc_id )
__field( uint32_t, pp_id )
),
TP_fast_assign(
__entry->id = id;
- __entry->type = type;
__entry->enc_id = enc_id;
__entry->pp_id = pp_id;
),
- TP_printk("id:%d type:%d enc_id:%u pp_id:%u", __entry->id,
- __entry->type, __entry->enc_id, __entry->pp_id)
+ TP_printk("id:%d enc_id:%u pp_id:%u", __entry->id,
+ __entry->enc_id, __entry->pp_id)
);
TRACE_EVENT(dpu_vbif_wait_xin_halt_fail,
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 0689194b295d..4697d854b827 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -207,62 +207,44 @@ u32 msm_readl(const void __iomem *addr)
return val;
}
-struct vblank_event {
- struct list_head node;
+struct msm_vblank_work {
+ struct work_struct work;
int crtc_id;
bool enable;
+ struct msm_drm_private *priv;
};
-static void vblank_ctrl_worker(struct kthread_work *work)
+static void vblank_ctrl_worker(struct work_struct *work)
{
- struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
- struct msm_vblank_ctrl, work);
- struct msm_drm_private *priv = container_of(vbl_ctrl,
- struct msm_drm_private, vblank_ctrl);
+ struct msm_vblank_work *vbl_work = container_of(work,
+ struct msm_vblank_work, work);
+ struct msm_drm_private *priv = vbl_work->priv;
struct msm_kms *kms = priv->kms;
- struct vblank_event *vbl_ev, *tmp;
- unsigned long flags;
-
- spin_lock_irqsave(&vbl_ctrl->lock, flags);
- list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
- list_del(&vbl_ev->node);
- spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
-
- if (vbl_ev->enable)
- kms->funcs->enable_vblank(kms,
- priv->crtcs[vbl_ev->crtc_id]);
- else
- kms->funcs->disable_vblank(kms,
- priv->crtcs[vbl_ev->crtc_id]);
-
- kfree(vbl_ev);
- spin_lock_irqsave(&vbl_ctrl->lock, flags);
- }
+ if (vbl_work->enable)
+ kms->funcs->enable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);
+ else
+ kms->funcs->disable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);
- spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+ kfree(vbl_work);
}
static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
int crtc_id, bool enable)
{
- struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
- struct vblank_event *vbl_ev;
- unsigned long flags;
+ struct msm_vblank_work *vbl_work;
- vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
- if (!vbl_ev)
+ vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
+ if (!vbl_work)
return -ENOMEM;
- vbl_ev->crtc_id = crtc_id;
- vbl_ev->enable = enable;
+ INIT_WORK(&vbl_work->work, vblank_ctrl_worker);
- spin_lock_irqsave(&vbl_ctrl->lock, flags);
- list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
- spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+ vbl_work->crtc_id = crtc_id;
+ vbl_work->enable = enable;
+ vbl_work->priv = priv;
- kthread_queue_work(&priv->disp_thread[crtc_id].worker,
- &vbl_ctrl->work);
+ queue_work(priv->wq, &vbl_work->work);
return 0;
}
@@ -274,31 +256,20 @@ static int msm_drm_uninit(struct device *dev)
struct msm_drm_private *priv = ddev->dev_private;
struct msm_kms *kms = priv->kms;
struct msm_mdss *mdss = priv->mdss;
- struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
- struct vblank_event *vbl_ev, *tmp;
int i;
/* We must cancel and cleanup any pending vblank enable/disable
* work before drm_irq_uninstall() to avoid work re-enabling an
* irq after uninstall has disabled it.
*/
- kthread_flush_work(&vbl_ctrl->work);
- list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
- list_del(&vbl_ev->node);
- kfree(vbl_ev);
- }
- /* clean up display commit/event worker threads */
- for (i = 0; i < priv->num_crtcs; i++) {
- if (priv->disp_thread[i].thread) {
- kthread_flush_worker(&priv->disp_thread[i].worker);
- kthread_stop(priv->disp_thread[i].thread);
- priv->disp_thread[i].thread = NULL;
- }
+ flush_workqueue(priv->wq);
+ destroy_workqueue(priv->wq);
+ /* clean up event worker threads */
+ for (i = 0; i < priv->num_crtcs; i++) {
if (priv->event_thread[i].thread) {
- kthread_flush_worker(&priv->event_thread[i].worker);
- kthread_stop(priv->event_thread[i].thread);
+ kthread_destroy_worker(&priv->event_thread[i].worker);
priv->event_thread[i].thread = NULL;
}
}
@@ -323,9 +294,6 @@ static int msm_drm_uninit(struct device *dev)
drm_irq_uninstall(ddev);
pm_runtime_put_sync(dev);
- flush_workqueue(priv->wq);
- destroy_workqueue(priv->wq);
-
if (kms && kms->funcs)
kms->funcs->destroy(kms);
@@ -490,9 +458,6 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
priv->wq = alloc_ordered_workqueue("msm", 0);
INIT_LIST_HEAD(&priv->inactive_list);
- INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
- kthread_init_work(&priv->vblank_ctrl.work, vblank_ctrl_worker);
- spin_lock_init(&priv->vblank_ctrl.lock);
drm_mode_config_init(ddev);
@@ -554,27 +519,6 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
*/
param.sched_priority = 16;
for (i = 0; i < priv->num_crtcs; i++) {
-
- /* initialize display thread */
- priv->disp_thread[i].crtc_id = priv->crtcs[i]->base.id;
- kthread_init_worker(&priv->disp_thread[i].worker);
- priv->disp_thread[i].dev = ddev;
- priv->disp_thread[i].thread =
- kthread_run(kthread_worker_fn,
- &priv->disp_thread[i].worker,
- "crtc_commit:%d", priv->disp_thread[i].crtc_id);
- if (IS_ERR(priv->disp_thread[i].thread)) {
- DRM_DEV_ERROR(dev, "failed to create crtc_commit kthread\n");
- priv->disp_thread[i].thread = NULL;
- goto err_msm_uninit;
- }
-
- ret = sched_setscheduler(priv->disp_thread[i].thread,
- SCHED_FIFO, &param);
- if (ret)
- dev_warn(dev, "disp_thread set priority failed: %d\n",
- ret);
-
/* initialize event thread */
priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
kthread_init_worker(&priv->event_thread[i].worker);
@@ -589,13 +533,6 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
goto err_msm_uninit;
}
- /**
- * event thread should also run at same priority as disp_thread
- * because it is handling frame_done events. A lower priority
- * event thread and higher priority disp_thread can causes
- * frame_pending counters beyond 2. This can lead to commit
- * failure at crtc commit level.
- */
ret = sched_setscheduler(priv->event_thread[i].thread,
SCHED_FIFO, &param);
if (ret)
@@ -914,8 +851,12 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
ret = -EINVAL;
break;
}
- ret = copy_from_user(msm_obj->name,
- u64_to_user_ptr(args->value), args->len);
+ if (copy_from_user(msm_obj->name, u64_to_user_ptr(args->value),
+ args->len)) {
+ msm_obj->name[0] = '\0';
+ ret = -EFAULT;
+ break;
+ }
msm_obj->name[args->len] = '\0';
for (i = 0; i < args->len; i++) {
if (!isprint(msm_obj->name[i])) {
@@ -931,8 +872,9 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
}
args->len = strlen(msm_obj->name);
if (args->value) {
- ret = copy_to_user(u64_to_user_ptr(args->value),
- msm_obj->name, args->len);
+ if (copy_to_user(u64_to_user_ptr(args->value),
+ msm_obj->name, args->len))
+ ret = -EFAULT;
}
break;
}
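
A note on the two msm_ioctl_gem_info() hunks above: copy_from_user() and copy_to_user() return the number of bytes left uncopied, not an errno, so assigning their result straight to ret could leak a positive byte count to userspace as the ioctl return value. The fix collapses any non-zero result to -EFAULT; a minimal sketch of the idiom (helper name and buffer sizing are hypothetical):

#include <linux/errno.h>
#include <linux/uaccess.h>

/* Copy a user-supplied name into buf; buf must hold len + 1 bytes. */
static int copy_name_from_user(char *buf, const void __user *uptr, size_t len)
{
	if (copy_from_user(buf, uptr, len))
		return -EFAULT;	/* non-zero return == bytes left uncopied */

	buf[len] = '\0';
	return 0;
}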
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index a3e7c95e186e..163e24d2ab99 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -77,12 +77,6 @@ enum msm_mdp_plane_property {
PLANE_PROP_MAX_NUM
};
-struct msm_vblank_ctrl {
- struct kthread_work work;
- struct list_head event_list;
- spinlock_t lock;
-};
-
#define MSM_GPU_MAX_RINGS 4
#define MAX_H_TILES_PER_DISPLAY 2
@@ -126,7 +120,7 @@ struct msm_display_topology {
/**
* struct msm_display_info - defines display properties
- * @intf_type: DRM_MODE_CONNECTOR_ display type
+ * @intf_type: DRM_MODE_ENCODER_ type
* @capabilities: Bitmask of display flags
* @num_of_h_tiles: Number of horizontal tiles in case of split interface
* @h_tile_instance: Controller instance used per tile. Number of elements is
@@ -199,7 +193,6 @@ struct msm_drm_private {
unsigned int num_crtcs;
struct drm_crtc *crtcs[MAX_CRTCS];
- struct msm_drm_thread disp_thread[MAX_CRTCS];
struct msm_drm_thread event_thread[MAX_CRTCS];
unsigned int num_encoders;
@@ -228,7 +221,6 @@ struct msm_drm_private {
struct notifier_block vmap_notifier;
struct shrinker shrinker;
- struct msm_vblank_ctrl vblank_ctrl;
struct drm_atomic_state *pm_state;
};
diff --git a/drivers/gpu/drm/nouveau/Kbuild b/drivers/gpu/drm/nouveau/Kbuild
index b17843dd050d..581404e6544d 100644
--- a/drivers/gpu/drm/nouveau/Kbuild
+++ b/drivers/gpu/drm/nouveau/Kbuild
@@ -30,6 +30,8 @@ nouveau-y += nouveau_vga.o
# DRM - memory management
nouveau-y += nouveau_bo.o
nouveau-y += nouveau_gem.o
+nouveau-$(CONFIG_DRM_NOUVEAU_SVM) += nouveau_svm.o
+nouveau-$(CONFIG_DRM_NOUVEAU_SVM) += nouveau_dmem.o
nouveau-y += nouveau_mem.o
nouveau-y += nouveau_prime.o
nouveau-y += nouveau_sgdma.o
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 432c440223bb..00cd9ab8948d 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -71,3 +71,15 @@ config DRM_NOUVEAU_BACKLIGHT
help
Say Y here if you want to control the backlight of your display
(e.g. a laptop panel).
+
+config DRM_NOUVEAU_SVM
+ bool "(EXPERIMENTAL) Enable SVM (Shared Virtual Memory) support"
+ depends on ARCH_HAS_HMM
+ depends on DRM_NOUVEAU
+ depends on STAGING
+ select HMM_MIRROR
+ select DEVICE_PRIVATE
+ default n
+ help
+ Say Y here if you want to enable experimental support for
+ Shared Virtual Memory (SVM).
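
The Kbuild hunk earlier only compiles nouveau_svm.o and nouveau_dmem.o when this symbol is enabled; C call sites can test the same symbol without #ifdef blocks via IS_ENABLED(), which evaluates to a compile-time 0 or 1. A small sketch (the function and message text are hypothetical):

#include <linux/kconfig.h>
#include <linux/printk.h>

static void report_svm_build(void)
{
	if (IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM))
		pr_info("nouveau: experimental SVM support built in\n");
	else
		pr_info("nouveau: built without SVM support\n");
}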
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 2c569e264df3..f22f01020625 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -40,6 +40,7 @@
#include "nvreg.h"
#include "nouveau_fbcon.h"
#include "disp.h"
+#include "nouveau_dma.h"
#include <subdev/bios/pll.h>
#include <subdev/clk.h>
@@ -1077,12 +1078,223 @@ nouveau_crtc_set_config(struct drm_mode_set *set,
return ret;
}
+struct nv04_page_flip_state {
+ struct list_head head;
+ struct drm_pending_vblank_event *event;
+ struct drm_crtc *crtc;
+ int bpp, pitch;
+ u64 offset;
+};
+
+static int
+nv04_finish_page_flip(struct nouveau_channel *chan,
+ struct nv04_page_flip_state *ps)
+{
+ struct nouveau_fence_chan *fctx = chan->fence;
+ struct nouveau_drm *drm = chan->drm;
+ struct drm_device *dev = drm->dev;
+ struct nv04_page_flip_state *s;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+
+ if (list_empty(&fctx->flip)) {
+ NV_ERROR(drm, "unexpected pageflip\n");
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ return -EINVAL;
+ }
+
+ s = list_first_entry(&fctx->flip, struct nv04_page_flip_state, head);
+ if (s->event) {
+ drm_crtc_arm_vblank_event(s->crtc, s->event);
+ } else {
+ /* Give up ownership of vblank for page-flipped crtc */
+ drm_crtc_vblank_put(s->crtc);
+ }
+
+ list_del(&s->head);
+ if (ps)
+ *ps = *s;
+ kfree(s);
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ return 0;
+}
+
+int
+nv04_flip_complete(struct nvif_notify *notify)
+{
+ struct nouveau_cli *cli = (void *)notify->object->client;
+ struct nouveau_drm *drm = cli->drm;
+ struct nouveau_channel *chan = drm->channel;
+ struct nv04_page_flip_state state;
+
+ if (!nv04_finish_page_flip(chan, &state)) {
+ nv_set_crtc_base(drm->dev, drm_crtc_index(state.crtc),
+ state.offset + state.crtc->y *
+ state.pitch + state.crtc->x *
+ state.bpp / 8);
+ }
+
+ return NVIF_NOTIFY_KEEP;
+}
+
+static int
+nv04_page_flip_emit(struct nouveau_channel *chan,
+ struct nouveau_bo *old_bo,
+ struct nouveau_bo *new_bo,
+ struct nv04_page_flip_state *s,
+ struct nouveau_fence **pfence)
+{
+ struct nouveau_fence_chan *fctx = chan->fence;
+ struct nouveau_drm *drm = chan->drm;
+ struct drm_device *dev = drm->dev;
+ unsigned long flags;
+ int ret;
+
+ /* Queue it to the pending list */
+ spin_lock_irqsave(&dev->event_lock, flags);
+ list_add_tail(&s->head, &fctx->flip);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ /* Synchronize with the old framebuffer */
+ ret = nouveau_fence_sync(old_bo, chan, false, false);
+ if (ret)
+ goto fail;
+
+ /* Emit the pageflip */
+ ret = RING_SPACE(chan, 2);
+ if (ret)
+ goto fail;
+
+ BEGIN_NV04(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
+ OUT_RING (chan, 0x00000000);
+ FIRE_RING (chan);
+
+ ret = nouveau_fence_new(chan, false, pfence);
+ if (ret)
+ goto fail;
+
+ return 0;
+fail:
+ spin_lock_irqsave(&dev->event_lock, flags);
+ list_del(&s->head);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ return ret;
+}
+
+static int
+nv04_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event, u32 flags,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ const int swap_interval = (flags & DRM_MODE_PAGE_FLIP_ASYNC) ? 0 : 1;
+ struct drm_device *dev = crtc->dev;
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->primary->fb)->nvbo;
+ struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo;
+ struct nv04_page_flip_state *s;
+ struct nouveau_channel *chan;
+ struct nouveau_cli *cli;
+ struct nouveau_fence *fence;
+ struct nv04_display *dispnv04 = nv04_display(dev);
+ int head = nouveau_crtc(crtc)->index;
+ int ret;
+
+ chan = drm->channel;
+ if (!chan)
+ return -ENODEV;
+ cli = (void *)chan->user.client;
+
+ s = kzalloc(sizeof(*s), GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ if (new_bo != old_bo) {
+ ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM, true);
+ if (ret)
+ goto fail_free;
+ }
+
+ mutex_lock(&cli->mutex);
+ ret = ttm_bo_reserve(&new_bo->bo, true, false, NULL);
+ if (ret)
+ goto fail_unpin;
+
+ /* synchronise rendering channel with the kernel's channel */
+ ret = nouveau_fence_sync(new_bo, chan, false, true);
+ if (ret) {
+ ttm_bo_unreserve(&new_bo->bo);
+ goto fail_unpin;
+ }
+
+ if (new_bo != old_bo) {
+ ttm_bo_unreserve(&new_bo->bo);
+
+ ret = ttm_bo_reserve(&old_bo->bo, true, false, NULL);
+ if (ret)
+ goto fail_unpin;
+ }
+
+ /* Initialize a page flip struct */
+ *s = (struct nv04_page_flip_state)
+ { { }, event, crtc, fb->format->cpp[0] * 8, fb->pitches[0],
+ new_bo->bo.offset };
+
+ /* Keep vblanks on during flip, for the target crtc of this flip */
+ drm_crtc_vblank_get(crtc);
+
+ /* Emit a page flip */
+ if (swap_interval) {
+ ret = RING_SPACE(chan, 8);
+ if (ret)
+ goto fail_unreserve;
+
+ BEGIN_NV04(chan, NvSubImageBlit, 0x012c, 1);
+ OUT_RING (chan, 0);
+ BEGIN_NV04(chan, NvSubImageBlit, 0x0134, 1);
+ OUT_RING (chan, head);
+ BEGIN_NV04(chan, NvSubImageBlit, 0x0100, 1);
+ OUT_RING (chan, 0);
+ BEGIN_NV04(chan, NvSubImageBlit, 0x0130, 1);
+ OUT_RING (chan, 0);
+ }
+
+ nouveau_bo_ref(new_bo, &dispnv04->image[head]);
+
+ ret = nv04_page_flip_emit(chan, old_bo, new_bo, s, &fence);
+ if (ret)
+ goto fail_unreserve;
+ mutex_unlock(&cli->mutex);
+
+ /* Update the crtc struct and cleanup */
+ crtc->primary->fb = fb;
+
+ nouveau_bo_fence(old_bo, fence, false);
+ ttm_bo_unreserve(&old_bo->bo);
+ if (old_bo != new_bo)
+ nouveau_bo_unpin(old_bo);
+ nouveau_fence_unref(&fence);
+ return 0;
+
+fail_unreserve:
+ drm_crtc_vblank_put(crtc);
+ ttm_bo_unreserve(&old_bo->bo);
+fail_unpin:
+ mutex_unlock(&cli->mutex);
+ if (old_bo != new_bo)
+ nouveau_bo_unpin(new_bo);
+fail_free:
+ kfree(s);
+ return ret;
+}
+
static const struct drm_crtc_funcs nv04_crtc_funcs = {
.cursor_set = nv04_crtc_cursor_set,
.cursor_move = nv04_crtc_cursor_move,
.gamma_set = nv_crtc_gamma_set,
.set_config = nouveau_crtc_set_config,
- .page_flip = nouveau_crtc_page_flip,
+ .page_flip = nv04_crtc_page_flip,
.destroy = nv_crtc_destroy,
};
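
One detail worth noting in the flip code moved above: completion must run under dev->event_lock, and a flip with no userspace event still has to drop the vblank reference taken at submit time. A condensed sketch of just that completion rule (hypothetical wrapper; the real nv04_finish_page_flip() also dequeues the flip state):

#include <drm/drm_device.h>
#include <drm/drm_vblank.h>

static void complete_flip(struct drm_device *dev, struct drm_crtc *crtc,
			  struct drm_pending_vblank_event *event)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (event)
		drm_crtc_arm_vblank_event(crtc, event); /* fires at next vblank */
	else
		drm_crtc_vblank_put(crtc);	/* no event: just drop the ref */
	spin_unlock_irqrestore(&dev->event_lock, flags);
}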
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 1727d399833c..5713bacaee80 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -30,6 +30,160 @@
#include "hw.h"
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
+#include "nouveau_bo.h"
+
+#include <nvif/if0004.h>
+
+static void
+nv04_display_fini(struct drm_device *dev, bool suspend)
+{
+ struct nv04_display *disp = nv04_display(dev);
+ struct drm_crtc *crtc;
+
+ /* Disable flip completion events. */
+ nvif_notify_put(&disp->flip);
+
+ /* Disable vblank interrupts. */
+ NVWriteCRTC(dev, 0, NV_PCRTC_INTR_EN_0, 0);
+ if (nv_two_heads(dev))
+ NVWriteCRTC(dev, 1, NV_PCRTC_INTR_EN_0, 0);
+
+ if (!suspend)
+ return;
+
+ /* Un-pin FB and cursors so they'll be evicted to system memory. */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct nouveau_framebuffer *nouveau_fb;
+
+ nouveau_fb = nouveau_framebuffer(crtc->primary->fb);
+ if (!nouveau_fb || !nouveau_fb->nvbo)
+ continue;
+
+ nouveau_bo_unpin(nouveau_fb->nvbo);
+ }
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ if (nv_crtc->cursor.nvbo) {
+ if (nv_crtc->cursor.set_offset)
+ nouveau_bo_unmap(nv_crtc->cursor.nvbo);
+ nouveau_bo_unpin(nv_crtc->cursor.nvbo);
+ }
+ }
+}
+
+static int
+nv04_display_init(struct drm_device *dev, bool resume, bool runtime)
+{
+ struct nv04_display *disp = nv04_display(dev);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_encoder *encoder;
+ struct drm_crtc *crtc;
+ int ret;
+
+ /* meh.. modeset apparently doesn't set up all the regs and depends
+ * on pre-existing state, so for now load the state of the card *before*
+ * nouveau was loaded, and then do a modeset.
+ *
+ * the best thing to do is probably to make the save/restore routines not
+ * save/restore "pre-load" state, but be more general so we can save
+ * on suspend too.
+ */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ nv_crtc->save(&nv_crtc->base);
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.base.head)
+ encoder->enc_save(&encoder->base.base);
+
+ /* Enable flip completion events. */
+ nvif_notify_get(&disp->flip);
+
+ if (!resume)
+ return 0;
+
+ /* Re-pin FB/cursors. */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct nouveau_framebuffer *nouveau_fb;
+
+ nouveau_fb = nouveau_framebuffer(crtc->primary->fb);
+ if (!nouveau_fb || !nouveau_fb->nvbo)
+ continue;
+
+ ret = nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM, true);
+ if (ret)
+ NV_ERROR(drm, "Could not pin framebuffer\n");
+ }
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ if (!nv_crtc->cursor.nvbo)
+ continue;
+
+ ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM, true);
+ if (!ret && nv_crtc->cursor.set_offset)
+ ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
+ if (ret)
+ NV_ERROR(drm, "Could not pin/map cursor.\n");
+ }
+
+ /* Force CLUT to get re-loaded during modeset. */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+
+ nv_crtc->lut.depth = 0;
+ }
+
+ /* This should ensure we don't hit a locking problem when someone
+ * wakes us up via a connector. We should never go into suspend
+ * while the display is on anyways.
+ */
+ if (runtime)
+ return 0;
+
+ /* Restore mode. */
+ drm_helper_resume_force_mode(dev);
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+
+ if (!nv_crtc->cursor.nvbo)
+ continue;
+
+ if (nv_crtc->cursor.set_offset)
+ nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset);
+ nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
+ nv_crtc->cursor_saved_y);
+ }
+
+ return 0;
+}
+
+static void
+nv04_display_destroy(struct drm_device *dev)
+{
+ struct nv04_display *disp = nv04_display(dev);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_encoder *encoder;
+ struct nouveau_crtc *nv_crtc;
+
+ /* Restore state */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.base.head)
+ encoder->enc_restore(&encoder->base.base);
+
+ list_for_each_entry(nv_crtc, &dev->mode_config.crtc_list, base.head)
+ nv_crtc->restore(&nv_crtc->base);
+
+ nouveau_hw_save_vga_fonts(dev, 0);
+
+ nvif_notify_fini(&disp->flip);
+
+ nouveau_display(dev)->priv = NULL;
+ kfree(disp);
+
+ nvif_object_unmap(&drm->client.device.object);
+}
int
nv04_display_create(struct drm_device *dev)
@@ -58,6 +212,13 @@ nv04_display_create(struct drm_device *dev)
/* Pre-nv50 doesn't support atomic, so don't expose the ioctls */
dev->driver->driver_features &= ~DRIVER_ATOMIC;
+ /* Request page flip completion event. */
+ if (drm->nvsw.client) {
+ nvif_notify_init(&drm->nvsw, nv04_flip_complete,
+ false, NV04_NVSW_NTFY_UEVENT,
+ NULL, 0, 0, &disp->flip);
+ }
+
nouveau_hw_save_vga_fonts(dev, 1);
nv04_crtc_create(dev, 0);
@@ -121,58 +282,3 @@ nv04_display_create(struct drm_device *dev)
return 0;
}
-
-void
-nv04_display_destroy(struct drm_device *dev)
-{
- struct nv04_display *disp = nv04_display(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_encoder *encoder;
- struct nouveau_crtc *nv_crtc;
-
- /* Restore state */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.base.head)
- encoder->enc_restore(&encoder->base.base);
-
- list_for_each_entry(nv_crtc, &dev->mode_config.crtc_list, base.head)
- nv_crtc->restore(&nv_crtc->base);
-
- nouveau_hw_save_vga_fonts(dev, 0);
-
- nouveau_display(dev)->priv = NULL;
- kfree(disp);
-
- nvif_object_unmap(&drm->client.device.object);
-}
-
-int
-nv04_display_init(struct drm_device *dev)
-{
- struct nouveau_encoder *encoder;
- struct nouveau_crtc *crtc;
-
- /* meh.. modeset apparently doesn't setup all the regs and depends
- * on pre-existing state, for now load the state of the card *before*
- * nouveau was loaded, and then do a modeset.
- *
- * best thing to do probably is to make save/restore routines not
- * save/restore "pre-load" state, but more general so we can save
- * on suspend too.
- */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
- crtc->save(&crtc->base);
-
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.base.head)
- encoder->enc_save(&encoder->base.base);
-
- return 0;
-}
-
-void
-nv04_display_fini(struct drm_device *dev)
-{
- /* disable vblank interrupts */
- NVWriteCRTC(dev, 0, NV_PCRTC_INTR_EN_0, 0);
- if (nv_two_heads(dev))
- NVWriteCRTC(dev, 1, NV_PCRTC_INTR_EN_0, 0);
-}
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h
index f74f1f2b186e..c6ed20a09f4a 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h
@@ -82,6 +82,7 @@ struct nv04_display {
uint32_t saved_vga_font[4][16384];
uint32_t dac_users[4];
struct nouveau_bo *image[2];
+ struct nvif_notify flip;
};
static inline struct nv04_display *
@@ -92,9 +93,6 @@ nv04_display(struct drm_device *dev)
/* nv04_display.c */
int nv04_display_create(struct drm_device *);
-void nv04_display_destroy(struct drm_device *);
-int nv04_display_init(struct drm_device *);
-void nv04_display_fini(struct drm_device *);
/* nv04_crtc.c */
int nv04_crtc_create(struct drm_device *, int index);
@@ -176,4 +174,5 @@ nouveau_bios_run_init_table(struct drm_device *dev, u16 table,
);
}
+int nv04_flip_complete(struct nvif_notify *);
#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core.c b/drivers/gpu/drm/nouveau/dispnv50/core.c
index c25e0ebe3c92..27ea3f34706d 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core.c
@@ -42,7 +42,7 @@ nv50_core_new(struct nouveau_drm *drm, struct nv50_core **pcore)
int version;
int (*new)(struct nouveau_drm *, s32, struct nv50_core **);
} cores[] = {
- { TU104_DISP_CORE_CHANNEL_DMA, 0, corec57d_new },
+ { TU102_DISP_CORE_CHANNEL_DMA, 0, corec57d_new },
{ GV100_DISP_CORE_CHANNEL_DMA, 0, corec37d_new },
{ GP102_DISP_CORE_CHANNEL_DMA, 0, core917d_new },
{ GP100_DISP_CORE_CHANNEL_DMA, 0, core917d_new },
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs.c b/drivers/gpu/drm/nouveau/dispnv50/curs.c
index cb6e4d2b1b45..121c24a18f11 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/curs.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs.c
@@ -31,7 +31,7 @@ nv50_curs_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw)
int version;
int (*new)(struct nouveau_drm *, int, s32, struct nv50_wndw **);
} curses[] = {
- { TU104_DISP_CURSOR, 0, cursc37a_new },
+ { TU102_DISP_CURSOR, 0, cursc37a_new },
{ GV100_DISP_CURSOR, 0, cursc37a_new },
{ GK104_DISP_CURSOR, 0, curs907a_new },
{ GF110_DISP_CURSOR, 0, curs907a_new },
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index e8bb35f6d015..4b1650f51955 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -817,7 +817,8 @@ nv50_msto_enable(struct drm_encoder *encoder)
r = drm_dp_mst_allocate_vcpi(&mstm->mgr, mstc->port, armh->dp.pbn,
armh->dp.tu);
- WARN_ON(!r);
+ if (!r)
+ DRM_DEBUG_KMS("Failed to allocate VCPI\n");
if (!mstm->links++)
nv50_outp_acquire(mstm->outp);
@@ -2220,8 +2221,8 @@ nv50_disp_func = {
* Init
*****************************************************************************/
-void
-nv50_display_fini(struct drm_device *dev)
+static void
+nv50_display_fini(struct drm_device *dev, bool suspend)
{
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
@@ -2242,8 +2243,8 @@ nv50_display_fini(struct drm_device *dev)
}
}
-int
-nv50_display_init(struct drm_device *dev)
+static int
+nv50_display_init(struct drm_device *dev, bool resume, bool runtime)
{
struct nv50_core *core = nv50_disp(dev)->core;
struct drm_encoder *encoder;
@@ -2269,7 +2270,7 @@ nv50_display_init(struct drm_device *dev)
return 0;
}
-void
+static void
nv50_display_destroy(struct drm_device *dev)
{
struct nv50_disp *disp = nv50_disp(dev);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wimm.c b/drivers/gpu/drm/nouveau/dispnv50/wimm.c
index bc9eeaf212ae..a1ac153d5e98 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wimm.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wimm.c
@@ -31,7 +31,7 @@ nv50_wimm_init(struct nouveau_drm *drm, struct nv50_wndw *wndw)
int version;
int (*init)(struct nouveau_drm *, s32, struct nv50_wndw *);
} wimms[] = {
- { TU104_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
+ { TU102_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
{ GV100_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
{}
};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index ba9eea2ff16b..b95181027b31 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -626,7 +626,7 @@ nv50_wndw_new(struct nouveau_drm *drm, enum drm_plane_type type, int index,
int (*new)(struct nouveau_drm *, enum drm_plane_type,
int, s32, struct nv50_wndw **);
} wndws[] = {
- { TU104_DISP_WINDOW_CHANNEL_DMA, 0, wndwc57e_new },
+ { TU102_DISP_WINDOW_CHANNEL_DMA, 0, wndwc57e_new },
{ GV100_DISP_WINDOW_CHANNEL_DMA, 0, wndwc37e_new },
{}
};
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index 1d82cbf70cf4..7d556a1c92fa 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -54,6 +54,9 @@
#define VOLTA_USERMODE_A 0x0000c361
+#define MAXWELL_FAULT_BUFFER_A /* clb069.h */ 0x0000b069
+#define VOLTA_FAULT_BUFFER_A /* clb069.h */ 0x0000c369
+
#define NV03_CHANNEL_DMA /* cl506b.h */ 0x0000006b
#define NV10_CHANNEL_DMA /* cl506b.h */ 0x0000006e
#define NV17_CHANNEL_DMA /* cl506b.h */ 0x0000176e
@@ -84,7 +87,7 @@
#define GP100_DISP /* cl5070.h */ 0x00009770
#define GP102_DISP /* cl5070.h */ 0x00009870
#define GV100_DISP /* cl5070.h */ 0x0000c370
-#define TU104_DISP /* cl5070.h */ 0x0000c570
+#define TU102_DISP /* cl5070.h */ 0x0000c570
#define NV31_MPEG 0x00003174
#define G82_MPEG 0x00008274
@@ -97,7 +100,7 @@
#define GF110_DISP_CURSOR /* cl507a.h */ 0x0000907a
#define GK104_DISP_CURSOR /* cl507a.h */ 0x0000917a
#define GV100_DISP_CURSOR /* cl507a.h */ 0x0000c37a
-#define TU104_DISP_CURSOR /* cl507a.h */ 0x0000c57a
+#define TU102_DISP_CURSOR /* cl507a.h */ 0x0000c57a
#define NV50_DISP_OVERLAY /* cl507b.h */ 0x0000507b
#define G82_DISP_OVERLAY /* cl507b.h */ 0x0000827b
@@ -106,7 +109,7 @@
#define GK104_DISP_OVERLAY /* cl507b.h */ 0x0000917b
#define GV100_DISP_WINDOW_IMM_CHANNEL_DMA /* clc37b.h */ 0x0000c37b
-#define TU104_DISP_WINDOW_IMM_CHANNEL_DMA /* clc37b.h */ 0x0000c57b
+#define TU102_DISP_WINDOW_IMM_CHANNEL_DMA /* clc37b.h */ 0x0000c57b
#define NV50_DISP_BASE_CHANNEL_DMA /* cl507c.h */ 0x0000507c
#define G82_DISP_BASE_CHANNEL_DMA /* cl507c.h */ 0x0000827c
@@ -129,7 +132,7 @@
#define GP100_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000977d
#define GP102_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000987d
#define GV100_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000c37d
-#define TU104_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000c57d
+#define TU102_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000c57d
#define NV50_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000507e
#define G82_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000827e
@@ -139,7 +142,7 @@
#define GK104_DISP_OVERLAY_CONTROL_DMA /* cl507e.h */ 0x0000917e
#define GV100_DISP_WINDOW_CHANNEL_DMA /* clc37e.h */ 0x0000c37e
-#define TU104_DISP_WINDOW_CHANNEL_DMA /* clc37e.h */ 0x0000c57e
+#define TU102_DISP_WINDOW_CHANNEL_DMA /* clc37e.h */ 0x0000c57e
#define NV50_TESLA 0x00005097
#define G82_TESLA 0x00008297
diff --git a/drivers/gpu/drm/nouveau/include/nvif/clb069.h b/drivers/gpu/drm/nouveau/include/nvif/clb069.h
new file mode 100644
index 000000000000..eef5d0227bab
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/clb069.h
@@ -0,0 +1,12 @@
+#ifndef __NVIF_CLB069_H__
+#define __NVIF_CLB069_H__
+struct nvif_clb069_v0 {
+ __u8 version;
+ __u8 pad01[3];
+ __u32 entries;
+ __u32 get;
+ __u32 put;
+};
+
+#define NVB069_V0_NTFY_FAULT 0x00
+#endif
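
The new b069 class argument describes a fault buffer as a ring: entries is the ring size, and get/put are the consumer and producer indices. A generic drain loop over such a ring could look like the sketch below (the actual fault-entry format is hardware-specific and not part of this header):

#include <linux/types.h>

/* Hypothetical consumer: advance 'get' until it catches up with 'put'. */
static void fault_ring_drain(u32 *get, u32 put, u32 entries)
{
	while (*get != put) {
		/* ... decode and handle the fault entry at index *get ... */
		*get = (*get + 1) % entries;
	}
}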
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if000c.h b/drivers/gpu/drm/nouveau/include/nvif/if000c.h
index 2928ecd989ad..d6dd40f21eed 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/if000c.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/if000c.h
@@ -3,7 +3,8 @@
struct nvif_vmm_v0 {
__u8 version;
__u8 page_nr;
- __u8 pad02[6];
+ __u8 managed;
+ __u8 pad03[5];
__u64 addr;
__u64 size;
__u8 data[];
@@ -14,6 +15,9 @@ struct nvif_vmm_v0 {
#define NVIF_VMM_V0_PUT 0x02
#define NVIF_VMM_V0_MAP 0x03
#define NVIF_VMM_V0_UNMAP 0x04
+#define NVIF_VMM_V0_PFNMAP 0x05
+#define NVIF_VMM_V0_PFNCLR 0x06
+#define NVIF_VMM_V0_MTHD(i) ((i) + 0x80)
struct nvif_vmm_page_v0 {
__u8 version;
@@ -61,4 +65,28 @@ struct nvif_vmm_unmap_v0 {
__u8 pad01[7];
__u64 addr;
};
+
+struct nvif_vmm_pfnmap_v0 {
+ __u8 version;
+ __u8 page;
+ __u8 pad02[6];
+ __u64 addr;
+ __u64 size;
+#define NVIF_VMM_PFNMAP_V0_ADDR 0xfffffffffffff000ULL
+#define NVIF_VMM_PFNMAP_V0_ADDR_SHIFT 12
+#define NVIF_VMM_PFNMAP_V0_APER 0x00000000000000f0ULL
+#define NVIF_VMM_PFNMAP_V0_HOST 0x0000000000000000ULL
+#define NVIF_VMM_PFNMAP_V0_VRAM 0x0000000000000010ULL
+#define NVIF_VMM_PFNMAP_V0_W 0x0000000000000002ULL
+#define NVIF_VMM_PFNMAP_V0_V 0x0000000000000001ULL
+#define NVIF_VMM_PFNMAP_V0_NONE 0x0000000000000000ULL
+ __u64 phys[];
+};
+
+struct nvif_vmm_pfnclr_v0 {
+ __u8 version;
+ __u8 pad01[7];
+ __u64 addr;
+ __u64 size;
+};
#endif
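
Each phys[] element in the PFNMAP argument packs a page-frame number and its attributes into one 64-bit word using the masks defined above. A hedged encoding helper (the function itself is hypothetical; the macros are the ones added to this header):

#include <linux/types.h>
#include <nvif/if000c.h>

static u64 pfnmap_encode(u64 pfn, bool vram, bool writable)
{
	u64 v = (pfn << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT) &
		NVIF_VMM_PFNMAP_V0_ADDR;

	v |= vram ? NVIF_VMM_PFNMAP_V0_VRAM : NVIF_VMM_PFNMAP_V0_HOST;
	if (writable)
		v |= NVIF_VMM_PFNMAP_V0_W;

	return v | NVIF_VMM_PFNMAP_V0_V;	/* mark the entry valid */
}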
diff --git a/drivers/gpu/drm/nouveau/include/nvif/ifc00d.h b/drivers/gpu/drm/nouveau/include/nvif/ifc00d.h
index 1d9c637859f3..4cabd613a280 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/ifc00d.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/ifc00d.h
@@ -6,6 +6,12 @@ struct gp100_vmm_vn {
/* nvif_vmm_vX ... */
};
+struct gp100_vmm_v0 {
+ /* nvif_vmm_vX ... */
+ __u8 version;
+ __u8 fault_replay;
+};
+
struct gp100_vmm_map_vn {
/* nvif_vmm_map_vX ... */
};
@@ -18,4 +24,19 @@ struct gp100_vmm_map_v0 {
__u8 priv;
__u8 kind;
};
+
+#define GP100_VMM_VN_FAULT_REPLAY NVIF_VMM_V0_MTHD(0x00)
+#define GP100_VMM_VN_FAULT_CANCEL NVIF_VMM_V0_MTHD(0x01)
+
+struct gp100_vmm_fault_replay_vn {
+};
+
+struct gp100_vmm_fault_cancel_v0 {
+ __u8 version;
+ __u8 hub;
+ __u8 gpc;
+ __u8 client;
+ __u8 pad04[4];
+ __u64 inst;
+};
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/vmm.h b/drivers/gpu/drm/nouveau/include/nvif/vmm.h
index c5db8a2e82df..79bf85d2f43a 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/vmm.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/vmm.h
@@ -30,8 +30,8 @@ struct nvif_vmm {
int page_nr;
};
-int nvif_vmm_init(struct nvif_mmu *, s32 oclass, u64 addr, u64 size,
- void *argv, u32 argc, struct nvif_vmm *);
+int nvif_vmm_init(struct nvif_mmu *, s32 oclass, bool managed, u64 addr,
+ u64 size, void *argv, u32 argc, struct nvif_vmm *);
void nvif_vmm_fini(struct nvif_vmm *);
int nvif_vmm_get(struct nvif_vmm *, enum nvif_vmm_get, bool sparse,
u8 page, u8 align, u64 size, struct nvif_vma *);
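
Every caller of nvif_vmm_init() now has to state whether the VMM is client-managed; the flag lands in the new managed byte of struct nvif_vmm_v0 shown earlier. A hedged example against the new prototype (the surrounding function and its arguments are hypothetical):

#include <nvif/vmm.h>

static int example_vmm_create(struct nvif_mmu *mmu, s32 oclass, u64 size)
{
	struct nvif_vmm vmm;
	int ret;

	/* 'true' requests a client-managed address space. */
	ret = nvif_vmm_init(mmu, oclass, true, 0 /* addr */, size,
			    NULL, 0, &vmm);
	if (ret)
		return ret;

	nvif_vmm_fini(&vmm);
	return 0;
}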
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index 72e4dc1f0236..642492344196 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -28,6 +28,7 @@ enum nvkm_devidx {
NVKM_SUBDEV_ICCSENSE,
NVKM_SUBDEV_THERM,
NVKM_SUBDEV_CLK,
+ NVKM_SUBDEV_GSP,
NVKM_SUBDEV_SECBOOT,
NVKM_ENGINE_BSP,
@@ -137,6 +138,7 @@ struct nvkm_device {
struct nvkm_fb *fb;
struct nvkm_fuse *fuse;
struct nvkm_gpio *gpio;
+ struct nvkm_gsp *gsp;
struct nvkm_i2c *i2c;
struct nvkm_subdev *ibus;
struct nvkm_iccsense *iccsense;
@@ -209,6 +211,7 @@ struct nvkm_device_chip {
int (*fb )(struct nvkm_device *, int idx, struct nvkm_fb **);
int (*fuse )(struct nvkm_device *, int idx, struct nvkm_fuse **);
int (*gpio )(struct nvkm_device *, int idx, struct nvkm_gpio **);
+ int (*gsp )(struct nvkm_device *, int idx, struct nvkm_gsp **);
int (*i2c )(struct nvkm_device *, int idx, struct nvkm_i2c **);
int (*ibus )(struct nvkm_device *, int idx, struct nvkm_subdev **);
int (*iccsense)(struct nvkm_device *, int idx, struct nvkm_iccsense **);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
index 86abe76023c2..5f3650692e4d 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
@@ -11,5 +11,5 @@ int gm200_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
int gp100_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
int gp102_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
int gv100_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
-int tu104_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
+int tu102_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
index 5ca86e178bb9..3026b22d44fb 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
@@ -36,5 +36,5 @@ int gm200_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
int gp100_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
int gp102_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
int gv100_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int tu104_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int tu102_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
index 3b2b685778eb..b7fc04dd1628 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
@@ -74,5 +74,5 @@ int gm20b_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
int gp100_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
int gp10b_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
int gv100_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int tu104_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int tu102_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
index ba1518ff8b66..1e924c7f7ba7 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
@@ -10,6 +10,9 @@ struct nvkm_gr {
u64 nvkm_gr_units(struct nvkm_gr *);
int nvkm_gr_tlb_flush(struct nvkm_gr *);
+int nvkm_gr_ctxsw_pause(struct nvkm_device *);
+int nvkm_gr_ctxsw_resume(struct nvkm_device *);
+u32 nvkm_gr_ctxsw_inst(struct nvkm_device *);
int nv04_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
int nv10_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
index fe716859d4a9..b72a4844c5f7 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
@@ -6,6 +6,8 @@
struct nvkm_nvdec {
struct nvkm_engine engine;
+ u32 addr;
+
struct nvkm_falcon *falcon;
};
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h
index f7d89822b905..c93ad332461a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h
@@ -5,10 +5,13 @@
struct nvkm_sec2 {
struct nvkm_engine engine;
+ u32 addr;
+
struct nvkm_falcon *falcon;
struct nvkm_msgqueue *queue;
struct work_struct work;
};
int gp102_sec2_new(struct nvkm_device *, int, struct nvkm_sec2 **);
+int tu102_sec2_new(struct nvkm_device *, int, struct nvkm_sec2 **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
index fd9d713b611c..da14486317ca 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
@@ -29,5 +29,5 @@ int gf100_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
int gk20a_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
int gm107_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
int gm20b_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
-int tu104_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
+int tu102_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
index 1b71812a790b..8ba982c2fdfb 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
@@ -31,5 +31,5 @@ int gf100_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
int gm107_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
int gm200_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
int gv100_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
-int tu104_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int tu102_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
index 127f48066026..97322f95b3ee 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
@@ -13,6 +13,8 @@ struct nvkm_fault {
struct nvkm_event event;
struct nvkm_notify nrpfb;
+
+ struct nvkm_device_oclass user;
};
struct nvkm_fault_data {
@@ -30,5 +32,5 @@ struct nvkm_fault_data {
int gp100_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
int gv100_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
-int tu104_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
+int tu102_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
new file mode 100644
index 000000000000..4c672a5c4cd5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
@@ -0,0 +1,14 @@
+#ifndef __NVKM_GSP_H__
+#define __NVKM_GSP_H__
+#define nvkm_gsp(p) container_of((p), struct nvkm_gsp, subdev)
+#include <core/subdev.h>
+
+struct nvkm_gsp {
+ struct nvkm_subdev subdev;
+ u32 addr;
+
+ struct nvkm_falcon *falcon;
+};
+
+int gv100_gsp_new(struct nvkm_device *, int, struct nvkm_gsp **);
+#endif
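
The nvkm_gsp() macro above is the kernel's standard container_of() downcast: given a pointer to the embedded subdev, it recovers the enclosing nvkm_gsp. The same pattern in isolation (hypothetical types):

#include <linux/kernel.h>
#include <linux/list.h>

struct wrapper {
	int cookie;
	struct list_head node;	/* embedded member */
};

static struct wrapper *wrapper_from_node(struct list_head *node)
{
	/* Subtracts offsetof(struct wrapper, node) from the member pointer. */
	return container_of(node, struct wrapper, node);
}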
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
index b66dedd8abb6..e38f4958dea2 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
@@ -31,5 +31,5 @@ int gk104_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int gk20a_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int gp100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int gp10b_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
-int tu104_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
+int tu102_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
index 0a0e064f22e5..28ade86f74c5 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
@@ -17,6 +17,7 @@ struct nvkm_vma {
bool part:1; /* Region was split from an allocated region by map(). */
bool user:1; /* Region user-allocated. */
bool busy:1; /* Region busy (for temporarily preventing user access). */
+ bool mapped:1; /* Region contains valid pages. */
struct nvkm_memory *memory; /* Memory currently mapped into VMA. */
struct nvkm_tags *tags; /* Compression tag reference. */
};
@@ -44,6 +45,8 @@ struct nvkm_vmm {
dma_addr_t null;
void *nullp;
+
+ bool replay;
};
int nvkm_vmm_new(struct nvkm_device *, u64 addr, u64 size, void *argv, u32 argc,
@@ -63,6 +66,7 @@ struct nvkm_vmm_map {
struct nvkm_mm_node *mem;
struct scatterlist *sgl;
dma_addr_t *dma;
+ u64 *pfn;
u64 off;
const struct nvkm_vmm_page *page;
@@ -130,5 +134,5 @@ int gm20b_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gp100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gp10b_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gv100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
-int tu104_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int tu102_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h
index f7d3eb647e2e..2904e67d79d2 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h
@@ -9,6 +9,7 @@ struct nvkm_top {
struct list_head device;
};
+u32 nvkm_top_addr(struct nvkm_device *, enum nvkm_devidx);
u32 nvkm_top_reset(struct nvkm_device *, enum nvkm_devidx);
u32 nvkm_top_intr(struct nvkm_device *, u32 intr, u64 *subdevs);
u32 nvkm_top_intr_mask(struct nvkm_device *, enum nvkm_devidx);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
index 8a0f85f5fc1a..6a765682fbfa 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
@@ -38,6 +38,7 @@ int nvkm_volt_set_id(struct nvkm_volt *, u8 id, u8 min_id, u8 temp,
int nv40_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
int gf100_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
+int gf117_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
int gk104_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
int gk20a_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
int gm20b_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index b06cdac8f3a2..c3fd5dd39ed9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -214,6 +214,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
WARN_ON(1);
break;
}
+ break;
case NOUVEAU_GETPARAM_FB_SIZE:
getparam->value = drm->gem.vram_available;
break;
@@ -338,7 +339,8 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
goto done;
if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
- ret = nouveau_vma_new(chan->ntfy, &cli->vmm, &chan->ntfy_vma);
+ ret = nouveau_vma_new(chan->ntfy, chan->chan->vmm,
+ &chan->ntfy_vma);
if (ret)
goto done;
}
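
The single added break above closes an unintended switch fall-through: without it, control fell from the previous case into NOUVEAU_GETPARAM_FB_SIZE and overwrote the value just computed. The bug class in miniature (values and case labels are hypothetical):

#include <linux/types.h>

static u64 getparam_value(int param, u64 graph_units, u64 fb_size)
{
	u64 value = 0;

	switch (param) {
	case 1:	/* stand-in for the graph-units parameter */
		value = graph_units;
		break;	/* the fix: without this, fb_size clobbers value */
	case 2:	/* stand-in for NOUVEAU_GETPARAM_FB_SIZE */
		value = fb_size;
		break;
	}

	return value;
}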
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 73eff52036d2..34a998012bf6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -194,7 +194,7 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
struct nouveau_drm *drm = cli->drm;
struct nouveau_bo *nvbo;
struct nvif_mmu *mmu = &cli->mmu;
- struct nvif_vmm *vmm = &cli->vmm.vmm;
+ struct nvif_vmm *vmm = cli->svm.cli ? &cli->svm.vmm : &cli->vmm.vmm;
size_t acc_size;
int type = ttm_bo_type_device;
int ret, i, pi = -1;
@@ -1434,7 +1434,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 || !mem->kind)
/* untiled */
break;
- /* fallthrough, tiled memory */
+ /* fall through - tiled memory */
case TTM_PL_VRAM:
reg->bus.offset = reg->start << PAGE_SHIFT;
reg->bus.base = device->func->resource_addr(device, 1);
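
The comment rewrite above is cosmetic but deliberate: GCC's -Wimplicit-fallthrough heuristics match comments spelled like "fall through", so the new form documents the intent and silences the warning. In context (a sketch with stand-in case values):

#include <linux/types.h>

static int pick_bus_base(int mem_type, bool tiled, int vram_base)
{
	int base = 0;

	switch (mem_type) {
	case 1:			/* stand-in for TTM_PL_TT */
		if (!tiled)
			break;	/* untiled system memory: no aperture */
		/* fall through - tiled memory takes the VRAM path */
	case 2:			/* stand-in for TTM_PL_VRAM */
		base = vram_base;
		break;
	}

	return base;
}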
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 668afbc29c3e..282fd90b65e1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -42,6 +42,7 @@
#include "nouveau_fence.h"
#include "nouveau_abi16.h"
#include "nouveau_vmm.h"
+#include "nouveau_svm.h"
MODULE_PARM_DESC(vram_pushbuf, "Create DMA push buffers in VRAM");
int nouveau_vram_pushbuf;
@@ -95,6 +96,10 @@ nouveau_channel_del(struct nouveau_channel **pchan)
if (chan->fence)
nouveau_fence(chan->drm)->context_del(chan);
+
+ if (cli)
+ nouveau_svmm_part(chan->vmm->svmm, chan->inst);
+
nvif_object_fini(&chan->nvsw);
nvif_object_fini(&chan->gart);
nvif_object_fini(&chan->vram);
@@ -130,6 +135,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
chan->device = device;
chan->drm = drm;
+ chan->vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
atomic_set(&chan->killed, 0);
/* allocate memory for dma push buffer */
@@ -157,7 +163,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
chan->push.addr = chan->push.buffer->bo.offset;
if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
- ret = nouveau_vma_new(chan->push.buffer, &cli->vmm,
+ ret = nouveau_vma_new(chan->push.buffer, chan->vmm,
&chan->push.vma);
if (ret) {
nouveau_channel_del(pchan);
@@ -172,7 +178,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
args.target = NV_DMA_V0_TARGET_VM;
args.access = NV_DMA_V0_ACCESS_VM;
args.start = 0;
- args.limit = cli->vmm.vmm.limit - 1;
+ args.limit = chan->vmm->vmm.limit - 1;
} else
if (chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) {
if (device->info.family == NV_DEVICE_INFO_V0_TNT) {
@@ -202,7 +208,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
args.target = NV_DMA_V0_TARGET_VM;
args.access = NV_DMA_V0_ACCESS_RDWR;
args.start = 0;
- args.limit = cli->vmm.vmm.limit - 1;
+ args.limit = chan->vmm->vmm.limit - 1;
}
}
@@ -220,7 +226,6 @@ static int
nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
u64 runlist, bool priv, struct nouveau_channel **pchan)
{
- struct nouveau_cli *cli = (void *)device->object.client;
static const u16 oclasses[] = { TURING_CHANNEL_GPFIFO_A,
VOLTA_CHANNEL_GPFIFO_A,
PASCAL_CHANNEL_GPFIFO_A,
@@ -255,7 +260,7 @@ nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
args.volta.ilength = 0x02000;
args.volta.ioffset = 0x10000 + chan->push.addr;
args.volta.runlist = runlist;
- args.volta.vmm = nvif_handle(&cli->vmm.vmm.object);
+ args.volta.vmm = nvif_handle(&chan->vmm->vmm.object);
args.volta.priv = priv;
size = sizeof(args.volta);
} else
@@ -264,7 +269,7 @@ nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
args.kepler.ilength = 0x02000;
args.kepler.ioffset = 0x10000 + chan->push.addr;
args.kepler.runlist = runlist;
- args.kepler.vmm = nvif_handle(&cli->vmm.vmm.object);
+ args.kepler.vmm = nvif_handle(&chan->vmm->vmm.object);
args.kepler.priv = priv;
size = sizeof(args.kepler);
} else
@@ -272,14 +277,14 @@ nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
args.fermi.version = 0;
args.fermi.ilength = 0x02000;
args.fermi.ioffset = 0x10000 + chan->push.addr;
- args.fermi.vmm = nvif_handle(&cli->vmm.vmm.object);
+ args.fermi.vmm = nvif_handle(&chan->vmm->vmm.object);
size = sizeof(args.fermi);
} else {
args.nv50.version = 0;
args.nv50.ilength = 0x02000;
args.nv50.ioffset = 0x10000 + chan->push.addr;
args.nv50.pushbuf = nvif_handle(&chan->push.ctxdma);
- args.nv50.vmm = nvif_handle(&cli->vmm.vmm.object);
+ args.nv50.vmm = nvif_handle(&chan->vmm->vmm.object);
size = sizeof(args.nv50);
}
@@ -350,7 +355,6 @@ static int
nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
{
struct nvif_device *device = chan->device;
- struct nouveau_cli *cli = (void *)chan->user.client;
struct nouveau_drm *drm = chan->drm;
struct nv_dma_v0 args = {};
int ret, i;
@@ -376,7 +380,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
args.target = NV_DMA_V0_TARGET_VM;
args.access = NV_DMA_V0_ACCESS_VM;
args.start = 0;
- args.limit = cli->vmm.vmm.limit - 1;
+ args.limit = chan->vmm->vmm.limit - 1;
} else {
args.target = NV_DMA_V0_TARGET_VRAM;
args.access = NV_DMA_V0_ACCESS_RDWR;
@@ -393,7 +397,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
args.target = NV_DMA_V0_TARGET_VM;
args.access = NV_DMA_V0_ACCESS_VM;
args.start = 0;
- args.limit = cli->vmm.vmm.limit - 1;
+ args.limit = chan->vmm->vmm.limit - 1;
} else
if (chan->drm->agp.bridge) {
args.target = NV_DMA_V0_TARGET_AGP;
@@ -405,7 +409,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
args.target = NV_DMA_V0_TARGET_VM;
args.access = NV_DMA_V0_ACCESS_RDWR;
args.start = 0;
- args.limit = cli->vmm.vmm.limit - 1;
+ args.limit = chan->vmm->vmm.limit - 1;
}
ret = nvif_object_init(&chan->user, gart, NV_DMA_IN_MEMORY,
@@ -495,6 +499,10 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
nouveau_channel_del(pchan);
}
+ ret = nouveau_svmm_join((*pchan)->vmm->svmm, (*pchan)->inst);
+ if (ret)
+ nouveau_channel_del(pchan);
+
done:
cli->base.super = super;
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h
index 28418f4e5748..93814d1d31e4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.h
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.h
@@ -8,6 +8,7 @@ struct nvif_device;
struct nouveau_channel {
struct nvif_device *device;
struct nouveau_drm *drm;
+ struct nouveau_vmm *vmm;
int chid;
u64 inst;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 56b6ac1b8edd..55c0fa451163 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -32,18 +32,13 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_probe_helper.h>
-#include <nvif/class.h>
-
#include "nouveau_fbcon.h"
-#include "dispnv04/hw.h"
#include "nouveau_crtc.h"
-#include "nouveau_dma.h"
#include "nouveau_gem.h"
#include "nouveau_connector.h"
#include "nv50_display.h"
-#include "nouveau_fence.h"
-
+#include <nvif/class.h>
#include <nvif/cl0046.h>
#include <nvif/event.h>
@@ -412,15 +407,14 @@ nouveau_display_acpi_ntfy(struct notifier_block *nb, unsigned long val,
#endif
int
-nouveau_display_init(struct drm_device *dev)
+nouveau_display_init(struct drm_device *dev, bool resume, bool runtime)
{
struct nouveau_display *disp = nouveau_display(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
int ret;
- ret = disp->init(dev);
+ ret = disp->init(dev, resume, runtime);
if (ret)
return ret;
@@ -437,8 +431,6 @@ nouveau_display_init(struct drm_device *dev)
}
drm_connector_list_iter_end(&conn_iter);
- /* enable flip completion events */
- nvif_notify_get(&drm->flip);
return ret;
}
@@ -457,9 +449,6 @@ nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime)
drm_helper_force_disable_all(dev);
}
- /* disable flip completion events */
- nvif_notify_put(&drm->flip);
-
/* disable hotplug interrupts */
drm_connector_list_iter_begin(dev, &conn_iter);
nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
@@ -472,7 +461,7 @@ nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime)
cancel_work_sync(&drm->hpd_work);
drm_kms_helper_poll_disable(dev);
- disp->fini(dev);
+ disp->fini(dev, suspend);
}
static void
@@ -625,7 +614,6 @@ int
nouveau_display_suspend(struct drm_device *dev, bool runtime)
{
struct nouveau_display *disp = nouveau_display(dev);
- struct drm_crtc *crtc;
if (drm_drv_uses_atomic_modeset(dev)) {
if (!runtime) {
@@ -636,32 +624,9 @@ nouveau_display_suspend(struct drm_device *dev, bool runtime)
return ret;
}
}
-
- nouveau_display_fini(dev, true, runtime);
- return 0;
}
nouveau_display_fini(dev, true, runtime);
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct nouveau_framebuffer *nouveau_fb;
-
- nouveau_fb = nouveau_framebuffer(crtc->primary->fb);
- if (!nouveau_fb || !nouveau_fb->nvbo)
- continue;
-
- nouveau_bo_unpin(nouveau_fb->nvbo);
- }
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- if (nv_crtc->cursor.nvbo) {
- if (nv_crtc->cursor.set_offset)
- nouveau_bo_unmap(nv_crtc->cursor.nvbo);
- nouveau_bo_unpin(nv_crtc->cursor.nvbo);
- }
- }
-
return 0;
}
@@ -669,275 +634,16 @@ void
nouveau_display_resume(struct drm_device *dev, bool runtime)
{
struct nouveau_display *disp = nouveau_display(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct drm_crtc *crtc;
- int ret;
+
+ nouveau_display_init(dev, true, runtime);
if (drm_drv_uses_atomic_modeset(dev)) {
- nouveau_display_init(dev);
if (disp->suspend) {
drm_atomic_helper_resume(dev, disp->suspend);
disp->suspend = NULL;
}
return;
}
-
- /* re-pin fb/cursors */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct nouveau_framebuffer *nouveau_fb;
-
- nouveau_fb = nouveau_framebuffer(crtc->primary->fb);
- if (!nouveau_fb || !nouveau_fb->nvbo)
- continue;
-
- ret = nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM, true);
- if (ret)
- NV_ERROR(drm, "Could not pin framebuffer\n");
- }
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- if (!nv_crtc->cursor.nvbo)
- continue;
-
- ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM, true);
- if (!ret && nv_crtc->cursor.set_offset)
- ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
- if (ret)
- NV_ERROR(drm, "Could not pin/map cursor.\n");
- }
-
- nouveau_display_init(dev);
-
- /* Force CLUT to get re-loaded during modeset */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-
- nv_crtc->lut.depth = 0;
- }
-
- /* This should ensure we don't hit a locking problem when someone
- * wakes us up via a connector. We should never go into suspend
- * while the display is on anyways.
- */
- if (runtime)
- return;
-
- drm_helper_resume_force_mode(dev);
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-
- if (!nv_crtc->cursor.nvbo)
- continue;
-
- if (nv_crtc->cursor.set_offset)
- nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset);
- nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
- nv_crtc->cursor_saved_y);
- }
-}
-
-static int
-nouveau_page_flip_emit(struct nouveau_channel *chan,
- struct nouveau_bo *old_bo,
- struct nouveau_bo *new_bo,
- struct nouveau_page_flip_state *s,
- struct nouveau_fence **pfence)
-{
- struct nouveau_fence_chan *fctx = chan->fence;
- struct nouveau_drm *drm = chan->drm;
- struct drm_device *dev = drm->dev;
- unsigned long flags;
- int ret;
-
- /* Queue it to the pending list */
- spin_lock_irqsave(&dev->event_lock, flags);
- list_add_tail(&s->head, &fctx->flip);
- spin_unlock_irqrestore(&dev->event_lock, flags);
-
- /* Synchronize with the old framebuffer */
- ret = nouveau_fence_sync(old_bo, chan, false, false);
- if (ret)
- goto fail;
-
- /* Emit the pageflip */
- ret = RING_SPACE(chan, 2);
- if (ret)
- goto fail;
-
- BEGIN_NV04(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
- OUT_RING (chan, 0x00000000);
- FIRE_RING (chan);
-
- ret = nouveau_fence_new(chan, false, pfence);
- if (ret)
- goto fail;
-
- return 0;
-fail:
- spin_lock_irqsave(&dev->event_lock, flags);
- list_del(&s->head);
- spin_unlock_irqrestore(&dev->event_lock, flags);
- return ret;
-}
-
-int
-nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event, u32 flags,
- struct drm_modeset_acquire_ctx *ctx)
-{
- const int swap_interval = (flags & DRM_MODE_PAGE_FLIP_ASYNC) ? 0 : 1;
- struct drm_device *dev = crtc->dev;
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->primary->fb)->nvbo;
- struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo;
- struct nouveau_page_flip_state *s;
- struct nouveau_channel *chan;
- struct nouveau_cli *cli;
- struct nouveau_fence *fence;
- struct nv04_display *dispnv04 = nv04_display(dev);
- int head = nouveau_crtc(crtc)->index;
- int ret;
-
- chan = drm->channel;
- if (!chan)
- return -ENODEV;
- cli = (void *)chan->user.client;
-
- s = kzalloc(sizeof(*s), GFP_KERNEL);
- if (!s)
- return -ENOMEM;
-
- if (new_bo != old_bo) {
- ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM, true);
- if (ret)
- goto fail_free;
- }
-
- mutex_lock(&cli->mutex);
- ret = ttm_bo_reserve(&new_bo->bo, true, false, NULL);
- if (ret)
- goto fail_unpin;
-
- /* synchronise rendering channel with the kernel's channel */
- ret = nouveau_fence_sync(new_bo, chan, false, true);
- if (ret) {
- ttm_bo_unreserve(&new_bo->bo);
- goto fail_unpin;
- }
-
- if (new_bo != old_bo) {
- ttm_bo_unreserve(&new_bo->bo);
-
- ret = ttm_bo_reserve(&old_bo->bo, true, false, NULL);
- if (ret)
- goto fail_unpin;
- }
-
- /* Initialize a page flip struct */
- *s = (struct nouveau_page_flip_state)
- { { }, event, crtc, fb->format->cpp[0] * 8, fb->pitches[0],
- new_bo->bo.offset };
-
- /* Keep vblanks on during flip, for the target crtc of this flip */
- drm_crtc_vblank_get(crtc);
-
- /* Emit a page flip */
- if (swap_interval) {
- ret = RING_SPACE(chan, 8);
- if (ret)
- goto fail_unreserve;
-
- BEGIN_NV04(chan, NvSubImageBlit, 0x012c, 1);
- OUT_RING (chan, 0);
- BEGIN_NV04(chan, NvSubImageBlit, 0x0134, 1);
- OUT_RING (chan, head);
- BEGIN_NV04(chan, NvSubImageBlit, 0x0100, 1);
- OUT_RING (chan, 0);
- BEGIN_NV04(chan, NvSubImageBlit, 0x0130, 1);
- OUT_RING (chan, 0);
- }
-
- nouveau_bo_ref(new_bo, &dispnv04->image[head]);
-
- ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
- if (ret)
- goto fail_unreserve;
- mutex_unlock(&cli->mutex);
-
- /* Update the crtc struct and cleanup */
- crtc->primary->fb = fb;
-
- nouveau_bo_fence(old_bo, fence, false);
- ttm_bo_unreserve(&old_bo->bo);
- if (old_bo != new_bo)
- nouveau_bo_unpin(old_bo);
- nouveau_fence_unref(&fence);
- return 0;
-
-fail_unreserve:
- drm_crtc_vblank_put(crtc);
- ttm_bo_unreserve(&old_bo->bo);
-fail_unpin:
- mutex_unlock(&cli->mutex);
- if (old_bo != new_bo)
- nouveau_bo_unpin(new_bo);
-fail_free:
- kfree(s);
- return ret;
-}
-
-int
-nouveau_finish_page_flip(struct nouveau_channel *chan,
- struct nouveau_page_flip_state *ps)
-{
- struct nouveau_fence_chan *fctx = chan->fence;
- struct nouveau_drm *drm = chan->drm;
- struct drm_device *dev = drm->dev;
- struct nouveau_page_flip_state *s;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->event_lock, flags);
-
- if (list_empty(&fctx->flip)) {
- NV_ERROR(drm, "unexpected pageflip\n");
- spin_unlock_irqrestore(&dev->event_lock, flags);
- return -EINVAL;
- }
-
- s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head);
- if (s->event) {
- drm_crtc_arm_vblank_event(s->crtc, s->event);
- } else {
- /* Give up ownership of vblank for page-flipped crtc */
- drm_crtc_vblank_put(s->crtc);
- }
-
- list_del(&s->head);
- if (ps)
- *ps = *s;
- kfree(s);
-
- spin_unlock_irqrestore(&dev->event_lock, flags);
- return 0;
-}
-
-int
-nouveau_flip_complete(struct nvif_notify *notify)
-{
- struct nouveau_drm *drm = container_of(notify, typeof(*drm), flip);
- struct nouveau_channel *chan = drm->channel;
- struct nouveau_page_flip_state state;
-
- if (!nouveau_finish_page_flip(chan, &state)) {
- nv_set_crtc_base(drm->dev, drm_crtc_index(state.crtc),
- state.offset + state.crtc->y *
- state.pitch + state.crtc->x *
- state.bpp / 8);
- }
-
- return NVIF_NOTIFY_KEEP;
}
int
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index eb77e41c2d4e..311e175f0513 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -25,19 +25,11 @@ int nouveau_framebuffer_new(struct drm_device *,
const struct drm_mode_fb_cmd2 *,
struct nouveau_bo *, struct nouveau_framebuffer **);
-struct nouveau_page_flip_state {
- struct list_head head;
- struct drm_pending_vblank_event *event;
- struct drm_crtc *crtc;
- int bpp, pitch;
- u64 offset;
-};
-
struct nouveau_display {
void *priv;
void (*dtor)(struct drm_device *);
- int (*init)(struct drm_device *);
- void (*fini)(struct drm_device *);
+ int (*init)(struct drm_device *, bool resume, bool runtime);
+ void (*fini)(struct drm_device *, bool suspend);
struct nvif_disp disp;
@@ -61,7 +53,7 @@ nouveau_display(struct drm_device *dev)
int nouveau_display_create(struct drm_device *dev);
void nouveau_display_destroy(struct drm_device *dev);
-int nouveau_display_init(struct drm_device *dev);
+int nouveau_display_init(struct drm_device *dev, bool resume, bool runtime);
void nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime);
int nouveau_display_suspend(struct drm_device *dev, bool runtime);
void nouveau_display_resume(struct drm_device *dev, bool runtime);
@@ -71,13 +63,6 @@ bool nouveau_display_scanoutpos(struct drm_device *, unsigned int,
bool, int *, int *, ktime_t *,
ktime_t *, const struct drm_display_mode *);
-int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags,
- struct drm_modeset_acquire_ctx *ctx);
-int nouveau_finish_page_flip(struct nouveau_channel *,
- struct nouveau_page_flip_state *);
-
int nouveau_display_dumb_create(struct drm_file *, struct drm_device *,
struct drm_mode_create_dumb *args);
int nouveau_display_dumb_map_offset(struct drm_file *, struct drm_device *,
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
new file mode 100644
index 000000000000..8be7a83ced9b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -0,0 +1,887 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "nouveau_dmem.h"
+#include "nouveau_drv.h"
+#include "nouveau_chan.h"
+#include "nouveau_dma.h"
+#include "nouveau_mem.h"
+#include "nouveau_bo.h"
+
+#include <nvif/class.h>
+#include <nvif/object.h>
+#include <nvif/if500b.h>
+#include <nvif/if900b.h>
+
+#include <linux/sched/mm.h>
+#include <linux/hmm.h>
+
+/*
+ * FIXME: this is ugly; right now we are using TTM to allocate VRAM and we
+ * pin it in VRAM while in use. We likely want to overhaul memory management
+ * for nouveau to be more page-like (not necessarily with the system page
+ * size, but a bigger page size) at the lowest level, and have some shim
+ * layer on top that provides the same functionality as TTM.
+ */
+#define DMEM_CHUNK_SIZE (2UL << 20)
+#define DMEM_CHUNK_NPAGES (DMEM_CHUNK_SIZE >> PAGE_SHIFT)
+
+struct nouveau_migrate;
+
+enum nouveau_aper {
+ NOUVEAU_APER_VIRT,
+ NOUVEAU_APER_VRAM,
+ NOUVEAU_APER_HOST,
+};
+
+typedef int (*nouveau_migrate_copy_t)(struct nouveau_drm *drm, u64 npages,
+ enum nouveau_aper, u64 dst_addr,
+ enum nouveau_aper, u64 src_addr);
+
+struct nouveau_dmem_chunk {
+ struct list_head list;
+ struct nouveau_bo *bo;
+ struct nouveau_drm *drm;
+ unsigned long pfn_first;
+ unsigned long callocated;
+ unsigned long bitmap[BITS_TO_LONGS(DMEM_CHUNK_NPAGES)];
+ spinlock_t lock;
+};
+
+struct nouveau_dmem_migrate {
+ nouveau_migrate_copy_t copy_func;
+ struct nouveau_channel *chan;
+};
+
+struct nouveau_dmem {
+ struct hmm_devmem *devmem;
+ struct nouveau_dmem_migrate migrate;
+ struct list_head chunk_free;
+ struct list_head chunk_full;
+ struct list_head chunk_empty;
+ struct mutex mutex;
+};
+
+struct nouveau_dmem_fault {
+ struct nouveau_drm *drm;
+ struct nouveau_fence *fence;
+ dma_addr_t *dma;
+ unsigned long npages;
+};
+
+struct nouveau_migrate {
+ struct vm_area_struct *vma;
+ struct nouveau_drm *drm;
+ struct nouveau_fence *fence;
+ unsigned long npages;
+ dma_addr_t *dma;
+ unsigned long dma_nr;
+};
+
+static void
+nouveau_dmem_free(struct hmm_devmem *devmem, struct page *page)
+{
+ struct nouveau_dmem_chunk *chunk;
+ struct nouveau_drm *drm;
+ unsigned long idx;
+
+ chunk = (void *)hmm_devmem_page_get_drvdata(page);
+ idx = page_to_pfn(page) - chunk->pfn_first;
+ drm = chunk->drm;
+
+ /*
+ * FIXME:
+ *
+	 * This is really a bad example; we need to overhaul nouveau memory
+	 * management to be more page focused and to allow a lighter locking
+	 * scheme to be used in the process.
+ */
+ spin_lock(&chunk->lock);
+ clear_bit(idx, chunk->bitmap);
+ WARN_ON(!chunk->callocated);
+ chunk->callocated--;
+ /*
+	 * FIXME when chunk->callocated reaches 0 we should add the chunk to
+ * a reclaim list so that it can be freed in case of memory pressure.
+ */
+ spin_unlock(&chunk->lock);
+}
+
+static void
+nouveau_dmem_fault_alloc_and_copy(struct vm_area_struct *vma,
+ const unsigned long *src_pfns,
+ unsigned long *dst_pfns,
+ unsigned long start,
+ unsigned long end,
+ void *private)
+{
+ struct nouveau_dmem_fault *fault = private;
+ struct nouveau_drm *drm = fault->drm;
+ struct device *dev = drm->dev->dev;
+ unsigned long addr, i, npages = 0;
+ nouveau_migrate_copy_t copy;
+ int ret;
+
+ /* First allocate new memory */
+ for (addr = start, i = 0; addr < end; addr += PAGE_SIZE, i++) {
+ struct page *dpage, *spage;
+
+ dst_pfns[i] = 0;
+ spage = migrate_pfn_to_page(src_pfns[i]);
+ if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE))
+ continue;
+
+ dpage = hmm_vma_alloc_locked_page(vma, addr);
+ if (!dpage) {
+ dst_pfns[i] = MIGRATE_PFN_ERROR;
+ continue;
+ }
+
+ dst_pfns[i] = migrate_pfn(page_to_pfn(dpage)) |
+ MIGRATE_PFN_LOCKED;
+ npages++;
+ }
+
+ /* Allocate storage for DMA addresses, so we can unmap later. */
+ fault->dma = kmalloc(sizeof(*fault->dma) * npages, GFP_KERNEL);
+ if (!fault->dma)
+ goto error;
+
+ /* Copy things over */
+ copy = drm->dmem->migrate.copy_func;
+ for (addr = start, i = 0; addr < end; addr += PAGE_SIZE, i++) {
+ struct nouveau_dmem_chunk *chunk;
+ struct page *spage, *dpage;
+ u64 src_addr, dst_addr;
+
+ dpage = migrate_pfn_to_page(dst_pfns[i]);
+ if (!dpage || dst_pfns[i] == MIGRATE_PFN_ERROR)
+ continue;
+
+ spage = migrate_pfn_to_page(src_pfns[i]);
+ if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE)) {
+ dst_pfns[i] = MIGRATE_PFN_ERROR;
+ __free_page(dpage);
+ continue;
+ }
+
+ fault->dma[fault->npages] =
+ dma_map_page_attrs(dev, dpage, 0, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ if (dma_mapping_error(dev, fault->dma[fault->npages])) {
+ dst_pfns[i] = MIGRATE_PFN_ERROR;
+ __free_page(dpage);
+ continue;
+ }
+
+ dst_addr = fault->dma[fault->npages++];
+
+ chunk = (void *)hmm_devmem_page_get_drvdata(spage);
+ src_addr = page_to_pfn(spage) - chunk->pfn_first;
+ src_addr = (src_addr << PAGE_SHIFT) + chunk->bo->bo.offset;
+
+ ret = copy(drm, 1, NOUVEAU_APER_HOST, dst_addr,
+ NOUVEAU_APER_VRAM, src_addr);
+ if (ret) {
+ dst_pfns[i] = MIGRATE_PFN_ERROR;
+ __free_page(dpage);
+ continue;
+ }
+ }
+
+ nouveau_fence_new(drm->dmem->migrate.chan, false, &fault->fence);
+
+ return;
+
+error:
+ for (addr = start, i = 0; addr < end; addr += PAGE_SIZE, ++i) {
+ struct page *page;
+
+ if (!dst_pfns[i] || dst_pfns[i] == MIGRATE_PFN_ERROR)
+ continue;
+
+ page = migrate_pfn_to_page(dst_pfns[i]);
+ dst_pfns[i] = MIGRATE_PFN_ERROR;
+ if (page == NULL)
+ continue;
+
+ __free_page(page);
+ }
+}
+
+void nouveau_dmem_fault_finalize_and_map(struct vm_area_struct *vma,
+ const unsigned long *src_pfns,
+ const unsigned long *dst_pfns,
+ unsigned long start,
+ unsigned long end,
+ void *private)
+{
+ struct nouveau_dmem_fault *fault = private;
+ struct nouveau_drm *drm = fault->drm;
+
+ if (fault->fence) {
+ nouveau_fence_wait(fault->fence, true, false);
+ nouveau_fence_unref(&fault->fence);
+ } else {
+ /*
+		 * FIXME wait for channel to be IDLE before finalizing
+ * the hmem object below (nouveau_migrate_hmem_fini()).
+ */
+ }
+
+ while (fault->npages--) {
+ dma_unmap_page(drm->dev->dev, fault->dma[fault->npages],
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ }
+ kfree(fault->dma);
+}
+
+static const struct migrate_vma_ops nouveau_dmem_fault_migrate_ops = {
+ .alloc_and_copy = nouveau_dmem_fault_alloc_and_copy,
+ .finalize_and_map = nouveau_dmem_fault_finalize_and_map,
+};
+
+static int
+nouveau_dmem_fault(struct hmm_devmem *devmem,
+ struct vm_area_struct *vma,
+ unsigned long addr,
+ const struct page *page,
+ unsigned int flags,
+ pmd_t *pmdp)
+{
+ struct drm_device *drm_dev = dev_get_drvdata(devmem->device);
+ unsigned long src[1] = {0}, dst[1] = {0};
+ struct nouveau_dmem_fault fault = {0};
+ int ret;
+
+ /*
+ * FIXME what we really want is to find some heuristic to migrate more
+	 * than just one page on CPU fault. When such a fault happens it is
+	 * very likely that more surrounding pages will CPU fault too.
+ */
+ fault.drm = nouveau_drm(drm_dev);
+ ret = migrate_vma(&nouveau_dmem_fault_migrate_ops, vma, addr,
+ addr + PAGE_SIZE, src, dst, &fault);
+ if (ret)
+ return VM_FAULT_SIGBUS;
+
+ if (dst[0] == MIGRATE_PFN_ERROR)
+ return VM_FAULT_SIGBUS;
+
+ return 0;
+}
+
+static const struct hmm_devmem_ops
+nouveau_dmem_devmem_ops = {
+ .free = nouveau_dmem_free,
+ .fault = nouveau_dmem_fault,
+};
+
+static int
+nouveau_dmem_chunk_alloc(struct nouveau_drm *drm)
+{
+ struct nouveau_dmem_chunk *chunk;
+ int ret;
+
+ if (drm->dmem == NULL)
+ return -EINVAL;
+
+ mutex_lock(&drm->dmem->mutex);
+ chunk = list_first_entry_or_null(&drm->dmem->chunk_empty,
+ struct nouveau_dmem_chunk,
+ list);
+ if (chunk == NULL) {
+ mutex_unlock(&drm->dmem->mutex);
+ return -ENOMEM;
+ }
+
+ list_del(&chunk->list);
+ mutex_unlock(&drm->dmem->mutex);
+
+ ret = nouveau_bo_new(&drm->client, DMEM_CHUNK_SIZE, 0,
+ TTM_PL_FLAG_VRAM, 0, 0, NULL, NULL,
+ &chunk->bo);
+ if (ret)
+ goto out;
+
+ ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
+ if (ret) {
+ nouveau_bo_ref(NULL, &chunk->bo);
+ goto out;
+ }
+
+ bitmap_zero(chunk->bitmap, DMEM_CHUNK_NPAGES);
+ spin_lock_init(&chunk->lock);
+
+out:
+ mutex_lock(&drm->dmem->mutex);
+ if (chunk->bo)
+ list_add(&chunk->list, &drm->dmem->chunk_empty);
+ else
+ list_add_tail(&chunk->list, &drm->dmem->chunk_empty);
+ mutex_unlock(&drm->dmem->mutex);
+
+ return ret;
+}
+
+static struct nouveau_dmem_chunk *
+nouveau_dmem_chunk_first_free_locked(struct nouveau_drm *drm)
+{
+ struct nouveau_dmem_chunk *chunk;
+
+ chunk = list_first_entry_or_null(&drm->dmem->chunk_free,
+ struct nouveau_dmem_chunk,
+ list);
+ if (chunk)
+ return chunk;
+
+ chunk = list_first_entry_or_null(&drm->dmem->chunk_empty,
+ struct nouveau_dmem_chunk,
+ list);
+ if (chunk->bo)
+ return chunk;
+
+ return NULL;
+}
+
+static int
+nouveau_dmem_pages_alloc(struct nouveau_drm *drm,
+ unsigned long npages,
+ unsigned long *pages)
+{
+ struct nouveau_dmem_chunk *chunk;
+ unsigned long c;
+ int ret;
+
+ memset(pages, 0xff, npages * sizeof(*pages));
+
+ mutex_lock(&drm->dmem->mutex);
+ for (c = 0; c < npages;) {
+ unsigned long i;
+
+ chunk = nouveau_dmem_chunk_first_free_locked(drm);
+ if (chunk == NULL) {
+ mutex_unlock(&drm->dmem->mutex);
+ ret = nouveau_dmem_chunk_alloc(drm);
+ if (ret) {
+ if (c)
+ break;
+ return ret;
+ }
+ continue;
+ }
+
+ spin_lock(&chunk->lock);
+ i = find_first_zero_bit(chunk->bitmap, DMEM_CHUNK_NPAGES);
+ while (i < DMEM_CHUNK_NPAGES && c < npages) {
+ pages[c] = chunk->pfn_first + i;
+ set_bit(i, chunk->bitmap);
+ chunk->callocated++;
+ c++;
+
+ i = find_next_zero_bit(chunk->bitmap,
+ DMEM_CHUNK_NPAGES, i);
+ }
+ spin_unlock(&chunk->lock);
+ }
+ mutex_unlock(&drm->dmem->mutex);
+
+ return 0;
+}
+
+static struct page *
+nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm)
+{
+ unsigned long pfns[1];
+ struct page *page;
+ int ret;
+
+	/* FIXME stop all the mismatched APIs ... */
+ ret = nouveau_dmem_pages_alloc(drm, 1, pfns);
+ if (ret)
+ return NULL;
+
+ page = pfn_to_page(pfns[0]);
+ get_page(page);
+ lock_page(page);
+ return page;
+}
+
+static void
+nouveau_dmem_page_free_locked(struct nouveau_drm *drm, struct page *page)
+{
+ unlock_page(page);
+ put_page(page);
+}
+
+void
+nouveau_dmem_resume(struct nouveau_drm *drm)
+{
+ struct nouveau_dmem_chunk *chunk;
+ int ret;
+
+ if (drm->dmem == NULL)
+ return;
+
+ mutex_lock(&drm->dmem->mutex);
+ list_for_each_entry (chunk, &drm->dmem->chunk_free, list) {
+ ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
+ /* FIXME handle pin failure */
+ WARN_ON(ret);
+ }
+ list_for_each_entry (chunk, &drm->dmem->chunk_full, list) {
+ ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
+ /* FIXME handle pin failure */
+ WARN_ON(ret);
+ }
+ list_for_each_entry (chunk, &drm->dmem->chunk_empty, list) {
+ ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
+ /* FIXME handle pin failure */
+ WARN_ON(ret);
+ }
+ mutex_unlock(&drm->dmem->mutex);
+}
+
+void
+nouveau_dmem_suspend(struct nouveau_drm *drm)
+{
+ struct nouveau_dmem_chunk *chunk;
+
+ if (drm->dmem == NULL)
+ return;
+
+ mutex_lock(&drm->dmem->mutex);
+ list_for_each_entry (chunk, &drm->dmem->chunk_free, list) {
+ nouveau_bo_unpin(chunk->bo);
+ }
+ list_for_each_entry (chunk, &drm->dmem->chunk_full, list) {
+ nouveau_bo_unpin(chunk->bo);
+ }
+ list_for_each_entry (chunk, &drm->dmem->chunk_empty, list) {
+ nouveau_bo_unpin(chunk->bo);
+ }
+ mutex_unlock(&drm->dmem->mutex);
+}
+
+void
+nouveau_dmem_fini(struct nouveau_drm *drm)
+{
+ struct nouveau_dmem_chunk *chunk, *tmp;
+
+ if (drm->dmem == NULL)
+ return;
+
+ mutex_lock(&drm->dmem->mutex);
+
+ WARN_ON(!list_empty(&drm->dmem->chunk_free));
+ WARN_ON(!list_empty(&drm->dmem->chunk_full));
+
+ list_for_each_entry_safe (chunk, tmp, &drm->dmem->chunk_empty, list) {
+ if (chunk->bo) {
+ nouveau_bo_unpin(chunk->bo);
+ nouveau_bo_ref(NULL, &chunk->bo);
+ }
+ list_del(&chunk->list);
+ kfree(chunk);
+ }
+
+ mutex_unlock(&drm->dmem->mutex);
+}
+
+static int
+nvc0b5_migrate_copy(struct nouveau_drm *drm, u64 npages,
+ enum nouveau_aper dst_aper, u64 dst_addr,
+ enum nouveau_aper src_aper, u64 src_addr)
+{
+ struct nouveau_channel *chan = drm->dmem->migrate.chan;
+ u32 launch_dma = (1 << 9) /* MULTI_LINE_ENABLE. */ |
+ (1 << 8) /* DST_MEMORY_LAYOUT_PITCH. */ |
+ (1 << 7) /* SRC_MEMORY_LAYOUT_PITCH. */ |
+ (1 << 2) /* FLUSH_ENABLE_TRUE. */ |
+ (2 << 0) /* DATA_TRANSFER_TYPE_NON_PIPELINED. */;
+ int ret;
+
+ ret = RING_SPACE(chan, 13);
+ if (ret)
+ return ret;
+
+ if (src_aper != NOUVEAU_APER_VIRT) {
+ switch (src_aper) {
+ case NOUVEAU_APER_VRAM:
+ BEGIN_IMC0(chan, NvSubCopy, 0x0260, 0);
+ break;
+ case NOUVEAU_APER_HOST:
+ BEGIN_IMC0(chan, NvSubCopy, 0x0260, 1);
+ break;
+ default:
+ return -EINVAL;
+ }
+ launch_dma |= 0x00001000; /* SRC_TYPE_PHYSICAL. */
+ }
+
+ if (dst_aper != NOUVEAU_APER_VIRT) {
+ switch (dst_aper) {
+ case NOUVEAU_APER_VRAM:
+ BEGIN_IMC0(chan, NvSubCopy, 0x0264, 0);
+ break;
+ case NOUVEAU_APER_HOST:
+ BEGIN_IMC0(chan, NvSubCopy, 0x0264, 1);
+ break;
+ default:
+ return -EINVAL;
+ }
+ launch_dma |= 0x00002000; /* DST_TYPE_PHYSICAL. */
+ }
+
+ BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
+ OUT_RING (chan, upper_32_bits(src_addr));
+ OUT_RING (chan, lower_32_bits(src_addr));
+ OUT_RING (chan, upper_32_bits(dst_addr));
+ OUT_RING (chan, lower_32_bits(dst_addr));
+ OUT_RING (chan, PAGE_SIZE);
+ OUT_RING (chan, PAGE_SIZE);
+ OUT_RING (chan, PAGE_SIZE);
+ OUT_RING (chan, npages);
+ BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
+ OUT_RING (chan, launch_dma);
+ return 0;
+}
+
+static int
+nouveau_dmem_migrate_init(struct nouveau_drm *drm)
+{
+ switch (drm->ttm.copy.oclass) {
+ case PASCAL_DMA_COPY_A:
+ case PASCAL_DMA_COPY_B:
+ case VOLTA_DMA_COPY_A:
+ case TURING_DMA_COPY_A:
+ drm->dmem->migrate.copy_func = nvc0b5_migrate_copy;
+ drm->dmem->migrate.chan = drm->ttm.chan;
+ return 0;
+ default:
+ break;
+ }
+ return -ENODEV;
+}
+
+void
+nouveau_dmem_init(struct nouveau_drm *drm)
+{
+ struct device *device = drm->dev->dev;
+ unsigned long i, size;
+ int ret;
+
+	/* This only makes sense on Pascal or newer */
+ if (drm->client.device.info.family < NV_DEVICE_INFO_V0_PASCAL)
+ return;
+
+ if (!(drm->dmem = kzalloc(sizeof(*drm->dmem), GFP_KERNEL)))
+ return;
+
+ mutex_init(&drm->dmem->mutex);
+ INIT_LIST_HEAD(&drm->dmem->chunk_free);
+ INIT_LIST_HEAD(&drm->dmem->chunk_full);
+ INIT_LIST_HEAD(&drm->dmem->chunk_empty);
+
+ size = ALIGN(drm->client.device.info.ram_user, DMEM_CHUNK_SIZE);
+
+ /* Initialize migration dma helpers before registering memory */
+ ret = nouveau_dmem_migrate_init(drm);
+ if (ret) {
+ kfree(drm->dmem);
+ drm->dmem = NULL;
+ return;
+ }
+
+ /*
+ * FIXME we need some kind of policy to decide how much VRAM we
+	 * want to register with HMM. For now just register everything;
+	 * later, if we want to do things like overcommit, we could
+	 * revisit this.
+ */
+ drm->dmem->devmem = hmm_devmem_add(&nouveau_dmem_devmem_ops,
+ device, size);
+ if (drm->dmem->devmem == NULL) {
+ kfree(drm->dmem);
+ drm->dmem = NULL;
+ return;
+ }
+
+ for (i = 0; i < (size / DMEM_CHUNK_SIZE); ++i) {
+ struct nouveau_dmem_chunk *chunk;
+ struct page *page;
+ unsigned long j;
+
+ chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
+ if (chunk == NULL) {
+ nouveau_dmem_fini(drm);
+ return;
+ }
+
+ chunk->drm = drm;
+ chunk->pfn_first = drm->dmem->devmem->pfn_first;
+ chunk->pfn_first += (i * DMEM_CHUNK_NPAGES);
+ list_add_tail(&chunk->list, &drm->dmem->chunk_empty);
+
+ page = pfn_to_page(chunk->pfn_first);
+ for (j = 0; j < DMEM_CHUNK_NPAGES; ++j, ++page) {
+ hmm_devmem_page_set_drvdata(page, (long)chunk);
+ }
+ }
+
+ NV_INFO(drm, "DMEM: registered %ldMB of device memory\n", size >> 20);
+}
+
+static void
+nouveau_dmem_migrate_alloc_and_copy(struct vm_area_struct *vma,
+ const unsigned long *src_pfns,
+ unsigned long *dst_pfns,
+ unsigned long start,
+ unsigned long end,
+ void *private)
+{
+ struct nouveau_migrate *migrate = private;
+ struct nouveau_drm *drm = migrate->drm;
+ struct device *dev = drm->dev->dev;
+ unsigned long addr, i, npages = 0;
+ nouveau_migrate_copy_t copy;
+ int ret;
+
+ /* First allocate new memory */
+ for (addr = start, i = 0; addr < end; addr += PAGE_SIZE, i++) {
+ struct page *dpage, *spage;
+
+ dst_pfns[i] = 0;
+ spage = migrate_pfn_to_page(src_pfns[i]);
+ if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE))
+ continue;
+
+ dpage = nouveau_dmem_page_alloc_locked(drm);
+ if (!dpage)
+ continue;
+
+ dst_pfns[i] = migrate_pfn(page_to_pfn(dpage)) |
+ MIGRATE_PFN_LOCKED |
+ MIGRATE_PFN_DEVICE;
+ npages++;
+ }
+
+ if (!npages)
+ return;
+
+ /* Allocate storage for DMA addresses, so we can unmap later. */
+ migrate->dma = kmalloc(sizeof(*migrate->dma) * npages, GFP_KERNEL);
+ if (!migrate->dma)
+ goto error;
+
+ /* Copy things over */
+ copy = drm->dmem->migrate.copy_func;
+ for (addr = start, i = 0; addr < end; addr += PAGE_SIZE, i++) {
+ struct nouveau_dmem_chunk *chunk;
+ struct page *spage, *dpage;
+ u64 src_addr, dst_addr;
+
+ dpage = migrate_pfn_to_page(dst_pfns[i]);
+ if (!dpage || dst_pfns[i] == MIGRATE_PFN_ERROR)
+ continue;
+
+ chunk = (void *)hmm_devmem_page_get_drvdata(dpage);
+ dst_addr = page_to_pfn(dpage) - chunk->pfn_first;
+ dst_addr = (dst_addr << PAGE_SHIFT) + chunk->bo->bo.offset;
+
+ spage = migrate_pfn_to_page(src_pfns[i]);
+ if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE)) {
+ nouveau_dmem_page_free_locked(drm, dpage);
+ dst_pfns[i] = 0;
+ continue;
+ }
+
+ migrate->dma[migrate->dma_nr] =
+ dma_map_page_attrs(dev, spage, 0, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ if (dma_mapping_error(dev, migrate->dma[migrate->dma_nr])) {
+ nouveau_dmem_page_free_locked(drm, dpage);
+ dst_pfns[i] = 0;
+ continue;
+ }
+
+ src_addr = migrate->dma[migrate->dma_nr++];
+
+ ret = copy(drm, 1, NOUVEAU_APER_VRAM, dst_addr,
+ NOUVEAU_APER_HOST, src_addr);
+ if (ret) {
+ nouveau_dmem_page_free_locked(drm, dpage);
+ dst_pfns[i] = 0;
+ continue;
+ }
+ }
+
+ nouveau_fence_new(drm->dmem->migrate.chan, false, &migrate->fence);
+
+ return;
+
+error:
+ for (addr = start, i = 0; addr < end; addr += PAGE_SIZE, ++i) {
+ struct page *page;
+
+ if (!dst_pfns[i] || dst_pfns[i] == MIGRATE_PFN_ERROR)
+ continue;
+
+ page = migrate_pfn_to_page(dst_pfns[i]);
+ dst_pfns[i] = MIGRATE_PFN_ERROR;
+ if (page == NULL)
+ continue;
+
+ __free_page(page);
+ }
+}
+
+void nouveau_dmem_migrate_finalize_and_map(struct vm_area_struct *vma,
+ const unsigned long *src_pfns,
+ const unsigned long *dst_pfns,
+ unsigned long start,
+ unsigned long end,
+ void *private)
+{
+ struct nouveau_migrate *migrate = private;
+ struct nouveau_drm *drm = migrate->drm;
+
+ if (migrate->fence) {
+ nouveau_fence_wait(migrate->fence, true, false);
+ nouveau_fence_unref(&migrate->fence);
+ } else {
+ /*
+ * FIXME wait for channel to be IDLE before finalizing
+ * the hmem object below (nouveau_migrate_hmem_fini()) ?
+ */
+ }
+
+ while (migrate->dma_nr--) {
+ dma_unmap_page(drm->dev->dev, migrate->dma[migrate->dma_nr],
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ }
+ kfree(migrate->dma);
+
+ /*
+ * FIXME optimization: update GPU page table to point to newly
+ * migrated memory.
+ */
+}
+
+static const struct migrate_vma_ops nouveau_dmem_migrate_ops = {
+ .alloc_and_copy = nouveau_dmem_migrate_alloc_and_copy,
+ .finalize_and_map = nouveau_dmem_migrate_finalize_and_map,
+};
+
+int
+nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
+ struct vm_area_struct *vma,
+ unsigned long start,
+ unsigned long end)
+{
+ unsigned long *src_pfns, *dst_pfns, npages;
+ struct nouveau_migrate migrate = {0};
+ unsigned long i, c, max;
+ int ret = 0;
+
+ npages = (end - start) >> PAGE_SHIFT;
+ max = min(SG_MAX_SINGLE_ALLOC, npages);
+ src_pfns = kzalloc(sizeof(long) * max, GFP_KERNEL);
+ if (src_pfns == NULL)
+ return -ENOMEM;
+ dst_pfns = kzalloc(sizeof(long) * max, GFP_KERNEL);
+ if (dst_pfns == NULL) {
+ kfree(src_pfns);
+ return -ENOMEM;
+ }
+
+ migrate.drm = drm;
+ migrate.vma = vma;
+ migrate.npages = npages;
+ for (i = 0; i < npages; i += c) {
+ unsigned long next;
+
+		c = min(SG_MAX_SINGLE_ALLOC, npages - i);
+ next = start + (c << PAGE_SHIFT);
+ ret = migrate_vma(&nouveau_dmem_migrate_ops, vma, start,
+ next, src_pfns, dst_pfns, &migrate);
+ if (ret)
+ goto out;
+ start = next;
+ }
+
+out:
+ kfree(dst_pfns);
+ kfree(src_pfns);
+ return ret;
+}
+
+static inline bool
+nouveau_dmem_page(struct nouveau_drm *drm, struct page *page)
+{
+ if (!is_device_private_page(page))
+ return false;
+
+ if (drm->dmem->devmem != page->pgmap->data)
+ return false;
+
+ return true;
+}
+
+void
+nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
+ struct hmm_range *range)
+{
+ unsigned long i, npages;
+
+ npages = (range->end - range->start) >> PAGE_SHIFT;
+ for (i = 0; i < npages; ++i) {
+ struct nouveau_dmem_chunk *chunk;
+ struct page *page;
+ uint64_t addr;
+
+ page = hmm_pfn_to_page(range, range->pfns[i]);
+ if (page == NULL)
+ continue;
+
+ if (!(range->pfns[i] & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
+ continue;
+ }
+
+ if (!nouveau_dmem_page(drm, page)) {
+			WARN(1, "Some unknown device memory!\n");
+ range->pfns[i] = 0;
+ continue;
+ }
+
+ chunk = (void *)hmm_devmem_page_get_drvdata(page);
+ addr = page_to_pfn(page) - chunk->pfn_first;
+ addr = (addr + chunk->bo->bo.mem.start) << PAGE_SHIFT;
+
+ range->pfns[i] &= ((1UL << range->pfn_shift) - 1);
+ range->pfns[i] |= (addr >> PAGE_SHIFT) << range->pfn_shift;
+ }
+}
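For readers unfamiliar with the chunked allocator this file introduces, the
standalone userspace sketch below models its core bookkeeping only: one 2MiB
chunk covers 512 pages at a 4KiB page size, each tracked by one bit, with
allocation taking the first clear bit and free clearing it again. All names
here (chunk_page_alloc(), main(), ...) are invented for illustration; the
driver itself uses the kernel's set_bit()/clear_bit()/find_first_zero_bit()
under chunk->lock.

#include <stdio.h>

#define CHUNK_SIZE  (2UL << 20)                  /* mirrors DMEM_CHUNK_SIZE */
#define PAGE_SHIFT_ 12                           /* assume 4KiB system pages */
#define NPAGES      (CHUNK_SIZE >> PAGE_SHIFT_)  /* 512 pages per chunk */
#define BPL         (8 * sizeof(unsigned long))  /* bits per long */

static unsigned long bitmap[NPAGES / BPL];       /* 512 bits -> 8 longs on LP64 */

static long chunk_page_alloc(void)
{
	for (unsigned long i = 0; i < NPAGES; i++) {
		if (!(bitmap[i / BPL] & (1UL << (i % BPL)))) {
			bitmap[i / BPL] |= 1UL << (i % BPL); /* set_bit() */
			return i;       /* page index within the chunk */
		}
	}
	return -1; /* chunk full: the driver would move it to chunk_full */
}

static void chunk_page_free(long i)
{
	bitmap[i / BPL] &= ~(1UL << (i % BPL));      /* clear_bit() */
}

int main(void)
{
	long a = chunk_page_alloc();
	long b = chunk_page_alloc();
	printf("allocated page indices %ld, %ld of %lu\n", a, b, NPAGES);
	chunk_page_free(a);
	printf("freed %ld; next allocation reuses it: %ld\n",
	       a, chunk_page_alloc());
	return 0;
}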
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.h b/drivers/gpu/drm/nouveau/nouveau_dmem.h
new file mode 100644
index 000000000000..9d97d756fb7d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __NOUVEAU_DMEM_H__
+#define __NOUVEAU_DMEM_H__
+#include <nvif/os.h>
+struct drm_device;
+struct drm_file;
+struct nouveau_drm;
+struct hmm_range;
+
+#if IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM)
+void nouveau_dmem_init(struct nouveau_drm *);
+void nouveau_dmem_fini(struct nouveau_drm *);
+void nouveau_dmem_suspend(struct nouveau_drm *);
+void nouveau_dmem_resume(struct nouveau_drm *);
+
+int nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
+ struct vm_area_struct *vma,
+ unsigned long start,
+ unsigned long end);
+
+void nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
+ struct hmm_range *range);
+#else /* IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM) */
+static inline void nouveau_dmem_init(struct nouveau_drm *drm) {}
+static inline void nouveau_dmem_fini(struct nouveau_drm *drm) {}
+static inline void nouveau_dmem_suspend(struct nouveau_drm *drm) {}
+static inline void nouveau_dmem_resume(struct nouveau_drm *drm) {}
+
+static inline int nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
+ struct vm_area_struct *vma,
+ unsigned long start,
+ unsigned long end)
+{
+ return 0;
+}
+
+static inline void nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
+ struct hmm_range *range) {}
+#endif /* IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM) */
+#endif
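The #else stubs above follow the usual kernel pattern for compiling a feature
out: callers invoke the API unconditionally and the no-op static inlines
vanish under optimisation. A minimal compile-either-way sketch of the same
idiom, with FEATURE_SVM standing in for CONFIG_DRM_NOUVEAU_SVM and all names
invented:

#include <stdio.h>

#define FEATURE_SVM 0          /* flip to 1 to "enable" the feature */

struct drm_dev { const char *name; };

#if FEATURE_SVM
static void dmem_init(struct drm_dev *d)
{
	printf("%s: device memory registered\n", d->name);
}
#else
/* Compiled-out stub: callers need no #ifdef at the call site. */
static inline void dmem_init(struct drm_dev *d) { (void)d; }
#endif

int main(void)
{
	struct drm_dev d = { "card0" };
	dmem_init(&d);         /* safe in both configurations */
	printf("%s: init continues regardless of FEATURE_SVM\n", d.name);
	return 0;
}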
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index f900e94592f8..5020265bfbd9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -44,7 +44,6 @@
#include <nvif/class.h>
#include <nvif/cl0002.h>
#include <nvif/cla06f.h>
-#include <nvif/if0004.h>
#include "nouveau_drv.h"
#include "nouveau_dma.h"
@@ -63,6 +62,8 @@
#include "nouveau_usif.h"
#include "nouveau_connector.h"
#include "nouveau_platform.h"
+#include "nouveau_svm.h"
+#include "nouveau_dmem.h"
MODULE_PARM_DESC(config, "option string to pass to driver core");
static char *nouveau_config;
@@ -173,6 +174,7 @@ nouveau_cli_fini(struct nouveau_cli *cli)
WARN_ON(!list_empty(&cli->worker));
usif_client_fini(cli);
+ nouveau_vmm_fini(&cli->svm);
nouveau_vmm_fini(&cli->vmm);
nvif_mmu_fini(&cli->mmu);
nvif_device_fini(&cli->device);
@@ -283,19 +285,134 @@ done:
}
static void
-nouveau_accel_fini(struct nouveau_drm *drm)
+nouveau_accel_ce_fini(struct nouveau_drm *drm)
+{
+ nouveau_channel_idle(drm->cechan);
+ nvif_object_fini(&drm->ttm.copy);
+ nouveau_channel_del(&drm->cechan);
+}
+
+static void
+nouveau_accel_ce_init(struct nouveau_drm *drm)
+{
+ struct nvif_device *device = &drm->client.device;
+ int ret = 0;
+
+ /* Allocate channel that has access to a (preferably async) copy
+ * engine, to use for TTM buffer moves.
+ */
+ if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
+ ret = nouveau_channel_new(drm, device,
+ nvif_fifo_runlist_ce(device), 0,
+ true, &drm->cechan);
+ } else
+ if (device->info.chipset >= 0xa3 &&
+ device->info.chipset != 0xaa &&
+ device->info.chipset != 0xac) {
+ /* Prior to Kepler, there's only a single runlist, so all
+ * engines can be accessed from any channel.
+ *
+ * We still want to use a separate channel though.
+ */
+ ret = nouveau_channel_new(drm, device, NvDmaFB, NvDmaTT, false,
+ &drm->cechan);
+ }
+
+ if (ret)
+ NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
+}
+
+static void
+nouveau_accel_gr_fini(struct nouveau_drm *drm)
{
nouveau_channel_idle(drm->channel);
nvif_object_fini(&drm->ntfy);
nvkm_gpuobj_del(&drm->notify);
- nvif_notify_fini(&drm->flip);
nvif_object_fini(&drm->nvsw);
nouveau_channel_del(&drm->channel);
+}
- nouveau_channel_idle(drm->cechan);
- nvif_object_fini(&drm->ttm.copy);
- nouveau_channel_del(&drm->cechan);
+static void
+nouveau_accel_gr_init(struct nouveau_drm *drm)
+{
+ struct nvif_device *device = &drm->client.device;
+ u32 arg0, arg1;
+ int ret;
+
+ /* Allocate channel that has access to the graphics engine. */
+ if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
+ arg0 = nvif_fifo_runlist(device, NV_DEVICE_INFO_ENGINE_GR);
+ arg1 = 1;
+ } else {
+ arg0 = NvDmaFB;
+ arg1 = NvDmaTT;
+ }
+ ret = nouveau_channel_new(drm, device, arg0, arg1, false,
+ &drm->channel);
+ if (ret) {
+ NV_ERROR(drm, "failed to create kernel channel, %d\n", ret);
+ nouveau_accel_gr_fini(drm);
+ return;
+ }
+
+ /* A SW class is used on pre-NV50 HW to assist with handling the
+ * synchronisation of page flips, as well as to implement fences
+ * on TNT/TNT2 HW that lacks any kind of support in host.
+ */
+ if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
+ ret = nvif_object_init(&drm->channel->user, NVDRM_NVSW,
+ nouveau_abi16_swclass(drm), NULL, 0,
+ &drm->nvsw);
+ if (ret == 0) {
+ ret = RING_SPACE(drm->channel, 2);
+ if (ret == 0) {
+ BEGIN_NV04(drm->channel, NvSubSw, 0, 1);
+ OUT_RING (drm->channel, drm->nvsw.handle);
+ }
+ }
+
+ if (ret) {
+ NV_ERROR(drm, "failed to allocate sw class, %d\n", ret);
+ nouveau_accel_gr_fini(drm);
+ return;
+ }
+ }
+
+ /* NvMemoryToMemoryFormat requires a notifier ctxdma for some reason,
+	 * even if notification is never requested, so allocate a ctxdma on
+ * any GPU where it's possible we'll end up using M2MF for BO moves.
+ */
+ if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
+ ret = nvkm_gpuobj_new(nvxx_device(device), 32, 0, false, NULL,
+ &drm->notify);
+ if (ret) {
+ NV_ERROR(drm, "failed to allocate notifier, %d\n", ret);
+ nouveau_accel_gr_fini(drm);
+ return;
+ }
+
+ ret = nvif_object_init(&drm->channel->user, NvNotify0,
+ NV_DMA_IN_MEMORY,
+ &(struct nv_dma_v0) {
+ .target = NV_DMA_V0_TARGET_VRAM,
+ .access = NV_DMA_V0_ACCESS_RDWR,
+ .start = drm->notify->addr,
+ .limit = drm->notify->addr + 31
+ }, sizeof(struct nv_dma_v0),
+ &drm->ntfy);
+ if (ret) {
+ nouveau_accel_gr_fini(drm);
+ return;
+ }
+ }
+}
+
+static void
+nouveau_accel_fini(struct nouveau_drm *drm)
+{
+ nouveau_accel_ce_fini(drm);
+ nouveau_accel_gr_fini(drm);
if (drm->fence)
nouveau_fence(drm)->dtor(drm);
}
@@ -305,23 +422,16 @@ nouveau_accel_init(struct nouveau_drm *drm)
{
struct nvif_device *device = &drm->client.device;
struct nvif_sclass *sclass;
- u32 arg0, arg1;
int ret, i, n;
if (nouveau_noaccel)
return;
+ /* Initialise global support for channels, and synchronisation. */
ret = nouveau_channels_init(drm);
if (ret)
return;
- if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_VOLTA) {
- ret = nvif_user_init(device);
- if (ret)
- return;
- }
-
- /* initialise synchronisation routines */
/*XXX: this is crap, but the fence/channel stuff is a little
* backwards in some places. this will be fixed.
*/
@@ -368,95 +478,18 @@ nouveau_accel_init(struct nouveau_drm *drm)
return;
}
- if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
- ret = nouveau_channel_new(drm, &drm->client.device,
- nvif_fifo_runlist_ce(device), 0,
- true, &drm->cechan);
- if (ret)
- NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
-
- arg0 = nvif_fifo_runlist(device, NV_DEVICE_INFO_ENGINE_GR);
- arg1 = 1;
- } else
- if (device->info.chipset >= 0xa3 &&
- device->info.chipset != 0xaa &&
- device->info.chipset != 0xac) {
- ret = nouveau_channel_new(drm, &drm->client.device,
- NvDmaFB, NvDmaTT, false,
- &drm->cechan);
+ /* Volta requires access to a doorbell register for kickoff. */
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_VOLTA) {
+ ret = nvif_user_init(device);
if (ret)
- NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
-
- arg0 = NvDmaFB;
- arg1 = NvDmaTT;
- } else {
- arg0 = NvDmaFB;
- arg1 = NvDmaTT;
- }
-
- ret = nouveau_channel_new(drm, &drm->client.device,
- arg0, arg1, false, &drm->channel);
- if (ret) {
- NV_ERROR(drm, "failed to create kernel channel, %d\n", ret);
- nouveau_accel_fini(drm);
- return;
- }
-
- if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
- ret = nvif_object_init(&drm->channel->user, NVDRM_NVSW,
- nouveau_abi16_swclass(drm), NULL, 0,
- &drm->nvsw);
- if (ret == 0) {
- ret = RING_SPACE(drm->channel, 2);
- if (ret == 0) {
- BEGIN_NV04(drm->channel, NvSubSw, 0, 1);
- OUT_RING (drm->channel, drm->nvsw.handle);
- }
-
- ret = nvif_notify_init(&drm->nvsw,
- nouveau_flip_complete,
- false, NV04_NVSW_NTFY_UEVENT,
- NULL, 0, 0, &drm->flip);
- if (ret == 0)
- ret = nvif_notify_get(&drm->flip);
- if (ret) {
- nouveau_accel_fini(drm);
- return;
- }
- }
-
- if (ret) {
- NV_ERROR(drm, "failed to allocate sw class, %d\n", ret);
- nouveau_accel_fini(drm);
- return;
- }
- }
-
- if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
- ret = nvkm_gpuobj_new(nvxx_device(&drm->client.device), 32, 0,
- false, NULL, &drm->notify);
- if (ret) {
- NV_ERROR(drm, "failed to allocate notifier, %d\n", ret);
- nouveau_accel_fini(drm);
return;
- }
-
- ret = nvif_object_init(&drm->channel->user, NvNotify0,
- NV_DMA_IN_MEMORY,
- &(struct nv_dma_v0) {
- .target = NV_DMA_V0_TARGET_VRAM,
- .access = NV_DMA_V0_ACCESS_RDWR,
- .start = drm->notify->addr,
- .limit = drm->notify->addr + 31
- }, sizeof(struct nv_dma_v0),
- &drm->ntfy);
- if (ret) {
- nouveau_accel_fini(drm);
- return;
- }
}
+ /* Allocate channels we need to support various functions. */
+ nouveau_accel_gr_init(drm);
+ nouveau_accel_ce_init(drm);
+ /* Initialise accelerated TTM buffer moves. */
nouveau_bo_move_init(drm);
}
@@ -504,19 +537,22 @@ nouveau_drm_device_init(struct drm_device *dev)
if (ret)
goto fail_bios;
+ nouveau_accel_init(drm);
+
ret = nouveau_display_create(dev);
if (ret)
goto fail_dispctor;
if (dev->mode_config.num_crtc) {
- ret = nouveau_display_init(dev);
+ ret = nouveau_display_init(dev, false, false);
if (ret)
goto fail_dispinit;
}
nouveau_debugfs_init(drm);
nouveau_hwmon_init(dev);
- nouveau_accel_init(drm);
+ nouveau_svm_init(drm);
+ nouveau_dmem_init(drm);
nouveau_fbcon_init(dev);
nouveau_led_init(dev);
@@ -534,6 +570,7 @@ nouveau_drm_device_init(struct drm_device *dev)
fail_dispinit:
nouveau_display_destroy(dev);
fail_dispctor:
+ nouveau_accel_fini(drm);
nouveau_bios_takedown(dev);
fail_bios:
nouveau_ttm_fini(drm);
@@ -559,7 +596,8 @@ nouveau_drm_device_fini(struct drm_device *dev)
nouveau_led_fini(dev);
nouveau_fbcon_fini(dev);
- nouveau_accel_fini(drm);
+ nouveau_dmem_fini(drm);
+ nouveau_svm_fini(drm);
nouveau_hwmon_fini(dev);
nouveau_debugfs_fini(drm);
@@ -567,6 +605,7 @@ nouveau_drm_device_fini(struct drm_device *dev)
nouveau_display_fini(dev, false, false);
nouveau_display_destroy(dev);
+ nouveau_accel_fini(drm);
nouveau_bios_takedown(dev);
nouveau_ttm_fini(drm);
@@ -704,6 +743,8 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime)
struct nouveau_drm *drm = nouveau_drm(dev);
int ret;
+ nouveau_svm_suspend(drm);
+ nouveau_dmem_suspend(drm);
nouveau_led_suspend(dev);
if (dev->mode_config.num_crtc) {
@@ -780,7 +821,8 @@ nouveau_do_resume(struct drm_device *dev, bool runtime)
}
nouveau_led_resume(dev);
-
+ nouveau_dmem_resume(drm);
+ nouveau_svm_resume(drm);
return 0;
}
@@ -1000,6 +1042,8 @@ nouveau_ioctls[] = {
DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_abi16_ioctl_grobj_alloc, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_abi16_ioctl_notifierobj_alloc, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_abi16_ioctl_gpuobj_free, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_SVM_INIT, nouveau_svmm_init, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_SVM_BIND, nouveau_svmm_bind, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW),
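One subtlety in the nouveau_drm.c hunks above: nouveau_accel_init() now runs
before nouveau_display_create(), so the fail_dispctor unwind path gains a
matching nouveau_accel_fini(), and device teardown calls it after display
destruction. The plain-C sketch below (stage names invented) shows the
goto-unwind discipline this preserves: teardown mirrors bring-up in reverse.

#include <stdio.h>

static int  bios_init(void)  { printf("bios up\n");    return 0; }
static void bios_fini(void)  { printf("bios down\n"); }
static int  accel_init(void) { printf("accel up\n");   return 0; }
static void accel_fini(void) { printf("accel down\n"); }
static int  disp_init(void)  { printf("display up\n"); return -1; } /* force failure */

static int device_init(void)
{
	int ret = bios_init();
	if (ret)
		return ret;
	ret = accel_init();
	if (ret)
		goto fail_accel;
	ret = disp_init();
	if (ret)
		goto fail_disp;
	return 0;
fail_disp:
	accel_fini();   /* accel now comes up before display, so undo it here */
fail_accel:
	bios_fini();
	return ret;
}

int main(void)
{
	printf("device_init -> %d\n", device_init());
	return 0;
}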
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index d20b9ba4b1c1..da847244479d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -96,6 +96,7 @@ struct nouveau_cli {
struct nvif_device device;
struct nvif_mmu mmu;
struct nouveau_vmm vmm;
+ struct nouveau_vmm svm;
const struct nvif_mclass *mem;
struct list_head head;
@@ -181,7 +182,6 @@ struct nouveau_drm {
struct nouveau_fbdev *fbcon;
struct nvif_object nvsw;
struct nvif_object ntfy;
- struct nvif_notify flip;
/* nv10-nv40 tiling regions */
struct {
@@ -210,6 +210,10 @@ struct nouveau_drm {
bool have_disp_power_ref;
struct dev_pm_domain vga_pm_domain;
+
+ struct nouveau_svm *svm;
+
+ struct nouveau_dmem *dmem;
};
static inline struct nouveau_drm *
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index d275418edd24..0d3cd4e05728 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -353,7 +353,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
chan = nouveau_nofbaccel ? NULL : drm->channel;
if (chan && device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
- ret = nouveau_vma_new(nvbo, &drm->client.vmm, &fb->vma);
+ ret = nouveau_vma_new(nvbo, chan->vmm, &fb->vma);
if (ret) {
NV_ERROR(drm, "failed to map fb into chan: %d\n", ret);
chan = NULL;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index b999e6058046..ad27caeca0fd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -82,8 +82,6 @@ int nv50_fence_create(struct nouveau_drm *);
int nv84_fence_create(struct nouveau_drm *);
int nvc0_fence_create(struct nouveau_drm *);
-int nouveau_flip_complete(struct nvif_notify *);
-
struct nv84_fence_chan {
struct nouveau_fence_chan base;
struct nouveau_vma *vma;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index fb028e3b5f51..b4bda716564d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -68,10 +68,11 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct device *dev = drm->dev->dev;
+ struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
struct nouveau_vma *vma;
int ret;
- if (cli->vmm.vmm.object.oclass < NVIF_CLASS_VMM_NV50)
+ if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
return 0;
ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
@@ -82,7 +83,7 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
if (ret < 0 && ret != -EACCES)
goto out;
- ret = nouveau_vma_new(nvbo, &cli->vmm, &vma);
+ ret = nouveau_vma_new(nvbo, vmm, &vma);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
out:
@@ -142,17 +143,18 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct device *dev = drm->dev->dev;
+	struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
struct nouveau_vma *vma;
int ret;
- if (cli->vmm.vmm.object.oclass < NVIF_CLASS_VMM_NV50)
+ if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
return;
ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
if (ret)
return;
- vma = nouveau_vma_find(nvbo, &cli->vmm);
+ vma = nouveau_vma_find(nvbo, vmm);
if (vma) {
if (--vma->refs == 0) {
ret = pm_runtime_get_sync(dev);
@@ -219,6 +221,7 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
+ struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
struct nouveau_vma *vma;
if (is_power_of_2(nvbo->valid_domains))
@@ -228,8 +231,8 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
else
rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
rep->offset = nvbo->bo.offset;
- if (cli->vmm.vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
- vma = nouveau_vma_find(nvbo, &cli->vmm);
+ if (vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
+ vma = nouveau_vma_find(nvbo, vmm);
if (!vma)
return -EINVAL;
@@ -321,7 +324,8 @@ struct validate_op {
};
static void
-validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence,
+validate_fini_no_ticket(struct validate_op *op, struct nouveau_channel *chan,
+ struct nouveau_fence *fence,
struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
struct nouveau_bo *nvbo;
@@ -332,13 +336,11 @@ validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence,
b = &pbbo[nvbo->pbbo_index];
if (likely(fence)) {
- struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
- struct nouveau_vma *vma;
-
nouveau_bo_fence(nvbo, fence, !!b->write_domains);
- if (drm->client.vmm.vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
- vma = (void *)(unsigned long)b->user_priv;
+ if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
+ struct nouveau_vma *vma =
+ (void *)(unsigned long)b->user_priv;
nouveau_fence_unref(&vma->fence);
dma_fence_get(&fence->base);
vma->fence = fence;
@@ -358,10 +360,11 @@ validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence,
}
static void
-validate_fini(struct validate_op *op, struct nouveau_fence *fence,
+validate_fini(struct validate_op *op, struct nouveau_channel *chan,
+ struct nouveau_fence *fence,
struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
- validate_fini_no_ticket(op, fence, pbbo);
+ validate_fini_no_ticket(op, chan, fence, pbbo);
ww_acquire_fini(&op->ticket);
}
@@ -416,7 +419,7 @@ retry:
list_splice_tail_init(&vram_list, &op->list);
list_splice_tail_init(&gart_list, &op->list);
list_splice_tail_init(&both_list, &op->list);
- validate_fini_no_ticket(op, NULL, NULL);
+ validate_fini_no_ticket(op, chan, NULL, NULL);
if (unlikely(ret == -EDEADLK)) {
ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
&op->ticket);
@@ -430,8 +433,8 @@ retry:
}
}
- if (cli->vmm.vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
- struct nouveau_vmm *vmm = &cli->vmm;
+ if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
+ struct nouveau_vmm *vmm = chan->vmm;
struct nouveau_vma *vma = nouveau_vma_find(nvbo, vmm);
if (!vma) {
NV_PRINTK(err, cli, "vma not found!\n");
@@ -471,7 +474,7 @@ retry:
list_splice_tail(&gart_list, &op->list);
list_splice_tail(&both_list, &op->list);
if (ret)
- validate_fini(op, NULL, NULL);
+ validate_fini(op, chan, NULL, NULL);
return ret;
}
@@ -563,7 +566,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
if (unlikely(ret < 0)) {
if (ret != -ERESTARTSYS)
NV_PRINTK(err, cli, "validating bo list\n");
- validate_fini(op, NULL, NULL);
+ validate_fini(op, chan, NULL, NULL);
return ret;
}
*apply_relocs = ret;
@@ -842,7 +845,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
}
out:
- validate_fini(&op, fence, bo);
+ validate_fini(&op, chan, fence, bo);
nouveau_fence_unref(&fence);
out_prevalid:
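The recurring one-liner in these gem.c hunks, vmm = cli->svm.cli ? &cli->svm
: &cli->vmm, is the whole switch between address spaces: once
nouveau_svmm_init() has set cli->svm.cli, every object open/close/info path
transparently uses the SVM-capable VMM instead of the standard one. A toy
model of the selection, with types and names invented for illustration:

#include <stdio.h>
#include <stddef.h>

struct vmm { const char *name; };

struct cli {
	struct vmm vmm;                  /* standard VMM */
	struct {
		struct cli *cli;         /* set when SVM is enabled */
		struct vmm vmm;          /* SVM-capable VMM */
	} svm;
};

static struct vmm *cli_vmm(struct cli *c)
{
	/* same test as the driver: svm.cli non-NULL means SVM is enabled */
	return c->svm.cli ? &c->svm.vmm : &c->vmm;
}

int main(void)
{
	struct cli c = {
		.vmm = { "standard" },
		.svm = { .cli = NULL, .vmm = { "svm" } },
	};
	printf("before SVM init: %s\n", cli_vmm(&c)->name);
	c.svm.cli = &c;  /* roughly what nouveau_svmm_init() does on success */
	printf("after SVM init:  %s\n", cli_vmm(&c)->name);
	return 0;
}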
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
new file mode 100644
index 000000000000..93ed43c413f0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -0,0 +1,835 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "nouveau_svm.h"
+#include "nouveau_drv.h"
+#include "nouveau_chan.h"
+#include "nouveau_dmem.h"
+
+#include <nvif/notify.h>
+#include <nvif/object.h>
+#include <nvif/vmm.h>
+
+#include <nvif/class.h>
+#include <nvif/clb069.h>
+#include <nvif/ifc00d.h>
+
+#include <linux/sched/mm.h>
+#include <linux/sort.h>
+#include <linux/hmm.h>
+
+struct nouveau_svm {
+ struct nouveau_drm *drm;
+ struct mutex mutex;
+ struct list_head inst;
+
+ struct nouveau_svm_fault_buffer {
+ int id;
+ struct nvif_object object;
+ u32 entries;
+ u32 getaddr;
+ u32 putaddr;
+ u32 get;
+ u32 put;
+ struct nvif_notify notify;
+
+ struct nouveau_svm_fault {
+ u64 inst;
+ u64 addr;
+ u64 time;
+ u32 engine;
+ u8 gpc;
+ u8 hub;
+ u8 access;
+ u8 client;
+ u8 fault;
+ struct nouveau_svmm *svmm;
+ } **fault;
+ int fault_nr;
+ } buffer[1];
+};
+
+#define SVM_DBG(s,f,a...) NV_DEBUG((s)->drm, "svm: "f"\n", ##a)
+#define SVM_ERR(s,f,a...) NV_WARN((s)->drm, "svm: "f"\n", ##a)
+
+struct nouveau_ivmm {
+ struct nouveau_svmm *svmm;
+ u64 inst;
+ struct list_head head;
+};
+
+static struct nouveau_ivmm *
+nouveau_ivmm_find(struct nouveau_svm *svm, u64 inst)
+{
+ struct nouveau_ivmm *ivmm;
+ list_for_each_entry(ivmm, &svm->inst, head) {
+ if (ivmm->inst == inst)
+ return ivmm;
+ }
+ return NULL;
+}
+
+struct nouveau_svmm {
+ struct nouveau_vmm *vmm;
+ struct {
+ unsigned long start;
+ unsigned long limit;
+ } unmanaged;
+
+ struct mutex mutex;
+
+ struct mm_struct *mm;
+ struct hmm_mirror mirror;
+};
+
+#define SVMM_DBG(s,f,a...) \
+ NV_DEBUG((s)->vmm->cli->drm, "svm-%p: "f"\n", (s), ##a)
+#define SVMM_ERR(s,f,a...) \
+ NV_WARN((s)->vmm->cli->drm, "svm-%p: "f"\n", (s), ##a)
+
+int
+nouveau_svmm_bind(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct nouveau_cli *cli = nouveau_cli(file_priv);
+ struct drm_nouveau_svm_bind *args = data;
+ unsigned target, cmd, priority;
+ unsigned long addr, end, size;
+ struct mm_struct *mm;
+
+ args->va_start &= PAGE_MASK;
+ args->va_end &= PAGE_MASK;
+
+ /* Sanity check arguments */
+ if (args->reserved0 || args->reserved1)
+ return -EINVAL;
+ if (args->header & (~NOUVEAU_SVM_BIND_VALID_MASK))
+ return -EINVAL;
+ if (args->va_start >= args->va_end)
+ return -EINVAL;
+ if (!args->npages)
+ return -EINVAL;
+
+ cmd = args->header >> NOUVEAU_SVM_BIND_COMMAND_SHIFT;
+ cmd &= NOUVEAU_SVM_BIND_COMMAND_MASK;
+ switch (cmd) {
+ case NOUVEAU_SVM_BIND_COMMAND__MIGRATE:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ priority = args->header >> NOUVEAU_SVM_BIND_PRIORITY_SHIFT;
+ priority &= NOUVEAU_SVM_BIND_PRIORITY_MASK;
+
+	/* FIXME support CPU targets, i.e. all target values < GPU_VRAM */
+ target = args->header >> NOUVEAU_SVM_BIND_TARGET_SHIFT;
+ target &= NOUVEAU_SVM_BIND_TARGET_MASK;
+ switch (target) {
+ case NOUVEAU_SVM_BIND_TARGET__GPU_VRAM:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+	 * FIXME: For now refuse a non-zero stride; we need to change the
+	 * migrate kernel function to handle strides, to avoid creating a
+	 * mess within each device driver.
+ */
+ if (args->stride)
+ return -EINVAL;
+
+ size = ((unsigned long)args->npages) << PAGE_SHIFT;
+ if ((args->va_start + size) <= args->va_start)
+ return -EINVAL;
+ if ((args->va_start + size) > args->va_end)
+ return -EINVAL;
+
+ /*
+	 * Ok, we are asked to do something sane; for now we only support
+	 * migrate commands, but we will add things like memory policy (what
+	 * to do on page fault) and maybe some other commands.
+ */
+
+ mm = get_task_mm(current);
+ down_read(&mm->mmap_sem);
+
+ for (addr = args->va_start, end = args->va_start + size; addr < end;) {
+ struct vm_area_struct *vma;
+ unsigned long next;
+
+ vma = find_vma_intersection(mm, addr, end);
+ if (!vma)
+ break;
+
+ next = min(vma->vm_end, end);
+ /* This is a best effort so we ignore errors */
+ nouveau_dmem_migrate_vma(cli->drm, vma, addr, next);
+ addr = next;
+ }
+
+ /*
+	 * FIXME Return the number of pages we have migrated; again, we need
+ * update the migrate API to return that information so that we can
+ * report it to user space.
+ */
+ args->result = 0;
+
+ up_read(&mm->mmap_sem);
+ mmput(mm);
+
+ return 0;
+}
+
+/* Unlink channel instance from SVMM. */
+void
+nouveau_svmm_part(struct nouveau_svmm *svmm, u64 inst)
+{
+ struct nouveau_ivmm *ivmm;
+ if (svmm) {
+ mutex_lock(&svmm->vmm->cli->drm->svm->mutex);
+ ivmm = nouveau_ivmm_find(svmm->vmm->cli->drm->svm, inst);
+ if (ivmm) {
+ list_del(&ivmm->head);
+ kfree(ivmm);
+ }
+ mutex_unlock(&svmm->vmm->cli->drm->svm->mutex);
+ }
+}
+
+/* Link channel instance to SVMM. */
+int
+nouveau_svmm_join(struct nouveau_svmm *svmm, u64 inst)
+{
+ struct nouveau_ivmm *ivmm;
+ if (svmm) {
+ if (!(ivmm = kmalloc(sizeof(*ivmm), GFP_KERNEL)))
+ return -ENOMEM;
+ ivmm->svmm = svmm;
+ ivmm->inst = inst;
+
+ mutex_lock(&svmm->vmm->cli->drm->svm->mutex);
+ list_add(&ivmm->head, &svmm->vmm->cli->drm->svm->inst);
+ mutex_unlock(&svmm->vmm->cli->drm->svm->mutex);
+ }
+ return 0;
+}
+
+/* Invalidate SVMM address-range on GPU. */
+static void
+nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit)
+{
+ if (limit > start) {
+ bool super = svmm->vmm->vmm.object.client->super;
+ svmm->vmm->vmm.object.client->super = true;
+ nvif_object_mthd(&svmm->vmm->vmm.object, NVIF_VMM_V0_PFNCLR,
+ &(struct nvif_vmm_pfnclr_v0) {
+ .addr = start,
+ .size = limit - start,
+ }, sizeof(struct nvif_vmm_pfnclr_v0));
+ svmm->vmm->vmm.object.client->super = super;
+ }
+}
+
+static int
+nouveau_svmm_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
+ const struct hmm_update *update)
+{
+ struct nouveau_svmm *svmm = container_of(mirror, typeof(*svmm), mirror);
+ unsigned long start = update->start;
+ unsigned long limit = update->end;
+
+ if (!update->blockable)
+ return -EAGAIN;
+
+ SVMM_DBG(svmm, "invalidate %016lx-%016lx", start, limit);
+
+ mutex_lock(&svmm->mutex);
+ if (limit > svmm->unmanaged.start && start < svmm->unmanaged.limit) {
+ if (start < svmm->unmanaged.start) {
+ nouveau_svmm_invalidate(svmm, start,
+ svmm->unmanaged.limit);
+ }
+ start = svmm->unmanaged.limit;
+ }
+
+ nouveau_svmm_invalidate(svmm, start, limit);
+ mutex_unlock(&svmm->mutex);
+ return 0;
+}
+
+static void
+nouveau_svmm_release(struct hmm_mirror *mirror)
+{
+}
+
+static const struct hmm_mirror_ops
+nouveau_svmm = {
+ .sync_cpu_device_pagetables = nouveau_svmm_sync_cpu_device_pagetables,
+ .release = nouveau_svmm_release,
+};
+
+void
+nouveau_svmm_fini(struct nouveau_svmm **psvmm)
+{
+ struct nouveau_svmm *svmm = *psvmm;
+ if (svmm) {
+ hmm_mirror_unregister(&svmm->mirror);
+ kfree(*psvmm);
+ *psvmm = NULL;
+ }
+}
+
+int
+nouveau_svmm_init(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct nouveau_cli *cli = nouveau_cli(file_priv);
+ struct nouveau_svmm *svmm;
+ struct drm_nouveau_svm_init *args = data;
+ int ret;
+
+ /* Allocate tracking for SVM-enabled VMM. */
+ if (!(svmm = kzalloc(sizeof(*svmm), GFP_KERNEL)))
+ return -ENOMEM;
+ svmm->vmm = &cli->svm;
+ svmm->unmanaged.start = args->unmanaged_addr;
+ svmm->unmanaged.limit = args->unmanaged_addr + args->unmanaged_size;
+ mutex_init(&svmm->mutex);
+
+ /* Check that SVM isn't already enabled for the client. */
+ mutex_lock(&cli->mutex);
+ if (cli->svm.cli) {
+ ret = -EBUSY;
+ goto done;
+ }
+
+ /* Allocate a new GPU VMM that can support SVM (managed by the
+ * client, with replayable faults enabled).
+ *
+ * All future channel/memory allocations will make use of this
+ * VMM instead of the standard one.
+ */
+ ret = nvif_vmm_init(&cli->mmu, cli->vmm.vmm.object.oclass, true,
+ args->unmanaged_addr, args->unmanaged_size,
+ &(struct gp100_vmm_v0) {
+ .fault_replay = true,
+ }, sizeof(struct gp100_vmm_v0), &cli->svm.vmm);
+ if (ret)
+ goto done;
+
+ /* Enable HMM mirroring of CPU address-space to VMM. */
+ svmm->mm = get_task_mm(current);
+ down_write(&svmm->mm->mmap_sem);
+ svmm->mirror.ops = &nouveau_svmm;
+ ret = hmm_mirror_register(&svmm->mirror, svmm->mm);
+ if (ret == 0) {
+ cli->svm.svmm = svmm;
+ cli->svm.cli = cli;
+ }
+ up_write(&svmm->mm->mmap_sem);
+ mmput(svmm->mm);
+
+done:
+ if (ret)
+ nouveau_svmm_fini(&svmm);
+ mutex_unlock(&cli->mutex);
+ return ret;
+}
+
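+/* Translation tables from HMM's generic pfn flags/values to the NVIF
+ * PFNMAP flags consumed by the GPU VMM: valid maps to V, writable to W,
+ * and device-private to VRAM, while the error/special entries map to
+ * ~V so they are never treated as valid mappings.
+ */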
+static const u64
+nouveau_svm_pfn_flags[HMM_PFN_FLAG_MAX] = {
+ [HMM_PFN_VALID ] = NVIF_VMM_PFNMAP_V0_V,
+ [HMM_PFN_WRITE ] = NVIF_VMM_PFNMAP_V0_W,
+ [HMM_PFN_DEVICE_PRIVATE] = NVIF_VMM_PFNMAP_V0_VRAM,
+};
+
+static const u64
+nouveau_svm_pfn_values[HMM_PFN_VALUE_MAX] = {
+ [HMM_PFN_ERROR ] = ~NVIF_VMM_PFNMAP_V0_V,
+ [HMM_PFN_NONE ] = NVIF_VMM_PFNMAP_V0_NONE,
+ [HMM_PFN_SPECIAL] = ~NVIF_VMM_PFNMAP_V0_V,
+};
+
+/* Issue fault replay for GPU to retry accesses that faulted previously. */
+static void
+nouveau_svm_fault_replay(struct nouveau_svm *svm)
+{
+ SVM_DBG(svm, "replay");
+ WARN_ON(nvif_object_mthd(&svm->drm->client.vmm.vmm.object,
+ GP100_VMM_VN_FAULT_REPLAY,
+ &(struct gp100_vmm_fault_replay_vn) {},
+ sizeof(struct gp100_vmm_fault_replay_vn)));
+}
+
+/* Cancel a replayable fault that could not be handled.
+ *
+ * Cancelling the fault will trigger recovery to reset the engine
+ * and kill the offending channel (ie. GPU SIGSEGV).
+ */
+static void
+nouveau_svm_fault_cancel(struct nouveau_svm *svm,
+ u64 inst, u8 hub, u8 gpc, u8 client)
+{
+ SVM_DBG(svm, "cancel %016llx %d %02x %02x", inst, hub, gpc, client);
+ WARN_ON(nvif_object_mthd(&svm->drm->client.vmm.vmm.object,
+ GP100_VMM_VN_FAULT_CANCEL,
+ &(struct gp100_vmm_fault_cancel_v0) {
+ .hub = hub,
+ .gpc = gpc,
+ .client = client,
+ .inst = inst,
+ }, sizeof(struct gp100_vmm_fault_cancel_v0)));
+}
+
+static void
+nouveau_svm_fault_cancel_fault(struct nouveau_svm *svm,
+ struct nouveau_svm_fault *fault)
+{
+ nouveau_svm_fault_cancel(svm, fault->inst,
+ fault->hub,
+ fault->gpc,
+ fault->client);
+}
+
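+/* Order faults by instance pointer (so instance-to-SVMM lookups batch),
+ * then by address, then by access type such that writes sort before
+ * reads/prefetches; duplicate addresses can then be skipped once the
+ * strictest access has been handled.
+ */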
+static int
+nouveau_svm_fault_cmp(const void *a, const void *b)
+{
+ const struct nouveau_svm_fault *fa = *(struct nouveau_svm_fault **)a;
+ const struct nouveau_svm_fault *fb = *(struct nouveau_svm_fault **)b;
+ int ret;
+ if ((ret = (s64)fa->inst - fb->inst))
+ return ret;
+ if ((ret = (s64)fa->addr - fb->addr))
+ return ret;
+ /*XXX: atomic? */
+ return (fa->access == 0 || fa->access == 3) -
+ (fb->access == 0 || fb->access == 3);
+}
+
+static void
+nouveau_svm_fault_cache(struct nouveau_svm *svm,
+ struct nouveau_svm_fault_buffer *buffer, u32 offset)
+{
+ struct nvif_object *memory = &buffer->object;
+ const u32 instlo = nvif_rd32(memory, offset + 0x00);
+ const u32 insthi = nvif_rd32(memory, offset + 0x04);
+ const u32 addrlo = nvif_rd32(memory, offset + 0x08);
+ const u32 addrhi = nvif_rd32(memory, offset + 0x0c);
+ const u32 timelo = nvif_rd32(memory, offset + 0x10);
+ const u32 timehi = nvif_rd32(memory, offset + 0x14);
+ const u32 engine = nvif_rd32(memory, offset + 0x18);
+ const u32 info = nvif_rd32(memory, offset + 0x1c);
+ const u64 inst = (u64)insthi << 32 | instlo;
+ const u8 gpc = (info & 0x1f000000) >> 24;
+ const u8 hub = (info & 0x00100000) >> 20;
+ const u8 client = (info & 0x00007f00) >> 8;
+ struct nouveau_svm_fault *fault;
+
+	/*XXX: I think we're supposed to spin waiting */
+ if (WARN_ON(!(info & 0x80000000)))
+ return;
+
+ nvif_mask(memory, offset + 0x1c, 0x80000000, 0x00000000);
+
+ if (!buffer->fault[buffer->fault_nr]) {
+ fault = kmalloc(sizeof(*fault), GFP_KERNEL);
+ if (WARN_ON(!fault)) {
+ nouveau_svm_fault_cancel(svm, inst, hub, gpc, client);
+ return;
+ }
+ buffer->fault[buffer->fault_nr] = fault;
+ }
+
+ fault = buffer->fault[buffer->fault_nr++];
+ fault->inst = inst;
+ fault->addr = (u64)addrhi << 32 | addrlo;
+ fault->time = (u64)timehi << 32 | timelo;
+ fault->engine = engine;
+ fault->gpc = gpc;
+ fault->hub = hub;
+ fault->access = (info & 0x000f0000) >> 16;
+ fault->client = client;
+ fault->fault = (info & 0x0000001f);
+
+ SVM_DBG(svm, "fault %016llx %016llx %02x",
+ fault->inst, fault->addr, fault->access);
+}
+
+static int
+nouveau_svm_fault(struct nvif_notify *notify)
+{
+ struct nouveau_svm_fault_buffer *buffer =
+ container_of(notify, typeof(*buffer), notify);
+ struct nouveau_svm *svm =
+ container_of(buffer, typeof(*svm), buffer[buffer->id]);
+ struct nvif_object *device = &svm->drm->client.device.object;
+ struct nouveau_svmm *svmm;
+ struct {
+ struct {
+ struct nvif_ioctl_v0 i;
+ struct nvif_ioctl_mthd_v0 m;
+ struct nvif_vmm_pfnmap_v0 p;
+ } i;
+ u64 phys[16];
+ } args;
+ struct hmm_range range;
+ struct vm_area_struct *vma;
+ u64 inst, start, limit;
+ int fi, fn, pi, fill;
+ int replay = 0, ret;
+
+ /* Parse available fault buffer entries into a cache, and update
+ * the GET pointer so HW can reuse the entries.
+ */
+ SVM_DBG(svm, "fault handler");
+ if (buffer->get == buffer->put) {
+ buffer->put = nvif_rd32(device, buffer->putaddr);
+ buffer->get = nvif_rd32(device, buffer->getaddr);
+ if (buffer->get == buffer->put)
+ return NVIF_NOTIFY_KEEP;
+ }
+ buffer->fault_nr = 0;
+
+ SVM_DBG(svm, "get %08x put %08x", buffer->get, buffer->put);
+ while (buffer->get != buffer->put) {
+ nouveau_svm_fault_cache(svm, buffer, buffer->get * 0x20);
+ if (++buffer->get == buffer->entries)
+ buffer->get = 0;
+ }
+ nvif_wr32(device, buffer->getaddr, buffer->get);
+ SVM_DBG(svm, "%d fault(s) pending", buffer->fault_nr);
+
+ /* Sort parsed faults by instance pointer to prevent unnecessary
+	 * instance-to-SVMM translations, followed by address and access
+ * type to reduce the amount of work when handling the faults.
+ */
+ sort(buffer->fault, buffer->fault_nr, sizeof(*buffer->fault),
+ nouveau_svm_fault_cmp, NULL);
+
+ /* Lookup SVMM structure for each unique instance pointer. */
+ mutex_lock(&svm->mutex);
+ for (fi = 0, svmm = NULL; fi < buffer->fault_nr; fi++) {
+ if (!svmm || buffer->fault[fi]->inst != inst) {
+ struct nouveau_ivmm *ivmm =
+ nouveau_ivmm_find(svm, buffer->fault[fi]->inst);
+ svmm = ivmm ? ivmm->svmm : NULL;
+ inst = buffer->fault[fi]->inst;
+ SVM_DBG(svm, "inst %016llx -> svm-%p", inst, svmm);
+ }
+ buffer->fault[fi]->svmm = svmm;
+ }
+ mutex_unlock(&svm->mutex);
+
+ /* Process list of faults. */
+ args.i.i.version = 0;
+ args.i.i.type = NVIF_IOCTL_V0_MTHD;
+ args.i.m.version = 0;
+ args.i.m.method = NVIF_VMM_V0_PFNMAP;
+ args.i.p.version = 0;
+
+	for (fi = 0; fi < buffer->fault_nr; fi = fn) {
+		fn = fi + 1;
+ /* Cancel any faults from non-SVM channels. */
+ if (!(svmm = buffer->fault[fi]->svmm)) {
+ nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
+ continue;
+ }
+ SVMM_DBG(svmm, "addr %016llx", buffer->fault[fi]->addr);
+
+		/* We try to group handling of faults within a small
+ * window into a single update.
+ */
+ start = buffer->fault[fi]->addr;
+ limit = start + (ARRAY_SIZE(args.phys) << PAGE_SHIFT);
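+		/* Clip the window so it never overlaps the unmanaged range,
+		 * which is reserved for regular (non-SVM) mappings.
+		 */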
+ if (start < svmm->unmanaged.limit)
+ limit = min_t(u64, limit, svmm->unmanaged.start);
+ else
+ if (limit > svmm->unmanaged.start)
+ start = max_t(u64, start, svmm->unmanaged.limit);
+ SVMM_DBG(svmm, "wndw %016llx-%016llx", start, limit);
+
+ /* Intersect fault window with the CPU VMA, cancelling
+ * the fault if the address is invalid.
+ */
+ down_read(&svmm->mm->mmap_sem);
+ vma = find_vma_intersection(svmm->mm, start, limit);
+ if (!vma) {
+ SVMM_ERR(svmm, "wndw %016llx-%016llx", start, limit);
+ up_read(&svmm->mm->mmap_sem);
+ nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
+ continue;
+ }
+ start = max_t(u64, start, vma->vm_start);
+ limit = min_t(u64, limit, vma->vm_end);
+ SVMM_DBG(svmm, "wndw %016llx-%016llx", start, limit);
+
+ if (buffer->fault[fi]->addr != start) {
+ SVMM_ERR(svmm, "addr %016llx", buffer->fault[fi]->addr);
+ up_read(&svmm->mm->mmap_sem);
+ nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
+ continue;
+ }
+
+ /* Prepare the GPU-side update of all pages within the
+ * fault window, determining required pages and access
+ * permissions based on pending faults.
+ */
+ args.i.p.page = PAGE_SHIFT;
+ args.i.p.addr = start;
+ for (fn = fi, pi = 0;;) {
+ /* Determine required permissions based on GPU fault
+ * access flags.
+ *XXX: atomic?
+ */
+ if (buffer->fault[fn]->access != 0 /* READ. */ &&
+ buffer->fault[fn]->access != 3 /* PREFETCH. */) {
+ args.phys[pi++] = NVIF_VMM_PFNMAP_V0_V |
+ NVIF_VMM_PFNMAP_V0_W;
+ } else {
+ args.phys[pi++] = NVIF_VMM_PFNMAP_V0_V;
+ }
+ args.i.p.size = pi << PAGE_SHIFT;
+
+ /* It's okay to skip over duplicate addresses from the
+ * same SVMM as faults are ordered by access type such
+ * that only the first one needs to be handled.
+ *
+		 * i.e. WRITE faults appear first, so handling those also
+		 * satisfies any pending READ faults for the same page.
+ */
+ while (++fn < buffer->fault_nr &&
+ buffer->fault[fn]->svmm == svmm &&
+ buffer->fault[fn ]->addr ==
+ buffer->fault[fn - 1]->addr);
+
+ /* If the next fault is outside the window, or all GPU
+ * faults have been dealt with, we're done here.
+ */
+ if (fn >= buffer->fault_nr ||
+ buffer->fault[fn]->svmm != svmm ||
+ buffer->fault[fn]->addr >= limit)
+ break;
+
+			/* Fill in the gap between this fault and the next;
+			 * the pre-decrement leaves the next fault's own page
+			 * to be filled in on the next loop iteration.
+			 */
+ fill = (buffer->fault[fn ]->addr -
+ buffer->fault[fn - 1]->addr) >> PAGE_SHIFT;
+ while (--fill)
+ args.phys[pi++] = NVIF_VMM_PFNMAP_V0_NONE;
+ }
+
+ SVMM_DBG(svmm, "wndw %016llx-%016llx covering %d fault(s)",
+ args.i.p.addr,
+ args.i.p.addr + args.i.p.size, fn - fi);
+
+ /* Have HMM fault pages within the fault window to the GPU. */
+ range.vma = vma;
+ range.start = args.i.p.addr;
+ range.end = args.i.p.addr + args.i.p.size;
+ range.pfns = args.phys;
+ range.flags = nouveau_svm_pfn_flags;
+ range.values = nouveau_svm_pfn_values;
+ range.pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT;
+again:
+ ret = hmm_vma_fault(&range, true);
+ if (ret == 0) {
+ mutex_lock(&svmm->mutex);
+ if (!hmm_vma_range_done(&range)) {
+ mutex_unlock(&svmm->mutex);
+ goto again;
+ }
+
+ nouveau_dmem_convert_pfn(svm->drm, &range);
+
+ svmm->vmm->vmm.object.client->super = true;
+ ret = nvif_object_ioctl(&svmm->vmm->vmm.object,
+ &args, sizeof(args.i) +
+ pi * sizeof(args.phys[0]),
+ NULL);
+ svmm->vmm->vmm.object.client->super = false;
+ mutex_unlock(&svmm->mutex);
+ }
+ up_read(&svmm->mm->mmap_sem);
+
+ /* Cancel any faults in the window whose pages didn't manage
+ * to keep their valid bit, or stay writeable when required.
+ *
+ * If handling failed completely, cancel all faults.
+ */
+ while (fi < fn) {
+ struct nouveau_svm_fault *fault = buffer->fault[fi++];
+ pi = (fault->addr - range.start) >> PAGE_SHIFT;
+ if (ret ||
+ !(range.pfns[pi] & NVIF_VMM_PFNMAP_V0_V) ||
+ (!(range.pfns[pi] & NVIF_VMM_PFNMAP_V0_W) &&
+ fault->access != 0 && fault->access != 3)) {
+ nouveau_svm_fault_cancel_fault(svm, fault);
+ continue;
+ }
+ replay++;
+ }
+ }
+
+ /* Issue fault replay to the GPU. */
+ if (replay)
+ nouveau_svm_fault_replay(svm);
+ return NVIF_NOTIFY_KEEP;
+}
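
The again: loop above is the HMM snapshot-and-validate pattern: fault the pages, then confirm under the SVMM lock that no CPU-side invalidation raced in before programming the GPU. A condensed sketch of the same pattern with hypothetical names (the lock must be the one taken by the driver's MMU-notifier callbacks):

	static int fault_and_map(struct hmm_range *range, struct mutex *lock)
	{
		for (;;) {
			int ret = hmm_vma_fault(range, true); /* fill range->pfns */
			if (ret)
				return ret;
			mutex_lock(lock);
			if (hmm_vma_range_done(range))
				break;		/* snapshot still valid */
			mutex_unlock(lock);	/* invalidated meanwhile, retry */
		}
		/* ... program the device MMU from range->pfns ... */
		mutex_unlock(lock);
		return 0;
	}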
+
+static void
+nouveau_svm_fault_buffer_fini(struct nouveau_svm *svm, int id)
+{
+ struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
+ nvif_notify_put(&buffer->notify);
+}
+
+static int
+nouveau_svm_fault_buffer_init(struct nouveau_svm *svm, int id)
+{
+ struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
+ struct nvif_object *device = &svm->drm->client.device.object;
+ buffer->get = nvif_rd32(device, buffer->getaddr);
+ buffer->put = nvif_rd32(device, buffer->putaddr);
+ SVM_DBG(svm, "get %08x put %08x (init)", buffer->get, buffer->put);
+ return nvif_notify_get(&buffer->notify);
+}
+
+static void
+nouveau_svm_fault_buffer_dtor(struct nouveau_svm *svm, int id)
+{
+ struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
+ int i;
+
+ if (buffer->fault) {
+		for (i = 0; i < buffer->entries && buffer->fault[i]; i++)
+ kfree(buffer->fault[i]);
+ kvfree(buffer->fault);
+ }
+
+ nouveau_svm_fault_buffer_fini(svm, id);
+
+ nvif_notify_fini(&buffer->notify);
+ nvif_object_fini(&buffer->object);
+}
+
+static int
+nouveau_svm_fault_buffer_ctor(struct nouveau_svm *svm, s32 oclass, int id)
+{
+ struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
+ struct nouveau_drm *drm = svm->drm;
+ struct nvif_object *device = &drm->client.device.object;
+ struct nvif_clb069_v0 args = {};
+ int ret;
+
+ buffer->id = id;
+
+ ret = nvif_object_init(device, 0, oclass, &args, sizeof(args),
+ &buffer->object);
+ if (ret < 0) {
+ SVM_ERR(svm, "Fault buffer allocation failed: %d", ret);
+ return ret;
+ }
+
+ nvif_object_map(&buffer->object, NULL, 0);
+ buffer->entries = args.entries;
+ buffer->getaddr = args.get;
+ buffer->putaddr = args.put;
+
+ ret = nvif_notify_init(&buffer->object, nouveau_svm_fault, true,
+ NVB069_V0_NTFY_FAULT, NULL, 0, 0,
+ &buffer->notify);
+ if (ret)
+ return ret;
+
+	buffer->fault = kvcalloc(buffer->entries, sizeof(*buffer->fault),
+				 GFP_KERNEL);
+ if (!buffer->fault)
+ return -ENOMEM;
+
+ return nouveau_svm_fault_buffer_init(svm, id);
+}
+
+void
+nouveau_svm_resume(struct nouveau_drm *drm)
+{
+ struct nouveau_svm *svm = drm->svm;
+ if (svm)
+ nouveau_svm_fault_buffer_init(svm, 0);
+}
+
+void
+nouveau_svm_suspend(struct nouveau_drm *drm)
+{
+ struct nouveau_svm *svm = drm->svm;
+ if (svm)
+ nouveau_svm_fault_buffer_fini(svm, 0);
+}
+
+void
+nouveau_svm_fini(struct nouveau_drm *drm)
+{
+ struct nouveau_svm *svm = drm->svm;
+ if (svm) {
+ nouveau_svm_fault_buffer_dtor(svm, 0);
+ kfree(drm->svm);
+ drm->svm = NULL;
+ }
+}
+
+void
+nouveau_svm_init(struct nouveau_drm *drm)
+{
+ static const struct nvif_mclass buffers[] = {
+ { VOLTA_FAULT_BUFFER_A, 0 },
+ { MAXWELL_FAULT_BUFFER_A, 0 },
+ {}
+ };
+ struct nouveau_svm *svm;
+ int ret;
+
+ /* Disable on Volta and newer until channel recovery is fixed,
+ * otherwise clients will have a trivial way to trash the GPU
+ * for everyone.
+ */
+ if (drm->client.device.info.family > NV_DEVICE_INFO_V0_PASCAL)
+ return;
+
+ if (!(drm->svm = svm = kzalloc(sizeof(*drm->svm), GFP_KERNEL)))
+ return;
+
+ drm->svm->drm = drm;
+ mutex_init(&drm->svm->mutex);
+ INIT_LIST_HEAD(&drm->svm->inst);
+
+ ret = nvif_mclass(&drm->client.device.object, buffers);
+ if (ret < 0) {
+ SVM_DBG(svm, "No supported fault buffer class");
+ nouveau_svm_fini(drm);
+ return;
+ }
+
+ ret = nouveau_svm_fault_buffer_ctor(svm, buffers[ret].oclass, 0);
+ if (ret) {
+ nouveau_svm_fini(drm);
+ return;
+ }
+
+ SVM_DBG(svm, "Initialised");
+}
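
The exported entry points give the SVM object a simple lifecycle, assuming the driver-core hookup added elsewhere in this patch:

	nouveau_svm_init(drm);    /* load: allocate svm, create fault buffer 0 */
	nouveau_svm_suspend(drm); /* suspend: put the fault notifier */
	nouveau_svm_resume(drm);  /* resume: resync GET/PUT, rearm the notifier */
	nouveau_svm_fini(drm);    /* unload: destroy the buffer, free svm */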
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.h b/drivers/gpu/drm/nouveau/nouveau_svm.h
new file mode 100644
index 000000000000..e839d8189461
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.h
@@ -0,0 +1,48 @@
+#ifndef __NOUVEAU_SVM_H__
+#define __NOUVEAU_SVM_H__
+#include <nvif/os.h>
+struct drm_device;
+struct drm_file;
+struct nouveau_drm;
+
+struct nouveau_svmm;
+
+#if IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM)
+void nouveau_svm_init(struct nouveau_drm *);
+void nouveau_svm_fini(struct nouveau_drm *);
+void nouveau_svm_suspend(struct nouveau_drm *);
+void nouveau_svm_resume(struct nouveau_drm *);
+
+int nouveau_svmm_init(struct drm_device *, void *, struct drm_file *);
+void nouveau_svmm_fini(struct nouveau_svmm **);
+int nouveau_svmm_join(struct nouveau_svmm *, u64 inst);
+void nouveau_svmm_part(struct nouveau_svmm *, u64 inst);
+int nouveau_svmm_bind(struct drm_device *, void *, struct drm_file *);
+#else /* IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM) */
+static inline void nouveau_svm_init(struct nouveau_drm *drm) {}
+static inline void nouveau_svm_fini(struct nouveau_drm *drm) {}
+static inline void nouveau_svm_suspend(struct nouveau_drm *drm) {}
+static inline void nouveau_svm_resume(struct nouveau_drm *drm) {}
+
+static inline int nouveau_svmm_init(struct drm_device *device, void *p,
+ struct drm_file *file)
+{
+ return -ENOSYS;
+}
+
+static inline void nouveau_svmm_fini(struct nouveau_svmm **svmmp) {}
+
+static inline int nouveau_svmm_join(struct nouveau_svmm *svmm, u64 inst)
+{
+ return 0;
+}
+
+static inline void nouveau_svmm_part(struct nouveau_svmm *svmm, u64 inst) {}
+
+static inline int nouveau_svmm_bind(struct drm_device *device, void *p,
+ struct drm_file *file)
+{
+ return -ENOSYS;
+}
+#endif /* IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM) */
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_vmm.c b/drivers/gpu/drm/nouveau/nouveau_vmm.c
index 2032c3e4f6e5..77061182a1cf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vmm.c
@@ -22,6 +22,7 @@
#include "nouveau_vmm.h"
#include "nouveau_drv.h"
#include "nouveau_bo.h"
+#include "nouveau_svm.h"
#include "nouveau_mem.h"
void
@@ -119,6 +120,7 @@ done:
void
nouveau_vmm_fini(struct nouveau_vmm *vmm)
{
+ nouveau_svmm_fini(&vmm->svmm);
nvif_vmm_fini(&vmm->vmm);
vmm->cli = NULL;
}
@@ -126,7 +128,7 @@ nouveau_vmm_fini(struct nouveau_vmm *vmm)
int
nouveau_vmm_init(struct nouveau_cli *cli, s32 oclass, struct nouveau_vmm *vmm)
{
- int ret = nvif_vmm_init(&cli->mmu, oclass, PAGE_SIZE, 0, NULL, 0,
+ int ret = nvif_vmm_init(&cli->mmu, oclass, false, PAGE_SIZE, 0, NULL, 0,
&vmm->vmm);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_vmm.h b/drivers/gpu/drm/nouveau/nouveau_vmm.h
index ede872f6f668..2b98d975f37e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vmm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_vmm.h
@@ -25,6 +25,7 @@ void nouveau_vma_unmap(struct nouveau_vma *);
struct nouveau_vmm {
struct nouveau_cli *cli;
struct nvif_vmm vmm;
+ struct nouveau_svmm *svmm;
};
int nouveau_vmm_init(struct nouveau_cli *, s32 oclass, struct nouveau_vmm *);
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index e721bb2163a0..f07da00f285f 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -109,7 +109,6 @@ nv84_fence_context_del(struct nouveau_channel *chan)
int
nv84_fence_context_new(struct nouveau_channel *chan)
{
- struct nouveau_cli *cli = (void *)chan->user.client;
struct nv84_fence_priv *priv = chan->drm->fence;
struct nv84_fence_chan *fctx;
int ret;
@@ -127,7 +126,7 @@ nv84_fence_context_new(struct nouveau_channel *chan)
fctx->base.sequence = nv84_fence_read(chan);
mutex_lock(&priv->mutex);
- ret = nouveau_vma_new(priv->bo, &cli->vmm, &fctx->vma);
+ ret = nouveau_vma_new(priv->bo, chan->vmm, &fctx->vma);
mutex_unlock(&priv->mutex);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvif/disp.c b/drivers/gpu/drm/nouveau/nvif/disp.c
index ef97dd223a32..61638b3b9d3d 100644
--- a/drivers/gpu/drm/nouveau/nvif/disp.c
+++ b/drivers/gpu/drm/nouveau/nvif/disp.c
@@ -34,7 +34,7 @@ int
nvif_disp_ctor(struct nvif_device *device, s32 oclass, struct nvif_disp *disp)
{
static const struct nvif_mclass disps[] = {
- { TU104_DISP, -1 },
+ { TU102_DISP, -1 },
{ GV100_DISP, -1 },
{ GP102_DISP, -1 },
{ GP100_DISP, -1 },
diff --git a/drivers/gpu/drm/nouveau/nvif/vmm.c b/drivers/gpu/drm/nouveau/nvif/vmm.c
index 6b9c5776547f..11487c00b909 100644
--- a/drivers/gpu/drm/nouveau/nvif/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvif/vmm.c
@@ -112,8 +112,8 @@ nvif_vmm_fini(struct nvif_vmm *vmm)
}
int
-nvif_vmm_init(struct nvif_mmu *mmu, s32 oclass, u64 addr, u64 size,
- void *argv, u32 argc, struct nvif_vmm *vmm)
+nvif_vmm_init(struct nvif_mmu *mmu, s32 oclass, bool managed, u64 addr,
+ u64 size, void *argv, u32 argc, struct nvif_vmm *vmm)
{
struct nvif_vmm_v0 *args;
u32 argn = sizeof(*args) + argc;
@@ -125,6 +125,7 @@ nvif_vmm_init(struct nvif_mmu *mmu, s32 oclass, u64 addr, u64 size,
if (!(args = kmalloc(argn, GFP_KERNEL)))
return -ENOMEM;
args->version = 0;
+ args->managed = managed;
args->addr = addr;
args->size = size;
memcpy(args->data, argv, argc);
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
index c61b467cf45e..245990de1e90 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
@@ -39,6 +39,7 @@ nvkm_subdev_name[NVKM_SUBDEV_NR] = {
[NVKM_SUBDEV_FB ] = "fb",
[NVKM_SUBDEV_FUSE ] = "fuse",
[NVKM_SUBDEV_GPIO ] = "gpio",
+ [NVKM_SUBDEV_GSP ] = "gsp",
[NVKM_SUBDEV_I2C ] = "i2c",
[NVKM_SUBDEV_IBUS ] = "priv",
[NVKM_SUBDEV_ICCSENSE] = "iccsense",
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
index 177a23301d6a..9211663239af 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
@@ -6,4 +6,4 @@ nvkm-y += nvkm/engine/ce/gm200.o
nvkm-y += nvkm/engine/ce/gp100.o
nvkm-y += nvkm/engine/ce/gp102.o
nvkm-y += nvkm/engine/ce/gv100.o
-nvkm-y += nvkm/engine/ce/tu104.o
+nvkm-y += nvkm/engine/ce/tu102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu104.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c
index 3c25043bbb33..b4308e2d8c75 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c
@@ -24,7 +24,7 @@
#include <nvif/class.h>
static const struct nvkm_engine_func
-tu104_ce = {
+tu102_ce = {
.intr = gp100_ce_intr,
.sclass = {
{ -1, -1, TURING_DMA_COPY_A },
@@ -33,8 +33,8 @@ tu104_ce = {
};
int
-tu104_ce_new(struct nvkm_device *device, int index,
+tu102_ce_new(struct nvkm_device *device, int index,
struct nvkm_engine **pengine)
{
- return nvkm_engine_new_(&tu104_ce, device, index, true, pengine);
+ return nvkm_engine_new_(&tu102_ce, device, index, true, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index d9edb5785813..7971096b6767 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -1613,7 +1613,7 @@ nvd7_chipset = {
.pci = gf106_pci_new,
.therm = gf119_therm_new,
.timer = nv41_timer_new,
- .volt = gf100_volt_new,
+ .volt = gf117_volt_new,
.ce[0] = gf100_ce_new,
.disp = gf119_disp_new,
.dma = gf119_dma_new,
@@ -2405,6 +2405,7 @@ nv140_chipset = {
.fb = gv100_fb_new,
.fuse = gm107_fuse_new,
.gpio = gk104_gpio_new,
+ .gsp = gv100_gsp_new,
.i2c = gm200_i2c_new,
.ibus = gm200_ibus_new,
.imem = nv50_instmem_new,
@@ -2437,97 +2438,106 @@ nv140_chipset = {
static const struct nvkm_device_chip
nv162_chipset = {
.name = "TU102",
- .bar = tu104_bar_new,
+ .bar = tu102_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
- .devinit = tu104_devinit_new,
- .fault = tu104_fault_new,
+ .devinit = tu102_devinit_new,
+ .fault = tu102_fault_new,
.fb = gv100_fb_new,
.fuse = gm107_fuse_new,
.gpio = gk104_gpio_new,
+ .gsp = gv100_gsp_new,
.i2c = gm200_i2c_new,
.ibus = gm200_ibus_new,
.imem = nv50_instmem_new,
.ltc = gp102_ltc_new,
- .mc = tu104_mc_new,
- .mmu = tu104_mmu_new,
+ .mc = tu102_mc_new,
+ .mmu = tu102_mmu_new,
.pci = gp100_pci_new,
.pmu = gp102_pmu_new,
.therm = gp100_therm_new,
.timer = gk20a_timer_new,
.top = gk104_top_new,
- .ce[0] = tu104_ce_new,
- .ce[1] = tu104_ce_new,
- .ce[2] = tu104_ce_new,
- .ce[3] = tu104_ce_new,
- .ce[4] = tu104_ce_new,
- .disp = tu104_disp_new,
+ .ce[0] = tu102_ce_new,
+ .ce[1] = tu102_ce_new,
+ .ce[2] = tu102_ce_new,
+ .ce[3] = tu102_ce_new,
+ .ce[4] = tu102_ce_new,
+ .disp = tu102_disp_new,
.dma = gv100_dma_new,
- .fifo = tu104_fifo_new,
+ .fifo = tu102_fifo_new,
+ .nvdec[0] = gp102_nvdec_new,
+ .sec2 = tu102_sec2_new,
};
static const struct nvkm_device_chip
nv164_chipset = {
.name = "TU104",
- .bar = tu104_bar_new,
+ .bar = tu102_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
- .devinit = tu104_devinit_new,
- .fault = tu104_fault_new,
+ .devinit = tu102_devinit_new,
+ .fault = tu102_fault_new,
.fb = gv100_fb_new,
.fuse = gm107_fuse_new,
.gpio = gk104_gpio_new,
+ .gsp = gv100_gsp_new,
.i2c = gm200_i2c_new,
.ibus = gm200_ibus_new,
.imem = nv50_instmem_new,
.ltc = gp102_ltc_new,
- .mc = tu104_mc_new,
- .mmu = tu104_mmu_new,
+ .mc = tu102_mc_new,
+ .mmu = tu102_mmu_new,
.pci = gp100_pci_new,
.pmu = gp102_pmu_new,
.therm = gp100_therm_new,
.timer = gk20a_timer_new,
.top = gk104_top_new,
- .ce[0] = tu104_ce_new,
- .ce[1] = tu104_ce_new,
- .ce[2] = tu104_ce_new,
- .ce[3] = tu104_ce_new,
- .ce[4] = tu104_ce_new,
- .disp = tu104_disp_new,
+ .ce[0] = tu102_ce_new,
+ .ce[1] = tu102_ce_new,
+ .ce[2] = tu102_ce_new,
+ .ce[3] = tu102_ce_new,
+ .ce[4] = tu102_ce_new,
+ .disp = tu102_disp_new,
.dma = gv100_dma_new,
- .fifo = tu104_fifo_new,
+ .fifo = tu102_fifo_new,
+ .nvdec[0] = gp102_nvdec_new,
+ .sec2 = tu102_sec2_new,
};
static const struct nvkm_device_chip
nv166_chipset = {
.name = "TU106",
- .bar = tu104_bar_new,
+ .bar = tu102_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
- .devinit = tu104_devinit_new,
- .fault = tu104_fault_new,
+ .devinit = tu102_devinit_new,
+ .fault = tu102_fault_new,
.fb = gv100_fb_new,
.fuse = gm107_fuse_new,
.gpio = gk104_gpio_new,
+ .gsp = gv100_gsp_new,
.i2c = gm200_i2c_new,
.ibus = gm200_ibus_new,
.imem = nv50_instmem_new,
.ltc = gp102_ltc_new,
- .mc = tu104_mc_new,
- .mmu = tu104_mmu_new,
+ .mc = tu102_mc_new,
+ .mmu = tu102_mmu_new,
.pci = gp100_pci_new,
.pmu = gp102_pmu_new,
.therm = gp100_therm_new,
.timer = gk20a_timer_new,
.top = gk104_top_new,
- .ce[0] = tu104_ce_new,
- .ce[1] = tu104_ce_new,
- .ce[2] = tu104_ce_new,
- .ce[3] = tu104_ce_new,
- .ce[4] = tu104_ce_new,
- .disp = tu104_disp_new,
+ .ce[0] = tu102_ce_new,
+ .ce[1] = tu102_ce_new,
+ .ce[2] = tu102_ce_new,
+ .ce[3] = tu102_ce_new,
+ .ce[4] = tu102_ce_new,
+ .disp = tu102_disp_new,
.dma = gv100_dma_new,
- .fifo = tu104_fifo_new,
+ .fifo = tu102_fifo_new,
+ .nvdec[0] = gp102_nvdec_new,
+ .sec2 = tu102_sec2_new,
};
static int
@@ -2567,6 +2577,7 @@ nvkm_device_subdev(struct nvkm_device *device, int index)
_(FB , device->fb , &device->fb->subdev);
_(FUSE , device->fuse , &device->fuse->subdev);
_(GPIO , device->gpio , &device->gpio->subdev);
+ _(GSP , device->gsp , &device->gsp->subdev);
_(I2C , device->i2c , &device->i2c->subdev);
_(IBUS , device->ibus , device->ibus);
_(ICCSENSE, device->iccsense, &device->iccsense->subdev);
@@ -3050,6 +3061,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
_(NVKM_SUBDEV_FB , fb);
_(NVKM_SUBDEV_FUSE , fuse);
_(NVKM_SUBDEV_GPIO , gpio);
+ _(NVKM_SUBDEV_GSP , gsp);
_(NVKM_SUBDEV_I2C , i2c);
_(NVKM_SUBDEV_IBUS , ibus);
_(NVKM_SUBDEV_ICCSENSE, iccsense);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
index 253ab914a8ef..2a53e37dfa7a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
@@ -12,6 +12,7 @@
#include <subdev/fb.h>
#include <subdev/fuse.h>
#include <subdev/gpio.h>
+#include <subdev/gsp.h>
#include <subdev/i2c.h>
#include <subdev/ibus.h>
#include <subdev/iccsense.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
index 092ddc4ffefa..03c6d9aef075 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
@@ -365,16 +365,15 @@ nvkm_udevice_child_get(struct nvkm_object *object, int index,
}
if (!sclass) {
- switch (index) {
- case 0: sclass = &nvkm_control_oclass; break;
- case 1:
- if (!device->mmu)
- return -EINVAL;
+ if (index-- == 0)
+ sclass = &nvkm_control_oclass;
+ else if (device->mmu && index-- == 0)
sclass = &device->mmu->user;
- break;
- default:
+ else if (device->fault && index-- == 0)
+ sclass = &device->fault->user;
+ else
return -EINVAL;
- }
+
oclass->base = sclass->base;
}
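
The rewritten lookup replaces the switch with the `index-- == 0` idiom, so optional classes (here the MMU and the new FAULT user object) consume an index slot only when the subdev exists, keeping the enumeration dense. A self-contained sketch of the same pattern (hypothetical names):

	struct thing { int id; };
	static const struct thing base = { 0 }, a = { 1 }, b = { 2 };

	static const struct thing *
	lookup(int index, bool have_a, bool have_b)
	{
		if (index-- == 0)
			return &base;
		if (have_a && index-- == 0)	/* claims a slot only if present */
			return &a;
		if (have_b && index-- == 0)
			return &b;
		return NULL;			/* index exhausted */
	}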
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
index c6a257ba4347..2c28a5e747cc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -15,7 +15,7 @@ nvkm-y += nvkm/engine/disp/gm200.o
nvkm-y += nvkm/engine/disp/gp100.o
nvkm-y += nvkm/engine/disp/gp102.o
nvkm-y += nvkm/engine/disp/gv100.o
-nvkm-y += nvkm/engine/disp/tu104.o
+nvkm-y += nvkm/engine/disp/tu102.o
nvkm-y += nvkm/engine/disp/vga.o
nvkm-y += nvkm/engine/disp/head.o
@@ -39,7 +39,7 @@ nvkm-y += nvkm/engine/disp/sorgk104.o
nvkm-y += nvkm/engine/disp/sorgm107.o
nvkm-y += nvkm/engine/disp/sorgm200.o
nvkm-y += nvkm/engine/disp/sorgv100.o
-nvkm-y += nvkm/engine/disp/sortu104.o
+nvkm-y += nvkm/engine/disp/sortu102.o
nvkm-y += nvkm/engine/disp/outp.o
nvkm-y += nvkm/engine/disp/dp.o
@@ -71,7 +71,7 @@ nvkm-y += nvkm/engine/disp/rootgm200.o
nvkm-y += nvkm/engine/disp/rootgp100.o
nvkm-y += nvkm/engine/disp/rootgp102.o
nvkm-y += nvkm/engine/disp/rootgv100.o
-nvkm-y += nvkm/engine/disp/roottu104.o
+nvkm-y += nvkm/engine/disp/roottu102.o
nvkm-y += nvkm/engine/disp/channv50.o
nvkm-y += nvkm/engine/disp/changf119.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
index 794e90982641..e675d9b9d5d7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
@@ -91,15 +91,21 @@ gf119_disp_intr_error(struct nv50_disp *disp, int chid)
{
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
struct nvkm_device *device = subdev->device;
- u32 mthd = nvkm_rd32(device, 0x6101f0 + (chid * 12));
+ u32 stat = nvkm_rd32(device, 0x6101f0 + (chid * 12));
+ u32 type = (stat & 0x00007000) >> 12;
+ u32 mthd = (stat & 0x00000ffc);
u32 data = nvkm_rd32(device, 0x6101f4 + (chid * 12));
- u32 unkn = nvkm_rd32(device, 0x6101f8 + (chid * 12));
+ u32 code = nvkm_rd32(device, 0x6101f8 + (chid * 12));
+ const struct nvkm_enum *reason =
+ nvkm_enum_find(nv50_disp_intr_error_type, type);
- nvkm_error(subdev, "chid %d mthd %04x data %08x %08x %08x\n",
- chid, (mthd & 0x0000ffc), data, mthd, unkn);
+ nvkm_error(subdev, "chid %d stat %08x reason %d [%s] mthd %04x "
+ "data %08x code %08x\n",
+ chid, stat, type, reason ? reason->name : "",
+ mthd, data, code);
if (chid < ARRAY_SIZE(disp->chan)) {
- switch (mthd & 0xffc) {
+ switch (mthd) {
case 0x0080:
nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR);
break;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
index 47be0ba4aebe..892be6c9b76c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
@@ -103,10 +103,13 @@ gv100_disp_exception(struct nv50_disp *disp, int chid)
u32 mthd = (stat & 0x00000fff) << 2;
u32 data = nvkm_rd32(device, 0x611024 + (chid * 12));
u32 code = nvkm_rd32(device, 0x611028 + (chid * 12));
+ const struct nvkm_enum *reason =
+ nvkm_enum_find(nv50_disp_intr_error_type, type);
- nvkm_error(subdev, "chid %d %08x [type %d mthd %04x] "
+ nvkm_error(subdev, "chid %d stat %08x reason %d [%s] mthd %04x "
"data %08x code %08x\n",
- chid, stat, type, mthd, data, code);
+ chid, stat, type, reason ? reason->name : "",
+ mthd, data, code);
if (chid < ARRAY_SIZE(disp->chan) && disp->chan[chid]) {
switch (mthd) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
index 790e42f460fd..1681ddccd298 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
@@ -201,5 +201,5 @@ int gm200_sor_new(struct nvkm_disp *, int);
int gv100_sor_cnt(struct nvkm_disp *, unsigned long *);
int gv100_sor_new(struct nvkm_disp *, int);
-int tu104_sor_new(struct nvkm_disp *, int);
+int tu102_sor_new(struct nvkm_disp *, int);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
index def005dd5fda..e21556bf2cb1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
@@ -28,7 +28,6 @@
#include "rootnv50.h"
#include <core/client.h>
-#include <core/enum.h>
#include <core/ramht.h>
#include <subdev/bios.h>
#include <subdev/bios/disp.h>
@@ -593,12 +592,15 @@ nv50_disp_super(struct work_struct *work)
nvkm_wr32(device, 0x610030, 0x80000000);
}
-static const struct nvkm_enum
+const struct nvkm_enum
nv50_disp_intr_error_type[] = {
- { 3, "ILLEGAL_MTHD" },
- { 4, "INVALID_VALUE" },
+ { 0, "NONE" },
+ { 1, "PUSHBUFFER_ERR" },
+ { 2, "TRAP" },
+ { 3, "RESERVED_METHOD" },
+ { 4, "INVALID_ARG" },
{ 5, "INVALID_STATE" },
- { 7, "INVALID_HANDLE" },
+ { 7, "UNRESOLVABLE_HANDLE" },
{}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
index c36a8a7cafa1..e5d00f478bb1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
@@ -5,6 +5,8 @@
#include "priv.h"
struct nvkm_head;
+#include <core/enum.h>
+
struct nv50_disp {
const struct nv50_disp_func *func;
struct nvkm_disp base;
@@ -71,6 +73,7 @@ int nv50_disp_init(struct nv50_disp *);
void nv50_disp_fini(struct nv50_disp *);
void nv50_disp_intr(struct nv50_disp *);
void nv50_disp_super(struct work_struct *);
+extern const struct nvkm_enum nv50_disp_intr_error_type[];
int gf119_disp_init(struct nv50_disp *);
void gf119_disp_fini(struct nv50_disp *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
index 97de928cbde1..aee9822a7a87 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
@@ -37,5 +37,5 @@ extern const struct nvkm_disp_oclass gm200_disp_root_oclass;
extern const struct nvkm_disp_oclass gp100_disp_root_oclass;
extern const struct nvkm_disp_oclass gp102_disp_root_oclass;
extern const struct nvkm_disp_oclass gv100_disp_root_oclass;
-extern const struct nvkm_disp_oclass tu104_disp_root_oclass;
+extern const struct nvkm_disp_oclass tu102_disp_root_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/roottu104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/roottu102.c
index ad438c62f66c..579a5d02308a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/roottu104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/roottu102.c
@@ -25,28 +25,28 @@
#include <nvif/class.h>
static const struct nv50_disp_root_func
-tu104_disp_root = {
+tu102_disp_root = {
.user = {
- {{0,0,TU104_DISP_CURSOR }, gv100_disp_curs_new },
- {{0,0,TU104_DISP_WINDOW_IMM_CHANNEL_DMA}, gv100_disp_wimm_new },
- {{0,0,TU104_DISP_CORE_CHANNEL_DMA }, gv100_disp_core_new },
- {{0,0,TU104_DISP_WINDOW_CHANNEL_DMA }, gv100_disp_wndw_new },
+ {{0,0,TU102_DISP_CURSOR }, gv100_disp_curs_new },
+ {{0,0,TU102_DISP_WINDOW_IMM_CHANNEL_DMA}, gv100_disp_wimm_new },
+ {{0,0,TU102_DISP_CORE_CHANNEL_DMA }, gv100_disp_core_new },
+ {{0,0,TU102_DISP_WINDOW_CHANNEL_DMA }, gv100_disp_wndw_new },
{}
},
};
static int
-tu104_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+tu102_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
void *data, u32 size, struct nvkm_object **pobject)
{
- return nv50_disp_root_new_(&tu104_disp_root, disp, oclass,
+ return nv50_disp_root_new_(&tu102_disp_root, disp, oclass,
data, size, pobject);
}
const struct nvkm_disp_oclass
-tu104_disp_root_oclass = {
- .base.oclass = TU104_DISP,
+tu102_disp_root_oclass = {
+ .base.oclass = TU102_DISP,
.base.minver = -1,
.base.maxver = -1,
- .ctor = tu104_disp_root_new,
+ .ctor = tu102_disp_root_new,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c
index df026a525ef1..d57b73ada89e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c
@@ -24,7 +24,7 @@
#include <subdev/timer.h>
static void
-tu104_sor_dp_vcpi(struct nvkm_ior *sor, int head,
+tu102_sor_dp_vcpi(struct nvkm_ior *sor, int head,
u8 slot, u8 slot_nr, u16 pbn, u16 aligned)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
@@ -35,7 +35,7 @@ tu104_sor_dp_vcpi(struct nvkm_ior *sor, int head,
}
static int
-tu104_sor_dp_links(struct nvkm_ior *sor, struct nvkm_i2c_aux *aux)
+tu102_sor_dp_links(struct nvkm_ior *sor, struct nvkm_i2c_aux *aux)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
const u32 soff = nv50_ior_base(sor);
@@ -62,7 +62,7 @@ tu104_sor_dp_links(struct nvkm_ior *sor, struct nvkm_i2c_aux *aux)
}
static const struct nvkm_ior_func
-tu104_sor = {
+tu102_sor = {
.route = {
.get = gm200_sor_route_get,
.set = gm200_sor_route_set,
@@ -75,11 +75,11 @@ tu104_sor = {
},
.dp = {
.lanes = { 0, 1, 2, 3 },
- .links = tu104_sor_dp_links,
+ .links = tu102_sor_dp_links,
.power = g94_sor_dp_power,
.pattern = gm107_sor_dp_pattern,
.drive = gm200_sor_dp_drive,
- .vcpi = tu104_sor_dp_vcpi,
+ .vcpi = tu102_sor_dp_vcpi,
.audio = gv100_sor_dp_audio,
.audio_sym = gv100_sor_dp_audio_sym,
.watermark = gv100_sor_dp_watermark,
@@ -91,7 +91,7 @@ tu104_sor = {
};
int
-tu104_sor_new(struct nvkm_disp *disp, int id)
+tu102_sor_new(struct nvkm_disp *disp, int id)
{
- return nvkm_ior_new_(&tu104_sor, disp, SOR, id);
+ return nvkm_ior_new_(&tu102_sor, disp, SOR, id);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c
index 13fa21459d38..883ae4151ff8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c
@@ -29,7 +29,7 @@
#include <subdev/timer.h>
static int
-tu104_disp_init(struct nv50_disp *disp)
+tu102_disp_init(struct nv50_disp *disp)
{
struct nvkm_device *device = disp->base.engine.subdev.device;
struct nvkm_head *head;
@@ -132,21 +132,21 @@ tu104_disp_init(struct nv50_disp *disp)
}
static const struct nv50_disp_func
-tu104_disp = {
- .init = tu104_disp_init,
+tu102_disp = {
+ .init = tu102_disp_init,
.fini = gv100_disp_fini,
.intr = gv100_disp_intr,
.uevent = &gv100_disp_chan_uevent,
.super = gv100_disp_super,
- .root = &tu104_disp_root_oclass,
+ .root = &tu102_disp_root_oclass,
.wndw = { .cnt = gv100_disp_wndw_cnt },
.head = { .cnt = gv100_head_cnt, .new = gv100_head_new },
- .sor = { .cnt = gv100_sor_cnt, .new = tu104_sor_new },
+ .sor = { .cnt = gv100_sor_cnt, .new = tu102_sor_new },
.ramht_size = 0x2000,
};
int
-tu104_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+tu102_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
{
- return nv50_disp_new_(&tu104_disp, device, index, pdisp);
+ return nv50_disp_new_(&tu102_disp, device, index, pdisp);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
index 87d8e054e40a..05aada541ea5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
@@ -16,7 +16,7 @@ nvkm-y += nvkm/engine/fifo/gm20b.o
nvkm-y += nvkm/engine/fifo/gp100.o
nvkm-y += nvkm/engine/fifo/gp10b.o
nvkm-y += nvkm/engine/fifo/gv100.o
-nvkm-y += nvkm/engine/fifo/tu104.o
+nvkm-y += nvkm/engine/fifo/tu102.o
nvkm-y += nvkm/engine/fifo/chan.o
nvkm-y += nvkm/engine/fifo/channv50.o
@@ -34,7 +34,7 @@ nvkm-y += nvkm/engine/fifo/gpfifog84.o
nvkm-y += nvkm/engine/fifo/gpfifogf100.o
nvkm-y += nvkm/engine/fifo/gpfifogk104.o
nvkm-y += nvkm/engine/fifo/gpfifogv100.o
-nvkm-y += nvkm/engine/fifo/gpfifotu104.o
+nvkm-y += nvkm/engine/fifo/gpfifotu102.o
nvkm-y += nvkm/engine/fifo/usergv100.o
-nvkm-y += nvkm/engine/fifo/usertu104.o
+nvkm-y += nvkm/engine/fifo/usertu102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
index a14545d871d8..f8557cdfbd81 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
@@ -47,6 +47,6 @@ int gv100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *,
int gv100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *,
struct nvkm_engine *, bool);
-int tu104_fifo_gpfifo_new(struct gk104_fifo *, const struct nvkm_oclass *,
+int tu102_fifo_gpfifo_new(struct gk104_fifo *, const struct nvkm_oclass *,
void *data, u32 size, struct nvkm_object **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu102.c
index ff70484dd01a..abef7fb6e2d3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu102.c
@@ -29,14 +29,14 @@
#include <nvif/unpack.h>
static u32
-tu104_fifo_gpfifo_submit_token(struct nvkm_fifo_chan *base)
+tu102_fifo_gpfifo_submit_token(struct nvkm_fifo_chan *base)
{
struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
return (chan->runl << 16) | chan->base.chid;
}
static const struct nvkm_fifo_chan_func
-tu104_fifo_gpfifo = {
+tu102_fifo_gpfifo = {
.dtor = gk104_fifo_gpfifo_dtor,
.init = gk104_fifo_gpfifo_init,
.fini = gk104_fifo_gpfifo_fini,
@@ -45,11 +45,11 @@ tu104_fifo_gpfifo = {
.engine_dtor = gk104_fifo_gpfifo_engine_dtor,
.engine_init = gv100_fifo_gpfifo_engine_init,
.engine_fini = gv100_fifo_gpfifo_engine_fini,
- .submit_token = tu104_fifo_gpfifo_submit_token,
+ .submit_token = tu102_fifo_gpfifo_submit_token,
};
int
-tu104_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
+tu102_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
void *data, u32 size, struct nvkm_object **pobject)
{
struct nvkm_object *parent = oclass->parent;
@@ -67,7 +67,7 @@ tu104_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
args->v0.ilength, args->v0.runlist, args->v0.priv);
if (args->v0.priv && !oclass->client->super)
return -EINVAL;
- return gv100_fifo_gpfifo_new_(&tu104_fifo_gpfifo, fifo,
+ return gv100_fifo_gpfifo_new_(&tu102_fifo_gpfifo, fifo,
&args->v0.runlist,
&args->v0.chid,
args->v0.vmm,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c
index 98c80705bc61..005f3e1729b9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c
@@ -29,7 +29,7 @@
#include <nvif/class.h>
static void
-tu104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl,
+tu102_fifo_runlist_commit(struct gk104_fifo *fifo, int runl,
struct nvkm_memory *mem, int nr)
{
struct nvkm_device *device = fifo->base.engine.subdev.device;
@@ -44,15 +44,15 @@ tu104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl,
}
const struct gk104_fifo_runlist_func
-tu104_fifo_runlist = {
+tu102_fifo_runlist = {
.size = 16,
.cgrp = gv100_fifo_runlist_cgrp,
.chan = gv100_fifo_runlist_chan,
- .commit = tu104_fifo_runlist_commit,
+ .commit = tu102_fifo_runlist_commit,
};
static const struct nvkm_enum
-tu104_fifo_fault_engine[] = {
+tu102_fifo_fault_engine[] = {
{ 0x01, "DISPLAY" },
{ 0x03, "PTP" },
{ 0x06, "PWR_PMU" },
@@ -80,7 +80,7 @@ tu104_fifo_fault_engine[] = {
};
static void
-tu104_fifo_pbdma_init(struct gk104_fifo *fifo)
+tu102_fifo_pbdma_init(struct gk104_fifo *fifo)
{
struct nvkm_device *device = fifo->base.engine.subdev.device;
const u32 mask = (1 << fifo->pbdma_nr) - 1;
@@ -89,28 +89,28 @@ tu104_fifo_pbdma_init(struct gk104_fifo *fifo)
}
static const struct gk104_fifo_pbdma_func
-tu104_fifo_pbdma = {
+tu102_fifo_pbdma = {
.nr = gm200_fifo_pbdma_nr,
- .init = tu104_fifo_pbdma_init,
+ .init = tu102_fifo_pbdma_init,
.init_timeout = gk208_fifo_pbdma_init_timeout,
};
static const struct gk104_fifo_func
-tu104_fifo = {
- .pbdma = &tu104_fifo_pbdma,
+tu102_fifo = {
+ .pbdma = &tu102_fifo_pbdma,
.fault.access = gv100_fifo_fault_access,
- .fault.engine = tu104_fifo_fault_engine,
+ .fault.engine = tu102_fifo_fault_engine,
.fault.reason = gv100_fifo_fault_reason,
.fault.hubclient = gv100_fifo_fault_hubclient,
.fault.gpcclient = gv100_fifo_fault_gpcclient,
- .runlist = &tu104_fifo_runlist,
- .user = {{-1,-1,VOLTA_USERMODE_A }, tu104_fifo_user_new },
- .chan = {{ 0, 0,TURING_CHANNEL_GPFIFO_A}, tu104_fifo_gpfifo_new },
+ .runlist = &tu102_fifo_runlist,
+ .user = {{-1,-1,VOLTA_USERMODE_A }, tu102_fifo_user_new },
+ .chan = {{ 0, 0,TURING_CHANNEL_GPFIFO_A}, tu102_fifo_gpfifo_new },
.cgrp_force = true,
};
int
-tu104_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+tu102_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
{
- return gk104_fifo_new_(&tu104_fifo, device, index, 4096, pfifo);
+ return gk104_fifo_new_(&tu102_fifo, device, index, 4096, pfifo);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/user.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/user.h
index 14b0c6bde8eb..54a3a3092cc0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/user.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/user.h
@@ -3,6 +3,6 @@
#include "priv.h"
int gv100_fifo_user_new(const struct nvkm_oclass *, void *, u32,
struct nvkm_object **);
-int tu104_fifo_user_new(const struct nvkm_oclass *, void *, u32,
+int tu102_fifo_user_new(const struct nvkm_oclass *, void *, u32,
struct nvkm_object **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/usertu104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/usertu102.c
index 8f98548a21f6..217268f8ccad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/usertu104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/usertu102.c
@@ -22,7 +22,7 @@
#include "user.h"
static int
-tu104_fifo_user_map(struct nvkm_object *object, void *argv, u32 argc,
+tu102_fifo_user_map(struct nvkm_object *object, void *argv, u32 argc,
enum nvkm_object_map *type, u64 *addr, u64 *size)
{
struct nvkm_device *device = object->engine->subdev.device;
@@ -33,13 +33,13 @@ tu104_fifo_user_map(struct nvkm_object *object, void *argv, u32 argc,
}
static const struct nvkm_object_func
-tu104_fifo_user = {
- .map = tu104_fifo_user_map,
+tu102_fifo_user = {
+ .map = tu102_fifo_user_map,
};
int
-tu104_fifo_user_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+tu102_fifo_user_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
struct nvkm_object **pobject)
{
- return nvkm_object_new_(&tu104_fifo_user, oclass, argv, argc, pobject);
+ return nvkm_object_new_(&tu102_fifo_user, oclass, argv, argc, pobject);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c
index cd8cf6f7024c..d41fb94524e9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c
@@ -25,6 +25,33 @@
#include <engine/fifo.h>
+u32
+nvkm_gr_ctxsw_inst(struct nvkm_device *device)
+{
+ struct nvkm_gr *gr = device->gr;
+ if (gr && gr->func->ctxsw.inst)
+ return gr->func->ctxsw.inst(gr);
+ return 0;
+}
+
+int
+nvkm_gr_ctxsw_resume(struct nvkm_device *device)
+{
+ struct nvkm_gr *gr = device->gr;
+ if (gr && gr->func->ctxsw.resume)
+ return gr->func->ctxsw.resume(gr);
+ return 0;
+}
+
+int
+nvkm_gr_ctxsw_pause(struct nvkm_device *device)
+{
+ struct nvkm_gr *gr = device->gr;
+ if (gr && gr->func->ctxsw.pause)
+ return gr->func->ctxsw.pause(gr);
+ return 0;
+}
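
These wrappers are deliberately NULL-safe, so callers need not check whether PGRAPH exists or implements ctxsw control; combined with the refcounted FECS methods added below, pause/resume also nest. A usage sketch (call site hypothetical):

	/* Quiesce context switching around an operation that must not
	 * race with a channel switch; nested pairs are harmless, only
	 * the outermost stop/start reaches the firmware.
	 */
	ret = nvkm_gr_ctxsw_pause(device);
	if (ret)
		return ret;
	inst = nvkm_gr_ctxsw_inst(device); /* channel resident on PGRAPH */
	/* ... operate on that channel's state ... */
	ret = nvkm_gr_ctxsw_resume(device);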
+
static bool
nvkm_gr_chsw_load(struct nvkm_engine *engine)
{
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index e813a3f8ea93..85f2d1e950e8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -1523,13 +1523,9 @@ gf100_grctx_generate(struct gf100_gr *gr)
/* Make channel current. */
addr = nvkm_memory_addr(inst) >> 12;
if (gr->firmware) {
- nvkm_wr32(device, 0x409840, 0x00000030);
- nvkm_wr32(device, 0x409500, 0x80000000 | addr);
- nvkm_wr32(device, 0x409504, 0x00000003);
- nvkm_msec(device, 2000,
- if (nvkm_rd32(device, 0x409800) & 0x00000010)
- break;
- );
+ ret = gf100_gr_fecs_bind_pointer(gr, 0x80000000 | addr);
+ if (ret)
+ goto done;
nvkm_kmap(data);
nvkm_wo32(data, 0x1c, 1);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 70d3d41e616c..81a13cf9a292 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -715,6 +715,211 @@ gf100_gr_pack_mmio[] = {
* PGRAPH engine/subdev functions
******************************************************************************/
+static u32
+gf100_gr_ctxsw_inst(struct nvkm_gr *gr)
+{
+ return nvkm_rd32(gr->engine.subdev.device, 0x409b00);
+}
+
+static int
+gf100_gr_fecs_ctrl_ctxsw(struct gf100_gr *gr, u32 mthd)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+
+ nvkm_wr32(device, 0x409804, 0xffffffff);
+ nvkm_wr32(device, 0x409840, 0xffffffff);
+ nvkm_wr32(device, 0x409500, 0xffffffff);
+ nvkm_wr32(device, 0x409504, mthd);
+ nvkm_msec(device, 2000,
+ u32 stat = nvkm_rd32(device, 0x409804);
+ if (stat == 0x00000002)
+ return -EIO;
+ if (stat == 0x00000001)
+ return 0;
+ );
+
+ return -ETIMEDOUT;
+}
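
This helper, and the discover/bind helpers that follow, all speak the same FECS mailbox protocol: reset the ack register, write an argument to 0x409500, poke the method id into 0x409504, then poll for a result. A generic sketch of the pattern (hypothetical helper; the driver keeps each method open-coded because the ack/result conventions differ slightly per method):

	static int
	fecs_mthd(struct gf100_gr *gr, u32 mthd, u32 arg, u32 done)
	{
		struct nvkm_device *device = gr->base.engine.subdev.device;

		nvkm_wr32(device, 0x409840, 0xffffffff);	/* reset ack */
		nvkm_wr32(device, 0x409500, arg);		/* argument */
		nvkm_wr32(device, 0x409504, mthd);		/* kick method */
		nvkm_msec(device, 2000,
			if (nvkm_rd32(device, 0x409800) & done)
				return 0;
		);
		return -ETIMEDOUT;
	}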
+
+int
+gf100_gr_fecs_start_ctxsw(struct nvkm_gr *base)
+{
+ struct gf100_gr *gr = gf100_gr(base);
+ int ret = 0;
+
+ mutex_lock(&gr->fecs.mutex);
+ if (!--gr->fecs.disable) {
+ if (WARN_ON(ret = gf100_gr_fecs_ctrl_ctxsw(gr, 0x39)))
+ gr->fecs.disable++;
+ }
+ mutex_unlock(&gr->fecs.mutex);
+ return ret;
+}
+
+int
+gf100_gr_fecs_stop_ctxsw(struct nvkm_gr *base)
+{
+ struct gf100_gr *gr = gf100_gr(base);
+ int ret = 0;
+
+ mutex_lock(&gr->fecs.mutex);
+ if (!gr->fecs.disable++) {
+ if (WARN_ON(ret = gf100_gr_fecs_ctrl_ctxsw(gr, 0x38)))
+ gr->fecs.disable--;
+ }
+ mutex_unlock(&gr->fecs.mutex);
+ return ret;
+}
+
+int
+gf100_gr_fecs_bind_pointer(struct gf100_gr *gr, u32 inst)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+
+ nvkm_wr32(device, 0x409840, 0x00000030);
+ nvkm_wr32(device, 0x409500, inst);
+ nvkm_wr32(device, 0x409504, 0x00000003);
+ nvkm_msec(device, 2000,
+ u32 stat = nvkm_rd32(device, 0x409800);
+ if (stat & 0x00000020)
+ return -EIO;
+ if (stat & 0x00000010)
+ return 0;
+ );
+
+ return -ETIMEDOUT;
+}
+
+static int
+gf100_gr_fecs_set_reglist_virtual_address(struct gf100_gr *gr, u64 addr)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+
+ nvkm_wr32(device, 0x409810, addr >> 8);
+ nvkm_wr32(device, 0x409800, 0x00000000);
+ nvkm_wr32(device, 0x409500, 0x00000001);
+ nvkm_wr32(device, 0x409504, 0x00000032);
+ nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x409800) == 0x00000001)
+ return 0;
+ );
+
+ return -ETIMEDOUT;
+}
+
+static int
+gf100_gr_fecs_set_reglist_bind_instance(struct gf100_gr *gr, u32 inst)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+
+ nvkm_wr32(device, 0x409810, inst);
+ nvkm_wr32(device, 0x409800, 0x00000000);
+ nvkm_wr32(device, 0x409500, 0x00000001);
+ nvkm_wr32(device, 0x409504, 0x00000031);
+ nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x409800) == 0x00000001)
+ return 0;
+ );
+
+ return -ETIMEDOUT;
+}
+
+static int
+gf100_gr_fecs_discover_reglist_image_size(struct gf100_gr *gr, u32 *psize)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+
+ nvkm_wr32(device, 0x409800, 0x00000000);
+ nvkm_wr32(device, 0x409500, 0x00000001);
+ nvkm_wr32(device, 0x409504, 0x00000030);
+ nvkm_msec(device, 2000,
+ if ((*psize = nvkm_rd32(device, 0x409800)))
+ return 0;
+ );
+
+ return -ETIMEDOUT;
+}
+
+static int
+gf100_gr_fecs_elpg_bind(struct gf100_gr *gr)
+{
+ u32 size;
+ int ret;
+
+ ret = gf100_gr_fecs_discover_reglist_image_size(gr, &size);
+ if (ret)
+ return ret;
+
+ /*XXX: We need to allocate + map the above into PMU's inst block,
+	 * which means we probably need a proper PMU before we
+ * even bother.
+ */
+
+ ret = gf100_gr_fecs_set_reglist_bind_instance(gr, 0);
+ if (ret)
+ return ret;
+
+ return gf100_gr_fecs_set_reglist_virtual_address(gr, 0);
+}
+
+static int
+gf100_gr_fecs_discover_pm_image_size(struct gf100_gr *gr, u32 *psize)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+
+ nvkm_wr32(device, 0x409840, 0xffffffff);
+ nvkm_wr32(device, 0x409500, 0x00000000);
+ nvkm_wr32(device, 0x409504, 0x00000025);
+ nvkm_msec(device, 2000,
+ if ((*psize = nvkm_rd32(device, 0x409800)))
+ return 0;
+ );
+
+ return -ETIMEDOUT;
+}
+
+static int
+gf100_gr_fecs_discover_zcull_image_size(struct gf100_gr *gr, u32 *psize)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+
+ nvkm_wr32(device, 0x409840, 0xffffffff);
+ nvkm_wr32(device, 0x409500, 0x00000000);
+ nvkm_wr32(device, 0x409504, 0x00000016);
+ nvkm_msec(device, 2000,
+ if ((*psize = nvkm_rd32(device, 0x409800)))
+ return 0;
+ );
+
+ return -ETIMEDOUT;
+}
+
+static int
+gf100_gr_fecs_discover_image_size(struct gf100_gr *gr, u32 *psize)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+
+ nvkm_wr32(device, 0x409840, 0xffffffff);
+ nvkm_wr32(device, 0x409500, 0x00000000);
+ nvkm_wr32(device, 0x409504, 0x00000010);
+ nvkm_msec(device, 2000,
+ if ((*psize = nvkm_rd32(device, 0x409800)))
+ return 0;
+ );
+
+ return -ETIMEDOUT;
+}
+
+static void
+gf100_gr_fecs_set_watchdog_timeout(struct gf100_gr *gr, u32 timeout)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+
+ nvkm_wr32(device, 0x409840, 0xffffffff);
+ nvkm_wr32(device, 0x409500, timeout);
+ nvkm_wr32(device, 0x409504, 0x00000021);
+}
+
static bool
gf100_gr_chsw_load(struct nvkm_gr *base)
{
@@ -1487,6 +1692,7 @@ gf100_gr_init_ctxctl_ext(struct gf100_gr *gr)
struct nvkm_device *device = subdev->device;
struct nvkm_secboot *sb = device->secboot;
u32 secboot_mask = 0;
+ int ret;
/* load fuc microcode */
nvkm_mc_unk260(device, 0);
@@ -1495,12 +1701,12 @@ gf100_gr_init_ctxctl_ext(struct gf100_gr *gr)
if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_FECS))
secboot_mask |= BIT(NVKM_SECBOOT_FALCON_FECS);
else
- gf100_gr_init_fw(gr->fecs, &gr->fuc409c, &gr->fuc409d);
+ gf100_gr_init_fw(gr->fecs.falcon, &gr->fuc409c, &gr->fuc409d);
if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_GPCCS))
secboot_mask |= BIT(NVKM_SECBOOT_FALCON_GPCCS);
else
- gf100_gr_init_fw(gr->gpccs, &gr->fuc41ac, &gr->fuc41ad);
+ gf100_gr_init_fw(gr->gpccs.falcon, &gr->fuc41ac, &gr->fuc41ad);
if (secboot_mask != 0) {
int ret = nvkm_secboot_reset(sb, secboot_mask);
@@ -1515,8 +1721,8 @@ gf100_gr_init_ctxctl_ext(struct gf100_gr *gr)
nvkm_wr32(device, 0x41a10c, 0x00000000);
nvkm_wr32(device, 0x40910c, 0x00000000);
- nvkm_falcon_start(gr->gpccs);
- nvkm_falcon_start(gr->fecs);
+ nvkm_falcon_start(gr->gpccs.falcon);
+ nvkm_falcon_start(gr->fecs.falcon);
if (nvkm_msec(device, 2000,
if (nvkm_rd32(device, 0x409800) & 0x00000001)
@@ -1524,72 +1730,36 @@ gf100_gr_init_ctxctl_ext(struct gf100_gr *gr)
) < 0)
return -EBUSY;
- nvkm_wr32(device, 0x409840, 0xffffffff);
- nvkm_wr32(device, 0x409500, 0x7fffffff);
- nvkm_wr32(device, 0x409504, 0x00000021);
+ gf100_gr_fecs_set_watchdog_timeout(gr, 0x7fffffff);
- nvkm_wr32(device, 0x409840, 0xffffffff);
- nvkm_wr32(device, 0x409500, 0x00000000);
- nvkm_wr32(device, 0x409504, 0x00000010);
- if (nvkm_msec(device, 2000,
- if ((gr->size = nvkm_rd32(device, 0x409800)))
- break;
- ) < 0)
- return -EBUSY;
-
- nvkm_wr32(device, 0x409840, 0xffffffff);
- nvkm_wr32(device, 0x409500, 0x00000000);
- nvkm_wr32(device, 0x409504, 0x00000016);
- if (nvkm_msec(device, 2000,
- if (nvkm_rd32(device, 0x409800))
- break;
- ) < 0)
- return -EBUSY;
+ /* Determine how much memory is required to store main context image. */
+ ret = gf100_gr_fecs_discover_image_size(gr, &gr->size);
+ if (ret)
+ return ret;
- nvkm_wr32(device, 0x409840, 0xffffffff);
- nvkm_wr32(device, 0x409500, 0x00000000);
- nvkm_wr32(device, 0x409504, 0x00000025);
- if (nvkm_msec(device, 2000,
- if (nvkm_rd32(device, 0x409800))
- break;
- ) < 0)
- return -EBUSY;
+ /* Determine how much memory is required to store ZCULL image. */
+ ret = gf100_gr_fecs_discover_zcull_image_size(gr, &gr->size_zcull);
+ if (ret)
+ return ret;
- if (device->chipset >= 0xe0) {
- nvkm_wr32(device, 0x409800, 0x00000000);
- nvkm_wr32(device, 0x409500, 0x00000001);
- nvkm_wr32(device, 0x409504, 0x00000030);
- if (nvkm_msec(device, 2000,
- if (nvkm_rd32(device, 0x409800))
- break;
- ) < 0)
- return -EBUSY;
-
- nvkm_wr32(device, 0x409810, 0xb00095c8);
- nvkm_wr32(device, 0x409800, 0x00000000);
- nvkm_wr32(device, 0x409500, 0x00000001);
- nvkm_wr32(device, 0x409504, 0x00000031);
- if (nvkm_msec(device, 2000,
- if (nvkm_rd32(device, 0x409800))
- break;
- ) < 0)
- return -EBUSY;
-
- nvkm_wr32(device, 0x409810, 0x00080420);
- nvkm_wr32(device, 0x409800, 0x00000000);
- nvkm_wr32(device, 0x409500, 0x00000001);
- nvkm_wr32(device, 0x409504, 0x00000032);
- if (nvkm_msec(device, 2000,
- if (nvkm_rd32(device, 0x409800))
- break;
- ) < 0)
- return -EBUSY;
+ /* Determine how much memory is required to store PerfMon image. */
+ ret = gf100_gr_fecs_discover_pm_image_size(gr, &gr->size_pm);
+ if (ret)
+ return ret;
- nvkm_wr32(device, 0x409614, 0x00000070);
- nvkm_wr32(device, 0x409614, 0x00000770);
- nvkm_wr32(device, 0x40802c, 0x00000001);
+ /*XXX: We (likely) require PMU support to even bother with this.
+ *
+ * Also, it seems like not all GPUs support ELPG. Traces I
+	 * have here show RM enabling it on Kepler/Turing, but on none
+	 * of the GPUs in between.  NVGPU decides this by PCI ID.
+ */
+ if (0) {
+ ret = gf100_gr_fecs_elpg_bind(gr);
+ if (ret)
+ return ret;
}
+ /* Generate golden context image. */
if (gr->data == NULL) {
int ret = gf100_grctx_generate(gr);
if (ret) {
@@ -1614,15 +1784,19 @@ gf100_gr_init_ctxctl_int(struct gf100_gr *gr)
/* load HUB microcode */
nvkm_mc_unk260(device, 0);
- nvkm_falcon_load_dmem(gr->fecs, gr->func->fecs.ucode->data.data, 0x0,
+ nvkm_falcon_load_dmem(gr->fecs.falcon,
+ gr->func->fecs.ucode->data.data, 0x0,
gr->func->fecs.ucode->data.size, 0);
- nvkm_falcon_load_imem(gr->fecs, gr->func->fecs.ucode->code.data, 0x0,
+ nvkm_falcon_load_imem(gr->fecs.falcon,
+ gr->func->fecs.ucode->code.data, 0x0,
gr->func->fecs.ucode->code.size, 0, 0, false);
/* load GPC microcode */
- nvkm_falcon_load_dmem(gr->gpccs, gr->func->gpccs.ucode->data.data, 0x0,
+ nvkm_falcon_load_dmem(gr->gpccs.falcon,
+ gr->func->gpccs.ucode->data.data, 0x0,
gr->func->gpccs.ucode->data.size, 0);
- nvkm_falcon_load_imem(gr->gpccs, gr->func->gpccs.ucode->code.data, 0x0,
+ nvkm_falcon_load_imem(gr->gpccs.falcon,
+ gr->func->gpccs.ucode->code.data, 0x0,
gr->func->gpccs.ucode->code.size, 0, 0, false);
nvkm_mc_unk260(device, 1);
@@ -1769,11 +1943,13 @@ gf100_gr_oneinit(struct nvkm_gr *base)
int i, j;
int ret;
- ret = nvkm_falcon_v1_new(subdev, "FECS", 0x409000, &gr->fecs);
+ ret = nvkm_falcon_v1_new(subdev, "FECS", 0x409000, &gr->fecs.falcon);
if (ret)
return ret;
- ret = nvkm_falcon_v1_new(subdev, "GPCCS", 0x41a000, &gr->gpccs);
+ mutex_init(&gr->fecs.mutex);
+
+ ret = nvkm_falcon_v1_new(subdev, "GPCCS", 0x41a000, &gr->gpccs.falcon);
if (ret)
return ret;
@@ -1816,11 +1992,11 @@ gf100_gr_init_(struct nvkm_gr *base)
nvkm_pmu_pgob(gr->base.engine.subdev.device->pmu, false);
- ret = nvkm_falcon_get(gr->fecs, subdev);
+ ret = nvkm_falcon_get(gr->fecs.falcon, subdev);
if (ret)
return ret;
- ret = nvkm_falcon_get(gr->gpccs, subdev);
+ ret = nvkm_falcon_get(gr->gpccs.falcon, subdev);
if (ret)
return ret;
@@ -1832,8 +2008,8 @@ gf100_gr_fini_(struct nvkm_gr *base, bool suspend)
{
struct gf100_gr *gr = gf100_gr(base);
struct nvkm_subdev *subdev = &gr->base.engine.subdev;
- nvkm_falcon_put(gr->gpccs, subdev);
- nvkm_falcon_put(gr->fecs, subdev);
+ nvkm_falcon_put(gr->gpccs.falcon, subdev);
+ nvkm_falcon_put(gr->fecs.falcon, subdev);
return 0;
}
@@ -1859,8 +2035,8 @@ gf100_gr_dtor(struct nvkm_gr *base)
gr->func->dtor(gr);
kfree(gr->data);
- nvkm_falcon_del(&gr->gpccs);
- nvkm_falcon_del(&gr->fecs);
+ nvkm_falcon_del(&gr->gpccs.falcon);
+ nvkm_falcon_del(&gr->fecs.falcon);
gf100_gr_dtor_fw(&gr->fuc409c);
gf100_gr_dtor_fw(&gr->fuc409d);
@@ -1886,6 +2062,9 @@ gf100_gr_ = {
.chan_new = gf100_gr_chan_new,
.object_get = gf100_gr_object_get,
.chsw_load = gf100_gr_chsw_load,
+ .ctxsw.pause = gf100_gr_fecs_stop_ctxsw,
+ .ctxsw.resume = gf100_gr_fecs_start_ctxsw,
+ .ctxsw.inst = gf100_gr_ctxsw_inst,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index dc46cf0131db..fafdd0bbea9b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -82,8 +82,16 @@ struct gf100_gr {
const struct gf100_gr_func *func;
struct nvkm_gr base;
- struct nvkm_falcon *fecs;
- struct nvkm_falcon *gpccs;
+ struct {
+ struct nvkm_falcon *falcon;
+ struct mutex mutex;
+ u32 disable;
+ } fecs;
+
+ struct {
+ struct nvkm_falcon *falcon;
+ } gpccs;
+
struct gf100_gr_fuc fuc409c;
struct gf100_gr_fuc fuc409d;
struct gf100_gr_fuc fuc41ac;
@@ -128,6 +136,8 @@ struct gf100_gr {
struct gf100_gr_mmio mmio_list[4096/8];
u32 size;
u32 *data;
+ u32 size_zcull;
+ u32 size_pm;
};
int gf100_gr_ctor(const struct gf100_gr_func *, struct nvkm_device *,
@@ -136,6 +146,8 @@ int gf100_gr_new_(const struct gf100_gr_func *, struct nvkm_device *,
int, struct nvkm_gr **);
void *gf100_gr_dtor(struct nvkm_gr *);
+int gf100_gr_fecs_bind_pointer(struct gf100_gr *, u32 inst);
+
struct gf100_gr_func_zbc {
void (*clear_color)(struct gf100_gr *, int zbc);
void (*clear_depth)(struct gf100_gr *, int zbc);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h
index 66359c23cbce..d4d5601c51e7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h
@@ -27,6 +27,11 @@ struct nvkm_gr_func {
*/
u64 (*units)(struct nvkm_gr *);
bool (*chsw_load)(struct nvkm_gr *);
+ struct {
+ int (*pause)(struct nvkm_gr *);
+ int (*resume)(struct nvkm_gr *);
+ u32 (*inst)(struct nvkm_gr *);
+ } ctxsw;
struct nvkm_sclass sclass[];
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c
index 4807021fd990..4a63581bdd5e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c
@@ -21,13 +21,21 @@
*/
#include "priv.h"
+#include <subdev/top.h>
#include <engine/falcon.h>
static int
nvkm_nvdec_oneinit(struct nvkm_engine *engine)
{
struct nvkm_nvdec *nvdec = nvkm_nvdec(engine);
- return nvkm_falcon_v1_new(&nvdec->engine.subdev, "NVDEC", 0x84000,
+ struct nvkm_subdev *subdev = &nvdec->engine.subdev;
+
+ nvdec->addr = nvkm_top_addr(subdev->device, subdev->index);
+ if (!nvdec->addr)
+ return -EINVAL;
+
+	/*XXX: fix naming of this when adding support for multiple NVDECs */
+ return nvkm_falcon_v1_new(subdev, "NVDEC", nvdec->addr,
&nvdec->falcon);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild
index 4b17254cfbd0..d9cdea7d9353 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild
@@ -1,2 +1,3 @@
nvkm-y += nvkm/engine/sec2/base.o
nvkm-y += nvkm/engine/sec2/gp102.o
+nvkm-y += nvkm/engine/sec2/tu102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c
index f865d2a3e184..1b49e5b6717f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c
@@ -22,6 +22,7 @@
#include "priv.h"
#include <core/msgqueue.h>
+#include <subdev/top.h>
#include <engine/falcon.h>
static void *
@@ -39,18 +40,18 @@ nvkm_sec2_intr(struct nvkm_engine *engine)
struct nvkm_sec2 *sec2 = nvkm_sec2(engine);
struct nvkm_subdev *subdev = &engine->subdev;
struct nvkm_device *device = subdev->device;
- u32 disp = nvkm_rd32(device, 0x8701c);
- u32 intr = nvkm_rd32(device, 0x87008) & disp & ~(disp >> 16);
+ u32 disp = nvkm_rd32(device, sec2->addr + 0x01c);
+ u32 intr = nvkm_rd32(device, sec2->addr + 0x008) & disp & ~(disp >> 16);
if (intr & 0x00000040) {
schedule_work(&sec2->work);
- nvkm_wr32(device, 0x87004, 0x00000040);
+ nvkm_wr32(device, sec2->addr + 0x004, 0x00000040);
intr &= ~0x00000040;
}
if (intr) {
nvkm_error(subdev, "unhandled intr %08x\n", intr);
- nvkm_wr32(device, 0x87004, intr);
+ nvkm_wr32(device, sec2->addr + 0x004, intr);
}
}
@@ -74,8 +75,15 @@ static int
nvkm_sec2_oneinit(struct nvkm_engine *engine)
{
struct nvkm_sec2 *sec2 = nvkm_sec2(engine);
- return nvkm_falcon_v1_new(&sec2->engine.subdev, "SEC2", 0x87000,
- &sec2->falcon);
+ struct nvkm_subdev *subdev = &sec2->engine.subdev;
+
+ if (!sec2->addr) {
+ sec2->addr = nvkm_top_addr(subdev->device, subdev->index);
+ if (WARN_ON(!sec2->addr))
+ return -EINVAL;
+ }
+
+ return nvkm_falcon_v1_new(subdev, "SEC2", sec2->addr, &sec2->falcon);
}
static int
@@ -95,13 +103,14 @@ nvkm_sec2 = {
};
int
-nvkm_sec2_new_(struct nvkm_device *device, int index,
+nvkm_sec2_new_(struct nvkm_device *device, int index, u32 addr,
struct nvkm_sec2 **psec2)
{
struct nvkm_sec2 *sec2;
if (!(sec2 = *psec2 = kzalloc(sizeof(*sec2), GFP_KERNEL)))
return -ENOMEM;
+ sec2->addr = addr;
INIT_WORK(&sec2->work, nvkm_sec2_recv);
return nvkm_engine_ctor(&nvkm_sec2, device, index, true, &sec2->engine);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c
index 9be1524c08f5..858cf27fa010 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c
@@ -26,5 +26,5 @@ int
gp102_sec2_new(struct nvkm_device *device, int index,
struct nvkm_sec2 **psec2)
{
- return nvkm_sec2_new_(device, index, psec2);
+ return nvkm_sec2_new_(device, index, 0, psec2);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h
index 2f97c806a79d..ab0165e2d1a3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h
@@ -5,6 +5,5 @@
#define nvkm_sec2(p) container_of((p), struct nvkm_sec2, engine)
-int nvkm_sec2_new_(struct nvkm_device *, int, struct nvkm_sec2 **);
-
+int nvkm_sec2_new_(struct nvkm_device *, int, u32 addr, struct nvkm_sec2 **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c
new file mode 100644
index 000000000000..d655576164b1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "priv.h"
+
+int
+tu102_sec2_new(struct nvkm_device *device, int index,
+ struct nvkm_sec2 **psec2)
+{
+ /* TOP info wasn't updated on Turing to reflect the PRI
+ * address change for some reason. We override it here.
+ */
+ return nvkm_sec2_new_(device, index, 0x840000, psec2);
+}
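Taken together with the constructor change, the two chip entry points yield this fallback behaviour (illustrative trace, not code from the patch):

/* gp102_sec2_new(): nvkm_sec2_new_(dev, idx, 0, ...)
 *   -> sec2->addr == 0, so nvkm_sec2_oneinit() queries nvkm_top_addr()
 * tu102_sec2_new(): nvkm_sec2_new_(dev, idx, 0x840000, ...)
 *   -> sec2->addr pre-set, so the stale TOP entry is never consulted
 * nvkm_sec2_intr() then derives its registers (sec2->addr + 0x004,
 * +0x008, +0x01c) from the stored base instead of the fixed 0x87000.
 */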
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
index 427340153640..366c87de6e72 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
@@ -204,6 +204,9 @@ nvkm_falcon_ctor(const struct nvkm_falcon_func *func,
debug_reg = 0x408;
falcon->has_emem = true;
break;
+ case NVKM_SUBDEV_GSP:
+ debug_reg = 0x0; /*XXX*/
+ break;
default:
nvkm_warn(subdev, "unsupported falcon %s!\n",
nvkm_subdev_name[subdev->index]);
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
index 771e16a16267..a8bee1e046aa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
@@ -269,7 +269,7 @@ cmd_write(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_hdr *cmd,
commit = false;
}
- cmd_queue_close(priv, queue, commit);
+ cmd_queue_close(priv, queue, commit);
return ret;
}
@@ -347,7 +347,7 @@ nvkm_msgqueue_post(struct nvkm_msgqueue *priv, enum msgqueue_msg_priority prio,
ret = cmd_write(priv, cmd, queue);
if (ret) {
seq->state = SEQ_STATE_PENDING;
- msgqueue_seq_release(priv, seq);
+ msgqueue_seq_release(priv, seq);
}
return ret;
@@ -373,7 +373,7 @@ msgqueue_msg_handle(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_hdr *hdr)
if (seq->completion)
complete(seq->completion);
- msgqueue_seq_release(priv, seq);
+ msgqueue_seq_release(priv, seq);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
index cfdffef1afb9..a339fe03d423 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
@@ -7,6 +7,7 @@ include $(src)/nvkm/subdev/fault/Kbuild
include $(src)/nvkm/subdev/fb/Kbuild
include $(src)/nvkm/subdev/fuse/Kbuild
include $(src)/nvkm/subdev/gpio/Kbuild
+include $(src)/nvkm/subdev/gsp/Kbuild
include $(src)/nvkm/subdev/i2c/Kbuild
include $(src)/nvkm/subdev/ibus/Kbuild
include $(src)/nvkm/subdev/iccsense/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
index ab0282dc0736..dc300600c019 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
@@ -5,4 +5,4 @@ nvkm-y += nvkm/subdev/bar/gf100.o
nvkm-y += nvkm/subdev/bar/gk20a.o
nvkm-y += nvkm/subdev/bar/gm107.o
nvkm-y += nvkm/subdev/bar/gm20b.o
-nvkm-y += nvkm/subdev/bar/tu104.o
+nvkm-y += nvkm/subdev/bar/tu102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu102.c
index ecaead156e9b..798f65ec3a86 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu102.c
@@ -25,7 +25,7 @@
#include <subdev/timer.h>
static void
-tu104_bar_bar2_wait(struct nvkm_bar *bar)
+tu102_bar_bar2_wait(struct nvkm_bar *bar)
{
struct nvkm_device *device = bar->subdev.device;
nvkm_msec(device, 2000,
@@ -35,13 +35,13 @@ tu104_bar_bar2_wait(struct nvkm_bar *bar)
}
static void
-tu104_bar_bar2_fini(struct nvkm_bar *bar)
+tu102_bar_bar2_fini(struct nvkm_bar *bar)
{
nvkm_mask(bar->subdev.device, 0xb80f48, 0x80000000, 0x00000000);
}
static void
-tu104_bar_bar2_init(struct nvkm_bar *base)
+tu102_bar_bar2_init(struct nvkm_bar *base)
{
struct nvkm_device *device = base->subdev.device;
struct gf100_bar *bar = gf100_bar(base);
@@ -52,7 +52,7 @@ tu104_bar_bar2_init(struct nvkm_bar *base)
}
static void
-tu104_bar_bar1_wait(struct nvkm_bar *bar)
+tu102_bar_bar1_wait(struct nvkm_bar *bar)
{
struct nvkm_device *device = bar->subdev.device;
nvkm_msec(device, 2000,
@@ -62,13 +62,13 @@ tu104_bar_bar1_wait(struct nvkm_bar *bar)
}
static void
-tu104_bar_bar1_fini(struct nvkm_bar *bar)
+tu102_bar_bar1_fini(struct nvkm_bar *bar)
{
nvkm_mask(bar->subdev.device, 0xb80f40, 0x80000000, 0x00000000);
}
static void
-tu104_bar_bar1_init(struct nvkm_bar *base)
+tu102_bar_bar1_init(struct nvkm_bar *base)
{
struct nvkm_device *device = base->subdev.device;
struct gf100_bar *bar = gf100_bar(base);
@@ -77,22 +77,22 @@ tu104_bar_bar1_init(struct nvkm_bar *base)
}
static const struct nvkm_bar_func
-tu104_bar = {
+tu102_bar = {
.dtor = gf100_bar_dtor,
.oneinit = gf100_bar_oneinit,
- .bar1.init = tu104_bar_bar1_init,
- .bar1.fini = tu104_bar_bar1_fini,
- .bar1.wait = tu104_bar_bar1_wait,
+ .bar1.init = tu102_bar_bar1_init,
+ .bar1.fini = tu102_bar_bar1_fini,
+ .bar1.wait = tu102_bar_bar1_wait,
.bar1.vmm = gf100_bar_bar1_vmm,
- .bar2.init = tu104_bar_bar2_init,
- .bar2.fini = tu104_bar_bar2_fini,
- .bar2.wait = tu104_bar_bar2_wait,
+ .bar2.init = tu102_bar_bar2_init,
+ .bar2.fini = tu102_bar_bar2_fini,
+ .bar2.wait = tu102_bar_bar2_wait,
.bar2.vmm = gf100_bar_bar2_vmm,
.flush = g84_bar_flush,
};
int
-tu104_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
+tu102_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
{
- return gf100_bar_new_(&tu104_bar, device, index, pbar);
+ return gf100_bar_new_(&tu102_bar, device, index, pbar);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
index 3133b28f849c..b099d1209be8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
@@ -212,7 +212,7 @@ nvbios_dpcfg_match(struct nvkm_bios *bios, u16 outp, u8 pc, u8 vs, u8 pe,
u16 data;
if (*ver >= 0x30) {
- const u8 vsoff[] = { 0, 4, 7, 9 };
+ static const u8 vsoff[] = { 0, 4, 7, 9 };
idx = (pc * 10) + vsoff[vs] + pe;
if (*ver >= 0x40 && *ver <= 0x41 && *hdr >= 0x12)
idx += nvbios_rd08(bios, outp + 0x11) * 40;
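Adding static here is a small but real win: a plain const array declared inside a function is rebuilt on the stack on every call, while a static const array is emitted once into read-only data. A standalone sketch of the fixed form (function name illustrative):

u8 vs_offset(u8 vs)
{
	static const u8 vsoff[] = { 0, 4, 7, 9 };	/* lives in .rodata, built once */
	return vsoff[vs & 3];
}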
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
index 9cc10e438b3d..ec0e9f7224b5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
@@ -806,12 +806,12 @@ init_generic_condition(struct nvbios_init *init)
init->offset += 3;
switch (cond) {
- case 0:
+ case 0: /* CONDITION_ID_INT_DP. */
if (init_conn(init) != DCB_CONNECTOR_eDP)
init_exec_set(init, false);
break;
- case 1:
- case 2:
+ case 1: /* CONDITION_ID_USE_SPPLL0. */
+ case 2: /* CONDITION_ID_USE_SPPLL1. */
if ( init->outp &&
(data = nvbios_dpout_match(bios, DCB_OUTPUT_DP,
(init->outp->or << 0) |
@@ -826,10 +826,13 @@ init_generic_condition(struct nvbios_init *init)
if (init_exec(init))
warn("script needs dp output table data\n");
break;
- case 5:
+ case 5: /* CONDITION_ID_ASSR_SUPPORT. */
if (!(init_rdauxr(init, 0x0d) & 1))
init_exec_set(init, false);
break;
+ case 7: /* CONDITION_ID_NO_PANEL_SEQ_DELAYS. */
+ init_exec_set(init, false);
+ break;
default:
warn("INIT_GENERIC_CONDITON: unknown 0x%02x\n", cond);
init->offset += size;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
index 3ef505a5c01b..f3c388932b6f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
@@ -13,4 +13,4 @@ nvkm-y += nvkm/subdev/devinit/gf100.o
nvkm-y += nvkm/subdev/devinit/gm107.o
nvkm-y += nvkm/subdev/devinit/gm200.o
nvkm-y += nvkm/subdev/devinit/gv100.o
-nvkm-y += nvkm/subdev/devinit/tu104.o
+nvkm-y += nvkm/subdev/devinit/tu102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
index aae87b3fc429..397670e72fff 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
@@ -26,7 +26,7 @@
#include <subdev/clk/pll.h>
static int
-tu104_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
+tu102_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
{
struct nvkm_subdev *subdev = &init->subdev;
struct nvkm_device *device = subdev->device;
@@ -66,7 +66,7 @@ tu104_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
}
static int
-tu104_devinit_post(struct nvkm_devinit *base, bool post)
+tu102_devinit_post(struct nvkm_devinit *base, bool post)
{
struct nv50_devinit *init = nv50_devinit(base);
gm200_devinit_preos(init, post);
@@ -74,16 +74,16 @@ tu104_devinit_post(struct nvkm_devinit *base, bool post)
}
static const struct nvkm_devinit_func
-tu104_devinit = {
+tu102_devinit = {
.init = nv50_devinit_init,
- .post = tu104_devinit_post,
- .pll_set = tu104_devinit_pll_set,
+ .post = tu102_devinit_post,
+ .pll_set = tu102_devinit_pll_set,
.disable = gm107_devinit_disable,
};
int
-tu104_devinit_new(struct nvkm_device *device, int index,
+tu102_devinit_new(struct nvkm_device *device, int index,
struct nvkm_devinit **pinit)
{
- return nv50_devinit_new_(&tu104_devinit, device, index, pinit);
+ return nv50_devinit_new_(&tu102_devinit, device, index, pinit);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild
index 794eb1745b2f..42586267fc08 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild
@@ -1,4 +1,5 @@
nvkm-y += nvkm/subdev/fault/base.o
+nvkm-y += nvkm/subdev/fault/user.o
nvkm-y += nvkm/subdev/fault/gp100.o
nvkm-y += nvkm/subdev/fault/gv100.o
-nvkm-y += nvkm/subdev/fault/tu104.o
+nvkm-y += nvkm/subdev/fault/tu102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
index 4ba1e21e8fda..ca251560d3e0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
@@ -176,5 +176,7 @@ nvkm_fault_new_(const struct nvkm_fault_func *func, struct nvkm_device *device,
return -ENOMEM;
nvkm_subdev_ctor(&nvkm_fault, device, index, &fault->subdev);
fault->func = func;
+ fault->user.ctor = nvkm_ufault_new;
+ fault->user.base = func->user.base;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c
index 8fb96fe614f9..4f3c4e091117 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c
@@ -23,6 +23,8 @@
#include <subdev/mc.h>
+#include <nvif/class.h>
+
static void
gp100_fault_buffer_intr(struct nvkm_fault_buffer *buffer, bool enable)
{
@@ -69,6 +71,7 @@ gp100_fault = {
.buffer.init = gp100_fault_buffer_init,
.buffer.fini = gp100_fault_buffer_fini,
.buffer.intr = gp100_fault_buffer_intr,
+ .user = { { 0, 0, MAXWELL_FAULT_BUFFER_A }, 0 },
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
index 6fc54e17c935..6747f09c2dc3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
@@ -25,6 +25,8 @@
#include <subdev/mmu.h>
#include <engine/fifo.h>
+#include <nvif/class.h>
+
static void
gv100_fault_buffer_process(struct nvkm_fault_buffer *buffer)
{
@@ -166,6 +168,13 @@ gv100_fault_intr(struct nvkm_fault *fault)
}
}
+ if (stat & 0x08000000) {
+ if (fault->buffer[1]) {
+ nvkm_event_send(&fault->event, 1, 1, NULL, 0);
+ stat &= ~0x08000000;
+ }
+ }
+
if (stat) {
nvkm_debug(subdev, "intr %08x\n", stat);
}
@@ -208,6 +217,13 @@ gv100_fault = {
.buffer.init = gv100_fault_buffer_init,
.buffer.fini = gv100_fault_buffer_fini,
.buffer.intr = gv100_fault_buffer_intr,
+ /*TODO: Figure out how to expose non-replayable fault buffer, which,
+ * for some reason, is where recoverable CE faults appear...
+ *
+ * It's a bit tricky, as both NVKM and SVM will need access to
+ * the non-replayable fault buffer.
+ */
+ .user = { { 0, 0, VOLTA_FAULT_BUFFER_A }, 1 },
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
index 8ca8b2876dad..975e66ac6344 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
@@ -34,7 +34,14 @@ struct nvkm_fault_func {
void (*fini)(struct nvkm_fault_buffer *);
void (*intr)(struct nvkm_fault_buffer *, bool enable);
} buffer;
+ struct {
+ struct nvkm_sclass base;
+ int rp;
+ } user;
};
int gv100_fault_oneinit(struct nvkm_fault *);
+
+int nvkm_ufault_new(struct nvkm_device *, const struct nvkm_oclass *,
+ void *, u32, struct nvkm_object **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c
index 9c8a3adf99d7..fa1dfe5692b0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c
@@ -28,7 +28,7 @@
#include <nvif/class.h>
static void
-tu104_fault_buffer_intr(struct nvkm_fault_buffer *buffer, bool enable)
+tu102_fault_buffer_intr(struct nvkm_fault_buffer *buffer, bool enable)
{
/*XXX: Earlier versions of RM touched the old regs on Turing,
* which don't appear to actually work anymore, but newer
@@ -37,7 +37,7 @@ tu104_fault_buffer_intr(struct nvkm_fault_buffer *buffer, bool enable)
}
static void
-tu104_fault_buffer_fini(struct nvkm_fault_buffer *buffer)
+tu102_fault_buffer_fini(struct nvkm_fault_buffer *buffer)
{
struct nvkm_device *device = buffer->fault->subdev.device;
const u32 foff = buffer->id * 0x20;
@@ -45,7 +45,7 @@ tu104_fault_buffer_fini(struct nvkm_fault_buffer *buffer)
}
static void
-tu104_fault_buffer_init(struct nvkm_fault_buffer *buffer)
+tu102_fault_buffer_init(struct nvkm_fault_buffer *buffer)
{
struct nvkm_device *device = buffer->fault->subdev.device;
const u32 foff = buffer->id * 0x20;
@@ -57,7 +57,7 @@ tu104_fault_buffer_init(struct nvkm_fault_buffer *buffer)
}
static void
-tu104_fault_buffer_info(struct nvkm_fault_buffer *buffer)
+tu102_fault_buffer_info(struct nvkm_fault_buffer *buffer)
{
struct nvkm_device *device = buffer->fault->subdev.device;
const u32 foff = buffer->id * 0x20;
@@ -70,7 +70,7 @@ tu104_fault_buffer_info(struct nvkm_fault_buffer *buffer)
}
static void
-tu104_fault_intr_fault(struct nvkm_fault *fault)
+tu102_fault_intr_fault(struct nvkm_fault *fault)
{
struct nvkm_subdev *subdev = &fault->subdev;
struct nvkm_device *device = subdev->device;
@@ -96,14 +96,14 @@ tu104_fault_intr_fault(struct nvkm_fault *fault)
}
static void
-tu104_fault_intr(struct nvkm_fault *fault)
+tu102_fault_intr(struct nvkm_fault *fault)
{
struct nvkm_subdev *subdev = &fault->subdev;
struct nvkm_device *device = subdev->device;
u32 stat = nvkm_rd32(device, 0xb83094);
if (stat & 0x80000000) {
- tu104_fault_intr_fault(fault);
+ tu102_fault_intr_fault(fault);
nvkm_wr32(device, 0xb83094, 0x80000000);
stat &= ~0x80000000;
}
@@ -129,7 +129,7 @@ tu104_fault_intr(struct nvkm_fault *fault)
}
static void
-tu104_fault_fini(struct nvkm_fault *fault)
+tu102_fault_fini(struct nvkm_fault *fault)
{
nvkm_notify_put(&fault->nrpfb);
if (fault->buffer[0])
@@ -138,7 +138,7 @@ tu104_fault_fini(struct nvkm_fault *fault)
}
static void
-tu104_fault_init(struct nvkm_fault *fault)
+tu102_fault_init(struct nvkm_fault *fault)
{
/*XXX: enable priv faults */
fault->func->buffer.init(fault->buffer[0]);
@@ -146,22 +146,23 @@ tu104_fault_init(struct nvkm_fault *fault)
}
static const struct nvkm_fault_func
-tu104_fault = {
+tu102_fault = {
.oneinit = gv100_fault_oneinit,
- .init = tu104_fault_init,
- .fini = tu104_fault_fini,
- .intr = tu104_fault_intr,
+ .init = tu102_fault_init,
+ .fini = tu102_fault_fini,
+ .intr = tu102_fault_intr,
.buffer.nr = 2,
.buffer.entry_size = 32,
- .buffer.info = tu104_fault_buffer_info,
- .buffer.init = tu104_fault_buffer_init,
- .buffer.fini = tu104_fault_buffer_fini,
- .buffer.intr = tu104_fault_buffer_intr,
+ .buffer.info = tu102_fault_buffer_info,
+ .buffer.init = tu102_fault_buffer_init,
+ .buffer.fini = tu102_fault_buffer_fini,
+ .buffer.intr = tu102_fault_buffer_intr,
+ .user = { { 0, 0, VOLTA_FAULT_BUFFER_A }, 1 },
};
int
-tu104_fault_new(struct nvkm_device *device, int index,
+tu102_fault_new(struct nvkm_device *device, int index,
struct nvkm_fault **pfault)
{
- return nvkm_fault_new_(&tu104_fault, device, index, pfault);
+ return nvkm_fault_new_(&tu102_fault, device, index, pfault);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c
new file mode 100644
index 000000000000..ac835c9582fd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/memory.h>
+#include <subdev/mmu.h>
+
+#include <nvif/clb069.h>
+#include <nvif/unpack.h>
+
+static int
+nvkm_ufault_map(struct nvkm_object *object, void *argv, u32 argc,
+ enum nvkm_object_map *type, u64 *addr, u64 *size)
+{
+ struct nvkm_fault_buffer *buffer = nvkm_fault_buffer(object);
+ struct nvkm_device *device = buffer->fault->subdev.device;
+ *type = NVKM_OBJECT_MAP_IO;
+ *addr = device->func->resource_addr(device, 3) + buffer->addr;
+ *size = nvkm_memory_size(buffer->mem);
+ return 0;
+}
+
+static int
+nvkm_ufault_ntfy(struct nvkm_object *object, u32 type,
+ struct nvkm_event **pevent)
+{
+ struct nvkm_fault_buffer *buffer = nvkm_fault_buffer(object);
+ if (type == NVB069_V0_NTFY_FAULT) {
+ *pevent = &buffer->fault->event;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int
+nvkm_ufault_fini(struct nvkm_object *object, bool suspend)
+{
+ struct nvkm_fault_buffer *buffer = nvkm_fault_buffer(object);
+ buffer->fault->func->buffer.fini(buffer);
+ return 0;
+}
+
+static int
+nvkm_ufault_init(struct nvkm_object *object)
+{
+ struct nvkm_fault_buffer *buffer = nvkm_fault_buffer(object);
+ buffer->fault->func->buffer.init(buffer);
+ return 0;
+}
+
+static void *
+nvkm_ufault_dtor(struct nvkm_object *object)
+{
+ return NULL;
+}
+
+static const struct nvkm_object_func
+nvkm_ufault = {
+ .dtor = nvkm_ufault_dtor,
+ .init = nvkm_ufault_init,
+ .fini = nvkm_ufault_fini,
+ .ntfy = nvkm_ufault_ntfy,
+ .map = nvkm_ufault_map,
+};
+
+int
+nvkm_ufault_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
+ void *argv, u32 argc, struct nvkm_object **pobject)
+{
+ union {
+ struct nvif_clb069_v0 v0;
+ } *args = argv;
+ struct nvkm_fault *fault = device->fault;
+ struct nvkm_fault_buffer *buffer = fault->buffer[fault->func->user.rp];
+ int ret = -ENOSYS;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ args->v0.entries = buffer->entries;
+ args->v0.get = buffer->get;
+ args->v0.put = buffer->put;
+ } else
+ return ret;
+
+ nvkm_object_ctor(&nvkm_ufault, oclass, &buffer->object);
+ *pobject = &buffer->object;
+ return 0;
+}
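From the client's side, the constructor already returns everything needed to size a mapping: the v0 args carry the entry count plus the get/put indices, and tu102 below declares 32-byte records (.buffer.entry_size = 32). Illustrative arithmetic only:

/* map_size = args.v0.entries * 32 bytes
 *          == nvkm_memory_size(buffer->mem), the value that
 *             nvkm_ufault_map() reports back via *size.
 */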
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c
index 60ece0a8a2e1..1d2d6bae73cd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c
@@ -87,7 +87,7 @@ nvkm_gddr3_calc(struct nvkm_ram *ram)
WR = (ram->next->bios.timing[2] & 0x007f0000) >> 16;
/* XXX: Get these values from the VBIOS instead */
DLL = !(ram->mr[1] & 0x1);
- RON = !(ram->mr[1] & 0x300) >> 8;
+ RON = !((ram->mr[1] & 0x300) >> 8);
break;
default:
return -ENOSYS;
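The RON change fixes an operator-precedence bug: logical NOT binds tighter than the shift, so !(x & 0x300) collapses to 0 or 1 and the following >> 8 always produces 0. A standalone demonstration with illustrative values:

#include <assert.h>

int main(void)
{
	unsigned mr1 = 0x100;			/* RON bits set */
	assert((!(mr1 & 0x300) >> 8) == 0);	/* old form: 0 */
	assert(!((mr1 & 0x300) >> 8) == 0);	/* fixed form: 0 */

	mr1 = 0x000;				/* RON bits clear */
	assert((!(mr1 & 0x300) >> 8) == 0);	/* old form: still 0 */
	assert(!((mr1 & 0x300) >> 8) == 1);	/* fixed form: now 1 */
	return 0;
}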
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild
new file mode 100644
index 000000000000..26fc6feb807e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild
@@ -0,0 +1 @@
+nvkm-y += nvkm/subdev/gsp/gv100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c
new file mode 100644
index 000000000000..dccfaf1162e2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2019 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <subdev/gsp.h>
+#include <subdev/top.h>
+#include <engine/falcon.h>
+
+static int
+gv100_gsp_oneinit(struct nvkm_subdev *subdev)
+{
+ struct nvkm_gsp *gsp = nvkm_gsp(subdev);
+
+ gsp->addr = nvkm_top_addr(subdev->device, subdev->index);
+ if (!gsp->addr)
+ return -EINVAL;
+
+ return nvkm_falcon_v1_new(subdev, "GSP", gsp->addr, &gsp->falcon);
+}
+
+static void *
+gv100_gsp_dtor(struct nvkm_subdev *subdev)
+{
+ struct nvkm_gsp *gsp = nvkm_gsp(subdev);
+ nvkm_falcon_del(&gsp->falcon);
+ return gsp;
+}
+
+static const struct nvkm_subdev_func
+gv100_gsp = {
+ .dtor = gv100_gsp_dtor,
+ .oneinit = gv100_gsp_oneinit,
+};
+
+int
+gv100_gsp_new(struct nvkm_device *device, int index, struct nvkm_gsp **pgsp)
+{
+ struct nvkm_gsp *gsp;
+
+ if (!(gsp = *pgsp = kzalloc(sizeof(*gsp), GFP_KERNEL)))
+ return -ENOMEM;
+
+ nvkm_subdev_ctor(&gv100_gsp, device, index, &gsp->subdev);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
index f3b06329c338..c64e399326b3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
@@ -12,4 +12,4 @@ nvkm-y += nvkm/subdev/mc/gk104.o
nvkm-y += nvkm/subdev/mc/gk20a.o
nvkm-y += nvkm/subdev/mc/gp100.o
nvkm-y += nvkm/subdev/mc/gp10b.o
-nvkm-y += nvkm/subdev/mc/tu104.o
+nvkm-y += nvkm/subdev/mc/tu102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c
index b7165bd18999..d098c44a4fcb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c
@@ -22,7 +22,7 @@
#include "priv.h"
static void
-tu104_mc_intr_hack(struct nvkm_mc *mc, bool *handled)
+tu102_mc_intr_hack(struct nvkm_mc *mc, bool *handled)
{
struct nvkm_device *device = mc->subdev.device;
u32 stat = nvkm_rd32(device, 0xb81010);
@@ -37,19 +37,19 @@ tu104_mc_intr_hack(struct nvkm_mc *mc, bool *handled)
}
static const struct nvkm_mc_func
-tu104_mc = {
+tu102_mc = {
.init = nv50_mc_init,
.intr = gp100_mc_intr,
.intr_unarm = gp100_mc_intr_unarm,
.intr_rearm = gp100_mc_intr_rearm,
.intr_mask = gp100_mc_intr_mask,
.intr_stat = gf100_mc_intr_stat,
- .intr_hack = tu104_mc_intr_hack,
+ .intr_hack = tu102_mc_intr_hack,
.reset = gk104_mc_reset,
};
int
-tu104_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+tu102_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
{
- return gp100_mc_new_(&tu104_mc, device, index, pmc);
+ return gp100_mc_new_(&tu102_mc, device, index, pmc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
index 8966180b36cc..db9c56028f21 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
@@ -13,7 +13,7 @@ nvkm-y += nvkm/subdev/mmu/gm20b.o
nvkm-y += nvkm/subdev/mmu/gp100.o
nvkm-y += nvkm/subdev/mmu/gp10b.o
nvkm-y += nvkm/subdev/mmu/gv100.o
-nvkm-y += nvkm/subdev/mmu/tu104.o
+nvkm-y += nvkm/subdev/mmu/tu102.o
nvkm-y += nvkm/subdev/mmu/mem.o
nvkm-y += nvkm/subdev/mmu/memnv04.o
@@ -34,7 +34,7 @@ nvkm-y += nvkm/subdev/mmu/vmmgm20b.o
nvkm-y += nvkm/subdev/mmu/vmmgp100.o
nvkm-y += nvkm/subdev/mmu/vmmgp10b.o
nvkm-y += nvkm/subdev/mmu/vmmgv100.o
-nvkm-y += nvkm/subdev/mmu/vmmtu104.o
+nvkm-y += nvkm/subdev/mmu/vmmtu102.o
nvkm-y += nvkm/subdev/mmu/umem.o
nvkm-y += nvkm/subdev/mmu/ummu.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c
index 651b8805c67c..65cb9d28e60e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c
@@ -31,7 +31,7 @@ gp100_mmu = {
.dma_bits = 47,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
.mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
- .vmm = {{ -1, -1, NVIF_CLASS_VMM_GP100}, gp100_vmm_new },
+ .vmm = {{ -1, 0, NVIF_CLASS_VMM_GP100}, gp100_vmm_new },
.kind = gm200_mmu_kind,
.kind_sys = true,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c
index 3bd3db31e0bb..0a50be9a785a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c
@@ -31,7 +31,7 @@ gp10b_mmu = {
.dma_bits = 47,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
.mem = {{ -1, -1, NVIF_CLASS_MEM_GF100}, .umap = gf100_mem_map },
- .vmm = {{ -1, -1, NVIF_CLASS_VMM_GP100}, gp10b_vmm_new },
+ .vmm = {{ -1, 0, NVIF_CLASS_VMM_GP100}, gp10b_vmm_new },
.kind = gm200_mmu_kind,
.kind_sys = true,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gv100.c
index f666cb57f69e..e0997eedd6d9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gv100.c
@@ -31,7 +31,7 @@ gv100_mmu = {
.dma_bits = 47,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
.mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
- .vmm = {{ -1, -1, NVIF_CLASS_VMM_GP100}, gv100_vmm_new },
+ .vmm = {{ -1, 0, NVIF_CLASS_VMM_GP100}, gv100_vmm_new },
.kind = gm200_mmu_kind,
.kind_sys = true,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
index 948a48c21be4..2ad1102a4e31 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
@@ -28,7 +28,7 @@ struct nvkm_mmu_func {
struct {
struct nvkm_sclass user;
- int (*ctor)(struct nvkm_mmu *, u64 addr, u64 size,
+ int (*ctor)(struct nvkm_mmu *, bool managed, u64 addr, u64 size,
void *argv, u32 argc, struct lock_class_key *,
const char *name, struct nvkm_vmm **);
bool global;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
index 8e6f4096170d..c0db0ce10cba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
@@ -27,17 +27,17 @@
#include <nvif/class.h>
static const struct nvkm_mmu_func
-tu104_mmu = {
+tu102_mmu = {
.dma_bits = 47,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
.mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
- .vmm = {{ -1, 0, NVIF_CLASS_VMM_GP100}, tu104_vmm_new },
+ .vmm = {{ -1, 0, NVIF_CLASS_VMM_GP100}, tu102_vmm_new },
.kind = gm200_mmu_kind,
.kind_sys = true,
};
int
-tu104_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+tu102_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
{
- return nvkm_mmu_new_(&tu104_mmu, device, index, pmmu);
+ return nvkm_mmu_new_(&tu102_mmu, device, index, pmmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
index 6889076097ec..c43b8248c682 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
@@ -43,6 +43,69 @@ nvkm_uvmm_search(struct nvkm_client *client, u64 handle)
}
static int
+nvkm_uvmm_mthd_pfnclr(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
+{
+ struct nvkm_client *client = uvmm->object.client;
+ union {
+ struct nvif_vmm_pfnclr_v0 v0;
+ } *args = argv;
+ struct nvkm_vmm *vmm = uvmm->vmm;
+ int ret = -ENOSYS;
+ u64 addr, size;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ addr = args->v0.addr;
+ size = args->v0.size;
+ } else
+ return ret;
+
+ if (!client->super)
+ return -ENOENT;
+
+ if (size) {
+ mutex_lock(&vmm->mutex);
+ ret = nvkm_vmm_pfn_unmap(vmm, addr, size);
+ mutex_unlock(&vmm->mutex);
+ }
+
+ return ret;
+}
+
+static int
+nvkm_uvmm_mthd_pfnmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
+{
+ struct nvkm_client *client = uvmm->object.client;
+ union {
+ struct nvif_vmm_pfnmap_v0 v0;
+ } *args = argv;
+ struct nvkm_vmm *vmm = uvmm->vmm;
+ int ret = -ENOSYS;
+ u64 addr, size, *phys;
+ u8 page;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true))) {
+ page = args->v0.page;
+ addr = args->v0.addr;
+ size = args->v0.size;
+ phys = args->v0.phys;
+ if (argc != (size >> page) * sizeof(args->v0.phys[0]))
+ return -EINVAL;
+ } else
+ return ret;
+
+ if (!client->super)
+ return -ENOENT;
+
+ if (size) {
+ mutex_lock(&vmm->mutex);
+ ret = nvkm_vmm_pfn_map(vmm, page, addr, size, phys);
+ mutex_unlock(&vmm->mutex);
+ }
+
+ return ret;
+}
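The argc check ties the byte length of the ioctl payload to the page count of the request. A worked example, assuming 4KiB pages (page = 12):

/* size = 2 MiB, page = 12:
 *   size >> page       = 512 PFN slots
 *   512 * sizeof(u64)  = 4096 bytes, the exact argc required;
 * any other length fails the check above with -EINVAL.
 */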
+
+static int
nvkm_uvmm_mthd_unmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
struct nvkm_client *client = uvmm->object.client;
@@ -78,7 +141,7 @@ nvkm_uvmm_mthd_unmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
goto done;
}
- nvkm_vmm_unmap_locked(vmm, vma);
+ nvkm_vmm_unmap_locked(vmm, vma, false);
ret = 0;
done:
mutex_unlock(&vmm->mutex);
@@ -124,6 +187,11 @@ nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
goto fail;
}
+ if (ret = -EINVAL, vma->mapped && !vma->memory) {
+ VMM_DEBUG(vmm, "pfnmap %016llx", addr);
+ goto fail;
+ }
+
if (ret = -EINVAL, vma->addr != addr || vma->size != size) {
if (addr + size > vma->addr + vma->size || vma->memory ||
(vma->refd == NVKM_VMA_PAGE_NONE && !vma->mapref)) {
@@ -271,6 +339,15 @@ nvkm_uvmm_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc)
case NVIF_VMM_V0_PUT : return nvkm_uvmm_mthd_put (uvmm, argv, argc);
case NVIF_VMM_V0_MAP : return nvkm_uvmm_mthd_map (uvmm, argv, argc);
case NVIF_VMM_V0_UNMAP : return nvkm_uvmm_mthd_unmap (uvmm, argv, argc);
+ case NVIF_VMM_V0_PFNMAP: return nvkm_uvmm_mthd_pfnmap(uvmm, argv, argc);
+ case NVIF_VMM_V0_PFNCLR: return nvkm_uvmm_mthd_pfnclr(uvmm, argv, argc);
+ case NVIF_VMM_V0_MTHD(0x00) ... NVIF_VMM_V0_MTHD(0x7f):
+ if (uvmm->vmm->func->mthd) {
+ return uvmm->vmm->func->mthd(uvmm->vmm,
+ uvmm->object.client,
+ mthd, argv, argc);
+ }
+ break;
default:
break;
}
@@ -304,8 +381,10 @@ nvkm_uvmm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
struct nvkm_uvmm *uvmm;
int ret = -ENOSYS;
u64 addr, size;
+ bool managed;
if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, more))) {
+ managed = args->v0.managed != 0;
addr = args->v0.addr;
size = args->v0.size;
} else
@@ -317,7 +396,7 @@ nvkm_uvmm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
*pobject = &uvmm->object;
if (!mmu->vmm) {
- ret = mmu->func->vmm.ctor(mmu, addr, size, argv, argc,
+ ret = mmu->func->vmm.ctor(mmu, managed, addr, size, argv, argc,
NULL, "user", &uvmm->vmm);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
index 6b87fff014b3..fa93f964e6a4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -255,11 +255,23 @@ nvkm_vmm_unref_sptes(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgt,
}
static bool
-nvkm_vmm_unref_ptes(struct nvkm_vmm_iter *it, u32 ptei, u32 ptes)
+nvkm_vmm_unref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
{
const struct nvkm_vmm_desc *desc = it->desc;
const int type = desc->type == SPT;
struct nvkm_vmm_pt *pgt = it->pt[0];
+ bool dma;
+
+ if (pfn) {
+ /* Need to clear PTE valid bits before we dma_unmap_page(). */
+ dma = desc->func->pfn_clear(it->vmm, pgt->pt[type], ptei, ptes);
+ if (dma) {
+ /* GPU may have cached the PT, flush before unmap. */
+ nvkm_vmm_flush_mark(it);
+ nvkm_vmm_flush(it);
+ desc->func->pfn_unmap(it->vmm, pgt->pt[type], ptei, ptes);
+ }
+ }
/* Drop PTE references. */
pgt->refs[type] -= ptes;
@@ -349,7 +361,7 @@ nvkm_vmm_ref_sptes(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgt,
}
static bool
-nvkm_vmm_ref_ptes(struct nvkm_vmm_iter *it, u32 ptei, u32 ptes)
+nvkm_vmm_ref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
{
const struct nvkm_vmm_desc *desc = it->desc;
const int type = desc->type == SPT;
@@ -379,7 +391,7 @@ nvkm_vmm_sparse_ptes(const struct nvkm_vmm_desc *desc,
}
static bool
-nvkm_vmm_sparse_unref_ptes(struct nvkm_vmm_iter *it, u32 ptei, u32 ptes)
+nvkm_vmm_sparse_unref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
{
struct nvkm_vmm_pt *pt = it->pt[0];
if (it->desc->type == PGD)
@@ -387,14 +399,14 @@ nvkm_vmm_sparse_unref_ptes(struct nvkm_vmm_iter *it, u32 ptei, u32 ptes)
else
if (it->desc->type == LPT)
memset(&pt->pte[ptei], 0x00, sizeof(pt->pte[0]) * ptes);
- return nvkm_vmm_unref_ptes(it, ptei, ptes);
+ return nvkm_vmm_unref_ptes(it, pfn, ptei, ptes);
}
static bool
-nvkm_vmm_sparse_ref_ptes(struct nvkm_vmm_iter *it, u32 ptei, u32 ptes)
+nvkm_vmm_sparse_ref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
{
nvkm_vmm_sparse_ptes(it->desc, it->pt[0], ptei, ptes);
- return nvkm_vmm_ref_ptes(it, ptei, ptes);
+ return nvkm_vmm_ref_ptes(it, pfn, ptei, ptes);
}
static bool
@@ -487,8 +499,8 @@ nvkm_vmm_ref_swpt(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgd, u32 pdei)
static inline u64
nvkm_vmm_iter(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
- u64 addr, u64 size, const char *name, bool ref,
- bool (*REF_PTES)(struct nvkm_vmm_iter *, u32, u32),
+ u64 addr, u64 size, const char *name, bool ref, bool pfn,
+ bool (*REF_PTES)(struct nvkm_vmm_iter *, bool pfn, u32, u32),
nvkm_vmm_pte_func MAP_PTES, struct nvkm_vmm_map *map,
nvkm_vmm_pxe_func CLR_PTES)
{
@@ -548,7 +560,7 @@ nvkm_vmm_iter(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
}
/* Handle PTE updates. */
- if (!REF_PTES || REF_PTES(&it, ptei, ptes)) {
+ if (!REF_PTES || REF_PTES(&it, pfn, ptei, ptes)) {
struct nvkm_mmu_pt *pt = pgt->pt[type];
if (MAP_PTES || CLR_PTES) {
if (MAP_PTES)
@@ -590,7 +602,7 @@ static void
nvkm_vmm_ptes_sparse_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
u64 addr, u64 size)
{
- nvkm_vmm_iter(vmm, page, addr, size, "sparse unref", false,
+ nvkm_vmm_iter(vmm, page, addr, size, "sparse unref", false, false,
nvkm_vmm_sparse_unref_ptes, NULL, NULL,
page->desc->func->invalid ?
page->desc->func->invalid : page->desc->func->unmap);
@@ -602,8 +614,8 @@ nvkm_vmm_ptes_sparse_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
{
if ((page->type & NVKM_VMM_PAGE_SPARSE)) {
u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "sparse ref",
- true, nvkm_vmm_sparse_ref_ptes, NULL,
- NULL, page->desc->func->sparse);
+ true, false, nvkm_vmm_sparse_ref_ptes,
+ NULL, NULL, page->desc->func->sparse);
if (fail != ~0ULL) {
if ((size = fail - addr))
nvkm_vmm_ptes_sparse_put(vmm, page, addr, size);
@@ -666,11 +678,11 @@ nvkm_vmm_ptes_sparse(struct nvkm_vmm *vmm, u64 addr, u64 size, bool ref)
static void
nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
- u64 addr, u64 size, bool sparse)
+ u64 addr, u64 size, bool sparse, bool pfn)
{
const struct nvkm_vmm_desc_func *func = page->desc->func;
nvkm_vmm_iter(vmm, page, addr, size, "unmap + unref",
- false, nvkm_vmm_unref_ptes, NULL, NULL,
+ false, pfn, nvkm_vmm_unref_ptes, NULL, NULL,
sparse ? func->sparse : func->invalid ? func->invalid :
func->unmap);
}
@@ -681,10 +693,10 @@ nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
nvkm_vmm_pte_func func)
{
u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref + map", true,
- nvkm_vmm_ref_ptes, func, map, NULL);
+ false, nvkm_vmm_ref_ptes, func, map, NULL);
if (fail != ~0ULL) {
if ((size = fail - addr))
- nvkm_vmm_ptes_unmap_put(vmm, page, addr, size, false);
+ nvkm_vmm_ptes_unmap_put(vmm, page, addr, size, false, false);
return -ENOMEM;
}
return 0;
@@ -692,10 +704,11 @@ nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
static void
nvkm_vmm_ptes_unmap(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
- u64 addr, u64 size, bool sparse)
+ u64 addr, u64 size, bool sparse, bool pfn)
{
const struct nvkm_vmm_desc_func *func = page->desc->func;
- nvkm_vmm_iter(vmm, page, addr, size, "unmap", false, NULL, NULL, NULL,
+ nvkm_vmm_iter(vmm, page, addr, size, "unmap", false, pfn,
+ NULL, NULL, NULL,
sparse ? func->sparse : func->invalid ? func->invalid :
func->unmap);
}
@@ -705,7 +718,7 @@ nvkm_vmm_ptes_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
u64 addr, u64 size, struct nvkm_vmm_map *map,
nvkm_vmm_pte_func func)
{
- nvkm_vmm_iter(vmm, page, addr, size, "map", false,
+ nvkm_vmm_iter(vmm, page, addr, size, "map", false, false,
NULL, func, map, NULL);
}
@@ -713,7 +726,7 @@ static void
nvkm_vmm_ptes_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
u64 addr, u64 size)
{
- nvkm_vmm_iter(vmm, page, addr, size, "unref", false,
+ nvkm_vmm_iter(vmm, page, addr, size, "unref", false, false,
nvkm_vmm_unref_ptes, NULL, NULL, NULL);
}
@@ -721,7 +734,7 @@ static int
nvkm_vmm_ptes_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
u64 addr, u64 size)
{
- u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref", true,
+ u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref", true, false,
nvkm_vmm_ref_ptes, NULL, NULL, NULL);
if (fail != ~0ULL) {
if (fail != addr)
@@ -763,6 +776,7 @@ nvkm_vma_tail(struct nvkm_vma *vma, u64 tail)
new->part = vma->part;
new->user = vma->user;
new->busy = vma->busy;
+ new->mapped = vma->mapped;
list_add(&new->head, &vma->head);
return new;
}
@@ -935,11 +949,40 @@ nvkm_vmm_node_split(struct nvkm_vmm *vmm,
}
static void
+nvkm_vma_dump(struct nvkm_vma *vma)
+{
+ printk(KERN_ERR "%016llx %016llx %c%c%c%c%c%c%c%c%c %p\n",
+ vma->addr, (u64)vma->size,
+ vma->used ? '-' : 'F',
+ vma->mapref ? 'R' : '-',
+ vma->sparse ? 'S' : '-',
+ vma->page != NVKM_VMA_PAGE_NONE ? '0' + vma->page : '-',
+ vma->refd != NVKM_VMA_PAGE_NONE ? '0' + vma->refd : '-',
+ vma->part ? 'P' : '-',
+ vma->user ? 'U' : '-',
+ vma->busy ? 'B' : '-',
+ vma->mapped ? 'M' : '-',
+ vma->memory);
+}
+
+static void
+nvkm_vmm_dump(struct nvkm_vmm *vmm)
+{
+ struct nvkm_vma *vma;
+ list_for_each_entry(vma, &vmm->list, head) {
+ nvkm_vma_dump(vma);
+ }
+}
+
+static void
nvkm_vmm_dtor(struct nvkm_vmm *vmm)
{
struct nvkm_vma *vma;
struct rb_node *node;
+ if (0)
+ nvkm_vmm_dump(vmm);
+
while ((node = rb_first(&vmm->root))) {
struct nvkm_vma *vma = rb_entry(node, typeof(*vma), tree);
nvkm_vmm_put(vmm, &vma);
@@ -972,16 +1015,32 @@ nvkm_vmm_dtor(struct nvkm_vmm *vmm)
}
}
+static int
+nvkm_vmm_ctor_managed(struct nvkm_vmm *vmm, u64 addr, u64 size)
+{
+ struct nvkm_vma *vma;
+ if (!(vma = nvkm_vma_new(addr, size)))
+ return -ENOMEM;
+ vma->mapref = true;
+ vma->sparse = false;
+ vma->used = true;
+ vma->user = true;
+ nvkm_vmm_node_insert(vmm, vma);
+ list_add_tail(&vma->head, &vmm->list);
+ return 0;
+}
+
int
nvkm_vmm_ctor(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
- u32 pd_header, u64 addr, u64 size, struct lock_class_key *key,
- const char *name, struct nvkm_vmm *vmm)
+ u32 pd_header, bool managed, u64 addr, u64 size,
+ struct lock_class_key *key, const char *name,
+ struct nvkm_vmm *vmm)
{
static struct lock_class_key _key;
const struct nvkm_vmm_page *page = func->page;
const struct nvkm_vmm_desc *desc;
struct nvkm_vma *vma;
- int levels, bits = 0;
+ int levels, bits = 0, ret;
vmm->func = func;
vmm->mmu = mmu;
@@ -1009,11 +1068,6 @@ nvkm_vmm_ctor(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
if (WARN_ON(levels > NVKM_VMM_LEVELS_MAX))
return -EINVAL;
- vmm->start = addr;
- vmm->limit = size ? (addr + size) : (1ULL << bits);
- if (vmm->start > vmm->limit || vmm->limit > (1ULL << bits))
- return -EINVAL;
-
/* Allocate top-level page table. */
vmm->pd = nvkm_vmm_pt_new(desc, false, NULL);
if (!vmm->pd)
@@ -1036,50 +1090,273 @@ nvkm_vmm_ctor(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
vmm->free = RB_ROOT;
vmm->root = RB_ROOT;
- if (!(vma = nvkm_vma_new(vmm->start, vmm->limit - vmm->start)))
- return -ENOMEM;
+ if (managed) {
+ /* Address-space will be managed by the client for the most
+ * part, except for a specified area where NVKM allocations
+ * are allowed to be placed.
+ */
+ vmm->start = 0;
+ vmm->limit = 1ULL << bits;
+ if (addr + size < addr || addr + size > vmm->limit)
+ return -EINVAL;
+
+ /* Client-managed area before the NVKM-managed area. */
+ if (addr && (ret = nvkm_vmm_ctor_managed(vmm, 0, addr)))
+ return ret;
+
+ /* NVKM-managed area. */
+ if (size) {
+ if (!(vma = nvkm_vma_new(addr, size)))
+ return -ENOMEM;
+ nvkm_vmm_free_insert(vmm, vma);
+ list_add_tail(&vma->head, &vmm->list);
+ }
+
+ /* Client-managed area after the NVKM-managed area. */
+ addr = addr + size;
+ size = vmm->limit - addr;
+ if (size && (ret = nvkm_vmm_ctor_managed(vmm, addr, size)))
+ return ret;
+ } else {
+ /* Address-space fully managed by NVKM, requiring calls to
+ * nvkm_vmm_get()/nvkm_vmm_put() to allocate address-space.
+ */
+ vmm->start = addr;
+ vmm->limit = size ? (addr + size) : (1ULL << bits);
+ if (vmm->start > vmm->limit || vmm->limit > (1ULL << bits))
+ return -EINVAL;
+
+ if (!(vma = nvkm_vma_new(vmm->start, vmm->limit - vmm->start)))
+ return -ENOMEM;
+
+ nvkm_vmm_free_insert(vmm, vma);
+ list_add(&vma->head, &vmm->list);
+ }
- nvkm_vmm_free_insert(vmm, vma);
- list_add(&vma->head, &vmm->list);
return 0;
}
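For a managed VMM the constructor now pre-carves the address space into three regions, and only the middle one feeds the free tree used by nvkm_vmm_get()/nvkm_vmm_put(). Sketched layout:

/*   0 ............ addr ............ addr+size .......... 1ULL << bits
 *   | client-managed |  NVKM-managed  |  client-managed     |
 *   | pre-inserted   |  free tree,    |  pre-inserted       |
 *   | 'used' VMA     |  vmm_get/put   |  'used' VMA         |
 */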
int
nvkm_vmm_new_(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
- u32 hdr, u64 addr, u64 size, struct lock_class_key *key,
- const char *name, struct nvkm_vmm **pvmm)
+ u32 hdr, bool managed, u64 addr, u64 size,
+ struct lock_class_key *key, const char *name,
+ struct nvkm_vmm **pvmm)
{
if (!(*pvmm = kzalloc(sizeof(**pvmm), GFP_KERNEL)))
return -ENOMEM;
- return nvkm_vmm_ctor(func, mmu, hdr, addr, size, key, name, *pvmm);
+ return nvkm_vmm_ctor(func, mmu, hdr, managed, addr, size, key, name, *pvmm);
+}
+
+static struct nvkm_vma *
+nvkm_vmm_pfn_split_merge(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
+ u64 addr, u64 size, u8 page, bool map)
+{
+ struct nvkm_vma *prev = NULL;
+ struct nvkm_vma *next = NULL;
+
+ if (vma->addr == addr && vma->part && (prev = node(vma, prev))) {
+ if (prev->memory || prev->mapped != map)
+ prev = NULL;
+ }
+
+ if (vma->addr + vma->size == addr + size && (next = node(vma, next))) {
+ if (!next->part ||
+ next->memory || next->mapped != map)
+ next = NULL;
+ }
+
+ if (prev || next)
+ return nvkm_vmm_node_merge(vmm, prev, vma, next, size);
+ return nvkm_vmm_node_split(vmm, vma, addr, size);
+}
+
+int
+nvkm_vmm_pfn_unmap(struct nvkm_vmm *vmm, u64 addr, u64 size)
+{
+ struct nvkm_vma *vma = nvkm_vmm_node_search(vmm, addr);
+ struct nvkm_vma *next;
+ u64 limit = addr + size;
+ u64 start = addr;
+
+ if (!vma)
+ return -EINVAL;
+
+ do {
+ if (!vma->mapped || vma->memory)
+ continue;
+
+ size = min(limit - start, vma->size - (start - vma->addr));
+
+ nvkm_vmm_ptes_unmap_put(vmm, &vmm->func->page[vma->refd],
+ start, size, false, true);
+
+ next = nvkm_vmm_pfn_split_merge(vmm, vma, start, size, 0, false);
+ if (!WARN_ON(!next)) {
+ vma = next;
+ vma->refd = NVKM_VMA_PAGE_NONE;
+ vma->mapped = false;
+ }
+ } while ((vma = node(vma, next)) && (start = vma->addr) < limit);
+
+ return 0;
+}
+
+/*TODO:
+ * - Avoid PT readback (for dma_unmap etc), this might end up being dealt
+ * with inside HMM, which would be a lot nicer for us to deal with.
+ * - Multiple page sizes (particularly for huge page support).
+ * - Support for systems without a 4KiB page size.
+ */
+int
+nvkm_vmm_pfn_map(struct nvkm_vmm *vmm, u8 shift, u64 addr, u64 size, u64 *pfn)
+{
+ const struct nvkm_vmm_page *page = vmm->func->page;
+ struct nvkm_vma *vma, *tmp;
+ u64 limit = addr + size;
+ u64 start = addr;
+ int pm = size >> shift;
+ int pi = 0;
+
+ /* Only support mapping where the page size of the incoming page
+ * array matches a page size available for direct mapping.
+ */
+ while (page->shift && page->shift != shift &&
+ page->desc->func->pfn == NULL)
+ page++;
+
+ if (!page->shift || !IS_ALIGNED(addr, 1ULL << shift) ||
+ !IS_ALIGNED(size, 1ULL << shift) ||
+ addr + size < addr || addr + size > vmm->limit) {
+ VMM_DEBUG(vmm, "paged map %d %d %016llx %016llx\n",
+ shift, page->shift, addr, size);
+ return -EINVAL;
+ }
+
+ if (!(vma = nvkm_vmm_node_search(vmm, addr)))
+ return -ENOENT;
+
+ do {
+ bool map = !!(pfn[pi] & NVKM_VMM_PFN_V);
+ bool mapped = vma->mapped;
+ u64 size = limit - start;
+ u64 addr = start;
+ int pn, ret = 0;
+
+ /* Narrow the operation window to cover a single action (page
+ * should be mapped or not) within a single VMA.
+ */
+ for (pn = 0; pi + pn < pm; pn++) {
+ if (map != !!(pfn[pi + pn] & NVKM_VMM_PFN_V))
+ break;
+ }
+ size = min_t(u64, size, pn << page->shift);
+ size = min_t(u64, size, vma->size + vma->addr - addr);
+
+ /* Reject any operation to unmanaged regions, and areas that
+ * have nvkm_memory objects mapped in them already.
+ */
+ if (!vma->mapref || vma->memory) {
+ ret = -EINVAL;
+ goto next;
+ }
+
+ /* In order to both properly refcount GPU page tables, and
+ * prevent "normal" mappings and these direct mappings from
+ * interfering with each other, we need to track contiguous
+ * ranges that have been mapped with this interface.
+ *
+ * Here we attempt to either split an existing VMA so we're
+ * able to flag the region as either unmapped/mapped, or to
+ * merge with adjacent VMAs that are already compatible.
+ *
+ * If the region is already compatible, nothing is required.
+ */
+ if (map != mapped) {
+ tmp = nvkm_vmm_pfn_split_merge(vmm, vma, addr, size,
+ page -
+ vmm->func->page, map);
+ if (WARN_ON(!tmp)) {
+ ret = -ENOMEM;
+ goto next;
+ }
+
+ if ((tmp->mapped = map))
+ tmp->refd = page - vmm->func->page;
+ else
+ tmp->refd = NVKM_VMA_PAGE_NONE;
+ vma = tmp;
+ }
+
+ /* Update HW page tables. */
+ if (map) {
+ struct nvkm_vmm_map args;
+ args.page = page;
+ args.pfn = &pfn[pi];
+
+ if (!mapped) {
+ ret = nvkm_vmm_ptes_get_map(vmm, page, addr,
+ size, &args, page->
+ desc->func->pfn);
+ } else {
+ nvkm_vmm_ptes_map(vmm, page, addr, size, &args,
+ page->desc->func->pfn);
+ }
+ } else {
+ if (mapped) {
+ nvkm_vmm_ptes_unmap_put(vmm, page, addr, size,
+ false, true);
+ }
+ }
+
+next:
+ /* Iterate to next operation. */
+ if (vma->addr + vma->size == addr + size)
+ vma = node(vma, next);
+ start += size;
+
+ if (ret) {
+ /* Failure is signalled by clearing the valid bit on
+ * any PFN that couldn't be modified as requested.
+ */
+ while (size) {
+ pfn[pi++] = NVKM_VMM_PFN_NONE;
+ size -= 1 << page->shift;
+ }
+ } else {
+ pi += size >> page->shift;
+ }
+ } while (vma && start < limit);
+
+ return 0;
}
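To make the windowing concrete, consider a five-page request whose PFN valid bits are { V, V, -, -, V } (illustrative walk; real windows are also clipped to vma->size):

/* the inner scan groups runs of identical valid bits, so the loop
 * performs three operations:
 *   pages 0-1: map   (pn = 2)
 *   pages 2-3: unmap (pn = 2)
 *   page  4  : map   (pn = 1)
 * each window is further limited so it never crosses a VMA boundary.
 */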
void
nvkm_vmm_unmap_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
{
- struct nvkm_vma *next = node(vma, next);
struct nvkm_vma *prev = NULL;
+ struct nvkm_vma *next;
nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
nvkm_memory_unref(&vma->memory);
+ vma->mapped = false;
- if (!vma->part || ((prev = node(vma, prev)), prev->memory))
+ if (vma->part && (prev = node(vma, prev)) && prev->mapped)
prev = NULL;
- if (!next->part || next->memory)
+ if ((next = node(vma, next)) && (!next->part || next->mapped))
next = NULL;
nvkm_vmm_node_merge(vmm, prev, vma, next, vma->size);
}
void
-nvkm_vmm_unmap_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+nvkm_vmm_unmap_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma, bool pfn)
{
const struct nvkm_vmm_page *page = &vmm->func->page[vma->refd];
if (vma->mapref) {
- nvkm_vmm_ptes_unmap_put(vmm, page, vma->addr, vma->size, vma->sparse);
+ nvkm_vmm_ptes_unmap_put(vmm, page, vma->addr, vma->size, vma->sparse, pfn);
vma->refd = NVKM_VMA_PAGE_NONE;
} else {
- nvkm_vmm_ptes_unmap(vmm, page, vma->addr, vma->size, vma->sparse);
+ nvkm_vmm_ptes_unmap(vmm, page, vma->addr, vma->size, vma->sparse, pfn);
}
nvkm_vmm_unmap_region(vmm, vma);
@@ -1090,7 +1367,7 @@ nvkm_vmm_unmap(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
{
if (vma->memory) {
mutex_lock(&vmm->mutex);
- nvkm_vmm_unmap_locked(vmm, vma);
+ nvkm_vmm_unmap_locked(vmm, vma, false);
mutex_unlock(&vmm->mutex);
}
}
@@ -1224,6 +1501,7 @@ nvkm_vmm_map_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
nvkm_memory_unref(&vma->memory);
vma->memory = nvkm_memory_ref(map->memory);
+ vma->mapped = true;
vma->tags = map->tags;
return 0;
}
@@ -1269,14 +1547,16 @@ nvkm_vmm_put_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
if (vma->mapref || !vma->sparse) {
do {
- const bool map = next->memory != NULL;
+ const bool mem = next->memory != NULL;
+ const bool map = next->mapped;
const u8 refd = next->refd;
const u64 addr = next->addr;
u64 size = next->size;
/* Merge regions that are in the same state. */
while ((next = node(next, next)) && next->part &&
- (next->memory != NULL) == map &&
+ (next->mapped == map) &&
+ (next->memory != NULL) == mem &&
(next->refd == refd))
size += next->size;
@@ -1286,7 +1566,8 @@ nvkm_vmm_put_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
* the page tree.
*/
nvkm_vmm_ptes_unmap_put(vmm, &page[refd], addr,
- size, vma->sparse);
+ size, vma->sparse,
+ !mem);
} else
if (refd != NVKM_VMA_PAGE_NONE) {
/* Drop allocation-time PTE references. */
@@ -1301,7 +1582,7 @@ nvkm_vmm_put_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
*/
next = vma;
do {
- if (next->memory)
+ if (next->mapped)
nvkm_vmm_unmap_region(vmm, next);
} while ((next = node(vma, next)) && next->part);
@@ -1522,7 +1803,7 @@ nvkm_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
}
static bool
-nvkm_vmm_boot_ptes(struct nvkm_vmm_iter *it, u32 ptei, u32 ptes)
+nvkm_vmm_boot_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
{
const struct nvkm_vmm_desc *desc = it->desc;
const int type = desc->type == SPT;
@@ -1544,7 +1825,7 @@ nvkm_vmm_boot(struct nvkm_vmm *vmm)
if (ret)
return ret;
- nvkm_vmm_iter(vmm, page, vmm->start, limit, "bootstrap", false,
+ nvkm_vmm_iter(vmm, page, vmm->start, limit, "bootstrap", false, false,
nvkm_vmm_boot_ptes, NULL, NULL, NULL);
vmm->bootstrapped = true;
return 0;
@@ -1584,7 +1865,8 @@ nvkm_vmm_new(struct nvkm_device *device, u64 addr, u64 size, void *argv,
struct nvkm_mmu *mmu = device->mmu;
struct nvkm_vmm *vmm = NULL;
int ret;
- ret = mmu->func->vmm.ctor(mmu, addr, size, argv, argc, key, name, &vmm);
+ ret = mmu->func->vmm.ctor(mmu, false, addr, size, argv, argc,
+ key, name, &vmm);
if (ret)
nvkm_vmm_unref(&vmm);
*pvmm = vmm;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
index 42ad326521a3..5e55ecbd8005 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
@@ -67,6 +67,10 @@ struct nvkm_vmm_desc_func {
nvkm_vmm_pte_func mem;
nvkm_vmm_pte_func dma;
nvkm_vmm_pte_func sgl;
+
+ nvkm_vmm_pte_func pfn;
+ bool (*pfn_clear)(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32 ptei, u32 ptes);
+ nvkm_vmm_pxe_func pfn_unmap;
};
extern const struct nvkm_vmm_desc_func gf100_vmm_pgd;
@@ -141,6 +145,11 @@ struct nvkm_vmm_func {
struct nvkm_vmm_map *);
void (*flush)(struct nvkm_vmm *, int depth);
+ int (*mthd)(struct nvkm_vmm *, struct nvkm_client *,
+ u32 mthd, void *argv, u32 argc);
+
+ void (*invalidate_pdb)(struct nvkm_vmm *, u64 addr);
+
u64 page_block;
const struct nvkm_vmm_page page[];
};
@@ -151,11 +160,12 @@ struct nvkm_vmm_join {
};
int nvkm_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *,
- u32 pd_header, u64 addr, u64 size, struct lock_class_key *,
- const char *name, struct nvkm_vmm **);
+ u32 pd_header, bool managed, u64 addr, u64 size,
+ struct lock_class_key *, const char *name,
+ struct nvkm_vmm **);
int nvkm_vmm_ctor(const struct nvkm_vmm_func *, struct nvkm_mmu *,
- u32 pd_header, u64 addr, u64 size, struct lock_class_key *,
- const char *name, struct nvkm_vmm *);
+ u32 pd_header, bool managed, u64 addr, u64 size,
+ struct lock_class_key *, const char *name, struct nvkm_vmm *);
struct nvkm_vma *nvkm_vmm_node_search(struct nvkm_vmm *, u64 addr);
struct nvkm_vma *nvkm_vmm_node_split(struct nvkm_vmm *, struct nvkm_vma *,
u64 addr, u64 size);
@@ -163,13 +173,25 @@ int nvkm_vmm_get_locked(struct nvkm_vmm *, bool getref, bool mapref,
bool sparse, u8 page, u8 align, u64 size,
struct nvkm_vma **pvma);
void nvkm_vmm_put_locked(struct nvkm_vmm *, struct nvkm_vma *);
-void nvkm_vmm_unmap_locked(struct nvkm_vmm *, struct nvkm_vma *);
-void nvkm_vmm_unmap_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma);
+void nvkm_vmm_unmap_locked(struct nvkm_vmm *, struct nvkm_vma *, bool pfn);
+void nvkm_vmm_unmap_region(struct nvkm_vmm *, struct nvkm_vma *);
+
+#define NVKM_VMM_PFN_ADDR 0xfffffffffffff000ULL
+#define NVKM_VMM_PFN_ADDR_SHIFT 12
+#define NVKM_VMM_PFN_APER 0x00000000000000f0ULL
+#define NVKM_VMM_PFN_HOST 0x0000000000000000ULL
+#define NVKM_VMM_PFN_VRAM 0x0000000000000010ULL
+#define NVKM_VMM_PFN_W 0x0000000000000002ULL
+#define NVKM_VMM_PFN_V 0x0000000000000001ULL
+#define NVKM_VMM_PFN_NONE 0x0000000000000000ULL
+
+int nvkm_vmm_pfn_map(struct nvkm_vmm *, u8 page, u64 addr, u64 size, u64 *pfn);
+int nvkm_vmm_pfn_unmap(struct nvkm_vmm *, u64 addr, u64 size);
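The NVKM_VMM_PFN_* layout above packs a CPU page-frame number and its mapping attributes into a single u64 per page, which is the format nvkm_vmm_pfn_map() consumes. A minimal sketch of composing one entry, using a hypothetical helper name and illustrative values:

	/* Hypothetical helper, illustrative only: encode one pfn array
	 * entry for nvkm_vmm_pfn_map().  A valid, writable page in host
	 * (system) memory at CPU pfn 0x12345 becomes 0x0000000012345003.
	 */
	static inline u64
	example_pfn_entry(unsigned long cpu_pfn, bool vram, bool writable)
	{
		u64 v = (u64)cpu_pfn << NVKM_VMM_PFN_ADDR_SHIFT;
		v |= vram ? NVKM_VMM_PFN_VRAM : NVKM_VMM_PFN_HOST;
		if (writable)
			v |= NVKM_VMM_PFN_W;
		return v | NVKM_VMM_PFN_V;
	}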
struct nvkm_vma *nvkm_vma_tail(struct nvkm_vma *, u64 tail);
int nv04_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *, u32,
- u64, u64, void *, u32, struct lock_class_key *,
+ bool, u64, u64, void *, u32, struct lock_class_key *,
const char *, struct nvkm_vmm **);
int nv04_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
@@ -179,70 +201,76 @@ int nv50_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
void nv50_vmm_flush(struct nvkm_vmm *, int);
int gf100_vmm_new_(const struct nvkm_vmm_func *, const struct nvkm_vmm_func *,
- struct nvkm_mmu *, u64, u64, void *, u32,
+ struct nvkm_mmu *, bool, u64, u64, void *, u32,
struct lock_class_key *, const char *, struct nvkm_vmm **);
int gf100_vmm_join_(struct nvkm_vmm *, struct nvkm_memory *, u64 base);
int gf100_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
void gf100_vmm_part(struct nvkm_vmm *, struct nvkm_memory *);
int gf100_vmm_aper(enum nvkm_memory_target);
int gf100_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
-void gf100_vmm_flush_(struct nvkm_vmm *, int);
void gf100_vmm_flush(struct nvkm_vmm *, int);
+void gf100_vmm_invalidate(struct nvkm_vmm *, u32 type);
+void gf100_vmm_invalidate_pdb(struct nvkm_vmm *, u64 addr);
int gk20a_vmm_aper(enum nvkm_memory_target);
int gm200_vmm_new_(const struct nvkm_vmm_func *, const struct nvkm_vmm_func *,
- struct nvkm_mmu *, u64, u64, void *, u32,
+ struct nvkm_mmu *, bool, u64, u64, void *, u32,
struct lock_class_key *, const char *, struct nvkm_vmm **);
int gm200_vmm_join_(struct nvkm_vmm *, struct nvkm_memory *, u64 base);
int gm200_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
+int gp100_vmm_new_(const struct nvkm_vmm_func *,
+ struct nvkm_mmu *, bool, u64, u64, void *, u32,
+ struct lock_class_key *, const char *, struct nvkm_vmm **);
int gp100_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
int gp100_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
void gp100_vmm_flush(struct nvkm_vmm *, int);
+int gp100_vmm_mthd(struct nvkm_vmm *, struct nvkm_client *, u32, void *, u32);
+void gp100_vmm_invalidate_pdb(struct nvkm_vmm *, u64 addr);
int gv100_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
-int nv04_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+int nv04_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
struct lock_class_key *, const char *, struct nvkm_vmm **);
-int nv41_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+int nv41_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
struct lock_class_key *, const char *, struct nvkm_vmm **);
-int nv44_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+int nv44_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
struct lock_class_key *, const char *, struct nvkm_vmm **);
-int nv50_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+int nv50_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
struct lock_class_key *, const char *, struct nvkm_vmm **);
-int mcp77_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+int mcp77_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
struct lock_class_key *, const char *, struct nvkm_vmm **);
-int g84_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+int g84_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
struct lock_class_key *, const char *, struct nvkm_vmm **);
-int gf100_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+int gf100_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
struct lock_class_key *, const char *, struct nvkm_vmm **);
-int gk104_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+int gk104_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
struct lock_class_key *, const char *, struct nvkm_vmm **);
-int gk20a_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+int gk20a_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
struct lock_class_key *, const char *, struct nvkm_vmm **);
-int gm200_vmm_new_fixed(struct nvkm_mmu *, u64, u64, void *, u32,
+int gm200_vmm_new_fixed(struct nvkm_mmu *, bool, u64, u64, void *, u32,
struct lock_class_key *, const char *,
struct nvkm_vmm **);
-int gm200_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+int gm200_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
struct lock_class_key *, const char *,
struct nvkm_vmm **);
-int gm20b_vmm_new_fixed(struct nvkm_mmu *, u64, u64, void *, u32,
+int gm20b_vmm_new_fixed(struct nvkm_mmu *, bool, u64, u64, void *, u32,
struct lock_class_key *, const char *,
struct nvkm_vmm **);
-int gm20b_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+int gm20b_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
struct lock_class_key *, const char *,
struct nvkm_vmm **);
-int gp100_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+int gp100_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
struct lock_class_key *, const char *,
struct nvkm_vmm **);
-int gp10b_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+int gp10b_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
struct lock_class_key *, const char *,
struct nvkm_vmm **);
-int gv100_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+int gv100_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
struct lock_class_key *, const char *,
struct nvkm_vmm **);
-int tu104_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+int tu102_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
struct lock_class_key *, const char *,
struct nvkm_vmm **);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
index faf5a7e9265e..ab6424faf84c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
@@ -178,15 +178,19 @@ gf100_vmm_desc_16_16[] = {
};
void
-gf100_vmm_flush_(struct nvkm_vmm *vmm, int depth)
+gf100_vmm_invalidate_pdb(struct nvkm_vmm *vmm, u64 addr)
+{
+ struct nvkm_device *device = vmm->mmu->subdev.device;
+ nvkm_wr32(device, 0x100cb8, addr);
+}
+
+void
+gf100_vmm_invalidate(struct nvkm_vmm *vmm, u32 type)
{
struct nvkm_subdev *subdev = &vmm->mmu->subdev;
struct nvkm_device *device = subdev->device;
- u32 type = depth << 24;
-
- type = 0x00000001; /* PAGE_ALL */
- if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
- type |= 0x00000004; /* HUB_ONLY */
+ struct nvkm_mmu_pt *pd = vmm->pd->pt[0];
+ u64 addr = 0;
mutex_lock(&subdev->mutex);
/* Looks like maybe a "free flush slots" counter, the
@@ -197,7 +201,20 @@ gf100_vmm_flush_(struct nvkm_vmm *vmm, int depth)
break;
);
- nvkm_wr32(device, 0x100cb8, vmm->pd->pt[0]->addr >> 8);
+ if (!(type & 0x00000002) /* ALL_PDB. */) {
+ switch (nvkm_memory_target(pd->memory)) {
+ case NVKM_MEM_TARGET_VRAM: addr |= 0x00000000; break;
+ case NVKM_MEM_TARGET_HOST: addr |= 0x00000002; break;
+ case NVKM_MEM_TARGET_NCOH: addr |= 0x00000003; break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+ addr |= (vmm->pd->pt[0]->addr >> 12) << 4;
+
+ vmm->func->invalidate_pdb(vmm, addr);
+ }
+
nvkm_wr32(device, 0x100cbc, 0x80000000 | type);
/* Wait for flush to be queued? */
@@ -211,7 +228,10 @@ gf100_vmm_flush_(struct nvkm_vmm *vmm, int depth)
void
gf100_vmm_flush(struct nvkm_vmm *vmm, int depth)
{
- gf100_vmm_flush_(vmm, 0);
+ u32 type = 0x00000001; /* PAGE_ALL */
+ if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
+ type |= 0x00000004; /* HUB_ONLY */
+ gf100_vmm_invalidate(vmm, type);
}
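gf100_vmm_invalidate() now builds the PDB pointer word itself before writing it through the per-generation invalidate_pdb() hook: the low bits select the page directory's aperture and the directory base is packed above them as a 4KiB frame number. A small sketch of the encoding with an assumed PD address, mirroring the switch above:

	/* Sketch, values assumed: PDB word for a page directory that
	 * lives in VRAM at offset 0x200000.
	 */
	u64 addr = 0x00000000;               /* NVKM_MEM_TARGET_VRAM */
	addr |= (0x200000ULL >> 12) << 4;    /* frame 0x200 -> 0x2000 */
	/* a host-coherent PD would start from 0x00000002 instead */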
int
@@ -354,6 +374,7 @@ gf100_vmm_17 = {
.aper = gf100_vmm_aper,
.valid = gf100_vmm_valid,
.flush = gf100_vmm_flush,
+ .invalidate_pdb = gf100_vmm_invalidate_pdb,
.page = {
{ 17, &gf100_vmm_desc_17_17[0], NVKM_VMM_PAGE_xVxC },
{ 12, &gf100_vmm_desc_17_12[0], NVKM_VMM_PAGE_xVHx },
@@ -368,6 +389,7 @@ gf100_vmm_16 = {
.aper = gf100_vmm_aper,
.valid = gf100_vmm_valid,
.flush = gf100_vmm_flush,
+ .invalidate_pdb = gf100_vmm_invalidate_pdb,
.page = {
{ 16, &gf100_vmm_desc_16_16[0], NVKM_VMM_PAGE_xVxC },
{ 12, &gf100_vmm_desc_16_12[0], NVKM_VMM_PAGE_xVHx },
@@ -378,14 +400,14 @@ gf100_vmm_16 = {
int
gf100_vmm_new_(const struct nvkm_vmm_func *func_16,
const struct nvkm_vmm_func *func_17,
- struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
- struct lock_class_key *key, const char *name,
- struct nvkm_vmm **pvmm)
+ struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+ void *argv, u32 argc, struct lock_class_key *key,
+ const char *name, struct nvkm_vmm **pvmm)
{
switch (mmu->subdev.device->fb->page) {
- case 16: return nv04_vmm_new_(func_16, mmu, 0, addr, size,
+ case 16: return nv04_vmm_new_(func_16, mmu, 0, managed, addr, size,
argv, argc, key, name, pvmm);
- case 17: return nv04_vmm_new_(func_17, mmu, 0, addr, size,
+ case 17: return nv04_vmm_new_(func_17, mmu, 0, managed, addr, size,
argv, argc, key, name, pvmm);
default:
WARN_ON(1);
@@ -394,10 +416,10 @@ gf100_vmm_new_(const struct nvkm_vmm_func *func_16,
}
int
-gf100_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
- struct lock_class_key *key, const char *name,
- struct nvkm_vmm **pvmm)
+gf100_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+ void *argv, u32 argc, struct lock_class_key *key,
+ const char *name, struct nvkm_vmm **pvmm)
{
- return gf100_vmm_new_(&gf100_vmm_16, &gf100_vmm_17, mmu, addr,
+ return gf100_vmm_new_(&gf100_vmm_16, &gf100_vmm_17, mmu, managed, addr,
size, argv, argc, key, name, pvmm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk104.c
index 0ebb7bccfcd2..0b59c01fd146 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk104.c
@@ -71,6 +71,7 @@ gk104_vmm_17 = {
.aper = gf100_vmm_aper,
.valid = gf100_vmm_valid,
.flush = gf100_vmm_flush,
+ .invalidate_pdb = gf100_vmm_invalidate_pdb,
.page = {
{ 17, &gk104_vmm_desc_17_17[0], NVKM_VMM_PAGE_xVxC },
{ 12, &gk104_vmm_desc_17_12[0], NVKM_VMM_PAGE_xVHx },
@@ -85,6 +86,7 @@ gk104_vmm_16 = {
.aper = gf100_vmm_aper,
.valid = gf100_vmm_valid,
.flush = gf100_vmm_flush,
+ .invalidate_pdb = gf100_vmm_invalidate_pdb,
.page = {
{ 16, &gk104_vmm_desc_16_16[0], NVKM_VMM_PAGE_xVxC },
{ 12, &gk104_vmm_desc_16_12[0], NVKM_VMM_PAGE_xVHx },
@@ -93,10 +95,10 @@ gk104_vmm_16 = {
};
int
-gk104_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
- struct lock_class_key *key, const char *name,
- struct nvkm_vmm **pvmm)
+gk104_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+ void *argv, u32 argc, struct lock_class_key *key,
+ const char *name, struct nvkm_vmm **pvmm)
{
- return gf100_vmm_new_(&gk104_vmm_16, &gk104_vmm_17, mmu, addr,
+ return gf100_vmm_new_(&gk104_vmm_16, &gk104_vmm_17, mmu, managed, addr,
size, argv, argc, key, name, pvmm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk20a.c
index 8086994a0446..5a9582dce970 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk20a.c
@@ -40,6 +40,7 @@ gk20a_vmm_17 = {
.aper = gf100_vmm_aper,
.valid = gf100_vmm_valid,
.flush = gf100_vmm_flush,
+ .invalidate_pdb = gf100_vmm_invalidate_pdb,
.page = {
{ 17, &gk104_vmm_desc_17_17[0], NVKM_VMM_PAGE_xxHC },
{ 12, &gk104_vmm_desc_17_12[0], NVKM_VMM_PAGE_xxHx },
@@ -54,6 +55,7 @@ gk20a_vmm_16 = {
.aper = gf100_vmm_aper,
.valid = gf100_vmm_valid,
.flush = gf100_vmm_flush,
+ .invalidate_pdb = gf100_vmm_invalidate_pdb,
.page = {
{ 16, &gk104_vmm_desc_16_16[0], NVKM_VMM_PAGE_xxHC },
{ 12, &gk104_vmm_desc_16_12[0], NVKM_VMM_PAGE_xxHx },
@@ -62,10 +64,10 @@ gk20a_vmm_16 = {
};
int
-gk20a_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
- struct lock_class_key *key, const char *name,
- struct nvkm_vmm **pvmm)
+gk20a_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+ void *argv, u32 argc, struct lock_class_key *key,
+ const char *name, struct nvkm_vmm **pvmm)
{
- return gf100_vmm_new_(&gk20a_vmm_16, &gk20a_vmm_17, mmu, addr,
+ return gf100_vmm_new_(&gk20a_vmm_16, &gk20a_vmm_17, mmu, managed, addr,
size, argv, argc, key, name, pvmm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm200.c
index a1676a4644fe..2e61af02d4d8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm200.c
@@ -113,6 +113,7 @@ gm200_vmm_17 = {
.aper = gf100_vmm_aper,
.valid = gf100_vmm_valid,
.flush = gf100_vmm_flush,
+ .invalidate_pdb = gf100_vmm_invalidate_pdb,
.page = {
{ 27, &gm200_vmm_desc_17_17[1], NVKM_VMM_PAGE_Sxxx },
{ 17, &gm200_vmm_desc_17_17[0], NVKM_VMM_PAGE_SVxC },
@@ -128,6 +129,7 @@ gm200_vmm_16 = {
.aper = gf100_vmm_aper,
.valid = gf100_vmm_valid,
.flush = gf100_vmm_flush,
+ .invalidate_pdb = gf100_vmm_invalidate_pdb,
.page = {
{ 27, &gm200_vmm_desc_16_16[1], NVKM_VMM_PAGE_Sxxx },
{ 16, &gm200_vmm_desc_16_16[0], NVKM_VMM_PAGE_SVxC },
@@ -139,9 +141,9 @@ gm200_vmm_16 = {
int
gm200_vmm_new_(const struct nvkm_vmm_func *func_16,
const struct nvkm_vmm_func *func_17,
- struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
- struct lock_class_key *key, const char *name,
- struct nvkm_vmm **pvmm)
+ struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+ void *argv, u32 argc, struct lock_class_key *key,
+ const char *name, struct nvkm_vmm **pvmm)
{
const struct nvkm_vmm_func *func;
union {
@@ -163,23 +165,23 @@ gm200_vmm_new_(const struct nvkm_vmm_func *func_16,
} else
return ret;
- return nvkm_vmm_new_(func, mmu, 0, addr, size, key, name, pvmm);
+ return nvkm_vmm_new_(func, mmu, 0, managed, addr, size, key, name, pvmm);
}
int
-gm200_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
- struct lock_class_key *key, const char *name,
- struct nvkm_vmm **pvmm)
+gm200_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+ void *argv, u32 argc, struct lock_class_key *key,
+ const char *name, struct nvkm_vmm **pvmm)
{
- return gm200_vmm_new_(&gm200_vmm_16, &gm200_vmm_17, mmu, addr,
+ return gm200_vmm_new_(&gm200_vmm_16, &gm200_vmm_17, mmu, managed, addr,
size, argv, argc, key, name, pvmm);
}
int
-gm200_vmm_new_fixed(struct nvkm_mmu *mmu, u64 addr, u64 size,
+gm200_vmm_new_fixed(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
void *argv, u32 argc, struct lock_class_key *key,
const char *name, struct nvkm_vmm **pvmm)
{
- return gf100_vmm_new_(&gm200_vmm_16, &gm200_vmm_17, mmu, addr,
+ return gf100_vmm_new_(&gm200_vmm_16, &gm200_vmm_17, mmu, managed, addr,
size, argv, argc, key, name, pvmm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm20b.c
index 64d4b6cff8dd..96b759695dd8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm20b.c
@@ -28,6 +28,7 @@ gm20b_vmm_17 = {
.aper = gk20a_vmm_aper,
.valid = gf100_vmm_valid,
.flush = gf100_vmm_flush,
+ .invalidate_pdb = gf100_vmm_invalidate_pdb,
.page = {
{ 27, &gm200_vmm_desc_17_17[1], NVKM_VMM_PAGE_Sxxx },
{ 17, &gm200_vmm_desc_17_17[0], NVKM_VMM_PAGE_SxHC },
@@ -43,6 +44,7 @@ gm20b_vmm_16 = {
.aper = gk20a_vmm_aper,
.valid = gf100_vmm_valid,
.flush = gf100_vmm_flush,
+ .invalidate_pdb = gf100_vmm_invalidate_pdb,
.page = {
{ 27, &gm200_vmm_desc_16_16[1], NVKM_VMM_PAGE_Sxxx },
{ 16, &gm200_vmm_desc_16_16[0], NVKM_VMM_PAGE_SxHC },
@@ -52,19 +54,19 @@ gm20b_vmm_16 = {
};
int
-gm20b_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
- struct lock_class_key *key, const char *name,
- struct nvkm_vmm **pvmm)
+gm20b_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+ void *argv, u32 argc, struct lock_class_key *key,
+ const char *name, struct nvkm_vmm **pvmm)
{
- return gm200_vmm_new_(&gm20b_vmm_16, &gm20b_vmm_17, mmu, addr,
+ return gm200_vmm_new_(&gm20b_vmm_16, &gm20b_vmm_17, mmu, managed, addr,
size, argv, argc, key, name, pvmm);
}
int
-gm20b_vmm_new_fixed(struct nvkm_mmu *mmu, u64 addr, u64 size,
+gm20b_vmm_new_fixed(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
void *argv, u32 argc, struct lock_class_key *key,
const char *name, struct nvkm_vmm **pvmm)
{
- return gf100_vmm_new_(&gm20b_vmm_16, &gm20b_vmm_17, mmu, addr,
+ return gf100_vmm_new_(&gm20b_vmm_16, &gm20b_vmm_17, mmu, managed, addr,
size, argv, argc, key, name, pvmm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
index 059fafe0e771..b4f519768d5e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
@@ -21,12 +21,90 @@
*/
#include "vmm.h"
+#include <core/client.h>
#include <subdev/fb.h>
#include <subdev/ltc.h>
+#include <subdev/timer.h>
+#include <engine/gr.h>
#include <nvif/ifc00d.h>
#include <nvif/unpack.h>
+static void
+gp100_vmm_pfn_unmap(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+ struct device *dev = vmm->mmu->subdev.device->dev;
+ dma_addr_t addr;
+
+ nvkm_kmap(pt->memory);
+ while (ptes--) {
+ u32 datalo = nvkm_ro32(pt->memory, pt->base + ptei * 8 + 0);
+ u32 datahi = nvkm_ro32(pt->memory, pt->base + ptei * 8 + 4);
+ u64 data = (u64)datahi << 32 | datalo;
+ if ((data & (3ULL << 1)) != 0) {
+ addr = (data >> 8) << 12;
+ dma_unmap_page(dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ }
+ ptei++;
+ }
+ nvkm_done(pt->memory);
+}
+
+static bool
+gp100_vmm_pfn_clear(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+ bool dma = false;
+ nvkm_kmap(pt->memory);
+ while (ptes--) {
+ u32 datalo = nvkm_ro32(pt->memory, pt->base + ptei * 8 + 0);
+ u32 datahi = nvkm_ro32(pt->memory, pt->base + ptei * 8 + 4);
+ u64 data = (u64)datahi << 32 | datalo;
+ if ((data & BIT_ULL(0)) && (data & (3ULL << 1)) != 0) {
+ VMM_WO064(pt, vmm, ptei * 8, data & ~BIT_ULL(0));
+ dma = true;
+ }
+ ptei++;
+ }
+ nvkm_done(pt->memory);
+ return dma;
+}
+
+static void
+gp100_vmm_pgt_pfn(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+ struct device *dev = vmm->mmu->subdev.device->dev;
+ dma_addr_t addr;
+
+ nvkm_kmap(pt->memory);
+ while (ptes--) {
+ u64 data = 0;
+ if (!(*map->pfn & NVKM_VMM_PFN_W))
+ data |= BIT_ULL(6); /* RO. */
+
+ if (!(*map->pfn & NVKM_VMM_PFN_VRAM)) {
+ addr = *map->pfn >> NVKM_VMM_PFN_ADDR_SHIFT;
+ addr = dma_map_page(dev, pfn_to_page(addr), 0,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (!WARN_ON(dma_mapping_error(dev, addr))) {
+ data |= addr >> 4;
+ data |= 2ULL << 1; /* SYSTEM_COHERENT_MEMORY. */
+ data |= BIT_ULL(3); /* VOL. */
+ data |= BIT_ULL(0); /* VALID. */
+ }
+ } else {
+ data |= (*map->pfn & NVKM_VMM_PFN_ADDR) >> 4;
+ data |= BIT_ULL(0); /* VALID. */
+ }
+
+ VMM_WO064(pt, vmm, ptei++ * 8, data);
+ map->pfn++;
+ }
+ nvkm_done(pt->memory);
+}
+
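In the pfn path above, the DMA address is stored into the PTE shifted right by four, which puts the 4KiB frame number at bit 8; gp100_vmm_pfn_unmap() recovers it with the matching (data >> 8) << 12. A tiny round-trip sketch with an assumed address:

	/* Assumed example address: 0xabcde000. */
	dma_addr_t dma = 0xabcde000;
	u64 data = dma >> 4;                  /* frame number lands at bit 8 */
	dma_addr_t back = (data >> 8) << 12;  /* 0xabcde000 again */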
static inline void
gp100_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
@@ -89,6 +167,9 @@ gp100_vmm_desc_spt = {
.mem = gp100_vmm_pgt_mem,
.dma = gp100_vmm_pgt_dma,
.sgl = gp100_vmm_pgt_sgl,
+ .pfn = gp100_vmm_pgt_pfn,
+ .pfn_clear = gp100_vmm_pfn_clear,
+ .pfn_unmap = gp100_vmm_pfn_unmap,
};
static void
@@ -306,16 +387,100 @@ gp100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
return 0;
}
+static int
+gp100_vmm_fault_cancel(struct nvkm_vmm *vmm, void *argv, u32 argc)
+{
+ struct nvkm_device *device = vmm->mmu->subdev.device;
+ union {
+ struct gp100_vmm_fault_cancel_v0 v0;
+ } *args = argv;
+ int ret = -ENOSYS;
+ u32 inst, aper;
+
+ if ((ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false)))
+ return ret;
+
+ /* Translate MaxwellFaultBufferA instance pointer to the same
+ * format as the NV_GR_FECS_CURRENT_CTX register.
+ */
+ aper = (args->v0.inst >> 8) & 3;
+ args->v0.inst >>= 12;
+ args->v0.inst |= aper << 28;
+ args->v0.inst |= 0x80000000;
+
+ if (!WARN_ON(nvkm_gr_ctxsw_pause(device))) {
+ if ((inst = nvkm_gr_ctxsw_inst(device)) == args->v0.inst) {
+ gf100_vmm_invalidate(vmm, 0x0000001b
+ /* CANCEL_TARGETED. */ |
+ (args->v0.hub << 20) |
+ (args->v0.gpc << 15) |
+ (args->v0.client << 9));
+ }
+ WARN_ON(nvkm_gr_ctxsw_resume(device));
+ }
+
+ return 0;
+}
+
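The cancel path rewrites the MaxwellFaultBufferA instance pointer into NV_GR_FECS_CURRENT_CTX form: the aperture field moves from bits [9:8] to bits [29:28], the address collapses to a 4KiB frame number, and bit 31 flags the value as valid. A worked example with an assumed input:

	/* Assumed instance pointer: address 0x1f000000, aperture 2
	 * (host coherent) in bits [9:8].
	 */
	u64 inst = 0x1f000000 | (2 << 8);  /* 0x1f000200 */
	u32 aper = (inst >> 8) & 3;        /* 2 */
	inst >>= 12;                       /* 0x1f000, frame number */
	inst |= (u64)aper << 28;           /* 0x2001f000 */
	inst |= 0x80000000;                /* 0xa001f000, CURRENT_CTX form */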
+static int
+gp100_vmm_fault_replay(struct nvkm_vmm *vmm, void *argv, u32 argc)
+{
+ union {
+ struct gp100_vmm_fault_replay_vn vn;
+ } *args = argv;
+ int ret = -ENOSYS;
+
+ if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
+ gf100_vmm_invalidate(vmm, 0x0000000b); /* REPLAY_GLOBAL. */
+ }
+
+ return ret;
+}
+
+int
+gp100_vmm_mthd(struct nvkm_vmm *vmm,
+ struct nvkm_client *client, u32 mthd, void *argv, u32 argc)
+{
+ if (client->super) {
+ switch (mthd) {
+ case GP100_VMM_VN_FAULT_REPLAY:
+ return gp100_vmm_fault_replay(vmm, argv, argc);
+ case GP100_VMM_VN_FAULT_CANCEL:
+ return gp100_vmm_fault_cancel(vmm, argv, argc);
+ default:
+ break;
+ }
+ }
+ return -EINVAL;
+}
+
+void
+gp100_vmm_invalidate_pdb(struct nvkm_vmm *vmm, u64 addr)
+{
+ struct nvkm_device *device = vmm->mmu->subdev.device;
+ nvkm_wr32(device, 0x100cb8, lower_32_bits(addr));
+ nvkm_wr32(device, 0x100cec, upper_32_bits(addr));
+}
+
void
gp100_vmm_flush(struct nvkm_vmm *vmm, int depth)
{
- gf100_vmm_flush_(vmm, 5 /* CACHE_LEVEL_UP_TO_PDE3 */ - depth);
+ u32 type = (5 /* CACHE_LEVEL_UP_TO_PDE3 */ - depth) << 24;
+ type = 0; /*XXX: need to confirm stuff works with depth enabled... */
+ if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
+ type |= 0x00000004; /* HUB_ONLY */
+ type |= 0x00000001; /* PAGE_ALL */
+ gf100_vmm_invalidate(vmm, type);
}
int
gp100_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
- const u64 base = BIT_ULL(10) /* VER2 */ | BIT_ULL(11); /* 64KiB */
+ u64 base = BIT_ULL(10) /* VER2 */ | BIT_ULL(11) /* 64KiB */;
+ if (vmm->replay) {
+ base |= BIT_ULL(4); /* FAULT_REPLAY_TEX */
+ base |= BIT_ULL(5); /* FAULT_REPLAY_GCC */
+ }
return gf100_vmm_join_(vmm, inst, base);
}
@@ -326,6 +491,8 @@ gp100_vmm = {
.aper = gf100_vmm_aper,
.valid = gp100_vmm_valid,
.flush = gp100_vmm_flush,
+ .mthd = gp100_vmm_mthd,
+ .invalidate_pdb = gp100_vmm_invalidate_pdb,
.page = {
{ 47, &gp100_vmm_desc_16[4], NVKM_VMM_PAGE_Sxxx },
{ 38, &gp100_vmm_desc_16[3], NVKM_VMM_PAGE_Sxxx },
@@ -338,10 +505,39 @@ gp100_vmm = {
};
int
-gp100_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
- struct lock_class_key *key, const char *name,
- struct nvkm_vmm **pvmm)
+gp100_vmm_new_(const struct nvkm_vmm_func *func,
+ struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+ void *argv, u32 argc, struct lock_class_key *key,
+ const char *name, struct nvkm_vmm **pvmm)
+{
+ union {
+ struct gp100_vmm_vn vn;
+ struct gp100_vmm_v0 v0;
+ } *args = argv;
+ int ret = -ENOSYS;
+ bool replay;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ replay = args->v0.fault_replay != 0;
+ } else
+ if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
+ replay = false;
+ } else
+ return ret;
+
+ ret = nvkm_vmm_new_(func, mmu, 0, managed, addr, size, key, name, pvmm);
+ if (ret)
+ return ret;
+
+ (*pvmm)->replay = replay;
+ return 0;
+}
+
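gp100_vmm_new_() gives every GP100-and-later backend one constructor that optionally unpacks a gp100_vmm_v0 argument block; setting fault_replay there is what later makes gp100_vmm_join() turn on the FAULT_REPLAY_TEX/GCC bits. A hedged sketch of what a client might pass, assuming the usual nvif layout of a leading version byte:

	/* Illustrative argument block requesting replayable faults. */
	struct gp100_vmm_v0 args = {
		.version = 0,
		.fault_replay = 1,   /* sets vmm->replay at creation */
	};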
+int
+gp100_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+ void *argv, u32 argc, struct lock_class_key *key,
+ const char *name, struct nvkm_vmm **pvmm)
{
- return nv04_vmm_new_(&gp100_vmm, mmu, 0, addr, size,
- argv, argc, key, name, pvmm);
+ return gp100_vmm_new_(&gp100_vmm, mmu, managed, addr, size,
+ argv, argc, key, name, pvmm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp10b.c
index 3dcc6bddb32f..e081239afe58 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp10b.c
@@ -28,6 +28,8 @@ gp10b_vmm = {
.aper = gk20a_vmm_aper,
.valid = gp100_vmm_valid,
.flush = gp100_vmm_flush,
+ .mthd = gp100_vmm_mthd,
+ .invalidate_pdb = gp100_vmm_invalidate_pdb,
.page = {
{ 47, &gp100_vmm_desc_16[4], NVKM_VMM_PAGE_Sxxx },
{ 38, &gp100_vmm_desc_16[3], NVKM_VMM_PAGE_Sxxx },
@@ -40,10 +42,10 @@ gp10b_vmm = {
};
int
-gp10b_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
- struct lock_class_key *key, const char *name,
- struct nvkm_vmm **pvmm)
+gp10b_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+ void *argv, u32 argc, struct lock_class_key *key,
+ const char *name, struct nvkm_vmm **pvmm)
{
- return nv04_vmm_new_(&gp10b_vmm, mmu, 0, addr, size,
- argv, argc, key, name, pvmm);
+ return gp100_vmm_new_(&gp10b_vmm, mmu, managed, addr, size,
+ argv, argc, key, name, pvmm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgv100.c
index 2fa40c16e6d2..f0e21f63253a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgv100.c
@@ -66,6 +66,8 @@ gv100_vmm = {
.aper = gf100_vmm_aper,
.valid = gp100_vmm_valid,
.flush = gp100_vmm_flush,
+ .mthd = gp100_vmm_mthd,
+ .invalidate_pdb = gp100_vmm_invalidate_pdb,
.page = {
{ 47, &gp100_vmm_desc_16[4], NVKM_VMM_PAGE_Sxxx },
{ 38, &gp100_vmm_desc_16[3], NVKM_VMM_PAGE_Sxxx },
@@ -78,10 +80,10 @@ gv100_vmm = {
};
int
-gv100_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
- struct lock_class_key *key, const char *name,
- struct nvkm_vmm **pvmm)
+gv100_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+ void *argv, u32 argc, struct lock_class_key *key,
+ const char *name, struct nvkm_vmm **pvmm)
{
- return nv04_vmm_new_(&gv100_vmm, mmu, 0, addr, size,
- argv, argc, key, name, pvmm);
+ return gp100_vmm_new_(&gv100_vmm, mmu, managed, addr, size,
+ argv, argc, key, name, pvmm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmmcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmmcp77.c
index e63d984cbfd4..bdddd99f5877 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmmcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmmcp77.c
@@ -36,10 +36,10 @@ mcp77_vmm = {
};
int
-mcp77_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
- struct lock_class_key *key, const char *name,
- struct nvkm_vmm **pvmm)
+mcp77_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+ void *argv, u32 argc, struct lock_class_key *key,
+ const char *name, struct nvkm_vmm **pvmm)
{
- return nv04_vmm_new_(&mcp77_vmm, mmu, 0, addr, size,
+ return nv04_vmm_new_(&mcp77_vmm, mmu, 0, managed, addr, size,
argv, argc, key, name, pvmm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c
index 0cab1ffc9f64..4c6b3b7d221f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c
@@ -100,16 +100,17 @@ nv04_vmm = {
int
nv04_vmm_new_(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
- u32 pd_header, u64 addr, u64 size, void *argv, u32 argc,
- struct lock_class_key *key, const char *name,
- struct nvkm_vmm **pvmm)
+ u32 pd_header, bool managed, u64 addr, u64 size,
+ void *argv, u32 argc, struct lock_class_key *key,
+ const char *name, struct nvkm_vmm **pvmm)
{
union {
struct nv04_vmm_vn vn;
} *args = argv;
int ret;
- ret = nvkm_vmm_new_(func, mmu, pd_header, addr, size, key, name, pvmm);
+ ret = nvkm_vmm_new_(func, mmu, pd_header, managed, addr, size,
+ key, name, pvmm);
if (ret)
return ret;
@@ -117,15 +118,15 @@ nv04_vmm_new_(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
}
int
-nv04_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
- struct lock_class_key *key, const char *name,
+nv04_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+ void *argv, u32 argc, struct lock_class_key *key, const char *name,
struct nvkm_vmm **pvmm)
{
struct nvkm_memory *mem;
struct nvkm_vmm *vmm;
int ret;
- ret = nv04_vmm_new_(&nv04_vmm, mmu, 8, addr, size,
+ ret = nv04_vmm_new_(&nv04_vmm, mmu, 8, managed, addr, size,
argv, argc, key, name, &vmm);
*pvmm = vmm;
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c
index b595f130e573..1d3369683a21 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c
@@ -104,10 +104,10 @@ nv41_vmm = {
};
int
-nv41_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
- struct lock_class_key *key, const char *name,
+nv41_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+ void *argv, u32 argc, struct lock_class_key *key, const char *name,
struct nvkm_vmm **pvmm)
{
- return nv04_vmm_new_(&nv41_vmm, mmu, 0, addr, size,
+ return nv04_vmm_new_(&nv41_vmm, mmu, 0, managed, addr, size,
argv, argc, key, name, pvmm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c
index b834e4352334..a82936ba9890 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c
@@ -205,15 +205,15 @@ nv44_vmm = {
};
int
-nv44_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
- struct lock_class_key *key, const char *name,
+nv44_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+ void *argv, u32 argc, struct lock_class_key *key, const char *name,
struct nvkm_vmm **pvmm)
{
struct nvkm_subdev *subdev = &mmu->subdev;
struct nvkm_vmm *vmm;
int ret;
- ret = nv04_vmm_new_(&nv44_vmm, mmu, 0, addr, size,
+ ret = nv04_vmm_new_(&nv44_vmm, mmu, 0, managed, addr, size,
argv, argc, key, name, &vmm);
*pvmm = vmm;
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
index 64f75d906202..c98afe3134ee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
@@ -376,10 +376,10 @@ nv50_vmm = {
};
int
-nv50_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
- struct lock_class_key *key, const char *name,
+nv50_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+ void *argv, u32 argc, struct lock_class_key *key, const char *name,
struct nvkm_vmm **pvmm)
{
- return nv04_vmm_new_(&nv50_vmm, mmu, 0, addr, size,
+ return nv04_vmm_new_(&nv50_vmm, mmu, 0, managed, addr, size,
argv, argc, key, name, pvmm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
index adaadd92110f..be91cffc3b52 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
@@ -24,7 +24,7 @@
#include <subdev/timer.h>
static void
-tu104_vmm_flush(struct nvkm_vmm *vmm, int depth)
+tu102_vmm_flush(struct nvkm_vmm *vmm, int depth)
{
struct nvkm_subdev *subdev = &vmm->mmu->subdev;
struct nvkm_device *device = subdev->device;
@@ -50,12 +50,13 @@ tu104_vmm_flush(struct nvkm_vmm *vmm, int depth)
}
static const struct nvkm_vmm_func
-tu104_vmm = {
+tu102_vmm = {
.join = gv100_vmm_join,
.part = gf100_vmm_part,
.aper = gf100_vmm_aper,
.valid = gp100_vmm_valid,
- .flush = tu104_vmm_flush,
+ .flush = tu102_vmm_flush,
+ .mthd = gp100_vmm_mthd,
.page = {
{ 47, &gp100_vmm_desc_16[4], NVKM_VMM_PAGE_Sxxx },
{ 38, &gp100_vmm_desc_16[3], NVKM_VMM_PAGE_Sxxx },
@@ -68,10 +69,10 @@ tu104_vmm = {
};
int
-tu104_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size,
+tu102_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
void *argv, u32 argc, struct lock_class_key *key,
const char *name, struct nvkm_vmm **pvmm)
{
- return nv04_vmm_new_(&tu104_vmm, mmu, 0, addr, size,
- argv, argc, key, name, pvmm);
+ return gp100_vmm_new_(&tu102_vmm, mmu, managed, addr, size,
+ argv, argc, key, name, pvmm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c
index 11b28b086a06..7b052879af72 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c
@@ -88,10 +88,10 @@ nvkm_memx_fini(struct nvkm_memx **pmemx, bool exec)
if (exec) {
nvkm_pmu_send(pmu, reply, PROC_MEMX, MEMX_MSG_EXEC,
memx->base, finish);
+ nvkm_debug(subdev, "Exec took %uns, PMU_IN %08x\n",
+ reply[0], reply[1]);
}
- nvkm_debug(subdev, "Exec took %uns, PMU_IN %08x\n",
- reply[0], reply[1]);
kfree(memx);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
index 5c14d6ac855d..1df09ed6fe6d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
@@ -853,7 +853,7 @@ acr_r352_shutdown(struct acr_r352 *acr, struct nvkm_secboot *sb)
* and the expected behavior on RM as well
*/
if (ret && ret != 0x1d) {
- nvkm_error(subdev, "HS unload failed, ret 0x%08x", ret);
+ nvkm_error(subdev, "HS unload failed, ret 0x%08x\n", ret);
return -EINVAL;
}
nvkm_debug(subdev, "HS unload blob completed\n");
@@ -922,7 +922,7 @@ acr_r352_bootstrap(struct acr_r352 *acr, struct nvkm_secboot *sb)
if (ret < 0) {
return ret;
} else if (ret > 0) {
- nvkm_error(subdev, "HS load failed, ret 0x%08x", ret);
+ nvkm_error(subdev, "HS load failed, ret 0x%08x\n", ret);
return -EINVAL;
}
nvkm_debug(subdev, "HS load blob completed\n");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c
index 67ada1d9a28c..cce6e4e90ebf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c
@@ -41,6 +41,22 @@ nvkm_top_device_new(struct nvkm_top *top)
}
u32
+nvkm_top_addr(struct nvkm_device *device, enum nvkm_devidx index)
+{
+ struct nvkm_top *top = device->top;
+ struct nvkm_top_device *info;
+
+ if (top) {
+ list_for_each_entry(info, &top->device, head) {
+ if (info->index == index)
+ return info->addr;
+ }
+ }
+
+ return 0;
+}
+
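nvkm_top_addr() exposes the PRI base address that the PTOP device table records for a unit, alongside the existing reset/fault lookups; a return of 0 means the unit is absent. A minimal usage sketch (caller context assumed):

	/* Assumed caller: find the GSP unit's register base from PTOP. */
	u32 base = nvkm_top_addr(device, NVKM_SUBDEV_GSP);
	if (!base)
		return -ENODEV;   /* not listed in the topology table */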
+u32
nvkm_top_reset(struct nvkm_device *device, enum nvkm_devidx index)
{
struct nvkm_top *top = device->top;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
index 39081eadfd84..e01746ce9fc4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
@@ -73,6 +73,7 @@ gk104_top_oneinit(struct nvkm_top *top)
#define A_(A) if (inst == 0) info->index = NVKM_ENGINE_##A
#define B_(A) if (inst + NVKM_ENGINE_##A##0 < NVKM_ENGINE_##A##_LAST + 1) \
info->index = NVKM_ENGINE_##A##0 + inst
+#define C_(A) if (inst == 0) info->index = NVKM_SUBDEV_##A
switch (type) {
case 0x00000000: A_(GR ); break;
case 0x00000001: A_(CE0 ); break;
@@ -88,6 +89,7 @@ gk104_top_oneinit(struct nvkm_top *top)
case 0x0000000f: A_(NVENC1); break;
case 0x00000010: B_(NVDEC ); break;
case 0x00000013: B_(CE ); break;
+ case 0x00000014: C_(GSP ); break;
break;
default:
break;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
index bcd179ba11d0..146adcdd316a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
@@ -2,6 +2,7 @@ nvkm-y += nvkm/subdev/volt/base.o
nvkm-y += nvkm/subdev/volt/gpio.o
nvkm-y += nvkm/subdev/volt/nv40.o
nvkm-y += nvkm/subdev/volt/gf100.o
+nvkm-y += nvkm/subdev/volt/gf117.o
nvkm-y += nvkm/subdev/volt/gk104.o
nvkm-y += nvkm/subdev/volt/gk20a.o
nvkm-y += nvkm/subdev/volt/gm20b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf117.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf117.c
new file mode 100644
index 000000000000..547a58f0aeac
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf117.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2019 Ilia Mirkin
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ilia Mirkin
+ */
+#include "priv.h"
+
+#include <subdev/fuse.h>
+
+static int
+gf117_volt_speedo_read(struct nvkm_volt *volt)
+{
+ struct nvkm_device *device = volt->subdev.device;
+ struct nvkm_fuse *fuse = device->fuse;
+
+ if (!fuse)
+ return -EINVAL;
+
+ return nvkm_fuse_read(fuse, 0x3a8);
+}
+
+static const struct nvkm_volt_func
+gf117_volt = {
+ .oneinit = gf100_volt_oneinit,
+ .vid_get = nvkm_voltgpio_get,
+ .vid_set = nvkm_voltgpio_set,
+ .speedo_read = gf117_volt_speedo_read,
+};
+
+int
+gf117_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
+{
+ struct nvkm_volt *volt;
+ int ret;
+
+ ret = nvkm_volt_new_(&gf117_volt, device, index, &volt);
+ *pvolt = volt;
+ if (ret)
+ return ret;
+
+ return nvkm_voltgpio_init(volt);
+}
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index a97294ac96d5..a12439266bb0 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -4869,10 +4869,12 @@ static void ci_request_link_speed_change_before_state_change(struct radeon_devic
pi->force_pcie_gen = RADEON_PCIE_GEN2;
if (current_link_speed == RADEON_PCIE_GEN2)
break;
+ /* fall through */
case RADEON_PCIE_GEN2:
if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
break;
#endif
+ /* fall through */
default:
pi->force_pcie_gen = ci_get_current_pcie_speed(rdev);
break;
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index f471537c852f..1e14c6921454 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -1299,6 +1299,7 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
return -EINVAL;
}
ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+ break;
case CB_TARGET_MASK:
track->cb_target_mask = radeon_get_ib_value(p, idx);
track->cb_dirty = true;
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index dec1e081f529..6a8fb6fd183c 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -172,6 +172,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
}
if (radeon_is_px(dev)) {
+ dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
pm_runtime_use_autosuspend(dev->dev);
pm_runtime_set_autosuspend_delay(dev->dev, 5000);
pm_runtime_set_active(dev->dev);
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 0a785ef0ab66..c9f6cb77e857 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -5762,10 +5762,12 @@ static void si_request_link_speed_change_before_state_change(struct radeon_devic
si_pi->force_pcie_gen = RADEON_PCIE_GEN2;
if (current_link_speed == RADEON_PCIE_GEN2)
break;
+ /* fall through */
case RADEON_PCIE_GEN2:
if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
break;
#endif
+ /* fall through */
default:
si_pi->force_pcie_gen = si_get_current_pcie_speed(rdev);
break;
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index e2942c9a11a7..35ddbec1375a 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -52,12 +52,12 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
{
int i;
- if (!(entity && rq_list && num_rq_list > 0 && rq_list[0]))
+ if (!(entity && rq_list && (num_rq_list == 0 || rq_list[0])))
return -EINVAL;
memset(entity, 0, sizeof(struct drm_sched_entity));
INIT_LIST_HEAD(&entity->list);
- entity->rq = rq_list[0];
+ entity->rq = NULL;
entity->guilty = guilty;
entity->num_rq_list = num_rq_list;
entity->rq_list = kcalloc(num_rq_list, sizeof(struct drm_sched_rq *),
@@ -67,6 +67,10 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
for (i = 0; i < num_rq_list; ++i)
entity->rq_list[i] = rq_list[i];
+
+ if (num_rq_list)
+ entity->rq = rq_list[0];
+
entity->last_scheduled = NULL;
spin_lock_init(&entity->rq_lock);
@@ -165,6 +169,9 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
struct task_struct *last_user;
long ret = timeout;
+ if (!entity->rq)
+ return 0;
+
sched = entity->rq->sched;
/**
* The client will not queue more IBs during this fini, consume existing
@@ -264,20 +271,24 @@ static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
*/
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
- struct drm_gpu_scheduler *sched;
+ struct drm_gpu_scheduler *sched = NULL;
- sched = entity->rq->sched;
- drm_sched_rq_remove_entity(entity->rq, entity);
+ if (entity->rq) {
+ sched = entity->rq->sched;
+ drm_sched_rq_remove_entity(entity->rq, entity);
+ }
/* Consumption of existing IBs wasn't completed. Forcefully
* remove them here.
*/
if (spsc_queue_peek(&entity->job_queue)) {
- /* Park the kernel for a moment to make sure it isn't processing
- * our enity.
- */
- kthread_park(sched->thread);
- kthread_unpark(sched->thread);
+ if (sched) {
+ /* Park the scheduler thread for a moment to make
+ * sure it isn't processing our entity.
+ */
+ kthread_park(sched->thread);
+ kthread_unpark(sched->thread);
+ }
if (entity->dependency) {
dma_fence_remove_callback(entity->dependency,
&entity->cb);
@@ -362,9 +373,11 @@ void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
for (i = 0; i < entity->num_rq_list; ++i)
drm_sched_entity_set_rq_priority(&entity->rq_list[i], priority);
- drm_sched_rq_remove_entity(entity->rq, entity);
- drm_sched_entity_set_rq_priority(&entity->rq, priority);
- drm_sched_rq_add_entity(entity->rq, entity);
+ if (entity->rq) {
+ drm_sched_rq_remove_entity(entity->rq, entity);
+ drm_sched_entity_set_rq_priority(&entity->rq, priority);
+ drm_sched_rq_add_entity(entity->rq, entity);
+ }
spin_unlock(&entity->rq_lock);
}
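Together these checks make an entity with zero run queues legal: it comes up with entity->rq == NULL, and flush, fini and set_priority all degrade to no-ops instead of dereferencing a null rq. A sketch of the now-permitted usage, with illustrative values:

	/* Illustrative: init with a non-NULL list but zero entries;
	 * entity.rq stays NULL and teardown is safe.
	 */
	struct drm_sched_rq *rq_list[] = { NULL };
	struct drm_sched_entity entity;
	int ret = drm_sched_entity_init(&entity, rq_list, 0, NULL);
	if (!ret)
		drm_sched_entity_fini(&entity);  /* no rq: nothing to remove */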
diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/ipu-v3/ipu-pre.c
index 4a28f3fbb0a2..6cacfd61d984 100644
--- a/drivers/gpu/ipu-v3/ipu-pre.c
+++ b/drivers/gpu/ipu-v3/ipu-pre.c
@@ -265,6 +265,12 @@ void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr)
writel(IPU_PRE_CTRL_SDW_UPDATE, pre->regs + IPU_PRE_CTRL_SET);
}
+bool ipu_pre_update_pending(struct ipu_pre *pre)
+{
+ return !!(readl_relaxed(pre->regs + IPU_PRE_CTRL) &
+ IPU_PRE_CTRL_SDW_UPDATE);
+}
+
u32 ipu_pre_get_baddr(struct ipu_pre *pre)
{
return (u32)pre->buffer_paddr;
diff --git a/drivers/gpu/ipu-v3/ipu-prg.c b/drivers/gpu/ipu-v3/ipu-prg.c
index 38a3a9764e49..94b76badf677 100644
--- a/drivers/gpu/ipu-v3/ipu-prg.c
+++ b/drivers/gpu/ipu-v3/ipu-prg.c
@@ -347,6 +347,22 @@ int ipu_prg_channel_configure(struct ipuv3_channel *ipu_chan,
}
EXPORT_SYMBOL_GPL(ipu_prg_channel_configure);
+bool ipu_prg_channel_configure_pending(struct ipuv3_channel *ipu_chan)
+{
+ int prg_chan = ipu_prg_ipu_to_prg_chan(ipu_chan->num);
+ struct ipu_prg *prg = ipu_chan->ipu->prg_priv;
+ struct ipu_prg_channel *chan;
+
+ if (prg_chan < 0)
+ return false;
+
+ chan = &prg->chan[prg_chan];
+ WARN_ON(!chan->enabled);
+
+ return ipu_pre_update_pending(prg->pres[chan->used_pre]);
+}
+EXPORT_SYMBOL_GPL(ipu_prg_channel_configure_pending);
+
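The *_pending() helpers report whether a queued shadow-register update has latched yet, letting the DRM side decide when a flip has really taken effect. A hedged sketch of a polling caller (ipu_chan, dev and the timeout policy are assumed):

	/* Illustrative wait for the PRE to consume the queued update. */
	int timeout = 1000;
	while (ipu_prg_channel_configure_pending(ipu_chan) && --timeout)
		udelay(1);
	if (!timeout)
		dev_warn(dev, "PRE shadow update never latched\n");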
static int ipu_prg_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
diff --git a/drivers/gpu/ipu-v3/ipu-prv.h b/drivers/gpu/ipu-v3/ipu-prv.h
index d6beee99b6b8..38622e835e95 100644
--- a/drivers/gpu/ipu-v3/ipu-prv.h
+++ b/drivers/gpu/ipu-v3/ipu-prv.h
@@ -272,6 +272,7 @@ void ipu_pre_configure(struct ipu_pre *pre, unsigned int width,
unsigned int height, unsigned int stride, u32 format,
uint64_t modifier, unsigned int bufaddr);
void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr);
+bool ipu_pre_update_pending(struct ipu_pre *pre);
struct ipu_prg *ipu_prg_lookup_by_phandle(struct device *dev, const char *name,
int ipu_id);
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index c13c0ba30f63..d499cd61c0e8 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -783,6 +783,7 @@ void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
static int c4iw_rdev_open(struct c4iw_rdev *rdev)
{
int err;
+ unsigned int factor;
c4iw_init_dev_ucontext(rdev, &rdev->uctx);
@@ -806,8 +807,18 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
return -EINVAL;
}
- rdev->qpmask = rdev->lldi.udb_density - 1;
- rdev->cqmask = rdev->lldi.ucq_density - 1;
+ /* This implementation requires an sge_host_page_size <= PAGE_SIZE. */
+ if (rdev->lldi.sge_host_page_size > PAGE_SIZE) {
+ pr_err("%s: unsupported sge host page size %u\n",
+ pci_name(rdev->lldi.pdev),
+ rdev->lldi.sge_host_page_size);
+ return -EINVAL;
+ }
+
+ factor = PAGE_SIZE / rdev->lldi.sge_host_page_size;
+ rdev->qpmask = (rdev->lldi.udb_density * factor) - 1;
+ rdev->cqmask = (rdev->lldi.ucq_density * factor) - 1;
+
pr_debug("dev %s stag start 0x%0x size 0x%0x num stags %d pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x qp qid start %u size %u cq qid start %u size %u srq size %u\n",
pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
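The factor scaling keeps the user doorbell masks sized to host pages rather than adapter pages. A worked example with assumed values: on a 64KiB-page host whose adapter lays doorbells out in 4KiB pages, one host page spans sixteen adapter pages:

	/* Assumed: PAGE_SIZE = 64KiB, sge_host_page_size = 4KiB,
	 * udb_density = 2 QPs per adapter page.
	 */
	unsigned int factor = 65536 / 4096;   /* 16 */
	u32 qpmask = (2 * factor) - 1;        /* 31: 32 QPs per host page */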
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 31d91538bbf4..694324b37480 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -3032,7 +3032,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
{
struct srp_target_port *target = host_to_target(scmnd->device->host);
struct srp_rdma_ch *ch;
- int i, j;
u8 status;
shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
@@ -3044,15 +3043,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
if (status)
return FAILED;
- for (i = 0; i < target->ch_count; i++) {
- ch = &target->ch[i];
- for (j = 0; j < target->req_ring_size; ++j) {
- struct srp_request *req = &ch->req_ring[j];
-
- srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
- }
- }
-
return SUCCESS;
}
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index dc9f14811e0f..58dc70bffd5b 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -144,7 +144,7 @@ dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
for (tmp = dev; tmp; tmp = tmp->bus->self)
level++;
- size = sizeof(*info) + level * sizeof(struct acpi_dmar_pci_path);
+ size = sizeof(*info) + level * sizeof(info->path[0]);
if (size <= sizeof(dmar_pci_notify_info_buf)) {
info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
} else {
diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c
index d713271ebf7c..a64116586b4c 100644
--- a/drivers/mailbox/bcm-flexrm-mailbox.c
+++ b/drivers/mailbox/bcm-flexrm-mailbox.c
@@ -1396,9 +1396,9 @@ static void flexrm_shutdown(struct mbox_chan *chan)
/* Clear ring flush state */
timeout = 1000; /* timeout of 1s */
- writel_relaxed(0x0, ring + RING_CONTROL);
+ writel_relaxed(0x0, ring->regs + RING_CONTROL);
do {
- if (!(readl_relaxed(ring + RING_FLUSH_DONE) &
+ if (!(readl_relaxed(ring->regs + RING_FLUSH_DONE) &
FLUSH_DONE_MASK))
break;
mdelay(1);
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index c6a7d4582dc6..38d9df3fb199 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -310,6 +310,7 @@ int mbox_flush(struct mbox_chan *chan, unsigned long timeout)
return ret;
}
+EXPORT_SYMBOL_GPL(mbox_flush);
/**
* mbox_request_channel - Request a mailbox channel.
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 14f3fdb8c6bb..9ce8eb51a60f 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -2380,12 +2380,6 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
"mmcblk%u%s", card->host->index, subname ? subname : "");
- if (mmc_card_mmc(card))
- blk_queue_logical_block_size(md->queue.queue,
- card->ext_csd.data_sector_size);
- else
- blk_queue_logical_block_size(md->queue.queue, 512);
-
set_capacity(md->disk, size);
if (mmc_host_cmd23(card->host)) {
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 5bd58b95d318..b27a1e620233 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -95,7 +95,7 @@ static void mmc_should_fail_request(struct mmc_host *host,
if (!data)
return;
- if (cmd->error || data->error ||
+ if ((cmd && cmd->error) || data->error ||
!should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
return;
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 35cc138b096d..15a45ec6518d 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -355,6 +355,7 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
{
struct mmc_host *host = card->host;
u64 limit = BLK_BOUNCE_HIGH;
+ unsigned block_size = 512;
if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
@@ -368,7 +369,13 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
blk_queue_max_hw_sectors(mq->queue,
min(host->max_blk_count, host->max_req_size / 512));
blk_queue_max_segments(mq->queue, host->max_segs);
- blk_queue_max_segment_size(mq->queue, host->max_seg_size);
+
+ if (mmc_card_mmc(card))
+ block_size = card->ext_csd.data_sector_size;
+
+ blk_queue_logical_block_size(mq->queue, block_size);
+ blk_queue_max_segment_size(mq->queue,
+ round_down(host->max_seg_size, block_size));
INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);
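Rounding the maximum segment size down to a multiple of the logical block size guarantees that no DMA segment can end mid-sector, which is what the relocated blk_queue_logical_block_size() call now makes matter for 4KiB-sector eMMC. A worked example with assumed host limits:

	/* Assumed: host max_seg_size = 65535, card reports 4KiB sectors. */
	unsigned int block_size = 4096;
	unsigned int max_seg = round_down(65535, block_size);  /* 61440 */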
diff --git a/drivers/mmc/host/cqhci.c b/drivers/mmc/host/cqhci.c
index 159270e947cf..a8af682a9182 100644
--- a/drivers/mmc/host/cqhci.c
+++ b/drivers/mmc/host/cqhci.c
@@ -201,7 +201,7 @@ static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots;
cq_host->data_size = cq_host->trans_desc_len * cq_host->mmc->max_segs *
- (cq_host->num_slots - 1);
+ cq_host->mmc->cqe_qdepth;
pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n",
mmc_hostname(cq_host->mmc), cq_host->desc_size, cq_host->data_size,
@@ -217,12 +217,21 @@ static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
cq_host->desc_size,
&cq_host->desc_dma_base,
GFP_KERNEL);
+ if (!cq_host->desc_base)
+ return -ENOMEM;
+
cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
cq_host->data_size,
&cq_host->trans_desc_dma_base,
GFP_KERNEL);
- if (!cq_host->desc_base || !cq_host->trans_desc_base)
+ if (!cq_host->trans_desc_base) {
+ dmam_free_coherent(mmc_dev(cq_host->mmc), cq_host->desc_size,
+ cq_host->desc_base,
+ cq_host->desc_dma_base);
+ cq_host->desc_base = NULL;
+ cq_host->desc_dma_base = 0;
return -ENOMEM;
+ }
pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n",
mmc_hostname(cq_host->mmc), cq_host->desc_base, cq_host->trans_desc_base,
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 10ba46b728e8..8ade14fb2148 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1450,6 +1450,7 @@ static int mmc_spi_probe(struct spi_device *spi)
mmc->caps &= ~MMC_CAP_NEEDS_POLL;
mmc_gpiod_request_cd_irq(mmc);
}
+ mmc_detect_change(mmc, 0);
/* Index 1 is write protect/read only */
status = mmc_gpiod_request_ro(mmc, NULL, 1, false, 0, NULL);
diff --git a/drivers/mmc/host/renesas_sdhi_sys_dmac.c b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
index 8471160316e0..02cd878e209f 100644
--- a/drivers/mmc/host/renesas_sdhi_sys_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
@@ -65,6 +65,7 @@ static const struct renesas_sdhi_of_data of_rcar_gen2_compatible = {
.scc_offset = 0x0300,
.taps = rcar_gen2_scc_taps,
.taps_num = ARRAY_SIZE(rcar_gen2_scc_taps),
+ .max_blk_count = 0xffffffff,
};
/* Definitions for sampling clocks */
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index d0d319398a54..00d41b312c79 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -1095,11 +1095,12 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
writel(readl(host->ioaddr + SDHCI_HOST_CONTROL)
| ESDHC_BURST_LEN_EN_INCR,
host->ioaddr + SDHCI_HOST_CONTROL);
+
/*
- * erratum ESDHC_FLAG_ERR004536 fix for MX6Q TO1.2 and MX6DL
- * TO1.1, it's harmless for MX6SL
- */
- writel(readl(host->ioaddr + 0x6c) | BIT(7),
+ * erratum ESDHC_FLAG_ERR004536 fix for MX6Q TO1.2 and MX6DL
+ * TO1.1, it's harmless for MX6SL
+ */
+ writel(readl(host->ioaddr + 0x6c) & ~BIT(7),
host->ioaddr + 0x6c);
/* disable DLL_CTRL delay line settings */
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index c03529e3f01a..2adb0d24360f 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -277,6 +277,11 @@ static inline void sd_ctrl_write32_as_16_and_16(struct tmio_mmc_host *host,
iowrite16(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
}
+static inline void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val)
+{
+ iowrite32(val, host->ctl + (addr << host->bus_shift));
+}
+
static inline void sd_ctrl_write32_rep(struct tmio_mmc_host *host, int addr,
const u32 *buf, int count)
{
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index 085a0fab769c..f7a6f005899a 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -43,6 +43,7 @@
#include <linux/regulator/consumer.h>
#include <linux/mmc/sdio.h>
#include <linux/scatterlist.h>
+#include <linux/sizes.h>
#include <linux/spinlock.h>
#include <linux/swiotlb.h>
#include <linux/workqueue.h>
@@ -629,7 +630,7 @@ static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host, int ireg,
return false;
}
-static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
+static bool __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
{
struct mmc_host *mmc = host->mmc;
struct tmio_mmc_data *pdata = host->pdata;
@@ -637,7 +638,7 @@ static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
unsigned int sdio_status;
if (!(pdata->flags & TMIO_MMC_SDIO_IRQ))
- return;
+ return false;
status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdio_irq_mask;
@@ -650,6 +651,8 @@ static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ)
mmc_signal_sdio_irq(mmc);
+
+ return ireg;
}
irqreturn_t tmio_mmc_irq(int irq, void *devid)
@@ -668,9 +671,10 @@ irqreturn_t tmio_mmc_irq(int irq, void *devid)
if (__tmio_mmc_sdcard_irq(host, ireg, status))
return IRQ_HANDLED;
- __tmio_mmc_sdio_irq(host);
+ if (__tmio_mmc_sdio_irq(host))
+ return IRQ_HANDLED;
- return IRQ_HANDLED;
+ return IRQ_NONE;
}
EXPORT_SYMBOL_GPL(tmio_mmc_irq);
@@ -700,7 +704,10 @@ static int tmio_mmc_start_data(struct tmio_mmc_host *host,
/* Set transfer length / blocksize */
sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
- sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);
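+ /* Hosts advertising a block count of 64K or more are assumed to
+ * implement the 32-bit CTL_XFER_BLK_COUNT register; otherwise keep
+ * the 16-bit write.
+ */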
+ if (host->mmc->max_blk_count >= SZ_64K)
+ sd_ctrl_write32(host, CTL_XFER_BLK_COUNT, data->blocks);
+ else
+ sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);
tmio_mmc_start_dma(host, data);
diff --git a/drivers/mtd/devices/powernv_flash.c b/drivers/mtd/devices/powernv_flash.c
index 22f753e555ac..83f88b8b5d9f 100644
--- a/drivers/mtd/devices/powernv_flash.c
+++ b/drivers/mtd/devices/powernv_flash.c
@@ -212,7 +212,7 @@ static int powernv_flash_set_driver_info(struct device *dev,
* Going to have to check what details I need to set and how to
* get them
*/
- mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%pOFn", dev->of_node);
+ mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
mtd->type = MTD_NORFLASH;
mtd->flags = MTD_WRITEABLE;
mtd->size = size;
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 999b705769a8..3ef01baef9b6 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -507,6 +507,7 @@ static int mtd_nvmem_add(struct mtd_info *mtd)
{
struct nvmem_config config = {};
+ config.id = -1;
config.dev = &mtd->dev;
config.name = mtd->name;
config.owner = THIS_MODULE;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 485462d3087f..537c90c8eb0a 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1183,29 +1183,22 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
}
}
- /* Link-local multicast packets should be passed to the
- * stack on the link they arrive as well as pass them to the
- * bond-master device. These packets are mostly usable when
- * stack receives it with the link on which they arrive
- * (e.g. LLDP) they also must be available on master. Some of
- * the use cases include (but are not limited to): LLDP agents
- * that must be able to operate both on enslaved interfaces as
- * well as on bonds themselves; linux bridges that must be able
- * to process/pass BPDUs from attached bonds when any kind of
- * STP version is enabled on the network.
+ /*
+ * For packets that the bond_should_deliver_exact_match() call
+ * determines should be suppressed, we want to make an exception
+ * for link-local packets. This is necessary for e.g. LLDP daemons
+ * to be able to monitor inactive slave links without being forced
+ * to bind to them explicitly.
+ *
+ * At the same time, packets that are passed to the bonding master
+ * (including link-local ones) can have their originating interface
+ * determined via the PACKET_ORIGDEV socket option.
*/
- if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) {
- struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
-
- if (nskb) {
- nskb->dev = bond->dev;
- nskb->queue_mapping = 0;
- netif_rx(nskb);
- }
- return RX_HANDLER_PASS;
- }
- if (bond_should_deliver_exact_match(skb, slave, bond))
+ if (bond_should_deliver_exact_match(skb, slave, bond)) {
+ if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
+ return RX_HANDLER_PASS;
return RX_HANDLER_EXACT;
+ }
skb->dev = bond->dev;
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 0e4bbdcc614f..c76892ac4e69 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -344,7 +344,8 @@ static void b53_set_forwarding(struct b53_device *dev, int enable)
b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt);
}
-static void b53_enable_vlan(struct b53_device *dev, bool enable)
+static void b53_enable_vlan(struct b53_device *dev, bool enable,
+ bool enable_filtering)
{
u8 mgmt, vc0, vc1, vc4 = 0, vc5;
@@ -369,8 +370,13 @@ static void b53_enable_vlan(struct b53_device *dev, bool enable)
vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID;
vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN;
vc4 &= ~VC4_ING_VID_CHECK_MASK;
- vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S;
- vc5 |= VC5_DROP_VTABLE_MISS;
+ if (enable_filtering) {
+ vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S;
+ vc5 |= VC5_DROP_VTABLE_MISS;
+ } else {
+ vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S;
+ vc5 &= ~VC5_DROP_VTABLE_MISS;
+ }
if (is5325(dev))
vc0 &= ~VC0_RESERVED_1;
@@ -420,6 +426,9 @@ static void b53_enable_vlan(struct b53_device *dev, bool enable)
}
b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
+
+ dev->vlan_enabled = enable;
+ dev->vlan_filtering_enabled = enable_filtering;
}
static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100)
@@ -632,25 +641,35 @@ static void b53_enable_mib(struct b53_device *dev)
b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc);
}
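+/* BCM5325 and BCM5365 use VID 1 as the default PVID; all other
+ * switches default to VID 0.
+ */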
+static u16 b53_default_pvid(struct b53_device *dev)
+{
+ if (is5325(dev) || is5365(dev))
+ return 1;
+ else
+ return 0;
+}
+
int b53_configure_vlan(struct dsa_switch *ds)
{
struct b53_device *dev = ds->priv;
struct b53_vlan vl = { 0 };
- int i;
+ int i, def_vid;
+
+ def_vid = b53_default_pvid(dev);
/* clear all vlan entries */
if (is5325(dev) || is5365(dev)) {
- for (i = 1; i < dev->num_vlans; i++)
+ for (i = def_vid; i < dev->num_vlans; i++)
b53_set_vlan_entry(dev, i, &vl);
} else {
b53_do_vlan_op(dev, VTA_CMD_CLEAR);
}
- b53_enable_vlan(dev, false);
+ b53_enable_vlan(dev, false, dev->vlan_filtering_enabled);
b53_for_each_port(dev, i)
b53_write16(dev, B53_VLAN_PAGE,
- B53_VLAN_PORT_DEF_TAG(i), 1);
+ B53_VLAN_PORT_DEF_TAG(i), def_vid);
if (!is5325(dev) && !is5365(dev))
b53_set_jumbo(dev, dev->enable_jumbo, false);
@@ -1255,6 +1274,46 @@ EXPORT_SYMBOL(b53_phylink_mac_link_up);
int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
{
+ struct b53_device *dev = ds->priv;
+ struct net_device *bridge_dev;
+ unsigned int i;
+ u16 pvid, new_pvid;
+
+ /* Handle the case where multiple bridges span the same switch device
+ * and one of them has a different setting than what is being
+ * requested, which would break filtering semantics for any of the
+ * other bridge devices.
+ */
+ b53_for_each_port(dev, i) {
+ bridge_dev = dsa_to_port(ds, i)->bridge_dev;
+ if (bridge_dev &&
+ bridge_dev != dsa_to_port(ds, port)->bridge_dev &&
+ br_vlan_enabled(bridge_dev) != vlan_filtering) {
+ netdev_err(bridge_dev,
+ "VLAN filtering is global to the switch!\n");
+ return -EINVAL;
+ }
+ }
+
+ b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid);
+ new_pvid = pvid;
+ if (dev->vlan_filtering_enabled && !vlan_filtering) {
+ /* Filtering is currently enabled, use the default PVID since
+ * the bridge does not expect tagging anymore
+ */
+ dev->ports[port].pvid = pvid;
+ new_pvid = b53_default_pvid(dev);
+ } else if (!dev->vlan_filtering_enabled && vlan_filtering) {
+ /* Filtering is currently disabled, restore the previous PVID */
+ new_pvid = dev->ports[port].pvid;
+ }
+
+ if (pvid != new_pvid)
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
+ new_pvid);
+
+ b53_enable_vlan(dev, dev->vlan_enabled, vlan_filtering);
+
return 0;
}
EXPORT_SYMBOL(b53_vlan_filtering);
@@ -1270,7 +1329,7 @@ int b53_vlan_prepare(struct dsa_switch *ds, int port,
if (vlan->vid_end > dev->num_vlans)
return -ERANGE;
- b53_enable_vlan(dev, true);
+ b53_enable_vlan(dev, true, dev->vlan_filtering_enabled);
return 0;
}
@@ -1300,7 +1359,7 @@ void b53_vlan_add(struct dsa_switch *ds, int port,
b53_fast_age_vlan(dev, vid);
}
- if (pvid) {
+ if (pvid && !dsa_is_cpu_port(ds, port)) {
b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
vlan->vid_end);
b53_fast_age_vlan(dev, vid);
@@ -1326,12 +1385,8 @@ int b53_vlan_del(struct dsa_switch *ds, int port,
vl->members &= ~BIT(port);
- if (pvid == vid) {
- if (is5325(dev) || is5365(dev))
- pvid = 1;
- else
- pvid = 0;
- }
+ if (pvid == vid)
+ pvid = b53_default_pvid(dev);
if (untagged && !dsa_is_cpu_port(ds, port))
vl->untag &= ~(BIT(port));
@@ -1644,10 +1699,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br)
b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
dev->ports[port].vlan_ctl_mask = pvlan;
- if (is5325(dev) || is5365(dev))
- pvid = 1;
- else
- pvid = 0;
+ pvid = b53_default_pvid(dev);
/* Make this port join all VLANs without VLAN entries */
if (is58xx(dev)) {
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index ec796482792d..4dc7ee38b258 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -91,6 +91,7 @@ enum {
struct b53_port {
u16 vlan_ctl_mask;
struct ethtool_eee eee;
+ u16 pvid;
};
struct b53_vlan {
@@ -137,6 +138,8 @@ struct b53_device {
unsigned int num_vlans;
struct b53_vlan *vlans;
+ bool vlan_enabled;
+ bool vlan_filtering_enabled;
unsigned int num_ports;
struct b53_port *ports;
};
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 17ec32b0a1cc..14138d423cf1 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -726,10 +726,11 @@ static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
{
struct net_device *p = ds->ports[port].cpu_dp->master;
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
- struct ethtool_wolinfo pwol;
+ struct ethtool_wolinfo pwol = { };
/* Get the parent device WoL settings */
- p->ethtool_ops->get_wol(p, &pwol);
+ if (p->ethtool_ops->get_wol)
+ p->ethtool_ops->get_wol(p, &pwol);
/* Advertise the parent device supported settings */
wol->supported = pwol.supported;
@@ -750,9 +751,10 @@ static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
struct net_device *p = ds->ports[port].cpu_dp->master;
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
s8 cpu_port = ds->ports[port].cpu_dp->index;
- struct ethtool_wolinfo pwol;
+ struct ethtool_wolinfo pwol = { };
- p->ethtool_ops->get_wol(p, &pwol);
+ if (p->ethtool_ops->get_wol)
+ p->ethtool_ops->get_wol(p, &pwol);
if (wol->wolopts & ~pwol.supported)
return -EINVAL;
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index 693a67f45bef..ddc1f9ca8ebc 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -1162,6 +1162,12 @@ static struct platform_driver gswip_driver = {
module_platform_driver(gswip_driver);
+MODULE_FIRMWARE("lantiq/xrx300_phy11g_a21.bin");
+MODULE_FIRMWARE("lantiq/xrx300_phy22f_a21.bin");
+MODULE_FIRMWARE("lantiq/xrx200_phy11g_a14.bin");
+MODULE_FIRMWARE("lantiq/xrx200_phy11g_a22.bin");
+MODULE_FIRMWARE("lantiq/xrx200_phy22f_a14.bin");
+MODULE_FIRMWARE("lantiq/xrx200_phy22f_a22.bin");
MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>");
MODULE_DESCRIPTION("Lantiq / Intel GSWIP driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 12fd7ce3f1ff..7e3c00bd9532 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -896,7 +896,7 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip,
default:
return U64_MAX;
}
- value = (((u64)high) << 16) | low;
+ value = (((u64)high) << 32) | low;
return value;
}
@@ -3093,7 +3093,7 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6185_port_get_cmode,
- .stats_snapshot = mv88e6320_g1_stats_snapshot,
+ .stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
@@ -4595,6 +4595,14 @@ static int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip,
return 0;
}
+static void mv88e6xxx_ports_cmode_init(struct mv88e6xxx_chip *chip)
+{
+ int i;
+
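+ /* Seed every port with an invalid cmode so that the first call to
+ * mv88e6390x_port_set_cmode() never takes its "cmode unchanged"
+ * early exit.
+ */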
+ for (i = 0; i < mv88e6xxx_num_ports(chip); i++)
+ chip->ports[i].cmode = MV88E6XXX_PORT_STS_CMODE_INVALID;
+}
+
static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds,
int port)
{
@@ -4631,6 +4639,8 @@ static const char *mv88e6xxx_drv_probe(struct device *dsa_dev,
if (err)
goto free;
+ mv88e6xxx_ports_cmode_init(chip);
+
mutex_lock(&chip->reg_lock);
err = mv88e6xxx_switch_reset(chip);
mutex_unlock(&chip->reg_lock);
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index ebd26b6a93e6..79ab51e69aee 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -398,6 +398,10 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
cmode = 0;
}
+ /* cmode doesn't change, nothing to do for us */
+ if (cmode == chip->ports[port].cmode)
+ return 0;
+
lane = mv88e6390x_serdes_get_lane(chip, port);
if (lane < 0)
return lane;
@@ -408,7 +412,7 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
return err;
}
- err = mv88e6390_serdes_power(chip, port, false);
+ err = mv88e6390x_serdes_power(chip, port, false);
if (err)
return err;
@@ -424,7 +428,7 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
if (err)
return err;
- err = mv88e6390_serdes_power(chip, port, true);
+ err = mv88e6390x_serdes_power(chip, port, true);
if (err)
return err;
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index e583641de758..4aadf321edb7 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -52,6 +52,7 @@
#define MV88E6185_PORT_STS_CMODE_1000BASE_X 0x0005
#define MV88E6185_PORT_STS_CMODE_PHY 0x0006
#define MV88E6185_PORT_STS_CMODE_DISABLED 0x0007
+#define MV88E6XXX_PORT_STS_CMODE_INVALID 0xff
/* Offset 0x01: MAC (or PCS or Physical) Control Register */
#define MV88E6XXX_PORT_MAC_CTL 0x01
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index b58ca7cb8e9d..fbba300c1d01 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -275,6 +275,9 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
{
+ /* Tx TC/Queue number config */
+ hw_atl_rpb_tps_tx_tc_mode_set(self, 1U);
+
hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
index 939f77e2e117..8ac7a67b15c1 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
@@ -1274,6 +1274,15 @@ void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en)
HW_ATL_TPB_TX_BUF_EN_SHIFT, tx_buff_en);
}
+void hw_atl_rpb_tps_tx_tc_mode_set(struct aq_hw_s *aq_hw,
+ u32 tx_traf_class_mode)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TX_TC_MODE_ADDR,
+ HW_ATL_TPB_TX_TC_MODE_MSK,
+ HW_ATL_TPB_TX_TC_MODE_SHIFT,
+ tx_traf_class_mode);
+}
+
void hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
u32 tx_buff_hi_threshold_per_tc,
u32 buffer)
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
index 03c570d115fe..f529540bfd7e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
@@ -605,6 +605,10 @@ void hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
/* tpb */
+/* set TX Traffic Class Mode */
+void hw_atl_rpb_tps_tx_tc_mode_set(struct aq_hw_s *aq_hw,
+ u32 tx_traf_class_mode);
+
/* set tx buffer enable */
void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
index 8470d92db812..e91ffce005f1 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
@@ -1948,6 +1948,19 @@
/* default value of bitfield tx_buf_en */
#define HW_ATL_TPB_TX_BUF_EN_DEFAULT 0x0
+/* register address for bitfield tx_tc_mode */
+#define HW_ATL_TPB_TX_TC_MODE_ADDR 0x00007900
+/* bitmask for bitfield tx_tc_mode */
+#define HW_ATL_TPB_TX_TC_MODE_MSK 0x00000100
+/* inverted bitmask for bitfield tx_tc_mode */
+#define HW_ATL_TPB_TX_TC_MODE_MSKN 0xFFFFFEFF
+/* lower bit position of bitfield tx_tc_mode */
+#define HW_ATL_TPB_TX_TC_MODE_SHIFT 8
+/* width of bitfield tx_tc_mode */
+#define HW_ATL_TPB_TX_TC_MODE_WIDTH 1
+/* default value of bitfield tx_tc_mode */
+#define HW_ATL_TPB_TX_TC_MODE_DEFAULT 0x0
+
/* tx tx{b}_hi_thresh[c:0] bitfield definitions
* preprocessor definitions for the bitfield "tx{b}_hi_thresh[c:0]".
* parameter: buffer {b} | stride size 0x10 | range [0, 7]
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index bb41becb6609..31ff1e0d1baa 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -1335,13 +1335,11 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev;
struct atl2_adapter *adapter;
- static int cards_found;
+ static int cards_found = 0;
unsigned long mmio_start;
int mmio_len;
int err;
- cards_found = 0;
-
err = pci_enable_device(pdev);
if (err)
return err;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 28c9b0bdf2f6..bc3ac369cbe3 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -134,6 +134,10 @@ static void bcm_sysport_set_rx_csum(struct net_device *dev,
priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
reg = rxchk_readl(priv, RXCHK_CONTROL);
+ /* Clear L2 header checks, which would otherwise prevent BPDUs
+ * from being received.
+ */
+ reg &= ~RXCHK_L2_HDR_DIS;
if (priv->rx_chk_en)
reg |= RXCHK_EN;
else
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 8bc7e495b027..803f7990d32b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -500,6 +500,12 @@ normal_tx:
}
length >>= 9;
+ if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
+ dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
+ skb->len);
+ i = 0;
+ goto tx_dma_error;
+ }
flags |= bnxt_lhint_arr[length];
txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
@@ -3903,7 +3909,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
if (len)
break;
/* on first few passes, just barely sleep */
- if (i < DFLT_HWRM_CMD_TIMEOUT)
+ if (i < HWRM_SHORT_TIMEOUT_COUNTER)
usleep_range(HWRM_SHORT_MIN_TIMEOUT,
HWRM_SHORT_MAX_TIMEOUT);
else
@@ -3926,7 +3932,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
dma_rmb();
if (*valid)
break;
- udelay(1);
+ usleep_range(1, 5);
}
if (j >= HWRM_VALID_BIT_DELAY_USEC) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index a451796deefe..2fb653e0048d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -582,7 +582,7 @@ struct nqe_cn {
(HWRM_SHORT_TIMEOUT_COUNTER * HWRM_SHORT_MIN_TIMEOUT + \
((n) - HWRM_SHORT_TIMEOUT_COUNTER) * HWRM_MIN_TIMEOUT))
-#define HWRM_VALID_BIT_DELAY_USEC 20
+#define HWRM_VALID_BIT_DELAY_USEC 150
#define BNXT_HWRM_CHNL_CHIMP 0
#define BNXT_HWRM_CHNL_KONG 1
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index f4d81765221e..62636c1ed141 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -271,7 +271,7 @@ struct xcast_addr_list {
};
struct nicvf_work {
- struct delayed_work work;
+ struct work_struct work;
u8 mode;
struct xcast_addr_list *mc;
};
@@ -327,7 +327,11 @@ struct nicvf {
struct nicvf_work rx_mode_work;
/* spinlock to protect workqueue arguments from concurrent access */
spinlock_t rx_mode_wq_lock;
-
+ /* workqueue for handling kernel ndo_set_rx_mode() calls */
+ struct workqueue_struct *nicvf_rx_mode_wq;
+ /* mutex to protect VF's mailbox contents from concurrent access */
+ struct mutex rx_mode_mtx;
+ struct delayed_work link_change_work;
/* PTP timestamp */
struct cavium_ptp *ptp_clock;
/* Inbound timestamping is on */
@@ -575,10 +579,8 @@ struct set_ptp {
struct xcast {
u8 msg;
- union {
- u8 mode;
- u64 mac;
- } data;
+ u8 mode;
+ u64 mac:48;
};
/* 128 bit shared memory between PF and each VF */
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 6c8dcb65ff03..c90252829ed3 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -57,14 +57,8 @@ struct nicpf {
#define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) ((map >> 4) & 0xF)
#define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) (map & 0xF)
u8 *vf_lmac_map;
- struct delayed_work dwork;
- struct workqueue_struct *check_link;
- u8 *link;
- u8 *duplex;
- u32 *speed;
u16 cpi_base[MAX_NUM_VFS_SUPPORTED];
u16 rssi_base[MAX_NUM_VFS_SUPPORTED];
- bool mbx_lock[MAX_NUM_VFS_SUPPORTED];
/* MSI-X */
u8 num_vec;
@@ -929,6 +923,35 @@ static void nic_config_timestamp(struct nicpf *nic, int vf, struct set_ptp *ptp)
nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (pkind_idx << 3), pkind_val);
}
+/* Get the BGX LMAC link status and update the corresponding VF
+ * if there is a change. Valid only if the internal L2 switch
+ * is not present; otherwise the VF link is always treated as up.
+ */
+static void nic_link_status_get(struct nicpf *nic, u8 vf)
+{
+ union nic_mbx mbx = {};
+ struct bgx_link_status link;
+ u8 bgx, lmac;
+
+ mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;
+
+ /* Get BGX, LMAC indices for the VF */
+ bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+
+ /* Get interface link status */
+ bgx_get_lmac_link_state(nic->node, bgx, lmac, &link);
+
+ /* Send a mbox message to VF with current link status */
+ mbx.link_status.link_up = link.link_up;
+ mbx.link_status.duplex = link.duplex;
+ mbx.link_status.speed = link.speed;
+ mbx.link_status.mac_type = link.mac_type;
+
+ /* reply with link status */
+ nic_send_msg_to_vf(nic, vf, &mbx);
+}
+
/* Interrupt handler to handle mailbox messages from VFs */
static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
{
@@ -941,8 +964,6 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
int i;
int ret = 0;
- nic->mbx_lock[vf] = true;
-
mbx_addr = nic_get_mbx_addr(vf);
mbx_data = (u64 *)&mbx;
@@ -957,12 +978,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
switch (mbx.msg.msg) {
case NIC_MBOX_MSG_READY:
nic_mbx_send_ready(nic, vf);
- if (vf < nic->num_vf_en) {
- nic->link[vf] = 0;
- nic->duplex[vf] = 0;
- nic->speed[vf] = 0;
- }
- goto unlock;
+ return;
case NIC_MBOX_MSG_QS_CFG:
reg_addr = NIC_PF_QSET_0_127_CFG |
(mbx.qs.num << NIC_QS_ID_SHIFT);
@@ -1031,7 +1047,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
break;
case NIC_MBOX_MSG_RSS_SIZE:
nic_send_rss_size(nic, vf);
- goto unlock;
+ return;
case NIC_MBOX_MSG_RSS_CFG:
case NIC_MBOX_MSG_RSS_CFG_CONT:
nic_config_rss(nic, &mbx.rss_cfg);
@@ -1039,7 +1055,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
case NIC_MBOX_MSG_CFG_DONE:
/* Last message of VF config msg sequence */
nic_enable_vf(nic, vf, true);
- goto unlock;
+ break;
case NIC_MBOX_MSG_SHUTDOWN:
/* First msg in VF teardown sequence */
if (vf >= nic->num_vf_en)
@@ -1049,19 +1065,19 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
break;
case NIC_MBOX_MSG_ALLOC_SQS:
nic_alloc_sqs(nic, &mbx.sqs_alloc);
- goto unlock;
+ return;
case NIC_MBOX_MSG_NICVF_PTR:
nic->nicvf[vf] = mbx.nicvf.nicvf;
break;
case NIC_MBOX_MSG_PNICVF_PTR:
nic_send_pnicvf(nic, vf);
- goto unlock;
+ return;
case NIC_MBOX_MSG_SNICVF_PTR:
nic_send_snicvf(nic, &mbx.nicvf);
- goto unlock;
+ return;
case NIC_MBOX_MSG_BGX_STATS:
nic_get_bgx_stats(nic, &mbx.bgx_stats);
- goto unlock;
+ return;
case NIC_MBOX_MSG_LOOPBACK:
ret = nic_config_loopback(nic, &mbx.lbk);
break;
@@ -1070,7 +1086,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
break;
case NIC_MBOX_MSG_PFC:
nic_pause_frame(nic, vf, &mbx.pfc);
- goto unlock;
+ return;
case NIC_MBOX_MSG_PTP_CFG:
nic_config_timestamp(nic, vf, &mbx.ptp);
break;
@@ -1094,7 +1110,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
bgx_set_dmac_cam_filter(nic->node, bgx, lmac,
- mbx.xcast.data.mac,
+ mbx.xcast.mac,
vf < NIC_VF_PER_MBX_REG ? vf :
vf - NIC_VF_PER_MBX_REG);
break;
@@ -1106,8 +1122,15 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
}
bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
- bgx_set_xcast_mode(nic->node, bgx, lmac, mbx.xcast.data.mode);
+ bgx_set_xcast_mode(nic->node, bgx, lmac, mbx.xcast.mode);
break;
+ case NIC_MBOX_MSG_BGX_LINK_CHANGE:
+ if (vf >= nic->num_vf_en) {
+ ret = -1; /* NACK */
+ break;
+ }
+ nic_link_status_get(nic, vf);
+ return;
default:
dev_err(&nic->pdev->dev,
"Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
@@ -1121,8 +1144,6 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
mbx.msg.msg, vf);
nic_mbx_send_nack(nic, vf);
}
-unlock:
- nic->mbx_lock[vf] = false;
}
static irqreturn_t nic_mbx_intr_handler(int irq, void *nic_irq)
@@ -1270,52 +1291,6 @@ static int nic_sriov_init(struct pci_dev *pdev, struct nicpf *nic)
return 0;
}
-/* Poll for BGX LMAC link status and update corresponding VF
- * if there is a change, valid only if internal L2 switch
- * is not present otherwise VF link is always treated as up
- */
-static void nic_poll_for_link(struct work_struct *work)
-{
- union nic_mbx mbx = {};
- struct nicpf *nic;
- struct bgx_link_status link;
- u8 vf, bgx, lmac;
-
- nic = container_of(work, struct nicpf, dwork.work);
-
- mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;
-
- for (vf = 0; vf < nic->num_vf_en; vf++) {
- /* Poll only if VF is UP */
- if (!nic->vf_enabled[vf])
- continue;
-
- /* Get BGX, LMAC indices for the VF */
- bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
- lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
- /* Get interface link status */
- bgx_get_lmac_link_state(nic->node, bgx, lmac, &link);
-
- /* Inform VF only if link status changed */
- if (nic->link[vf] == link.link_up)
- continue;
-
- if (!nic->mbx_lock[vf]) {
- nic->link[vf] = link.link_up;
- nic->duplex[vf] = link.duplex;
- nic->speed[vf] = link.speed;
-
- /* Send a mbox message to VF with current link status */
- mbx.link_status.link_up = link.link_up;
- mbx.link_status.duplex = link.duplex;
- mbx.link_status.speed = link.speed;
- mbx.link_status.mac_type = link.mac_type;
- nic_send_msg_to_vf(nic, vf, &mbx);
- }
- }
- queue_delayed_work(nic->check_link, &nic->dwork, HZ * 2);
-}
-
static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct device *dev = &pdev->dev;
@@ -1384,18 +1359,6 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!nic->vf_lmac_map)
goto err_release_regions;
- nic->link = devm_kmalloc_array(dev, max_lmac, sizeof(u8), GFP_KERNEL);
- if (!nic->link)
- goto err_release_regions;
-
- nic->duplex = devm_kmalloc_array(dev, max_lmac, sizeof(u8), GFP_KERNEL);
- if (!nic->duplex)
- goto err_release_regions;
-
- nic->speed = devm_kmalloc_array(dev, max_lmac, sizeof(u32), GFP_KERNEL);
- if (!nic->speed)
- goto err_release_regions;
-
/* Initialize hardware */
nic_init_hw(nic);
@@ -1411,22 +1374,8 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_unregister_interrupts;
- /* Register a physical link status poll fn() */
- nic->check_link = alloc_workqueue("check_link_status",
- WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
- if (!nic->check_link) {
- err = -ENOMEM;
- goto err_disable_sriov;
- }
-
- INIT_DELAYED_WORK(&nic->dwork, nic_poll_for_link);
- queue_delayed_work(nic->check_link, &nic->dwork, 0);
-
return 0;
-err_disable_sriov:
- if (nic->flags & NIC_SRIOV_ENABLED)
- pci_disable_sriov(pdev);
err_unregister_interrupts:
nic_unregister_interrupts(nic);
err_release_regions:
@@ -1447,12 +1396,6 @@ static void nic_remove(struct pci_dev *pdev)
if (nic->flags & NIC_SRIOV_ENABLED)
pci_disable_sriov(pdev);
- if (nic->check_link) {
- /* Destroy work Queue */
- cancel_delayed_work_sync(&nic->dwork);
- destroy_workqueue(nic->check_link);
- }
-
nic_unregister_interrupts(nic);
pci_release_regions(pdev);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 88f8a8fa93cd..503cfadff4ac 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -68,9 +68,6 @@ module_param(cpi_alg, int, 0444);
MODULE_PARM_DESC(cpi_alg,
"PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");
-/* workqueue for handling kernel ndo_set_rx_mode() calls */
-static struct workqueue_struct *nicvf_rx_mode_wq;
-
static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
{
if (nic->sqs_mode)
@@ -127,6 +124,9 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
{
int timeout = NIC_MBOX_MSG_TIMEOUT;
int sleep = 10;
+ int ret = 0;
+
+ mutex_lock(&nic->rx_mode_mtx);
nic->pf_acked = false;
nic->pf_nacked = false;
@@ -139,7 +139,8 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
netdev_err(nic->netdev,
"PF NACK to mbox msg 0x%02x from VF%d\n",
(mbx->msg.msg & 0xFF), nic->vf_id);
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
msleep(sleep);
if (nic->pf_acked)
@@ -149,10 +150,12 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
netdev_err(nic->netdev,
"PF didn't ACK to mbox msg 0x%02x from VF%d\n",
(mbx->msg.msg & 0xFF), nic->vf_id);
- return -EBUSY;
+ ret = -EBUSY;
+ break;
}
}
- return 0;
+ mutex_unlock(&nic->rx_mode_mtx);
+ return ret;
}
/* Checks if VF is able to communicate with PF
@@ -172,6 +175,17 @@ static int nicvf_check_pf_ready(struct nicvf *nic)
return 1;
}
+static void nicvf_send_cfg_done(struct nicvf *nic)
+{
+ union nic_mbx mbx = {};
+
+ mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
+ if (nicvf_send_msg_to_pf(nic, &mbx)) {
+ netdev_err(nic->netdev,
+ "PF didn't respond to CFG DONE msg\n");
+ }
+}
+
static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx)
{
if (bgx->rx)
@@ -228,21 +242,24 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic)
break;
case NIC_MBOX_MSG_BGX_LINK_CHANGE:
nic->pf_acked = true;
- nic->link_up = mbx.link_status.link_up;
- nic->duplex = mbx.link_status.duplex;
- nic->speed = mbx.link_status.speed;
- nic->mac_type = mbx.link_status.mac_type;
- if (nic->link_up) {
- netdev_info(nic->netdev, "Link is Up %d Mbps %s duplex\n",
- nic->speed,
- nic->duplex == DUPLEX_FULL ?
- "Full" : "Half");
- netif_carrier_on(nic->netdev);
- netif_tx_start_all_queues(nic->netdev);
- } else {
- netdev_info(nic->netdev, "Link is Down\n");
- netif_carrier_off(nic->netdev);
- netif_tx_stop_all_queues(nic->netdev);
+ if (nic->link_up != mbx.link_status.link_up) {
+ nic->link_up = mbx.link_status.link_up;
+ nic->duplex = mbx.link_status.duplex;
+ nic->speed = mbx.link_status.speed;
+ nic->mac_type = mbx.link_status.mac_type;
+ if (nic->link_up) {
+ netdev_info(nic->netdev,
+ "Link is Up %d Mbps %s duplex\n",
+ nic->speed,
+ nic->duplex == DUPLEX_FULL ?
+ "Full" : "Half");
+ netif_carrier_on(nic->netdev);
+ netif_tx_start_all_queues(nic->netdev);
+ } else {
+ netdev_info(nic->netdev, "Link is Down\n");
+ netif_carrier_off(nic->netdev);
+ netif_tx_stop_all_queues(nic->netdev);
+ }
}
break;
case NIC_MBOX_MSG_ALLOC_SQS:
@@ -1311,6 +1328,11 @@ int nicvf_stop(struct net_device *netdev)
struct nicvf_cq_poll *cq_poll = NULL;
union nic_mbx mbx = {};
+ cancel_delayed_work_sync(&nic->link_change_work);
+
+ /* wait until all queued set_rx_mode tasks complete */
+ drain_workqueue(nic->nicvf_rx_mode_wq);
+
mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
nicvf_send_msg_to_pf(nic, &mbx);
@@ -1410,13 +1432,27 @@ static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
return nicvf_send_msg_to_pf(nic, &mbx);
}
+static void nicvf_link_status_check_task(struct work_struct *work_arg)
+{
+ struct nicvf *nic = container_of(work_arg,
+ struct nicvf,
+ link_change_work.work);
+ union nic_mbx mbx = {};
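+
+ /* Ask the PF for the current link state; the reply arrives over the
+ * mailbox and is handled by the NIC_MBOX_MSG_BGX_LINK_CHANGE case in
+ * nicvf_handle_mbx_intr(). Re-arm the work to poll again in 2s.
+ */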
+ mbx.msg.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;
+ nicvf_send_msg_to_pf(nic, &mbx);
+ queue_delayed_work(nic->nicvf_rx_mode_wq,
+ &nic->link_change_work, 2 * HZ);
+}
+
int nicvf_open(struct net_device *netdev)
{
int cpu, err, qidx;
struct nicvf *nic = netdev_priv(netdev);
struct queue_set *qs = nic->qs;
struct nicvf_cq_poll *cq_poll = NULL;
- union nic_mbx mbx = {};
+
+ /* wait until all queued set_rx_mode tasks complete, if any */
+ drain_workqueue(nic->nicvf_rx_mode_wq);
netif_carrier_off(netdev);
@@ -1512,8 +1548,12 @@ int nicvf_open(struct net_device *netdev)
nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);
/* Send VF config done msg to PF */
- mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
- nicvf_write_to_mbx(nic, &mbx);
+ nicvf_send_cfg_done(nic);
+
+ INIT_DELAYED_WORK(&nic->link_change_work,
+ nicvf_link_status_check_task);
+ queue_delayed_work(nic->nicvf_rx_mode_wq,
+ &nic->link_change_work, 0);
return 0;
cleanup:
@@ -1941,15 +1981,17 @@ static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs,
/* flush DMAC filters and reset RX mode */
mbx.xcast.msg = NIC_MBOX_MSG_RESET_XCAST;
- nicvf_send_msg_to_pf(nic, &mbx);
+ if (nicvf_send_msg_to_pf(nic, &mbx) < 0)
+ goto free_mc;
if (mode & BGX_XCAST_MCAST_FILTER) {
/* once filtering is enabled, we need to signal to the PF to add
* its own LMAC to the filter to accept packets for it.
*/
mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
- mbx.xcast.data.mac = 0;
- nicvf_send_msg_to_pf(nic, &mbx);
+ mbx.xcast.mac = 0;
+ if (nicvf_send_msg_to_pf(nic, &mbx) < 0)
+ goto free_mc;
}
/* check if we have any specific MACs to be added to PF DMAC filter */
@@ -1957,23 +1999,25 @@ static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs,
/* now go through kernel list of MACs and add them one by one */
for (idx = 0; idx < mc_addrs->count; idx++) {
mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
- mbx.xcast.data.mac = mc_addrs->mc[idx];
- nicvf_send_msg_to_pf(nic, &mbx);
+ mbx.xcast.mac = mc_addrs->mc[idx];
+ if (nicvf_send_msg_to_pf(nic, &mbx) < 0)
+ goto free_mc;
}
- kfree(mc_addrs);
}
/* and finally set rx mode for PF accordingly */
mbx.xcast.msg = NIC_MBOX_MSG_SET_XCAST;
- mbx.xcast.data.mode = mode;
+ mbx.xcast.mode = mode;
nicvf_send_msg_to_pf(nic, &mbx);
+free_mc:
+ kfree(mc_addrs);
}
static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
{
struct nicvf_work *vf_work = container_of(work_arg, struct nicvf_work,
- work.work);
+ work);
struct nicvf *nic = container_of(vf_work, struct nicvf, rx_mode_work);
u8 mode;
struct xcast_addr_list *mc;
@@ -2030,7 +2074,7 @@ static void nicvf_set_rx_mode(struct net_device *netdev)
kfree(nic->rx_mode_work.mc);
nic->rx_mode_work.mc = mc_list;
nic->rx_mode_work.mode = mode;
- queue_delayed_work(nicvf_rx_mode_wq, &nic->rx_mode_work.work, 0);
+ queue_work(nic->nicvf_rx_mode_wq, &nic->rx_mode_work.work);
spin_unlock(&nic->rx_mode_wq_lock);
}
@@ -2187,8 +2231,12 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&nic->reset_task, nicvf_reset_task);
- INIT_DELAYED_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task);
+ nic->nicvf_rx_mode_wq = alloc_ordered_workqueue("nicvf_rx_mode_wq_VF%d",
+ WQ_MEM_RECLAIM,
+ nic->vf_id);
+ INIT_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task);
spin_lock_init(&nic->rx_mode_wq_lock);
+ mutex_init(&nic->rx_mode_mtx);
err = register_netdev(netdev);
if (err) {
@@ -2228,13 +2276,15 @@ static void nicvf_remove(struct pci_dev *pdev)
nic = netdev_priv(netdev);
pnetdev = nic->pnicvf->netdev;
- cancel_delayed_work_sync(&nic->rx_mode_work.work);
-
/* Check if this Qset is assigned to different VF.
* If yes, clean primary and all secondary Qsets.
*/
if (pnetdev && (pnetdev->reg_state == NETREG_REGISTERED))
unregister_netdev(pnetdev);
+ if (nic->nicvf_rx_mode_wq) {
+ destroy_workqueue(nic->nicvf_rx_mode_wq);
+ nic->nicvf_rx_mode_wq = NULL;
+ }
nicvf_unregister_interrupts(nic);
pci_set_drvdata(pdev, NULL);
if (nic->drv_stats)
@@ -2261,17 +2311,11 @@ static struct pci_driver nicvf_driver = {
static int __init nicvf_init_module(void)
{
pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);
- nicvf_rx_mode_wq = alloc_ordered_workqueue("nicvf_generic",
- WQ_MEM_RECLAIM);
return pci_register_driver(&nicvf_driver);
}
static void __exit nicvf_cleanup_module(void)
{
- if (nicvf_rx_mode_wq) {
- destroy_workqueue(nicvf_rx_mode_wq);
- nicvf_rx_mode_wq = NULL;
- }
pci_unregister_driver(&nicvf_driver);
}
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index e337da6ba2a4..673c57b8023f 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1217,7 +1217,7 @@ static void bgx_init_hw(struct bgx *bgx)
/* Disable MAC steering (NCSI traffic) */
for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++)
- bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00);
+ bgx_reg_write(bgx, 0, BGX_CMR_RX_STEERING + (i * 8), 0x00);
}
static u8 bgx_get_lane2sds_cfg(struct bgx *bgx, struct lmac *lmac)
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index cbdd20b9ee6f..5cbc54e9eb19 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -60,7 +60,7 @@
#define RX_DMACX_CAM_EN BIT_ULL(48)
#define RX_DMACX_CAM_LMACID(x) (((u64)x) << 49)
#define RX_DMAC_COUNT 32
-#define BGX_CMR_RX_STREERING 0x300
+#define BGX_CMR_RX_STEERING 0x300
#define RX_TRAFFIC_STEER_RULE_COUNT 8
#define BGX_CMR_CHAN_MSK_AND 0x450
#define BGX_CMR_BIST_STATUS 0x460
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index c041f44324db..b3654598a2d5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -660,6 +660,7 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
lld->cclk_ps = 1000000000 / adap->params.vpd.cclk;
lld->udb_density = 1 << adap->params.sge.eq_qpp;
lld->ucq_density = 1 << adap->params.sge.iq_qpp;
+ lld->sge_host_page_size = 1 << (adap->params.sge.hps + 10);
lld->filt_mode = adap->params.tp.vlan_pri_map;
/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
for (i = 0; i < NCHAN; i++)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 5fa9a2d5fc4b..21da34a4ca24 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -336,6 +336,7 @@ struct cxgb4_lld_info {
unsigned int cclk_ps; /* Core clock period in psec */
unsigned short udb_density; /* # of user DB/page */
unsigned short ucq_density; /* # of user CQs/page */
+ unsigned int sge_host_page_size; /* SGE host page size */
unsigned short filt_mode; /* filter optional components */
unsigned short tx_modq[NCHAN]; /* maps each tx channel to a */
/* scheduler queue */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index b8155f5e71b4..ac55db065f16 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -3128,6 +3128,9 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset)
dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 1);
dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit);
}
+
+ put_device(&pdev->dev);
+
return 0;
}
EXPORT_SYMBOL(hns_dsaf_roce_reset);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index f52e2c46e6a7..e4ff531db14a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -3289,8 +3289,11 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring)) :
!i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
if (!ok) {
+ /* Log this in case the user has forgotten to give the kernel
+ * any buffers; they can still be supplied later by the
+ * application.
+ */
dev_info(&vsi->back->pdev->dev,
- "Failed allocate some buffers on %sRx ring %d (pf_q %d)\n",
+ "Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n",
ring->xsk_umem ? "UMEM enabled " : "",
ring->queue_index, pf_q);
}
@@ -6725,8 +6728,13 @@ void i40e_down(struct i40e_vsi *vsi)
for (i = 0; i < vsi->num_queue_pairs; i++) {
i40e_clean_tx_ring(vsi->tx_rings[i]);
- if (i40e_enabled_xdp_vsi(vsi))
+ if (i40e_enabled_xdp_vsi(vsi)) {
+ /* Make sure that in-progress ndo_xdp_xmit
+ * calls are completed.
+ */
+ synchronize_rcu();
i40e_clean_tx_ring(vsi->xdp_rings[i]);
+ }
i40e_clean_rx_ring(vsi->rx_rings[i]);
}
@@ -11895,6 +11903,14 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi,
if (old_prog)
bpf_prog_put(old_prog);
+ /* Kick start the NAPI context if there is an AF_XDP socket open
+ * on that queue id. This is so that receiving will start.
+ */
+ if (need_reset && prog)
+ for (i = 0; i < vsi->num_queue_pairs; i++)
+ if (vsi->xdp_rings[i]->xsk_umem)
+ (void)i40e_xsk_async_xmit(vsi->netdev, i);
+
return 0;
}
@@ -11955,8 +11971,13 @@ static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair)
static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair)
{
i40e_clean_tx_ring(vsi->tx_rings[queue_pair]);
- if (i40e_enabled_xdp_vsi(vsi))
+ if (i40e_enabled_xdp_vsi(vsi)) {
+ /* Make sure that in-progress ndo_xdp_xmit calls are
+ * completed.
+ */
+ synchronize_rcu();
i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]);
+ }
i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index a7e14e98889f..6c97667d20ef 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -3709,6 +3709,7 @@ int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
struct i40e_netdev_priv *np = netdev_priv(dev);
unsigned int queue_index = smp_processor_id();
struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
struct i40e_ring *xdp_ring;
int drops = 0;
int i;
@@ -3716,7 +3717,8 @@ int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
if (test_bit(__I40E_VSI_DOWN, vsi->state))
return -ENETDOWN;
- if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs)
+ if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs ||
+ test_bit(__I40E_CONFIG_BUSY, pf->state))
return -ENXIO;
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 870cf654e436..3827f16e6923 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -183,6 +183,11 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
err = i40e_queue_pair_enable(vsi, qid);
if (err)
return err;
+
+ /* Kick start the NAPI context so that receiving will start */
+ err = i40e_xsk_async_xmit(vsi->netdev, qid);
+ if (err)
+ return err;
}
return 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index daff8183534b..cb35d8202572 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -3953,8 +3953,11 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
else
mrqc = IXGBE_MRQC_VMDQRSS64EN;
- /* Enable L3/L4 for Tx Switched packets */
- mrqc |= IXGBE_MRQC_L3L4TXSWEN;
+ /* Enable L3/L4 for Tx Switched packets only for X550,
+ * older devices do not support this feature
+ */
+ if (hw->mac.type >= ixgbe_mac_X550)
+ mrqc |= IXGBE_MRQC_L3L4TXSWEN;
} else {
if (tcs > 4)
mrqc = IXGBE_MRQC_RTRSS8TCEN;
@@ -10225,6 +10228,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct bpf_prog *old_prog;
+ bool need_reset;
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
return -EINVAL;
@@ -10247,9 +10251,10 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
return -ENOMEM;
old_prog = xchg(&adapter->xdp_prog, prog);
+ need_reset = (!!prog != !!old_prog);
/* If transitioning XDP modes reconfigure rings */
- if (!!prog != !!old_prog) {
+ if (need_reset) {
int err = ixgbe_setup_tc(dev, adapter->hw_tcs);
if (err) {
@@ -10265,6 +10270,14 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
if (old_prog)
bpf_prog_put(old_prog);
+ /* Kick start the NAPI context if there is an AF_XDP socket open
+ * on that queue id. This is so that receiving will start.
+ */
+ if (need_reset && prog)
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ if (adapter->xdp_ring[i]->xsk_umem)
+ (void)ixgbe_xsk_async_xmit(adapter->netdev, i);
+
return 0;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index 65c3e2c979d4..36a8879536a4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -144,11 +144,19 @@ static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
ixgbe_txrx_ring_disable(adapter, qid);
err = ixgbe_add_xsk_umem(adapter, umem, qid);
+ if (err)
+ return err;
- if (if_running)
+ if (if_running) {
ixgbe_txrx_ring_enable(adapter, qid);
- return err;
+ /* Kick start the NAPI context so that receiving will start */
+ err = ixgbe_xsk_async_xmit(adapter->netdev, qid);
+ if (err)
+ return err;
+ }
+
+ return 0;
}
static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid)
@@ -634,7 +642,8 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
dma_addr_t dma;
while (budget-- > 0) {
- if (unlikely(!ixgbe_desc_unused(xdp_ring))) {
+ if (unlikely(!ixgbe_desc_unused(xdp_ring)) ||
+ !netif_carrier_ok(xdp_ring->netdev)) {
work_done = false;
break;
}
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 2f427271a793..292a668ce88e 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2879,7 +2879,7 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
ret = mv643xx_eth_shared_of_probe(pdev);
if (ret)
- return ret;
+ goto err_put_clk;
pd = dev_get_platdata(&pdev->dev);
msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
@@ -2887,6 +2887,11 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
infer_hw_params(msp);
return 0;
+
+err_put_clk:
+ if (!IS_ERR(msp->clk))
+ clk_disable_unprepare(msp->clk);
+ return ret;
}
static int mv643xx_eth_shared_remove(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 9d4568eb2297..8433fb9c3eee 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2146,7 +2146,7 @@ err_drop_frame:
if (unlikely(!skb))
goto err_drop_frame_ret_pool;
- dma_sync_single_range_for_cpu(dev->dev.parent,
+ dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev,
rx_desc->buf_phys_addr,
MVNETA_MH_SIZE + NET_SKB_PAD,
rx_bytes,
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index f3a5fa84860f..57727fe1501e 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -5073,7 +5073,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&hw->restart_work, sky2_restart);
pci_set_drvdata(pdev, hw);
- pdev->d3_delay = 200;
+ pdev->d3_delay = 300;
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 6b88881b8e35..c1438ae52a11 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -3360,7 +3360,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
dev->addr_len = ETH_ALEN;
mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
if (!is_valid_ether_addr(dev->dev_addr)) {
- en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n",
+ en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n",
priv->port, dev->dev_addr);
err = -EINVAL;
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 32519c93df17..b65e274b02e9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -862,8 +862,9 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
bool configure = false;
bool pfc = false;
+ u16 thres_cells;
+ u16 delay_cells;
bool lossy;
- u16 thres;
for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
if (prio_tc[j] == i) {
@@ -877,10 +878,11 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
continue;
lossy = !(pfc || pause_en);
- thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
- delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc,
- pause_en);
- mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy);
+ thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
+ delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay,
+ pfc, pause_en);
+ mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres_cells + delay_cells,
+ thres_cells, lossy);
}
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c
index f6ecfa778660..8f72587b5a2c 100644
--- a/drivers/net/ethernet/microchip/enc28j60.c
+++ b/drivers/net/ethernet/microchip/enc28j60.c
@@ -1681,5 +1681,5 @@ MODULE_DESCRIPTION(DRV_NAME " ethernet driver");
MODULE_AUTHOR("Claudio Lanconelli <lanconelli.claudio@eptar.com>");
MODULE_LICENSE("GPL");
module_param_named(debug, debug.msg_enable, int, 0);
-MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., ffff=all)");
+MODULE_PARM_DESC(debug, "Debug verbosity level, given as the number of bits set (0=none, ..., 31=all)");
MODULE_ALIAS("spi:" DRV_NAME);
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 310807ef328b..4d1b4a24907f 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -1400,7 +1400,8 @@ static int lan743x_tx_frame_start(struct lan743x_tx *tx,
}
static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
- unsigned int frame_length)
+ unsigned int frame_length,
+ int nr_frags)
{
/* called only from within lan743x_tx_xmit_frame.
* assuming tx->ring_lock has already been acquired.
@@ -1410,6 +1411,10 @@ static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
/* wrap up previous descriptor */
tx->frame_data0 |= TX_DESC_DATA0_EXT_;
+ if (nr_frags <= 0) {
+ tx->frame_data0 |= TX_DESC_DATA0_LS_;
+ tx->frame_data0 |= TX_DESC_DATA0_IOC_;
+ }
tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
tx_descriptor->data0 = tx->frame_data0;
@@ -1514,8 +1519,11 @@ static void lan743x_tx_frame_end(struct lan743x_tx *tx,
u32 tx_tail_flags = 0;
/* wrap up previous descriptor */
- tx->frame_data0 |= TX_DESC_DATA0_LS_;
- tx->frame_data0 |= TX_DESC_DATA0_IOC_;
+ if ((tx->frame_data0 & TX_DESC_DATA0_DTYPE_MASK_) ==
+ TX_DESC_DATA0_DTYPE_DATA_) {
+ tx->frame_data0 |= TX_DESC_DATA0_LS_;
+ tx->frame_data0 |= TX_DESC_DATA0_IOC_;
+ }
tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
buffer_info = &tx->buffer_info[tx->frame_tail];
@@ -1600,7 +1608,7 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
}
if (gso)
- lan743x_tx_frame_add_lso(tx, frame_length);
+ lan743x_tx_frame_add_lso(tx, frame_length, nr_frags);
if (nr_frags <= 0)
goto finish;
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index e23ca90289f7..0a868c829b90 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -1291,15 +1291,10 @@ wrp_alu64_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
static int
wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
- enum alu_op alu_op, bool skip)
+ enum alu_op alu_op)
{
const struct bpf_insn *insn = &meta->insn;
- if (skip) {
- meta->skip = true;
- return 0;
- }
-
wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm);
wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
@@ -2309,7 +2304,7 @@ static int xor_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR, !~meta->insn.imm);
+ return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR);
}
static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -2319,7 +2314,7 @@ static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
+ return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND);
}
static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -2329,7 +2324,7 @@ static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
+ return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR);
}
static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -2339,7 +2334,7 @@ static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD, !meta->insn.imm);
+ return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD);
}
static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -2349,7 +2344,7 @@ static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm);
+ return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB);
}
static int mul_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index beb8e5d6401a..ded556b7bab5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -1688,6 +1688,15 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0);
+ if (!ether_addr_equal(ethh->h_dest,
+ p_hwfn->p_rdma_info->iwarp.mac_addr)) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_RDMA,
+ "Got unexpected mac %pM instead of %pM\n",
+ ethh->h_dest, p_hwfn->p_rdma_info->iwarp.mac_addr);
+ return -EINVAL;
+ }
+
ether_addr_copy(remote_mac_addr, ethh->h_source);
ether_addr_copy(local_mac_addr, ethh->h_dest);
@@ -2605,7 +2614,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
struct qed_iwarp_info *iwarp_info;
struct qed_ll2_acquire_data data;
struct qed_ll2_cbs cbs;
- u32 mpa_buff_size;
+ u32 buff_size;
u16 n_ooo_bufs;
int rc = 0;
int i;
@@ -2632,7 +2641,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
memset(&data, 0, sizeof(data));
data.input.conn_type = QED_LL2_TYPE_IWARP;
- data.input.mtu = QED_IWARP_MAX_SYN_PKT_SIZE;
+ data.input.mtu = params->max_mtu;
data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE;
data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE;
data.input.tx_max_bds_per_packet = 1; /* will never be fragmented */
@@ -2654,9 +2663,10 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
goto err;
}
+ buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu);
rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
QED_IWARP_LL2_SYN_RX_SIZE,
- QED_IWARP_MAX_SYN_PKT_SIZE,
+ buff_size,
iwarp_info->ll2_syn_handle);
if (rc)
goto err;
@@ -2710,10 +2720,9 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
if (rc)
goto err;
- mpa_buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu);
rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
data.input.rx_num_desc,
- mpa_buff_size,
+ buff_size,
iwarp_info->ll2_mpa_handle);
if (rc)
goto err;
@@ -2726,7 +2735,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps;
- iwarp_info->mpa_intermediate_buf = kzalloc(mpa_buff_size, GFP_KERNEL);
+ iwarp_info->mpa_intermediate_buf = kzalloc(buff_size, GFP_KERNEL);
if (!iwarp_info->mpa_intermediate_buf)
goto err;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
index b8f612d00241..7ac959038324 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
@@ -46,7 +46,6 @@ enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state);
#define QED_IWARP_LL2_SYN_TX_SIZE (128)
#define QED_IWARP_LL2_SYN_RX_SIZE (256)
-#define QED_IWARP_MAX_SYN_PKT_SIZE (128)
#define QED_IWARP_LL2_OOO_DEF_TX_SIZE (256)
#define QED_IWARP_MAX_OOO (16)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 20299f6f65fc..736e29635b77 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -241,15 +241,18 @@ static inline void dwmac4_get_timestamp(void *desc, u32 ats, u64 *ts)
static int dwmac4_rx_check_timestamp(void *desc)
{
struct dma_desc *p = (struct dma_desc *)desc;
+ unsigned int rdes0 = le32_to_cpu(p->des0);
+ unsigned int rdes1 = le32_to_cpu(p->des1);
+ unsigned int rdes3 = le32_to_cpu(p->des3);
u32 own, ctxt;
int ret = 1;
- own = p->des3 & RDES3_OWN;
- ctxt = ((p->des3 & RDES3_CONTEXT_DESCRIPTOR)
+ own = rdes3 & RDES3_OWN;
+ ctxt = ((rdes3 & RDES3_CONTEXT_DESCRIPTOR)
>> RDES3_CONTEXT_DESCRIPTOR_SHIFT);
if (likely(!own && ctxt)) {
- if ((p->des0 == 0xffffffff) && (p->des1 == 0xffffffff))
+ if ((rdes0 == 0xffffffff) && (rdes1 == 0xffffffff))
/* Corrupted value */
ret = -EINVAL;
else
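The dwmac4 fix reads each little-endian descriptor word into a local with le32_to_cpu() before testing bits, so big-endian hosts see the same values the DMA engine wrote. A minimal userspace sketch of the same pattern (struct layout and bit positions here are illustrative, not the dwmac4 definitions):

    #include <endian.h>
    #include <stdint.h>

    struct dma_desc {                  /* device writes little-endian words */
        uint32_t des0, des1, des2, des3;
    };

    #define RDES3_OWN  (1u << 31)      /* illustrative bit positions */
    #define RDES3_CTXT (1u << 30)

    static int rx_check_timestamp(const struct dma_desc *p)
    {
        uint32_t rdes0 = le32toh(p->des0);   /* no-op on LE, swap on BE */
        uint32_t rdes1 = le32toh(p->des1);
        uint32_t rdes3 = le32toh(p->des3);

        if (!(rdes3 & RDES3_OWN) && (rdes3 & RDES3_CTXT))
            return (rdes0 == 0xffffffff && rdes1 == 0xffffffff) ? -1 : 0;
        return 1;                            /* no context descriptor yet */
    }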
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 5d85742a2be0..3c749c327cbd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -696,25 +696,27 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev,
struct ethtool_eee *edata)
{
struct stmmac_priv *priv = netdev_priv(dev);
+ int ret;
- priv->eee_enabled = edata->eee_enabled;
-
- if (!priv->eee_enabled)
+ if (!edata->eee_enabled) {
stmmac_disable_eee_mode(priv);
- else {
+ } else {
/* We are asking to enable EEE, but it is safe
* to verify everything by invoking the eee_init function.
* In case of failure it will return an error.
*/
- priv->eee_enabled = stmmac_eee_init(priv);
- if (!priv->eee_enabled)
+ edata->eee_enabled = stmmac_eee_init(priv);
+ if (!edata->eee_enabled)
return -EOPNOTSUPP;
-
- /* Do not change tx_lpi_timer in case of failure */
- priv->tx_lpi_timer = edata->tx_lpi_timer;
}
- return phy_ethtool_set_eee(dev->phydev, edata);
+ ret = phy_ethtool_set_eee(dev->phydev, edata);
+ if (ret)
+ return ret;
+
+ priv->eee_enabled = edata->eee_enabled;
+ priv->tx_lpi_timer = edata->tx_lpi_timer;
+ return 0;
}
static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
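The ethtool EEE rework above follows a transactional rule worth naming: perform the fallible PHY call first, and only commit the cached driver state once it succeeds, so a failed set leaves priv->eee_enabled and priv->tx_lpi_timer untouched. A hedged sketch of the shape, with hypothetical names:

    struct req       { int enabled; unsigned int lpi_timer; };
    struct dev_state { int enabled; unsigned int lpi_timer; };

    /* Hypothetical stand-in for phy_ethtool_set_eee(). */
    static int apply_to_hardware(const struct req *r) { (void)r; return 0; }

    static int set_eee(struct dev_state *st, const struct req *r)
    {
        int ret = apply_to_hardware(r);   /* fallible side effect first */

        if (ret)
            return ret;                   /* cached state left untouched */
        st->enabled   = r->enabled;       /* commit only on success */
        st->lpi_timer = r->lpi_timer;
        return 0;
    }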
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 1f612268c998..d847f672a705 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -259,7 +259,7 @@ static int netcp_module_probe(struct netcp_device *netcp_device,
const char *name;
char node_name[32];
- if (of_property_read_string(node, "label", &name) < 0) {
+ if (of_property_read_string(child, "label", &name) < 0) {
snprintf(node_name, sizeof(node_name), "%pOFn", child);
name = node_name;
}
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 3377ac66a347..5583d993480d 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -692,15 +692,20 @@ out:
static int geneve_open(struct net_device *dev)
{
struct geneve_dev *geneve = netdev_priv(dev);
- bool ipv6 = !!(geneve->info.mode & IP_TUNNEL_INFO_IPV6);
bool metadata = geneve->collect_md;
+ bool ipv4, ipv6;
int ret = 0;
+ ipv6 = geneve->info.mode & IP_TUNNEL_INFO_IPV6 || metadata;
+ ipv4 = !ipv6 || metadata;
#if IS_ENABLED(CONFIG_IPV6)
- if (ipv6 || metadata)
+ if (ipv6) {
ret = geneve_sock_add(geneve, true);
+ if (ret < 0 && ret != -EAFNOSUPPORT)
+ ipv4 = false;
+ }
#endif
- if (!ret && (!ipv6 || metadata))
+ if (ipv4)
ret = geneve_sock_add(geneve, false);
if (ret < 0)
geneve_sock_release(geneve);
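The reworked geneve_open() follows the usual dual-stack idiom: attempt IPv6 first, treat -EAFNOSUPPORT as "IPv6 not built in" rather than a fatal error, and still bring up IPv4. A userspace sketch of the same tolerance (the socket type is arbitrary here):

    #include <errno.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int open_dual_stack(int *fd6, int *fd4)
    {
        *fd6 = socket(AF_INET6, SOCK_DGRAM, 0);
        if (*fd6 < 0 && errno != EAFNOSUPPORT)
            return -errno;              /* real failure: give up */

        *fd4 = socket(AF_INET, SOCK_DGRAM, 0);
        if (*fd4 < 0) {
            if (*fd6 >= 0)
                close(*fd6);            /* roll back the v6 half */
            return -errno;
        }
        return 0;                       /* fd6 may be -1 if unsupported */
    }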
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 256adbd044f5..cf4897043e83 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -744,6 +744,14 @@ void netvsc_linkstatus_callback(struct net_device *net,
schedule_delayed_work(&ndev_ctx->dwork, 0);
}
+static void netvsc_comp_ipcsum(struct sk_buff *skb)
+{
+ struct iphdr *iph = (struct iphdr *)skb->data;
+
+ iph->check = 0;
+ iph->check = ip_fast_csum(iph, iph->ihl);
+}
+
static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
struct netvsc_channel *nvchan)
{
@@ -770,9 +778,17 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
/* skb is already created with CHECKSUM_NONE */
skb_checksum_none_assert(skb);
- /*
- * In Linux, the IP checksum is always checked.
- * Do L4 checksum offload if enabled and present.
+ /* Incoming packets may have the IP header checksum verified by the host.
+ * They may not have the IP header checksum computed after coalescing.
+ * We compute it here if the flags are set, because on Linux, the IP
+ * checksum is always checked.
+ */
+ if (csum_info && csum_info->receive.ip_checksum_value_invalid &&
+ csum_info->receive.ip_checksum_succeeded &&
+ skb->protocol == htons(ETH_P_IP))
+ netvsc_comp_ipcsum(skb);
+
+ /* Do L4 checksum offload if enabled and present.
*/
if (csum_info && (net->features & NETIF_F_RXCSUM)) {
if (csum_info->receive.tcp_checksum_succeeded ||
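netvsc_comp_ipcsum() zeroes iph->check and recomputes it with ip_fast_csum(), the kernel's optimized IPv4 header checksum. The underlying arithmetic is the RFC 1071 one's-complement sum; a portable equivalent for reference:

    #include <stdint.h>
    #include <stddef.h>

    /* One's-complement checksum over an IPv4 header of ihl 32-bit words.
     * The caller zeroes the checksum field first, exactly as
     * netvsc_comp_ipcsum() does before calling ip_fast_csum(). */
    static uint16_t ip_header_csum(const void *hdr, unsigned int ihl)
    {
        const uint16_t *p = hdr;
        uint32_t sum = 0;

        for (size_t i = 0; i < (size_t)ihl * 2; i++)
            sum += p[i];
        while (sum >> 16)                      /* fold carries back in */
            sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
    }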
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 7cdac77d0c68..07e41c42bcf5 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -499,6 +499,8 @@ static int ipvlan_nl_changelink(struct net_device *dev,
if (!data)
return 0;
+ if (!ns_capable(dev_net(ipvlan->phy_dev)->user_ns, CAP_NET_ADMIN))
+ return -EPERM;
if (data[IFLA_IPVLAN_MODE]) {
u16 nmode = nla_get_u16(data[IFLA_IPVLAN_MODE]);
@@ -601,6 +603,8 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev,
struct ipvl_dev *tmp = netdev_priv(phy_dev);
phy_dev = tmp->phy_dev;
+ if (!ns_capable(dev_net(phy_dev)->user_ns, CAP_NET_ADMIN))
+ return -EPERM;
} else if (!netif_is_ipvlan_port(phy_dev)) {
/* Exit early if the underlying link is invalid or busy */
if (phy_dev->type != ARPHRD_ETHER ||
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index da6a67d47ce9..56fa3606cb9c 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/phy.h>
+#include <linux/delay.h>
#include <dt-bindings/net/ti-dp83867.h>
@@ -325,6 +326,8 @@ static int dp83867_phy_reset(struct phy_device *phydev)
if (err < 0)
return err;
+ usleep_range(10, 20);
+
return dp83867_config_init(phydev);
}
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index 82ab6ed3b74e..6bac602094bd 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -26,6 +26,8 @@
#include <linux/marvell_phy.h>
#include <linux/phy.h>
+#define MDIO_AN_10GBT_CTRL_ADV_NBT_MASK 0x01e0
+
enum {
MV_PCS_BASE_T = 0x0000,
MV_PCS_BASE_R = 0x1000,
@@ -386,8 +388,10 @@ static int mv3310_config_aneg(struct phy_device *phydev)
else
reg = 0;
+ /* Make sure we clear unsupported 2.5G/5G advertising */
ret = mv3310_modify(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
- MDIO_AN_10GBT_CTRL_ADV10G, reg);
+ MDIO_AN_10GBT_CTRL_ADV10G |
+ MDIO_AN_10GBT_CTRL_ADV_NBT_MASK, reg);
if (ret < 0)
return ret;
if (ret > 0)
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 66b9cfe692fc..7368616286ae 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -379,7 +379,6 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
err = device_register(&bus->dev);
if (err) {
pr_err("mii_bus %s failed to register\n", bus->id);
- put_device(&bus->dev);
return -EINVAL;
}
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index b1f959935f50..b7df0295a3ca 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -344,6 +344,17 @@ static int ksz8041_config_aneg(struct phy_device *phydev)
return genphy_config_aneg(phydev);
}
+static int ksz8061_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_DEVID1, 0xB61A);
+ if (ret)
+ return ret;
+
+ return kszphy_config_init(phydev);
+}
+
static int ksz9021_load_values_from_of(struct phy_device *phydev,
const struct device_node *of_node,
u16 reg,
@@ -1040,7 +1051,7 @@ static struct phy_driver ksphy_driver[] = {
.name = "Micrel KSZ8061",
.phy_id_mask = MICREL_PHY_ID_MASK,
.features = PHY_BASIC_FEATURES,
- .config_init = kszphy_config_init,
+ .config_init = ksz8061_config_init,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
.suspend = genphy_suspend,
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 938803237d7f..85987aac31c4 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -320,6 +320,10 @@ static int phylink_get_mac_state(struct phylink *pl, struct phylink_link_state *
linkmode_zero(state->lp_advertising);
state->interface = pl->link_config.interface;
state->an_enabled = pl->link_config.an_enabled;
+ state->speed = SPEED_UNKNOWN;
+ state->duplex = DUPLEX_UNKNOWN;
+ state->pause = MLO_PAUSE_NONE;
+ state->an_complete = 0;
state->link = 1;
return pl->ops->mac_link_state(ndev, state);
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index c6010fb1aa0f..cb4a23041a94 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -282,6 +282,13 @@ static struct phy_driver realtek_drvs[] = {
.name = "RTL8366RB Gigabit Ethernet",
.features = PHY_GBIT_FEATURES,
.config_init = &rtl8366rb_config_init,
+ /* These interrupts are handled by the irq controller
+ * embedded inside the RTL8366RB; they get unmasked when the
+ * irq is requested and ACKed by reading the status register,
+ * which is done by the irqchip code.
+ */
+ .ack_interrupt = genphy_no_ack_interrupt,
+ .config_intr = genphy_no_config_intr,
.suspend = genphy_suspend,
.resume = genphy_resume,
},
diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c
index 74a8782313cf..bd6084e315de 100644
--- a/drivers/net/phy/xilinx_gmii2rgmii.c
+++ b/drivers/net/phy/xilinx_gmii2rgmii.c
@@ -44,7 +44,10 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev)
u16 val = 0;
int err;
- err = priv->phy_drv->read_status(phydev);
+ if (priv->phy_drv->read_status)
+ err = priv->phy_drv->read_status(phydev);
+ else
+ err = genphy_read_status(phydev);
if (err < 0)
return err;
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 958f1cf67282..6ce3f666d142 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1256,7 +1256,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
list_add_tail_rcu(&port->list, &team->port_list);
team_port_enable(team, port);
__team_compute_features(team);
- __team_port_change_port_added(port, !!netif_carrier_ok(port_dev));
+ __team_port_change_port_added(port, !!netif_oper_up(port_dev));
__team_options_change_check(team);
netdev_info(dev, "Port device %s added\n", portname);
@@ -2915,7 +2915,7 @@ static int team_device_event(struct notifier_block *unused,
switch (event) {
case NETDEV_UP:
- if (netif_carrier_ok(dev))
+ if (netif_oper_up(dev))
team_port_change_check(port, true);
break;
case NETDEV_DOWN:
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index fed298c0cb39..53f4f37b0ffd 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -2167,9 +2167,9 @@ static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
}
add_wait_queue(&tfile->wq.wait, &wait);
- current->state = TASK_INTERRUPTIBLE;
while (1) {
+ set_current_state(TASK_INTERRUPTIBLE);
ptr = ptr_ring_consume(&tfile->tx_ring);
if (ptr)
break;
@@ -2185,7 +2185,7 @@ static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
schedule();
}
- current->state = TASK_RUNNING;
+ __set_current_state(TASK_RUNNING);
remove_wait_queue(&tfile->wq.wait, &wait);
out:
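The tun change restores the canonical sleep-loop ordering: publish the task state before re-checking the condition, so a wakeup arriving between the check and schedule() is not lost, and use __set_current_state() (the barrier-free variant) when simply marking the task runnable again. The general shape, as a hedged kernel-context sketch (the condition name is hypothetical):

    add_wait_queue(&wq, &wait);
    for (;;) {
        set_current_state(TASK_INTERRUPTIBLE);  /* before the re-check */
        if (data_available())                   /* hypothetical condition */
            break;
        if (signal_pending(current))
            break;
        schedule();                             /* sleep until woken */
    }
    __set_current_state(TASK_RUNNING);          /* no barrier needed here */
    remove_wait_queue(&wq, &wait);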
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 735ad838e2ba..18af2f8eee96 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1201,8 +1201,8 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
{QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
{QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */
- {QMI_FIXED_INTF(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC7304/MC7354 */
- {QMI_FIXED_INTF(0x1199, 0x68c0, 10)}, /* Sierra Wireless MC7304/MC7354 */
+ {QMI_QUIRK_SET_DTR(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC7304/MC7354, WP76xx */
+ {QMI_QUIRK_SET_DTR(0x1199, 0x68c0, 10)},/* Sierra Wireless MC7304/MC7354 */
{QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */
{QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */
{QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 60dd1ec1665f..86c8c64fbb0f 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -557,6 +557,7 @@ enum spd_duplex {
/* MAC PASSTHRU */
#define AD_MASK 0xfee0
#define BND_MASK 0x0004
+#define BD_MASK 0x0001
#define EFUSE 0xcfdb
#define PASS_THRU_MASK 0x1
@@ -1176,9 +1177,9 @@ static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa)
return -ENODEV;
}
} else {
- /* test for RTL8153-BND */
+ /* test for RTL8153-BND and RTL8153-BD */
ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_1);
- if ((ocp_data & BND_MASK) == 0) {
+ if ((ocp_data & BND_MASK) == 0 && (ocp_data & BD_MASK) == 0) {
netif_dbg(tp, probe, tp->netdev,
"Invalid variant for MAC pass through\n");
return -ENODEV;
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 95909e262ba4..7c1430ed0244 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -1273,6 +1273,9 @@ static void vrf_setup(struct net_device *dev)
/* default to no qdisc; user can add if desired */
dev->priv_flags |= IFF_NO_QUEUE;
+
+ dev->min_mtu = 0;
+ dev->max_mtu = 0;
}
static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 320edcac4699..6359053bd0c7 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -3554,7 +3554,7 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info)
goto out_err;
}
- genlmsg_reply(skb, info);
+ res = genlmsg_reply(skb, info);
break;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
index 0e6b43bb4678..a5ea3ba495a4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -158,39 +158,49 @@ static const struct ieee80211_ops mt76x0u_ops = {
.get_txpower = mt76x02_get_txpower,
};
-static int mt76x0u_register_device(struct mt76x02_dev *dev)
+static int mt76x0u_init_hardware(struct mt76x02_dev *dev)
{
- struct ieee80211_hw *hw = dev->mt76.hw;
int err;
- err = mt76u_alloc_queues(&dev->mt76);
- if (err < 0)
- goto out_err;
-
- err = mt76u_mcu_init_rx(&dev->mt76);
- if (err < 0)
- goto out_err;
-
mt76x0_chip_onoff(dev, true, true);
- if (!mt76x02_wait_for_mac(&dev->mt76)) {
- err = -ETIMEDOUT;
- goto out_err;
- }
+
+ if (!mt76x02_wait_for_mac(&dev->mt76))
+ return -ETIMEDOUT;
err = mt76x0u_mcu_init(dev);
if (err < 0)
- goto out_err;
+ return err;
mt76x0_init_usb_dma(dev);
err = mt76x0_init_hardware(dev);
if (err < 0)
- goto out_err;
+ return err;
mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
mt76_wr(dev, MT_TXOP_CTRL_CFG,
FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) |
FIELD_PREP(MT_TXOP_EXT_CCA_DLY, 0x58));
+ return 0;
+}
+
+static int mt76x0u_register_device(struct mt76x02_dev *dev)
+{
+ struct ieee80211_hw *hw = dev->mt76.hw;
+ int err;
+
+ err = mt76u_alloc_queues(&dev->mt76);
+ if (err < 0)
+ goto out_err;
+
+ err = mt76u_mcu_init_rx(&dev->mt76);
+ if (err < 0)
+ goto out_err;
+
+ err = mt76x0u_init_hardware(dev);
+ if (err < 0)
+ goto out_err;
+
err = mt76x0_register_device(dev);
if (err < 0)
goto out_err;
@@ -301,6 +311,8 @@ static int __maybe_unused mt76x0_suspend(struct usb_interface *usb_intf,
mt76u_stop_queues(&dev->mt76);
mt76x0u_mac_stop(dev);
+ clear_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state);
+ mt76x0_chip_onoff(dev, false, false);
usb_kill_urb(usb->mcu.res.urb);
return 0;
@@ -328,7 +340,7 @@ static int __maybe_unused mt76x0_resume(struct usb_interface *usb_intf)
tasklet_enable(&usb->rx_tasklet);
tasklet_enable(&usb->tx_tasklet);
- ret = mt76x0_init_hardware(dev);
+ ret = mt76x0u_init_hardware(dev);
if (ret)
goto err;
diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c
index 0ccb021f1e78..10d580c3dea3 100644
--- a/drivers/net/xen-netback/hash.c
+++ b/drivers/net/xen-netback/hash.c
@@ -454,6 +454,8 @@ void xenvif_init_hash(struct xenvif *vif)
if (xenvif_hash_cache_size == 0)
return;
+ BUG_ON(vif->hash.cache.count);
+
spin_lock_init(&vif->hash.cache.lock);
INIT_LIST_HEAD(&vif->hash.cache.list);
}
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 182d6770f102..6da12518e693 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -153,6 +153,13 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
{
struct xenvif *vif = netdev_priv(dev);
unsigned int size = vif->hash.size;
+ unsigned int num_queues;
+
+ /* If queues are not set up internally - always return 0
+ * as the packet is going to be dropped anyway */
+ num_queues = READ_ONCE(vif->num_queues);
+ if (num_queues < 1)
+ return 0;
if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 80aae3a32c2a..f09948b009dd 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1072,11 +1072,6 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
skb_frag_size_set(&frags[i], len);
}
- /* Copied all the bits from the frag list -- free it. */
- skb_frag_list_init(skb);
- xenvif_skb_zerocopy_prepare(queue, nskb);
- kfree_skb(nskb);
-
/* Release all the original (foreign) frags. */
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
skb_frag_unref(skb, f);
@@ -1145,6 +1140,8 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
xenvif_fill_frags(queue, skb);
if (unlikely(skb_has_frag_list(skb))) {
+ struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
+ xenvif_skb_zerocopy_prepare(queue, nskb);
if (xenvif_handle_frag_list(queue, skb)) {
if (net_ratelimit())
netdev_err(queue->vif->dev,
@@ -1153,6 +1150,9 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
kfree_skb(skb);
continue;
}
+ /* Copied all the bits from the frag list -- free it. */
+ skb_frag_list_init(skb);
+ kfree_skb(nskb);
}
skb->dev = queue->vif->dev;
diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c
index c69ca95b1ad5..0f140a802137 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8b.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8b.c
@@ -693,7 +693,7 @@ static const char * const sd_a_groups[] = {
static const char * const sdxc_a_groups[] = {
"sdxc_d0_0_a", "sdxc_d13_0_a", "sdxc_d47_a", "sdxc_clk_a",
- "sdxc_cmd_a", "sdxc_d0_1_a", "sdxc_d0_13_1_a"
+ "sdxc_cmd_a", "sdxc_d0_1_a", "sdxc_d13_1_a"
};
static const char * const pcm_a_groups[] = {
diff --git a/drivers/pinctrl/qcom/pinctrl-qcs404.c b/drivers/pinctrl/qcom/pinctrl-qcs404.c
index 7aae52a09ff0..4ffd56ff809e 100644
--- a/drivers/pinctrl/qcom/pinctrl-qcs404.c
+++ b/drivers/pinctrl/qcom/pinctrl-qcs404.c
@@ -79,7 +79,7 @@ enum {
.intr_cfg_reg = 0, \
.intr_status_reg = 0, \
.intr_target_reg = 0, \
- .tile = NORTH, \
+ .tile = SOUTH, \
.mux_bit = -1, \
.pull_bit = pull, \
.drv_bit = drv, \
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index a3c20e3a8b7c..3337b1e80412 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -2009,7 +2009,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
struct Scsi_Host *host = NULL;
TW_Device_Extension *tw_dev;
unsigned long mem_addr, mem_len;
- int retval = -ENODEV;
+ int retval;
retval = pci_enable_device(pdev);
if (retval) {
@@ -2020,8 +2020,10 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
pci_set_master(pdev);
pci_try_set_mwi(pdev);
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
- dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+ retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (retval)
+ retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (retval) {
TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
retval = -ENODEV;
goto out_disable_device;
@@ -2240,8 +2242,10 @@ static int twa_resume(struct pci_dev *pdev)
pci_set_master(pdev);
pci_try_set_mwi(pdev);
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
- dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+ retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (retval)
+ retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (retval) {
TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
retval = -ENODEV;
goto out_disable_device;
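The same conversion repeats across the SCSI probe paths below: rather than OR-ing the two dma_set_mask_and_coherent() calls and collapsing every failure into a hard-coded -ENODEV, try the 64-bit mask, fall back to 32-bit, and keep the real return code for the caller. A hedged kernel-context sketch of the shared shape:

    static int example_set_dma_mask(struct device *dev)
    {
        int ret;

        ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
        if (ret)        /* 64-bit addressing unusable on this platform */
            ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
        if (ret)
            dev_err(dev, "no suitable DMA mask\n");
        return ret;     /* propagate what the DMA layer returned */
    }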
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index cd096104bcec..dda6fa857709 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -1573,8 +1573,10 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
pci_set_master(pdev);
pci_try_set_mwi(pdev);
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
- dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+ retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (retval)
+ retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (retval) {
TW_PRINTK(host, TW_DRIVER, 0x18, "Failed to set dma mask");
retval = -ENODEV;
goto out_disable_device;
@@ -1805,8 +1807,10 @@ static int twl_resume(struct pci_dev *pdev)
pci_set_master(pdev);
pci_try_set_mwi(pdev);
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
- dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+ retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (retval)
+ retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (retval) {
TW_PRINTK(host, TW_DRIVER, 0x25, "Failed to set dma mask during resume");
retval = -ENODEV;
goto out_disable_device;
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 07efcb9b5b94..bbdae67774f0 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -769,9 +769,11 @@ static int asd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (err)
goto Err_remove;
- err = -ENODEV;
- if (dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64)) ||
- dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32))) {
+ err = dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64));
+ if (err)
+ err = dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ err = -ENODEV;
asd_printk("no suitable DMA mask for %s\n", pci_name(dev));
goto Err_remove;
}
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 42a0caf6740d..88880a66a189 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -727,7 +727,7 @@ bfad_init_timer(struct bfad_s *bfad)
int
bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
{
- int rc = -ENODEV;
+ int rc = -ENODEV;
if (pci_enable_device(pdev)) {
printk(KERN_ERR "pci_enable_device fail %p\n", pdev);
@@ -739,8 +739,12 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
pci_set_master(pdev);
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
- dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (rc)
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+
+ if (rc) {
+ rc = -ENODEV;
printk(KERN_ERR "dma_set_mask_and_coherent fail %p\n", pdev);
goto out_release_region;
}
@@ -1534,6 +1538,7 @@ bfad_pci_slot_reset(struct pci_dev *pdev)
{
struct bfad_s *bfad = pci_get_drvdata(pdev);
u8 byte;
+ int rc;
dev_printk(KERN_ERR, &pdev->dev,
"bfad_pci_slot_reset flags: 0x%x\n", bfad->bfad_flags);
@@ -1561,8 +1566,11 @@ bfad_pci_slot_reset(struct pci_dev *pdev)
pci_save_state(pdev);
pci_set_master(pdev);
- if (dma_set_mask_and_coherent(&bfad->pcidev->dev, DMA_BIT_MASK(64)) ||
- dma_set_mask_and_coherent(&bfad->pcidev->dev, DMA_BIT_MASK(32)))
+ rc = dma_set_mask_and_coherent(&bfad->pcidev->dev, DMA_BIT_MASK(64));
+ if (rc)
+ rc = dma_set_mask_and_coherent(&bfad->pcidev->dev,
+ DMA_BIT_MASK(32));
+ if (rc)
goto out_disable_device;
if (restart_bfa(bfad) == -1)
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index cf629380a981..616b25bf7941 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -210,8 +210,11 @@ csio_pci_init(struct pci_dev *pdev, int *bars)
pci_set_master(pdev);
pci_try_set_mwi(pdev);
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
- dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+ rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (rv)
+ rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (rv) {
+ rv = -ENODEV;
dev_err(&pdev->dev, "No suitable DMA available.\n");
goto err_release_regions;
}
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index eed7fc5b3389..bc17fa0d8375 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -2323,6 +2323,7 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
struct Scsi_Host *shost;
struct hisi_hba *hisi_hba;
struct device *dev = &pdev->dev;
+ int error;
shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba));
if (!shost) {
@@ -2343,8 +2344,11 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
if (hisi_sas_get_fw_info(hisi_hba) < 0)
goto err_out;
- if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
- dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
+ error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (error)
+ error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+
+ if (error) {
dev_err(dev, "No usable DMA addressing method\n");
goto err_out;
}
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index c92b3822c408..e0570fd8466e 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -2447,10 +2447,12 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (rc)
goto err_out_disable_device;
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
- dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (rc)
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (rc) {
dev_err(dev, "No usable DMA addressing method\n");
- rc = -EIO;
+ rc = -ENODEV;
goto err_out_regions;
}
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index 3eedfd4f8f57..251c084a6ff0 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -1292,6 +1292,7 @@ static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
dma_addr_t start_phy;
void *start_virt;
u32 offset, i, req_size;
+ int rc;
dprintk("hptiop_probe(%p)\n", pcidev);
@@ -1308,9 +1309,12 @@ static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
/* Enable 64bit DMA if possible */
iop_ops = (struct hptiop_adapter_ops *)id->driver_data;
- if (dma_set_mask(&pcidev->dev,
- DMA_BIT_MASK(iop_ops->hw_dma_bit_mask)) ||
- dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32))) {
+ rc = dma_set_mask(&pcidev->dev,
+ DMA_BIT_MASK(iop_ops->hw_dma_bit_mask));
+ if (rc)
+ rc = dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32));
+
+ if (rc) {
printk(KERN_ERR "hptiop: fail to set dma_mask\n");
goto disable_pci_device;
}
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index b8d325ce8754..120fc520f27a 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1459,7 +1459,13 @@ static int iscsi_xmit_task(struct iscsi_conn *conn)
if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx))
return -ENODATA;
+ spin_lock_bh(&conn->session->back_lock);
+ if (conn->task == NULL) {
+ spin_unlock_bh(&conn->session->back_lock);
+ return -ENODATA;
+ }
__iscsi_get_task(task);
+ spin_unlock_bh(&conn->session->back_lock);
spin_unlock_bh(&conn->session->frwd_lock);
rc = conn->session->tt->xmit_task(task);
spin_lock_bh(&conn->session->frwd_lock);
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 17eb4185f29d..f21c93bbb35c 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -828,6 +828,7 @@ static struct domain_device *sas_ex_discover_end_dev(
rphy = sas_end_device_alloc(phy->port);
if (!rphy)
goto out_free;
+ rphy->identify.phy_identifier = phy_id;
child->rphy = rphy;
get_device(&rphy->dev);
@@ -854,6 +855,7 @@ static struct domain_device *sas_ex_discover_end_dev(
child->rphy = rphy;
get_device(&rphy->dev);
+ rphy->identify.phy_identifier = phy_id;
sas_fill_in_rphy(child, rphy);
list_add_tail(&child->disco_list_node, &parent->port->disco_list);
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index bede11e16349..e1129260ed18 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -7361,15 +7361,18 @@ lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
unsigned long bar0map_len, bar2map_len;
int i, hbq_count;
void *ptr;
- int error = -ENODEV;
+ int error;
if (!pdev)
- return error;
+ return -ENODEV;
/* Set the device DMA mask size */
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
- dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
+ error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (error)
+ error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (error)
return error;
+ error = -ENODEV;
/* Get the bus address of Bar0 and Bar2 and the number of bytes
* required by each mapping.
@@ -9742,11 +9745,13 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
uint32_t if_type;
if (!pdev)
- return error;
+ return -ENODEV;
/* Set the device DMA mask size */
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
- dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
+ error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (error)
+ error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (error)
return error;
/*
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 6d65ac584eba..a6828391d6b3 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -655,6 +655,7 @@ static blk_status_t scsi_result_to_blk_status(struct scsi_cmnd *cmd, int result)
set_host_byte(cmd, DID_OK);
return BLK_STS_TARGET;
case DID_NEXUS_FAILURE:
+ set_host_byte(cmd, DID_OK);
return BLK_STS_NEXUS;
case DID_ALLOC_FAILURE:
set_host_byte(cmd, DID_OK);
@@ -2597,7 +2598,6 @@ void scsi_device_resume(struct scsi_device *sdev)
* device deleted during suspend)
*/
mutex_lock(&sdev->state_mutex);
- WARN_ON_ONCE(!sdev->quiesced_by);
sdev->quiesced_by = NULL;
blk_clear_pm_only(sdev->request_queue);
if (sdev->sdev_state == SDEV_QUIESCE)
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index fff86940388b..a340af797a85 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -142,10 +142,12 @@ int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
return -EOPNOTSUPP;
/*
- * Get a reply buffer for the number of requested zones plus a header.
- * For ATA, buffers must be aligned to 512B.
+ * Get a reply buffer for the number of requested zones plus a header,
+ * without exceeding the device maximum command size. For ATA disks,
+ * buffers must be aligned to 512B.
*/
- buflen = roundup((nrz + 1) * 64, 512);
+ buflen = min(queue_max_hw_sectors(disk->queue) << 9,
+ roundup((nrz + 1) * 64, 512));
buf = kmalloc(buflen, gfp_mask);
if (!buf)
return -ENOMEM;
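The new sizing rule reads as: round the reply buffer up for alignment, then clamp it to what the device can move in a single command. A standalone sketch of the arithmetic (constants as in the hunk: 64-byte zone descriptors, 512-byte ATA alignment):

    #include <stddef.h>

    /* Bytes to allocate for nrz zone descriptors plus one header,
     * 512-byte aligned, capped at max_hw_sectors (512-byte units). */
    static size_t zbc_buflen(unsigned int nrz, unsigned int max_hw_sectors)
    {
        size_t want = ((size_t)(nrz + 1) * 64 + 511) & ~(size_t)511;
        size_t cap  = (size_t)max_hw_sectors << 9;   /* sectors to bytes */

        return want < cap ? want : cap;
    }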
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index e5efce3c08e2..947f9b28de9e 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -699,8 +699,10 @@ static int __init optee_driver_init(void)
return -ENODEV;
np = of_find_matching_node(fw_np, optee_match);
- if (!np || !of_device_is_available(np))
+ if (!np || !of_device_is_available(np)) {
+ of_node_put(np);
return -ENODEV;
+ }
optee = optee_probe(np);
of_node_put(np);
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 24a129fcdd61..a2e5dc7716e2 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -1788,7 +1788,7 @@ static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
ret = translate_desc(vq, (uintptr_t)vq->used + used_offset,
len, iov, 64, VHOST_ACCESS_WO);
- if (ret)
+ if (ret < 0)
return ret;
for (i = 0; i < ret; i++) {
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index cf445dbd5f2e..9de46116c749 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -173,6 +173,7 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
rcu_assign_pointer(cell->vl_servers, vllist);
cell->dns_expiry = TIME64_MAX;
+ __clear_bit(AFS_CELL_FL_NO_LOOKUP_YET, &cell->flags);
} else {
cell->dns_expiry = ktime_get_real_seconds();
}
diff --git a/fs/binfmt_script.c b/fs/binfmt_script.c
index 7cde3f46ad26..e996174cbfc0 100644
--- a/fs/binfmt_script.c
+++ b/fs/binfmt_script.c
@@ -14,13 +14,30 @@
#include <linux/err.h>
#include <linux/fs.h>
+static inline bool spacetab(char c) { return c == ' ' || c == '\t'; }
+static inline char *next_non_spacetab(char *first, const char *last)
+{
+ for (; first <= last; first++)
+ if (!spacetab(*first))
+ return first;
+ return NULL;
+}
+static inline char *next_terminator(char *first, const char *last)
+{
+ for (; first <= last; first++)
+ if (spacetab(*first) || !*first)
+ return first;
+ return NULL;
+}
+
static int load_script(struct linux_binprm *bprm)
{
const char *i_arg, *i_name;
- char *cp;
+ char *cp, *buf_end;
struct file *file;
int retval;
+ /* Not ours to exec if we don't start with "#!". */
if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!'))
return -ENOEXEC;
@@ -33,18 +50,40 @@ static int load_script(struct linux_binprm *bprm)
if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE)
return -ENOENT;
- /*
- * This section does the #! interpretation.
- * Sorta complicated, but hopefully it will work. -TYT
- */
-
+ /* Release since we are not mapping a binary into memory. */
allow_write_access(bprm->file);
fput(bprm->file);
bprm->file = NULL;
- bprm->buf[BINPRM_BUF_SIZE - 1] = '\0';
- if ((cp = strchr(bprm->buf, '\n')) == NULL)
- cp = bprm->buf+BINPRM_BUF_SIZE-1;
+ /*
+ * This section handles parsing the #! line into separate
+ * interpreter path and argument strings. We must be careful
+ * because bprm->buf is not yet guaranteed to be NUL-terminated
+ * (though the buffer will have trailing NUL padding when the
+ * file size was smaller than the buffer size).
+ *
+ * We do not want to exec a truncated interpreter path, so either
+ * we find a newline (which indicates nothing is truncated), or
+ * we find a space/tab/NUL after the interpreter path (which
+ * itself may be preceded by spaces/tabs). Truncating the
+ * arguments is fine: the interpreter can re-read the script to
+ * parse them on its own.
+ */
+ buf_end = bprm->buf + sizeof(bprm->buf) - 1;
+ cp = strnchr(bprm->buf, sizeof(bprm->buf), '\n');
+ if (!cp) {
+ cp = next_non_spacetab(bprm->buf + 2, buf_end);
+ if (!cp)
+ return -ENOEXEC; /* Entire buf is spaces/tabs */
+ /*
+ * If there is no later space/tab/NUL we must assume the
+ * interpreter path is truncated.
+ */
+ if (!next_terminator(cp, buf_end))
+ return -ENOEXEC;
+ cp = buf_end;
+ }
+ /* NUL-terminate the buffer and any trailing spaces/tabs. */
*cp = '\0';
while (cp > bprm->buf) {
cp--;
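A compilable condensation of the new acceptance rule: a "#!" line is safe to execute only if the buffer contains a newline, or the interpreter path is followed by a space/tab/NUL terminator; otherwise the path may be truncated (BUF_SIZE here stands in for BINPRM_BUF_SIZE):

    #include <stdbool.h>
    #include <string.h>

    #define BUF_SIZE 128                 /* stand-in for BINPRM_BUF_SIZE */

    static char *next_non_spacetab(char *first, const char *last)
    {
        for (; first <= last; first++)
            if (*first != ' ' && *first != '\t')
                return first;
        return NULL;
    }

    static char *next_terminator(char *first, const char *last)
    {
        for (; first <= last; first++)
            if (*first == ' ' || *first == '\t' || !*first)
                return first;
        return NULL;
    }

    /* True when the interpreter path is provably complete. */
    static bool shebang_complete(char buf[BUF_SIZE])
    {
        char *end = buf + BUF_SIZE - 1, *cp;

        if (strncmp(buf, "#!", 2) != 0)
            return false;                /* not ours to exec */
        if (memchr(buf, '\n', BUF_SIZE))
            return true;                 /* whole line fits */
        cp = next_non_spacetab(buf + 2, end);
        if (!cp)
            return false;                /* nothing but blanks */
        return next_terminator(cp, end) != NULL;
    }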
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 041c27ea8de1..f74193da0e09 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -616,7 +616,8 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
capsnap->size);
spin_lock(&mdsc->snap_flush_lock);
- list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
+ if (list_empty(&ci->i_snap_flush_item))
+ list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
spin_unlock(&mdsc->snap_flush_lock);
return 1; /* caller may want to ceph_flush_snaps */
}
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 32920a10100e..a7fa037b876b 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -859,6 +859,18 @@ static int hugetlbfs_migrate_page(struct address_space *mapping,
rc = migrate_huge_page_move_mapping(mapping, newpage, page);
if (rc != MIGRATEPAGE_SUCCESS)
return rc;
+
+ /*
+ * page_private is the subpool pointer in hugetlb pages. Transfer
+ * it to the new page. PagePrivate is not associated with
+ * page_private for hugetlb pages and cannot be set here, as only
+ * page_huge_active pages can be migrated.
+ */
+ if (page_private(page)) {
+ set_page_private(newpage, page_private(page));
+ set_page_private(page, 0);
+ }
+
if (mode != MIGRATE_SYNC_NO_COPY)
migrate_page_copy(newpage, page);
else
diff --git a/fs/namespace.c b/fs/namespace.c
index a677b59efd74..678ef175d63a 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -2698,7 +2698,6 @@ static long exact_copy_from_user(void *to, const void __user * from,
if (!access_ok(from, n))
return n;
- current->kernel_uaccess_faults_ok++;
while (n) {
if (__get_user(c, f)) {
memset(t, 0, n);
@@ -2708,7 +2707,6 @@ static long exact_copy_from_user(void *to, const void __user * from,
f++;
n--;
}
- current->kernel_uaccess_faults_ok--;
return n;
}
diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c
index 3f23b6840547..bf34ddaa2ad7 100644
--- a/fs/nfs/nfs4idmap.c
+++ b/fs/nfs/nfs4idmap.c
@@ -44,6 +44,7 @@
#include <linux/keyctl.h>
#include <linux/key-type.h>
#include <keys/user-type.h>
+#include <keys/request_key_auth-type.h>
#include <linux/module.h>
#include "internal.h"
@@ -59,7 +60,7 @@ static struct key_type key_type_id_resolver_legacy;
struct idmap_legacy_upcalldata {
struct rpc_pipe_msg pipe_msg;
struct idmap_msg idmap_msg;
- struct key_construction *key_cons;
+ struct key *authkey;
struct idmap *idmap;
};
@@ -384,7 +385,7 @@ static const match_table_t nfs_idmap_tokens = {
{ Opt_find_err, NULL }
};
-static int nfs_idmap_legacy_upcall(struct key_construction *, const char *, void *);
+static int nfs_idmap_legacy_upcall(struct key *, void *);
static ssize_t idmap_pipe_downcall(struct file *, const char __user *,
size_t);
static void idmap_release_pipe(struct inode *);
@@ -549,11 +550,12 @@ nfs_idmap_prepare_pipe_upcall(struct idmap *idmap,
static void
nfs_idmap_complete_pipe_upcall_locked(struct idmap *idmap, int ret)
{
- struct key_construction *cons = idmap->idmap_upcall_data->key_cons;
+ struct key *authkey = idmap->idmap_upcall_data->authkey;
kfree(idmap->idmap_upcall_data);
idmap->idmap_upcall_data = NULL;
- complete_request_key(cons, ret);
+ complete_request_key(authkey, ret);
+ key_put(authkey);
}
static void
@@ -563,15 +565,14 @@ nfs_idmap_abort_pipe_upcall(struct idmap *idmap, int ret)
nfs_idmap_complete_pipe_upcall_locked(idmap, ret);
}
-static int nfs_idmap_legacy_upcall(struct key_construction *cons,
- const char *op,
- void *aux)
+static int nfs_idmap_legacy_upcall(struct key *authkey, void *aux)
{
struct idmap_legacy_upcalldata *data;
+ struct request_key_auth *rka = get_request_key_auth(authkey);
struct rpc_pipe_msg *msg;
struct idmap_msg *im;
struct idmap *idmap = (struct idmap *)aux;
- struct key *key = cons->key;
+ struct key *key = rka->target_key;
int ret = -ENOKEY;
if (!aux)
@@ -586,7 +587,7 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons,
msg = &data->pipe_msg;
im = &data->idmap_msg;
data->idmap = idmap;
- data->key_cons = cons;
+ data->authkey = key_get(authkey);
ret = nfs_idmap_prepare_message(key->description, idmap, im, msg);
if (ret < 0)
@@ -604,7 +605,7 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons,
out2:
kfree(data);
out1:
- complete_request_key(cons, ret);
+ complete_request_key(authkey, ret);
return ret;
}
@@ -651,9 +652,10 @@ out:
static ssize_t
idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
{
+ struct request_key_auth *rka;
struct rpc_inode *rpci = RPC_I(file_inode(filp));
struct idmap *idmap = (struct idmap *)rpci->private;
- struct key_construction *cons;
+ struct key *authkey;
struct idmap_msg im;
size_t namelen_in;
int ret = -ENOKEY;
@@ -665,7 +667,8 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
if (idmap->idmap_upcall_data == NULL)
goto out_noupcall;
- cons = idmap->idmap_upcall_data->key_cons;
+ authkey = idmap->idmap_upcall_data->authkey;
+ rka = get_request_key_auth(authkey);
if (mlen != sizeof(im)) {
ret = -ENOSPC;
@@ -690,9 +693,9 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
ret = nfs_idmap_read_and_verify_message(&im,
&idmap->idmap_upcall_data->idmap_msg,
- cons->key, cons->authkey);
+ rka->target_key, authkey);
if (ret >= 0) {
- key_set_timeout(cons->key, nfs_idmap_cache_timeout);
+ key_set_timeout(rka->target_key, nfs_idmap_cache_timeout);
ret = mlen;
}
diff --git a/fs/orangefs/file.c b/fs/orangefs/file.c
index a5a2fe76568f..b094d3d79354 100644
--- a/fs/orangefs/file.c
+++ b/fs/orangefs/file.c
@@ -398,8 +398,6 @@ static ssize_t orangefs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter
loff_t pos = iocb->ki_pos;
ssize_t rc = 0;
- BUG_ON(iocb->private);
-
gossip_debug(GOSSIP_FILE_DEBUG, "orangefs_file_read_iter\n");
orangefs_stats.reads++;
@@ -416,8 +414,6 @@ static ssize_t orangefs_file_write_iter(struct kiocb *iocb, struct iov_iter *ite
loff_t pos;
ssize_t rc;
- BUG_ON(iocb->private);
-
gossip_debug(GOSSIP_FILE_DEBUG, "orangefs_file_write_iter\n");
inode_lock(file->f_mapping->host);
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 633a63462573..f5ed9512d193 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1086,10 +1086,6 @@ static int __set_oom_adj(struct file *file, int oom_adj, bool legacy)
task_lock(p);
if (!p->vfork_done && process_shares_mm(p, mm)) {
- pr_info("updating oom_score_adj for %d (%s) from %d to %d because it shares mm with %d (%s). Report if this is unexpected.\n",
- task_pid_nr(p), p->comm,
- p->signal->oom_score_adj, oom_adj,
- task_pid_nr(task), task->comm);
p->signal->oom_score_adj = oom_adj;
if (!legacy && has_capability_noaudit(current, CAP_SYS_RESOURCE))
p->signal->oom_score_adj_min = (short)oom_adj;
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 2711cdfa0c13..97ce790a5b5a 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -560,6 +560,8 @@
# define DP_TEST_LINK_EDID_READ (1 << 2)
# define DP_TEST_LINK_PHY_TEST_PATTERN (1 << 3) /* DPCD >= 1.1 */
# define DP_TEST_LINK_FAUX_PATTERN (1 << 4) /* DPCD >= 1.2 */
+# define DP_TEST_LINK_AUDIO_PATTERN (1 << 5) /* DPCD >= 1.2 */
+# define DP_TEST_LINK_AUDIO_DISABLED_VIDEO (1 << 6) /* DPCD >= 1.2 */
#define DP_TEST_LINK_RATE 0x219
# define DP_LINK_RATE_162 (0x6)
@@ -608,6 +610,7 @@
# define DP_COLOR_FORMAT_RGB (0 << 1)
# define DP_COLOR_FORMAT_YCbCr422 (1 << 1)
# define DP_COLOR_FORMAT_YCbCr444 (2 << 1)
+# define DP_TEST_DYNAMIC_RANGE_VESA (0 << 3)
# define DP_TEST_DYNAMIC_RANGE_CEA (1 << 3)
# define DP_TEST_YCBCR_COEFFICIENTS (1 << 4)
# define DP_YCBCR_COEFFICIENTS_ITU601 (0 << 4)
@@ -657,6 +660,16 @@
#define DP_TEST_SINK 0x270
# define DP_TEST_SINK_START (1 << 0)
+#define DP_TEST_AUDIO_MODE 0x271
+#define DP_TEST_AUDIO_PATTERN_TYPE 0x272
+#define DP_TEST_AUDIO_PERIOD_CH1 0x273
+#define DP_TEST_AUDIO_PERIOD_CH2 0x274
+#define DP_TEST_AUDIO_PERIOD_CH3 0x275
+#define DP_TEST_AUDIO_PERIOD_CH4 0x276
+#define DP_TEST_AUDIO_PERIOD_CH5 0x277
+#define DP_TEST_AUDIO_PERIOD_CH6 0x278
+#define DP_TEST_AUDIO_PERIOD_CH7 0x279
+#define DP_TEST_AUDIO_PERIOD_CH8 0x27A
#define DP_FEC_STATUS 0x280 /* 1.4 */
# define DP_FEC_DECODE_EN_DETECTED (1 << 0)
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index e81bce2698e3..3224abb1535c 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -770,7 +770,7 @@ static inline bool drm_dev_is_unplugged(struct drm_device *dev)
*
* Returns true if the @feature is supported, false otherwise.
*/
-static inline bool drm_core_check_feature(struct drm_device *dev, u32 feature)
+static inline bool drm_core_check_feature(const struct drm_device *dev, u32 feature)
{
return dev->driver->driver_features & dev->driver_features & feature;
}
diff --git a/include/keys/request_key_auth-type.h b/include/keys/request_key_auth-type.h
new file mode 100644
index 000000000000..a726dd3f1dc6
--- /dev/null
+++ b/include/keys/request_key_auth-type.h
@@ -0,0 +1,36 @@
+/* request_key authorisation token key type
+ *
+ * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _KEYS_REQUEST_KEY_AUTH_TYPE_H
+#define _KEYS_REQUEST_KEY_AUTH_TYPE_H
+
+#include <linux/key.h>
+
+/*
+ * Authorisation record for request_key().
+ */
+struct request_key_auth {
+ struct key *target_key;
+ struct key *dest_keyring;
+ const struct cred *cred;
+ void *callout_info;
+ size_t callout_len;
+ pid_t pid;
+ char op[8];
+} __randomize_layout;
+
+static inline struct request_key_auth *get_request_key_auth(const struct key *key)
+{
+ return key->payload.data[0];
+}
+
+
+#endif /* _KEYS_REQUEST_KEY_AUTH_TYPE_H */
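With the key_construction record gone, an upcall actor receives the authorisation key directly and reaches the key under construction through its payload. A hedged sketch of the new calling convention (do_upcall() is a hypothetical stand-in for the subsystem's own work):

    static int example_request_key_actor(struct key *authkey, void *aux)
    {
        struct request_key_auth *rka = get_request_key_auth(authkey);
        struct key *target = rka->target_key;   /* key being constructed */
        int err;

        err = do_upcall(target, aux);           /* hypothetical helper */
        complete_request_key(authkey, err);     /* report the outcome */
        return err;
    }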
diff --git a/include/keys/user-type.h b/include/keys/user-type.h
index e098cbe27db5..12babe991594 100644
--- a/include/keys/user-type.h
+++ b/include/keys/user-type.h
@@ -31,7 +31,7 @@
struct user_key_payload {
struct rcu_head rcu; /* RCU destructor */
unsigned short datalen; /* length of this data */
- char data[0]; /* actual data */
+ char data[0] __aligned(__alignof__(u64)); /* actual data */
};
extern struct key_type key_type_user;
diff --git a/include/linux/key-type.h b/include/linux/key-type.h
index bc9af551fc83..e49d1de0614e 100644
--- a/include/linux/key-type.h
+++ b/include/linux/key-type.h
@@ -21,15 +21,6 @@ struct kernel_pkey_query;
struct kernel_pkey_params;
/*
- * key under-construction record
- * - passed to the request_key actor if supplied
- */
-struct key_construction {
- struct key *key; /* key being constructed */
- struct key *authkey;/* authorisation for key being constructed */
-};
-
-/*
* Pre-parsed payload, used by key add, update and instantiate.
*
* This struct will be cleared and data and datalen will be set with the data
@@ -50,8 +41,7 @@ struct key_preparsed_payload {
time64_t expiry; /* Expiry time of key */
} __randomize_layout;
-typedef int (*request_key_actor_t)(struct key_construction *key,
- const char *op, void *aux);
+typedef int (*request_key_actor_t)(struct key *auth_key, void *aux);
/*
* Preparsed matching criterion.
@@ -181,20 +171,20 @@ extern int key_instantiate_and_link(struct key *key,
const void *data,
size_t datalen,
struct key *keyring,
- struct key *instkey);
+ struct key *authkey);
extern int key_reject_and_link(struct key *key,
unsigned timeout,
unsigned error,
struct key *keyring,
- struct key *instkey);
-extern void complete_request_key(struct key_construction *cons, int error);
+ struct key *authkey);
+extern void complete_request_key(struct key *authkey, int error);
static inline int key_negate_and_link(struct key *key,
unsigned timeout,
struct key *keyring,
- struct key *instkey)
+ struct key *authkey)
{
- return key_reject_and_link(key, timeout, ENOKEY, keyring, instkey);
+ return key_reject_and_link(key, timeout, ENOKEY, keyring, authkey);
}
extern int generic_key_instantiate(struct key *key, struct key_preparsed_payload *prep);
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 2b2a6dce1630..4c76fe2c8488 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -11,6 +11,8 @@
#define _LINUX_NETDEV_FEATURES_H
#include <linux/types.h>
+#include <linux/bitops.h>
+#include <asm/byteorder.h>
typedef u64 netdev_features_t;
@@ -154,8 +156,26 @@ enum {
#define NETIF_F_HW_TLS_TX __NETIF_F(HW_TLS_TX)
#define NETIF_F_HW_TLS_RX __NETIF_F(HW_TLS_RX)
-#define for_each_netdev_feature(mask_addr, bit) \
- for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT)
+/* Finds the highest-numbered set feature bit in the range start down to 0.
+ */
+static inline int find_next_netdev_feature(u64 feature, unsigned long start)
+{
+ /* like BITMAP_LAST_WORD_MASK() for u64
+ * this sets the most significant (64 - start) bits to 0.
+ */
+ feature &= ~0ULL >> (-start & ((sizeof(feature) * 8) - 1));
+
+ return fls64(feature) - 1;
+}
+
+/* This goes from the MSB to the LSB through the set feature bits;
+ * mask_addr should be a u64 and bit an int.
+ */
+#define for_each_netdev_feature(mask_addr, bit) \
+ for ((bit) = find_next_netdev_feature((mask_addr), \
+ NETDEV_FEATURE_COUNT); \
+ (bit) >= 0; \
+ (bit) = find_next_netdev_feature((mask_addr), (bit) - 1))
/* Features valid for ethtool to change */
/* = all defined minus driver/device-class-related */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 86dbb3e29139..848b54b7ec91 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3861,7 +3861,7 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
if (debug_value == 0) /* no output */
return 0;
/* set low N bits */
- return (1 << debug_value) - 1;
+ return (1U << debug_value) - 1;
}
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
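The 1U in netif_msg_init() matters at debug_value == 31: left-shifting a signed 1 by 31 is undefined behaviour in C, while the unsigned literal keeps the mask computation well defined. A standalone illustration:

    #include <stdint.h>
    #include <stdio.h>

    /* Mask with the low n bits set, valid for n in [0, 31]. */
    static uint32_t low_bits(unsigned int n)
    {
        return (1U << n) - 1;   /* with a signed 1, n == 31 would be UB */
    }

    int main(void)
    {
        printf("%08x\n", low_bits(4));    /* 0000000f */
        printf("%08x\n", low_bits(31));   /* 7fffffff */
        return 0;
    }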
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 127fcc9c3778..333b56d8f746 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -992,6 +992,14 @@ static inline int genphy_no_soft_reset(struct phy_device *phydev)
{
return 0;
}
+static inline int genphy_no_ack_interrupt(struct phy_device *phydev)
+{
+ return 0;
+}
+static inline int genphy_no_config_intr(struct phy_device *phydev)
+{
+ return 0;
+}
int genphy_read_mmd_unsupported(struct phy_device *phdev, int devad,
u16 regnum);
int genphy_write_mmd_unsupported(struct phy_device *phdev, int devnum,
diff --git a/include/linux/sched.h b/include/linux/sched.h
index bba3afb4e9bf..f9b43c989577 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -739,12 +739,6 @@ struct task_struct {
unsigned use_memdelay:1;
#endif
- /*
- * May usercopy functions fault on kernel addresses?
- * This is not just a single bit because this can potentially nest.
- */
- unsigned int kernel_uaccess_faults_ok;
-
unsigned long atomic_flags; /* Flags requiring atomic access. */
struct restart_block restart_block;
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 95d25b010a25..bdb9563c64a0 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2434,7 +2434,7 @@ static inline void skb_probe_transport_header(struct sk_buff *skb,
if (skb_flow_dissect_flow_keys_basic(skb, &keys, NULL, 0, 0, 0, 0))
skb_set_transport_header(skb, keys.control.thoff);
- else
+ else if (offset_hint >= 0)
skb_set_transport_header(skb, offset_hint);
}
@@ -4212,6 +4212,12 @@ static inline bool skb_is_gso_sctp(const struct sk_buff *skb)
return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP;
}
+static inline bool skb_is_gso_tcp(const struct sk_buff *skb)
+{
+ return skb_is_gso(skb) &&
+ skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6);
+}
+
static inline void skb_gso_reset(struct sk_buff *skb)
{
skb_shinfo(skb)->gso_size = 0;
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index cb462f9ab7dd..e0348cb0a1dd 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -57,6 +57,25 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
if (!skb_partial_csum_set(skb, start, off))
return -EINVAL;
+ } else {
+ /* gso packets without NEEDS_CSUM do not set transport_offset.
+ * probe and drop if it does not match one of the above types.
+ */
+ if (gso_type && skb->network_header) {
+ if (!skb->protocol)
+ virtio_net_hdr_set_proto(skb, hdr);
+retry:
+ skb_probe_transport_header(skb, -1);
+ if (!skb_transport_header_was_set(skb)) {
+ /* UFO does not specify ipv4 or 6: try both */
+ if (gso_type & SKB_GSO_UDP &&
+ skb->protocol == htons(ETH_P_IP)) {
+ skb->protocol = htons(ETH_P_IPV6);
+ goto retry;
+ }
+ return -EINVAL;
+ }
+ }
}
if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
diff --git a/include/net/icmp.h b/include/net/icmp.h
index 6ac3a5bd0117..e0f709d26dde 100644
--- a/include/net/icmp.h
+++ b/include/net/icmp.h
@@ -22,6 +22,7 @@
#include <net/inet_sock.h>
#include <net/snmp.h>
+#include <net/ip.h>
struct icmp_err {
int errno;
@@ -39,7 +40,13 @@ struct net_proto_family;
struct sk_buff;
struct net;
-void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info);
+void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
+ const struct ip_options *opt);
+static inline void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
+{
+ __icmp_send(skb_in, type, code, info, &IPCB(skb_in)->opt);
+}
+
int icmp_rcv(struct sk_buff *skb);
int icmp_err(struct sk_buff *skb, u32 info);
int icmp_init(void);
diff --git a/include/net/ip.h b/include/net/ip.h
index 8866bfce6121..be3cad9c2e4c 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -667,6 +667,8 @@ static inline int ip_options_echo(struct net *net, struct ip_options *dopt,
}
void ip_options_fragment(struct sk_buff *skb);
+int __ip_options_compile(struct net *net, struct ip_options *opt,
+ struct sk_buff *skb, __be32 *info);
int ip_options_compile(struct net *net, struct ip_options *opt,
struct sk_buff *skb);
int ip_options_get(struct net *net, struct ip_options_rcu **optp,
@@ -716,7 +718,7 @@ extern int sysctl_icmp_msgs_burst;
int ip_misc_proc_init(void);
#endif
-int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto,
+int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto, u8 family,
struct netlink_ext_ack *extack);
#endif /* _IP_H */
diff --git a/include/net/phonet/pep.h b/include/net/phonet/pep.h
index b669fe6dbc3b..98f31c7ea23d 100644
--- a/include/net/phonet/pep.h
+++ b/include/net/phonet/pep.h
@@ -63,10 +63,11 @@ struct pnpipehdr {
u8 state_after_reset; /* reset request */
u8 error_code; /* any response */
u8 pep_type; /* status indication */
- u8 data[1];
+ u8 data0; /* anything else */
};
+ u8 data[];
};
-#define other_pep_type data[1]
+#define other_pep_type data[0]
static inline struct pnpipehdr *pnp_hdr(struct sk_buff *skb)
{
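Replacing the trailing data[1] with a C99 flexible array member stops sizeof from counting a phantom byte and lets bounds checkers see the real extent of the payload. A minimal userspace sketch of the allocation pattern:

    #include <stdlib.h>
    #include <string.h>

    struct msg {
        unsigned char type;
        unsigned char data[];        /* flexible array member */
    };

    static struct msg *msg_alloc(const void *payload, size_t len)
    {
        struct msg *m = malloc(sizeof(*m) + len);  /* header + payload */

        if (m) {
            m->type = 0;
            memcpy(m->data, payload, len);
        }
        return m;
    }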
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 7298a53b9702..85386becbaea 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -853,7 +853,7 @@ static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols)
xfrm_pol_put(pols[i]);
}
-void __xfrm_state_destroy(struct xfrm_state *);
+void __xfrm_state_destroy(struct xfrm_state *, bool);
static inline void __xfrm_state_put(struct xfrm_state *x)
{
@@ -863,7 +863,13 @@ static inline void __xfrm_state_put(struct xfrm_state *x)
static inline void xfrm_state_put(struct xfrm_state *x)
{
if (refcount_dec_and_test(&x->refcnt))
- __xfrm_state_destroy(x);
+ __xfrm_state_destroy(x, false);
+}
+
+static inline void xfrm_state_put_sync(struct xfrm_state *x)
+{
+ if (refcount_dec_and_test(&x->refcnt))
+ __xfrm_state_destroy(x, true);
}
static inline void xfrm_state_hold(struct xfrm_state *x)
@@ -1590,7 +1596,7 @@ struct xfrmk_spdinfo {
struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);
int xfrm_state_delete(struct xfrm_state *x);
-int xfrm_state_flush(struct net *net, u8 proto, bool task_valid);
+int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync);
int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid);
void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
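xfrm_state_put_sync() pairs with the new bool on __xfrm_state_destroy() and
xfrm_state_flush(): passing true requests synchronous destruction instead of
deferring to the GC work, which matters when the caller is tearing the netns
down. A hypothetical flush call under the new signature (arguments
illustrative):

	/* flush all states, destroying them synchronously (sync == true) */
	xfrm_state_flush(net, IPSEC_PROTO_ANY, true, true);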
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 2acbc8bc465b..4a53f6cfa034 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -272,13 +272,14 @@ union drm_amdgpu_vm {
/* sched ioctl */
#define AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE 1
+#define AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE 2
struct drm_amdgpu_sched_in {
/* AMDGPU_SCHED_OP_* */
__u32 op;
__u32 fd;
__s32 priority;
- __u32 flags;
+ __u32 ctx_id;
};
union drm_amdgpu_sched {
diff --git a/include/uapi/drm/nouveau_drm.h b/include/uapi/drm/nouveau_drm.h
index 259588a4b61b..9459a6e3bc1f 100644
--- a/include/uapi/drm/nouveau_drm.h
+++ b/include/uapi/drm/nouveau_drm.h
@@ -133,12 +133,63 @@ struct drm_nouveau_gem_cpu_fini {
#define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC 0x05 /* deprecated */
#define DRM_NOUVEAU_GPUOBJ_FREE 0x06 /* deprecated */
#define DRM_NOUVEAU_NVIF 0x07
+#define DRM_NOUVEAU_SVM_INIT 0x08
+#define DRM_NOUVEAU_SVM_BIND 0x09
#define DRM_NOUVEAU_GEM_NEW 0x40
#define DRM_NOUVEAU_GEM_PUSHBUF 0x41
#define DRM_NOUVEAU_GEM_CPU_PREP 0x42
#define DRM_NOUVEAU_GEM_CPU_FINI 0x43
#define DRM_NOUVEAU_GEM_INFO 0x44
+struct drm_nouveau_svm_init {
+ __u64 unmanaged_addr;
+ __u64 unmanaged_size;
+};
+
+struct drm_nouveau_svm_bind {
+ __u64 header;
+ __u64 va_start;
+ __u64 va_end;
+ __u64 npages;
+ __u64 stride;
+ __u64 result;
+ __u64 reserved0;
+ __u64 reserved1;
+};
+
+#define NOUVEAU_SVM_BIND_COMMAND_SHIFT 0
+#define NOUVEAU_SVM_BIND_COMMAND_BITS 8
+#define NOUVEAU_SVM_BIND_COMMAND_MASK ((1 << 8) - 1)
+#define NOUVEAU_SVM_BIND_PRIORITY_SHIFT 8
+#define NOUVEAU_SVM_BIND_PRIORITY_BITS 8
+#define NOUVEAU_SVM_BIND_PRIORITY_MASK ((1 << 8) - 1)
+#define NOUVEAU_SVM_BIND_TARGET_SHIFT 16
+#define NOUVEAU_SVM_BIND_TARGET_BITS 32
+#define NOUVEAU_SVM_BIND_TARGET_MASK 0xffffffff
+
+/*
+ * Below is used to validate the ioctl argument; userspace can also use it to
+ * make sure that no bits are set beyond known fields for a given kernel version.
+ */
+#define NOUVEAU_SVM_BIND_VALID_BITS 48
+#define NOUVEAU_SVM_BIND_VALID_MASK ((1ULL << NOUVEAU_SVM_BIND_VALID_BITS) - 1)
+
+
+/*
+ * NOUVEAU_SVM_BIND_COMMAND__MIGRATE: synchronous migration to the target memory.
+ * result: number of pages successfully migrated to the target memory.
+ */
+#define NOUVEAU_SVM_BIND_COMMAND__MIGRATE 0
+
+/*
+ * NOUVEAU_SVM_BIND_TARGET__GPU_VRAM: target the GPU VRAM memory.
+ */
+#define NOUVEAU_SVM_BIND_TARGET__GPU_VRAM (1UL << 31)
+
+
+#define DRM_IOCTL_NOUVEAU_SVM_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SVM_INIT, struct drm_nouveau_svm_init)
+#define DRM_IOCTL_NOUVEAU_SVM_BIND DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SVM_BIND, struct drm_nouveau_svm_bind)
+
#define DRM_IOCTL_NOUVEAU_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_NEW, struct drm_nouveau_gem_new)
#define DRM_IOCTL_NOUVEAU_GEM_PUSHBUF DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_PUSHBUF, struct drm_nouveau_gem_pushbuf)
#define DRM_IOCTL_NOUVEAU_GEM_CPU_PREP DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_CPU_PREP, struct drm_nouveau_gem_cpu_prep)
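The header field packs command, priority and target using the SHIFT/MASK
constants above. An illustrative userspace sketch of a migrate-to-VRAM request
(range variables assumed, ioctl call commented out):

	struct drm_nouveau_svm_bind args = {0};

	args.header =
		((__u64)NOUVEAU_SVM_BIND_COMMAND__MIGRATE <<
			NOUVEAU_SVM_BIND_COMMAND_SHIFT) |
		((__u64)NOUVEAU_SVM_BIND_TARGET__GPU_VRAM <<
			NOUVEAU_SVM_BIND_TARGET_SHIFT);
	args.va_start = start;
	args.va_end = end;
	/* ioctl(drm_fd, DRM_IOCTL_NOUVEAU_SVM_BIND, &args); */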
diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h
index e582e8e7527a..b80b85f0d9d8 100644
--- a/include/video/imx-ipu-v3.h
+++ b/include/video/imx-ipu-v3.h
@@ -348,6 +348,7 @@ int ipu_prg_channel_configure(struct ipuv3_channel *ipu_chan,
unsigned int axi_id, unsigned int width,
unsigned int height, unsigned int stride,
u32 format, uint64_t modifier, unsigned long *eba);
+bool ipu_prg_channel_configure_pending(struct ipuv3_channel *ipu_chan);
/*
* IPU CMOS Sensor Interface (csi) functions
diff --git a/init/initramfs.c b/init/initramfs.c
index 7cea802d00ef..fca899622937 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -550,6 +550,7 @@ skip:
initrd_end = 0;
}
+#ifdef CONFIG_BLK_DEV_RAM
#define BUF_SIZE 1024
static void __init clean_rootfs(void)
{
@@ -596,6 +597,7 @@ static void __init clean_rootfs(void)
ksys_close(fd);
kfree(buf);
}
+#endif
static int __init populate_rootfs(void)
{
@@ -638,10 +640,8 @@ static int __init populate_rootfs(void)
printk(KERN_INFO "Unpacking initramfs...\n");
err = unpack_to_rootfs((char *)initrd_start,
initrd_end - initrd_start);
- if (err) {
+ if (err)
printk(KERN_EMERG "Initramfs unpacking failed: %s\n", err);
- clean_rootfs();
- }
free_initrd();
#endif
}
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index abf1002080df..93a5cbbde421 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -471,6 +471,7 @@ static int trie_delete_elem(struct bpf_map *map, void *_key)
}
if (!node || node->prefixlen != key->prefixlen ||
+ node->prefixlen != matchlen ||
(node->flags & LPM_TREE_NODE_FLAG_IM)) {
ret = -ENOENT;
goto out;
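The added matchlen test rejects a delete that reaches a node of the requested
prefix length whose key bits nonetheless diverge from the requested key. A
worked example (addresses illustrative):

	/* stored: 192.168.0.0/24; delete request: 192.168.128.0/24
	 * longest_prefix_match() == 16, node->prefixlen == key->prefixlen == 24
	 * old check: passes (wrong entry freed); new check: 24 != 16 -> -ENOENT
	 */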
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index d43b14535827..950ab2f28922 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -44,7 +44,7 @@ static void do_up_read(struct irq_work *entry)
struct stack_map_irq_work *work;
work = container_of(entry, struct stack_map_irq_work, irq_work);
- up_read(work->sem);
+ up_read_non_owner(work->sem);
work->sem = NULL;
}
@@ -338,6 +338,12 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
} else {
work->sem = &current->mm->mmap_sem;
irq_work_queue(&work->irq_work);
+ /*
+ * The irq_work will release the mmap_sem with
+ * up_read_non_owner(). The rwsem_release() is called
+ * here to release the lock from lockdep's perspective.
+ */
+ rwsem_release(&current->mm->mmap_sem.dep_map, 1, _RET_IP_);
}
}
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 8577bb7f8be6..84470d1480aa 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -559,12 +559,12 @@ static int map_create(union bpf_attr *attr)
err = bpf_map_new_fd(map, f_flags);
if (err < 0) {
/* failed to allocate fd.
- * bpf_map_put() is needed because the above
+ * bpf_map_put_with_uref() is needed because the above
* bpf_map_alloc_id() has published the map
* to the userspace and the userspace may
* have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
*/
- bpf_map_put(map);
+ bpf_map_put_with_uref(map);
return err;
}
@@ -1986,7 +1986,7 @@ static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
fd = bpf_map_new_fd(map, f_flags);
if (fd < 0)
- bpf_map_put(map);
+ bpf_map_put_with_uref(map);
return fd;
}
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 56674a7c3778..5fcce2f4209d 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1617,12 +1617,13 @@ static int check_flow_keys_access(struct bpf_verifier_env *env, int off,
return 0;
}
-static int check_sock_access(struct bpf_verifier_env *env, u32 regno, int off,
- int size, enum bpf_access_type t)
+static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
+ u32 regno, int off, int size,
+ enum bpf_access_type t)
{
struct bpf_reg_state *regs = cur_regs(env);
struct bpf_reg_state *reg = &regs[regno];
- struct bpf_insn_access_aux info;
+ struct bpf_insn_access_aux info = {};
if (reg->smin_value < 0) {
verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
@@ -1636,6 +1637,8 @@ static int check_sock_access(struct bpf_verifier_env *env, u32 regno, int off,
return -EACCES;
}
+ env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
+
return 0;
}
@@ -2032,7 +2035,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
verbose(env, "cannot write into socket\n");
return -EACCES;
}
- err = check_sock_access(env, regno, off, size, t);
+ err = check_sock_access(env, insn_idx, regno, off, size, t);
if (!err && value_regno >= 0)
mark_reg_unknown(env, regs, value_regno);
} else {
@@ -6917,7 +6920,8 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
u32 off_reg;
aux = &env->insn_aux_data[i + delta];
- if (!aux->alu_state)
+ if (!aux->alu_state ||
+ aux->alu_state == BPF_ALU_NON_POINTER)
continue;
isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index c3484785b179..0e97ca9306ef 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -322,7 +322,7 @@ static bool update_stats(struct psi_group *group)
expires = group->next_update;
if (now < expires)
goto out;
- if (now - expires > psi_period)
+ if (now - expires >= psi_period)
missed_periods = div_u64(now - expires, psi_period);
/*
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c521b7347482..c4238b441624 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3384,6 +3384,8 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file
const char tgid_space[] = " ";
const char space[] = " ";
+ print_event_info(buf, m);
+
seq_printf(m, "# %s _-----=> irqs-off\n",
tgid ? tgid_space : space);
seq_printf(m, "# %s / _----=> need-resched\n",
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index d5fb09ebba8b..9eaf07f99212 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -861,22 +861,14 @@ static const struct file_operations kprobe_profile_ops = {
static nokprobe_inline int
fetch_store_strlen(unsigned long addr)
{
- mm_segment_t old_fs;
int ret, len = 0;
u8 c;
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- pagefault_disable();
-
do {
- ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
+ ret = probe_mem_read(&c, (u8 *)addr + len, 1);
len++;
} while (c && ret == 0 && len < MAX_STRING_SIZE);
- pagefault_enable();
- set_fs(old_fs);
-
return (ret < 0) ? ret : len;
}
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index d8c474b6691e..9737059ec58b 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -113,6 +113,28 @@ config KASAN_INLINE
endchoice
+config KASAN_STACK_ENABLE
+ bool "Enable stack instrumentation (unsafe)" if CC_IS_CLANG && !COMPILE_TEST
+ default !(CLANG_VERSION < 90000)
+ depends on KASAN
+ help
+ The LLVM stack address sanitizer has a known problem that
+ causes excessive stack usage in a lot of functions; see
+ https://bugs.llvm.org/show_bug.cgi?id=38809
+ Disabling asan-stack makes it safe to run kernels built
+ with clang-8 with KASAN enabled, though it loses some of
+ the functionality.
+ This feature is always disabled when compile-testing with clang-8
+ or earlier to avoid cluttering the output with stack overflow
+ warnings, but clang-8 users can still enable it for builds without
+ CONFIG_COMPILE_TEST. On gcc and later clang versions it is
+ assumed to always be safe to use and is enabled by default.
+
+config KASAN_STACK
+ int
+ default 1 if KASAN_STACK_ENABLE || CC_IS_GCC
+ default 0
+
config KASAN_S390_4_LEVEL_PAGING
bool "KASan: use 4-level paging"
depends on KASAN && S390
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index c6659cb37033..59875eb278ea 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -768,9 +768,11 @@ all_leaves_cluster_together:
new_s0->index_key[i] =
ops->get_key_chunk(index_key, i * ASSOC_ARRAY_KEY_CHUNK_SIZE);
- blank = ULONG_MAX << (level & ASSOC_ARRAY_KEY_CHUNK_MASK);
- pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, level, blank);
- new_s0->index_key[keylen - 1] &= ~blank;
+ if (level & ASSOC_ARRAY_KEY_CHUNK_MASK) {
+ blank = ULONG_MAX << (level & ASSOC_ARRAY_KEY_CHUNK_MASK);
+ pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, level, blank);
+ new_s0->index_key[keylen - 1] &= ~blank;
+ }
/* This now reduces to a node splitting exercise for which we'll need
* to regenerate the disparity table.
diff --git a/mm/debug.c b/mm/debug.c
index 0abb987dad9b..1611cf00a137 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -44,7 +44,7 @@ const struct trace_print_flags vmaflag_names[] = {
void __dump_page(struct page *page, const char *reason)
{
- struct address_space *mapping = page_mapping(page);
+ struct address_space *mapping;
bool page_poisoned = PagePoisoned(page);
int mapcount;
@@ -58,6 +58,8 @@ void __dump_page(struct page *page, const char *reason)
goto hex_only;
}
+ mapping = page_mapping(page);
+
/*
* Avoid VM_BUG_ON() in page_mapcount().
* page->_mapcount space in struct page is used by sl[aou]b pages to
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index afef61656c1e..8dfdffc34a99 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3624,7 +3624,6 @@ retry_avoidcopy:
copy_user_huge_page(new_page, old_page, address, vma,
pages_per_huge_page(h));
__SetPageUptodate(new_page);
- set_page_huge_active(new_page);
mmu_notifier_range_init(&range, mm, haddr, haddr + huge_page_size(h));
mmu_notifier_invalidate_range_start(&range);
@@ -3645,6 +3644,7 @@ retry_avoidcopy:
make_huge_pte(vma, new_page, 1));
page_remove_rmap(old_page, true);
hugepage_add_new_anon_rmap(new_page, vma, haddr);
+ set_page_huge_active(new_page);
/* Make the old page be freed below */
new_page = old_page;
}
@@ -3729,6 +3729,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
pte_t new_pte;
spinlock_t *ptl;
unsigned long haddr = address & huge_page_mask(h);
+ bool new_page = false;
/*
* Currently, we are forced to kill the process in the event the
@@ -3790,7 +3791,7 @@ retry:
}
clear_huge_page(page, address, pages_per_huge_page(h));
__SetPageUptodate(page);
- set_page_huge_active(page);
+ new_page = true;
if (vma->vm_flags & VM_MAYSHARE) {
int err = huge_add_to_page_cache(page, mapping, idx);
@@ -3861,6 +3862,15 @@ retry:
}
spin_unlock(ptl);
+
+ /*
+ * Only make newly allocated pages active. Existing pages found
+ * in the pagecache could be !page_huge_active() if they have been
+ * isolated for migration.
+ */
+ if (new_page)
+ set_page_huge_active(page);
+
unlock_page(page);
out:
return ret;
@@ -4095,7 +4105,6 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
* the set_pte_at() write.
*/
__SetPageUptodate(page);
- set_page_huge_active(page);
mapping = dst_vma->vm_file->f_mapping;
idx = vma_hugecache_offset(h, dst_vma, dst_addr);
@@ -4163,6 +4172,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
update_mmu_cache(dst_vma, dst_addr, dst_pte);
spin_unlock(ptl);
+ set_page_huge_active(page);
if (vm_shared)
unlock_page(page);
ret = 0;
diff --git a/mm/kasan/Makefile b/mm/kasan/Makefile
index e2bb06c1b45e..5d1065efbd47 100644
--- a/mm/kasan/Makefile
+++ b/mm/kasan/Makefile
@@ -7,6 +7,8 @@ KCOV_INSTRUMENT := n
CFLAGS_REMOVE_common.o = -pg
CFLAGS_REMOVE_generic.o = -pg
+CFLAGS_REMOVE_tags.o = -pg
+
# Function splitter causes unnecessary splits in __asan_load1/__asan_store1
# see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 73c9cbfdedf4..09b534fbba17 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -361,10 +361,15 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object)
* get different tags.
*/
static u8 assign_tag(struct kmem_cache *cache, const void *object,
- bool init, bool krealloc)
+ bool init, bool keep_tag)
{
- /* Reuse the same tag for krealloc'ed objects. */
- if (krealloc)
+ /*
+ * 1. When an object is kmalloc()'ed, two hooks are called:
+ * kasan_slab_alloc() and kasan_kmalloc(). We assign the
+ * tag only in the first one.
+ * 2. We reuse the same tag for krealloc'ed objects.
+ */
+ if (keep_tag)
return get_tag(object);
/*
@@ -405,12 +410,6 @@ void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
return (void *)object;
}
-void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
- gfp_t flags)
-{
- return kasan_kmalloc(cache, object, cache->object_size, flags);
-}
-
static inline bool shadow_invalid(u8 tag, s8 shadow_byte)
{
if (IS_ENABLED(CONFIG_KASAN_GENERIC))
@@ -467,7 +466,7 @@ bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
}
static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
- size_t size, gfp_t flags, bool krealloc)
+ size_t size, gfp_t flags, bool keep_tag)
{
unsigned long redzone_start;
unsigned long redzone_end;
@@ -485,7 +484,7 @@ static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
KASAN_SHADOW_SCALE_SIZE);
if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
- tag = assign_tag(cache, object, false, krealloc);
+ tag = assign_tag(cache, object, false, keep_tag);
/* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */
kasan_unpoison_shadow(set_tag(object, tag), size);
@@ -498,10 +497,16 @@ static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
return set_tag(object, tag);
}
+void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
+ gfp_t flags)
+{
+ return __kasan_kmalloc(cache, object, cache->object_size, flags, false);
+}
+
void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
size_t size, gfp_t flags)
{
- return __kasan_kmalloc(cache, object, size, flags, false);
+ return __kasan_kmalloc(cache, object, size, flags, true);
}
EXPORT_SYMBOL(kasan_kmalloc);
diff --git a/mm/kasan/tags.c b/mm/kasan/tags.c
index 0777649e07c4..63fca3172659 100644
--- a/mm/kasan/tags.c
+++ b/mm/kasan/tags.c
@@ -46,7 +46,7 @@ void kasan_init_tags(void)
int cpu;
for_each_possible_cpu(cpu)
- per_cpu(prng_state, cpu) = get_random_u32();
+ per_cpu(prng_state, cpu) = (u32)get_cycles();
}
/*
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index f9d9dc250428..707fa5579f66 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -574,6 +574,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
unsigned long flags;
struct kmemleak_object *object, *parent;
struct rb_node **link, *rb_parent;
+ unsigned long untagged_ptr;
object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
if (!object) {
@@ -619,8 +620,9 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
write_lock_irqsave(&kmemleak_lock, flags);
- min_addr = min(min_addr, ptr);
- max_addr = max(max_addr, ptr + size);
+ untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
+ min_addr = min(min_addr, untagged_ptr);
+ max_addr = max(max_addr, untagged_ptr + size);
link = &object_tree_root.rb_node;
rb_parent = NULL;
while (*link) {
@@ -1333,6 +1335,7 @@ static void scan_block(void *_start, void *_end,
unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
unsigned long *end = _end - (BYTES_PER_POINTER - 1);
unsigned long flags;
+ unsigned long untagged_ptr;
read_lock_irqsave(&kmemleak_lock, flags);
for (ptr = start; ptr < end; ptr++) {
@@ -1347,7 +1350,8 @@ static void scan_block(void *_start, void *_end,
pointer = *ptr;
kasan_enable_current();
- if (pointer < min_addr || pointer >= max_addr)
+ untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
+ if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
continue;
/*
diff --git a/mm/maccess.c b/mm/maccess.c
index f3416632e5a4..ec00be51a24f 100644
--- a/mm/maccess.c
+++ b/mm/maccess.c
@@ -30,10 +30,8 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
set_fs(KERNEL_DS);
pagefault_disable();
- current->kernel_uaccess_faults_ok++;
ret = __copy_from_user_inatomic(dst,
(__force const void __user *)src, size);
- current->kernel_uaccess_faults_ok--;
pagefault_enable();
set_fs(old_fs);
@@ -60,9 +58,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
set_fs(KERNEL_DS);
pagefault_disable();
- current->kernel_uaccess_faults_ok++;
ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
- current->kernel_uaccess_faults_ok--;
pagefault_enable();
set_fs(old_fs);
@@ -98,13 +94,11 @@ long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count)
set_fs(KERNEL_DS);
pagefault_disable();
- current->kernel_uaccess_faults_ok++;
do {
ret = __get_user(*dst++, (const char __user __force *)src++);
} while (dst[-1] && ret == 0 && src - unsafe_addr < count);
- current->kernel_uaccess_faults_ok--;
dst[-1] = '\0';
pagefault_enable();
set_fs(old_fs);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 124e794867c5..1ad28323fb9f 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1188,11 +1188,13 @@ static inline int pageblock_free(struct page *page)
return PageBuddy(page) && page_order(page) >= pageblock_order;
}
-/* Return the start of the next active pageblock after a given page */
-static struct page *next_active_pageblock(struct page *page)
+/* Return the pfn of the start of the next active pageblock after a given pfn */
+static unsigned long next_active_pageblock(unsigned long pfn)
{
+ struct page *page = pfn_to_page(pfn);
+
/* Ensure the starting page is pageblock-aligned */
- BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));
+ BUG_ON(pfn & (pageblock_nr_pages - 1));
/* If the entire pageblock is free, move to the end of free page */
if (pageblock_free(page)) {
@@ -1200,16 +1202,16 @@ static struct page *next_active_pageblock(struct page *page)
/* be careful. we don't have locks, page_order can be changed.*/
order = page_order(page);
if ((order < MAX_ORDER) && (order >= pageblock_order))
- return page + (1 << order);
+ return pfn + (1 << order);
}
- return page + pageblock_nr_pages;
+ return pfn + pageblock_nr_pages;
}
-static bool is_pageblock_removable_nolock(struct page *page)
+static bool is_pageblock_removable_nolock(unsigned long pfn)
{
+ struct page *page = pfn_to_page(pfn);
struct zone *zone;
- unsigned long pfn;
/*
* We have to be careful here because we are iterating over memory
@@ -1232,13 +1234,14 @@ static bool is_pageblock_removable_nolock(struct page *page)
/* Checks if this range of memory is likely to be hot-removable. */
bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
{
- struct page *page = pfn_to_page(start_pfn);
- unsigned long end_pfn = min(start_pfn + nr_pages, zone_end_pfn(page_zone(page)));
- struct page *end_page = pfn_to_page(end_pfn);
+ unsigned long end_pfn, pfn;
+
+ end_pfn = min(start_pfn + nr_pages,
+ zone_end_pfn(page_zone(pfn_to_page(start_pfn))));
/* Check the starting page of each pageblock within the range */
- for (; page < end_page; page = next_active_pageblock(page)) {
- if (!is_pageblock_removable_nolock(page))
+ for (pfn = start_pfn; pfn < end_pfn; pfn = next_active_pageblock(pfn)) {
+ if (!is_pageblock_removable_nolock(pfn))
return false;
cond_resched();
}
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index d4496d9d34f5..ee2bce59d2bf 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1314,7 +1314,7 @@ static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
nodemask_t *nodes)
{
unsigned long copy = ALIGN(maxnode-1, 64) / 8;
- const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
+ unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
if (copy > nbytes) {
if (copy > PAGE_SIZE)
@@ -1491,7 +1491,7 @@ static int kernel_get_mempolicy(int __user *policy,
int uninitialized_var(pval);
nodemask_t nodes;
- if (nmask != NULL && maxnode < MAX_NUMNODES)
+ if (nmask != NULL && maxnode < nr_node_ids)
return -EINVAL;
err = do_get_mempolicy(&pval, &nodes, addr, flags);
@@ -1527,7 +1527,7 @@ COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
unsigned long nr_bits, alloc_size;
DECLARE_BITMAP(bm, MAX_NUMNODES);
- nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
+ nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids);
alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
if (nmask)
diff --git a/mm/migrate.c b/mm/migrate.c
index d4fd680be3b0..181f5d2718a9 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1315,6 +1315,16 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
lock_page(hpage);
}
+ /*
+ * Check for pages which are in the process of being freed. Without
+ * page_mapping() set, hugetlbfs specific move page routine will not
+ * be called and we could leak usage counts for subpools.
+ */
+ if (page_private(hpage) && !page_mapping(hpage)) {
+ rc = -EBUSY;
+ goto out_unlock;
+ }
+
if (PageAnon(hpage))
anon_vma = page_get_anon_vma(hpage);
@@ -1345,6 +1355,7 @@ put_anon:
put_new_page = NULL;
}
+out_unlock:
unlock_page(hpage);
out:
if (rc != -EAGAIN)
diff --git a/mm/mmap.c b/mm/mmap.c
index f901065c4c64..fc1809b1bed6 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2426,12 +2426,11 @@ int expand_downwards(struct vm_area_struct *vma,
{
struct mm_struct *mm = vma->vm_mm;
struct vm_area_struct *prev;
- int error;
+ int error = 0;
address &= PAGE_MASK;
- error = security_mmap_addr(address);
- if (error)
- return error;
+ if (address < mmap_min_addr)
+ return -EPERM;
/* Enforce stack_guard_gap */
prev = vma->vm_prev;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 46285d28e43b..0b9f577b1a2a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2170,6 +2170,18 @@ static inline void boost_watermark(struct zone *zone)
max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
watermark_boost_factor, 10000);
+
+ /*
+ * The high watermark may be uninitialised if fragmentation occurs
+ * very early in boot, so do not boost. We do not fall
+ * through and boost by pageblock_nr_pages as failing
+ * allocations that early means that reclaim is not going
+ * to help and it may even be impossible to reclaim the
+ * boosted watermark resulting in a hang.
+ */
+ if (!max_boost)
+ return;
+
max_boost = max(pageblock_nr_pages, max_boost);
zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
@@ -4675,11 +4687,11 @@ refill:
/* Even if we own the page, we do not use atomic_set().
* This would break get_page_unless_zero() users.
*/
- page_ref_add(page, size);
+ page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
/* reset page count bias and offset to start of new frag */
nc->pfmemalloc = page_is_pfmemalloc(page);
- nc->pagecnt_bias = size + 1;
+ nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
nc->offset = size;
}
@@ -4695,10 +4707,10 @@ refill:
size = nc->size;
#endif
/* OK, page count is 0, we can safely set it */
- set_page_count(page, size + 1);
+ set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
/* reset page count bias and offset to start of new frag */
- nc->pagecnt_bias = size + 1;
+ nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
offset = size - fragsz;
}
diff --git a/mm/shmem.c b/mm/shmem.c
index 6ece1e2fe76e..2c012eee133d 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2848,16 +2848,20 @@ static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode,
static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
struct inode *inode = d_inode(old_dentry);
- int ret;
+ int ret = 0;
/*
* No ordinary (disk based) filesystem counts links as inodes;
* but each new link needs a new dentry, pinning lowmem, and
* tmpfs dentries cannot be pruned until they are unlinked.
+ * But if an O_TMPFILE file is linked into the tmpfs, the
+ * first link must skip that, to get the accounting right.
*/
- ret = shmem_reserve_inode(inode->i_sb);
- if (ret)
- goto out;
+ if (inode->i_nlink) {
+ ret = shmem_reserve_inode(inode->i_sb);
+ if (ret)
+ goto out;
+ }
dir->i_size += BOGO_DIRENT_SIZE;
inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
diff --git a/mm/slab.c b/mm/slab.c
index 78eb8c5bf4e4..91c1863df93d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2359,7 +2359,7 @@ static void *alloc_slabmgmt(struct kmem_cache *cachep,
void *freelist;
void *addr = page_address(page);
- page->s_mem = kasan_reset_tag(addr) + colour_off;
+ page->s_mem = addr + colour_off;
page->active = 0;
if (OBJFREELIST_SLAB(cachep))
@@ -2368,6 +2368,7 @@ static void *alloc_slabmgmt(struct kmem_cache *cachep,
/* Slab management obj is off-slab. */
freelist = kmem_cache_alloc_node(cachep->freelist_cache,
local_flags, nodeid);
+ freelist = kasan_reset_tag(freelist);
if (!freelist)
return NULL;
} else {
@@ -2681,6 +2682,13 @@ static struct page *cache_grow_begin(struct kmem_cache *cachep,
offset *= cachep->colour_off;
+ /*
+ * Call kasan_poison_slab() before calling alloc_slabmgmt(), so
+ * page_address() in the latter returns a non-tagged pointer,
+ * as it should be for slab pages.
+ */
+ kasan_poison_slab(page);
+
/* Get slab management. */
freelist = alloc_slabmgmt(cachep, page, offset,
local_flags & ~GFP_CONSTRAINT_MASK, page_node);
@@ -2689,7 +2697,6 @@ static struct page *cache_grow_begin(struct kmem_cache *cachep,
slab_map_pages(cachep, page, freelist);
- kasan_poison_slab(page);
cache_init_objs(cachep, page);
if (gfpflags_allow_blocking(local_flags))
@@ -3540,7 +3547,6 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
void *ret = slab_alloc(cachep, flags, _RET_IP_);
- ret = kasan_slab_alloc(cachep, ret, flags);
trace_kmem_cache_alloc(_RET_IP_, ret,
cachep->object_size, cachep->size, flags);
@@ -3630,7 +3636,6 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
- ret = kasan_slab_alloc(cachep, ret, flags);
trace_kmem_cache_alloc_node(_RET_IP_, ret,
cachep->object_size, cachep->size,
flags, nodeid);
@@ -4408,6 +4413,8 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
unsigned int objnr;
unsigned long offset;
+ ptr = kasan_reset_tag(ptr);
+
/* Find and validate object. */
cachep = page->slab_cache;
objnr = obj_to_index(cachep, page, (void *)ptr);
diff --git a/mm/slab.h b/mm/slab.h
index 4190c24ef0e9..384105318779 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -437,11 +437,10 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
flags &= gfp_allowed_mask;
for (i = 0; i < size; i++) {
- void *object = p[i];
-
- kmemleak_alloc_recursive(object, s->object_size, 1,
+ p[i] = kasan_slab_alloc(s, p[i], flags);
+ /* As p[i] might get tagged, call kmemleak hook after KASAN. */
+ kmemleak_alloc_recursive(p[i], s->object_size, 1,
s->flags, flags);
- p[i] = kasan_slab_alloc(s, object, flags);
}
if (memcg_kmem_enabled())
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 81732d05e74a..f9d89c1b5977 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1228,8 +1228,9 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
flags |= __GFP_COMP;
page = alloc_pages(flags, order);
ret = page ? page_address(page) : NULL;
- kmemleak_alloc(ret, size, 1, flags);
ret = kasan_kmalloc_large(ret, size, flags);
+ /* As ret might get tagged, call kmemleak hook after KASAN. */
+ kmemleak_alloc(ret, size, 1, flags);
return ret;
}
EXPORT_SYMBOL(kmalloc_order);
diff --git a/mm/slub.c b/mm/slub.c
index 1e3d0ec4e200..dc777761b6b7 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -249,7 +249,18 @@ static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
unsigned long ptr_addr)
{
#ifdef CONFIG_SLAB_FREELIST_HARDENED
- return (void *)((unsigned long)ptr ^ s->random ^ ptr_addr);
+ /*
+ * When CONFIG_KASAN_SW_TAGS is enabled, ptr_addr might be tagged.
+ * Normally, this doesn't cause any issues, as both set_freepointer()
+ * and get_freepointer() are called with a pointer with the same tag.
+ * However, there are some issues with CONFIG_SLUB_DEBUG code. For
+ * example, when __free_slab() iterates over objects in a cache, it
+ * passes untagged pointers to check_object(). check_object() in turn
+ * calls get_freepointer() with an untagged pointer, which causes the
+ * freepointer to be restored incorrectly.
+ */
+ return (void *)((unsigned long)ptr ^ s->random ^
+ (unsigned long)kasan_reset_tag((void *)ptr_addr));
#else
return ptr;
#endif
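With hardening enabled the stored value is ptr ^ s->random ^ ptr_addr, so
encode and decode must see identical ptr_addr bits; resetting the KASAN tag
keeps the XOR symmetric even when one side passes an untagged pointer. A
roundtrip sketch (illustrative):

	encoded = (unsigned long)ptr ^ s->random ^
		  (unsigned long)kasan_reset_tag((void *)addr);
	decoded = encoded ^ s->random ^
		  (unsigned long)kasan_reset_tag((void *)addr);
	/* decoded == (unsigned long)ptr whatever tag bits addr carries */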
@@ -303,15 +314,10 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
__p < (__addr) + (__objects) * (__s)->size; \
__p += (__s)->size)
-#define for_each_object_idx(__p, __idx, __s, __addr, __objects) \
- for (__p = fixup_red_left(__s, __addr), __idx = 1; \
- __idx <= __objects; \
- __p += (__s)->size, __idx++)
-
/* Determine object index from a given position */
static inline unsigned int slab_index(void *p, struct kmem_cache *s, void *addr)
{
- return (p - addr) / s->size;
+ return (kasan_reset_tag(p) - addr) / s->size;
}
static inline unsigned int order_objects(unsigned int order, unsigned int size)
@@ -507,6 +513,7 @@ static inline int check_valid_pointer(struct kmem_cache *s,
return 1;
base = page_address(page);
+ object = kasan_reset_tag(object);
object = restore_red_left(s, object);
if (object < base || object >= base + page->objects * s->size ||
(object - base) % s->size) {
@@ -1075,6 +1082,16 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
init_tracking(s, object);
}
+static void setup_page_debug(struct kmem_cache *s, void *addr, int order)
+{
+ if (!(s->flags & SLAB_POISON))
+ return;
+
+ metadata_access_enable();
+ memset(addr, POISON_INUSE, PAGE_SIZE << order);
+ metadata_access_disable();
+}
+
static inline int alloc_consistency_checks(struct kmem_cache *s,
struct page *page,
void *object, unsigned long addr)
@@ -1330,6 +1347,8 @@ slab_flags_t kmem_cache_flags(unsigned int object_size,
#else /* !CONFIG_SLUB_DEBUG */
static inline void setup_object_debug(struct kmem_cache *s,
struct page *page, void *object) {}
+static inline void setup_page_debug(struct kmem_cache *s,
+ void *addr, int order) {}
static inline int alloc_debug_processing(struct kmem_cache *s,
struct page *page, void *object, unsigned long addr) { return 0; }
@@ -1374,8 +1393,10 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
*/
static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
{
+ ptr = kasan_kmalloc_large(ptr, size, flags);
+ /* As ptr might get tagged, call kmemleak hook after KASAN. */
kmemleak_alloc(ptr, size, 1, flags);
- return kasan_kmalloc_large(ptr, size, flags);
+ return ptr;
}
static __always_inline void kfree_hook(void *x)
@@ -1641,27 +1662,25 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
if (page_is_pfmemalloc(page))
SetPageSlabPfmemalloc(page);
+ kasan_poison_slab(page);
+
start = page_address(page);
- if (unlikely(s->flags & SLAB_POISON))
- memset(start, POISON_INUSE, PAGE_SIZE << order);
-
- kasan_poison_slab(page);
+ setup_page_debug(s, start, order);
shuffle = shuffle_freelist(s, page);
if (!shuffle) {
- for_each_object_idx(p, idx, s, start, page->objects) {
- if (likely(idx < page->objects)) {
- next = p + s->size;
- next = setup_object(s, page, next);
- set_freepointer(s, p, next);
- } else
- set_freepointer(s, p, NULL);
- }
start = fixup_red_left(s, start);
start = setup_object(s, page, start);
page->freelist = start;
+ for (idx = 0, p = start; idx < page->objects - 1; idx++) {
+ next = p + s->size;
+ next = setup_object(s, page, next);
+ set_freepointer(s, p, next);
+ p = next;
+ }
+ set_freepointer(s, p, NULL);
}
page->inuse = page->objects;
diff --git a/mm/swap.c b/mm/swap.c
index 4929bc1be60e..4d7d37eb3c40 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -320,11 +320,6 @@ static inline void activate_page_drain(int cpu)
{
}
-static bool need_activate_page_drain(int cpu)
-{
- return false;
-}
-
void activate_page(struct page *page)
{
struct zone *zone = page_zone(page);
@@ -653,13 +648,15 @@ void lru_add_drain(void)
put_cpu();
}
+#ifdef CONFIG_SMP
+
+static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
+
static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
lru_add_drain();
}
-static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
-
/*
* Doesn't need any cpu hotplug locking because we do rely on per-cpu
* kworkers being shut down before our page_alloc_cpu_dead callback is
@@ -702,6 +699,12 @@ void lru_add_drain_all(void)
mutex_unlock(&lock);
}
+#else
+void lru_add_drain_all(void)
+{
+ lru_add_drain();
+}
+#endif
/**
* release_pages - batched put_page()
diff --git a/mm/util.c b/mm/util.c
index 1ea055138043..379319b1bcfd 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -150,7 +150,7 @@ void *memdup_user(const void __user *src, size_t len)
{
void *p;
- p = kmalloc_track_caller(len, GFP_USER);
+ p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
if (!p)
return ERR_PTR(-ENOMEM);
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index fa2644d276ef..e31e1b20f7f4 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -13,27 +13,13 @@
#include <net/sock.h>
#include <net/tcp.h>
-static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx,
- struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
-{
- u32 ret;
-
- preempt_disable();
- rcu_read_lock();
- bpf_cgroup_storage_set(storage);
- ret = BPF_PROG_RUN(prog, ctx);
- rcu_read_unlock();
- preempt_enable();
-
- return ret;
-}
-
-static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *ret,
- u32 *time)
+static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
+ u32 *retval, u32 *time)
{
struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 };
enum bpf_cgroup_storage_type stype;
u64 time_start, time_spent = 0;
+ int ret = 0;
u32 i;
for_each_cgroup_storage_type(stype) {
@@ -48,25 +34,42 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *ret,
if (!repeat)
repeat = 1;
+
+ rcu_read_lock();
+ preempt_disable();
time_start = ktime_get_ns();
for (i = 0; i < repeat; i++) {
- *ret = bpf_test_run_one(prog, ctx, storage);
+ bpf_cgroup_storage_set(storage);
+ *retval = BPF_PROG_RUN(prog, ctx);
+
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ break;
+ }
+
if (need_resched()) {
- if (signal_pending(current))
- break;
time_spent += ktime_get_ns() - time_start;
+ preempt_enable();
+ rcu_read_unlock();
+
cond_resched();
+
+ rcu_read_lock();
+ preempt_disable();
time_start = ktime_get_ns();
}
}
time_spent += ktime_get_ns() - time_start;
+ preempt_enable();
+ rcu_read_unlock();
+
do_div(time_spent, repeat);
*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;
for_each_cgroup_storage_type(stype)
bpf_cgroup_storage_free(storage[stype]);
- return 0;
+ return ret;
}
static int bpf_test_finish(const union bpf_attr *kattr,
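The rework holds rcu_read_lock()/preempt_disable() across the whole run loop
and drops both around cond_resched(), since sleeping is not allowed in that
context. The bracketing pattern in isolation (sketch):

	rcu_read_lock();
	preempt_disable();
	/* ... run the program repeatedly ... */
	if (need_resched()) {
		preempt_enable();
		rcu_read_unlock();
		cond_resched();		/* may sleep: all protection dropped */
		rcu_read_lock();
		preempt_disable();
	}
	preempt_enable();
	rcu_read_unlock();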
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 3aeff0895669..ac92b2eb32b1 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1204,14 +1204,7 @@ static void br_multicast_query_received(struct net_bridge *br,
return;
br_multicast_update_query_timer(br, query, max_delay);
-
- /* Based on RFC4541, section 2.1.1 IGMP Forwarding Rules,
- * the arrival port for IGMP Queries where the source address
- * is 0.0.0.0 should not be added to router port list.
- */
- if ((saddr->proto == htons(ETH_P_IP) && saddr->u.ip4) ||
- saddr->proto == htons(ETH_P_IPV6))
- br_multicast_mark_router(br, port);
+ br_multicast_mark_router(br, port);
}
static void br_ip4_multicast_query(struct net_bridge *br,
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 3661cdd927f1..7e71b0df1fbc 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -2058,6 +2058,8 @@ static int process_connect(struct ceph_connection *con)
dout("process_connect on %p tag %d\n", con, (int)con->in_tag);
if (con->auth) {
+ int len = le32_to_cpu(con->in_reply.authorizer_len);
+
/*
* Any connection that defines ->get_authorizer()
* should also define ->add_authorizer_challenge() and
@@ -2067,8 +2069,7 @@ static int process_connect(struct ceph_connection *con)
*/
if (con->in_reply.tag == CEPH_MSGR_TAG_CHALLENGE_AUTHORIZER) {
ret = con->ops->add_authorizer_challenge(
- con, con->auth->authorizer_reply_buf,
- le32_to_cpu(con->in_reply.authorizer_len));
+ con, con->auth->authorizer_reply_buf, len);
if (ret < 0)
return ret;
@@ -2078,10 +2079,12 @@ static int process_connect(struct ceph_connection *con)
return 0;
}
- ret = con->ops->verify_authorizer_reply(con);
- if (ret < 0) {
- con->error_msg = "bad authorize reply";
- return ret;
+ if (len) {
+ ret = con->ops->verify_authorizer_reply(con);
+ if (ret < 0) {
+ con->error_msg = "bad authorize reply";
+ return ret;
+ }
}
}
diff --git a/net/compat.c b/net/compat.c
index 959d1c51826d..3d348198004f 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -388,8 +388,12 @@ static int __compat_sys_setsockopt(int fd, int level, int optname,
char __user *optval, unsigned int optlen)
{
int err;
- struct socket *sock = sockfd_lookup(fd, &err);
+ struct socket *sock;
+
+ if (optlen > INT_MAX)
+ return -EINVAL;
+ sock = sockfd_lookup(fd, &err);
if (sock) {
err = security_socket_setsockopt(sock, level, optname);
if (err) {
diff --git a/net/core/dev.c b/net/core/dev.c
index 8e276e0192a1..5d03889502eb 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -8152,7 +8152,7 @@ static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
netdev_features_t feature;
int feature_bit;
- for_each_netdev_feature(&upper_disables, feature_bit) {
+ for_each_netdev_feature(upper_disables, feature_bit) {
feature = __NETIF_F_BIT(feature_bit);
if (!(upper->wanted_features & feature)
&& (features & feature)) {
@@ -8172,7 +8172,7 @@ static void netdev_sync_lower_features(struct net_device *upper,
netdev_features_t feature;
int feature_bit;
- for_each_netdev_feature(&upper_disables, feature_bit) {
+ for_each_netdev_feature(upper_disables, feature_bit) {
feature = __NETIF_F_BIT(feature_bit);
if (!(features & feature) && (lower->features & feature)) {
netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
diff --git a/net/core/filter.c b/net/core/filter.c
index 7a54dc11ac2d..f7d0004fc160 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2789,8 +2789,7 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
u32 off = skb_mac_header_len(skb);
int ret;
- /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
- if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
+ if (!skb_is_gso_tcp(skb))
return -ENOTSUPP;
ret = skb_cow(skb, len_diff);
@@ -2831,8 +2830,7 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
u32 off = skb_mac_header_len(skb);
int ret;
- /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
- if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
+ if (!skb_is_gso_tcp(skb))
return -ENOTSUPP;
ret = skb_unclone(skb, GFP_ATOMIC);
@@ -2957,8 +2955,7 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff)
u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
int ret;
- /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
- if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
+ if (!skb_is_gso_tcp(skb))
return -ENOTSUPP;
ret = skb_cow(skb, len_diff);
@@ -2987,8 +2984,7 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff)
u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
int ret;
- /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
- if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
+ if (!skb_is_gso_tcp(skb))
return -ENOTSUPP;
ret = skb_unclone(skb, GFP_ATOMIC);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 26d848484912..2415d9cb9b89 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -356,6 +356,8 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
*/
void *netdev_alloc_frag(unsigned int fragsz)
{
+ fragsz = SKB_DATA_ALIGN(fragsz);
+
return __netdev_alloc_frag(fragsz, GFP_ATOMIC);
}
EXPORT_SYMBOL(netdev_alloc_frag);
@@ -369,6 +371,8 @@ static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
void *napi_alloc_frag(unsigned int fragsz)
{
+ fragsz = SKB_DATA_ALIGN(fragsz);
+
return __napi_alloc_frag(fragsz, GFP_ATOMIC);
}
EXPORT_SYMBOL(napi_alloc_frag);
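Rounding the request inside the public helpers keeps every frag cache-line
aligned; SKB_DATA_ALIGN() rounds up to SMP_CACHE_BYTES. Assuming 64-byte cache
lines, a 100-byte request consumes 128 bytes of the per-CPU page frag:

	void *buf = netdev_alloc_frag(100);	/* internally SKB_DATA_ALIGN(100) == 128 */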
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index a1917025e155..410f19148106 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -612,8 +612,8 @@ static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
{
struct device_node *ports, *port;
struct dsa_port *dp;
+ int err = 0;
u32 reg;
- int err;
ports = of_get_child_by_name(dn, "ports");
if (!ports) {
@@ -624,19 +624,23 @@ static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
for_each_available_child_of_node(ports, port) {
err = of_property_read_u32(port, "reg", &reg);
if (err)
- return err;
+ goto out_put_node;
- if (reg >= ds->num_ports)
- return -EINVAL;
+ if (reg >= ds->num_ports) {
+ err = -EINVAL;
+ goto out_put_node;
+ }
dp = &ds->ports[reg];
err = dsa_port_parse_of(dp, port);
if (err)
- return err;
+ goto out_put_node;
}
- return 0;
+out_put_node:
+ of_node_put(ports);
+ return err;
}
static int dsa_switch_parse_member_of(struct dsa_switch *ds,
diff --git a/net/dsa/port.c b/net/dsa/port.c
index 2d7e01b23572..c2261697ee83 100644
--- a/net/dsa/port.c
+++ b/net/dsa/port.c
@@ -69,7 +69,6 @@ static void dsa_port_set_state_now(struct dsa_port *dp, u8 state)
int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
{
- u8 stp_state = dp->bridge_dev ? BR_STATE_BLOCKING : BR_STATE_FORWARDING;
struct dsa_switch *ds = dp->ds;
int port = dp->index;
int err;
@@ -80,7 +79,8 @@ int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
return err;
}
- dsa_port_set_state_now(dp, stp_state);
+ if (!dp->bridge_dev)
+ dsa_port_set_state_now(dp, BR_STATE_FORWARDING);
return 0;
}
@@ -90,7 +90,8 @@ void dsa_port_disable(struct dsa_port *dp, struct phy_device *phy)
struct dsa_switch *ds = dp->ds;
int port = dp->index;
- dsa_port_set_state_now(dp, BR_STATE_DISABLED);
+ if (!dp->bridge_dev)
+ dsa_port_set_state_now(dp, BR_STATE_DISABLED);
if (ds->ops->port_disable)
ds->ops->port_disable(ds, port, phy);
@@ -291,6 +292,7 @@ static struct phy_device *dsa_port_get_phy_device(struct dsa_port *dp)
return ERR_PTR(-EPROBE_DEFER);
}
+ of_node_put(phy_dn);
return phydev;
}
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 777fa3b7fb13..f0165c5f376b 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -667,7 +667,8 @@ static int cipso_v4_map_lvl_valid(const struct cipso_v4_doi *doi_def, u8 level)
case CIPSO_V4_MAP_PASS:
return 0;
case CIPSO_V4_MAP_TRANS:
- if (doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL)
+ if ((level < doi_def->map.std->lvl.cipso_size) &&
+ (doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL))
return 0;
break;
}
@@ -1735,13 +1736,26 @@ validate_return:
*/
void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway)
{
+ unsigned char optbuf[sizeof(struct ip_options) + 40];
+ struct ip_options *opt = (struct ip_options *)optbuf;
+
if (ip_hdr(skb)->protocol == IPPROTO_ICMP || error != -EACCES)
return;
+ /*
+ * We might be called above the IP layer,
+ * so we cannot use icmp_send and IPCB here.
+ */
+
+ memset(opt, 0, sizeof(struct ip_options));
+ opt->optlen = ip_hdr(skb)->ihl*4 - sizeof(struct iphdr);
+ if (__ip_options_compile(dev_net(skb->dev), opt, skb, NULL))
+ return;
+
if (gateway)
- icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0);
+ __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0, opt);
else
- icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0);
+ __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0, opt);
}
/**
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 5459f41fc26f..10e809b296ec 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -328,7 +328,7 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
skb->len += tailen;
skb->data_len += tailen;
skb->truesize += tailen;
- if (sk)
+ if (sk && sk_fullsock(sk))
refcount_add(tailen, &sk->sk_wmem_alloc);
goto out;
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index fe4f6a624238..ed14ec245584 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -710,6 +710,10 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
case RTA_GATEWAY:
cfg->fc_gw = nla_get_be32(attr);
break;
+ case RTA_VIA:
+ NL_SET_ERR_MSG(extack, "IPv4 does not support RTA_VIA attribute");
+ err = -EINVAL;
+ goto errout;
case RTA_PRIORITY:
cfg->fc_priority = nla_get_u32(attr);
break;
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 065997f414e6..3f24414150e2 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -570,7 +570,8 @@ relookup_failed:
* MUST reply to only the first fragment.
*/
-void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
+void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
+ const struct ip_options *opt)
{
struct iphdr *iph;
int room;
@@ -691,7 +692,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
iph->tos;
mark = IP4_REPLY_MARK(net, skb_in->mark);
- if (ip_options_echo(net, &icmp_param.replyopts.opt.opt, skb_in))
+ if (__ip_options_echo(net, &icmp_param.replyopts.opt.opt, skb_in, opt))
goto out_unlock;
@@ -742,7 +743,7 @@ out_bh_enable:
local_bh_enable();
out:;
}
-EXPORT_SYMBOL(icmp_send);
+EXPORT_SYMBOL(__icmp_send);
static void icmp_socket_deliver(struct sk_buff *skb, u32 info)
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 3978f807fa8b..6ae89f2b541b 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -1457,9 +1457,23 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
struct ip_tunnel_parm *p = &t->parms;
__be16 o_flags = p->o_flags;
- if ((t->erspan_ver == 1 || t->erspan_ver == 2) &&
- !t->collect_md)
- o_flags |= TUNNEL_KEY;
+ if (t->erspan_ver == 1 || t->erspan_ver == 2) {
+ if (!t->collect_md)
+ o_flags |= TUNNEL_KEY;
+
+ if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
+ goto nla_put_failure;
+
+ if (t->erspan_ver == 1) {
+ if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
+ goto nla_put_failure;
+ } else {
+ if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
+ goto nla_put_failure;
+ if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
+ goto nla_put_failure;
+ }
+ }
if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
nla_put_be16(skb, IFLA_GRE_IFLAGS,
@@ -1495,19 +1509,6 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
goto nla_put_failure;
}
- if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
- goto nla_put_failure;
-
- if (t->erspan_ver == 1) {
- if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
- goto nla_put_failure;
- } else if (t->erspan_ver == 2) {
- if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
- goto nla_put_failure;
- if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
- goto nla_put_failure;
- }
-
return 0;
nla_put_failure:
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 51d8efba6de2..1f4737b77067 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -307,11 +307,10 @@ drop:
}
static int ip_rcv_finish_core(struct net *net, struct sock *sk,
- struct sk_buff *skb)
+ struct sk_buff *skb, struct net_device *dev)
{
const struct iphdr *iph = ip_hdr(skb);
int (*edemux)(struct sk_buff *skb);
- struct net_device *dev = skb->dev;
struct rtable *rt;
int err;
@@ -400,6 +399,7 @@ drop_error:
static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
+ struct net_device *dev = skb->dev;
int ret;
/* if ingress device is enslaved to an L3 master device pass the
@@ -409,7 +409,7 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
if (!skb)
return NET_RX_SUCCESS;
- ret = ip_rcv_finish_core(net, sk, skb);
+ ret = ip_rcv_finish_core(net, sk, skb, dev);
if (ret != NET_RX_DROP)
ret = dst_input(skb);
return ret;
@@ -545,6 +545,7 @@ static void ip_list_rcv_finish(struct net *net, struct sock *sk,
INIT_LIST_HEAD(&sublist);
list_for_each_entry_safe(skb, next, head, list) {
+ struct net_device *dev = skb->dev;
struct dst_entry *dst;
skb_list_del_init(skb);
@@ -554,7 +555,7 @@ static void ip_list_rcv_finish(struct net *net, struct sock *sk,
skb = l3mdev_ip_rcv(skb);
if (!skb)
continue;
- if (ip_rcv_finish_core(net, sk, skb) == NET_RX_DROP)
+ if (ip_rcv_finish_core(net, sk, skb, dev) == NET_RX_DROP)
continue;
dst = skb_dst(skb);
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index ed194d46c00e..32a35043c9f5 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -251,8 +251,9 @@ static void spec_dst_fill(__be32 *spec_dst, struct sk_buff *skb)
* If opt == NULL, then skb->data should point to IP header.
*/
-int ip_options_compile(struct net *net,
- struct ip_options *opt, struct sk_buff *skb)
+int __ip_options_compile(struct net *net,
+ struct ip_options *opt, struct sk_buff *skb,
+ __be32 *info)
{
__be32 spec_dst = htonl(INADDR_ANY);
unsigned char *pp_ptr = NULL;
@@ -468,11 +469,22 @@ eol:
return 0;
error:
- if (skb) {
- icmp_send(skb, ICMP_PARAMETERPROB, 0, htonl((pp_ptr-iph)<<24));
- }
+ if (info)
+ *info = htonl((pp_ptr-iph)<<24);
return -EINVAL;
}
+
+int ip_options_compile(struct net *net,
+ struct ip_options *opt, struct sk_buff *skb)
+{
+ int ret;
+ __be32 info;
+
+ ret = __ip_options_compile(net, opt, skb, &info);
+ if (ret != 0 && skb)
+ icmp_send(skb, ICMP_PARAMETERPROB, 0, info);
+ return ret;
+}
EXPORT_SYMBOL(ip_options_compile);
/*
diff --git a/net/ipv4/netlink.c b/net/ipv4/netlink.c
index f86bb4f06609..d8e3a1fb8e82 100644
--- a/net/ipv4/netlink.c
+++ b/net/ipv4/netlink.c
@@ -3,9 +3,10 @@
#include <linux/types.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
+#include <linux/in6.h>
#include <net/ip.h>
-int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto,
+int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto, u8 family,
struct netlink_ext_ack *extack)
{
*ip_proto = nla_get_u8(attr);
@@ -13,11 +14,19 @@ int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto,
switch (*ip_proto) {
case IPPROTO_TCP:
case IPPROTO_UDP:
+ return 0;
case IPPROTO_ICMP:
+ if (family != AF_INET)
+ break;
+ return 0;
+#if IS_ENABLED(CONFIG_IPV6)
+ case IPPROTO_ICMPV6:
+ if (family != AF_INET6)
+ break;
return 0;
- default:
- NL_SET_ERR_MSG(extack, "Unsupported ip proto");
- return -EOPNOTSUPP;
+#endif
}
+ NL_SET_ERR_MSG(extack, "Unsupported ip proto");
+ return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(rtm_getroute_parse_ip_proto);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 5163b64f8fb3..7bb9128c8363 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2803,7 +2803,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
if (tb[RTA_IP_PROTO]) {
err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
- &ip_proto, extack);
+ &ip_proto, AF_INET, extack);
if (err)
return err;
}
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 2079145a3b7c..cf3c5095c10e 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2528,6 +2528,7 @@ void tcp_write_queue_purge(struct sock *sk)
sk_mem_reclaim(sk);
tcp_clear_all_retrans_hints(tcp_sk(sk));
tcp_sk(sk)->packets_out = 0;
+ inet_csk(sk)->icsk_backoff = 0;
}
int tcp_disconnect(struct sock *sk, int flags)
@@ -2576,7 +2577,6 @@ int tcp_disconnect(struct sock *sk, int flags)
tp->write_seq += tp->max_window + 2;
if (tp->write_seq == 0)
tp->write_seq = 1;
- icsk->icsk_backoff = 0;
tp->snd_cwnd = 2;
icsk->icsk_probes_out = 0;
tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index efc6fef692ff..ec3cea9d6828 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -536,12 +536,15 @@ int tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
if (sock_owned_by_user(sk))
break;
+ skb = tcp_rtx_queue_head(sk);
+ if (WARN_ON_ONCE(!skb))
+ break;
+
icsk->icsk_backoff--;
icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
TCP_TIMEOUT_INIT;
icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
- skb = tcp_rtx_queue_head(sk);
tcp_mstamp_refresh(tp);
delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb));
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 730bc44dbad9..ccc78f3a4b60 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2347,6 +2347,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
/* "skb_mstamp_ns" is used as a start point for the retransmit timer */
skb->skb_mstamp_ns = tp->tcp_wstamp_ns = tp->tcp_clock_cache;
list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
+ tcp_init_tso_segs(skb, mss_now);
goto repair; /* Skip network transmission */
}
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 5c3cd5d84a6f..372fdc5381a9 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -562,10 +562,12 @@ static int __udp4_lib_err_encap_no_sk(struct sk_buff *skb, u32 info)
for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
int (*handler)(struct sk_buff *skb, u32 info);
+ const struct ip_tunnel_encap_ops *encap;
- if (!iptun_encaps[i])
+ encap = rcu_dereference(iptun_encaps[i]);
+ if (!encap)
continue;
- handler = rcu_dereference(iptun_encaps[i]->err_handler);
+ handler = encap->err_handler;
if (handler && !handler(skb, info))
return 0;
}
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 5afe9f83374d..239d4a65ad6e 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -296,7 +296,7 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
skb->len += tailen;
skb->data_len += tailen;
skb->truesize += tailen;
- if (sk)
+ if (sk && sk_fullsock(sk))
refcount_add(tailen, &sk->sk_wmem_alloc);
goto out;
diff --git a/net/ipv6/fou6.c b/net/ipv6/fou6.c
index b858bd5280bf..867474abe269 100644
--- a/net/ipv6/fou6.c
+++ b/net/ipv6/fou6.c
@@ -72,7 +72,7 @@ static int gue6_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
static int gue6_err_proto_handler(int proto, struct sk_buff *skb,
struct inet6_skb_parm *opt,
- u8 type, u8 code, int offset, u32 info)
+ u8 type, u8 code, int offset, __be32 info)
{
const struct inet6_protocol *ipprot;
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 801a9a0c217e..26f25b6e2833 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -1719,6 +1719,27 @@ static int ip6erspan_tap_validate(struct nlattr *tb[], struct nlattr *data[],
return 0;
}
+static void ip6erspan_set_version(struct nlattr *data[],
+ struct __ip6_tnl_parm *parms)
+{
+ if (!data)
+ return;
+
+ parms->erspan_ver = 1;
+ if (data[IFLA_GRE_ERSPAN_VER])
+ parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
+
+ if (parms->erspan_ver == 1) {
+ if (data[IFLA_GRE_ERSPAN_INDEX])
+ parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
+ } else if (parms->erspan_ver == 2) {
+ if (data[IFLA_GRE_ERSPAN_DIR])
+ parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
+ if (data[IFLA_GRE_ERSPAN_HWID])
+ parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
+ }
+}
+
static void ip6gre_netlink_parms(struct nlattr *data[],
struct __ip6_tnl_parm *parms)
{
@@ -1767,20 +1788,6 @@ static void ip6gre_netlink_parms(struct nlattr *data[],
if (data[IFLA_GRE_COLLECT_METADATA])
parms->collect_md = true;
-
- parms->erspan_ver = 1;
- if (data[IFLA_GRE_ERSPAN_VER])
- parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
-
- if (parms->erspan_ver == 1) {
- if (data[IFLA_GRE_ERSPAN_INDEX])
- parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
- } else if (parms->erspan_ver == 2) {
- if (data[IFLA_GRE_ERSPAN_DIR])
- parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
- if (data[IFLA_GRE_ERSPAN_HWID])
- parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
- }
}
static int ip6gre_tap_init(struct net_device *dev)
@@ -2100,9 +2107,23 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
struct __ip6_tnl_parm *p = &t->parms;
__be16 o_flags = p->o_flags;
- if ((p->erspan_ver == 1 || p->erspan_ver == 2) &&
- !p->collect_md)
- o_flags |= TUNNEL_KEY;
+ if (p->erspan_ver == 1 || p->erspan_ver == 2) {
+ if (!p->collect_md)
+ o_flags |= TUNNEL_KEY;
+
+ if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, p->erspan_ver))
+ goto nla_put_failure;
+
+ if (p->erspan_ver == 1) {
+ if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index))
+ goto nla_put_failure;
+ } else {
+ if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, p->dir))
+ goto nla_put_failure;
+ if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, p->hwid))
+ goto nla_put_failure;
+ }
+ }
if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
nla_put_be16(skb, IFLA_GRE_IFLAGS,
@@ -2117,8 +2138,7 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) ||
nla_put_be32(skb, IFLA_GRE_FLOWINFO, p->flowinfo) ||
nla_put_u32(skb, IFLA_GRE_FLAGS, p->flags) ||
- nla_put_u32(skb, IFLA_GRE_FWMARK, p->fwmark) ||
- nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index))
+ nla_put_u32(skb, IFLA_GRE_FWMARK, p->fwmark))
goto nla_put_failure;
if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
@@ -2136,19 +2156,6 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
goto nla_put_failure;
}
- if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, p->erspan_ver))
- goto nla_put_failure;
-
- if (p->erspan_ver == 1) {
- if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index))
- goto nla_put_failure;
- } else if (p->erspan_ver == 2) {
- if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, p->dir))
- goto nla_put_failure;
- if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, p->hwid))
- goto nla_put_failure;
- }
-
return 0;
nla_put_failure:
@@ -2203,6 +2210,7 @@ static int ip6erspan_newlink(struct net *src_net, struct net_device *dev,
int err;
ip6gre_netlink_parms(data, &nt->parms);
+ ip6erspan_set_version(data, &nt->parms);
ign = net_generic(net, ip6gre_net_id);
if (nt->parms.collect_md) {
@@ -2248,6 +2256,7 @@ static int ip6erspan_changelink(struct net_device *dev, struct nlattr *tb[],
if (IS_ERR(t))
return PTR_ERR(t);
+ ip6erspan_set_version(data, &p);
ip6gre_tunnel_unlink_md(ign, t);
ip6gre_tunnel_unlink(ign, t);
ip6erspan_tnl_change(t, &p, !tb[IFLA_MTU]);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 964491cf3672..8dad1d690b78 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1274,18 +1274,29 @@ static DEFINE_SPINLOCK(rt6_exception_lock);
static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
struct rt6_exception *rt6_ex)
{
+ struct fib6_info *from;
struct net *net;
if (!bucket || !rt6_ex)
return;
net = dev_net(rt6_ex->rt6i->dst.dev);
+ net->ipv6.rt6_stats->fib_rt_cache--;
+
+ /* completely purge the exception to allow releasing the held resources:
+ * some [sk] cache may keep the dst around for an unlimited time
+ */
+ from = rcu_dereference_protected(rt6_ex->rt6i->from,
+ lockdep_is_held(&rt6_exception_lock));
+ rcu_assign_pointer(rt6_ex->rt6i->from, NULL);
+ fib6_info_release(from);
+ dst_dev_put(&rt6_ex->rt6i->dst);
+
hlist_del_rcu(&rt6_ex->hlist);
dst_release(&rt6_ex->rt6i->dst);
kfree_rcu(rt6_ex, rcu);
WARN_ON_ONCE(!bucket->depth);
bucket->depth--;
- net->ipv6.rt6_stats->fib_rt_cache--;
}
/* Remove oldest rt6_ex in bucket and free the memory
@@ -1599,15 +1610,15 @@ static int rt6_remove_exception_rt(struct rt6_info *rt)
static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
{
struct rt6_exception_bucket *bucket;
- struct fib6_info *from = rt->from;
struct in6_addr *src_key = NULL;
struct rt6_exception *rt6_ex;
-
- if (!from ||
- !(rt->rt6i_flags & RTF_CACHE))
- return;
+ struct fib6_info *from;
rcu_read_lock();
+ from = rcu_dereference(rt->from);
+ if (!from || !(rt->rt6i_flags & RTF_CACHE))
+ goto unlock;
+
bucket = rcu_dereference(from->rt6i_exception_bucket);
#ifdef CONFIG_IPV6_SUBTREES
@@ -1626,6 +1637,7 @@ static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
if (rt6_ex)
rt6_ex->stamp = jiffies;
+unlock:
rcu_read_unlock();
}
@@ -2742,20 +2754,24 @@ static int ip6_route_check_nh_onlink(struct net *net,
u32 tbid = l3mdev_fib_table(dev) ? : RT_TABLE_MAIN;
const struct in6_addr *gw_addr = &cfg->fc_gateway;
u32 flags = RTF_LOCAL | RTF_ANYCAST | RTF_REJECT;
+ struct fib6_info *from;
struct rt6_info *grt;
int err;
err = 0;
grt = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0);
if (grt) {
+ rcu_read_lock();
+ from = rcu_dereference(grt->from);
if (!grt->dst.error &&
/* ignore match if it is the default route */
- grt->from && !ipv6_addr_any(&grt->from->fib6_dst.addr) &&
+ from && !ipv6_addr_any(&from->fib6_dst.addr) &&
(grt->rt6i_flags & flags || dev != grt->dst.dev)) {
NL_SET_ERR_MSG(extack,
"Nexthop has invalid gateway or device mismatch");
err = -EINVAL;
}
+ rcu_read_unlock();
ip6_rt_put(grt);
}
@@ -4166,6 +4182,10 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
cfg->fc_flags |= RTF_GATEWAY;
}
+ if (tb[RTA_VIA]) {
+ NL_SET_ERR_MSG(extack, "IPv6 does not support RTA_VIA attribute");
+ goto errout;
+ }
if (tb[RTA_DST]) {
int plen = (rtm->rtm_dst_len + 7) >> 3;
@@ -4649,7 +4669,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
table = rt->fib6_table->tb6_id;
else
table = RT6_TABLE_UNSPEC;
- rtm->rtm_table = table;
+ rtm->rtm_table = table < 256 ? table : RT_TABLE_COMPAT;
if (nla_put_u32(skb, RTA_TABLE, table))
goto nla_put_failure;
@@ -4873,7 +4893,8 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
if (tb[RTA_IP_PROTO]) {
err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
- &fl6.flowi6_proto, extack);
+ &fl6.flowi6_proto, AF_INET6,
+ extack);
if (err)
goto errout;
}
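The route.c hunks above repeatedly pair the two RCU accessors: rcu_dereference_protected() with a lockdep expression on the update side (where rt6_exception_lock makes the pointer stable), and rcu_dereference() under rcu_read_lock() on the read side. A condensed sketch with illustrative types:

#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct obj { int data; };
struct holder { struct obj __rcu *from; };
static DEFINE_SPINLOCK(obj_lock);

/* update side: caller holds obj_lock */
static struct obj *clear_from(struct holder *h)
{
	struct obj *old;

	old = rcu_dereference_protected(h->from,
					lockdep_is_held(&obj_lock));
	rcu_assign_pointer(h->from, NULL);
	return old;	/* caller frees after a grace period */
}

/* read side: no lock, just an RCU section */
static int read_from(struct holder *h)
{
	struct obj *o;
	int val = 0;

	rcu_read_lock();
	o = rcu_dereference(h->from);
	if (o)
		val = o->data;
	rcu_read_unlock();
	return val;
}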
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index e8a1dabef803..09e440e8dfae 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1873,6 +1873,7 @@ static int __net_init sit_init_net(struct net *net)
err_reg_dev:
ipip6_dev_free(sitn->fb_tunnel_dev);
+ free_netdev(sitn->fb_tunnel_dev);
err_alloc_dev:
return err;
}
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 2596ffdeebea..b444483cdb2b 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -288,8 +288,8 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int peeked, peeking, off;
int err;
int is_udplite = IS_UDPLITE(sk);
+ struct udp_mib __percpu *mib;
bool checksum_valid = false;
- struct udp_mib *mib;
int is_udp4;
if (flags & MSG_ERRQUEUE)
@@ -420,17 +420,19 @@ EXPORT_SYMBOL(udpv6_encap_enable);
*/
static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb,
struct inet6_skb_parm *opt,
- u8 type, u8 code, int offset, u32 info)
+ u8 type, u8 code, int offset, __be32 info)
{
int i;
for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
- u8 type, u8 code, int offset, u32 info);
+ u8 type, u8 code, int offset, __be32 info);
+ const struct ip6_tnl_encap_ops *encap;
- if (!ip6tun_encaps[i])
+ encap = rcu_dereference(ip6tun_encaps[i]);
+ if (!encap)
continue;
- handler = rcu_dereference(ip6tun_encaps[i]->err_handler);
+ handler = encap->err_handler;
if (handler && !handler(skb, opt, type, code, offset, info))
return 0;
}
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index f5b4febeaa25..bc65db782bfb 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -344,8 +344,8 @@ static void __net_exit xfrm6_tunnel_net_exit(struct net *net)
struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
unsigned int i;
- xfrm_state_flush(net, IPSEC_PROTO_ANY, false);
xfrm_flush_gc();
+ xfrm_state_flush(net, IPSEC_PROTO_ANY, false, true);
for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
WARN_ON_ONCE(!hlist_empty(&xfrm6_tn->spi_byaddr[i]));
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 655c787f9d54..5651c29cb5bd 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -196,30 +196,22 @@ static int pfkey_release(struct socket *sock)
return 0;
}
-static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2,
- gfp_t allocation, struct sock *sk)
+static int pfkey_broadcast_one(struct sk_buff *skb, gfp_t allocation,
+ struct sock *sk)
{
int err = -ENOBUFS;
- sock_hold(sk);
- if (*skb2 == NULL) {
- if (refcount_read(&skb->users) != 1) {
- *skb2 = skb_clone(skb, allocation);
- } else {
- *skb2 = skb;
- refcount_inc(&skb->users);
- }
- }
- if (*skb2 != NULL) {
- if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
- skb_set_owner_r(*skb2, sk);
- skb_queue_tail(&sk->sk_receive_queue, *skb2);
- sk->sk_data_ready(sk);
- *skb2 = NULL;
- err = 0;
- }
+ if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
+ return err;
+
+ skb = skb_clone(skb, allocation);
+
+ if (skb) {
+ skb_set_owner_r(skb, sk);
+ skb_queue_tail(&sk->sk_receive_queue, skb);
+ sk->sk_data_ready(sk);
+ err = 0;
}
- sock_put(sk);
return err;
}
@@ -234,7 +226,6 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
{
struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
struct sock *sk;
- struct sk_buff *skb2 = NULL;
int err = -ESRCH;
/* XXX Do we need something like netlink_overrun? I think
@@ -253,7 +244,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
* socket.
*/
if (pfk->promisc)
- pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk);
+ pfkey_broadcast_one(skb, GFP_ATOMIC, sk);
/* the exact target will be processed later */
if (sk == one_sk)
@@ -268,7 +259,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
continue;
}
- err2 = pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk);
+ err2 = pfkey_broadcast_one(skb, GFP_ATOMIC, sk);
/* Error is cleared after successful sending to at least one
* registered KM */
@@ -278,9 +269,8 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
rcu_read_unlock();
if (one_sk != NULL)
- err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk);
+ err = pfkey_broadcast_one(skb, allocation, one_sk);
- kfree_skb(skb2);
kfree_skb(skb);
return err;
}
@@ -1783,7 +1773,7 @@ static int pfkey_flush(struct sock *sk, struct sk_buff *skb, const struct sadb_m
if (proto == 0)
return -EINVAL;
- err = xfrm_state_flush(net, proto, true);
+ err = xfrm_state_flush(net, proto, true, false);
err2 = unicast_flush_resp(sk, hdr);
if (err || err2) {
if (err == -ESRCH) /* empty table - go quietly */
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 2493c74c2d37..96496b2c1670 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -941,6 +941,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
BSS_CHANGED_P2P_PS |
BSS_CHANGED_TXPOWER;
int err;
+ int prev_beacon_int;
old = sdata_dereference(sdata->u.ap.beacon, sdata);
if (old)
@@ -963,6 +964,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
sdata->needed_rx_chains = sdata->local->rx_chains;
+ prev_beacon_int = sdata->vif.bss_conf.beacon_int;
sdata->vif.bss_conf.beacon_int = params->beacon_interval;
if (params->he_cap)
@@ -974,8 +976,10 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
if (!err)
ieee80211_vif_copy_chanctx_to_vlans(sdata, false);
mutex_unlock(&local->mtx);
- if (err)
+ if (err) {
+ sdata->vif.bss_conf.beacon_int = prev_beacon_int;
return err;
+ }
/*
* Apply control port protocol, this allows us to
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 87a729926734..977dea436ee8 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -615,13 +615,13 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
* We need a bit of data queued to build aggregates properly, so
* instruct the TCP stack to allow more than a single ms of data
* to be queued in the stack. The value is a bit-shift of 1
- * second, so 8 is ~4ms of queued data. Only affects local TCP
+ * second, so 7 is ~8ms of queued data. Only affects local TCP
* sockets.
* This is the default, anyhow - drivers may need to override it
* for local reasons (longer buffers, longer completion time, or
* similar).
*/
- local->hw.tx_sk_pacing_shift = 8;
+ local->hw.tx_sk_pacing_shift = 7;
/* set up some defaults */
local->hw.queues = 1;
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index cad6592c52a1..2ec7011a4d07 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -70,6 +70,7 @@ enum mesh_deferred_task_flags {
* @dst: mesh path destination mac address
* @mpp: mesh proxy mac address
* @rhash: rhashtable list pointer
+ * @walk_list: linked list containing all mesh_path objects.
* @gate_list: list pointer for known gates list
* @sdata: mesh subif
* @next_hop: mesh neighbor to which frames for this destination will be
@@ -105,6 +106,7 @@ struct mesh_path {
u8 dst[ETH_ALEN];
u8 mpp[ETH_ALEN]; /* used for MPP or MAP */
struct rhash_head rhash;
+ struct hlist_node walk_list;
struct hlist_node gate_list;
struct ieee80211_sub_if_data *sdata;
struct sta_info __rcu *next_hop;
@@ -133,12 +135,16 @@ struct mesh_path {
* gate's mpath may or may not be resolved and active.
* @gates_lock: protects updates to known_gates
* @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr
+ * @walk_head: linked list containing all mesh_path objects
+ * @walk_lock: lock protecting walk_head
* @entries: number of entries in the table
*/
struct mesh_table {
struct hlist_head known_gates;
spinlock_t gates_lock;
struct rhashtable rhead;
+ struct hlist_head walk_head;
+ spinlock_t walk_lock;
atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */
};
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index a5125624a76d..88a6d5e18ccc 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -59,8 +59,10 @@ static struct mesh_table *mesh_table_alloc(void)
return NULL;
INIT_HLIST_HEAD(&newtbl->known_gates);
+ INIT_HLIST_HEAD(&newtbl->walk_head);
atomic_set(&newtbl->entries, 0);
spin_lock_init(&newtbl->gates_lock);
+ spin_lock_init(&newtbl->walk_lock);
return newtbl;
}
@@ -249,28 +251,15 @@ mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
static struct mesh_path *
__mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
{
- int i = 0, ret;
- struct mesh_path *mpath = NULL;
- struct rhashtable_iter iter;
-
- ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
- if (ret)
- return NULL;
-
- rhashtable_walk_start(&iter);
+ int i = 0;
+ struct mesh_path *mpath;
- while ((mpath = rhashtable_walk_next(&iter))) {
- if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
- continue;
- if (IS_ERR(mpath))
- break;
+ hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
if (i++ == idx)
break;
}
- rhashtable_walk_stop(&iter);
- rhashtable_walk_exit(&iter);
- if (IS_ERR(mpath) || !mpath)
+ if (!mpath)
return NULL;
if (mpath_expired(mpath)) {
@@ -432,6 +421,7 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
return ERR_PTR(-ENOMEM);
tbl = sdata->u.mesh.mesh_paths;
+ spin_lock_bh(&tbl->walk_lock);
do {
ret = rhashtable_lookup_insert_fast(&tbl->rhead,
&new_mpath->rhash,
@@ -441,20 +431,20 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
mpath = rhashtable_lookup_fast(&tbl->rhead,
dst,
mesh_rht_params);
-
+ else if (!ret)
+ hlist_add_head(&new_mpath->walk_list, &tbl->walk_head);
} while (unlikely(ret == -EEXIST && !mpath));
+ spin_unlock_bh(&tbl->walk_lock);
- if (ret && ret != -EEXIST)
- return ERR_PTR(ret);
-
- /* At this point either new_mpath was added, or we found a
- * matching entry already in the table; in the latter case
- * free the unnecessary new entry.
- */
- if (ret == -EEXIST) {
+ if (ret) {
kfree(new_mpath);
+
+ if (ret != -EEXIST)
+ return ERR_PTR(ret);
+
new_mpath = mpath;
}
+
sdata->u.mesh.mesh_paths_generation++;
return new_mpath;
}
@@ -480,9 +470,17 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
memcpy(new_mpath->mpp, mpp, ETH_ALEN);
tbl = sdata->u.mesh.mpp_paths;
+
+ spin_lock_bh(&tbl->walk_lock);
ret = rhashtable_lookup_insert_fast(&tbl->rhead,
&new_mpath->rhash,
mesh_rht_params);
+ if (!ret)
+ hlist_add_head_rcu(&new_mpath->walk_list, &tbl->walk_head);
+ spin_unlock_bh(&tbl->walk_lock);
+
+ if (ret)
+ kfree(new_mpath);
sdata->u.mesh.mpp_paths_generation++;
return ret;
@@ -503,20 +501,9 @@ void mesh_plink_broken(struct sta_info *sta)
struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
struct mesh_path *mpath;
- struct rhashtable_iter iter;
- int ret;
-
- ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
- if (ret)
- return;
- rhashtable_walk_start(&iter);
-
- while ((mpath = rhashtable_walk_next(&iter))) {
- if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
- continue;
- if (IS_ERR(mpath))
- break;
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
if (rcu_access_pointer(mpath->next_hop) == sta &&
mpath->flags & MESH_PATH_ACTIVE &&
!(mpath->flags & MESH_PATH_FIXED)) {
@@ -530,8 +517,7 @@ void mesh_plink_broken(struct sta_info *sta)
WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
}
}
- rhashtable_walk_stop(&iter);
- rhashtable_walk_exit(&iter);
+ rcu_read_unlock();
}
static void mesh_path_free_rcu(struct mesh_table *tbl,
@@ -551,6 +537,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl,
static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
+ hlist_del_rcu(&mpath->walk_list);
rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params);
mesh_path_free_rcu(tbl, mpath);
}
@@ -571,27 +558,14 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
struct ieee80211_sub_if_data *sdata = sta->sdata;
struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
struct mesh_path *mpath;
- struct rhashtable_iter iter;
- int ret;
-
- ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
- if (ret)
- return;
-
- rhashtable_walk_start(&iter);
-
- while ((mpath = rhashtable_walk_next(&iter))) {
- if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
- continue;
- if (IS_ERR(mpath))
- break;
+ struct hlist_node *n;
+ spin_lock_bh(&tbl->walk_lock);
+ hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
if (rcu_access_pointer(mpath->next_hop) == sta)
__mesh_path_del(tbl, mpath);
}
-
- rhashtable_walk_stop(&iter);
- rhashtable_walk_exit(&iter);
+ spin_unlock_bh(&tbl->walk_lock);
}
static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
@@ -599,51 +573,26 @@ static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
{
struct mesh_table *tbl = sdata->u.mesh.mpp_paths;
struct mesh_path *mpath;
- struct rhashtable_iter iter;
- int ret;
-
- ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
- if (ret)
- return;
-
- rhashtable_walk_start(&iter);
-
- while ((mpath = rhashtable_walk_next(&iter))) {
- if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
- continue;
- if (IS_ERR(mpath))
- break;
+ struct hlist_node *n;
+ spin_lock_bh(&tbl->walk_lock);
+ hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
if (ether_addr_equal(mpath->mpp, proxy))
__mesh_path_del(tbl, mpath);
}
-
- rhashtable_walk_stop(&iter);
- rhashtable_walk_exit(&iter);
+ spin_unlock_bh(&tbl->walk_lock);
}
static void table_flush_by_iface(struct mesh_table *tbl)
{
struct mesh_path *mpath;
- struct rhashtable_iter iter;
- int ret;
-
- ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
- if (ret)
- return;
-
- rhashtable_walk_start(&iter);
+ struct hlist_node *n;
- while ((mpath = rhashtable_walk_next(&iter))) {
- if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
- continue;
- if (IS_ERR(mpath))
- break;
+ spin_lock_bh(&tbl->walk_lock);
+ hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
__mesh_path_del(tbl, mpath);
}
-
- rhashtable_walk_stop(&iter);
- rhashtable_walk_exit(&iter);
+ spin_unlock_bh(&tbl->walk_lock);
}
/**
@@ -675,15 +624,15 @@ static int table_path_del(struct mesh_table *tbl,
{
struct mesh_path *mpath;
- rcu_read_lock();
+ spin_lock_bh(&tbl->walk_lock);
mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params);
if (!mpath) {
- rcu_read_unlock();
+ spin_unlock_bh(&tbl->walk_lock);
return -ENXIO;
}
__mesh_path_del(tbl, mpath);
- rcu_read_unlock();
+ spin_unlock_bh(&tbl->walk_lock);
return 0;
}
@@ -854,28 +803,16 @@ void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
struct mesh_table *tbl)
{
struct mesh_path *mpath;
- struct rhashtable_iter iter;
- int ret;
+ struct hlist_node *n;
- ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_KERNEL);
- if (ret)
- return;
-
- rhashtable_walk_start(&iter);
-
- while ((mpath = rhashtable_walk_next(&iter))) {
- if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
- continue;
- if (IS_ERR(mpath))
- break;
+ spin_lock_bh(&tbl->walk_lock);
+ hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
(!(mpath->flags & MESH_PATH_FIXED)) &&
time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
__mesh_path_del(tbl, mpath);
}
-
- rhashtable_walk_stop(&iter);
- rhashtable_walk_exit(&iter);
+ spin_unlock_bh(&tbl->walk_lock);
}
void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
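All of the mesh_pathtbl.c hunks implement one structural change: every path now sits both in the rhashtable (keyed lookup) and on a plain hlist (linear walks), so the blocking rhashtable iterator can go away. A condensed sketch of the resulting delete path, with illustrative type and parameter names:

#include <linux/rhashtable.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>

struct path {
	struct rhash_head rhash;	/* keyed lookup */
	struct hlist_node walk_list;	/* linear walks */
	struct rcu_head rcu;
};

struct table {
	struct rhashtable rhead;
	struct hlist_head walk_head;
	spinlock_t walk_lock;
};

static void path_del(struct table *tbl, struct path *p,
		     const struct rhashtable_params params)
{
	spin_lock_bh(&tbl->walk_lock);	/* serialises all mutators */
	hlist_del_rcu(&p->walk_list);
	rhashtable_remove_fast(&tbl->rhead, &p->rhash, params);
	spin_unlock_bh(&tbl->walk_lock);
	kfree_rcu(p, rcu);	/* RCU walkers may still hold p */
}

Read-only walkers then need only rcu_read_lock() around hlist_for_each_entry_rcu(), as mesh_plink_broken() does above, while deleting walkers take walk_lock and use the _safe list variant.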
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index bb4d71efb6fb..c2a6da5d80da 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2644,6 +2644,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
struct ieee80211_sub_if_data *sdata = rx->sdata;
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
u16 ac, q, hdrlen;
+ int tailroom = 0;
hdr = (struct ieee80211_hdr *) skb->data;
hdrlen = ieee80211_hdrlen(hdr->frame_control);
@@ -2732,8 +2733,12 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
if (!ifmsh->mshcfg.dot11MeshForwarding)
goto out;
+ if (sdata->crypto_tx_tailroom_needed_cnt)
+ tailroom = IEEE80211_ENCRYPT_TAILROOM;
+
fwd_skb = skb_copy_expand(skb, local->tx_headroom +
- sdata->encrypt_headroom, 0, GFP_ATOMIC);
+ sdata->encrypt_headroom,
+ tailroom, GFP_ATOMIC);
if (!fwd_skb)
goto out;
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 7d55d4c04088..fa763e2e50ec 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -1838,6 +1838,9 @@ static int rtm_to_route_config(struct sk_buff *skb,
goto errout;
break;
}
+ case RTA_GATEWAY:
+ NL_SET_ERR_MSG(extack, "MPLS does not support RTA_GATEWAY attribute");
+ goto errout;
case RTA_VIA:
{
if (nla_get_via(nla, &cfg->rc_via_alen,
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 86afacb07e5f..ac8d848d7624 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -896,12 +896,13 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
{
struct ip_vs_dest *dest;
unsigned int atype, i;
- int ret = 0;
EnterFunction(2);
#ifdef CONFIG_IP_VS_IPV6
if (udest->af == AF_INET6) {
+ int ret;
+
atype = ipv6_addr_type(&udest->addr.in6);
if ((!(atype & IPV6_ADDR_UNICAST) ||
atype & IPV6_ADDR_LINKLOCAL) &&
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 5a92f23f179f..4893f248dfdc 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -313,6 +313,9 @@ static int nft_delrule_by_chain(struct nft_ctx *ctx)
int err;
list_for_each_entry(rule, &ctx->chain->rules, list) {
+ if (!nft_is_active_next(ctx->net, rule))
+ continue;
+
err = nft_delrule(ctx, rule);
if (err < 0)
return err;
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
index ea7c67050792..ee3e5b6471a6 100644
--- a/net/netlabel/netlabel_kapi.c
+++ b/net/netlabel/netlabel_kapi.c
@@ -903,7 +903,8 @@ int netlbl_bitmap_walk(const unsigned char *bitmap, u32 bitmap_len,
(state == 0 && (byte & bitmask) == 0))
return bit_spot;
- bit_spot++;
+ if (++bit_spot >= bitmap_len)
+ return -1;
bitmask >>= 1;
if (bitmask == 0) {
byte = bitmap[++byte_offset];
diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c
index 6a196e438b6c..d1fc019e932e 100644
--- a/net/nfc/llcp_commands.c
+++ b/net/nfc/llcp_commands.c
@@ -419,6 +419,10 @@ int nfc_llcp_send_connect(struct nfc_llcp_sock *sock)
sock->service_name,
sock->service_name_len,
&service_name_tlv_length);
+ if (!service_name_tlv) {
+ err = -ENOMEM;
+ goto error_tlv;
+ }
size += service_name_tlv_length;
}
@@ -429,9 +433,17 @@ int nfc_llcp_send_connect(struct nfc_llcp_sock *sock)
miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0,
&miux_tlv_length);
+ if (!miux_tlv) {
+ err = -ENOMEM;
+ goto error_tlv;
+ }
size += miux_tlv_length;
rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length);
+ if (!rw_tlv) {
+ err = -ENOMEM;
+ goto error_tlv;
+ }
size += rw_tlv_length;
pr_debug("SKB size %d SN length %zu\n", size, sock->service_name_len);
@@ -484,9 +496,17 @@ int nfc_llcp_send_cc(struct nfc_llcp_sock *sock)
miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0,
&miux_tlv_length);
+ if (!miux_tlv) {
+ err = -ENOMEM;
+ goto error_tlv;
+ }
size += miux_tlv_length;
rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length);
+ if (!rw_tlv) {
+ err = -ENOMEM;
+ goto error_tlv;
+ }
size += rw_tlv_length;
skb = llcp_allocate_pdu(sock, LLCP_PDU_CC, size);
diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c
index ef4026a23e80..4fa015208aab 100644
--- a/net/nfc/llcp_core.c
+++ b/net/nfc/llcp_core.c
@@ -532,10 +532,10 @@ static u8 nfc_llcp_reserve_sdp_ssap(struct nfc_llcp_local *local)
static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
{
- u8 *gb_cur, *version_tlv, version, version_length;
- u8 *lto_tlv, lto_length;
- u8 *wks_tlv, wks_length;
- u8 *miux_tlv, miux_length;
+ u8 *gb_cur, version, version_length;
+ u8 lto_length, wks_length, miux_length;
+ u8 *version_tlv = NULL, *lto_tlv = NULL,
+ *wks_tlv = NULL, *miux_tlv = NULL;
__be16 wks = cpu_to_be16(local->local_wks);
u8 gb_len = 0;
int ret = 0;
@@ -543,17 +543,33 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
version = LLCP_VERSION_11;
version_tlv = nfc_llcp_build_tlv(LLCP_TLV_VERSION, &version,
1, &version_length);
+ if (!version_tlv) {
+ ret = -ENOMEM;
+ goto out;
+ }
gb_len += version_length;
lto_tlv = nfc_llcp_build_tlv(LLCP_TLV_LTO, &local->lto, 1, &lto_length);
+ if (!lto_tlv) {
+ ret = -ENOMEM;
+ goto out;
+ }
gb_len += lto_length;
pr_debug("Local wks 0x%lx\n", local->local_wks);
wks_tlv = nfc_llcp_build_tlv(LLCP_TLV_WKS, (u8 *)&wks, 2, &wks_length);
+ if (!wks_tlv) {
+ ret = -ENOMEM;
+ goto out;
+ }
gb_len += wks_length;
miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&local->miux, 0,
&miux_length);
+ if (!miux_tlv) {
+ ret = -ENOMEM;
+ goto out;
+ }
gb_len += miux_length;
gb_len += ARRAY_SIZE(llcp_magic);
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 9fc76b19cd3c..db3473540303 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -132,7 +132,7 @@ static int pep_indicate(struct sock *sk, u8 id, u8 code,
ph->utid = 0;
ph->message_id = id;
ph->pipe_handle = pn->pipe_handle;
- ph->data[0] = code;
+ ph->error_code = code;
return pn_skb_send(sk, skb, NULL);
}
@@ -153,7 +153,7 @@ static int pipe_handler_request(struct sock *sk, u8 id, u8 code,
ph->utid = id; /* whatever */
ph->message_id = id;
ph->pipe_handle = pn->pipe_handle;
- ph->data[0] = code;
+ ph->error_code = code;
return pn_skb_send(sk, skb, NULL);
}
@@ -208,7 +208,7 @@ static int pep_ctrlreq_error(struct sock *sk, struct sk_buff *oskb, u8 code,
struct pnpipehdr *ph;
struct sockaddr_pn dst;
u8 data[4] = {
- oph->data[0], /* PEP type */
+ oph->pep_type, /* PEP type */
code, /* error code, at an unusual offset */
PAD, PAD,
};
@@ -221,7 +221,7 @@ static int pep_ctrlreq_error(struct sock *sk, struct sk_buff *oskb, u8 code,
ph->utid = oph->utid;
ph->message_id = PNS_PEP_CTRL_RESP;
ph->pipe_handle = oph->pipe_handle;
- ph->data[0] = oph->data[1]; /* CTRL id */
+ ph->data0 = oph->data[0]; /* CTRL id */
pn_skb_get_src_sockaddr(oskb, &dst);
return pn_skb_send(sk, skb, &dst);
@@ -272,17 +272,17 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
return -EINVAL;
hdr = pnp_hdr(skb);
- if (hdr->data[0] != PN_PEP_TYPE_COMMON) {
+ if (hdr->pep_type != PN_PEP_TYPE_COMMON) {
net_dbg_ratelimited("Phonet unknown PEP type: %u\n",
- (unsigned int)hdr->data[0]);
+ (unsigned int)hdr->pep_type);
return -EOPNOTSUPP;
}
- switch (hdr->data[1]) {
+ switch (hdr->data[0]) {
case PN_PEP_IND_FLOW_CONTROL:
switch (pn->tx_fc) {
case PN_LEGACY_FLOW_CONTROL:
- switch (hdr->data[4]) {
+ switch (hdr->data[3]) {
case PEP_IND_BUSY:
atomic_set(&pn->tx_credits, 0);
break;
@@ -292,7 +292,7 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
}
break;
case PN_ONE_CREDIT_FLOW_CONTROL:
- if (hdr->data[4] == PEP_IND_READY)
+ if (hdr->data[3] == PEP_IND_READY)
atomic_set(&pn->tx_credits, wake = 1);
break;
}
@@ -301,12 +301,12 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
case PN_PEP_IND_ID_MCFC_GRANT_CREDITS:
if (pn->tx_fc != PN_MULTI_CREDIT_FLOW_CONTROL)
break;
- atomic_add(wake = hdr->data[4], &pn->tx_credits);
+ atomic_add(wake = hdr->data[3], &pn->tx_credits);
break;
default:
net_dbg_ratelimited("Phonet unknown PEP indication: %u\n",
- (unsigned int)hdr->data[1]);
+ (unsigned int)hdr->data[0]);
return -EOPNOTSUPP;
}
if (wake)
@@ -318,7 +318,7 @@ static int pipe_rcv_created(struct sock *sk, struct sk_buff *skb)
{
struct pep_sock *pn = pep_sk(sk);
struct pnpipehdr *hdr = pnp_hdr(skb);
- u8 n_sb = hdr->data[0];
+ u8 n_sb = hdr->data0;
pn->rx_fc = pn->tx_fc = PN_LEGACY_FLOW_CONTROL;
__skb_pull(skb, sizeof(*hdr));
@@ -506,7 +506,7 @@ static int pep_connresp_rcv(struct sock *sk, struct sk_buff *skb)
return -ECONNREFUSED;
/* Parse sub-blocks */
- n_sb = hdr->data[4];
+ n_sb = hdr->data[3];
while (n_sb > 0) {
u8 type, buf[6], len = sizeof(buf);
const u8 *data = pep_get_sb(skb, &type, &len, buf);
@@ -739,7 +739,7 @@ static int pipe_do_remove(struct sock *sk)
ph->utid = 0;
ph->message_id = PNS_PIPE_REMOVE_REQ;
ph->pipe_handle = pn->pipe_handle;
- ph->data[0] = PAD;
+ ph->data0 = PAD;
return pn_skb_send(sk, skb, NULL);
}
@@ -817,7 +817,7 @@ static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp,
peer_type = hdr->other_pep_type << 8;
/* Parse sub-blocks (options) */
- n_sb = hdr->data[4];
+ n_sb = hdr->data[3];
while (n_sb > 0) {
u8 type, buf[1], len = sizeof(buf);
const u8 *data = pep_get_sb(skb, &type, &len, buf);
@@ -1109,7 +1109,7 @@ static int pipe_skb_send(struct sock *sk, struct sk_buff *skb)
ph->utid = 0;
if (pn->aligned) {
ph->message_id = PNS_PIPE_ALIGNED_DATA;
- ph->data[0] = 0; /* padding */
+ ph->data0 = 0; /* padding */
} else
ph->message_id = PNS_PIPE_DATA;
ph->pipe_handle = pn->pipe_handle;
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 8af6c11d2482..faa1addf89b3 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -199,8 +199,7 @@ err3:
err2:
kfree(tname);
err1:
- if (ret == ACT_P_CREATED)
- tcf_idr_release(*a, bind);
+ tcf_idr_release(*a, bind);
return err;
}
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 64dba3708fce..cfceed28c333 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -189,8 +189,7 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
if (unlikely(!params_new)) {
- if (ret == ACT_P_CREATED)
- tcf_idr_release(*a, bind);
+ tcf_idr_release(*a, bind);
return -ENOMEM;
}
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index 8b43fe0130f7..3f943de9a2c9 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -377,7 +377,8 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
return ret;
release_tun_meta:
- dst_release(&metadata->dst);
+ if (metadata)
+ dst_release(&metadata->dst);
err_out:
if (exists)
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 75046ec72144..cc9d8133afcd 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -447,6 +447,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
int nb = 0;
int count = 1;
int rc = NET_XMIT_SUCCESS;
+ int rc_drop = NET_XMIT_DROP;
/* Do not fool qdisc_drop_all() */
skb->prev = NULL;
@@ -486,6 +487,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
q->duplicate = 0;
rootq->enqueue(skb2, rootq, to_free);
q->duplicate = dupsave;
+ rc_drop = NET_XMIT_SUCCESS;
}
/*
@@ -498,7 +500,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
if (skb_is_gso(skb)) {
segs = netem_segment(skb, sch, to_free);
if (!segs)
- return NET_XMIT_DROP;
+ return rc_drop;
} else {
segs = skb;
}
@@ -521,8 +523,10 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
1<<(prandom_u32() % 8);
}
- if (unlikely(sch->q.qlen >= sch->limit))
- return qdisc_drop_all(skb, sch, to_free);
+ if (unlikely(sch->q.qlen >= sch->limit)) {
+ qdisc_drop_all(skb, sch, to_free);
+ return rc_drop;
+ }
qdisc_qstats_backlog_inc(sch, skb);
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index 64bef313d436..5cb7c1ff97e9 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -192,7 +192,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
if (unlikely(!max_data)) {
max_data = sctp_min_frag_point(sctp_sk(asoc->base.sk),
sctp_datachk_len(&asoc->stream));
- pr_warn_ratelimited("%s: asoc:%p frag_point is zero, forcing max_data to default minimum (%Zu)",
+ pr_warn_ratelimited("%s: asoc:%p frag_point is zero, forcing max_data to default minimum (%zu)",
__func__, asoc, max_data);
}
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 033696e6f74f..ad158d311ffa 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -207,7 +207,8 @@ void sctp_transport_reset_hb_timer(struct sctp_transport *transport)
/* When a data chunk is sent, reset the heartbeat interval. */
expires = jiffies + sctp_transport_timeout(transport);
- if (time_before(transport->hb_timer.expires, expires) &&
+ if ((time_before(transport->hb_timer.expires, expires) ||
+ !timer_pending(&transport->hb_timer)) &&
!mod_timer(&transport->hb_timer,
expires + prandom_u32_max(transport->rto)))
sctp_transport_hold(transport);
diff --git a/net/smc/smc.h b/net/smc/smc.h
index 5721416d0605..adbdf195eb08 100644
--- a/net/smc/smc.h
+++ b/net/smc/smc.h
@@ -113,9 +113,9 @@ struct smc_host_cdc_msg { /* Connection Data Control message */
} __aligned(8);
enum smc_urg_state {
- SMC_URG_VALID, /* data present */
- SMC_URG_NOTYET, /* data pending */
- SMC_URG_READ /* data was already read */
+ SMC_URG_VALID = 1, /* data present */
+ SMC_URG_NOTYET = 2, /* data pending */
+ SMC_URG_READ = 3, /* data was already read */
};
struct smc_connection {
diff --git a/net/socket.c b/net/socket.c
index d80d87a395ea..320f51b22b19 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -577,6 +577,7 @@ static void __sock_release(struct socket *sock, struct inode *inode)
if (inode)
inode_lock(inode);
sock->ops->release(sock);
+ sock->sk = NULL;
if (inode)
inode_unlock(inode);
sock->ops = NULL;
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 1217c90a363b..70343ac448b1 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -379,16 +379,18 @@ static int tipc_sk_sock_err(struct socket *sock, long *timeout)
#define tipc_wait_for_cond(sock_, timeo_, condition_) \
({ \
+ DEFINE_WAIT_FUNC(wait_, woken_wake_function); \
struct sock *sk_; \
int rc_; \
\
while ((rc_ = !(condition_))) { \
- DEFINE_WAIT_FUNC(wait_, woken_wake_function); \
+ /* coupled with smp_wmb() in tipc_sk_proto_rcv() */ \
+ smp_rmb(); \
sk_ = (sock_)->sk; \
rc_ = tipc_sk_sock_err((sock_), timeo_); \
if (rc_) \
break; \
- prepare_to_wait(sk_sleep(sk_), &wait_, TASK_INTERRUPTIBLE); \
+ add_wait_queue(sk_sleep(sk_), &wait_); \
release_sock(sk_); \
*(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \
sched_annotate_sleep(); \
@@ -1677,7 +1679,7 @@ static void tipc_sk_send_ack(struct tipc_sock *tsk)
static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
{
struct sock *sk = sock->sk;
- DEFINE_WAIT(wait);
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
long timeo = *timeop;
int err = sock_error(sk);
@@ -1685,15 +1687,17 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
return err;
for (;;) {
- prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
if (sk->sk_shutdown & RCV_SHUTDOWN) {
err = -ENOTCONN;
break;
}
+ add_wait_queue(sk_sleep(sk), &wait);
release_sock(sk);
- timeo = schedule_timeout(timeo);
+ timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
+ sched_annotate_sleep();
lock_sock(sk);
+ remove_wait_queue(sk_sleep(sk), &wait);
}
err = 0;
if (!skb_queue_empty(&sk->sk_receive_queue))
@@ -1709,7 +1713,6 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
if (err)
break;
}
- finish_wait(sk_sleep(sk), &wait);
*timeop = timeo;
return err;
}
@@ -1982,6 +1985,8 @@ static void tipc_sk_proto_rcv(struct sock *sk,
return;
case SOCK_WAKEUP:
tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
+ /* coupled with smp_rmb() in tipc_wait_for_cond() */
+ smp_wmb();
tsk->cong_link_cnt--;
wakeup = true;
break;
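The tipc hunks replace prepare_to_wait()/schedule_timeout() with the wait_woken() idiom: the wait entry is queued before the condition is re-checked, so a wakeup arriving between the check and the sleep marks the entry woken and wait_woken() returns at once rather than sleeping through it (the smp_wmb()/smp_rmb() pair covers the cong_link_cnt store the waiter re-reads). A sketch with an illustrative condition:

#include <linux/wait.h>
#include <linux/sched/signal.h>
#include <net/sock.h>

static int wait_for_data(struct sock *sk, long timeo)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int err = 0;

	add_wait_queue(sk_sleep(sk), &wait);
	while (skb_queue_empty(&sk->sk_receive_queue)) {
		release_sock(sk);
		/* sleeps unless a wakeup already landed */
		timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
		lock_sock(sk);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}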
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 74d1eed7cbd4..a95d479caeea 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -890,7 +890,7 @@ retry:
addr->hash ^= sk->sk_type;
__unix_remove_socket(sk);
- u->addr = addr;
+ smp_store_release(&u->addr, addr);
__unix_insert_socket(&unix_socket_table[addr->hash], sk);
spin_unlock(&unix_table_lock);
err = 0;
@@ -1060,7 +1060,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
err = 0;
__unix_remove_socket(sk);
- u->addr = addr;
+ smp_store_release(&u->addr, addr);
__unix_insert_socket(list, sk);
out_unlock:
@@ -1331,15 +1331,29 @@ restart:
RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
otheru = unix_sk(other);
- /* copy address information from listening to new sock*/
- if (otheru->addr) {
- refcount_inc(&otheru->addr->refcnt);
- newu->addr = otheru->addr;
- }
+ /* copy address information from listening to new sock
+ *
+ * The contents of *(otheru->addr) and otheru->path
+ * are seen fully set up here, since we have found
+ * otheru in hash under unix_table_lock. Insertion
+ * into the hash chain we'd found it in had been done
+ * in an earlier critical section protected by unix_table_lock,
+ * the same one where we'd set *(otheru->addr) contents,
+ * as well as otheru->path and otheru->addr itself.
+ *
+ * Using smp_store_release() here to set newu->addr
+ * is enough to make those stores, as well as stores
+ * to newu->path visible to anyone who gets newu->addr
+ * by smp_load_acquire(). IOW, the same guarantees
+ * as for unix_sock instances bound in unix_bind() or
+ * in unix_autobind().
+ */
if (otheru->path.dentry) {
path_get(&otheru->path);
newu->path = otheru->path;
}
+ refcount_inc(&otheru->addr->refcnt);
+ smp_store_release(&newu->addr, otheru->addr);
/* Set credentials */
copy_peercred(sk, other);
@@ -1453,7 +1467,7 @@ out:
static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
{
struct sock *sk = sock->sk;
- struct unix_sock *u;
+ struct unix_address *addr;
DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
int err = 0;
@@ -1468,19 +1482,15 @@ static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
sock_hold(sk);
}
- u = unix_sk(sk);
- unix_state_lock(sk);
- if (!u->addr) {
+ addr = smp_load_acquire(&unix_sk(sk)->addr);
+ if (!addr) {
sunaddr->sun_family = AF_UNIX;
sunaddr->sun_path[0] = 0;
err = sizeof(short);
} else {
- struct unix_address *addr = u->addr;
-
err = addr->len;
memcpy(sunaddr, addr->name, addr->len);
}
- unix_state_unlock(sk);
sock_put(sk);
out:
return err;
@@ -2073,11 +2083,11 @@ static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
{
- struct unix_sock *u = unix_sk(sk);
+ struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
- if (u->addr) {
- msg->msg_namelen = u->addr->len;
- memcpy(msg->msg_name, u->addr->name, u->addr->len);
+ if (addr) {
+ msg->msg_namelen = addr->len;
+ memcpy(msg->msg_name, addr->name, addr->len);
}
}
@@ -2581,15 +2591,14 @@ static int unix_open_file(struct sock *sk)
if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
return -EPERM;
- unix_state_lock(sk);
+ if (!smp_load_acquire(&unix_sk(sk)->addr))
+ return -ENOENT;
+
path = unix_sk(sk)->path;
- if (!path.dentry) {
- unix_state_unlock(sk);
+ if (!path.dentry)
return -ENOENT;
- }
path_get(&path);
- unix_state_unlock(sk);
fd = get_unused_fd_flags(O_CLOEXEC);
if (fd < 0)
@@ -2830,7 +2839,7 @@ static int unix_seq_show(struct seq_file *seq, void *v)
(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
sock_i_ino(s));
- if (u->addr) {
+ if (u->addr) { // under unix_table_lock here
int i, len;
seq_putc(seq, ' ');
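The af_unix hunks all build on the same publish/consume pairing: u->addr is written once, with smp_store_release(), only after the address and path are fully set up, and lockless readers fetch it with smp_load_acquire(), which lets them drop unix_state_lock(). A minimal sketch with illustrative types:

#include <linux/atomic.h>
#include <linux/string.h>

struct uaddr {
	int len;
	char name[108];
};

static struct uaddr *published;	/* the shared pointer */

static void publish_addr(struct uaddr *a, const char *name, int len)
{
	a->len = len;
	memcpy(a->name, name, len);		/* initialise first ... */
	smp_store_release(&published, a);	/* ... publish last */
}

static int peek_addr_len(void)
{
	struct uaddr *a = smp_load_acquire(&published);

	/* if a is seen, all its fields are seen initialised */
	return a ? a->len : 0;
}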
diff --git a/net/unix/diag.c b/net/unix/diag.c
index 384c84e83462..3183d9b8ab33 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -10,7 +10,8 @@
static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
{
- struct unix_address *addr = unix_sk(sk)->addr;
+ /* might or might not have unix_table_lock */
+ struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
if (!addr)
return 0;
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index ec3a828672ef..eff31348e20b 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -679,8 +679,7 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr;
int len, i, rc = 0;
- if (!sock_flag(sk, SOCK_ZAPPED) ||
- addr_len != sizeof(struct sockaddr_x25) ||
+ if (addr_len != sizeof(struct sockaddr_x25) ||
addr->sx25_family != AF_X25) {
rc = -EINVAL;
goto out;
@@ -699,9 +698,13 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
}
lock_sock(sk);
- x25_sk(sk)->source_addr = addr->sx25_addr;
- x25_insert_socket(sk);
- sock_reset_flag(sk, SOCK_ZAPPED);
+ if (sock_flag(sk, SOCK_ZAPPED)) {
+ x25_sk(sk)->source_addr = addr->sx25_addr;
+ x25_insert_socket(sk);
+ sock_reset_flag(sk, SOCK_ZAPPED);
+ } else {
+ rc = -EINVAL;
+ }
release_sock(sk);
SOCK_DEBUG(sk, "x25_bind: socket is bound\n");
out:
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index d4de871e7d4d..37e1fe180769 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -125,9 +125,10 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
return 0;
err_unreg_umem:
- xdp_clear_umem_at_qid(dev, queue_id);
if (!force_zc)
err = 0; /* fallback to copy mode */
+ if (err)
+ xdp_clear_umem_at_qid(dev, queue_id);
out_rtnl_unlock:
rtnl_unlock();
return err;
@@ -259,10 +260,10 @@ static int xdp_umem_pin_pages(struct xdp_umem *umem)
if (!umem->pgs)
return -ENOMEM;
- down_write(&current->mm->mmap_sem);
- npgs = get_user_pages(umem->address, umem->npgs,
- gup_flags, &umem->pgs[0], NULL);
- up_write(&current->mm->mmap_sem);
+ down_read(&current->mm->mmap_sem);
+ npgs = get_user_pages_longterm(umem->address, umem->npgs,
+ gup_flags, &umem->pgs[0], NULL);
+ up_read(&current->mm->mmap_sem);
if (npgs != umem->npgs) {
if (npgs >= 0) {
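The umem hunk switches the pinning to the long-term variant and, since pinning only reads the address space, takes mmap_sem for read instead of write. A sketch of the pattern as a standalone helper (the wrapper name is illustrative; get_user_pages_longterm() was the pre-FOLL_LONGTERM API of this era):

#include <linux/mm.h>
#include <linux/sched/mm.h>

static long pin_user_range(unsigned long addr, unsigned long npgs,
			   unsigned int gup_flags, struct page **pages)
{
	long got;

	down_read(&current->mm->mmap_sem);	/* read, not write */
	got = get_user_pages_longterm(addr, npgs, gup_flags,
				      pages, NULL);
	up_read(&current->mm->mmap_sem);
	return got;	/* number pinned, or -errno */
}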
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index a03268454a27..85e4fe4f18cc 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -366,7 +366,6 @@ static int xsk_release(struct socket *sock)
xskq_destroy(xs->rx);
xskq_destroy(xs->tx);
- xdp_put_umem(xs->umem);
sock_orphan(sk);
sock->sk = NULL;
@@ -669,6 +668,8 @@ static int xsk_mmap(struct file *file, struct socket *sock,
if (!umem)
return -EINVAL;
+ /* Matches the smp_wmb() in XDP_UMEM_REG */
+ smp_rmb();
if (offset == XDP_UMEM_PGOFF_FILL_RING)
q = READ_ONCE(umem->fq);
else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
@@ -678,6 +679,8 @@ static int xsk_mmap(struct file *file, struct socket *sock,
if (!q)
return -EINVAL;
+ /* Matches the smp_wmb() in xsk_init_queue */
+ smp_rmb();
qpg = virt_to_head_page(q->ring);
if (size > (PAGE_SIZE << compound_order(qpg)))
return -EINVAL;
@@ -714,6 +717,18 @@ static const struct proto_ops xsk_proto_ops = {
.sendpage = sock_no_sendpage,
};
+static void xsk_destruct(struct sock *sk)
+{
+ struct xdp_sock *xs = xdp_sk(sk);
+
+ if (!sock_flag(sk, SOCK_DEAD))
+ return;
+
+ xdp_put_umem(xs->umem);
+
+ sk_refcnt_debug_dec(sk);
+}
+
static int xsk_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
@@ -740,6 +755,9 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol,
sk->sk_family = PF_XDP;
+ sk->sk_destruct = xsk_destruct;
+ sk_refcnt_debug_inc(sk);
+
sock_set_flag(sk, SOCK_RCU_FREE);
xs = xdp_sk(sk);
diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c
index 6be8c7df15bb..dbb3c1945b5c 100644
--- a/net/xfrm/xfrm_interface.c
+++ b/net/xfrm/xfrm_interface.c
@@ -76,10 +76,10 @@ static struct xfrm_if *xfrmi_decode_session(struct sk_buff *skb)
int ifindex;
struct xfrm_if *xi;
- if (!skb->dev)
+ if (!secpath_exists(skb) || !skb->dev)
return NULL;
- xfrmn = net_generic(dev_net(skb->dev), xfrmi_net_id);
+ xfrmn = net_generic(xs_net(xfrm_input_state(skb)), xfrmi_net_id);
ifindex = skb->dev->ifindex;
for_each_xfrmi_rcu(xfrmn->xfrmi[0], xi) {
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index ba0a4048c846..8d1a898d0ba5 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -3314,8 +3314,10 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
if (ifcb) {
xi = ifcb->decode_session(skb);
- if (xi)
+ if (xi) {
if_id = xi->p.if_id;
+ net = xi->net;
+ }
}
rcu_read_unlock();
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 23c92891758a..1bb971f46fc6 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -432,7 +432,7 @@ void xfrm_state_free(struct xfrm_state *x)
}
EXPORT_SYMBOL(xfrm_state_free);
-static void xfrm_state_gc_destroy(struct xfrm_state *x)
+static void ___xfrm_state_destroy(struct xfrm_state *x)
{
tasklet_hrtimer_cancel(&x->mtimer);
del_timer_sync(&x->rtimer);
@@ -474,7 +474,7 @@ static void xfrm_state_gc_task(struct work_struct *work)
synchronize_rcu();
hlist_for_each_entry_safe(x, tmp, &gc_list, gclist)
- xfrm_state_gc_destroy(x);
+ ___xfrm_state_destroy(x);
}
static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
@@ -598,14 +598,19 @@ struct xfrm_state *xfrm_state_alloc(struct net *net)
}
EXPORT_SYMBOL(xfrm_state_alloc);
-void __xfrm_state_destroy(struct xfrm_state *x)
+void __xfrm_state_destroy(struct xfrm_state *x, bool sync)
{
WARN_ON(x->km.state != XFRM_STATE_DEAD);
- spin_lock_bh(&xfrm_state_gc_lock);
- hlist_add_head(&x->gclist, &xfrm_state_gc_list);
- spin_unlock_bh(&xfrm_state_gc_lock);
- schedule_work(&xfrm_state_gc_work);
+ if (sync) {
+ synchronize_rcu();
+ ___xfrm_state_destroy(x);
+ } else {
+ spin_lock_bh(&xfrm_state_gc_lock);
+ hlist_add_head(&x->gclist, &xfrm_state_gc_list);
+ spin_unlock_bh(&xfrm_state_gc_lock);
+ schedule_work(&xfrm_state_gc_work);
+ }
}
EXPORT_SYMBOL(__xfrm_state_destroy);
@@ -708,7 +713,7 @@ xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool
}
#endif
-int xfrm_state_flush(struct net *net, u8 proto, bool task_valid)
+int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync)
{
int i, err = 0, cnt = 0;
@@ -730,7 +735,10 @@ restart:
err = xfrm_state_delete(x);
xfrm_audit_state_delete(x, err ? 0 : 1,
task_valid);
- xfrm_state_put(x);
+ if (sync)
+ xfrm_state_put_sync(x);
+ else
+ xfrm_state_put(x);
if (!err)
cnt++;
@@ -2215,7 +2223,7 @@ void xfrm_state_delete_tunnel(struct xfrm_state *x)
if (atomic_read(&t->tunnel_users) == 2)
xfrm_state_delete(t);
atomic_dec(&t->tunnel_users);
- xfrm_state_put(t);
+ xfrm_state_put_sync(t);
x->tunnel = NULL;
}
}
@@ -2375,8 +2383,8 @@ void xfrm_state_fini(struct net *net)
unsigned int sz;
flush_work(&net->xfrm.state_hash_work);
- xfrm_state_flush(net, IPSEC_PROTO_ANY, false);
flush_work(&xfrm_state_gc_work);
+ xfrm_state_flush(net, IPSEC_PROTO_ANY, false, true);
WARN_ON(!list_empty(&net->xfrm.state_all));
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index c6d26afcf89d..a131f9ff979e 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1932,7 +1932,7 @@ static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
struct xfrm_usersa_flush *p = nlmsg_data(nlh);
int err;
- err = xfrm_state_flush(net, p->proto, true);
+ err = xfrm_state_flush(net, p->proto, true, false);
if (err) {
if (err == -ESRCH) /* empty table */
return 0;
diff --git a/scripts/Makefile.kasan b/scripts/Makefile.kasan
index 25c259df8ffa..6deabedc67fc 100644
--- a/scripts/Makefile.kasan
+++ b/scripts/Makefile.kasan
@@ -26,7 +26,7 @@ else
CFLAGS_KASAN := $(CFLAGS_KASAN_SHADOW) \
$(call cc-param,asan-globals=1) \
$(call cc-param,asan-instrumentation-with-call-threshold=$(call_threshold)) \
- $(call cc-param,asan-stack=1) \
+ $(call cc-param,asan-stack=$(CONFIG_KASAN_STACK)) \
$(call cc-param,asan-use-after-scope=1) \
$(call cc-param,asan-instrument-allocas=1)
endif
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index 77cebad0474e..f75e7bda4889 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -118,8 +118,8 @@ static int read_symbol(FILE *in, struct sym_entry *s)
fprintf(stderr, "Read error or end of file.\n");
return -1;
}
- if (strlen(sym) > KSYM_NAME_LEN) {
- fprintf(stderr, "Symbol %s too long for kallsyms (%zu vs %d).\n"
+ if (strlen(sym) >= KSYM_NAME_LEN) {
+ fprintf(stderr, "Symbol %s too long for kallsyms (%zu >= %d).\n"
"Please increase KSYM_NAME_LEN both in kernel and kallsyms.c\n",
sym, strlen(sym), KSYM_NAME_LEN);
return -1;
diff --git a/security/keys/internal.h b/security/keys/internal.h
index 479909b858c7..8f533c81aa8d 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -186,20 +186,9 @@ static inline int key_permission(const key_ref_t key_ref, unsigned perm)
return key_task_permission(key_ref, current_cred(), perm);
}
-/*
- * Authorisation record for request_key().
- */
-struct request_key_auth {
- struct key *target_key;
- struct key *dest_keyring;
- const struct cred *cred;
- void *callout_info;
- size_t callout_len;
- pid_t pid;
-} __randomize_layout;
-
extern struct key_type key_type_request_key_auth;
extern struct key *request_key_auth_new(struct key *target,
+ const char *op,
const void *callout_info,
size_t callout_len,
struct key *dest_keyring);
diff --git a/security/keys/key.c b/security/keys/key.c
index 44a80d6741a1..696f1c092c50 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -265,8 +265,8 @@ struct key *key_alloc(struct key_type *type, const char *desc,
spin_lock(&user->lock);
if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
- if (user->qnkeys + 1 >= maxkeys ||
- user->qnbytes + quotalen >= maxbytes ||
+ if (user->qnkeys + 1 > maxkeys ||
+ user->qnbytes + quotalen > maxbytes ||
user->qnbytes + quotalen < user->qnbytes)
goto no_quota;
}
@@ -297,6 +297,7 @@ struct key *key_alloc(struct key_type *type, const char *desc,
key->gid = gid;
key->perm = perm;
key->restrict_link = restrict_link;
+ key->last_used_at = ktime_get_real_seconds();
if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
key->flags |= 1 << KEY_FLAG_IN_QUOTA;
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index e8093d025966..7bbe03593e58 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -25,6 +25,7 @@
#include <linux/security.h>
#include <linux/uio.h>
#include <linux/uaccess.h>
+#include <keys/request_key_auth-type.h>
#include "internal.h"
#define KEY_MAX_DESC_SIZE 4096
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index eadebb92986a..f81372f53dd7 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -661,9 +661,6 @@ static bool search_nested_keyrings(struct key *keyring,
BUG_ON((ctx->flags & STATE_CHECKS) == 0 ||
(ctx->flags & STATE_CHECKS) == STATE_CHECKS);
- if (ctx->index_key.description)
- ctx->index_key.desc_len = strlen(ctx->index_key.description);
-
/* Check to see if this top-level keyring is what we are looking for
* and whether it is valid or not.
*/
@@ -914,6 +911,7 @@ key_ref_t keyring_search(key_ref_t keyring,
struct keyring_search_context ctx = {
.index_key.type = type,
.index_key.description = description,
+ .index_key.desc_len = strlen(description),
.cred = current_cred(),
.match_data.cmp = key_default_cmp,
.match_data.raw_data = description,
diff --git a/security/keys/proc.c b/security/keys/proc.c
index d2b802072693..78ac305d715e 100644
--- a/security/keys/proc.c
+++ b/security/keys/proc.c
@@ -165,8 +165,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
int rc;
struct keyring_search_context ctx = {
- .index_key.type = key->type,
- .index_key.description = key->description,
+ .index_key = key->index_key,
.cred = m->file->f_cred,
.match_data.cmp = lookup_user_key_possessed,
.match_data.raw_data = key,
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 02c77e928f68..0e0b9ccad2f8 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -19,6 +19,7 @@
#include <linux/security.h>
#include <linux/user_namespace.h>
#include <linux/uaccess.h>
+#include <keys/request_key_auth-type.h>
#include "internal.h"
/* Session keyring create vs join semaphore */
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index 301f0e300dbd..7a0c6b666ff0 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -18,31 +18,30 @@
#include <linux/keyctl.h>
#include <linux/slab.h>
#include "internal.h"
+#include <keys/request_key_auth-type.h>
#define key_negative_timeout 60 /* default timeout on a negative key's existence */
/**
* complete_request_key - Complete the construction of a key.
- * @cons: The key construction record.
+ * @auth_key: The authorisation key.
 * @error: The success or failure of the construction.
*
* Complete the attempt to construct a key. The key will be negated
* if an error is indicated. The authorisation key will be revoked
* unconditionally.
*/
-void complete_request_key(struct key_construction *cons, int error)
+void complete_request_key(struct key *authkey, int error)
{
- kenter("{%d,%d},%d", cons->key->serial, cons->authkey->serial, error);
+ struct request_key_auth *rka = get_request_key_auth(authkey);
+ struct key *key = rka->target_key;
+
+ kenter("%d{%d},%d", authkey->serial, key->serial, error);
if (error < 0)
- key_negate_and_link(cons->key, key_negative_timeout, NULL,
- cons->authkey);
+ key_negate_and_link(key, key_negative_timeout, NULL, authkey);
else
- key_revoke(cons->authkey);
-
- key_put(cons->key);
- key_put(cons->authkey);
- kfree(cons);
+ key_revoke(authkey);
}
EXPORT_SYMBOL(complete_request_key);
@@ -91,21 +90,19 @@ static int call_usermodehelper_keys(const char *path, char **argv, char **envp,
* Request userspace finish the construction of a key
* - execute "/sbin/request-key <op> <key> <uid> <gid> <keyring> <keyring> <keyring>"
*/
-static int call_sbin_request_key(struct key_construction *cons,
- const char *op,
- void *aux)
+static int call_sbin_request_key(struct key *authkey, void *aux)
{
static char const request_key[] = "/sbin/request-key";
+ struct request_key_auth *rka = get_request_key_auth(authkey);
const struct cred *cred = current_cred();
key_serial_t prkey, sskey;
- struct key *key = cons->key, *authkey = cons->authkey, *keyring,
- *session;
+ struct key *key = rka->target_key, *keyring, *session;
char *argv[9], *envp[3], uid_str[12], gid_str[12];
char key_str[12], keyring_str[3][12];
char desc[20];
int ret, i;
- kenter("{%d},{%d},%s", key->serial, authkey->serial, op);
+ kenter("{%d},{%d},%s", key->serial, authkey->serial, rka->op);
ret = install_user_keyrings();
if (ret < 0)
@@ -163,7 +160,7 @@ static int call_sbin_request_key(struct key_construction *cons,
/* set up the argument list */
i = 0;
argv[i++] = (char *)request_key;
- argv[i++] = (char *) op;
+ argv[i++] = (char *)rka->op;
argv[i++] = key_str;
argv[i++] = uid_str;
argv[i++] = gid_str;
@@ -191,7 +188,7 @@ error_link:
key_put(keyring);
error_alloc:
- complete_request_key(cons, ret);
+ complete_request_key(authkey, ret);
kleave(" = %d", ret);
return ret;
}
@@ -205,42 +202,31 @@ static int construct_key(struct key *key, const void *callout_info,
size_t callout_len, void *aux,
struct key *dest_keyring)
{
- struct key_construction *cons;
request_key_actor_t actor;
struct key *authkey;
int ret;
kenter("%d,%p,%zu,%p", key->serial, callout_info, callout_len, aux);
- cons = kmalloc(sizeof(*cons), GFP_KERNEL);
- if (!cons)
- return -ENOMEM;
-
/* allocate an authorisation key */
- authkey = request_key_auth_new(key, callout_info, callout_len,
+ authkey = request_key_auth_new(key, "create", callout_info, callout_len,
dest_keyring);
- if (IS_ERR(authkey)) {
- kfree(cons);
- ret = PTR_ERR(authkey);
- authkey = NULL;
- } else {
- cons->authkey = key_get(authkey);
- cons->key = key_get(key);
+ if (IS_ERR(authkey))
+ return PTR_ERR(authkey);
- /* make the call */
- actor = call_sbin_request_key;
- if (key->type->request_key)
- actor = key->type->request_key;
+ /* Make the call */
+ actor = call_sbin_request_key;
+ if (key->type->request_key)
+ actor = key->type->request_key;
- ret = actor(cons, "create", aux);
+ ret = actor(authkey, aux);
- /* check that the actor called complete_request_key() prior to
- * returning an error */
- WARN_ON(ret < 0 &&
- !test_bit(KEY_FLAG_REVOKED, &authkey->flags));
- key_put(authkey);
- }
+ /* check that the actor called complete_request_key() prior to
+ * returning an error */
+ WARN_ON(ret < 0 &&
+ !test_bit(KEY_FLAG_REVOKED, &authkey->flags));
+ key_put(authkey);
kleave(" = %d", ret);
return ret;
}
@@ -275,7 +261,7 @@ static int construct_get_dest_keyring(struct key **_dest_keyring)
if (cred->request_key_auth) {
authkey = cred->request_key_auth;
down_read(&authkey->sem);
- rka = authkey->payload.data[0];
+ rka = get_request_key_auth(authkey);
if (!test_bit(KEY_FLAG_REVOKED,
&authkey->flags))
dest_keyring =
@@ -545,6 +531,7 @@ struct key *request_key_and_link(struct key_type *type,
struct keyring_search_context ctx = {
.index_key.type = type,
.index_key.description = description,
+ .index_key.desc_len = strlen(description),
.cred = current_cred(),
.match_data.cmp = key_default_cmp,
.match_data.raw_data = description,
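
Taken together, the request_key.c hunks retire the heap-allocated struct key_construction: the actor callback now receives the authorisation key alone and recovers the target key from its payload, so there is no record to kmalloc(), no duplicated key references, and no kfree() in complete_request_key(). A simplified stand-alone sketch of the accessor idiom (types reduced to stand-ins; only the shape matches the kernel):

    #include <stdio.h>

    struct key;

    struct request_key_auth {
        struct key *target_key; /* the key being constructed */
        char op[8];             /* requested operation, e.g. "create" */
    };

    struct key {
        int serial;
        void *payload;          /* stand-in for payload.data[0] */
    };

    /* one accessor replaces scattered payload.data[0] casts */
    static struct request_key_auth *get_request_key_auth(const struct key *key)
    {
        return key->payload;
    }

    static void complete_request_key(struct key *authkey, int error)
    {
        struct request_key_auth *rka = get_request_key_auth(authkey);
        struct key *key = rka->target_key;

        printf("%d{%d},%d\n", authkey->serial, key->serial, error);
        /* negate-or-revoke happens here; no cons record to free */
    }

    int main(void)
    {
        struct request_key_auth rka = { .op = "create" };
        struct key target = { .serial = 2 };
        struct key auth = { .serial = 1, .payload = &rka };

        rka.target_key = &target;
        complete_request_key(&auth, -5);    /* -EIO, say */
        return 0;
    }
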
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index 87ea2f54dedc..bda6201c6c45 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -17,7 +17,7 @@
#include <linux/slab.h>
#include <linux/uaccess.h>
#include "internal.h"
-#include <keys/user-type.h>
+#include <keys/request_key_auth-type.h>
static int request_key_auth_preparse(struct key_preparsed_payload *);
static void request_key_auth_free_preparse(struct key_preparsed_payload *);
@@ -68,7 +68,7 @@ static int request_key_auth_instantiate(struct key *key,
static void request_key_auth_describe(const struct key *key,
struct seq_file *m)
{
- struct request_key_auth *rka = key->payload.data[0];
+ struct request_key_auth *rka = get_request_key_auth(key);
seq_puts(m, "key:");
seq_puts(m, key->description);
@@ -83,7 +83,7 @@ static void request_key_auth_describe(const struct key *key,
static long request_key_auth_read(const struct key *key,
char __user *buffer, size_t buflen)
{
- struct request_key_auth *rka = key->payload.data[0];
+ struct request_key_auth *rka = get_request_key_auth(key);
size_t datalen;
long ret;
@@ -109,7 +109,7 @@ static long request_key_auth_read(const struct key *key,
*/
static void request_key_auth_revoke(struct key *key)
{
- struct request_key_auth *rka = key->payload.data[0];
+ struct request_key_auth *rka = get_request_key_auth(key);
kenter("{%d}", key->serial);
@@ -136,7 +136,7 @@ static void free_request_key_auth(struct request_key_auth *rka)
*/
static void request_key_auth_destroy(struct key *key)
{
- struct request_key_auth *rka = key->payload.data[0];
+ struct request_key_auth *rka = get_request_key_auth(key);
kenter("{%d}", key->serial);
@@ -147,8 +147,9 @@ static void request_key_auth_destroy(struct key *key)
* Create an authorisation token for /sbin/request-key or whoever to gain
* access to the caller's security data.
*/
-struct key *request_key_auth_new(struct key *target, const void *callout_info,
- size_t callout_len, struct key *dest_keyring)
+struct key *request_key_auth_new(struct key *target, const char *op,
+ const void *callout_info, size_t callout_len,
+ struct key *dest_keyring)
{
struct request_key_auth *rka, *irka;
const struct cred *cred = current->cred;
@@ -166,6 +167,7 @@ struct key *request_key_auth_new(struct key *target, const void *callout_info,
if (!rka->callout_info)
goto error_free_rka;
rka->callout_len = callout_len;
+ strlcpy(rka->op, op, sizeof(rka->op));
/* see if the calling process is already servicing the key request of
* another process */
@@ -245,7 +247,7 @@ struct key *key_get_instantiation_authkey(key_serial_t target_id)
struct key *authkey;
key_ref_t authkey_ref;
- sprintf(description, "%x", target_id);
+ ctx.index_key.desc_len = sprintf(description, "%x", target_id);
authkey_ref = search_process_keyrings(&ctx);
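
The final hunk leans on sprintf() returning the number of characters written, so the description and its length are captured in a single statement. A condensed illustration (hypothetical struct, not the kernel's keyring_index_key):

    #include <stddef.h>
    #include <stdio.h>

    struct index_key {
        char desc[16];
        size_t desc_len;
    };

    int main(void)
    {
        struct index_key ik;
        int target_id = 0x2a;

        /* sprintf() returns the length written, excluding the NUL */
        ik.desc_len = sprintf(ik.desc, "%x", target_id);
        printf("desc=%s len=%zu\n", ik.desc, ik.desc_len); /* desc=2a len=2 */
        return 0;
    }
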
diff --git a/security/lsm_audit.c b/security/lsm_audit.c
index f84001019356..33028c098ef3 100644
--- a/security/lsm_audit.c
+++ b/security/lsm_audit.c
@@ -321,6 +321,7 @@ static void dump_common_audit_data(struct audit_buffer *ab,
if (a->u.net->sk) {
struct sock *sk = a->u.net->sk;
struct unix_sock *u;
+ struct unix_address *addr;
int len = 0;
char *p = NULL;
@@ -351,14 +352,15 @@ static void dump_common_audit_data(struct audit_buffer *ab,
#endif
case AF_UNIX:
u = unix_sk(sk);
+ addr = smp_load_acquire(&u->addr);
+ if (!addr)
+ break;
if (u->path.dentry) {
audit_log_d_path(ab, " path=", &u->path);
break;
}
- if (!u->addr)
- break;
- len = u->addr->len-sizeof(short);
- p = &u->addr->name->sun_path[0];
+ len = addr->len-sizeof(short);
+ p = &addr->name->sun_path[0];
audit_log_format(ab, " path=");
if (*p)
audit_log_untrustedstring(ab, p);
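
The audit path previously tested u->addr for NULL and then dereferenced u->addr again, racing with a concurrent bind; the fix loads the pointer once with acquire semantics and only uses that snapshot. A user-space analogue with C11 atomics, where memory_order_acquire/release stand in for the kernel's smp_load_acquire()/smp_store_release() (the address type below is invented for the sketch):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct unix_address {
        int len;
        char name[64];
    };

    static _Atomic(struct unix_address *) addr_ptr;

    /* writer: fully initialise the object, then release-store the pointer */
    static void publish(const char *path)
    {
        struct unix_address *a = calloc(1, sizeof(*a));

        if (!a)
            return;
        a->len = (int)strlen(path);
        snprintf(a->name, sizeof(a->name), "%s", path);
        atomic_store_explicit(&addr_ptr, a, memory_order_release);
    }

    /* reader: acquire-load once, then use only the local snapshot */
    static int addr_len(void)
    {
        struct unix_address *a =
            atomic_load_explicit(&addr_ptr, memory_order_acquire);

        if (!a)
            return -1;  /* not yet published: bail out, as the fix does */
        return a->len;  /* never re-reads the shared pointer */
    }

    int main(void)
    {
        publish("/tmp/sock");
        printf("%d\n", addr_len());
        return 0;
    }
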
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 6df758adff84..1ffa36e987b4 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -1855,6 +1855,8 @@ enum {
ALC887_FIXUP_BASS_CHMAP,
ALC1220_FIXUP_GB_DUAL_CODECS,
ALC1220_FIXUP_CLEVO_P950,
+ ALC1220_FIXUP_SYSTEM76_ORYP5,
+ ALC1220_FIXUP_SYSTEM76_ORYP5_PINS,
};
static void alc889_fixup_coef(struct hda_codec *codec,
@@ -2056,6 +2058,17 @@ static void alc1220_fixup_clevo_p950(struct hda_codec *codec,
snd_hda_override_conn_list(codec, 0x1b, 1, conn1);
}
+static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action);
+
+static void alc1220_fixup_system76_oryp5(struct hda_codec *codec,
+ const struct hda_fixup *fix,
+ int action)
+{
+ alc1220_fixup_clevo_p950(codec, fix, action);
+ alc_fixup_headset_mode_no_hp_mic(codec, fix, action);
+}
+
static const struct hda_fixup alc882_fixups[] = {
[ALC882_FIXUP_ABIT_AW9D_MAX] = {
.type = HDA_FIXUP_PINS,
@@ -2300,6 +2313,19 @@ static const struct hda_fixup alc882_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc1220_fixup_clevo_p950,
},
+ [ALC1220_FIXUP_SYSTEM76_ORYP5] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc1220_fixup_system76_oryp5,
+ },
+ [ALC1220_FIXUP_SYSTEM76_ORYP5_PINS] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */
+ {}
+ },
+ .chained = true,
+ .chain_id = ALC1220_FIXUP_SYSTEM76_ORYP5,
+ },
};
static const struct snd_pci_quirk alc882_fixup_tbl[] = {
@@ -2376,6 +2402,8 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950),
SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950),
SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950),
+ SND_PCI_QUIRK(0x1558, 0x96e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS),
+ SND_PCI_QUIRK(0x1558, 0x97e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS),
SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530),
@@ -5632,6 +5660,7 @@ enum {
ALC294_FIXUP_ASUS_SPK,
ALC225_FIXUP_HEADSET_JACK,
ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
+ ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
};
static const struct hda_fixup alc269_fixups[] = {
@@ -6587,6 +6616,17 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
},
+ [ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE] = {
+ .type = HDA_FIXUP_VERBS,
+ .v.verbs = (const struct hda_verb[]) {
+ /* Disable PCBEEP-IN passthrough */
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x36 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x57d7 },
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC285_FIXUP_LENOVO_HEADPHONE_NOISE
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -7272,7 +7312,7 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x12, 0x90a60130},
{0x19, 0x03a11020},
{0x21, 0x0321101f}),
- SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_HEADPHONE_NOISE,
+ SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
{0x12, 0x90a60130},
{0x14, 0x90170110},
{0x19, 0x04a11040},
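
The new quirk entries show how HDA fixups compose: the SND_PCI_QUIRK rows point at a pin-table fixup, whose .chained/.chain_id then runs a function fixup, which in turn calls two existing fixup routines back to back. The dispatch shape, stripped of all HDA types (everything below is a stand-in, not the driver's API):

    #include <stdio.h>

    typedef void (*fixup_fn)(int action);

    struct fixup {
        fixup_fn func;  /* NULL for pure data (pin-table) fixups */
        int chained;    /* run another entry afterwards? */
        int chain_id;   /* index of the next entry */
    };

    static void fixup_clevo_p950(int action)  { printf("clevo_p950(%d)\n", action); }
    static void fixup_headset_mic(int action) { printf("headset_mic(%d)\n", action); }

    /* composed fixup, mirroring alc1220_fixup_system76_oryp5() */
    static void fixup_system76_oryp5(int action)
    {
        fixup_clevo_p950(action);
        fixup_headset_mic(action);
    }

    static const struct fixup table[] = {
        [0] = { .func = fixup_system76_oryp5 },
        [1] = { .func = NULL, .chained = 1, .chain_id = 0 }, /* pins first */
    };

    static void apply(int idx, int action)
    {
        for (;;) {
            if (table[idx].func)
                table[idx].func(action);
            if (!table[idx].chained)
                break;
            idx = table[idx].chain_id;
        }
    }

    int main(void) { apply(1, 1); return 0; }
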
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index 37e001cf9cd1..3fe34417ec89 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -462,7 +462,7 @@ static int asoc_simple_card_parse_of(struct simple_card_data *priv)
conf_idx = 0;
node = of_get_child_by_name(top, PREFIX "dai-link");
if (!node) {
- node = dev->of_node;
+ node = of_node_get(top);
loop = 0;
}
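
The one-line fix is about reference counting: the caller eventually does of_node_put() on node, so when the lookup falls back to the top-level node it must take its own reference with of_node_get() instead of borrowing dev->of_node. A toy refcount model of why the put must be balanced on every path:

    #include <stdio.h>

    struct node { int refcount; };

    static struct node *node_get(struct node *n) { n->refcount++; return n; }
    static void node_put(struct node *n)         { n->refcount--; }

    int main(void)
    {
        struct node top = { .refcount = 1 };
        struct node *child = NULL;          /* lookup of the child failed */
        struct node *node = child ? child : node_get(&top); /* own ref */

        /* ... use node ... */

        node_put(node);                     /* balanced: no underflow */
        printf("top.refcount=%d\n", top.refcount);  /* back to 1 */
        return 0;
    }
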
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
index ce00fe2f6aae..d4bde4834ce5 100644
--- a/sound/soc/samsung/i2s.c
+++ b/sound/soc/samsung/i2s.c
@@ -604,6 +604,7 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
unsigned int fmt)
{
struct i2s_dai *i2s = to_info(dai);
+ struct i2s_dai *other = get_other_dai(i2s);
int lrp_shift, sdf_shift, sdf_mask, lrp_rlow, mod_slave;
u32 mod, tmp = 0;
unsigned long flags;
@@ -661,7 +662,8 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
* CLK_I2S_RCLK_SRC clock is not exposed so we ensure any
* clock configuration assigned in DT is not overwritten.
*/
- if (i2s->rclk_srcrate == 0 && i2s->clk_data.clks == NULL)
+ if (i2s->rclk_srcrate == 0 && i2s->clk_data.clks == NULL &&
+ other->clk_data.clks == NULL)
i2s_set_sysclk(dai, SAMSUNG_I2S_RCLKSRC_0,
0, SND_SOC_CLOCK_IN);
break;
@@ -699,6 +701,7 @@ static int i2s_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
{
struct i2s_dai *i2s = to_info(dai);
+ struct i2s_dai *other = get_other_dai(i2s);
u32 mod, mask = 0, val = 0;
struct clk *rclksrc;
unsigned long flags;
@@ -784,6 +787,9 @@ static int i2s_hw_params(struct snd_pcm_substream *substream,
i2s->frmclk = params_rate(params);
rclksrc = i2s->clk_table[CLK_I2S_RCLK_SRC];
+ if (!rclksrc || IS_ERR(rclksrc))
+ rclksrc = other->clk_table[CLK_I2S_RCLK_SRC];
+
if (rclksrc && !IS_ERR(rclksrc))
i2s->rclk_srcrate = clk_get_rate(rclksrc);
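
Both hunks make the primary DAI consult its sibling: first to avoid clobbering a DT-assigned clock when either side has clock data, then to fall back to the secondary DAI's CLK_I2S_RCLK_SRC when the primary's is absent. Reduced to the selection logic (get_other_dai() and the clk API replaced by stand-ins):

    #include <stdio.h>

    struct dai { long clk_rate; int has_clk; };

    static long rclk_rate(const struct dai *i2s, const struct dai *other)
    {
        const struct dai *src = i2s->has_clk ? i2s : other; /* fall back */
        return src->has_clk ? src->clk_rate : 0;
    }

    int main(void)
    {
        struct dai a = { 0, 0 }, b = { 48000, 1 };
        printf("%ld\n", rclk_rate(&a, &b)); /* 48000, via the other DAI */
        return 0;
    }
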
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index fc79ec6927e3..731b963b6995 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -2487,6 +2487,7 @@ int snd_soc_tplg_component_load(struct snd_soc_component *comp,
struct snd_soc_tplg_ops *ops, const struct firmware *fw, u32 id)
{
struct soc_tplg tplg;
+ int ret;
/* setup parsing context */
memset(&tplg, 0, sizeof(tplg));
@@ -2500,7 +2501,12 @@ int snd_soc_tplg_component_load(struct snd_soc_component *comp,
tplg.bytes_ext_ops = ops->bytes_ext_ops;
tplg.bytes_ext_ops_count = ops->bytes_ext_ops_count;
- return soc_tplg_load(&tplg);
+ ret = soc_tplg_load(&tplg);
+	/* free the created components if the topology fails to load */
+ if (ret)
+ snd_soc_tplg_component_remove(comp, SND_SOC_TPLG_INDEX_ALL);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_tplg_component_load);
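
The hunk changes the load path from "return whatever soc_tplg_load() said" to "unwind first, then return", so a half-parsed topology no longer leaves stale components registered. The general shape of that error-unwind, with placeholder names throughout:

    #include <stdio.h>

    static int registered;

    static int register_component(void) { registered++; return 0; }
    static void remove_components(void) { registered = 0; puts("unwound"); }

    /* parsing creates components as it goes, then may still fail */
    static int tplg_load(void)
    {
        register_component();
        return -22;         /* -EINVAL: malformed section, say */
    }

    static int component_load(void)
    {
        int ret = tplg_load();

        if (ret)            /* free whatever was created before the error */
            remove_components();
        return ret;
    }

    int main(void) { return component_load() ? 1 : 0; }
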
diff --git a/tools/testing/selftests/bpf/test_lpm_map.c b/tools/testing/selftests/bpf/test_lpm_map.c
index 147e34cfceb7..02d7c871862a 100644
--- a/tools/testing/selftests/bpf/test_lpm_map.c
+++ b/tools/testing/selftests/bpf/test_lpm_map.c
@@ -474,6 +474,16 @@ static void test_lpm_delete(void)
assert(bpf_map_lookup_elem(map_fd, key, &value) == -1 &&
errno == ENOENT);
+ key->prefixlen = 30; // unused prefix so far
+ inet_pton(AF_INET, "192.255.0.0", key->data);
+ assert(bpf_map_delete_elem(map_fd, key) == -1 &&
+ errno == ENOENT);
+
+ key->prefixlen = 16; // same prefix as the root node
+ inet_pton(AF_INET, "192.255.0.0", key->data);
+ assert(bpf_map_delete_elem(map_fd, key) == -1 &&
+ errno == ENOENT);
+
/* assert initial lookup */
key->prefixlen = 32;
inet_pton(AF_INET, "192.168.0.1", key->data);
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
index 802b4af18729..1080ff55a788 100755
--- a/tools/testing/selftests/net/fib_tests.sh
+++ b/tools/testing/selftests/net/fib_tests.sh
@@ -388,6 +388,7 @@ fib_carrier_unicast_test()
set -e
$IP link set dev dummy0 carrier off
+ sleep 1
set +e
echo " Carrier down"
diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh
index e2c94e47707c..912b2dc50be3 100755
--- a/tools/testing/selftests/net/pmtu.sh
+++ b/tools/testing/selftests/net/pmtu.sh
@@ -103,6 +103,15 @@
# and check that configured MTU is used on link creation and changes, and
# that MTU is properly calculated instead when MTU is not configured from
# userspace
+#
+# - cleanup_ipv4_exception
+# Similar to pmtu_ipv4_vxlan4_exception, but explicitly generate PMTU
+# exceptions on multiple CPUs and check that the veth device tear-down
+# happens in a timely manner
+#
+# - cleanup_ipv6_exception
+# Same as above, but use IPv6 transport from A to B
+
# Kselftest framework requirement - SKIP code is 4.
ksft_skip=4
@@ -135,7 +144,9 @@ tests="
pmtu_vti6_default_mtu vti6: default MTU assignment
pmtu_vti4_link_add_mtu vti4: MTU setting on link creation
pmtu_vti6_link_add_mtu vti6: MTU setting on link creation
- pmtu_vti6_link_change_mtu vti6: MTU changes on link changes"
+ pmtu_vti6_link_change_mtu vti6: MTU changes on link changes
+ cleanup_ipv4_exception ipv4: cleanup of cached exceptions
+ cleanup_ipv6_exception ipv6: cleanup of cached exceptions"
NS_A="ns-$(mktemp -u XXXXXX)"
NS_B="ns-$(mktemp -u XXXXXX)"
@@ -263,8 +274,6 @@ setup_fou_or_gue() {
${ns_a} ip link set ${encap}_a up
${ns_b} ip link set ${encap}_b up
-
- sleep 1
}
setup_fou44() {
@@ -302,6 +311,10 @@ setup_gue66() {
setup_namespaces() {
for n in ${NS_A} ${NS_B} ${NS_R1} ${NS_R2}; do
ip netns add ${n} || return 1
+
+ # Disable DAD, so that we don't have to wait to use the
+ # configured IPv6 addresses
+ ip netns exec ${n} sysctl -q net/ipv6/conf/default/accept_dad=0
done
}
@@ -337,8 +350,6 @@ setup_vti() {
${ns_a} ip link set vti${proto}_a up
${ns_b} ip link set vti${proto}_b up
-
- sleep 1
}
setup_vti4() {
@@ -375,8 +386,6 @@ setup_vxlan_or_geneve() {
${ns_a} ip link set ${type}_a up
${ns_b} ip link set ${type}_b up
-
- sleep 1
}
setup_geneve4() {
@@ -588,8 +597,8 @@ test_pmtu_ipvX() {
mtu "${ns_b}" veth_B-R2 1500
# Create route exceptions
- ${ns_a} ${ping} -q -M want -i 0.1 -w 2 -s 1800 ${dst1} > /dev/null
- ${ns_a} ${ping} -q -M want -i 0.1 -w 2 -s 1800 ${dst2} > /dev/null
+ ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1800 ${dst1} > /dev/null
+ ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1800 ${dst2} > /dev/null
# Check that exceptions have been created with the correct PMTU
pmtu_1="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst1})"
@@ -621,7 +630,7 @@ test_pmtu_ipvX() {
# Decrease remote MTU on path via R2, get new exception
mtu "${ns_r2}" veth_R2-B 400
mtu "${ns_b}" veth_B-R2 400
- ${ns_a} ${ping} -q -M want -i 0.1 -w 2 -s 1400 ${dst2} > /dev/null
+ ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1400 ${dst2} > /dev/null
pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst2})"
check_pmtu_value "lock 552" "${pmtu_2}" "exceeding MTU, with MTU < min_pmtu" || return 1
@@ -638,7 +647,7 @@ test_pmtu_ipvX() {
check_pmtu_value "1500" "${pmtu_2}" "increasing local MTU" || return 1
# Get new exception
- ${ns_a} ${ping} -q -M want -i 0.1 -w 2 -s 1400 ${dst2} > /dev/null
+ ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1400 ${dst2} > /dev/null
pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst2})"
check_pmtu_value "lock 552" "${pmtu_2}" "exceeding MTU, with MTU < min_pmtu" || return 1
}
@@ -687,7 +696,7 @@ test_pmtu_ipvX_over_vxlanY_or_geneveY_exception() {
mtu "${ns_a}" ${type}_a $((${ll_mtu} + 1000))
mtu "${ns_b}" ${type}_b $((${ll_mtu} + 1000))
- ${ns_a} ${ping} -q -M want -i 0.1 -w 2 -s $((${ll_mtu} + 500)) ${dst} > /dev/null
+ ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s $((${ll_mtu} + 500)) ${dst} > /dev/null
# Check that exception was created
pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst})"
@@ -767,7 +776,7 @@ test_pmtu_ipvX_over_fouY_or_gueY() {
mtu "${ns_a}" ${encap}_a $((${ll_mtu} + 1000))
mtu "${ns_b}" ${encap}_b $((${ll_mtu} + 1000))
- ${ns_a} ${ping} -q -M want -i 0.1 -w 2 -s $((${ll_mtu} + 500)) ${dst} > /dev/null
+ ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s $((${ll_mtu} + 500)) ${dst} > /dev/null
# Check that exception was created
pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst})"
@@ -825,13 +834,13 @@ test_pmtu_vti4_exception() {
# Send DF packet without exceeding link layer MTU, check that no
# exception is created
- ${ns_a} ping -q -M want -i 0.1 -w 2 -s ${ping_payload} ${tunnel4_b_addr} > /dev/null
+ ${ns_a} ping -q -M want -i 0.1 -w 1 -s ${ping_payload} ${tunnel4_b_addr} > /dev/null
pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel4_b_addr})"
check_pmtu_value "" "${pmtu}" "sending packet smaller than PMTU (IP payload length ${esp_payload_rfc4106})" || return 1
# Now exceed link layer MTU by one byte, check that exception is created
# with the right PMTU value
- ${ns_a} ping -q -M want -i 0.1 -w 2 -s $((ping_payload + 1)) ${tunnel4_b_addr} > /dev/null
+ ${ns_a} ping -q -M want -i 0.1 -w 1 -s $((ping_payload + 1)) ${tunnel4_b_addr} > /dev/null
pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel4_b_addr})"
check_pmtu_value "${esp_payload_rfc4106}" "${pmtu}" "exceeding PMTU (IP payload length $((esp_payload_rfc4106 + 1)))"
}
@@ -847,7 +856,7 @@ test_pmtu_vti6_exception() {
mtu "${ns_b}" veth_b 4000
mtu "${ns_a}" vti6_a 5000
mtu "${ns_b}" vti6_b 5000
- ${ns_a} ${ping6} -q -i 0.1 -w 2 -s 60000 ${tunnel6_b_addr} > /dev/null
+ ${ns_a} ${ping6} -q -i 0.1 -w 1 -s 60000 ${tunnel6_b_addr} > /dev/null
# Check that exception was created
pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel6_b_addr})"
@@ -1008,6 +1017,61 @@ test_pmtu_vti6_link_change_mtu() {
return ${fail}
}
+check_command() {
+ cmd=${1}
+
+ if ! which ${cmd} > /dev/null 2>&1; then
+ err " missing required command: '${cmd}'"
+ return 1
+ fi
+ return 0
+}
+
+test_cleanup_vxlanX_exception() {
+ outer="${1}"
+ encap="vxlan"
+ ll_mtu=4000
+
+ check_command taskset || return 2
+ cpu_list=$(grep -m 2 processor /proc/cpuinfo | cut -d ' ' -f 2)
+
+ setup namespaces routing ${encap}${outer} || return 2
+ trace "${ns_a}" ${encap}_a "${ns_b}" ${encap}_b \
+ "${ns_a}" veth_A-R1 "${ns_r1}" veth_R1-A \
+ "${ns_b}" veth_B-R1 "${ns_r1}" veth_R1-B
+
+ # Create route exception by exceeding link layer MTU
+ mtu "${ns_a}" veth_A-R1 $((${ll_mtu} + 1000))
+ mtu "${ns_r1}" veth_R1-A $((${ll_mtu} + 1000))
+ mtu "${ns_b}" veth_B-R1 ${ll_mtu}
+ mtu "${ns_r1}" veth_R1-B ${ll_mtu}
+
+ mtu "${ns_a}" ${encap}_a $((${ll_mtu} + 1000))
+ mtu "${ns_b}" ${encap}_b $((${ll_mtu} + 1000))
+
+	# Fill the exception cache for multiple CPUs (2);
+	# we can always use inner IPv4 for that
+ for cpu in ${cpu_list}; do
+ taskset --cpu-list ${cpu} ${ns_a} ping -q -M want -i 0.1 -w 1 -s $((${ll_mtu} + 500)) ${tunnel4_b_addr} > /dev/null
+ done
+
+ ${ns_a} ip link del dev veth_A-R1 &
+ iplink_pid=$!
+ sleep 1
+ if [ "$(cat /proc/${iplink_pid}/cmdline 2>/dev/null | tr -d '\0')" = "iplinkdeldevveth_A-R1" ]; then
+ err " can't delete veth device in a timely manner, PMTU dst likely leaked"
+ return 1
+ fi
+}
+
+test_cleanup_ipv6_exception() {
+ test_cleanup_vxlanX_exception 6
+}
+
+test_cleanup_ipv4_exception() {
+ test_cleanup_vxlanX_exception 4
+}
+
usage() {
echo
echo "$0 [OPTIONS] [TEST]..."
diff --git a/tools/testing/selftests/net/udpgro.sh b/tools/testing/selftests/net/udpgro.sh
index aeac53a99aeb..ac2a30be9b32 100755
--- a/tools/testing/selftests/net/udpgro.sh
+++ b/tools/testing/selftests/net/udpgro.sh
@@ -37,7 +37,7 @@ run_one() {
cfg_veth
- ip netns exec "${PEER_NS}" ./udpgso_bench_rx ${rx_args} && \
+ ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 10 ${rx_args} && \
echo "ok" || \
echo "failed" &
@@ -81,7 +81,7 @@ run_one_nat() {
# will land on the 'plain' one
ip netns exec "${PEER_NS}" ./udpgso_bench_rx -G ${family} -b ${addr1} -n 0 &
pid=$!
- ip netns exec "${PEER_NS}" ./udpgso_bench_rx ${family} -b ${addr2%/*} ${rx_args} && \
+ ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 10 ${family} -b ${addr2%/*} ${rx_args} && \
echo "ok" || \
echo "failed"&
@@ -99,8 +99,8 @@ run_one_2sock() {
cfg_veth
- ip netns exec "${PEER_NS}" ./udpgso_bench_rx ${rx_args} -p 12345 &
- ip netns exec "${PEER_NS}" ./udpgso_bench_rx ${rx_args} && \
+ ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 10 ${rx_args} -p 12345 &
+ ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 2000 -R 10 ${rx_args} && \
echo "ok" || \
echo "failed" &
diff --git a/tools/testing/selftests/net/udpgso_bench_rx.c b/tools/testing/selftests/net/udpgso_bench_rx.c
index 0c960f673324..db3d4a8b5a4c 100644
--- a/tools/testing/selftests/net/udpgso_bench_rx.c
+++ b/tools/testing/selftests/net/udpgso_bench_rx.c
@@ -45,6 +45,8 @@ static int cfg_alen = sizeof(struct sockaddr_in6);
static int cfg_expected_pkt_nr;
static int cfg_expected_pkt_len;
static int cfg_expected_gso_size;
+static int cfg_connect_timeout_ms;
+static int cfg_rcv_timeout_ms;
static struct sockaddr_storage cfg_bind_addr;
static bool interrupted;
@@ -87,7 +89,7 @@ static unsigned long gettimeofday_ms(void)
return (tv.tv_sec * 1000) + (tv.tv_usec / 1000);
}
-static void do_poll(int fd)
+static void do_poll(int fd, int timeout_ms)
{
struct pollfd pfd;
int ret;
@@ -102,8 +104,16 @@ static void do_poll(int fd)
break;
if (ret == -1)
error(1, errno, "poll");
- if (ret == 0)
- continue;
+ if (ret == 0) {
+ if (!timeout_ms)
+ continue;
+
+ timeout_ms -= 10;
+ if (timeout_ms <= 0) {
+ interrupted = true;
+ break;
+ }
+ }
if (pfd.revents != POLLIN)
error(1, errno, "poll: 0x%x expected 0x%x\n",
pfd.revents, POLLIN);
@@ -134,7 +144,7 @@ static int do_socket(bool do_tcp)
if (listen(accept_fd, 1))
error(1, errno, "listen");
- do_poll(accept_fd);
+ do_poll(accept_fd, cfg_connect_timeout_ms);
if (interrupted)
exit(0);
@@ -273,7 +283,9 @@ static void do_flush_udp(int fd)
static void usage(const char *filepath)
{
- error(1, 0, "Usage: %s [-Grtv] [-b addr] [-p port] [-l pktlen] [-n packetnr] [-S gsosize]", filepath);
+ error(1, 0, "Usage: %s [-C connect_timeout] [-Grtv] [-b addr] [-p port]"
+ " [-l pktlen] [-n packetnr] [-R rcv_timeout] [-S gsosize]",
+ filepath);
}
static void parse_opts(int argc, char **argv)
@@ -282,7 +294,7 @@ static void parse_opts(int argc, char **argv)
/* bind to any by default */
setup_sockaddr(PF_INET6, "::", &cfg_bind_addr);
- while ((c = getopt(argc, argv, "4b:Gl:n:p:rS:tv")) != -1) {
+ while ((c = getopt(argc, argv, "4b:C:Gl:n:p:rR:S:tv")) != -1) {
switch (c) {
case '4':
cfg_family = PF_INET;
@@ -292,6 +304,9 @@ static void parse_opts(int argc, char **argv)
case 'b':
setup_sockaddr(cfg_family, optarg, &cfg_bind_addr);
break;
+ case 'C':
+ cfg_connect_timeout_ms = strtoul(optarg, NULL, 0);
+ break;
case 'G':
cfg_gro_segment = true;
break;
@@ -307,6 +322,9 @@ static void parse_opts(int argc, char **argv)
case 'r':
cfg_read_all = true;
break;
+ case 'R':
+ cfg_rcv_timeout_ms = strtoul(optarg, NULL, 0);
+ break;
case 'S':
cfg_expected_gso_size = strtol(optarg, NULL, 0);
break;
@@ -329,8 +347,9 @@ static void parse_opts(int argc, char **argv)
static void do_recv(void)
{
+ int timeout_ms = cfg_tcp ? cfg_rcv_timeout_ms : cfg_connect_timeout_ms;
unsigned long tnow, treport;
- int fd, loop = 0;
+ int fd;
fd = do_socket(cfg_tcp);
@@ -342,12 +361,7 @@ static void do_recv(void)
treport = gettimeofday_ms() + 1000;
do {
- /* force termination after the second poll(); this cope both
- * with sender slower than receiver and missing packet errors
- */
- if (cfg_expected_pkt_nr && loop++)
- interrupted = true;
- do_poll(fd);
+ do_poll(fd, timeout_ms);
if (cfg_tcp)
do_flush_tcp(fd);
@@ -365,6 +379,8 @@ static void do_recv(void)
treport = tnow + 1000;
}
+ timeout_ms = cfg_rcv_timeout_ms;
+
} while (!interrupted);
if (cfg_expected_pkt_nr && (packets != cfg_expected_pkt_nr))
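
The receiver changes drop the "terminate after the second poll()" heuristic in favour of explicit budgets: -C bounds the wait for the first packet (or TCP accept), -R bounds each subsequent receive, and both are decremented in 10 ms slices. The core loop, extracted into a stand-alone sketch that reads stdin instead of the benchmark socket:

    #include <poll.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* true if data arrived; false once the timeout budget is spent */
    static bool poll_with_budget(int fd, int timeout_ms)
    {
        struct pollfd pfd = { .fd = fd, .events = POLLIN };

        for (;;) {
            int ret = poll(&pfd, 1, 10);   /* 10 ms slices, as in do_poll() */

            if (ret < 0)
                return false;              /* the tool error()s out here */
            if (ret > 0)
                return true;
            if (!timeout_ms)
                continue;                  /* no budget: wait forever */
            timeout_ms -= 10;
            if (timeout_ms <= 0)
                return false;              /* budget exhausted */
        }
    }

    int main(void)
    {
        printf("%s\n", poll_with_budget(0, 1000) ? "data" : "timeout");
        return 0;
    }
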
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 585845203db8..076bc38963bf 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -4044,7 +4044,7 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
}
add_uevent_var(env, "PID=%d", kvm->userspace_pid);
- if (kvm->debugfs_dentry) {
+ if (!IS_ERR_OR_NULL(kvm->debugfs_dentry)) {
char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL);
if (p) {
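
kvm->debugfs_dentry may hold an ERR_PTR() cookie rather than a valid dentry or NULL (for instance when debugfs is disabled), and a plain NULL test lets that cookie through to be dereferenced. The encoding, condensed from the kernel's ERR_PTR convention into a runnable user-space demo (MAX_ERRNO and the helpers are mirrored here, not included from kernel headers):

    #include <stdio.h>

    #define MAX_ERRNO 4095

    static void *ERR_PTR(long err) { return (void *)err; }

    /* error cookies live in the top MAX_ERRNO bytes of the address space */
    static int IS_ERR_OR_NULL(const void *p)
    {
        return !p || (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    int main(void)
    {
        void *dentry = ERR_PTR(-12);       /* -ENOMEM cookie, not a pointer */

        if (!IS_ERR_OR_NULL(dentry))
            puts("safe to dereference");
        else
            puts("error or NULL: skip");   /* the fixed check lands here */
        return 0;
    }
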