-rw-r--r--  Documentation/devicetree/bindings/net/dsa/dsa.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/thermal/rcar-thermal.txt | 2
-rw-r--r--  Documentation/input/alps.txt | 8
-rw-r--r--  Documentation/input/event-codes.txt | 6
-rw-r--r--  Documentation/input/multi-touch-protocol.txt | 9
-rw-r--r--  Documentation/kernel-parameters.txt | 3
-rw-r--r--  Documentation/rtc.txt | 264
-rw-r--r--  Documentation/virtual/kvm/api.txt | 335
-rw-r--r--  Documentation/virtual/kvm/devices/s390_flic.txt | 3
-rw-r--r--  MAINTAINERS | 47
-rw-r--r--  Makefile | 3
-rw-r--r--  arch/arc/kernel/signal.c | 24
-rw-r--r--  arch/arm/Kconfig | 1
-rw-r--r--  arch/arm/boot/dts/dm8168-evm.dts | 19
-rw-r--r--  arch/arm/boot/dts/dm816x.dtsi | 18
-rw-r--r--  arch/arm/boot/dts/dra7.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/omap3.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/rk3288.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/socfpga.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts | 16
-rw-r--r--  arch/arm/boot/dts/sun4i-a10.dtsi | 3
-rw-r--r--  arch/arm/boot/dts/sun5i-a13.dtsi | 3
-rw-r--r--  arch/arm/boot/dts/sun7i-a20.dtsi | 3
-rw-r--r--  arch/arm/include/asm/jump_label.h | 5
-rw-r--r--  arch/arm/include/asm/kvm_arm.h | 1
-rw-r--r--  arch/arm/include/asm/kvm_host.h | 15
-rw-r--r--  arch/arm/include/asm/kvm_mmio.h | 22
-rw-r--r--  arch/arm/include/uapi/asm/kvm.h | 3
-rw-r--r--  arch/arm/kernel/asm-offsets.c | 4
-rw-r--r--  arch/arm/kvm/Kconfig | 30
-rw-r--r--  arch/arm/kvm/Makefile | 12
-rw-r--r--  arch/arm/kvm/arm.c | 45
-rw-r--r--  arch/arm/kvm/guest.c | 18
-rw-r--r--  arch/arm/kvm/interrupts_head.S | 8
-rw-r--r--  arch/arm/kvm/mmio.c | 64
-rw-r--r--  arch/arm/kvm/mmu.c | 134
-rw-r--r--  arch/arm/kvm/trace.h | 48
-rw-r--r--  arch/arm/mach-omap2/id.c | 2
-rw-r--r--  arch/arm/mach-pxa/irq.c | 111
-rw-r--r--  arch/arm/mach-pxa/zeus.c | 2
-rw-r--r--  arch/arm/mach-sunxi/Kconfig | 8
-rw-r--r--  arch/arm/plat-omap/dmtimer.c | 15
-rw-r--r--  arch/arm64/boot/dts/arm/juno-clocks.dtsi | 2
-rw-r--r--  arch/arm64/include/asm/cmpxchg.h | 32
-rw-r--r--  arch/arm64/include/asm/esr.h | 1
-rw-r--r--  arch/arm64/include/asm/jump_label.h | 8
-rw-r--r--  arch/arm64/include/asm/kvm_arm.h | 1
-rw-r--r--  arch/arm64/include/asm/kvm_host.h | 15
-rw-r--r--  arch/arm64/include/asm/kvm_mmio.h | 22
-rw-r--r--  arch/arm64/include/asm/mmu_context.h | 9
-rw-r--r--  arch/arm64/include/asm/percpu.h | 44
-rw-r--r--  arch/arm64/include/uapi/asm/kvm.h | 3
-rw-r--r--  arch/arm64/kvm/Kconfig | 18
-rw-r--r--  arch/arm64/kvm/Makefile | 20
-rw-r--r--  arch/metag/include/asm/io.h | 1
-rw-r--r--  arch/metag/include/asm/pgtable-bits.h | 104
-rw-r--r--  arch/metag/include/asm/pgtable.h | 95
-rw-r--r--  arch/mips/include/asm/asmmacro-32.h | 128
-rw-r--r--  arch/mips/include/asm/asmmacro.h | 218
-rw-r--r--  arch/mips/include/asm/fpu.h | 20
-rw-r--r--  arch/mips/include/asm/jump_label.h | 7
-rw-r--r--  arch/mips/include/asm/kdebug.h | 3
-rw-r--r--  arch/mips/include/asm/kvm_host.h | 125
-rw-r--r--  arch/mips/include/asm/processor.h | 2
-rw-r--r--  arch/mips/include/uapi/asm/kvm.h | 164
-rw-r--r--  arch/mips/kernel/asm-offsets.c | 105
-rw-r--r--  arch/mips/kernel/genex.S | 15
-rw-r--r--  arch/mips/kernel/ptrace.c | 30
-rw-r--r--  arch/mips/kernel/r4k_fpu.S | 2
-rw-r--r--  arch/mips/kernel/traps.c | 33
-rw-r--r--  arch/mips/kvm/Makefile | 8
-rw-r--r--  arch/mips/kvm/emulate.c | 332
-rw-r--r--  arch/mips/kvm/fpu.S | 122
-rw-r--r--  arch/mips/kvm/locore.S | 38
-rw-r--r--  arch/mips/kvm/mips.c | 472
-rw-r--r--  arch/mips/kvm/msa.S | 161
-rw-r--r--  arch/mips/kvm/stats.c | 4
-rw-r--r--  arch/mips/kvm/tlb.c | 6
-rw-r--r--  arch/mips/kvm/trap_emul.c | 199
-rw-r--r--  arch/nios2/include/asm/thread_info.h | 4
-rw-r--r--  arch/nios2/include/uapi/asm/ptrace.h | 9
-rw-r--r--  arch/nios2/kernel/entry.S | 2
-rw-r--r--  arch/nios2/kernel/signal.c | 2
-rw-r--r--  arch/nios2/mm/cacheflush.c | 3
-rw-r--r--  arch/parisc/include/asm/pgalloc.h | 17
-rw-r--r--  arch/parisc/kernel/syscall_table.S | 9
-rw-r--r--  arch/powerpc/include/asm/cputhreads.h | 2
-rw-r--r--  arch/powerpc/include/asm/ppc-opcode.h | 3
-rw-r--r--  arch/powerpc/include/asm/reg.h | 3
-rw-r--r--  arch/powerpc/kernel/cputable.c | 20
-rw-r--r--  arch/powerpc/kernel/dbell.c | 2
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S | 2
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c | 8
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S | 1
-rw-r--r--  arch/powerpc/kvm/mpic.c | 17
-rw-r--r--  arch/powerpc/kvm/powerpc.c | 4
-rw-r--r--  arch/powerpc/platforms/powernv/opal-wrappers.S | 2
-rw-r--r--  arch/powerpc/platforms/powernv/smp.c | 14
-rw-r--r--  arch/powerpc/platforms/pseries/hvCall.S | 2
-rw-r--r--  arch/powerpc/platforms/pseries/lpar.c | 2
-rw-r--r--  arch/powerpc/platforms/pseries/mobility.c | 44
-rw-r--r--  arch/s390/include/asm/elf.h | 2
-rw-r--r--  arch/s390/include/asm/jump_label.h | 3
-rw-r--r--  arch/s390/include/asm/kvm_host.h | 46
-rw-r--r--  arch/s390/include/uapi/asm/kvm.h | 4
-rw-r--r--  arch/s390/include/uapi/asm/sie.h | 4
-rw-r--r--  arch/s390/kernel/asm-offsets.c | 1
-rw-r--r--  arch/s390/kernel/ftrace.c | 61
-rw-r--r--  arch/s390/kernel/perf_cpum_sf.c | 7
-rw-r--r--  arch/s390/kernel/swsusp_asm64.S | 11
-rw-r--r--  arch/s390/kvm/diag.c | 6
-rw-r--r--  arch/s390/kvm/gaccess.c | 296
-rw-r--r--  arch/s390/kvm/gaccess.h | 21
-rw-r--r--  arch/s390/kvm/guestdbg.c | 8
-rw-r--r--  arch/s390/kvm/intercept.c | 5
-rw-r--r--  arch/s390/kvm/interrupt.c | 1101
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 398
-rw-r--r--  arch/s390/kvm/kvm-s390.h | 51
-rw-r--r--  arch/s390/kvm/priv.c | 144
-rw-r--r--  arch/s390/kvm/sigp.c | 7
-rw-r--r--  arch/sparc/include/asm/hypervisor.h | 12
-rw-r--r--  arch/sparc/include/asm/jump_label.h | 5
-rw-r--r--  arch/sparc/kernel/hvapi.c | 1
-rw-r--r--  arch/sparc/kernel/hvcalls.S | 16
-rw-r--r--  arch/sparc/kernel/pci.c | 5
-rw-r--r--  arch/sparc/kernel/pcr.c | 33
-rw-r--r--  arch/sparc/kernel/perf_event.c | 55
-rw-r--r--  arch/sparc/kernel/process_64.c | 4
-rw-r--r--  arch/sparc/lib/memmove.S | 35
-rw-r--r--  arch/x86/include/asm/efi.h | 6
-rw-r--r--  arch/x86/include/asm/jump_label.h | 5
-rw-r--r--  arch/x86/include/asm/kvm_host.h | 28
-rw-r--r--  arch/x86/include/asm/kvm_para.h | 2
-rw-r--r--  arch/x86/include/asm/pvclock.h | 1
-rw-r--r--  arch/x86/include/asm/uaccess_64.h | 2
-rw-r--r--  arch/x86/include/uapi/asm/vmx.h | 1
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c | 10
-rw-r--r--  arch/x86/kernel/entry_64.S | 34
-rw-r--r--  arch/x86/kernel/kgdb.c | 2
-rw-r--r--  arch/x86/kernel/pvclock.c | 44
-rw-r--r--  arch/x86/kernel/reboot.c | 10
-rw-r--r--  arch/x86/kvm/Makefile | 2
-rw-r--r--  arch/x86/kvm/cpuid.c | 33
-rw-r--r--  arch/x86/kvm/cpuid.h | 8
-rw-r--r--  arch/x86/kvm/emulate.c | 193
-rw-r--r--  arch/x86/kvm/i8254.c | 14
-rw-r--r--  arch/x86/kvm/i8254.h | 2
-rw-r--r--  arch/x86/kvm/i8259.c | 12
-rw-r--r--  arch/x86/kvm/ioapic.c | 26
-rw-r--r--  arch/x86/kvm/ioapic.h | 11
-rw-r--r--  arch/x86/kvm/irq.h | 2
-rw-r--r--  arch/x86/kvm/lapic.c | 150
-rw-r--r--  arch/x86/kvm/lapic.h | 17
-rw-r--r--  arch/x86/kvm/mmu.c | 73
-rw-r--r--  arch/x86/kvm/pmu.c | 2
-rw-r--r--  arch/x86/kvm/svm.c | 43
-rw-r--r--  arch/x86/kvm/vmx.c | 153
-rw-r--r--  arch/x86/kvm/x86.c | 157
-rw-r--r--  arch/x86/lib/usercopy_64.c | 15
-rw-r--r--  arch/x86/platform/efi/efi.c | 17
-rw-r--r--  arch/x86/platform/efi/efi_32.c | 22
-rw-r--r--  arch/x86/platform/efi/efi_64.c | 29
-rw-r--r--  arch/x86/vdso/vclock_gettime.c | 34
-rw-r--r--  arch/x86/xen/p2m.c | 10
-rw-r--r--  block/blk-merge.c | 2
-rw-r--r--  block/blk-mq-tag.c | 6
-rw-r--r--  block/blk-mq.c | 10
-rw-r--r--  block/blk-settings.c | 6
-rw-r--r--  drivers/acpi/processor_idle.c | 2
-rw-r--r--  drivers/ata/libata-core.c | 19
-rw-r--r--  drivers/base/regmap/internal.h | 8
-rw-r--r--  drivers/base/regmap/regcache.c | 16
-rw-r--r--  drivers/base/regmap/regmap.c | 32
-rw-r--r--  drivers/block/nbd.c | 8
-rw-r--r--  drivers/block/nvme-core.c | 1
-rw-r--r--  drivers/char/ipmi/ipmi_powernv.c | 1
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c | 109
-rw-r--r--  drivers/char/ipmi/ipmi_ssif.c | 8
-rw-r--r--  drivers/clocksource/Kconfig | 3
-rw-r--r--  drivers/clocksource/timer-sun5i.c | 7
-rw-r--r--  drivers/cpufreq/cpufreq.c | 19
-rw-r--r--  drivers/cpuidle/cpuidle.c | 3
-rw-r--r--  drivers/cpuidle/sysfs.c | 5
-rw-r--r--  drivers/dma/bcm2835-dma.c | 1
-rw-r--r--  drivers/dma/cppi41.c | 9
-rw-r--r--  drivers/dma/dma-jz4740.c | 7
-rw-r--r--  drivers/dma/dmaengine.c | 3
-rw-r--r--  drivers/dma/edma.c | 7
-rw-r--r--  drivers/dma/moxart-dma.c | 4
-rw-r--r--  drivers/dma/omap-dma.c | 1
-rw-r--r--  drivers/firmware/dmi_scan.c | 56
-rw-r--r--  drivers/firmware/efi/libstub/arm-stub.c | 7
-rw-r--r--  drivers/firmware/efi/libstub/efistub.h | 2
-rw-r--r--  drivers/firmware/efi/libstub/fdt.c | 7
-rw-r--r--  drivers/gpio/gpio-mpc8xxx.c | 2
-rw-r--r--  drivers/gpio/gpio-syscon.c | 2
-rw-r--r--  drivers/gpio/gpiolib-acpi.c | 10
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 17
-rw-r--r--  drivers/gpu/drm/drm_edid_load.c | 1
-rw-r--r--  drivers/gpu/drm/drm_probe_helper.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 8
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c | 17
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 14
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 38
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 18
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/cikd.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_bios.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mn.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 22
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/vce_v2_0.c | 3
-rw-r--r--  drivers/iio/accel/bma180.c | 2
-rw-r--r--  drivers/iio/accel/bmc150-accel.c | 20
-rw-r--r--  drivers/iio/accel/kxcjk-1013.c | 2
-rw-r--r--  drivers/iio/adc/Kconfig | 3
-rw-r--r--  drivers/iio/adc/at91_adc.c | 5
-rw-r--r--  drivers/iio/adc/ti_am335x_adc.c | 3
-rw-r--r--  drivers/iio/adc/vf610_adc.c | 91
-rw-r--r--  drivers/iio/gyro/bmg160.c | 2
-rw-r--r--  drivers/iio/imu/adis_trigger.c | 2
-rw-r--r--  drivers/iio/imu/inv_mpu6050/inv_mpu_core.c | 56
-rw-r--r--  drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c | 25
-rw-r--r--  drivers/iio/imu/kmx61.c | 2
-rw-r--r--  drivers/iio/industrialio-core.c | 5
-rw-r--r--  drivers/iio/industrialio-event.c | 1
-rw-r--r--  drivers/iio/proximity/sx9500.c | 2
-rw-r--r--  drivers/infiniband/core/umem.c | 8
-rw-r--r--  drivers/input/mouse/alps.c | 48
-rw-r--r--  drivers/input/mouse/synaptics.c | 7
-rw-r--r--  drivers/iommu/arm-smmu.c | 9
-rw-r--r--  drivers/iommu/intel-iommu.c | 7
-rw-r--r--  drivers/iommu/ipmmu-vmsa.c | 1
-rw-r--r--  drivers/irqchip/irq-gic-v3-its.c | 57
-rw-r--r--  drivers/lguest/Kconfig | 2
-rw-r--r--  drivers/md/dm.c | 26
-rw-r--r--  drivers/md/md.c | 6
-rw-r--r--  drivers/md/raid0.c | 3
-rw-r--r--  drivers/media/dvb-frontends/rtl2832.c | 2
-rw-r--r--  drivers/media/pci/cx23885/cx23885-417.c | 13
-rw-r--r--  drivers/media/platform/s5p-jpeg/jpeg-core.c | 3
-rw-r--r--  drivers/media/platform/s5p-jpeg/jpeg-hw-exynos3250.c | 2
-rw-r--r--  drivers/media/platform/s5p-mfc/s5p_mfc.c | 1
-rw-r--r--  drivers/media/platform/s5p-mfc/s5p_mfc_common.h | 2
-rw-r--r--  drivers/media/platform/s5p-mfc/s5p_mfc_opr.h | 2
-rw-r--r--  drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c | 6
-rw-r--r--  drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c | 6
-rw-r--r--  drivers/media/platform/s5p-tv/Kconfig | 1
-rw-r--r--  drivers/media/platform/sh_veu.c | 1
-rw-r--r--  drivers/media/platform/soc_camera/atmel-isi.c | 2
-rw-r--r--  drivers/media/platform/soc_camera/soc_camera.c | 2
-rw-r--r--  drivers/media/usb/dvb-usb-v2/rtl28xxu.c | 2
-rw-r--r--  drivers/media/usb/gspca/Kconfig | 1
-rw-r--r--  drivers/media/v4l2-core/videobuf2-core.c | 11
-rw-r--r--  drivers/media/v4l2-core/videobuf2-dma-contig.c | 3
-rw-r--r--  drivers/mfd/kempld-core.c | 2
-rw-r--r--  drivers/mfd/rtsx_usb.c | 30
-rw-r--r--  drivers/misc/enclosure.c | 11
-rw-r--r--  drivers/net/bonding/bond_main.c | 3
-rw-r--r--  drivers/net/can/flexcan.c | 18
-rw-r--r--  drivers/net/can/usb/gs_usb.c | 2
-rw-r--r--  drivers/net/can/usb/kvaser_usb.c | 69
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_ucan.h | 15
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb_fd.c | 73
-rw-r--r--  drivers/net/ethernet/amd/pcnet32.c | 31
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 99
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c | 162
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h | 6
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 14
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 8
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 137
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c | 7
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 53
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 39
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h | 8
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 12
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 6
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h | 2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 17
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.h | 2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 131
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 30
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.c | 3
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/eq.c | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 6
-rw-r--r--  drivers/net/ethernet/rocker/rocker.c | 8
-rw-r--r--  drivers/net/ipvlan/ipvlan.h | 4
-rw-r--r--  drivers/net/ipvlan/ipvlan_core.c | 28
-rw-r--r--  drivers/net/ipvlan/ipvlan_main.c | 30
-rw-r--r--  drivers/net/usb/asix_common.c | 2
-rw-r--r--  drivers/net/usb/cdc_ether.c | 8
-rw-r--r--  drivers/net/usb/cdc_ncm.c | 6
-rw-r--r--  drivers/net/usb/cx82310_eth.c | 30
-rw-r--r--  drivers/net/usb/r8152.c | 2
-rw-r--r--  drivers/net/usb/sr9800.c | 1
-rw-r--r--  drivers/net/usb/usbnet.c | 17
-rw-r--r--  drivers/net/wireless/ath/ath9k/beacon.c | 20
-rw-r--r--  drivers/net/wireless/ath/ath9k/common.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.c | 2
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/feature.c | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/dev.h | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/mac80211.c | 17
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/ucode.c | 5
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-drv.c | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/rs.c | 24
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/time-event.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/tx.c | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/drv.c | 6
-rw-r--r--  drivers/net/wireless/rtlwifi/pci.c | 12
-rw-r--r--  drivers/net/xen-netfront.c | 5
-rw-r--r--  drivers/of/address.c | 11
-rw-r--r--  drivers/pci/host/pcie-designware.c | 2
-rw-r--r--  drivers/pci/host/pcie-spear13xx.c | 6
-rw-r--r--  drivers/pci/hotplug/cpci_hotplug_pci.c | 3
-rw-r--r--  drivers/pci/pci-acpi.c | 3
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_errprint.c | 12
-rw-r--r--  drivers/regulator/palmas-regulator.c | 4
-rw-r--r--  drivers/rtc/rtc-mrst.c | 17
-rw-r--r--  drivers/scsi/be2iscsi/be_main.c | 2
-rw-r--r--  drivers/scsi/ipr.c | 3
-rw-r--r--  drivers/scsi/libsas/sas_ata.c | 3
-rw-r--r--  drivers/scsi/scsi_lib.c | 4
-rw-r--r--  drivers/spi/spi-dw-mid.c | 6
-rw-r--r--  drivers/spi/spi-qup.c | 9
-rw-r--r--  drivers/spi/spi.c | 5
-rw-r--r--  drivers/staging/iio/Kconfig | 1
-rw-r--r--  drivers/staging/iio/magnetometer/hmc5843_core.c | 1
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 11
-rw-r--r--  drivers/target/target_core_device.c | 4
-rw-r--r--  drivers/thermal/st/st_thermal.c | 2
-rw-r--r--  drivers/thermal/st/st_thermal_memmap.c | 10
-rw-r--r--  drivers/thermal/st/st_thermal_syscfg.c | 14
-rw-r--r--  drivers/thermal/thermal_core.c | 6
-rw-r--r--  drivers/tty/serial/fsl_lpuart.c | 5
-rw-r--r--  drivers/tty/serial/samsung.c | 1
-rw-r--r--  drivers/usb/host/xhci-hub.c | 9
-rw-r--r--  drivers/usb/host/xhci-pci.c | 2
-rw-r--r--  drivers/usb/isp1760/isp1760-udc.c | 2
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 9
-rw-r--r--  drivers/usb/serial/ftdi_sio_ids.h | 6
-rw-r--r--  drivers/usb/serial/keyspan_pda.c | 3
-rw-r--r--  drivers/watchdog/imgpdc_wdt.c | 8
-rw-r--r--  drivers/watchdog/mtk_wdt.c | 2
-rw-r--r--  drivers/xen/Kconfig | 17
-rw-r--r--  drivers/xen/balloon.c | 23
-rw-r--r--  fs/affs/file.c | 19
-rw-r--r--  fs/aio.c | 20
-rw-r--r--  fs/cifs/cifsencrypt.c | 6
-rw-r--r--  fs/cifs/connect.c | 13
-rw-r--r--  fs/cifs/file.c | 1
-rw-r--r--  fs/cifs/inode.c | 2
-rw-r--r--  fs/cifs/smb2misc.c | 2
-rw-r--r--  fs/cifs/smb2ops.c | 3
-rw-r--r--  fs/cifs/smb2pdu.c | 17
-rw-r--r--  fs/fs-writeback.c | 93
-rw-r--r--  fs/hfsplus/brec.c | 20
-rw-r--r--  fs/locks.c | 5
-rw-r--r--  fs/nfsd/blocklayout.c | 2
-rw-r--r--  fs/nfsd/blocklayoutxdr.c | 6
-rw-r--r--  fs/nfsd/nfs4layouts.c | 12
-rw-r--r--  fs/nfsd/nfs4proc.c | 2
-rw-r--r--  fs/nfsd/nfs4state.c | 4
-rw-r--r--  fs/nfsd/nfs4xdr.c | 20
-rw-r--r--  fs/nfsd/nfscache.c | 6
-rw-r--r--  fs/ocfs2/file.c | 17
-rw-r--r--  include/kvm/arm_arch_timer.h | 31
-rw-r--r--  include/kvm/arm_vgic.h | 117
-rw-r--r--  include/kvm/iodev.h (renamed from virt/kvm/iodev.h) | 28
-rw-r--r--  include/linux/blk_types.h | 4
-rw-r--r--  include/linux/compiler.h | 16
-rw-r--r--  include/linux/cpuidle.h | 1
-rw-r--r--  include/linux/dmapool.h | 2
-rw-r--r--  include/linux/efi.h | 1
-rw-r--r--  include/linux/fs.h | 3
-rw-r--r--  include/linux/irqchip/arm-gic-v3.h | 17
-rw-r--r--  include/linux/jump_label.h | 21
-rw-r--r--  include/linux/kvm_host.h | 32
-rw-r--r--  include/linux/lcm.h | 1
-rw-r--r--  include/linux/libata.h | 1
-rw-r--r--  include/linux/mfd/palmas.h | 3
-rw-r--r--  include/linux/mmzone.h | 7
-rw-r--r--  include/linux/netdevice.h | 6
-rw-r--r--  include/linux/regulator/driver.h | 2
-rw-r--r--  include/linux/sched.h | 17
-rw-r--r--  include/linux/seqlock.h | 6
-rw-r--r--  include/linux/sunrpc/debug.h | 18
-rw-r--r--  include/linux/usb/usbnet.h | 16
-rw-r--r--  include/linux/writeback.h | 3
-rw-r--r--  include/media/atmel-isi.h | 4
-rw-r--r--  include/net/ip.h | 16
-rw-r--r--  include/net/ip6_route.h | 3
-rw-r--r--  include/net/netfilter/nf_log.h | 10
-rw-r--r--  include/net/sock.h | 2
-rw-r--r--  include/trace/events/regmap.h | 123
-rw-r--r--  include/uapi/linux/input.h | 3
-rw-r--r--  include/uapi/linux/kvm.h | 65
-rw-r--r--  include/uapi/linux/nfsd/export.h | 2
-rw-r--r--  kernel/events/core.c | 10
-rw-r--r--  kernel/futex.c | 2
-rw-r--r--  kernel/locking/lockdep.c | 81
-rw-r--r--  kernel/locking/mcs_spinlock.h | 6
-rw-r--r--  kernel/locking/mutex.c | 51
-rw-r--r--  kernel/locking/osq_lock.c | 14
-rw-r--r--  kernel/locking/rtmutex.c | 2
-rw-r--r--  kernel/locking/rwsem-spinlock.c | 7
-rw-r--r--  kernel/locking/rwsem-xadd.c | 98
-rw-r--r--  kernel/locking/rwsem.c | 22
-rw-r--r--  kernel/locking/rwsem.h | 20
-rw-r--r--  kernel/module.c | 27
-rw-r--r--  kernel/power/snapshot.c | 21
-rw-r--r--  kernel/sched/core.c | 15
-rw-r--r--  kernel/sched/fair.c | 12
-rw-r--r--  kernel/sysctl.c | 8
-rw-r--r--  kernel/time/tick-broadcast-hrtimer.c | 11
-rw-r--r--  lib/lcm.c | 11
-rw-r--r--  lib/lockref.c | 2
-rw-r--r--  lib/nlattr.c | 2
-rw-r--r--  mm/huge_memory.c | 26
-rw-r--r--  mm/memory.c | 22
-rw-r--r--  mm/memory_hotplug.c | 13
-rw-r--r--  mm/mmap.c | 4
-rw-r--r--  mm/mprotect.c | 3
-rw-r--r--  mm/mremap.c | 10
-rw-r--r--  mm/page-writeback.c | 7
-rw-r--r--  mm/page_isolation.c | 1
-rw-r--r--  mm/pagewalk.c | 9
-rw-r--r--  mm/rmap.c | 7
-rw-r--r--  mm/slub.c | 6
-rw-r--r--  net/ceph/messenger.c | 9
-rw-r--r--  net/compat.c | 7
-rw-r--r--  net/core/dev.c | 4
-rw-r--r--  net/core/fib_rules.c | 2
-rw-r--r--  net/core/net_namespace.c | 4
-rw-r--r--  net/core/rtnetlink.c | 4
-rw-r--r--  net/core/sock.c | 19
-rw-r--r--  net/decnet/dn_rules.c | 2
-rw-r--r--  net/dsa/dsa.c | 23
-rw-r--r--  net/ipv4/fib_frontend.c | 3
-rw-r--r--  net/ipv4/ipmr.c | 7
-rw-r--r--  net/ipv4/netfilter/ip_tables.c | 6
-rw-r--r--  net/ipv4/tcp_input.c | 7
-rw-r--r--  net/ipv4/tcp_ipv4.c | 2
-rw-r--r--  net/ipv4/tcp_output.c | 6
-rw-r--r--  net/ipv6/fib6_rules.c | 3
-rw-r--r--  net/ipv6/ip6_output.c | 3
-rw-r--r--  net/ipv6/ip6mr.c | 6
-rw-r--r--  net/ipv6/ndisc.c | 9
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c | 6
-rw-r--r--  net/ipv6/tcp_ipv6.c | 13
-rw-r--r--  net/ipv6/udp_offload.c | 8
-rw-r--r--  net/iucv/af_iucv.c | 4
-rw-r--r--  net/l2tp/l2tp_core.c | 1
-rw-r--r--  net/mac80211/agg-rx.c | 8
-rw-r--r--  net/mac80211/rx.c | 7
-rw-r--r--  net/mac80211/sta_info.h | 2
-rw-r--r--  net/netfilter/nf_log.c | 24
-rw-r--r--  net/netfilter/nf_tables_api.c | 5
-rw-r--r--  net/netfilter/nf_tables_core.c | 8
-rw-r--r--  net/netfilter/nfnetlink_cthelper.c | 3
-rw-r--r--  net/netfilter/nft_compat.c | 6
-rw-r--r--  net/netfilter/nft_hash.c | 2
-rw-r--r--  net/netfilter/xt_TPROXY.c | 4
-rw-r--r--  net/openvswitch/vport.c | 4
-rw-r--r--  net/socket.c | 4
-rw-r--r--  net/sunrpc/clnt.c | 4
-rw-r--r--  net/sunrpc/debugfs.c | 52
-rw-r--r--  net/sunrpc/sunrpc_syms.c | 7
-rw-r--r--  net/sunrpc/xprt.c | 7
-rw-r--r--  net/tipc/core.c | 2
-rw-r--r--  security/selinux/selinuxfs.c | 2
-rw-r--r--  sound/firewire/bebob/bebob_maudio.c | 8
-rw-r--r--  sound/pci/hda/hda_intel.c | 2
-rw-r--r--  sound/pci/hda/patch_realtek.c | 30
-rw-r--r--  sound/soc/codecs/pcm512x.c | 19
-rw-r--r--  sound/usb/mixer_quirks.c | 1
-rw-r--r--  sound/usb/quirks.c | 9
-rw-r--r--  tools/testing/selftests/Makefile | 41
-rw-r--r--  tools/testing/selftests/breakpoints/Makefile | 5
-rw-r--r--  tools/testing/selftests/cpu-hotplug/Makefile | 7
-rwxr-xr-x [-rw-r--r--]  tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh (renamed from tools/testing/selftests/cpu-hotplug/on-off-test.sh) | 0
-rw-r--r--  tools/testing/selftests/efivarfs/Makefile | 7
-rwxr-xr-x [-rw-r--r--]  tools/testing/selftests/efivarfs/efivarfs.sh | 0
-rw-r--r--  tools/testing/selftests/exec/Makefile | 9
-rw-r--r--  tools/testing/selftests/firmware/Makefile | 20
-rwxr-xr-x [-rw-r--r--]  tools/testing/selftests/firmware/fw_filesystem.sh | 0
-rwxr-xr-x [-rw-r--r--]  tools/testing/selftests/firmware/fw_userhelper.sh | 0
-rw-r--r--  tools/testing/selftests/ftrace/Makefile | 5
-rw-r--r--  tools/testing/selftests/ftrace/test.d/00basic/basic4.tc | 2
-rw-r--r--  tools/testing/selftests/ftrace/test.d/event/event-enable.tc | 15
-rw-r--r--  tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc | 15
-rw-r--r--  tools/testing/selftests/ftrace/test.d/event/toplevel-enable.tc | 15
-rw-r--r--  tools/testing/selftests/ftrace/test.d/ftrace/fgraph-filter-stack.tc | 6
-rw-r--r--  tools/testing/selftests/ftrace/test.d/ftrace/fgraph-filter.tc | 2
-rw-r--r--  tools/testing/selftests/ftrace/test.d/ftrace/func_profiler.tc | 2
-rwxr-xr-x  tools/testing/selftests/gen_kselftest_tar.sh | 55
-rw-r--r--  tools/testing/selftests/ipc/Makefile | 11
-rw-r--r--  tools/testing/selftests/kcmp/Makefile | 6
-rwxr-xr-x  tools/testing/selftests/kselftest_install.sh | 37
-rw-r--r--  tools/testing/selftests/lib.mk | 35
-rw-r--r--  tools/testing/selftests/memfd/Makefile | 14
-rw-r--r--  tools/testing/selftests/memory-hotplug/Makefile | 9
-rwxr-xr-x [-rw-r--r--]  tools/testing/selftests/memory-hotplug/mem-on-off-test.sh (renamed from tools/testing/selftests/memory-hotplug/on-off-test.sh) | 0
-rw-r--r--  tools/testing/selftests/mount/.gitignore | 1
-rw-r--r--  tools/testing/selftests/mount/Makefile | 15
-rw-r--r--  tools/testing/selftests/mqueue/Makefile | 22
-rw-r--r--  tools/testing/selftests/net/Makefile | 10
-rwxr-xr-x [-rw-r--r--]  tools/testing/selftests/net/run_afpackettests | 0
-rwxr-xr-x [-rw-r--r--]  tools/testing/selftests/net/run_netsocktests | 0
-rw-r--r--  tools/testing/selftests/powerpc/Makefile | 22
-rw-r--r--  tools/testing/selftests/powerpc/copyloops/Makefile | 15
-rw-r--r--  tools/testing/selftests/powerpc/mm/Makefile | 15
-rw-r--r--  tools/testing/selftests/powerpc/pmu/Makefile | 48
-rw-r--r--  tools/testing/selftests/powerpc/pmu/ebb/Makefile | 13
-rw-r--r--  tools/testing/selftests/powerpc/primitives/Makefile | 15
-rw-r--r--  tools/testing/selftests/powerpc/stringloops/Makefile | 15
-rw-r--r--  tools/testing/selftests/powerpc/tm/Makefile | 15
-rw-r--r--  tools/testing/selftests/ptrace/Makefile | 5
-rw-r--r--  tools/testing/selftests/size/Makefile | 7
-rw-r--r--  tools/testing/selftests/sysctl/Makefile | 12
-rwxr-xr-x [-rw-r--r--]  tools/testing/selftests/sysctl/run_numerictests | 0
-rwxr-xr-x [-rw-r--r--]  tools/testing/selftests/sysctl/run_stringtests | 0
-rw-r--r--  tools/testing/selftests/timers/Makefile | 38
-rw-r--r--  tools/testing/selftests/timers/alarmtimer-suspend.c | 185
-rw-r--r--  tools/testing/selftests/timers/change_skew.c | 107
-rw-r--r--  tools/testing/selftests/timers/clocksource-switch.c | 179
-rw-r--r--  tools/testing/selftests/timers/inconsistency-check.c | 204
-rw-r--r--  tools/testing/selftests/timers/leap-a-day.c | 319
-rw-r--r--  tools/testing/selftests/timers/leapcrash.c | 120
-rw-r--r--  tools/testing/selftests/timers/mqueue-lat.c | 124
-rw-r--r--  tools/testing/selftests/timers/nanosleep.c | 174
-rw-r--r--  tools/testing/selftests/timers/nsleep-lat.c | 190
-rw-r--r--  tools/testing/selftests/timers/posix_timers.c | 9
-rw-r--r--  tools/testing/selftests/timers/raw_skew.c | 154
-rw-r--r--  tools/testing/selftests/timers/rtctest.c | 271
-rw-r--r--  tools/testing/selftests/timers/set-2038.c | 144
-rw-r--r--  tools/testing/selftests/timers/set-tai.c | 79
-rw-r--r--  tools/testing/selftests/timers/set-timer-lat.c | 216
-rw-r--r--  tools/testing/selftests/timers/skew_consistency.c | 89
-rw-r--r--  tools/testing/selftests/timers/threadtest.c | 204
-rw-r--r--  tools/testing/selftests/timers/valid-adjtimex.c | 202
-rw-r--r--  tools/testing/selftests/user/Makefile | 5
-rw-r--r--  tools/testing/selftests/vm/Makefile | 7
-rwxr-xr-x [-rw-r--r--]  tools/testing/selftests/vm/run_vmtests | 0
-rw-r--r--  virt/kvm/arm/arch_timer.c | 45
-rw-r--r--  virt/kvm/arm/vgic-v2-emul.c | 71
-rw-r--r--  virt/kvm/arm/vgic-v3-emul.c | 246
-rw-r--r--  virt/kvm/arm/vgic.c | 479
-rw-r--r--  virt/kvm/arm/vgic.h | 37
-rw-r--r--  virt/kvm/coalesced_mmio.c | 7
-rw-r--r--  virt/kvm/eventfd.c | 9
-rw-r--r--  virt/kvm/irqchip.c | 2
-rw-r--r--  virt/kvm/kvm_main.c | 162
562 files changed, 12185 insertions, 4313 deletions
diff --git a/Documentation/devicetree/bindings/net/dsa/dsa.txt b/Documentation/devicetree/bindings/net/dsa/dsa.txt
index e124847443f8..f0b4cd72411d 100644
--- a/Documentation/devicetree/bindings/net/dsa/dsa.txt
+++ b/Documentation/devicetree/bindings/net/dsa/dsa.txt
@@ -19,7 +19,9 @@ the parent DSA node. The maximum number of allowed child nodes is 4
(DSA_MAX_SWITCHES).
Each of these switch child nodes should have the following required properties:
-- reg : Describes the switch address on the MII bus
+- reg : Contains two fields. The first one describes the
+ address on the MII bus. The second is the switch
+ number that must be unique in cascaded configurations
- #address-cells : Must be 1
- #size-cells : Must be 0
diff --git a/Documentation/devicetree/bindings/thermal/rcar-thermal.txt b/Documentation/devicetree/bindings/thermal/rcar-thermal.txt
index 43404b197933..332e625f6ed0 100644
--- a/Documentation/devicetree/bindings/thermal/rcar-thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/rcar-thermal.txt
@@ -4,7 +4,7 @@ Required properties:
- compatible : "renesas,thermal-<soctype>", "renesas,rcar-thermal"
as fallback.
Examples with soctypes are:
- - "renesas,thermal-r8a73a4" (R-Mobile AP6)
+ - "renesas,thermal-r8a73a4" (R-Mobile APE6)
- "renesas,thermal-r8a7779" (R-Car H1)
- "renesas,thermal-r8a7790" (R-Car H2)
- "renesas,thermal-r8a7791" (R-Car M2-W)
diff --git a/Documentation/input/alps.txt b/Documentation/input/alps.txt
index a63e5e013a8c..92ae734c00c3 100644
--- a/Documentation/input/alps.txt
+++ b/Documentation/input/alps.txt
@@ -114,6 +114,9 @@ ALPS Absolute Mode - Protocol Version 2
byte 4: 0 y6 y5 y4 y3 y2 y1 y0
byte 5: 0 z6 z5 z4 z3 z2 z1 z0
+Protocol Version 2 DualPoint devices send standard PS/2 mouse packets for
+the DualPoint Stick.
+
Dualpoint device -- interleaved packet format
---------------------------------------------
@@ -127,6 +130,11 @@ Dualpoint device -- interleaved packet format
byte 7: 0 y6 y5 y4 y3 y2 y1 y0
byte 8: 0 z6 z5 z4 z3 z2 z1 z0
+Devices which use the interleaving format normally send standard PS/2 mouse
+packets for the DualPoint Stick + ALPS Absolute Mode packets for the
+touchpad, switching to the interleaved packet format when both the stick and
+the touchpad are used at the same time.
+
ALPS Absolute Mode - Protocol Version 3
---------------------------------------
diff --git a/Documentation/input/event-codes.txt b/Documentation/input/event-codes.txt
index c587a966413e..96705616f582 100644
--- a/Documentation/input/event-codes.txt
+++ b/Documentation/input/event-codes.txt
@@ -294,6 +294,12 @@ accordingly. This property does not affect kernel behavior.
The kernel does not provide button emulation for such devices but treats
them as any other INPUT_PROP_BUTTONPAD device.
+INPUT_PROP_ACCELEROMETER
+-------------------------
+Directional axes on this device (absolute and/or relative x, y, z) represent
+accelerometer data. All other axes retain their meaning. A device must not mix
+regular directional axes and accelerometer axes on the same event node.
+
Guidelines:
==========
The guidelines below ensure proper single-touch and multi-finger functionality.
diff --git a/Documentation/input/multi-touch-protocol.txt b/Documentation/input/multi-touch-protocol.txt
index 7b4f59c09ee2..b85d000faeb4 100644
--- a/Documentation/input/multi-touch-protocol.txt
+++ b/Documentation/input/multi-touch-protocol.txt
@@ -312,9 +312,12 @@ ABS_MT_TOOL_TYPE
The type of approaching tool. A lot of kernel drivers cannot distinguish
between different tool types, such as a finger or a pen. In such cases, the
-event should be omitted. The protocol currently supports MT_TOOL_FINGER and
-MT_TOOL_PEN [2]. For type B devices, this event is handled by input core;
-drivers should instead use input_mt_report_slot_state().
+event should be omitted. The protocol currently supports MT_TOOL_FINGER,
+MT_TOOL_PEN, and MT_TOOL_PALM [2]. For type B devices, this event is handled
+by input core; drivers should instead use input_mt_report_slot_state().
+A contact's ABS_MT_TOOL_TYPE may change over time while still touching the
+device, because the firmware may not be able to determine which tool is being
+used when it first appears.
ABS_MT_BLOB_ID
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index bfcb1a62a7b4..01aa47d3b6ab 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1036,7 +1036,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
Format: {"off" | "on" | "skip[mbr]"}
efi= [EFI]
- Format: { "old_map", "nochunk", "noruntime" }
+ Format: { "old_map", "nochunk", "noruntime", "debug" }
old_map [X86-64]: switch to the old ioremap-based EFI
runtime services mapping. 32-bit still uses this one by
default.
@@ -1044,6 +1044,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
boot stub, as chunking can cause problems with some
firmware implementations.
noruntime : disable EFI runtime services support
+ debug: enable misc debug output
efi_no_storage_paranoia [EFI; X86]
Using this parameter you can use more than 50% of
diff --git a/Documentation/rtc.txt b/Documentation/rtc.txt
index 596b60c08b74..8446f1ea1410 100644
--- a/Documentation/rtc.txt
+++ b/Documentation/rtc.txt
@@ -204,266 +204,4 @@ Some common examples:
* RTC_PIE_ON, RTC_PIE_OFF: These are also emulated by the generic code.
-If all else fails, check out the rtc-test.c driver!
-
-
--------------------- 8< ---------------- 8< -----------------------------
-
-/*
- * Real Time Clock Driver Test/Example Program
- *
- * Compile with:
- * gcc -s -Wall -Wstrict-prototypes rtctest.c -o rtctest
- *
- * Copyright (C) 1996, Paul Gortmaker.
- *
- * Released under the GNU General Public License, version 2,
- * included herein by reference.
- *
- */
-
-#include <stdio.h>
-#include <linux/rtc.h>
-#include <sys/ioctl.h>
-#include <sys/time.h>
-#include <sys/types.h>
-#include <fcntl.h>
-#include <unistd.h>
-#include <stdlib.h>
-#include <errno.h>
-
-
-/*
- * This expects the new RTC class driver framework, working with
- * clocks that will often not be clones of what the PC-AT had.
- * Use the command line to specify another RTC if you need one.
- */
-static const char default_rtc[] = "/dev/rtc0";
-
-
-int main(int argc, char **argv)
-{
- int i, fd, retval, irqcount = 0;
- unsigned long tmp, data;
- struct rtc_time rtc_tm;
- const char *rtc = default_rtc;
-
- switch (argc) {
- case 2:
- rtc = argv[1];
- /* FALLTHROUGH */
- case 1:
- break;
- default:
- fprintf(stderr, "usage: rtctest [rtcdev]\n");
- return 1;
- }
-
- fd = open(rtc, O_RDONLY);
-
- if (fd == -1) {
- perror(rtc);
- exit(errno);
- }
-
- fprintf(stderr, "\n\t\t\tRTC Driver Test Example.\n\n");
-
- /* Turn on update interrupts (one per second) */
- retval = ioctl(fd, RTC_UIE_ON, 0);
- if (retval == -1) {
- if (errno == ENOTTY) {
- fprintf(stderr,
- "\n...Update IRQs not supported.\n");
- goto test_READ;
- }
- perror("RTC_UIE_ON ioctl");
- exit(errno);
- }
-
- fprintf(stderr, "Counting 5 update (1/sec) interrupts from reading %s:",
- rtc);
- fflush(stderr);
- for (i=1; i<6; i++) {
- /* This read will block */
- retval = read(fd, &data, sizeof(unsigned long));
- if (retval == -1) {
- perror("read");
- exit(errno);
- }
- fprintf(stderr, " %d",i);
- fflush(stderr);
- irqcount++;
- }
-
- fprintf(stderr, "\nAgain, from using select(2) on /dev/rtc:");
- fflush(stderr);
- for (i=1; i<6; i++) {
- struct timeval tv = {5, 0}; /* 5 second timeout on select */
- fd_set readfds;
-
- FD_ZERO(&readfds);
- FD_SET(fd, &readfds);
- /* The select will wait until an RTC interrupt happens. */
- retval = select(fd+1, &readfds, NULL, NULL, &tv);
- if (retval == -1) {
- perror("select");
- exit(errno);
- }
- /* This read won't block unlike the select-less case above. */
- retval = read(fd, &data, sizeof(unsigned long));
- if (retval == -1) {
- perror("read");
- exit(errno);
- }
- fprintf(stderr, " %d",i);
- fflush(stderr);
- irqcount++;
- }
-
- /* Turn off update interrupts */
- retval = ioctl(fd, RTC_UIE_OFF, 0);
- if (retval == -1) {
- perror("RTC_UIE_OFF ioctl");
- exit(errno);
- }
-
-test_READ:
- /* Read the RTC time/date */
- retval = ioctl(fd, RTC_RD_TIME, &rtc_tm);
- if (retval == -1) {
- perror("RTC_RD_TIME ioctl");
- exit(errno);
- }
-
- fprintf(stderr, "\n\nCurrent RTC date/time is %d-%d-%d, %02d:%02d:%02d.\n",
- rtc_tm.tm_mday, rtc_tm.tm_mon + 1, rtc_tm.tm_year + 1900,
- rtc_tm.tm_hour, rtc_tm.tm_min, rtc_tm.tm_sec);
-
- /* Set the alarm to 5 sec in the future, and check for rollover */
- rtc_tm.tm_sec += 5;
- if (rtc_tm.tm_sec >= 60) {
- rtc_tm.tm_sec %= 60;
- rtc_tm.tm_min++;
- }
- if (rtc_tm.tm_min == 60) {
- rtc_tm.tm_min = 0;
- rtc_tm.tm_hour++;
- }
- if (rtc_tm.tm_hour == 24)
- rtc_tm.tm_hour = 0;
-
- retval = ioctl(fd, RTC_ALM_SET, &rtc_tm);
- if (retval == -1) {
- if (errno == ENOTTY) {
- fprintf(stderr,
- "\n...Alarm IRQs not supported.\n");
- goto test_PIE;
- }
- perror("RTC_ALM_SET ioctl");
- exit(errno);
- }
-
- /* Read the current alarm settings */
- retval = ioctl(fd, RTC_ALM_READ, &rtc_tm);
- if (retval == -1) {
- perror("RTC_ALM_READ ioctl");
- exit(errno);
- }
-
- fprintf(stderr, "Alarm time now set to %02d:%02d:%02d.\n",
- rtc_tm.tm_hour, rtc_tm.tm_min, rtc_tm.tm_sec);
-
- /* Enable alarm interrupts */
- retval = ioctl(fd, RTC_AIE_ON, 0);
- if (retval == -1) {
- perror("RTC_AIE_ON ioctl");
- exit(errno);
- }
-
- fprintf(stderr, "Waiting 5 seconds for alarm...");
- fflush(stderr);
- /* This blocks until the alarm ring causes an interrupt */
- retval = read(fd, &data, sizeof(unsigned long));
- if (retval == -1) {
- perror("read");
- exit(errno);
- }
- irqcount++;
- fprintf(stderr, " okay. Alarm rang.\n");
-
- /* Disable alarm interrupts */
- retval = ioctl(fd, RTC_AIE_OFF, 0);
- if (retval == -1) {
- perror("RTC_AIE_OFF ioctl");
- exit(errno);
- }
-
-test_PIE:
- /* Read periodic IRQ rate */
- retval = ioctl(fd, RTC_IRQP_READ, &tmp);
- if (retval == -1) {
- /* not all RTCs support periodic IRQs */
- if (errno == ENOTTY) {
- fprintf(stderr, "\nNo periodic IRQ support\n");
- goto done;
- }
- perror("RTC_IRQP_READ ioctl");
- exit(errno);
- }
- fprintf(stderr, "\nPeriodic IRQ rate is %ldHz.\n", tmp);
-
- fprintf(stderr, "Counting 20 interrupts at:");
- fflush(stderr);
-
- /* The frequencies 128Hz, 256Hz, ... 8192Hz are only allowed for root. */
- for (tmp=2; tmp<=64; tmp*=2) {
-
- retval = ioctl(fd, RTC_IRQP_SET, tmp);
- if (retval == -1) {
- /* not all RTCs can change their periodic IRQ rate */
- if (errno == ENOTTY) {
- fprintf(stderr,
- "\n...Periodic IRQ rate is fixed\n");
- goto done;
- }
- perror("RTC_IRQP_SET ioctl");
- exit(errno);
- }
-
- fprintf(stderr, "\n%ldHz:\t", tmp);
- fflush(stderr);
-
- /* Enable periodic interrupts */
- retval = ioctl(fd, RTC_PIE_ON, 0);
- if (retval == -1) {
- perror("RTC_PIE_ON ioctl");
- exit(errno);
- }
-
- for (i=1; i<21; i++) {
- /* This blocks */
- retval = read(fd, &data, sizeof(unsigned long));
- if (retval == -1) {
- perror("read");
- exit(errno);
- }
- fprintf(stderr, " %d",i);
- fflush(stderr);
- irqcount++;
- }
-
- /* Disable periodic interrupts */
- retval = ioctl(fd, RTC_PIE_OFF, 0);
- if (retval == -1) {
- perror("RTC_PIE_OFF ioctl");
- exit(errno);
- }
- }
-
-done:
- fprintf(stderr, "\n\n\t\t\t *** Test complete ***\n");
-
- close(fd);
-
- return 0;
-}
+If all else fails, check out the tools/testing/selftests/timers/rtctest.c test!
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index b112efc816f1..bc9f6fe44e27 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -997,7 +997,7 @@ for vm-wide capabilities.
4.38 KVM_GET_MP_STATE
Capability: KVM_CAP_MP_STATE
-Architectures: x86, s390
+Architectures: x86, s390, arm, arm64
Type: vcpu ioctl
Parameters: struct kvm_mp_state (out)
Returns: 0 on success; -1 on error
@@ -1011,7 +1011,7 @@ uniprocessor guests).
Possible values are:
- - KVM_MP_STATE_RUNNABLE: the vcpu is currently running [x86]
+ - KVM_MP_STATE_RUNNABLE: the vcpu is currently running [x86,arm/arm64]
- KVM_MP_STATE_UNINITIALIZED: the vcpu is an application processor (AP)
which has not yet received an INIT signal [x86]
- KVM_MP_STATE_INIT_RECEIVED: the vcpu has received an INIT signal, and is
@@ -1020,7 +1020,7 @@ Possible values are:
is waiting for an interrupt [x86]
- KVM_MP_STATE_SIPI_RECEIVED: the vcpu has just received a SIPI (vector
accessible via KVM_GET_VCPU_EVENTS) [x86]
- - KVM_MP_STATE_STOPPED: the vcpu is stopped [s390]
+ - KVM_MP_STATE_STOPPED: the vcpu is stopped [s390,arm/arm64]
- KVM_MP_STATE_CHECK_STOP: the vcpu is in a special error state [s390]
- KVM_MP_STATE_OPERATING: the vcpu is operating (running or halted)
[s390]
@@ -1031,11 +1031,15 @@ On x86, this ioctl is only useful after KVM_CREATE_IRQCHIP. Without an
in-kernel irqchip, the multiprocessing state must be maintained by userspace on
these architectures.
+For arm/arm64:
+
+The only states that are valid are KVM_MP_STATE_STOPPED and
+KVM_MP_STATE_RUNNABLE, which reflect whether the vcpu is paused or not.
4.39 KVM_SET_MP_STATE
Capability: KVM_CAP_MP_STATE
-Architectures: x86, s390
+Architectures: x86, s390, arm, arm64
Type: vcpu ioctl
Parameters: struct kvm_mp_state (in)
Returns: 0 on success; -1 on error
@@ -1047,6 +1051,10 @@ On x86, this ioctl is only useful after KVM_CREATE_IRQCHIP. Without an
in-kernel irqchip, the multiprocessing state must be maintained by userspace on
these architectures.
+For arm/arm64:
+
+The only states that are valid are KVM_MP_STATE_STOPPED and
+KVM_MP_STATE_RUNNABLE, which reflect whether the vcpu should be paused or not.
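As a usage sketch (not part of the patch itself), userspace could pause an
arm/arm64 vcpu with these ioctls; vcpu_fd is assumed to be an already-created
vcpu file descriptor:

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	/* Stop the vcpu; it stays paused until set back to RUNNABLE. */
	static int pause_vcpu(int vcpu_fd)
	{
		struct kvm_mp_state mp_state;

		if (ioctl(vcpu_fd, KVM_GET_MP_STATE, &mp_state) < 0)
			return -1;
		mp_state.mp_state = KVM_MP_STATE_STOPPED;
		return ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp_state);
	}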
4.40 KVM_SET_IDENTITY_MAP_ADDR
@@ -1967,15 +1975,25 @@ registers, find a list below:
MIPS | KVM_REG_MIPS_CP0_STATUS | 32
MIPS | KVM_REG_MIPS_CP0_CAUSE | 32
MIPS | KVM_REG_MIPS_CP0_EPC | 64
+ MIPS | KVM_REG_MIPS_CP0_PRID | 32
MIPS | KVM_REG_MIPS_CP0_CONFIG | 32
MIPS | KVM_REG_MIPS_CP0_CONFIG1 | 32
MIPS | KVM_REG_MIPS_CP0_CONFIG2 | 32
MIPS | KVM_REG_MIPS_CP0_CONFIG3 | 32
+ MIPS | KVM_REG_MIPS_CP0_CONFIG4 | 32
+ MIPS | KVM_REG_MIPS_CP0_CONFIG5 | 32
MIPS | KVM_REG_MIPS_CP0_CONFIG7 | 32
MIPS | KVM_REG_MIPS_CP0_ERROREPC | 64
MIPS | KVM_REG_MIPS_COUNT_CTL | 64
MIPS | KVM_REG_MIPS_COUNT_RESUME | 64
MIPS | KVM_REG_MIPS_COUNT_HZ | 64
+ MIPS | KVM_REG_MIPS_FPR_32(0..31) | 32
+ MIPS | KVM_REG_MIPS_FPR_64(0..31) | 64
+ MIPS | KVM_REG_MIPS_VEC_128(0..31) | 128
+ MIPS | KVM_REG_MIPS_FCR_IR | 32
+ MIPS | KVM_REG_MIPS_FCR_CSR | 32
+ MIPS | KVM_REG_MIPS_MSA_IR | 32
+ MIPS | KVM_REG_MIPS_MSA_CSR | 32
ARM registers are mapped using the lower 32 bits. The upper 16 of that
is the register group type, or coprocessor number:
@@ -2029,6 +2047,25 @@ patterns depending on whether they're 32-bit or 64-bit registers:
MIPS KVM control registers (see above) have the following id bit patterns:
0x7030 0000 0002 <reg:16>
+MIPS FPU registers (see KVM_REG_MIPS_FPR_{32,64}() above) have the following
+id bit patterns depending on the size of the register being accessed. They are
+always accessed according to the current guest FPU mode (Status.FR and
+Config5.FRE), i.e. as the guest would see them, and they become unpredictable
+if the guest FPU mode is changed. MIPS SIMD Architecture (MSA) vector
+registers (see KVM_REG_MIPS_VEC_128() above) have similar patterns as they
+overlap the FPU registers:
+ 0x7020 0000 0003 00 <0:3> <reg:5> (32-bit FPU registers)
+ 0x7030 0000 0003 00 <0:3> <reg:5> (64-bit FPU registers)
+ 0x7040 0000 0003 00 <0:3> <reg:5> (128-bit MSA vector registers)
+
+MIPS FPU control registers (see KVM_REG_MIPS_FCR_{IR,CSR} above) have the
+following id bit patterns:
+ 0x7020 0000 0003 01 <0:3> <reg:5>
+
+MIPS MSA control registers (see KVM_REG_MIPS_MSA_{IR,CSR} above) have the
+following id bit patterns:
+ 0x7020 0000 0003 02 <0:3> <reg:5>
+
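To illustrate, a hypothetical helper (not part of the patch) could assemble
the id of a 64-bit FPU register for KVM_GET_ONE_REG from the pattern above:

	#include <stdint.h>

	/* 0x7030 0000 0003 00 <0:3> <reg:5> -> 64-bit FPU register n */
	static uint64_t kvm_mips_fpr64_id(unsigned int n)
	{
		return 0x7030000000030000ULL | (n & 0x1f);
	}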
4.69 KVM_GET_ONE_REG
@@ -2234,7 +2271,7 @@ into the hash PTE second double word).
4.75 KVM_IRQFD
Capability: KVM_CAP_IRQFD
-Architectures: x86 s390
+Architectures: x86 s390 arm arm64
Type: vm ioctl
Parameters: struct kvm_irqfd (in)
Returns: 0 on success, -1 on error
@@ -2260,6 +2297,10 @@ Note that closing the resamplefd is not sufficient to disable the
irqfd. The KVM_IRQFD_FLAG_RESAMPLE is only necessary on assignment
and need not be specified with KVM_IRQFD_FLAG_DEASSIGN.
+On ARM/ARM64, the gsi field in the kvm_irqfd struct specifies the Shared
+Peripheral Interrupt (SPI) index, such that the GIC interrupt ID is
+given by gsi + 32.
+
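For example (a sketch, not part of the patch), attaching an eventfd efd to
SPI 5, i.e. GIC interrupt ID 37, could look like:

	struct kvm_irqfd irqfd = {
		.fd  = efd,	/* eventfd to attach */
		.gsi = 5,	/* SPI index; GIC interrupt ID = 5 + 32 */
	};
	ret = ioctl(vm_fd, KVM_IRQFD, &irqfd);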
4.76 KVM_PPC_ALLOCATE_HTAB
Capability: KVM_CAP_PPC_ALLOC_HTAB
@@ -2716,6 +2757,227 @@ The fields in each entry are defined as follows:
eax, ebx, ecx, edx: the values returned by the cpuid instruction for
this function/index combination
+4.89 KVM_S390_MEM_OP
+
+Capability: KVM_CAP_S390_MEM_OP
+Architectures: s390
+Type: vcpu ioctl
+Parameters: struct kvm_s390_mem_op (in)
+Returns: = 0 on success,
+ < 0 on generic error (e.g. -EFAULT or -ENOMEM),
+ > 0 if an exception occurred while walking the page tables
+
+Read or write data from/to the logical (virtual) memory of a VCPU.
+
+Parameters are specified via the following structure:
+
+struct kvm_s390_mem_op {
+ __u64 gaddr; /* the guest address */
+ __u64 flags; /* flags */
+ __u32 size; /* amount of bytes */
+ __u32 op; /* type of operation */
+ __u64 buf; /* buffer in userspace */
+ __u8 ar; /* the access register number */
+ __u8 reserved[31]; /* should be set to 0 */
+};
+
+The type of operation is specified in the "op" field. It is either
+KVM_S390_MEMOP_LOGICAL_READ for reading from logical memory space or
+KVM_S390_MEMOP_LOGICAL_WRITE for writing to logical memory space. The
+KVM_S390_MEMOP_F_CHECK_ONLY flag can be set in the "flags" field to check
+whether the corresponding memory access would create an access exception
+(without touching the data in the memory at the destination). In case an
+access exception occurred while walking the MMU tables of the guest, the
+ioctl returns a positive error number to indicate the type of exception.
+This exception is also raised directly at the corresponding VCPU if the
+flag KVM_S390_MEMOP_F_INJECT_EXCEPTION is set in the "flags" field.
+
+The start address of the memory region has to be specified in the "gaddr"
+field, and the length of the region in the "size" field. "buf" is the buffer
+supplied by the userspace application where the read data should be written
+to for KVM_S390_MEMOP_LOGICAL_READ, or where the data that should be written
+is stored for a KVM_S390_MEMOP_LOGICAL_WRITE. "buf" is unused and can be NULL
+when KVM_S390_MEMOP_F_CHECK_ONLY is specified. "ar" designates the access
+register number to be used.
+
+The "reserved" field is meant for future extensions. It is not used by
+KVM with the currently defined set of flags.
+
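A minimal usage sketch (not part of the patch), assuming an open vcpu file
descriptor vcpu_fd and a guest logical address gaddr:

	__u8 data[512];
	struct kvm_s390_mem_op ksmo = {
		.gaddr = gaddr,
		.size  = sizeof(data),
		.op    = KVM_S390_MEMOP_LOGICAL_READ,
		.buf   = (unsigned long)data,
		.ar    = 0,
	};
	/* rc > 0 is the exception raised while walking the page tables */
	rc = ioctl(vcpu_fd, KVM_S390_MEM_OP, &ksmo);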
+4.90 KVM_S390_GET_SKEYS
+
+Capability: KVM_CAP_S390_SKEYS
+Architectures: s390
+Type: vm ioctl
+Parameters: struct kvm_s390_skeys
+Returns: 0 on success, KVM_S390_GET_KEYS_NONE if guest is not using storage
+ keys, negative value on error
+
+This ioctl is used to get guest storage key values on the s390
+architecture. The ioctl takes parameters via the kvm_s390_skeys struct.
+
+struct kvm_s390_skeys {
+ __u64 start_gfn;
+ __u64 count;
+ __u64 skeydata_addr;
+ __u32 flags;
+ __u32 reserved[9];
+};
+
+The start_gfn field is the number of the first guest frame whose storage keys
+you want to get.
+
+The count field is the number of consecutive frames (starting from start_gfn)
+whose storage keys to get. The count field must be at least 1 and the maximum
+allowed value is defined as KVM_S390_SKEYS_ALLOC_MAX. Values outside this range
+will cause the ioctl to return -EINVAL.
+
+The skeydata_addr field is the address to a buffer large enough to hold count
+bytes. This buffer will be filled with storage key data by the ioctl.
+
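For illustration (not part of the patch), fetching the keys of the first 128
guest frames might look like:

	__u8 keys[128];
	struct kvm_s390_skeys skeys = {
		.start_gfn     = 0,
		.count         = 128,	/* 1 <= count <= KVM_S390_SKEYS_ALLOC_MAX */
		.skeydata_addr = (unsigned long)keys,
	};
	rc = ioctl(vm_fd, KVM_S390_GET_SKEYS, &skeys);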
+4.91 KVM_S390_SET_SKEYS
+
+Capability: KVM_CAP_S390_SKEYS
+Architectures: s390
+Type: vm ioctl
+Parameters: struct kvm_s390_skeys
+Returns: 0 on success, negative value on error
+
+This ioctl is used to set guest storage key values on the s390
+architecture. The ioctl takes parameters via the kvm_s390_skeys struct.
+See section on KVM_S390_GET_SKEYS for struct definition.
+
+The start_gfn field is the number of the first guest frame whose storage keys
+you want to set.
+
+The count field is the number of consecutive frames (starting from start_gfn)
+whose storage keys to set. The count field must be at least 1 and the maximum
+allowed value is defined as KVM_S390_SKEYS_ALLOC_MAX. Values outside this range
+will cause the ioctl to return -EINVAL.
+
+The skeydata_addr field is the address to a buffer containing count bytes of
+storage keys. Each byte in the buffer will be set as the storage key for a
+single frame starting at start_gfn for count frames.
+
+Note: If any architecturally invalid key value is found in the given data then
+the ioctl will return -EINVAL.
+
+4.92 KVM_S390_IRQ
+
+Capability: KVM_CAP_S390_INJECT_IRQ
+Architectures: s390
+Type: vcpu ioctl
+Parameters: struct kvm_s390_irq (in)
+Returns: 0 on success, -1 on error
+Errors:
+ EINVAL: interrupt type is invalid
+ type is KVM_S390_SIGP_STOP and the flag parameter has an invalid value
+ type is KVM_S390_INT_EXTERNAL_CALL and code is bigger
+ than the maximum number of VCPUs
+ EBUSY: type is KVM_S390_SIGP_SET_PREFIX and vcpu is not stopped
+ type is KVM_S390_SIGP_STOP and a stop irq is already pending
+ type is KVM_S390_INT_EXTERNAL_CALL and an external call interrupt
+ is already pending
+
+Allows injecting an interrupt into the guest.
+
+Using struct kvm_s390_irq as a parameter allows injecting additional
+payload which is not possible via KVM_S390_INTERRUPT.
+
+Interrupt parameters are passed via kvm_s390_irq:
+
+struct kvm_s390_irq {
+ __u64 type;
+ union {
+ struct kvm_s390_io_info io;
+ struct kvm_s390_ext_info ext;
+ struct kvm_s390_pgm_info pgm;
+ struct kvm_s390_emerg_info emerg;
+ struct kvm_s390_extcall_info extcall;
+ struct kvm_s390_prefix_info prefix;
+ struct kvm_s390_stop_info stop;
+ struct kvm_s390_mchk_info mchk;
+ char reserved[64];
+ } u;
+};
+
+type can be one of the following:
+
+KVM_S390_SIGP_STOP - sigp stop; parameter in .stop
+KVM_S390_PROGRAM_INT - program check; parameters in .pgm
+KVM_S390_SIGP_SET_PREFIX - sigp set prefix; parameters in .prefix
+KVM_S390_RESTART - restart; no parameters
+KVM_S390_INT_CLOCK_COMP - clock comparator interrupt; no parameters
+KVM_S390_INT_CPU_TIMER - CPU timer interrupt; no parameters
+KVM_S390_INT_EMERGENCY - sigp emergency; parameters in .emerg
+KVM_S390_INT_EXTERNAL_CALL - sigp external call; parameters in .extcall
+KVM_S390_MCHK - machine check interrupt; parameters in .mchk
+
+
+Note that the vcpu ioctl is asynchronous to vcpu execution.
+
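As a sketch (not part of the patch), injecting a sigp emergency signal
together with its payload:

	struct kvm_s390_irq irq = {
		.type         = KVM_S390_INT_EMERGENCY,
		.u.emerg.code = 1,	/* address of the signalling cpu */
	};
	rc = ioctl(vcpu_fd, KVM_S390_IRQ, &irq);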
+4.94 KVM_S390_GET_IRQ_STATE
+
+Capability: KVM_CAP_S390_IRQ_STATE
+Architectures: s390
+Type: vcpu ioctl
+Parameters: struct kvm_s390_irq_state (out)
+Returns: >= number of bytes copied into buffer,
+ -EINVAL if buffer size is 0,
+ -ENOBUFS if buffer size is too small to fit all pending interrupts,
+ -EFAULT if the buffer address was invalid
+
+This ioctl allows userspace to retrieve the complete state of all currently
+pending interrupts in a single buffer. Use cases include migration
+and introspection. The parameter structure contains the address of a
+userspace buffer and its length:
+
+struct kvm_s390_irq_state {
+ __u64 buf;
+ __u32 flags;
+ __u32 len;
+ __u32 reserved[4];
+};
+
+Userspace passes in the above struct and for each pending interrupt a
+struct kvm_s390_irq is copied to the provided buffer.
+
+If -ENOBUFS is returned, the buffer provided was too small and userspace
+may retry with a bigger buffer.
+
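A retry loop in userspace (a sketch, not part of the patch) could look like:

	__u32 len = 16 * sizeof(struct kvm_s390_irq);
	void *buf = malloc(len);
	struct kvm_s390_irq_state irq_state = { 0 };
	int rc;

	for (;;) {
		irq_state.buf = (unsigned long)buf;
		irq_state.len = len;
		rc = ioctl(vcpu_fd, KVM_S390_GET_IRQ_STATE, &irq_state);
		if (rc >= 0 || errno != ENOBUFS)
			break;		/* rc >= 0: bytes copied */
		len *= 2;		/* buffer too small, grow and retry */
		buf = realloc(buf, len);
	}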
+4.95 KVM_S390_SET_IRQ_STATE
+
+Capability: KVM_CAP_S390_IRQ_STATE
+Architectures: s390
+Type: vcpu ioctl
+Parameters: struct kvm_s390_irq_state (in)
+Returns: 0 on success,
+ -EFAULT if the buffer address was invalid,
+ -EINVAL for an invalid buffer length (see below),
+ -EBUSY if there were already interrupts pending,
+ errors occurring when actually injecting the
+ interrupt. See KVM_S390_IRQ.
+
+This ioctl allows userspace to set the complete state of all cpu-local
+interrupts currently pending for the vcpu. It is intended for restoring
+interrupt state after a migration. The input parameter is a userspace buffer
+containing a struct kvm_s390_irq_state:
+
+struct kvm_s390_irq_state {
+ __u64 buf;
+ __u32 len;
+ __u32 pad;
+};
+
+The userspace memory referenced by buf contains a struct kvm_s390_irq
+for each interrupt to be injected into the guest.
+If one of the interrupts could not be injected for some reason, the
+ioctl aborts.
+
+len must be a multiple of sizeof(struct kvm_s390_irq). It must be > 0
+and it must not exceed (max_vcpus + 32) * sizeof(struct kvm_s390_irq),
+which is the maximum number of possibly pending cpu-local interrupts.
+
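Restoring the saved state on the target (again a sketch, not part of the
patch; n_irqs interrupts were saved into irqs[] by KVM_S390_GET_IRQ_STATE):

	struct kvm_s390_irq_state irq_state = {
		.buf = (unsigned long)irqs,
		.len = n_irqs * sizeof(struct kvm_s390_irq),
	};
	rc = ioctl(vcpu_fd, KVM_S390_SET_IRQ_STATE, &irq_state);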
5. The kvm_run structure
------------------------
@@ -3189,6 +3451,31 @@ Parameters: none
This capability enables the in-kernel irqchip for s390. Please refer to
"4.24 KVM_CREATE_IRQCHIP" for details.
+6.9 KVM_CAP_MIPS_FPU
+
+Architectures: mips
+Target: vcpu
+Parameters: args[0] is reserved for future use (should be 0).
+
+This capability allows the use of the host Floating Point Unit by the guest. It
+allows the Config1.FP bit to be set to enable the FPU in the guest. Once this is
+done the KVM_REG_MIPS_FPR_* and KVM_REG_MIPS_FCR_* registers can be accessed
+(depending on the current guest FPU register mode), and the Status.FR,
+Config5.FRE bits are accessible via the KVM API and also from the guest,
+depending on them being supported by the FPU.
+
+6.10 KVM_CAP_MIPS_MSA
+
+Architectures: mips
+Target: vcpu
+Parameters: args[0] is reserved for future use (should be 0).
+
+This capability allows the use of the MIPS SIMD Architecture (MSA) by the guest.
+It allows the Config3.MSAP bit to be set to enable the use of MSA by the guest.
+Once this is done the KVM_REG_MIPS_VEC_* and KVM_REG_MIPS_MSA_* registers can be
+accessed, and the Config5.MSAEn bit is accessible via the KVM API and also from
+the guest.
+
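Both capabilities are enabled through the generic KVM_ENABLE_CAP vcpu ioctl;
a sketch (not part of the patch):

	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_MIPS_FPU,	/* or KVM_CAP_MIPS_MSA */
	};
	if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MIPS_FPU) > 0)
		rc = ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);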
7. Capabilities that can be enabled on VMs
------------------------------------------
@@ -3248,3 +3535,41 @@ All other orders will be handled completely in user space.
Only privileged operation exceptions will be checked for in the kernel (or even
in the hardware prior to interception). If this capability is not enabled, the
old way of handling SIGP orders is used (partially in kernel and user space).
+
+7.3 KVM_CAP_S390_VECTOR_REGISTERS
+
+Architectures: s390
+Parameters: none
+Returns: 0 on success, negative value on error
+
+Allows use of the vector registers introduced with the z13 processor, and
+provides for the synchronization between host and user space. Will
+return -EINVAL if the machine does not support vectors.
+
+7.4 KVM_CAP_S390_USER_STSI
+
+Architectures: s390
+Parameters: none
+
+This capability allows post-handlers for the STSI instruction. After
+initial handling in the kernel, KVM exits to user space with
+KVM_EXIT_S390_STSI to allow user space to insert further data.
+
+Before exiting to userspace, kvm handlers should fill in the s390_stsi field of
+vcpu->run:
+struct {
+ __u64 addr;
+ __u8 ar;
+ __u8 reserved;
+ __u8 fc;
+ __u8 sel1;
+ __u16 sel2;
+} s390_stsi;
+
+@addr - guest address of STSI SYSIB
+@fc - function code
+@sel1 - selector 1
+@sel2 - selector 2
+@ar - access register number
+
+KVM handlers should exit to userspace with rc = -EREMOTE.
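On the userspace side the exit could be handled roughly like this (a sketch,
not part of the patch; insert_stsi_data() is a hypothetical helper):

	if (run->exit_reason == KVM_EXIT_S390_STSI) {
		/*
		 * Write the additional SYSIB data into guest memory at
		 * run->s390_stsi.addr; fc/sel1/sel2 identify the block and
		 * ar is the access register used for the translation.
		 */
		insert_stsi_data(vcpu_fd, &run->s390_stsi);
	}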
diff --git a/Documentation/virtual/kvm/devices/s390_flic.txt b/Documentation/virtual/kvm/devices/s390_flic.txt
index 4ceef53164b0..d1ad9d5cae46 100644
--- a/Documentation/virtual/kvm/devices/s390_flic.txt
+++ b/Documentation/virtual/kvm/devices/s390_flic.txt
@@ -27,6 +27,9 @@ Groups:
Copies all floating interrupts into a buffer provided by userspace.
When the buffer is too small it returns -ENOMEM, which is the indication
for userspace to try again with a bigger buffer.
+ -ENOBUFS is returned when the allocation of a kernelspace buffer has
+ failed.
+ -EFAULT is returned when copying data to userspace failed.
All interrupts remain pending, i.e. are not deleted from the list of
currently pending interrupts.
attr->addr contains the userspace address of the buffer into which all
diff --git a/MAINTAINERS b/MAINTAINERS
index 358eb0105e00..b84686826b23 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -637,8 +637,7 @@ F: drivers/gpu/drm/radeon/radeon_kfd.h
F: include/uapi/linux/kfd_ioctl.h
AMD MICROCODE UPDATE SUPPORT
-M: Andreas Herrmann <herrmann.der.user@googlemail.com>
-L: amd64-microcode@amd64.org
+M: Borislav Petkov <bp@alien8.de>
S: Maintained
F: arch/x86/kernel/cpu/microcode/amd*
@@ -1186,7 +1185,7 @@ M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-mvebu/
-F: drivers/rtc/armada38x-rtc
+F: drivers/rtc/rtc-armada38x.c
ARM/Marvell Berlin SoC support
M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
@@ -1362,6 +1361,7 @@ F: drivers/i2c/busses/i2c-rk3x.c
F: drivers/*/*rockchip*
F: drivers/*/*/*rockchip*
F: sound/soc/rockchip/
+N: rockchip
ARM/SAMSUNG EXYNOS ARM ARCHITECTURES
M: Kukjin Kim <kgene@kernel.org>
@@ -1675,8 +1675,8 @@ F: drivers/misc/eeprom/at24.c
F: include/linux/platform_data/at24.h
ATA OVER ETHERNET (AOE) DRIVER
-M: "Ed L. Cashin" <ecashin@coraid.com>
-W: http://support.coraid.com/support/linux
+M: "Ed L. Cashin" <ed.cashin@acm.org>
+W: http://www.openaoe.org/
S: Supported
F: Documentation/aoe/
F: drivers/block/aoe/
@@ -3252,6 +3252,13 @@ S: Maintained
F: Documentation/hwmon/dme1737
F: drivers/hwmon/dme1737.c
+DMI/SMBIOS SUPPORT
+M: Jean Delvare <jdelvare@suse.de>
+S: Maintained
+F: drivers/firmware/dmi-id.c
+F: drivers/firmware/dmi_scan.c
+F: include/linux/dmi.h
+
DOCKING STATION DRIVER
M: Shaohua Li <shaohua.li@intel.com>
L: linux-acpi@vger.kernel.org
@@ -5087,7 +5094,7 @@ S: Supported
F: drivers/platform/x86/intel_menlow.c
INTEL IA32 MICROCODE UPDATE SUPPORT
-M: Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
+M: Borislav Petkov <bp@alien8.de>
S: Maintained
F: arch/x86/kernel/cpu/microcode/core*
F: arch/x86/kernel/cpu/microcode/intel*
@@ -5128,22 +5135,21 @@ M: Deepak Saxena <dsaxena@plexity.net>
S: Maintained
F: drivers/char/hw_random/ixp4xx-rng.c
-INTEL ETHERNET DRIVERS (e100/e1000/e1000e/fm10k/igb/igbvf/ixgb/ixgbe/ixgbevf/i40e/i40evf)
+INTEL ETHERNET DRIVERS
M: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
-M: Jesse Brandeburg <jesse.brandeburg@intel.com>
-M: Bruce Allan <bruce.w.allan@intel.com>
-M: Carolyn Wyborny <carolyn.wyborny@intel.com>
-M: Don Skidmore <donald.c.skidmore@intel.com>
-M: Greg Rose <gregory.v.rose@intel.com>
-M: Matthew Vick <matthew.vick@intel.com>
-M: John Ronciak <john.ronciak@intel.com>
-M: Mitch Williams <mitch.a.williams@intel.com>
-M: Linux NICS <linux.nics@intel.com>
-L: e1000-devel@lists.sourceforge.net
+R: Jesse Brandeburg <jesse.brandeburg@intel.com>
+R: Shannon Nelson <shannon.nelson@intel.com>
+R: Carolyn Wyborny <carolyn.wyborny@intel.com>
+R: Don Skidmore <donald.c.skidmore@intel.com>
+R: Matthew Vick <matthew.vick@intel.com>
+R: John Ronciak <john.ronciak@intel.com>
+R: Mitch Williams <mitch.a.williams@intel.com>
+L: intel-wired-lan@lists.osuosl.org
W: http://www.intel.com/support/feedback.htm
W: http://e1000.sourceforge.net/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net.git
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next.git
+Q: http://patchwork.ozlabs.org/project/intel-wired-lan/list/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-queue.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue.git
S: Supported
F: Documentation/networking/e100.txt
F: Documentation/networking/e1000.txt
@@ -5585,6 +5591,8 @@ S: Supported
F: Documentation/*/kvm*.txt
F: Documentation/virtual/kvm/
F: arch/*/kvm/
+F: arch/x86/kernel/kvm.c
+F: arch/x86/kernel/kvmclock.c
F: arch/*/include/asm/kvm*
F: include/linux/kvm*
F: include/uapi/linux/kvm*
@@ -8553,6 +8561,7 @@ F: include/uapi/linux/timex.h
F: kernel/time/clocksource.c
F: kernel/time/time*.c
F: kernel/time/ntp.c
+F: tools/testing/selftests/timers/
SC1200 WDT DRIVER
M: Zwane Mwaikambo <zwanem@gmail.com>
diff --git a/Makefile b/Makefile
index 14c722f96877..9b76ce1e08bb 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 0
SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION =
NAME = Hurr durr I'ma sheep
# *DOCUMENTATION*
@@ -779,6 +779,7 @@ KBUILD_ARFLAGS := $(call ar-option,D)
# check for 'asm goto'
ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC)), y)
KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
+ KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
endif
include $(srctree)/scripts/Makefile.kasan
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
index 114234e83caa..edda76fae83f 100644
--- a/arch/arc/kernel/signal.c
+++ b/arch/arc/kernel/signal.c
@@ -67,7 +67,7 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
sigset_t *set)
{
int err;
- err = __copy_to_user(&(sf->uc.uc_mcontext.regs), regs,
+ err = __copy_to_user(&(sf->uc.uc_mcontext.regs.scratch), regs,
sizeof(sf->uc.uc_mcontext.regs.scratch));
err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));
@@ -83,7 +83,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
if (!err)
set_current_blocked(&set);
- err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs),
+ err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs.scratch),
sizeof(sf->uc.uc_mcontext.regs.scratch));
return err;
@@ -131,6 +131,15 @@ SYSCALL_DEFINE0(rt_sigreturn)
/* Don't restart from sigreturn */
syscall_wont_restart(regs);
+ /*
+ * Ensure that sigreturn always returns to user mode (in case the
+ * regs saved on user stack got fudged between save and sigreturn)
+ * Otherwise it is easy to panic the kernel with a custom
+ * signal handler and/or restorer which clobbers the status32/ret
+ * to return to a bogus location in kernel mode.
+ */
+ regs->status32 |= STATUS_U_MASK;
+
return regs->r0;
badframe:
@@ -229,8 +238,11 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
/*
* handler returns using sigreturn stub provided already by userspace
+ * If not, nuke the process right away
*/
- BUG_ON(!(ksig->ka.sa.sa_flags & SA_RESTORER));
+ if (!(ksig->ka.sa.sa_flags & SA_RESTORER))
+ return 1;
+
regs->blink = (unsigned long)ksig->ka.sa.sa_restorer;
/* User Stack for signal handler will be above the frame just carved */
@@ -296,12 +308,12 @@ static void
handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
sigset_t *oldset = sigmask_to_save();
- int ret;
+ int failed;
/* Set up the stack frame */
- ret = setup_rt_frame(ksig, oldset, regs);
+ failed = setup_rt_frame(ksig, oldset, regs);
- signal_setup_done(ret, ksig, 0);
+ signal_setup_done(failed, ksig, 0);
}
void do_signal(struct pt_regs *regs)
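
For context, the nonzero return from setup_rt_frame() is consumed by the generic signal_setup_done() helper, which turns a failed frame setup into a forced SIGSEGV for the offending task instead of a kernel BUG. A simplified sketch of that contract (abridged from the generic signal code, not part of this patch):

    void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
    {
            if (failed)
                    force_sigsegv(ksig->sig, current);    /* nuke the task */
            else
                    signal_delivered(ksig, stepping);     /* commit delivery */
    }

This is also why the ret -> failed rename in handle_signal() above is more than cosmetic: the value is a boolean failure flag, not an errno.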
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 9f1f09a2bc9b..cf4c0c99aa25 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -619,6 +619,7 @@ config ARCH_PXA
select GENERIC_CLOCKEVENTS
select GPIO_PXA
select HAVE_IDE
+ select IRQ_DOMAIN
select MULTI_IRQ_HANDLER
select PLAT_PXA
select SPARSE_IRQ
diff --git a/arch/arm/boot/dts/dm8168-evm.dts b/arch/arm/boot/dts/dm8168-evm.dts
index d3a29c1b8417..afe678f6d2e9 100644
--- a/arch/arm/boot/dts/dm8168-evm.dts
+++ b/arch/arm/boot/dts/dm8168-evm.dts
@@ -36,6 +36,20 @@
>;
};
+ mmc_pins: pinmux_mmc_pins {
+ pinctrl-single,pins = <
+ DM816X_IOPAD(0x0a70, MUX_MODE0) /* SD_POW */
+ DM816X_IOPAD(0x0a74, MUX_MODE0) /* SD_CLK */
+ DM816X_IOPAD(0x0a78, MUX_MODE0) /* SD_CMD */
+ DM816X_IOPAD(0x0a7C, MUX_MODE0) /* SD_DAT0 */
+ DM816X_IOPAD(0x0a80, MUX_MODE0) /* SD_DAT1 */
+ DM816X_IOPAD(0x0a84, MUX_MODE0) /* SD_DAT2 */
+ DM816X_IOPAD(0x0a88, MUX_MODE0) /* SD_DAT3 */
+ DM816X_IOPAD(0x0a8c, MUX_MODE2) /* GP1[7] */
+ DM816X_IOPAD(0x0a90, MUX_MODE2) /* GP1[8] */
+ >;
+ };
+
usb0_pins: pinmux_usb0_pins {
pinctrl-single,pins = <
DM816X_IOPAD(0x0d00, MUX_MODE0) /* USB0_DRVVBUS */
@@ -137,7 +151,12 @@
};
&mmc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc_pins>;
vmmc-supply = <&vmmcsd_fixed>;
+ bus-width = <4>;
+ cd-gpios = <&gpio2 7 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&gpio2 8 GPIO_ACTIVE_LOW>;
};
/* At least dm8168-evm rev c won't support multipoint, later may */
diff --git a/arch/arm/boot/dts/dm816x.dtsi b/arch/arm/boot/dts/dm816x.dtsi
index 3c97b5f2addc..f35715bc6992 100644
--- a/arch/arm/boot/dts/dm816x.dtsi
+++ b/arch/arm/boot/dts/dm816x.dtsi
@@ -150,17 +150,27 @@
};
gpio1: gpio@48032000 {
- compatible = "ti,omap3-gpio";
+ compatible = "ti,omap4-gpio";
ti,hwmods = "gpio1";
+ ti,gpio-always-on;
reg = <0x48032000 0x1000>;
- interrupts = <97>;
+ interrupts = <96>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
};
gpio2: gpio@4804c000 {
- compatible = "ti,omap3-gpio";
+ compatible = "ti,omap4-gpio";
ti,hwmods = "gpio2";
+ ti,gpio-always-on;
reg = <0x4804c000 0x1000>;
- interrupts = <99>;
+ interrupts = <98>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
};
gpmc: gpmc@50000000 {
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 127608d79033..c4659a979c41 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -1111,7 +1111,6 @@
"wkupclk", "refclk",
"div-clk", "phy-div";
#phy-cells = <0>;
- ti,hwmods = "pcie1-phy";
};
pcie2_phy: pciephy@4a095000 {
@@ -1130,7 +1129,6 @@
"wkupclk", "refclk",
"div-clk", "phy-div";
#phy-cells = <0>;
- ti,hwmods = "pcie2-phy";
status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
index f4f78c40b564..3fdc84fddb70 100644
--- a/arch/arm/boot/dts/omap3.dtsi
+++ b/arch/arm/boot/dts/omap3.dtsi
@@ -92,6 +92,8 @@
ti,hwmods = "aes";
reg = <0x480c5000 0x50>;
interrupts = <0>;
+ dmas = <&sdma 65 &sdma 66>;
+ dma-names = "tx", "rx";
};
prm: prm@48306000 {
@@ -550,6 +552,8 @@
ti,hwmods = "sham";
reg = <0x480c3000 0x64>;
interrupts = <49>;
+ dmas = <&sdma 69>;
+ dma-names = "rx";
};
smartreflex_core: smartreflex@480cb000 {
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index d771f687a13b..eccc78d3220b 100644
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
@@ -411,6 +411,7 @@
"mac_clk_rx", "mac_clk_tx",
"clk_mac_ref", "clk_mac_refout",
"aclk_mac", "pclk_mac";
+ status = "disabled";
};
usb_host0_ehci: usb@ff500000 {
diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi
index 9d8760956752..d9176e606173 100644
--- a/arch/arm/boot/dts/socfpga.dtsi
+++ b/arch/arm/boot/dts/socfpga.dtsi
@@ -660,7 +660,7 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <0xfff01000 0x1000>;
- interrupts = <0 156 4>;
+ interrupts = <0 155 4>;
num-cs = <4>;
clocks = <&spi_m_clk>;
status = "disabled";
diff --git a/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts b/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts
index ab7891c43231..75742f8f96f3 100644
--- a/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts
+++ b/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts
@@ -56,6 +56,22 @@
model = "Olimex A10-OLinuXino-LIME";
compatible = "olimex,a10-olinuxino-lime", "allwinner,sun4i-a10";
+ cpus {
+ cpu0: cpu@0 {
+ /*
+ * The A10-Lime is known to be unstable
+ * when running at 1008 MHz
+ */
+ operating-points = <
+ /* kHz uV */
+ 912000 1350000
+ 864000 1300000
+ 624000 1250000
+ >;
+ cooling-max-level = <2>;
+ };
+ };
+
soc@01c00000 {
emac: ethernet@01c0b000 {
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index 5c2925831f20..eebb7853e00b 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -75,7 +75,6 @@
clock-latency = <244144>; /* 8 32k periods */
operating-points = <
/* kHz uV */
- 1056000 1500000
1008000 1400000
912000 1350000
864000 1300000
@@ -83,7 +82,7 @@
>;
#cooling-cells = <2>;
cooling-min-level = <0>;
- cooling-max-level = <4>;
+ cooling-max-level = <3>;
};
};
diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi
index f8818f1edbbe..883cb4873688 100644
--- a/arch/arm/boot/dts/sun5i-a13.dtsi
+++ b/arch/arm/boot/dts/sun5i-a13.dtsi
@@ -47,7 +47,6 @@
clock-latency = <244144>; /* 8 32k periods */
operating-points = <
/* kHz uV */
- 1104000 1500000
1008000 1400000
912000 1350000
864000 1300000
@@ -57,7 +56,7 @@
>;
#cooling-cells = <2>;
cooling-min-level = <0>;
- cooling-max-level = <6>;
+ cooling-max-level = <5>;
};
};
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index 3a8530b79f1c..fdd181792b4b 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -105,7 +105,6 @@
clock-latency = <244144>; /* 8 32k periods */
operating-points = <
/* kHz uV */
- 1008000 1450000
960000 1400000
912000 1400000
864000 1300000
@@ -116,7 +115,7 @@
>;
#cooling-cells = <2>;
cooling-min-level = <0>;
- cooling-max-level = <7>;
+ cooling-max-level = <6>;
};
cpu@1 {
diff --git a/arch/arm/include/asm/jump_label.h b/arch/arm/include/asm/jump_label.h
index 70f9b9bfb1f9..5f337dc5c108 100644
--- a/arch/arm/include/asm/jump_label.h
+++ b/arch/arm/include/asm/jump_label.h
@@ -1,7 +1,7 @@
#ifndef _ASM_ARM_JUMP_LABEL_H
#define _ASM_ARM_JUMP_LABEL_H
-#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
#include <linux/types.h>
@@ -27,8 +27,6 @@ l_yes:
return true;
}
-#endif /* __KERNEL__ */
-
typedef u32 jump_label_t;
struct jump_entry {
@@ -37,4 +35,5 @@ struct jump_entry {
jump_label_t key;
};
+#endif /* __ASSEMBLY__ */
#endif
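
The guard swap here pairs with the Makefile hunk above: with -DCC_HAVE_ASM_GOTO now passed through KBUILD_AFLAGS as well, this header can be pulled into assembly files, so the C-only parts must hide behind #ifndef __ASSEMBLY__ rather than #ifdef __KERNEL__. The resulting header shape, sketched:

    #ifndef _ASM_ARM_JUMP_LABEL_H
    #define _ASM_ARM_JUMP_LABEL_H

    #ifndef __ASSEMBLY__
    /* C-only: <linux/types.h>, arch_static_branch(), struct jump_entry */
    #endif /* __ASSEMBLY__ */

    /* anything an .S file needs can live here, visible to both */
    #endif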
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
index 816db0bf2dd8..d995821f1698 100644
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -185,6 +185,7 @@
#define HSR_COND (0xfU << HSR_COND_SHIFT)
#define FSC_FAULT (0x04)
+#define FSC_ACCESS (0x08)
#define FSC_PERM (0x0c)
/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 41008cd7c53f..d71607c16601 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -27,6 +27,8 @@
#include <asm/fpstate.h>
#include <kvm/arm_arch_timer.h>
+#define __KVM_HAVE_ARCH_INTC_INITIALIZED
+
#if defined(CONFIG_KVM_ARM_MAX_VCPUS)
#define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS
#else
@@ -165,19 +167,10 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
+int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
/* We do not have shadow page tables, hence the empty hooks */
-static inline int kvm_age_hva(struct kvm *kvm, unsigned long start,
- unsigned long end)
-{
- return 0;
-}
-
-static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
-{
- return 0;
-}
-
static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
unsigned long address)
{
diff --git a/arch/arm/include/asm/kvm_mmio.h b/arch/arm/include/asm/kvm_mmio.h
index 3f83db2f6cf0..d8e90c8cb5fa 100644
--- a/arch/arm/include/asm/kvm_mmio.h
+++ b/arch/arm/include/asm/kvm_mmio.h
@@ -28,28 +28,6 @@ struct kvm_decode {
bool sign_extend;
};
-/*
- * The in-kernel MMIO emulation code wants to use a copy of run->mmio,
- * which is an anonymous type. Use our own type instead.
- */
-struct kvm_exit_mmio {
- phys_addr_t phys_addr;
- u8 data[8];
- u32 len;
- bool is_write;
- void *private;
-};
-
-static inline void kvm_prepare_mmio(struct kvm_run *run,
- struct kvm_exit_mmio *mmio)
-{
- run->mmio.phys_addr = mmio->phys_addr;
- run->mmio.len = mmio->len;
- run->mmio.is_write = mmio->is_write;
- memcpy(run->mmio.data, mmio->data, mmio->len);
- run->exit_reason = KVM_EXIT_MMIO;
-}
-
int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
phys_addr_t fault_ipa);
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index 0db25bc32864..2499867dd0d8 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -198,6 +198,9 @@ struct kvm_arch_memory_slot {
/* Highest supported SPI, from VGIC_NR_IRQS */
#define KVM_ARM_IRQ_GIC_MAX 127
+/* A single KVM irqchip, i.e. the VGIC */
+#define KVM_NR_IRQCHIPS 1
+
/* PSCI interface */
#define KVM_PSCI_FN_BASE 0x95c1ba5e
#define KVM_PSCI_FN(n) (KVM_PSCI_FN_BASE + (n))
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 2d2d6087b9b1..488eaac56028 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -190,7 +190,6 @@ int main(void)
DEFINE(VCPU_HxFAR, offsetof(struct kvm_vcpu, arch.fault.hxfar));
DEFINE(VCPU_HPFAR, offsetof(struct kvm_vcpu, arch.fault.hpfar));
DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.fault.hyp_pc));
-#ifdef CONFIG_KVM_ARM_VGIC
DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu));
DEFINE(VGIC_V2_CPU_HCR, offsetof(struct vgic_cpu, vgic_v2.vgic_hcr));
DEFINE(VGIC_V2_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr));
@@ -200,14 +199,11 @@ int main(void)
DEFINE(VGIC_V2_CPU_APR, offsetof(struct vgic_cpu, vgic_v2.vgic_apr));
DEFINE(VGIC_V2_CPU_LR, offsetof(struct vgic_cpu, vgic_v2.vgic_lr));
DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr));
-#ifdef CONFIG_KVM_ARM_TIMER
DEFINE(VCPU_TIMER_CNTV_CTL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl));
DEFINE(VCPU_TIMER_CNTV_CVAL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_cval));
DEFINE(KVM_TIMER_CNTVOFF, offsetof(struct kvm, arch.timer.cntvoff));
DEFINE(KVM_TIMER_ENABLED, offsetof(struct kvm, arch.timer.enabled));
-#endif
DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base));
-#endif
DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr));
#endif
return 0;
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
index 338ace78ed18..f1f79d104309 100644
--- a/arch/arm/kvm/Kconfig
+++ b/arch/arm/kvm/Kconfig
@@ -18,6 +18,7 @@ if VIRTUALIZATION
config KVM
bool "Kernel-based Virtual Machine (KVM) support"
+ depends on MMU && OF
select PREEMPT_NOTIFIERS
select ANON_INODES
select HAVE_KVM_CPU_RELAX_INTERCEPT
@@ -26,10 +27,12 @@ config KVM
select KVM_ARM_HOST
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
select SRCU
- depends on ARM_VIRT_EXT && ARM_LPAE
+ select MMU_NOTIFIER
+ select HAVE_KVM_EVENTFD
+ select HAVE_KVM_IRQFD
+ depends on ARM_VIRT_EXT && ARM_LPAE && ARM_ARCH_TIMER
---help---
- Support hosting virtualized guest machines. You will also
- need to select one or more of the processor modules below.
+ Support hosting virtualized guest machines.
This module provides access to the hardware capabilities through
a character device node named /dev/kvm.
@@ -37,10 +40,7 @@ config KVM
If unsure, say N.
config KVM_ARM_HOST
- bool "KVM host support for ARM cpus."
- depends on KVM
- depends on MMU
- select MMU_NOTIFIER
+ bool
---help---
Provides host support for ARM processors.
@@ -55,20 +55,4 @@ config KVM_ARM_MAX_VCPUS
large, so only choose a reasonable number that you expect to
actually use.
-config KVM_ARM_VGIC
- bool "KVM support for Virtual GIC"
- depends on KVM_ARM_HOST && OF
- select HAVE_KVM_IRQCHIP
- default y
- ---help---
- Adds support for a hardware assisted, in-kernel GIC emulation.
-
-config KVM_ARM_TIMER
- bool "KVM support for Architected Timers"
- depends on KVM_ARM_VGIC && ARM_ARCH_TIMER
- select HAVE_KVM_IRQCHIP
- default y
- ---help---
- Adds support for the Architected Timers in virtual machines
-
endif # VIRTUALIZATION
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
index 443b8bea43e9..139e46c08b6e 100644
--- a/arch/arm/kvm/Makefile
+++ b/arch/arm/kvm/Makefile
@@ -7,7 +7,7 @@ ifeq ($(plus_virt),+virt)
plus_virt_def := -DREQUIRES_VIRT=1
endif
-ccflags-y += -Ivirt/kvm -Iarch/arm/kvm
+ccflags-y += -Iarch/arm/kvm
CFLAGS_arm.o := -I. $(plus_virt_def)
CFLAGS_mmu.o := -I.
@@ -15,12 +15,12 @@ AFLAGS_init.o := -Wa,-march=armv7-a$(plus_virt)
AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt)
KVM := ../../../virt/kvm
-kvm-arm-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o
+kvm-arm-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o
obj-y += kvm-arm.o init.o interrupts.o
obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o
-obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
-obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o
-obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2-emul.o
-obj-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o
+obj-y += $(KVM)/arm/vgic.o
+obj-y += $(KVM)/arm/vgic-v2.o
+obj-y += $(KVM)/arm/vgic-v2-emul.o
+obj-y += $(KVM)/arm/arch_timer.o
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 5560f74f9eee..6f536451ab78 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -61,8 +61,6 @@ static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
static u8 kvm_next_vmid;
static DEFINE_SPINLOCK(kvm_vmid_lock);
-static bool vgic_present;
-
static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
{
BUG_ON(preemptible());
@@ -173,8 +171,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
int r;
switch (ext) {
case KVM_CAP_IRQCHIP:
- r = vgic_present;
- break;
+ case KVM_CAP_IRQFD:
+ case KVM_CAP_IOEVENTFD:
case KVM_CAP_DEVICE_CTRL:
case KVM_CAP_USER_MEMORY:
case KVM_CAP_SYNC_MMU:
@@ -183,6 +181,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_ARM_PSCI:
case KVM_CAP_ARM_PSCI_0_2:
case KVM_CAP_READONLY_MEM:
+ case KVM_CAP_MP_STATE:
r = 1;
break;
case KVM_CAP_COALESCED_MMIO:
@@ -268,7 +267,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
- return 0;
+ return kvm_timer_should_fire(vcpu);
}
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
@@ -313,13 +312,29 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
- return -EINVAL;
+ if (vcpu->arch.pause)
+ mp_state->mp_state = KVM_MP_STATE_STOPPED;
+ else
+ mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
+
+ return 0;
}
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
- return -EINVAL;
+ switch (mp_state->mp_state) {
+ case KVM_MP_STATE_RUNNABLE:
+ vcpu->arch.pause = false;
+ break;
+ case KVM_MP_STATE_STOPPED:
+ vcpu->arch.pause = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
}
/**
@@ -452,6 +467,11 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
return 0;
}
+bool kvm_arch_intc_initialized(struct kvm *kvm)
+{
+ return vgic_initialized(kvm);
+}
+
static void vcpu_pause(struct kvm_vcpu *vcpu)
{
wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);
@@ -831,8 +851,6 @@ static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
switch (dev_id) {
case KVM_ARM_DEVICE_VGIC_V2:
- if (!vgic_present)
- return -ENXIO;
return kvm_vgic_addr(kvm, type, &dev_addr->addr, true);
default:
return -ENODEV;
@@ -847,10 +865,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
switch (ioctl) {
case KVM_CREATE_IRQCHIP: {
- if (vgic_present)
- return kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
- else
- return -ENXIO;
+ return kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
}
case KVM_ARM_SET_DEVICE_ADDR: {
struct kvm_arm_device_addr dev_addr;
@@ -1035,10 +1050,6 @@ static int init_hyp_mode(void)
if (err)
goto out_free_context;
-#ifdef CONFIG_KVM_ARM_VGIC
- vgic_present = true;
-#endif
-
/*
* Init HYP architected timer support
*/
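
With KVM_CAP_MP_STATE now advertised, userspace can stop and restart individual vCPUs through the standard ioctls; the implementation above simply maps the two states onto vcpu->arch.pause. A hypothetical userspace sequence (vcpu_fd is assumed to be an open KVM vCPU descriptor):

    struct kvm_mp_state st = { .mp_state = KVM_MP_STATE_STOPPED };

    if (ioctl(vcpu_fd, KVM_SET_MP_STATE, &st) < 0)  /* pause the vCPU */
            err(1, "KVM_SET_MP_STATE");
    if (ioctl(vcpu_fd, KVM_GET_MP_STATE, &st) < 0)  /* read it back */
            err(1, "KVM_GET_MP_STATE");
    /* st.mp_state == KVM_MP_STATE_STOPPED here */

Any state other than RUNNABLE or STOPPED is rejected with -EINVAL, as the switch statement above shows.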
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
index 384bab67c462..d503fbb787d3 100644
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -109,22 +109,6 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
return -EINVAL;
}
-#ifndef CONFIG_KVM_ARM_TIMER
-
-#define NUM_TIMER_REGS 0
-
-static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
-{
- return 0;
-}
-
-static bool is_timer_reg(u64 index)
-{
- return false;
-}
-
-#else
-
#define NUM_TIMER_REGS 3
static bool is_timer_reg(u64 index)
@@ -152,8 +136,6 @@ static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
return 0;
}
-#endif
-
static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
void __user *uaddr = (void __user *)(long)reg->addr;
diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
index 14d488388480..35e4a3a0c476 100644
--- a/arch/arm/kvm/interrupts_head.S
+++ b/arch/arm/kvm/interrupts_head.S
@@ -402,7 +402,6 @@ vcpu .req r0 @ vcpu pointer always in r0
* Assumes vcpu pointer in vcpu reg
*/
.macro save_vgic_state
-#ifdef CONFIG_KVM_ARM_VGIC
/* Get VGIC VCTRL base into r2 */
ldr r2, [vcpu, #VCPU_KVM]
ldr r2, [r2, #KVM_VGIC_VCTRL]
@@ -460,7 +459,6 @@ ARM_BE8(rev r6, r6 )
subs r4, r4, #1
bne 1b
2:
-#endif
.endm
/*
@@ -469,7 +467,6 @@ ARM_BE8(rev r6, r6 )
* Assumes vcpu pointer in vcpu reg
*/
.macro restore_vgic_state
-#ifdef CONFIG_KVM_ARM_VGIC
/* Get VGIC VCTRL base into r2 */
ldr r2, [vcpu, #VCPU_KVM]
ldr r2, [r2, #KVM_VGIC_VCTRL]
@@ -501,7 +498,6 @@ ARM_BE8(rev r6, r6 )
subs r4, r4, #1
bne 1b
2:
-#endif
.endm
#define CNTHCTL_PL1PCTEN (1 << 0)
@@ -515,7 +511,6 @@ ARM_BE8(rev r6, r6 )
* Clobbers r2-r5
*/
.macro save_timer_state
-#ifdef CONFIG_KVM_ARM_TIMER
ldr r4, [vcpu, #VCPU_KVM]
ldr r2, [r4, #KVM_TIMER_ENABLED]
cmp r2, #0
@@ -537,7 +532,6 @@ ARM_BE8(rev r6, r6 )
mcrr p15, 4, r2, r2, c14 @ CNTVOFF
1:
-#endif
@ Allow physical timer/counter access for the host
mrc p15, 4, r2, c14, c1, 0 @ CNTHCTL
orr r2, r2, #(CNTHCTL_PL1PCEN | CNTHCTL_PL1PCTEN)
@@ -559,7 +553,6 @@ ARM_BE8(rev r6, r6 )
bic r2, r2, #CNTHCTL_PL1PCEN
mcr p15, 4, r2, c14, c1, 0 @ CNTHCTL
-#ifdef CONFIG_KVM_ARM_TIMER
ldr r4, [vcpu, #VCPU_KVM]
ldr r2, [r4, #KVM_TIMER_ENABLED]
cmp r2, #0
@@ -579,7 +572,6 @@ ARM_BE8(rev r6, r6 )
and r2, r2, #3
mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL
1:
-#endif
.endm
.equ vmentry, 0
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index 5d3bfc0eb3f0..974b1c606d04 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -121,12 +121,11 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
return 0;
}
-static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
- struct kvm_exit_mmio *mmio)
+static int decode_hsr(struct kvm_vcpu *vcpu, bool *is_write, int *len)
{
unsigned long rt;
- int len;
- bool is_write, sign_extend;
+ int access_size;
+ bool sign_extend;
if (kvm_vcpu_dabt_isextabt(vcpu)) {
/* cache operation on I/O addr, tell guest unsupported */
@@ -140,17 +139,15 @@ static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
return 1;
}
- len = kvm_vcpu_dabt_get_as(vcpu);
- if (unlikely(len < 0))
- return len;
+ access_size = kvm_vcpu_dabt_get_as(vcpu);
+ if (unlikely(access_size < 0))
+ return access_size;
- is_write = kvm_vcpu_dabt_iswrite(vcpu);
+ *is_write = kvm_vcpu_dabt_iswrite(vcpu);
sign_extend = kvm_vcpu_dabt_issext(vcpu);
rt = kvm_vcpu_dabt_get_rd(vcpu);
- mmio->is_write = is_write;
- mmio->phys_addr = fault_ipa;
- mmio->len = len;
+ *len = access_size;
vcpu->arch.mmio_decode.sign_extend = sign_extend;
vcpu->arch.mmio_decode.rt = rt;
@@ -165,20 +162,20 @@ static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
phys_addr_t fault_ipa)
{
- struct kvm_exit_mmio mmio;
unsigned long data;
unsigned long rt;
int ret;
+ bool is_write;
+ int len;
+ u8 data_buf[8];
/*
- * Prepare MMIO operation. First stash it in a private
- * structure that we can use for in-kernel emulation. If the
- * kernel can't handle it, copy it into run->mmio and let user
- * space do its magic.
+ * Prepare MMIO operation. First decode the syndrome data we get
+ * from the CPU. Then check whether any in-kernel emulation claims
+ * the access; otherwise let user space do its magic.
*/
-
if (kvm_vcpu_dabt_isvalid(vcpu)) {
- ret = decode_hsr(vcpu, fault_ipa, &mmio);
+ ret = decode_hsr(vcpu, &is_write, &len);
if (ret)
return ret;
} else {
@@ -188,21 +185,34 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
rt = vcpu->arch.mmio_decode.rt;
- if (mmio.is_write) {
- data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt),
- mmio.len);
+ if (is_write) {
+ data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt), len);
+
+ trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, data);
+ mmio_write_buf(data_buf, len, data);
- trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, mmio.len,
- fault_ipa, data);
- mmio_write_buf(mmio.data, mmio.len, data);
+ ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len,
+ data_buf);
} else {
- trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, mmio.len,
+ trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, len,
fault_ipa, 0);
+
+ ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_ipa, len,
+ data_buf);
}
- if (vgic_handle_mmio(vcpu, run, &mmio))
+ /* Now prepare kvm_run for the potential return to userland. */
+ run->mmio.is_write = is_write;
+ run->mmio.phys_addr = fault_ipa;
+ run->mmio.len = len;
+ memcpy(run->mmio.data, data_buf, len);
+
+ if (!ret) {
+ /* We handled the access successfully in the kernel. */
+ kvm_handle_mmio_return(vcpu, run);
return 1;
+ }
- kvm_prepare_mmio(run, &mmio);
+ run->exit_reason = KVM_EXIT_MMIO;
return 0;
}
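
The rewritten tail of io_mem_abort() leans on the kvm_io_bus return convention: kvm_io_bus_write()/kvm_io_bus_read() return 0 when an in-kernel device registered on KVM_MMIO_BUS (such as the VGIC after this series) claims the access, and a negative errno when nothing matched. Reduced to its control flow:

    /* kvm_run is filled in either way, so userspace sees a coherent exit */
    if (!ret) {
            kvm_handle_mmio_return(vcpu, run);      /* complete the access */
            return 1;                               /* resume the guest */
    }
    run->exit_reason = KVM_EXIT_MMIO;
    return 0;                                       /* punt to userspace */

This is what lets the old vgic_handle_mmio() special case disappear: the VGIC becomes just another device on the MMIO bus.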
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 5656d79c5a44..15b050d46fc9 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -1330,10 +1330,51 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
out_unlock:
spin_unlock(&kvm->mmu_lock);
+ kvm_set_pfn_accessed(pfn);
kvm_release_pfn_clean(pfn);
return ret;
}
+/*
+ * Resolve the access fault by making the page young again.
+ * Note that because the faulting entry is guaranteed not to be
+ * cached in the TLB, we don't need to invalidate anything.
+ */
+static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
+{
+ pmd_t *pmd;
+ pte_t *pte;
+ pfn_t pfn;
+ bool pfn_valid = false;
+
+ trace_kvm_access_fault(fault_ipa);
+
+ spin_lock(&vcpu->kvm->mmu_lock);
+
+ pmd = stage2_get_pmd(vcpu->kvm, NULL, fault_ipa);
+ if (!pmd || pmd_none(*pmd)) /* Nothing there */
+ goto out;
+
+ if (kvm_pmd_huge(*pmd)) { /* THP, HugeTLB */
+ *pmd = pmd_mkyoung(*pmd);
+ pfn = pmd_pfn(*pmd);
+ pfn_valid = true;
+ goto out;
+ }
+
+ pte = pte_offset_kernel(pmd, fault_ipa);
+ if (pte_none(*pte)) /* Nothing there either */
+ goto out;
+
+ *pte = pte_mkyoung(*pte); /* Just a page... */
+ pfn = pte_pfn(*pte);
+ pfn_valid = true;
+out:
+ spin_unlock(&vcpu->kvm->mmu_lock);
+ if (pfn_valid)
+ kvm_set_pfn_accessed(pfn);
+}
+
/**
* kvm_handle_guest_abort - handles all 2nd stage aborts
* @vcpu: the VCPU pointer
@@ -1364,7 +1405,8 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
/* Check the stage-2 fault is trans. fault or write fault */
fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
- if (fault_status != FSC_FAULT && fault_status != FSC_PERM) {
+ if (fault_status != FSC_FAULT && fault_status != FSC_PERM &&
+ fault_status != FSC_ACCESS) {
kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
kvm_vcpu_trap_get_class(vcpu),
(unsigned long)kvm_vcpu_trap_get_fault(vcpu),
@@ -1400,6 +1442,12 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
/* Userspace should not be able to register out-of-bounds IPAs */
VM_BUG_ON(fault_ipa >= KVM_PHYS_SIZE);
+ if (fault_status == FSC_ACCESS) {
+ handle_access_fault(vcpu, fault_ipa);
+ ret = 1;
+ goto out_unlock;
+ }
+
ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
if (ret == 0)
ret = 1;
@@ -1408,15 +1456,16 @@ out_unlock:
return ret;
}
-static void handle_hva_to_gpa(struct kvm *kvm,
- unsigned long start,
- unsigned long end,
- void (*handler)(struct kvm *kvm,
- gpa_t gpa, void *data),
- void *data)
+static int handle_hva_to_gpa(struct kvm *kvm,
+ unsigned long start,
+ unsigned long end,
+ int (*handler)(struct kvm *kvm,
+ gpa_t gpa, void *data),
+ void *data)
{
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
+ int ret = 0;
slots = kvm_memslots(kvm);
@@ -1440,14 +1489,17 @@ static void handle_hva_to_gpa(struct kvm *kvm,
for (; gfn < gfn_end; ++gfn) {
gpa_t gpa = gfn << PAGE_SHIFT;
- handler(kvm, gpa, data);
+ ret |= handler(kvm, gpa, data);
}
}
+
+ return ret;
}
-static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
+static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
unmap_stage2_range(kvm, gpa, PAGE_SIZE);
+ return 0;
}
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
@@ -1473,7 +1525,7 @@ int kvm_unmap_hva_range(struct kvm *kvm,
return 0;
}
-static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
+static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
pte_t *pte = (pte_t *)data;
@@ -1485,6 +1537,7 @@ static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
* through this calling path.
*/
stage2_set_pte(kvm, NULL, gpa, pte, 0);
+ return 0;
}
@@ -1501,6 +1554,67 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
}
+static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
+{
+ pmd_t *pmd;
+ pte_t *pte;
+
+ pmd = stage2_get_pmd(kvm, NULL, gpa);
+ if (!pmd || pmd_none(*pmd)) /* Nothing there */
+ return 0;
+
+ if (kvm_pmd_huge(*pmd)) { /* THP, HugeTLB */
+ if (pmd_young(*pmd)) {
+ *pmd = pmd_mkold(*pmd);
+ return 1;
+ }
+
+ return 0;
+ }
+
+ pte = pte_offset_kernel(pmd, gpa);
+ if (pte_none(*pte))
+ return 0;
+
+ if (pte_young(*pte)) {
+ *pte = pte_mkold(*pte); /* Just a page... */
+ return 1;
+ }
+
+ return 0;
+}
+
+static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
+{
+ pmd_t *pmd;
+ pte_t *pte;
+
+ pmd = stage2_get_pmd(kvm, NULL, gpa);
+ if (!pmd || pmd_none(*pmd)) /* Nothing there */
+ return 0;
+
+ if (kvm_pmd_huge(*pmd)) /* THP, HugeTLB */
+ return pmd_young(*pmd);
+
+ pte = pte_offset_kernel(pmd, gpa);
+ if (!pte_none(*pte)) /* Just a page... */
+ return pte_young(*pte);
+
+ return 0;
+}
+
+int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
+{
+ trace_kvm_age_hva(start, end);
+ return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
+}
+
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+{
+ trace_kvm_test_age_hva(hva);
+ return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
+}
+
void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
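
These handlers plug into the generic MMU-notifier aging path: handle_hva_to_gpa() ORs the per-page results, so a nonzero return from kvm_age_hva() means at least one stage-2 entry was young and the backing page should stay off the reclaim list for now. On the core side this is consumed roughly as follows (simplified; the real code lives in virt/kvm/kvm_main.c):

    young = kvm_age_hva(kvm, start, end);   /* clears young bits, reports */
    if (young)
            kvm_flush_remote_tlbs(kvm);     /* referenced: flush stale TLBs */
    return young;                           /* feeds the LRU aging decision */

kvm_test_age_hva() is the non-destructive twin, and handle_access_fault() earlier in this file is the other half of the loop: once aging has cleared a young bit, the next guest access traps with FSC_ACCESS and is resolved by simply re-marking the entry young.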
diff --git a/arch/arm/kvm/trace.h b/arch/arm/kvm/trace.h
index 6817664b46b8..0ec35392d208 100644
--- a/arch/arm/kvm/trace.h
+++ b/arch/arm/kvm/trace.h
@@ -68,6 +68,21 @@ TRACE_EVENT(kvm_guest_fault,
__entry->hxfar, __entry->vcpu_pc)
);
+TRACE_EVENT(kvm_access_fault,
+ TP_PROTO(unsigned long ipa),
+ TP_ARGS(ipa),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, ipa )
+ ),
+
+ TP_fast_assign(
+ __entry->ipa = ipa;
+ ),
+
+ TP_printk("IPA: %lx", __entry->ipa)
+);
+
TRACE_EVENT(kvm_irq_line,
TP_PROTO(unsigned int type, int vcpu_idx, int irq_num, int level),
TP_ARGS(type, vcpu_idx, irq_num, level),
@@ -210,6 +225,39 @@ TRACE_EVENT(kvm_set_spte_hva,
TP_printk("mmu notifier set pte hva: %#08lx", __entry->hva)
);
+TRACE_EVENT(kvm_age_hva,
+ TP_PROTO(unsigned long start, unsigned long end),
+ TP_ARGS(start, end),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, start )
+ __field( unsigned long, end )
+ ),
+
+ TP_fast_assign(
+ __entry->start = start;
+ __entry->end = end;
+ ),
+
+ TP_printk("mmu notifier age hva: %#08lx -- %#08lx",
+ __entry->start, __entry->end)
+);
+
+TRACE_EVENT(kvm_test_age_hva,
+ TP_PROTO(unsigned long hva),
+ TP_ARGS(hva),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, hva )
+ ),
+
+ TP_fast_assign(
+ __entry->hva = hva;
+ ),
+
+ TP_printk("mmu notifier test age hva: %#08lx", __entry->hva)
+);
+
TRACE_EVENT(kvm_hvc,
TP_PROTO(unsigned long vcpu_pc, unsigned long r0, unsigned long imm),
TP_ARGS(vcpu_pc, r0, imm),
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index 2a2f4d56e4c8..25f1beea453e 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -720,6 +720,8 @@ static const char * __init omap_get_family(void)
return kasprintf(GFP_KERNEL, "OMAP4");
else if (soc_is_omap54xx())
return kasprintf(GFP_KERNEL, "OMAP5");
+ else if (soc_is_am33xx() || soc_is_am335x())
+ return kasprintf(GFP_KERNEL, "AM33xx");
else if (soc_is_am43xx())
return kasprintf(GFP_KERNEL, "AM43xx");
else if (soc_is_dra7xx())
diff --git a/arch/arm/mach-pxa/irq.c b/arch/arm/mach-pxa/irq.c
index 0eecd83c624e..89a7c06570d3 100644
--- a/arch/arm/mach-pxa/irq.c
+++ b/arch/arm/mach-pxa/irq.c
@@ -11,6 +11,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
@@ -40,7 +41,6 @@
#define ICHP_VAL_IRQ (1 << 31)
#define ICHP_IRQ(i) (((i) >> 16) & 0x7fff)
#define IPR_VALID (1 << 31)
-#define IRQ_BIT(n) (((n) - PXA_IRQ(0)) & 0x1f)
#define MAX_INTERNAL_IRQS 128
@@ -51,6 +51,7 @@
static void __iomem *pxa_irq_base;
static int pxa_internal_irq_nr;
static bool cpu_has_ipr;
+static struct irq_domain *pxa_irq_domain;
static inline void __iomem *irq_base(int i)
{
@@ -66,18 +67,20 @@ static inline void __iomem *irq_base(int i)
void pxa_mask_irq(struct irq_data *d)
{
void __iomem *base = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t irq = irqd_to_hwirq(d);
uint32_t icmr = __raw_readl(base + ICMR);
- icmr &= ~(1 << IRQ_BIT(d->irq));
+ icmr &= ~BIT(irq & 0x1f);
__raw_writel(icmr, base + ICMR);
}
void pxa_unmask_irq(struct irq_data *d)
{
void __iomem *base = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t irq = irqd_to_hwirq(d);
uint32_t icmr = __raw_readl(base + ICMR);
- icmr |= 1 << IRQ_BIT(d->irq);
+ icmr |= BIT(irq & 0x1f);
__raw_writel(icmr, base + ICMR);
}
@@ -118,40 +121,63 @@ asmlinkage void __exception_irq_entry ichp_handle_irq(struct pt_regs *regs)
} while (1);
}
-void __init pxa_init_irq(int irq_nr, int (*fn)(struct irq_data *, unsigned int))
+static int pxa_irq_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
{
- int irq, i, n;
+ void __iomem *base = irq_base(hw / 32);
- BUG_ON(irq_nr > MAX_INTERNAL_IRQS);
+ /* initialize interrupt priority */
+ if (cpu_has_ipr)
+ __raw_writel(hw | IPR_VALID, pxa_irq_base + IPR(hw));
+
+ irq_set_chip_and_handler(virq, &pxa_internal_irq_chip,
+ handle_level_irq);
+ irq_set_chip_data(virq, base);
+ set_irq_flags(virq, IRQF_VALID);
+
+ return 0;
+}
+
+static struct irq_domain_ops pxa_irq_ops = {
+ .map = pxa_irq_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static __init void
+pxa_init_irq_common(struct device_node *node, int irq_nr,
+ int (*fn)(struct irq_data *, unsigned int))
+{
+ int n;
pxa_internal_irq_nr = irq_nr;
- cpu_has_ipr = !cpu_is_pxa25x();
- pxa_irq_base = io_p2v(0x40d00000);
+ pxa_irq_domain = irq_domain_add_legacy(node, irq_nr,
+ PXA_IRQ(0), 0,
+ &pxa_irq_ops, NULL);
+ if (!pxa_irq_domain)
+ panic("Unable to add PXA IRQ domain\n");
+ irq_set_default_host(pxa_irq_domain);
for (n = 0; n < irq_nr; n += 32) {
void __iomem *base = irq_base(n >> 5);
__raw_writel(0, base + ICMR); /* disable all IRQs */
__raw_writel(0, base + ICLR); /* all IRQs are IRQ, not FIQ */
- for (i = n; (i < (n + 32)) && (i < irq_nr); i++) {
- /* initialize interrupt priority */
- if (cpu_has_ipr)
- __raw_writel(i | IPR_VALID, pxa_irq_base + IPR(i));
-
- irq = PXA_IRQ(i);
- irq_set_chip_and_handler(irq, &pxa_internal_irq_chip,
- handle_level_irq);
- irq_set_chip_data(irq, base);
- set_irq_flags(irq, IRQF_VALID);
- }
}
-
/* only unmasked interrupts kick us out of idle */
__raw_writel(1, irq_base(0) + ICCR);
pxa_internal_irq_chip.irq_set_wake = fn;
}
+void __init pxa_init_irq(int irq_nr, int (*fn)(struct irq_data *, unsigned int))
+{
+ BUG_ON(irq_nr > MAX_INTERNAL_IRQS);
+
+ pxa_irq_base = io_p2v(0x40d00000);
+ cpu_has_ipr = !cpu_is_pxa25x();
+ pxa_init_irq_common(NULL, irq_nr, fn);
+}
+
#ifdef CONFIG_PM
static unsigned long saved_icmr[MAX_INTERNAL_IRQS/32];
static unsigned long saved_ipr[MAX_INTERNAL_IRQS];
@@ -203,30 +229,6 @@ struct syscore_ops pxa_irq_syscore_ops = {
};
#ifdef CONFIG_OF
-static struct irq_domain *pxa_irq_domain;
-
-static int pxa_irq_map(struct irq_domain *h, unsigned int virq,
- irq_hw_number_t hw)
-{
- void __iomem *base = irq_base(hw / 32);
-
- /* initialize interrupt priority */
- if (cpu_has_ipr)
- __raw_writel(hw | IPR_VALID, pxa_irq_base + IPR(hw));
-
- irq_set_chip_and_handler(hw, &pxa_internal_irq_chip,
- handle_level_irq);
- irq_set_chip_data(hw, base);
- set_irq_flags(hw, IRQF_VALID);
-
- return 0;
-}
-
-static struct irq_domain_ops pxa_irq_ops = {
- .map = pxa_irq_map,
- .xlate = irq_domain_xlate_onecell,
-};
-
static const struct of_device_id intc_ids[] __initconst = {
{ .compatible = "marvell,pxa-intc", },
{}
@@ -236,7 +238,7 @@ void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int))
{
struct device_node *node;
struct resource res;
- int n, ret;
+ int ret;
node = of_find_matching_node(NULL, intc_ids);
if (!node) {
@@ -267,23 +269,6 @@ void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int))
return;
}
- pxa_irq_domain = irq_domain_add_legacy(node, pxa_internal_irq_nr, 0, 0,
- &pxa_irq_ops, NULL);
- if (!pxa_irq_domain)
- panic("Unable to add PXA IRQ domain\n");
-
- irq_set_default_host(pxa_irq_domain);
-
- for (n = 0; n < pxa_internal_irq_nr; n += 32) {
- void __iomem *base = irq_base(n >> 5);
-
- __raw_writel(0, base + ICMR); /* disable all IRQs */
- __raw_writel(0, base + ICLR); /* all IRQs are IRQ, not FIQ */
- }
-
- /* only unmasked interrupts kick us out of idle */
- __raw_writel(1, irq_base(0) + ICCR);
-
- pxa_internal_irq_chip.irq_set_wake = fn;
+ pxa_init_irq_common(node, pxa_internal_irq_nr, fn);
}
#endif /* CONFIG_OF */
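
Folding the DT and non-DT paths into pxa_init_irq_common() works because irq_domain_add_legacy() accepts a NULL device node; passing PXA_IRQ(0) as the first Linux irq (instead of the old 0) keeps the hwirq-to-virq mapping identical to the hand-rolled loop it replaces. Schematically (the same call as in the hunk, annotated):

    /* maps hwirqs [0, irq_nr) onto Linux irqs [PXA_IRQ(0), PXA_IRQ(irq_nr)) */
    pxa_irq_domain = irq_domain_add_legacy(node,        /* NULL on non-DT boards */
                                           irq_nr,      /* number of hwirqs */
                                           PXA_IRQ(0),  /* first Linux irq */
                                           0,           /* first hwirq */
                                           &pxa_irq_ops, NULL);

The per-irq chip setup then happens in the .map callback rather than an open-coded loop, which is why pxa_irq_map() and pxa_irq_ops moved out of the CONFIG_OF block.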
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c
index 205f9bf3821e..ac2ae5c71ab4 100644
--- a/arch/arm/mach-pxa/zeus.c
+++ b/arch/arm/mach-pxa/zeus.c
@@ -412,7 +412,7 @@ static struct fixed_voltage_config can_regulator_pdata = {
};
static struct platform_device can_regulator_device = {
- .name = "reg-fixed-volage",
+ .name = "reg-fixed-voltage",
.id = 0,
.dev = {
.platform_data = &can_regulator_pdata,
diff --git a/arch/arm/mach-sunxi/Kconfig b/arch/arm/mach-sunxi/Kconfig
index a77604fbaf25..81502b90dd91 100644
--- a/arch/arm/mach-sunxi/Kconfig
+++ b/arch/arm/mach-sunxi/Kconfig
@@ -1,10 +1,12 @@
menuconfig ARCH_SUNXI
bool "Allwinner SoCs" if ARCH_MULTI_V7
select ARCH_REQUIRE_GPIOLIB
+ select ARCH_HAS_RESET_CONTROLLER
select CLKSRC_MMIO
select GENERIC_IRQ_CHIP
select PINCTRL
select SUN4I_TIMER
+ select RESET_CONTROLLER
if ARCH_SUNXI
@@ -20,10 +22,8 @@ config MACH_SUN5I
config MACH_SUN6I
bool "Allwinner A31 (sun6i) SoCs support"
default ARCH_SUNXI
- select ARCH_HAS_RESET_CONTROLLER
select ARM_GIC
select MFD_SUN6I_PRCM
- select RESET_CONTROLLER
select SUN5I_HSTIMER
config MACH_SUN7I
@@ -37,16 +37,12 @@ config MACH_SUN7I
config MACH_SUN8I
bool "Allwinner A23 (sun8i) SoCs support"
default ARCH_SUNXI
- select ARCH_HAS_RESET_CONTROLLER
select ARM_GIC
select MFD_SUN6I_PRCM
- select RESET_CONTROLLER
config MACH_SUN9I
bool "Allwinner (sun9i) SoCs support"
default ARCH_SUNXI
- select ARCH_HAS_RESET_CONTROLLER
select ARM_GIC
- select RESET_CONTROLLER
endif
diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c
index db10169a08de..8ca94d379bc3 100644
--- a/arch/arm/plat-omap/dmtimer.c
+++ b/arch/arm/plat-omap/dmtimer.c
@@ -799,6 +799,7 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
const struct of_device_id *match;
const struct dmtimer_platform_data *pdata;
+ int ret;
match = of_match_device(of_match_ptr(omap_timer_match), dev);
pdata = match ? match->data : dev->platform_data;
@@ -860,7 +861,12 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
}
if (!timer->reserved) {
- pm_runtime_get_sync(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "%s: pm_runtime_get_sync failed!\n",
+ __func__);
+ goto err_get_sync;
+ }
__omap_dm_timer_init_regs(timer);
pm_runtime_put(dev);
}
@@ -873,6 +879,11 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
dev_dbg(dev, "Device Probed.\n");
return 0;
+
+err_get_sync:
+ pm_runtime_put_noidle(dev);
+ pm_runtime_disable(dev);
+ return ret;
}
/**
@@ -899,6 +910,8 @@ static int omap_dm_timer_remove(struct platform_device *pdev)
}
spin_unlock_irqrestore(&dm_timer_lock, flags);
+ pm_runtime_disable(&pdev->dev);
+
return ret;
}
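
The added error path matters because of a pm_runtime subtlety: pm_runtime_get_sync() raises the device usage count even when it fails, so returning without a put leaks a reference and can pin the device active forever. The recovery idiom the hunk follows (a generic sketch, not specific to this driver):

    ret = pm_runtime_get_sync(dev);
    if (ret < 0) {
            pm_runtime_put_noidle(dev);  /* drop the count taken despite failure */
            pm_runtime_disable(dev);     /* undo the earlier pm_runtime_enable() */
            return ret;
    }

pm_runtime_put_noidle() is chosen over pm_runtime_put() because the device never actually resumed, so there is nothing to idle.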
diff --git a/arch/arm64/boot/dts/arm/juno-clocks.dtsi b/arch/arm64/boot/dts/arm/juno-clocks.dtsi
index ea2b5666a16f..c9b89efe0f56 100644
--- a/arch/arm64/boot/dts/arm/juno-clocks.dtsi
+++ b/arch/arm64/boot/dts/arm/juno-clocks.dtsi
@@ -8,7 +8,7 @@
*/
/* SoC fixed clocks */
- soc_uartclk: refclk72738khz {
+ soc_uartclk: refclk7273800hz {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <7273800>;
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index cb9593079f29..d8c25b7b18fb 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -246,14 +246,30 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
__ret; \
})
-#define this_cpu_cmpxchg_1(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
-#define this_cpu_cmpxchg_2(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
-#define this_cpu_cmpxchg_4(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
-#define this_cpu_cmpxchg_8(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
-
-#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \
- cmpxchg_double_local(raw_cpu_ptr(&(ptr1)), raw_cpu_ptr(&(ptr2)), \
- o1, o2, n1, n2)
+#define _protect_cmpxchg_local(pcp, o, n) \
+({ \
+ typeof(*raw_cpu_ptr(&(pcp))) __ret; \
+ preempt_disable(); \
+ __ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n); \
+ preempt_enable(); \
+ __ret; \
+})
+
+#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+
+#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \
+({ \
+ int __ret; \
+ preempt_disable(); \
+ __ret = cmpxchg_double_local(raw_cpu_ptr(&(ptr1)), \
+ raw_cpu_ptr(&(ptr2)), \
+ o1, o2, n1, n2); \
+ preempt_enable(); \
+ __ret; \
+})
#define cmpxchg64(ptr,o,n) cmpxchg((ptr),(o),(n))
#define cmpxchg64_local(ptr,o,n) cmpxchg_local((ptr),(o),(n))
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 92bbae381598..70522450ca23 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -90,6 +90,7 @@
#define ESR_ELx_FSC (0x3F)
#define ESR_ELx_FSC_TYPE (0x3C)
#define ESR_ELx_FSC_EXTABT (0x10)
+#define ESR_ELx_FSC_ACCESS (0x08)
#define ESR_ELx_FSC_FAULT (0x04)
#define ESR_ELx_FSC_PERM (0x0C)
#define ESR_ELx_CV (UL(1) << 24)
diff --git a/arch/arm64/include/asm/jump_label.h b/arch/arm64/include/asm/jump_label.h
index 076a1c714049..c0e5165c2f76 100644
--- a/arch/arm64/include/asm/jump_label.h
+++ b/arch/arm64/include/asm/jump_label.h
@@ -18,11 +18,12 @@
*/
#ifndef __ASM_JUMP_LABEL_H
#define __ASM_JUMP_LABEL_H
+
+#ifndef __ASSEMBLY__
+
#include <linux/types.h>
#include <asm/insn.h>
-#ifdef __KERNEL__
-
#define JUMP_LABEL_NOP_SIZE AARCH64_INSN_SIZE
static __always_inline bool arch_static_branch(struct static_key *key)
@@ -39,8 +40,6 @@ l_yes:
return true;
}
-#endif /* __KERNEL__ */
-
typedef u64 jump_label_t;
struct jump_entry {
@@ -49,4 +48,5 @@ struct jump_entry {
jump_label_t key;
};
+#endif /* __ASSEMBLY__ */
#endif /* __ASM_JUMP_LABEL_H */
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 54bb4ba97441..ac6fafb95fe7 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -188,6 +188,7 @@
/* For compatibility with fault code shared with 32-bit */
#define FSC_FAULT ESR_ELx_FSC_FAULT
+#define FSC_ACCESS ESR_ELx_FSC_ACCESS
#define FSC_PERM ESR_ELx_FSC_PERM
/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 8ac3c70fe3c6..f0f58c9beec0 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -28,6 +28,8 @@
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
+#define __KVM_HAVE_ARCH_INTC_INITIALIZED
+
#if defined(CONFIG_KVM_ARM_MAX_VCPUS)
#define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS
#else
@@ -177,19 +179,10 @@ int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
int kvm_unmap_hva_range(struct kvm *kvm,
unsigned long start, unsigned long end);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
/* We do not have shadow page tables, hence the empty hooks */
-static inline int kvm_age_hva(struct kvm *kvm, unsigned long start,
- unsigned long end)
-{
- return 0;
-}
-
-static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
-{
- return 0;
-}
-
static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
unsigned long address)
{
diff --git a/arch/arm64/include/asm/kvm_mmio.h b/arch/arm64/include/asm/kvm_mmio.h
index 9f52beb7cb13..889c908ee631 100644
--- a/arch/arm64/include/asm/kvm_mmio.h
+++ b/arch/arm64/include/asm/kvm_mmio.h
@@ -31,28 +31,6 @@ struct kvm_decode {
bool sign_extend;
};
-/*
- * The in-kernel MMIO emulation code wants to use a copy of run->mmio,
- * which is an anonymous type. Use our own type instead.
- */
-struct kvm_exit_mmio {
- phys_addr_t phys_addr;
- u8 data[8];
- u32 len;
- bool is_write;
- void *private;
-};
-
-static inline void kvm_prepare_mmio(struct kvm_run *run,
- struct kvm_exit_mmio *mmio)
-{
- run->mmio.phys_addr = mmio->phys_addr;
- run->mmio.len = mmio->len;
- run->mmio.is_write = mmio->is_write;
- memcpy(run->mmio.data, mmio->data, mmio->len);
- run->exit_reason = KVM_EXIT_MMIO;
-}
-
int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
phys_addr_t fault_ipa);
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index a9eee33dfa62..101a42bde728 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -151,6 +151,15 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
{
unsigned int cpu = smp_processor_id();
+ /*
+ * init_mm.pgd does not contain any user mappings and it is always
+ * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
+ */
+ if (next == &init_mm) {
+ cpu_set_reserved_ttbr0();
+ return;
+ }
+
if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next)
check_and_switch_context(next, tsk);
}
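
cpu_set_reserved_ttbr0() points TTBR0_EL1 at the empty zero page, so any stray user-VA access faults instead of walking the outgoing task's tables; since init_mm has no user mappings, this short-circuit is both safe and cheaper than a full context switch. Its shape at the time, abridged from the same header:

    static inline void cpu_set_reserved_ttbr0(void)
    {
            unsigned long ttbr = page_to_phys(empty_zero_page);

            asm("msr ttbr0_el1, %0  // set TTBR0\n\tisb" : : "r" (ttbr));
    }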
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 09da25bc596f..4fde8c1df97f 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -204,25 +204,47 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
return ret;
}
+#define _percpu_read(pcp) \
+({ \
+ typeof(pcp) __retval; \
+ preempt_disable(); \
+ __retval = (typeof(pcp))__percpu_read(raw_cpu_ptr(&(pcp)), \
+ sizeof(pcp)); \
+ preempt_enable(); \
+ __retval; \
+})
+
+#define _percpu_write(pcp, val) \
+do { \
+ preempt_disable(); \
+ __percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val), \
+ sizeof(pcp)); \
+ preempt_enable(); \
+} while (0)
+
+#define _pcp_protect(operation, pcp, val) \
+({ \
+ typeof(pcp) __retval; \
+ preempt_disable(); \
+ __retval = (typeof(pcp))operation(raw_cpu_ptr(&(pcp)), \
+ (val), sizeof(pcp)); \
+ preempt_enable(); \
+ __retval; \
+})
+
#define _percpu_add(pcp, val) \
- __percpu_add(raw_cpu_ptr(&(pcp)), val, sizeof(pcp))
+ _pcp_protect(__percpu_add, pcp, val)
-#define _percpu_add_return(pcp, val) (typeof(pcp)) (_percpu_add(pcp, val))
+#define _percpu_add_return(pcp, val) _percpu_add(pcp, val)
#define _percpu_and(pcp, val) \
- __percpu_and(raw_cpu_ptr(&(pcp)), val, sizeof(pcp))
+ _pcp_protect(__percpu_and, pcp, val)
#define _percpu_or(pcp, val) \
- __percpu_or(raw_cpu_ptr(&(pcp)), val, sizeof(pcp))
-
-#define _percpu_read(pcp) (typeof(pcp)) \
- (__percpu_read(raw_cpu_ptr(&(pcp)), sizeof(pcp)))
-
-#define _percpu_write(pcp, val) \
- __percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val), sizeof(pcp))
+ _pcp_protect(__percpu_or, pcp, val)
#define _percpu_xchg(pcp, val) (typeof(pcp)) \
- (__percpu_xchg(raw_cpu_ptr(&(pcp)), (unsigned long)(val), sizeof(pcp)))
+ _pcp_protect(__percpu_xchg, pcp, (unsigned long)(val))
#define this_cpu_add_1(pcp, val) _percpu_add(pcp, val)
#define this_cpu_add_2(pcp, val) _percpu_add(pcp, val)
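
This file and the cmpxchg.h hunk above close the same hole: the old definitions resolved raw_cpu_ptr() and then performed the read-modify-write without disabling preemption, so a task could migrate between the two steps and update another CPU's copy while that CPU raced on the same variable. The window, annotated (illustrative, not kernel code):

    unsigned long *p = raw_cpu_ptr(&counter);   /* resolved on CPU0 */
    /* <-- without preempt_disable(), migration to CPU1 can happen here */
    __percpu_add(p, 1, sizeof(counter));        /* still writes CPU0's copy */

Wrapping each pair in preempt_disable()/preempt_enable(), as _pcp_protect() and _protect_cmpxchg_local() do, keeps pointer resolution and update on one CPU.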
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 3ef77a466018..c154c0b7eb60 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -191,6 +191,9 @@ struct kvm_arch_memory_slot {
/* Highest supported SPI, from VGIC_NR_IRQS */
#define KVM_ARM_IRQ_GIC_MAX 127
+/* A single KVM irqchip, i.e. the VGIC */
+#define KVM_NR_IRQCHIPS 1
+
/* PSCI interface */
#define KVM_PSCI_FN_BASE 0x95c1ba5e
#define KVM_PSCI_FN(n) (KVM_PSCI_FN_BASE + (n))
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index f5590c81d95f..5105e297ed5f 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -18,6 +18,7 @@ if VIRTUALIZATION
config KVM
bool "Kernel-based Virtual Machine (KVM) support"
+ depends on OF
select MMU_NOTIFIER
select PREEMPT_NOTIFIERS
select ANON_INODES
@@ -25,10 +26,10 @@ config KVM
select HAVE_KVM_ARCH_TLB_FLUSH_ALL
select KVM_MMIO
select KVM_ARM_HOST
- select KVM_ARM_VGIC
- select KVM_ARM_TIMER
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
select SRCU
+ select HAVE_KVM_EVENTFD
+ select HAVE_KVM_IRQFD
---help---
Support hosting virtualized guest machines.
@@ -50,17 +51,4 @@ config KVM_ARM_MAX_VCPUS
large, so only choose a reasonable number that you expect to
actually use.
-config KVM_ARM_VGIC
- bool
- depends on KVM_ARM_HOST && OF
- select HAVE_KVM_IRQCHIP
- ---help---
- Adds support for a hardware assisted, in-kernel GIC emulation.
-
-config KVM_ARM_TIMER
- bool
- depends on KVM_ARM_VGIC
- ---help---
- Adds support for the Architected Timers in virtual machines.
-
endif # VIRTUALIZATION
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 4e6e09ee4033..d5904f876cdb 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -2,7 +2,7 @@
# Makefile for Kernel-based Virtual Machine module
#
-ccflags-y += -Ivirt/kvm -Iarch/arm64/kvm
+ccflags-y += -Iarch/arm64/kvm
CFLAGS_arm.o := -I.
CFLAGS_mmu.o := -I.
@@ -11,7 +11,7 @@ ARM=../../../arch/arm/kvm
obj-$(CONFIG_KVM_ARM_HOST) += kvm.o
-kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/arm.o $(ARM)/mmu.o $(ARM)/mmio.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/psci.o $(ARM)/perf.o
@@ -19,11 +19,11 @@ kvm-$(CONFIG_KVM_ARM_HOST) += emulate.o inject_fault.o regmap.o
kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
kvm-$(CONFIG_KVM_ARM_HOST) += guest.o reset.o sys_regs.o sys_regs_generic_v8.o
-kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
-kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o
-kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2-emul.o
-kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v2-switch.o
-kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v3.o
-kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v3-emul.o
-kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v3-switch.o
-kvm-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2-emul.o
+kvm-$(CONFIG_KVM_ARM_HOST) += vgic-v2-switch.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3-emul.o
+kvm-$(CONFIG_KVM_ARM_HOST) += vgic-v3-switch.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/arch_timer.o
diff --git a/arch/metag/include/asm/io.h b/arch/metag/include/asm/io.h
index 9359e5048442..d5779b0ec573 100644
--- a/arch/metag/include/asm/io.h
+++ b/arch/metag/include/asm/io.h
@@ -2,6 +2,7 @@
#define _ASM_METAG_IO_H
#include <linux/types.h>
+#include <asm/pgtable-bits.h>
#define IO_SPACE_LIMIT 0
diff --git a/arch/metag/include/asm/pgtable-bits.h b/arch/metag/include/asm/pgtable-bits.h
new file mode 100644
index 000000000000..25ba6729f496
--- /dev/null
+++ b/arch/metag/include/asm/pgtable-bits.h
@@ -0,0 +1,104 @@
+/*
+ * Meta page table definitions.
+ */
+
+#ifndef _METAG_PGTABLE_BITS_H
+#define _METAG_PGTABLE_BITS_H
+
+#include <asm/metag_mem.h>
+
+/*
+ * Definitions for MMU descriptors
+ *
+ * These are the hardware bits in the MMCU pte entries.
+ * Derived from the Meta toolkit headers.
+ */
+#define _PAGE_PRESENT MMCU_ENTRY_VAL_BIT
+#define _PAGE_WRITE MMCU_ENTRY_WR_BIT
+#define _PAGE_PRIV MMCU_ENTRY_PRIV_BIT
+/* Write combine bit - this can cause writes to occur out of order */
+#define _PAGE_WR_COMBINE MMCU_ENTRY_WRC_BIT
+/* Sys coherent bit - this bit is never used by Linux */
+#define _PAGE_SYS_COHERENT MMCU_ENTRY_SYS_BIT
+#define _PAGE_ALWAYS_ZERO_1 0x020
+#define _PAGE_CACHE_CTRL0 0x040
+#define _PAGE_CACHE_CTRL1 0x080
+#define _PAGE_ALWAYS_ZERO_2 0x100
+#define _PAGE_ALWAYS_ZERO_3 0x200
+#define _PAGE_ALWAYS_ZERO_4 0x400
+#define _PAGE_ALWAYS_ZERO_5 0x800
+
+/* These are software bits that we stuff into the gaps in the hardware
+ * pte entries that are not used. Note, these DO get stored in the actual
+ * hardware, but the hardware just does not use them.
+ */
+#define _PAGE_ACCESSED _PAGE_ALWAYS_ZERO_1
+#define _PAGE_DIRTY _PAGE_ALWAYS_ZERO_2
+
+/* Pages owned, and protected by, the kernel. */
+#define _PAGE_KERNEL _PAGE_PRIV
+
+/* No cacheing of this page */
+#define _PAGE_CACHE_WIN0 (MMCU_CWIN_UNCACHED << MMCU_ENTRY_CWIN_S)
+/* burst cacheing - good for data streaming */
+#define _PAGE_CACHE_WIN1 (MMCU_CWIN_BURST << MMCU_ENTRY_CWIN_S)
+/* One cache way per thread */
+#define _PAGE_CACHE_WIN2 (MMCU_CWIN_C1SET << MMCU_ENTRY_CWIN_S)
+/* Full on cacheing */
+#define _PAGE_CACHE_WIN3 (MMCU_CWIN_CACHED << MMCU_ENTRY_CWIN_S)
+
+#define _PAGE_CACHEABLE (_PAGE_CACHE_WIN3 | _PAGE_WR_COMBINE)
+
+/* which bits are used for cache control ... */
+#define _PAGE_CACHE_MASK (_PAGE_CACHE_CTRL0 | _PAGE_CACHE_CTRL1 | \
+ _PAGE_WR_COMBINE)
+
+/* This is a mask of the bits that pte_modify is allowed to change. */
+#define _PAGE_CHG_MASK (PAGE_MASK)
+
+#define _PAGE_SZ_SHIFT 1
+#define _PAGE_SZ_4K (0x0)
+#define _PAGE_SZ_8K (0x1 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_16K (0x2 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_32K (0x3 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_64K (0x4 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_128K (0x5 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_256K (0x6 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_512K (0x7 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_1M (0x8 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_2M (0x9 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_4M (0xa << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_MASK (0xf << _PAGE_SZ_SHIFT)
+
+#if defined(CONFIG_PAGE_SIZE_4K)
+#define _PAGE_SZ (_PAGE_SZ_4K)
+#elif defined(CONFIG_PAGE_SIZE_8K)
+#define _PAGE_SZ (_PAGE_SZ_8K)
+#elif defined(CONFIG_PAGE_SIZE_16K)
+#define _PAGE_SZ (_PAGE_SZ_16K)
+#endif
+#define _PAGE_TABLE (_PAGE_SZ | _PAGE_PRESENT)
+
+#if defined(CONFIG_HUGETLB_PAGE_SIZE_8K)
+# define _PAGE_SZHUGE (_PAGE_SZ_8K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_16K)
+# define _PAGE_SZHUGE (_PAGE_SZ_16K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_32K)
+# define _PAGE_SZHUGE (_PAGE_SZ_32K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
+# define _PAGE_SZHUGE (_PAGE_SZ_64K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_128K)
+# define _PAGE_SZHUGE (_PAGE_SZ_128K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
+# define _PAGE_SZHUGE (_PAGE_SZ_256K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
+# define _PAGE_SZHUGE (_PAGE_SZ_512K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1M)
+# define _PAGE_SZHUGE (_PAGE_SZ_1M)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_2M)
+# define _PAGE_SZHUGE (_PAGE_SZ_2M)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4M)
+# define _PAGE_SZHUGE (_PAGE_SZ_4M)
+#endif
+
+#endif /* _METAG_PGTABLE_BITS_H */
diff --git a/arch/metag/include/asm/pgtable.h b/arch/metag/include/asm/pgtable.h
index d0604c0a8702..ffa3a3a2ecad 100644
--- a/arch/metag/include/asm/pgtable.h
+++ b/arch/metag/include/asm/pgtable.h
@@ -5,6 +5,7 @@
#ifndef _METAG_PGTABLE_H
#define _METAG_PGTABLE_H
+#include <asm/pgtable-bits.h>
#include <asm-generic/pgtable-nopmd.h>
/* Invalid regions on Meta: 0x00000000-0x001FFFFF and 0xFFFF0000-0xFFFFFFFF */
@@ -21,100 +22,6 @@
#endif
/*
- * Definitions for MMU descriptors
- *
- * These are the hardware bits in the MMCU pte entries.
- * Derived from the Meta toolkit headers.
- */
-#define _PAGE_PRESENT MMCU_ENTRY_VAL_BIT
-#define _PAGE_WRITE MMCU_ENTRY_WR_BIT
-#define _PAGE_PRIV MMCU_ENTRY_PRIV_BIT
-/* Write combine bit - this can cause writes to occur out of order */
-#define _PAGE_WR_COMBINE MMCU_ENTRY_WRC_BIT
-/* Sys coherent bit - this bit is never used by Linux */
-#define _PAGE_SYS_COHERENT MMCU_ENTRY_SYS_BIT
-#define _PAGE_ALWAYS_ZERO_1 0x020
-#define _PAGE_CACHE_CTRL0 0x040
-#define _PAGE_CACHE_CTRL1 0x080
-#define _PAGE_ALWAYS_ZERO_2 0x100
-#define _PAGE_ALWAYS_ZERO_3 0x200
-#define _PAGE_ALWAYS_ZERO_4 0x400
-#define _PAGE_ALWAYS_ZERO_5 0x800
-
-/* These are software bits that we stuff into the gaps in the hardware
- * pte entries that are not used. Note, these DO get stored in the actual
- * hardware, but the hardware just does not use them.
- */
-#define _PAGE_ACCESSED _PAGE_ALWAYS_ZERO_1
-#define _PAGE_DIRTY _PAGE_ALWAYS_ZERO_2
-
-/* Pages owned, and protected by, the kernel. */
-#define _PAGE_KERNEL _PAGE_PRIV
-
-/* No cacheing of this page */
-#define _PAGE_CACHE_WIN0 (MMCU_CWIN_UNCACHED << MMCU_ENTRY_CWIN_S)
-/* burst cacheing - good for data streaming */
-#define _PAGE_CACHE_WIN1 (MMCU_CWIN_BURST << MMCU_ENTRY_CWIN_S)
-/* One cache way per thread */
-#define _PAGE_CACHE_WIN2 (MMCU_CWIN_C1SET << MMCU_ENTRY_CWIN_S)
-/* Full on cacheing */
-#define _PAGE_CACHE_WIN3 (MMCU_CWIN_CACHED << MMCU_ENTRY_CWIN_S)
-
-#define _PAGE_CACHEABLE (_PAGE_CACHE_WIN3 | _PAGE_WR_COMBINE)
-
-/* which bits are used for cache control ... */
-#define _PAGE_CACHE_MASK (_PAGE_CACHE_CTRL0 | _PAGE_CACHE_CTRL1 | \
- _PAGE_WR_COMBINE)
-
-/* This is a mask of the bits that pte_modify is allowed to change. */
-#define _PAGE_CHG_MASK (PAGE_MASK)
-
-#define _PAGE_SZ_SHIFT 1
-#define _PAGE_SZ_4K (0x0)
-#define _PAGE_SZ_8K (0x1 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_16K (0x2 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_32K (0x3 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_64K (0x4 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_128K (0x5 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_256K (0x6 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_512K (0x7 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_1M (0x8 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_2M (0x9 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_4M (0xa << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_MASK (0xf << _PAGE_SZ_SHIFT)
-
-#if defined(CONFIG_PAGE_SIZE_4K)
-#define _PAGE_SZ (_PAGE_SZ_4K)
-#elif defined(CONFIG_PAGE_SIZE_8K)
-#define _PAGE_SZ (_PAGE_SZ_8K)
-#elif defined(CONFIG_PAGE_SIZE_16K)
-#define _PAGE_SZ (_PAGE_SZ_16K)
-#endif
-#define _PAGE_TABLE (_PAGE_SZ | _PAGE_PRESENT)
-
-#if defined(CONFIG_HUGETLB_PAGE_SIZE_8K)
-# define _PAGE_SZHUGE (_PAGE_SZ_8K)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_16K)
-# define _PAGE_SZHUGE (_PAGE_SZ_16K)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_32K)
-# define _PAGE_SZHUGE (_PAGE_SZ_32K)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
-# define _PAGE_SZHUGE (_PAGE_SZ_64K)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_128K)
-# define _PAGE_SZHUGE (_PAGE_SZ_128K)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
-# define _PAGE_SZHUGE (_PAGE_SZ_256K)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
-# define _PAGE_SZHUGE (_PAGE_SZ_512K)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1M)
-# define _PAGE_SZHUGE (_PAGE_SZ_1M)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_2M)
-# define _PAGE_SZHUGE (_PAGE_SZ_2M)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4M)
-# define _PAGE_SZHUGE (_PAGE_SZ_4M)
-#endif
-
-/*
* The Linux memory management assumes a three-level page table setup. On
* Meta, we use that, but "fold" the mid level into the top-level page
* table.
diff --git a/arch/mips/include/asm/asmmacro-32.h b/arch/mips/include/asm/asmmacro-32.h
index cdac7b3eeaf7..80386470d3a4 100644
--- a/arch/mips/include/asm/asmmacro-32.h
+++ b/arch/mips/include/asm/asmmacro-32.h
@@ -16,38 +16,38 @@
.set push
SET_HARDFLOAT
cfc1 \tmp, fcr31
- swc1 $f0, THREAD_FPR0_LS64(\thread)
- swc1 $f1, THREAD_FPR1_LS64(\thread)
- swc1 $f2, THREAD_FPR2_LS64(\thread)
- swc1 $f3, THREAD_FPR3_LS64(\thread)
- swc1 $f4, THREAD_FPR4_LS64(\thread)
- swc1 $f5, THREAD_FPR5_LS64(\thread)
- swc1 $f6, THREAD_FPR6_LS64(\thread)
- swc1 $f7, THREAD_FPR7_LS64(\thread)
- swc1 $f8, THREAD_FPR8_LS64(\thread)
- swc1 $f9, THREAD_FPR9_LS64(\thread)
- swc1 $f10, THREAD_FPR10_LS64(\thread)
- swc1 $f11, THREAD_FPR11_LS64(\thread)
- swc1 $f12, THREAD_FPR12_LS64(\thread)
- swc1 $f13, THREAD_FPR13_LS64(\thread)
- swc1 $f14, THREAD_FPR14_LS64(\thread)
- swc1 $f15, THREAD_FPR15_LS64(\thread)
- swc1 $f16, THREAD_FPR16_LS64(\thread)
- swc1 $f17, THREAD_FPR17_LS64(\thread)
- swc1 $f18, THREAD_FPR18_LS64(\thread)
- swc1 $f19, THREAD_FPR19_LS64(\thread)
- swc1 $f20, THREAD_FPR20_LS64(\thread)
- swc1 $f21, THREAD_FPR21_LS64(\thread)
- swc1 $f22, THREAD_FPR22_LS64(\thread)
- swc1 $f23, THREAD_FPR23_LS64(\thread)
- swc1 $f24, THREAD_FPR24_LS64(\thread)
- swc1 $f25, THREAD_FPR25_LS64(\thread)
- swc1 $f26, THREAD_FPR26_LS64(\thread)
- swc1 $f27, THREAD_FPR27_LS64(\thread)
- swc1 $f28, THREAD_FPR28_LS64(\thread)
- swc1 $f29, THREAD_FPR29_LS64(\thread)
- swc1 $f30, THREAD_FPR30_LS64(\thread)
- swc1 $f31, THREAD_FPR31_LS64(\thread)
+ swc1 $f0, THREAD_FPR0(\thread)
+ swc1 $f1, THREAD_FPR1(\thread)
+ swc1 $f2, THREAD_FPR2(\thread)
+ swc1 $f3, THREAD_FPR3(\thread)
+ swc1 $f4, THREAD_FPR4(\thread)
+ swc1 $f5, THREAD_FPR5(\thread)
+ swc1 $f6, THREAD_FPR6(\thread)
+ swc1 $f7, THREAD_FPR7(\thread)
+ swc1 $f8, THREAD_FPR8(\thread)
+ swc1 $f9, THREAD_FPR9(\thread)
+ swc1 $f10, THREAD_FPR10(\thread)
+ swc1 $f11, THREAD_FPR11(\thread)
+ swc1 $f12, THREAD_FPR12(\thread)
+ swc1 $f13, THREAD_FPR13(\thread)
+ swc1 $f14, THREAD_FPR14(\thread)
+ swc1 $f15, THREAD_FPR15(\thread)
+ swc1 $f16, THREAD_FPR16(\thread)
+ swc1 $f17, THREAD_FPR17(\thread)
+ swc1 $f18, THREAD_FPR18(\thread)
+ swc1 $f19, THREAD_FPR19(\thread)
+ swc1 $f20, THREAD_FPR20(\thread)
+ swc1 $f21, THREAD_FPR21(\thread)
+ swc1 $f22, THREAD_FPR22(\thread)
+ swc1 $f23, THREAD_FPR23(\thread)
+ swc1 $f24, THREAD_FPR24(\thread)
+ swc1 $f25, THREAD_FPR25(\thread)
+ swc1 $f26, THREAD_FPR26(\thread)
+ swc1 $f27, THREAD_FPR27(\thread)
+ swc1 $f28, THREAD_FPR28(\thread)
+ swc1 $f29, THREAD_FPR29(\thread)
+ swc1 $f30, THREAD_FPR30(\thread)
+ swc1 $f31, THREAD_FPR31(\thread)
sw \tmp, THREAD_FCR31(\thread)
.set pop
.endm
@@ -56,38 +56,38 @@
.set push
SET_HARDFLOAT
lw \tmp, THREAD_FCR31(\thread)
- lwc1 $f0, THREAD_FPR0_LS64(\thread)
- lwc1 $f1, THREAD_FPR1_LS64(\thread)
- lwc1 $f2, THREAD_FPR2_LS64(\thread)
- lwc1 $f3, THREAD_FPR3_LS64(\thread)
- lwc1 $f4, THREAD_FPR4_LS64(\thread)
- lwc1 $f5, THREAD_FPR5_LS64(\thread)
- lwc1 $f6, THREAD_FPR6_LS64(\thread)
- lwc1 $f7, THREAD_FPR7_LS64(\thread)
- lwc1 $f8, THREAD_FPR8_LS64(\thread)
- lwc1 $f9, THREAD_FPR9_LS64(\thread)
- lwc1 $f10, THREAD_FPR10_LS64(\thread)
- lwc1 $f11, THREAD_FPR11_LS64(\thread)
- lwc1 $f12, THREAD_FPR12_LS64(\thread)
- lwc1 $f13, THREAD_FPR13_LS64(\thread)
- lwc1 $f14, THREAD_FPR14_LS64(\thread)
- lwc1 $f15, THREAD_FPR15_LS64(\thread)
- lwc1 $f16, THREAD_FPR16_LS64(\thread)
- lwc1 $f17, THREAD_FPR17_LS64(\thread)
- lwc1 $f18, THREAD_FPR18_LS64(\thread)
- lwc1 $f19, THREAD_FPR19_LS64(\thread)
- lwc1 $f20, THREAD_FPR20_LS64(\thread)
- lwc1 $f21, THREAD_FPR21_LS64(\thread)
- lwc1 $f22, THREAD_FPR22_LS64(\thread)
- lwc1 $f23, THREAD_FPR23_LS64(\thread)
- lwc1 $f24, THREAD_FPR24_LS64(\thread)
- lwc1 $f25, THREAD_FPR25_LS64(\thread)
- lwc1 $f26, THREAD_FPR26_LS64(\thread)
- lwc1 $f27, THREAD_FPR27_LS64(\thread)
- lwc1 $f28, THREAD_FPR28_LS64(\thread)
- lwc1 $f29, THREAD_FPR29_LS64(\thread)
- lwc1 $f30, THREAD_FPR30_LS64(\thread)
- lwc1 $f31, THREAD_FPR31_LS64(\thread)
+ lwc1 $f0, THREAD_FPR0(\thread)
+ lwc1 $f1, THREAD_FPR1(\thread)
+ lwc1 $f2, THREAD_FPR2(\thread)
+ lwc1 $f3, THREAD_FPR3(\thread)
+ lwc1 $f4, THREAD_FPR4(\thread)
+ lwc1 $f5, THREAD_FPR5(\thread)
+ lwc1 $f6, THREAD_FPR6(\thread)
+ lwc1 $f7, THREAD_FPR7(\thread)
+ lwc1 $f8, THREAD_FPR8(\thread)
+ lwc1 $f9, THREAD_FPR9(\thread)
+ lwc1 $f10, THREAD_FPR10(\thread)
+ lwc1 $f11, THREAD_FPR11(\thread)
+ lwc1 $f12, THREAD_FPR12(\thread)
+ lwc1 $f13, THREAD_FPR13(\thread)
+ lwc1 $f14, THREAD_FPR14(\thread)
+ lwc1 $f15, THREAD_FPR15(\thread)
+ lwc1 $f16, THREAD_FPR16(\thread)
+ lwc1 $f17, THREAD_FPR17(\thread)
+ lwc1 $f18, THREAD_FPR18(\thread)
+ lwc1 $f19, THREAD_FPR19(\thread)
+ lwc1 $f20, THREAD_FPR20(\thread)
+ lwc1 $f21, THREAD_FPR21(\thread)
+ lwc1 $f22, THREAD_FPR22(\thread)
+ lwc1 $f23, THREAD_FPR23(\thread)
+ lwc1 $f24, THREAD_FPR24(\thread)
+ lwc1 $f25, THREAD_FPR25(\thread)
+ lwc1 $f26, THREAD_FPR26(\thread)
+ lwc1 $f27, THREAD_FPR27(\thread)
+ lwc1 $f28, THREAD_FPR28(\thread)
+ lwc1 $f29, THREAD_FPR29(\thread)
+ lwc1 $f30, THREAD_FPR30(\thread)
+ lwc1 $f31, THREAD_FPR31(\thread)
ctc1 \tmp, fcr31
.set pop
.endm
diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
index 0cae4595e985..6156ac8c4cfb 100644
--- a/arch/mips/include/asm/asmmacro.h
+++ b/arch/mips/include/asm/asmmacro.h
@@ -60,22 +60,22 @@
.set push
SET_HARDFLOAT
cfc1 \tmp, fcr31
- sdc1 $f0, THREAD_FPR0_LS64(\thread)
- sdc1 $f2, THREAD_FPR2_LS64(\thread)
- sdc1 $f4, THREAD_FPR4_LS64(\thread)
- sdc1 $f6, THREAD_FPR6_LS64(\thread)
- sdc1 $f8, THREAD_FPR8_LS64(\thread)
- sdc1 $f10, THREAD_FPR10_LS64(\thread)
- sdc1 $f12, THREAD_FPR12_LS64(\thread)
- sdc1 $f14, THREAD_FPR14_LS64(\thread)
- sdc1 $f16, THREAD_FPR16_LS64(\thread)
- sdc1 $f18, THREAD_FPR18_LS64(\thread)
- sdc1 $f20, THREAD_FPR20_LS64(\thread)
- sdc1 $f22, THREAD_FPR22_LS64(\thread)
- sdc1 $f24, THREAD_FPR24_LS64(\thread)
- sdc1 $f26, THREAD_FPR26_LS64(\thread)
- sdc1 $f28, THREAD_FPR28_LS64(\thread)
- sdc1 $f30, THREAD_FPR30_LS64(\thread)
+ sdc1 $f0, THREAD_FPR0(\thread)
+ sdc1 $f2, THREAD_FPR2(\thread)
+ sdc1 $f4, THREAD_FPR4(\thread)
+ sdc1 $f6, THREAD_FPR6(\thread)
+ sdc1 $f8, THREAD_FPR8(\thread)
+ sdc1 $f10, THREAD_FPR10(\thread)
+ sdc1 $f12, THREAD_FPR12(\thread)
+ sdc1 $f14, THREAD_FPR14(\thread)
+ sdc1 $f16, THREAD_FPR16(\thread)
+ sdc1 $f18, THREAD_FPR18(\thread)
+ sdc1 $f20, THREAD_FPR20(\thread)
+ sdc1 $f22, THREAD_FPR22(\thread)
+ sdc1 $f24, THREAD_FPR24(\thread)
+ sdc1 $f26, THREAD_FPR26(\thread)
+ sdc1 $f28, THREAD_FPR28(\thread)
+ sdc1 $f30, THREAD_FPR30(\thread)
sw \tmp, THREAD_FCR31(\thread)
.set pop
.endm
@@ -84,22 +84,22 @@
.set push
.set mips64r2
SET_HARDFLOAT
- sdc1 $f1, THREAD_FPR1_LS64(\thread)
- sdc1 $f3, THREAD_FPR3_LS64(\thread)
- sdc1 $f5, THREAD_FPR5_LS64(\thread)
- sdc1 $f7, THREAD_FPR7_LS64(\thread)
- sdc1 $f9, THREAD_FPR9_LS64(\thread)
- sdc1 $f11, THREAD_FPR11_LS64(\thread)
- sdc1 $f13, THREAD_FPR13_LS64(\thread)
- sdc1 $f15, THREAD_FPR15_LS64(\thread)
- sdc1 $f17, THREAD_FPR17_LS64(\thread)
- sdc1 $f19, THREAD_FPR19_LS64(\thread)
- sdc1 $f21, THREAD_FPR21_LS64(\thread)
- sdc1 $f23, THREAD_FPR23_LS64(\thread)
- sdc1 $f25, THREAD_FPR25_LS64(\thread)
- sdc1 $f27, THREAD_FPR27_LS64(\thread)
- sdc1 $f29, THREAD_FPR29_LS64(\thread)
- sdc1 $f31, THREAD_FPR31_LS64(\thread)
+ sdc1 $f1, THREAD_FPR1(\thread)
+ sdc1 $f3, THREAD_FPR3(\thread)
+ sdc1 $f5, THREAD_FPR5(\thread)
+ sdc1 $f7, THREAD_FPR7(\thread)
+ sdc1 $f9, THREAD_FPR9(\thread)
+ sdc1 $f11, THREAD_FPR11(\thread)
+ sdc1 $f13, THREAD_FPR13(\thread)
+ sdc1 $f15, THREAD_FPR15(\thread)
+ sdc1 $f17, THREAD_FPR17(\thread)
+ sdc1 $f19, THREAD_FPR19(\thread)
+ sdc1 $f21, THREAD_FPR21(\thread)
+ sdc1 $f23, THREAD_FPR23(\thread)
+ sdc1 $f25, THREAD_FPR25(\thread)
+ sdc1 $f27, THREAD_FPR27(\thread)
+ sdc1 $f29, THREAD_FPR29(\thread)
+ sdc1 $f31, THREAD_FPR31(\thread)
.set pop
.endm
@@ -118,22 +118,22 @@
.set push
SET_HARDFLOAT
lw \tmp, THREAD_FCR31(\thread)
- ldc1 $f0, THREAD_FPR0_LS64(\thread)
- ldc1 $f2, THREAD_FPR2_LS64(\thread)
- ldc1 $f4, THREAD_FPR4_LS64(\thread)
- ldc1 $f6, THREAD_FPR6_LS64(\thread)
- ldc1 $f8, THREAD_FPR8_LS64(\thread)
- ldc1 $f10, THREAD_FPR10_LS64(\thread)
- ldc1 $f12, THREAD_FPR12_LS64(\thread)
- ldc1 $f14, THREAD_FPR14_LS64(\thread)
- ldc1 $f16, THREAD_FPR16_LS64(\thread)
- ldc1 $f18, THREAD_FPR18_LS64(\thread)
- ldc1 $f20, THREAD_FPR20_LS64(\thread)
- ldc1 $f22, THREAD_FPR22_LS64(\thread)
- ldc1 $f24, THREAD_FPR24_LS64(\thread)
- ldc1 $f26, THREAD_FPR26_LS64(\thread)
- ldc1 $f28, THREAD_FPR28_LS64(\thread)
- ldc1 $f30, THREAD_FPR30_LS64(\thread)
+ ldc1 $f0, THREAD_FPR0(\thread)
+ ldc1 $f2, THREAD_FPR2(\thread)
+ ldc1 $f4, THREAD_FPR4(\thread)
+ ldc1 $f6, THREAD_FPR6(\thread)
+ ldc1 $f8, THREAD_FPR8(\thread)
+ ldc1 $f10, THREAD_FPR10(\thread)
+ ldc1 $f12, THREAD_FPR12(\thread)
+ ldc1 $f14, THREAD_FPR14(\thread)
+ ldc1 $f16, THREAD_FPR16(\thread)
+ ldc1 $f18, THREAD_FPR18(\thread)
+ ldc1 $f20, THREAD_FPR20(\thread)
+ ldc1 $f22, THREAD_FPR22(\thread)
+ ldc1 $f24, THREAD_FPR24(\thread)
+ ldc1 $f26, THREAD_FPR26(\thread)
+ ldc1 $f28, THREAD_FPR28(\thread)
+ ldc1 $f30, THREAD_FPR30(\thread)
ctc1 \tmp, fcr31
.endm
@@ -141,22 +141,22 @@
.set push
.set mips64r2
SET_HARDFLOAT
- ldc1 $f1, THREAD_FPR1_LS64(\thread)
- ldc1 $f3, THREAD_FPR3_LS64(\thread)
- ldc1 $f5, THREAD_FPR5_LS64(\thread)
- ldc1 $f7, THREAD_FPR7_LS64(\thread)
- ldc1 $f9, THREAD_FPR9_LS64(\thread)
- ldc1 $f11, THREAD_FPR11_LS64(\thread)
- ldc1 $f13, THREAD_FPR13_LS64(\thread)
- ldc1 $f15, THREAD_FPR15_LS64(\thread)
- ldc1 $f17, THREAD_FPR17_LS64(\thread)
- ldc1 $f19, THREAD_FPR19_LS64(\thread)
- ldc1 $f21, THREAD_FPR21_LS64(\thread)
- ldc1 $f23, THREAD_FPR23_LS64(\thread)
- ldc1 $f25, THREAD_FPR25_LS64(\thread)
- ldc1 $f27, THREAD_FPR27_LS64(\thread)
- ldc1 $f29, THREAD_FPR29_LS64(\thread)
- ldc1 $f31, THREAD_FPR31_LS64(\thread)
+ ldc1 $f1, THREAD_FPR1(\thread)
+ ldc1 $f3, THREAD_FPR3(\thread)
+ ldc1 $f5, THREAD_FPR5(\thread)
+ ldc1 $f7, THREAD_FPR7(\thread)
+ ldc1 $f9, THREAD_FPR9(\thread)
+ ldc1 $f11, THREAD_FPR11(\thread)
+ ldc1 $f13, THREAD_FPR13(\thread)
+ ldc1 $f15, THREAD_FPR15(\thread)
+ ldc1 $f17, THREAD_FPR17(\thread)
+ ldc1 $f19, THREAD_FPR19(\thread)
+ ldc1 $f21, THREAD_FPR21(\thread)
+ ldc1 $f23, THREAD_FPR23(\thread)
+ ldc1 $f25, THREAD_FPR25(\thread)
+ ldc1 $f27, THREAD_FPR27(\thread)
+ ldc1 $f29, THREAD_FPR29(\thread)
+ ldc1 $f31, THREAD_FPR31(\thread)
.set pop
.endm
@@ -211,6 +211,22 @@
.endm
#ifdef TOOLCHAIN_SUPPORTS_MSA
+ .macro _cfcmsa rd, cs
+ .set push
+ .set mips32r2
+ .set msa
+ cfcmsa \rd, $\cs
+ .set pop
+ .endm
+
+ .macro _ctcmsa cd, rs
+ .set push
+ .set mips32r2
+ .set msa
+ ctcmsa $\cd, \rs
+ .set pop
+ .endm
+
.macro ld_d wd, off, base
.set push
.set mips32r2
@@ -227,35 +243,35 @@
.set pop
.endm
- .macro copy_u_w rd, ws, n
+ .macro copy_u_w ws, n
.set push
.set mips32r2
.set msa
- copy_u.w \rd, $w\ws[\n]
+ copy_u.w $1, $w\ws[\n]
.set pop
.endm
- .macro copy_u_d rd, ws, n
+ .macro copy_u_d ws, n
.set push
.set mips64r2
.set msa
- copy_u.d \rd, $w\ws[\n]
+ copy_u.d $1, $w\ws[\n]
.set pop
.endm
- .macro insert_w wd, n, rs
+ .macro insert_w wd, n
.set push
.set mips32r2
.set msa
- insert.w $w\wd[\n], \rs
+ insert.w $w\wd[\n], $1
.set pop
.endm
- .macro insert_d wd, n, rs
+ .macro insert_d wd, n
.set push
.set mips64r2
.set msa
- insert.d $w\wd[\n], \rs
+ insert.d $w\wd[\n], $1
.set pop
.endm
#else
@@ -283,7 +299,7 @@
/*
* Temporary until all toolchains in use include MSA support.
*/
- .macro cfcmsa rd, cs
+ .macro _cfcmsa rd, cs
.set push
.set noat
SET_HARDFLOAT
@@ -293,7 +309,7 @@
.set pop
.endm
- .macro ctcmsa cd, rs
+ .macro _ctcmsa cd, rs
.set push
.set noat
SET_HARDFLOAT
@@ -320,44 +336,36 @@
.set pop
.endm
- .macro copy_u_w rd, ws, n
+ .macro copy_u_w ws, n
.set push
.set noat
SET_HARDFLOAT
.insn
.word COPY_UW_MSA_INSN | (\n << 16) | (\ws << 11)
- /* move triggers an assembler bug... */
- or \rd, $1, zero
.set pop
.endm
- .macro copy_u_d rd, ws, n
+ .macro copy_u_d ws, n
.set push
.set noat
SET_HARDFLOAT
.insn
.word COPY_UD_MSA_INSN | (\n << 16) | (\ws << 11)
- /* move triggers an assembler bug... */
- or \rd, $1, zero
.set pop
.endm
- .macro insert_w wd, n, rs
+ .macro insert_w wd, n
.set push
.set noat
SET_HARDFLOAT
- /* move triggers an assembler bug... */
- or $1, \rs, zero
.word INSERT_W_MSA_INSN | (\n << 16) | (\wd << 6)
.set pop
.endm
- .macro insert_d wd, n, rs
+ .macro insert_d wd, n
.set push
.set noat
SET_HARDFLOAT
- /* move triggers an assembler bug... */
- or $1, \rs, zero
.word INSERT_D_MSA_INSN | (\n << 16) | (\wd << 6)
.set pop
.endm
@@ -399,7 +407,7 @@
.set push
.set noat
SET_HARDFLOAT
- cfcmsa $1, MSA_CSR
+ _cfcmsa $1, MSA_CSR
sw $1, THREAD_MSA_CSR(\thread)
.set pop
.endm
@@ -409,7 +417,7 @@
.set noat
SET_HARDFLOAT
lw $1, THREAD_MSA_CSR(\thread)
- ctcmsa MSA_CSR, $1
+ _ctcmsa MSA_CSR, $1
.set pop
ld_d 0, THREAD_FPR0, \thread
ld_d 1, THREAD_FPR1, \thread
@@ -452,9 +460,6 @@
insert_w \wd, 2
insert_w \wd, 3
#endif
- .if 31-\wd
- msa_init_upper (\wd+1)
- .endif
.endm
.macro msa_init_all_upper
@@ -463,6 +468,37 @@
SET_HARDFLOAT
not $1, zero
msa_init_upper 0
+ msa_init_upper 1
+ msa_init_upper 2
+ msa_init_upper 3
+ msa_init_upper 4
+ msa_init_upper 5
+ msa_init_upper 6
+ msa_init_upper 7
+ msa_init_upper 8
+ msa_init_upper 9
+ msa_init_upper 10
+ msa_init_upper 11
+ msa_init_upper 12
+ msa_init_upper 13
+ msa_init_upper 14
+ msa_init_upper 15
+ msa_init_upper 16
+ msa_init_upper 17
+ msa_init_upper 18
+ msa_init_upper 19
+ msa_init_upper 20
+ msa_init_upper 21
+ msa_init_upper 22
+ msa_init_upper 23
+ msa_init_upper 24
+ msa_init_upper 25
+ msa_init_upper 26
+ msa_init_upper 27
+ msa_init_upper 28
+ msa_init_upper 29
+ msa_init_upper 30
+ msa_init_upper 31
.set pop
.endm
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index dd083e999b08..b104ad9d655f 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -48,6 +48,12 @@ enum fpu_mode {
#define FPU_FR_MASK 0x1
};
+#define __disable_fpu() \
+do { \
+ clear_c0_status(ST0_CU1); \
+ disable_fpu_hazard(); \
+} while (0)
+
static inline int __enable_fpu(enum fpu_mode mode)
{
int fr;
@@ -86,7 +92,12 @@ fr_common:
enable_fpu_hazard();
/* check FR has the desired value */
- return (!!(read_c0_status() & ST0_FR) == !!fr) ? 0 : SIGFPE;
+ if (!!(read_c0_status() & ST0_FR) == !!fr)
+ return 0;
+
+ /* unsupported FR value */
+ __disable_fpu();
+ return SIGFPE;
default:
BUG();
@@ -95,12 +106,6 @@ fr_common:
return SIGFPE;
}
-#define __disable_fpu() \
-do { \
- clear_c0_status(ST0_CU1); \
- disable_fpu_hazard(); \
-} while (0)
-
#define clear_fpu_owner() clear_thread_flag(TIF_USEDFPU)
static inline int __is_fpu_owner(void)
@@ -170,6 +175,7 @@ static inline void lose_fpu(int save)
}
disable_msa();
clear_thread_flag(TIF_USEDMSA);
+ __disable_fpu();
} else if (is_fpu_owner()) {
if (save)
_save_fp(current);
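
The hunk moves __disable_fpu() above __enable_fpu() so the new FR-mismatch
path can shut the FPU off before returning SIGFPE. The do { ... } while (0)
wrapper it uses is the standard trick for making a multi-statement macro act
as a single statement; a generic, runnable illustration (all names invented):

#include <stdio.h>

#define disable_thing()                 \
do {                                    \
        puts("clear enable bit");       \
        puts("wait out the hazard");    \
} while (0)

int main(void)
{
        int cond = 1;

        /* Without the do/while(0) wrapper, the trailing semicolon after
         * a bare { } block would break this unbraced if/else. */
        if (cond)
                disable_thing();
        else
                puts("unreachable");
        return 0;
}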
diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h
index fdbff44e5482..608aa57799c8 100644
--- a/arch/mips/include/asm/jump_label.h
+++ b/arch/mips/include/asm/jump_label.h
@@ -8,9 +8,9 @@
#ifndef _ASM_MIPS_JUMP_LABEL_H
#define _ASM_MIPS_JUMP_LABEL_H
-#include <linux/types.h>
+#ifndef __ASSEMBLY__
-#ifdef __KERNEL__
+#include <linux/types.h>
#define JUMP_LABEL_NOP_SIZE 4
@@ -39,8 +39,6 @@ l_yes:
return true;
}
-#endif /* __KERNEL__ */
-
#ifdef CONFIG_64BIT
typedef u64 jump_label_t;
#else
@@ -53,4 +51,5 @@ struct jump_entry {
jump_label_t key;
};
+#endif /* __ASSEMBLY__ */
#endif /* _ASM_MIPS_JUMP_LABEL_H */
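
With the guard switched from __KERNEL__ to __ASSEMBLY__, assembly sources can
now include this header without seeing its C declarations. The pattern in
miniature, as a hypothetical header (using <stdint.h> so it compiles anywhere):

/* example.h - safe to include from both C and assembly translation units */
#ifndef __ASSEMBLY__

#include <stdint.h>

struct example_entry {          /* C-only: never seen by the assembler */
        uint64_t code;
        uint64_t target;
};

#endif /* __ASSEMBLY__ */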
diff --git a/arch/mips/include/asm/kdebug.h b/arch/mips/include/asm/kdebug.h
index 6a9af5fcb5d7..cba22ab7ad4d 100644
--- a/arch/mips/include/asm/kdebug.h
+++ b/arch/mips/include/asm/kdebug.h
@@ -10,7 +10,8 @@ enum die_val {
DIE_RI,
DIE_PAGE_FAULT,
DIE_BREAK,
- DIE_SSTEPBP
+ DIE_SSTEPBP,
+ DIE_MSAFP
};
#endif /* _ASM_MIPS_KDEBUG_H */
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index ac4fc716062b..4c25823563fe 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -21,10 +21,10 @@
/* MIPS KVM register ids */
#define MIPS_CP0_32(_R, _S) \
- (KVM_REG_MIPS | KVM_REG_SIZE_U32 | 0x10000 | (8 * (_R) + (_S)))
+ (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))
#define MIPS_CP0_64(_R, _S) \
- (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0x10000 | (8 * (_R) + (_S)))
+ (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U64 | (8 * (_R) + (_S)))
#define KVM_REG_MIPS_CP0_INDEX MIPS_CP0_32(0, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO0 MIPS_CP0_64(2, 0)
@@ -42,11 +42,14 @@
#define KVM_REG_MIPS_CP0_STATUS MIPS_CP0_32(12, 0)
#define KVM_REG_MIPS_CP0_CAUSE MIPS_CP0_32(13, 0)
#define KVM_REG_MIPS_CP0_EPC MIPS_CP0_64(14, 0)
+#define KVM_REG_MIPS_CP0_PRID MIPS_CP0_32(15, 0)
#define KVM_REG_MIPS_CP0_EBASE MIPS_CP0_64(15, 1)
#define KVM_REG_MIPS_CP0_CONFIG MIPS_CP0_32(16, 0)
#define KVM_REG_MIPS_CP0_CONFIG1 MIPS_CP0_32(16, 1)
#define KVM_REG_MIPS_CP0_CONFIG2 MIPS_CP0_32(16, 2)
#define KVM_REG_MIPS_CP0_CONFIG3 MIPS_CP0_32(16, 3)
+#define KVM_REG_MIPS_CP0_CONFIG4 MIPS_CP0_32(16, 4)
+#define KVM_REG_MIPS_CP0_CONFIG5 MIPS_CP0_32(16, 5)
#define KVM_REG_MIPS_CP0_CONFIG7 MIPS_CP0_32(16, 7)
#define KVM_REG_MIPS_CP0_XCONTEXT MIPS_CP0_64(20, 0)
#define KVM_REG_MIPS_CP0_ERROREPC MIPS_CP0_64(30, 0)
@@ -119,6 +122,10 @@ struct kvm_vcpu_stat {
u32 syscall_exits;
u32 resvd_inst_exits;
u32 break_inst_exits;
+ u32 trap_inst_exits;
+ u32 msa_fpe_exits;
+ u32 fpe_exits;
+ u32 msa_disabled_exits;
u32 flush_dcache_exits;
u32 halt_successful_poll;
u32 halt_wakeup;
@@ -138,6 +145,10 @@ enum kvm_mips_exit_types {
SYSCALL_EXITS,
RESVD_INST_EXITS,
BREAK_INST_EXITS,
+ TRAP_INST_EXITS,
+ MSA_FPE_EXITS,
+ FPE_EXITS,
+ MSA_DISABLED_EXITS,
FLUSH_DCACHE_EXITS,
MAX_KVM_MIPS_EXIT_TYPES
};
@@ -206,6 +217,8 @@ struct mips_coproc {
#define MIPS_CP0_CONFIG1_SEL 1
#define MIPS_CP0_CONFIG2_SEL 2
#define MIPS_CP0_CONFIG3_SEL 3
+#define MIPS_CP0_CONFIG4_SEL 4
+#define MIPS_CP0_CONFIG5_SEL 5
/* Config0 register bits */
#define CP0C0_M 31
@@ -262,31 +275,6 @@ struct mips_coproc {
#define CP0C3_SM 1
#define CP0C3_TL 0
-/* Have config1, Cacheable, noncoherent, write-back, write allocate*/
-#define MIPS_CONFIG0 \
- ((1 << CP0C0_M) | (0x3 << CP0C0_K0))
-
-/* Have config2, no coprocessor2 attached, no MDMX support attached,
- no performance counters, watch registers present,
- no code compression, EJTAG present, no FPU, no watch registers */
-#define MIPS_CONFIG1 \
-((1 << CP0C1_M) | \
- (0 << CP0C1_C2) | (0 << CP0C1_MD) | (0 << CP0C1_PC) | \
- (0 << CP0C1_WR) | (0 << CP0C1_CA) | (1 << CP0C1_EP) | \
- (0 << CP0C1_FP))
-
-/* Have config3, no tertiary/secondary caches implemented */
-#define MIPS_CONFIG2 \
-((1 << CP0C2_M))
-
-/* No config4, no DSP ASE, no large physaddr (PABITS),
- no external interrupt controller, no vectored interrupts,
- no 1kb pages, no SmartMIPS ASE, no trace logic */
-#define MIPS_CONFIG3 \
-((0 << CP0C3_M) | (0 << CP0C3_DSPP) | (0 << CP0C3_LPA) | \
- (0 << CP0C3_VEIC) | (0 << CP0C3_VInt) | (0 << CP0C3_SP) | \
- (0 << CP0C3_SM) | (0 << CP0C3_TL))
-
/* MMU types, the first four entries have the same layout as the
CP0C0_MT field. */
enum mips_mmu_types {
@@ -321,7 +309,9 @@ enum mips_mmu_types {
*/
#define T_TRAP 13 /* Trap instruction */
#define T_VCEI 14 /* Virtual coherency exception */
+#define T_MSAFPE 14 /* MSA floating point exception */
#define T_FPE 15 /* Floating point exception */
+#define T_MSADIS 21 /* MSA disabled exception */
#define T_WATCH 23 /* Watch address reference */
#define T_VCED 31 /* Virtual coherency data */
@@ -374,6 +364,9 @@ struct kvm_mips_tlb {
long tlb_lo1;
};
+#define KVM_MIPS_FPU_FPU 0x1
+#define KVM_MIPS_FPU_MSA 0x2
+
#define KVM_MIPS_GUEST_TLB_SIZE 64
struct kvm_vcpu_arch {
void *host_ebase, *guest_ebase;
@@ -395,6 +388,8 @@ struct kvm_vcpu_arch {
/* FPU State */
struct mips_fpu_struct fpu;
+ /* Which FPU state is loaded (KVM_MIPS_FPU_*) */
+ unsigned int fpu_inuse;
/* COP0 State */
struct mips_coproc *cop0;
@@ -441,6 +436,9 @@ struct kvm_vcpu_arch {
/* WAIT executed */
int wait;
+
+ u8 fpu_enabled;
+ u8 msa_enabled;
};
@@ -482,11 +480,15 @@ struct kvm_vcpu_arch {
#define kvm_read_c0_guest_config1(cop0) (cop0->reg[MIPS_CP0_CONFIG][1])
#define kvm_read_c0_guest_config2(cop0) (cop0->reg[MIPS_CP0_CONFIG][2])
#define kvm_read_c0_guest_config3(cop0) (cop0->reg[MIPS_CP0_CONFIG][3])
+#define kvm_read_c0_guest_config4(cop0) (cop0->reg[MIPS_CP0_CONFIG][4])
+#define kvm_read_c0_guest_config5(cop0) (cop0->reg[MIPS_CP0_CONFIG][5])
#define kvm_read_c0_guest_config7(cop0) (cop0->reg[MIPS_CP0_CONFIG][7])
#define kvm_write_c0_guest_config(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][0] = (val))
#define kvm_write_c0_guest_config1(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][1] = (val))
#define kvm_write_c0_guest_config2(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][2] = (val))
#define kvm_write_c0_guest_config3(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][3] = (val))
+#define kvm_write_c0_guest_config4(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][4] = (val))
+#define kvm_write_c0_guest_config5(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][5] = (val))
#define kvm_write_c0_guest_config7(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][7] = (val))
#define kvm_read_c0_guest_errorepc(cop0) (cop0->reg[MIPS_CP0_ERROR_PC][0])
#define kvm_write_c0_guest_errorepc(cop0, val) (cop0->reg[MIPS_CP0_ERROR_PC][0] = (val))
@@ -567,6 +569,31 @@ static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
kvm_set_c0_guest_ebase(cop0, ((val) & (change))); \
}
+/* Helpers */
+
+static inline bool kvm_mips_guest_can_have_fpu(struct kvm_vcpu_arch *vcpu)
+{
+ return (!__builtin_constant_p(cpu_has_fpu) || cpu_has_fpu) &&
+ vcpu->fpu_enabled;
+}
+
+static inline bool kvm_mips_guest_has_fpu(struct kvm_vcpu_arch *vcpu)
+{
+ return kvm_mips_guest_can_have_fpu(vcpu) &&
+ kvm_read_c0_guest_config1(vcpu->cop0) & MIPS_CONF1_FP;
+}
+
+static inline bool kvm_mips_guest_can_have_msa(struct kvm_vcpu_arch *vcpu)
+{
+ return (!__builtin_constant_p(cpu_has_msa) || cpu_has_msa) &&
+ vcpu->msa_enabled;
+}
+
+static inline bool kvm_mips_guest_has_msa(struct kvm_vcpu_arch *vcpu)
+{
+ return kvm_mips_guest_can_have_msa(vcpu) &&
+ kvm_read_c0_guest_config3(vcpu->cop0) & MIPS_CONF3_MSA;
+}
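
The (!__builtin_constant_p(x) || x) guard above lets the compiler fold the
helper to false whenever the feature flag is a compile-time constant 0, so all
dependent code can be optimised away, while deferring to the runtime value
otherwise. A self-contained illustration of the idiom (GCC/Clang builtin;
FEATURE_X is a stand-in for cpu_has_fpu/cpu_has_msa):

#include <stdbool.h>
#include <stdio.h>

#define FEATURE_X 0     /* hypothetical compile-time feature flag */

static inline bool can_have_x(bool runtime_enabled)
{
        /* Constant 0: whole expression folds to false at compile time;
         * non-constant: decided by the runtime value. */
        return (!__builtin_constant_p(FEATURE_X) || FEATURE_X) &&
                runtime_enabled;
}

int main(void)
{
        printf("%d\n", can_have_x(true));       /* 0: feature compiled out */
        return 0;
}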
struct kvm_mips_callbacks {
int (*handle_cop_unusable)(struct kvm_vcpu *vcpu);
@@ -578,6 +605,10 @@ struct kvm_mips_callbacks {
int (*handle_syscall)(struct kvm_vcpu *vcpu);
int (*handle_res_inst)(struct kvm_vcpu *vcpu);
int (*handle_break)(struct kvm_vcpu *vcpu);
+ int (*handle_trap)(struct kvm_vcpu *vcpu);
+ int (*handle_msa_fpe)(struct kvm_vcpu *vcpu);
+ int (*handle_fpe)(struct kvm_vcpu *vcpu);
+ int (*handle_msa_disabled)(struct kvm_vcpu *vcpu);
int (*vm_init)(struct kvm *kvm);
int (*vcpu_init)(struct kvm_vcpu *vcpu);
int (*vcpu_setup)(struct kvm_vcpu *vcpu);
@@ -596,6 +627,8 @@ struct kvm_mips_callbacks {
const struct kvm_one_reg *reg, s64 *v);
int (*set_one_reg)(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg, s64 v);
+ int (*vcpu_get_regs)(struct kvm_vcpu *vcpu);
+ int (*vcpu_set_regs)(struct kvm_vcpu *vcpu);
};
extern struct kvm_mips_callbacks *kvm_mips_callbacks;
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
@@ -606,6 +639,19 @@ int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
/* Trampoline ASM routine to start running in "Guest" context */
extern int __kvm_mips_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
+/* FPU/MSA context management */
+void __kvm_save_fpu(struct kvm_vcpu_arch *vcpu);
+void __kvm_restore_fpu(struct kvm_vcpu_arch *vcpu);
+void __kvm_restore_fcsr(struct kvm_vcpu_arch *vcpu);
+void __kvm_save_msa(struct kvm_vcpu_arch *vcpu);
+void __kvm_restore_msa(struct kvm_vcpu_arch *vcpu);
+void __kvm_restore_msa_upper(struct kvm_vcpu_arch *vcpu);
+void __kvm_restore_msacsr(struct kvm_vcpu_arch *vcpu);
+void kvm_own_fpu(struct kvm_vcpu *vcpu);
+void kvm_own_msa(struct kvm_vcpu *vcpu);
+void kvm_drop_fpu(struct kvm_vcpu *vcpu);
+void kvm_lose_fpu(struct kvm_vcpu *vcpu);
+
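The own/drop/lose entry points above suggest lazy FPU/MSA context handling:
state is loaded into the hardware on first guest use and written back (or
discarded, when an FR change makes it UNPREDICTABLE) once the host needs the
unit. A userspace model of that ownership protocol, with all names invented
for illustration:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static double hw_fpr[32];       /* stand-in for the physical FP register file */

struct vcpu_model {
        double saved_fpr[32];   /* in-memory copy of the guest's FP state */
        bool fpu_live;          /* true while the state lives in hw_fpr */
};

static void own_fpu(struct vcpu_model *v)       /* cf. kvm_own_fpu() */
{
        if (!v->fpu_live) {
                memcpy(hw_fpr, v->saved_fpr, sizeof(hw_fpr));
                v->fpu_live = true;
        }
}

static void lose_fpu(struct vcpu_model *v)      /* cf. kvm_lose_fpu() */
{
        if (v->fpu_live) {
                memcpy(v->saved_fpr, hw_fpr, sizeof(hw_fpr));
                v->fpu_live = false;
        }
}

static void drop_fpu(struct vcpu_model *v)      /* cf. kvm_drop_fpu() */
{
        v->fpu_live = false;    /* state is unpredictable: discard, don't save */
}

int main(void)
{
        struct vcpu_model v = { .saved_fpr = { 1.0 }, .fpu_live = false };

        own_fpu(&v);            /* guest touches the FPU */
        hw_fpr[0] = 2.0;        /* guest computes */
        lose_fpu(&v);           /* host preempts: write state back */
        drop_fpu(&v);           /* harmless here; shown for completeness */
        printf("%g\n", v.saved_fpr[0]);         /* 2 */
        return 0;
}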
/* TLB handling */
uint32_t kvm_get_kernel_asid(struct kvm_vcpu *vcpu);
@@ -711,6 +757,26 @@ extern enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
+extern enum emulation_result kvm_mips_emulate_trap_exc(unsigned long cause,
+ uint32_t *opc,
+ struct kvm_run *run,
+ struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_msafpe_exc(unsigned long cause,
+ uint32_t *opc,
+ struct kvm_run *run,
+ struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_fpe_exc(unsigned long cause,
+ uint32_t *opc,
+ struct kvm_run *run,
+ struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_msadis_exc(unsigned long cause,
+ uint32_t *opc,
+ struct kvm_run *run,
+ struct kvm_vcpu *vcpu);
+
extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
struct kvm_run *run);
@@ -749,6 +815,11 @@ enum emulation_result kvm_mips_emulate_load(uint32_t inst,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
+unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu);
+unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu);
+unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu);
+unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu);
+
/* Dynamic binary translation */
extern int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
struct kvm_vcpu *vcpu);
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index b5dcbee01fd7..9b3b48e21c22 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -105,7 +105,7 @@ union fpureg {
#ifdef CONFIG_CPU_LITTLE_ENDIAN
# define FPR_IDX(width, idx) (idx)
#else
-# define FPR_IDX(width, idx) ((FPU_REG_WIDTH / (width)) - 1 - (idx))
+# define FPR_IDX(width, idx) ((idx) ^ ((64 / (width)) - 1))
#endif
#define BUILD_FPR_ACCESS(width) \
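
The new formula indexes within each 64-bit FPR rather than across the full
(MSA-sized) vector: on big endian, width 32 XORs the index with 1 to swap the
two words of a double, while width 64 leaves it unchanged. A quick check of
both formulas; FPU_REG_WIDTH of 128 is assumed for the old one, matching the
MSA-capable union fpureg:

#include <stdio.h>

#define FPU_REG_WIDTH 128       /* assumption: union fpureg sized for MSA */
#define FPR_IDX_OLD(width, idx) ((FPU_REG_WIDTH / (width)) - 1 - (idx))
#define FPR_IDX_NEW(width, idx) ((idx) ^ ((64 / (width)) - 1))

int main(void)
{
        /* 32-bit accesses: the old formula spanned the 128-bit vector (3, 2);
         * the new one swaps words within one 64-bit double (1, 0). */
        printf("old: %d %d  new: %d %d\n",
               FPR_IDX_OLD(32, 0), FPR_IDX_OLD(32, 1),
               FPR_IDX_NEW(32, 0), FPR_IDX_NEW(32, 1));
        return 0;
}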
diff --git a/arch/mips/include/uapi/asm/kvm.h b/arch/mips/include/uapi/asm/kvm.h
index 2c04b6d9ff85..6985eb59b085 100644
--- a/arch/mips/include/uapi/asm/kvm.h
+++ b/arch/mips/include/uapi/asm/kvm.h
@@ -36,77 +36,85 @@ struct kvm_regs {
/*
* for KVM_GET_FPU and KVM_SET_FPU
- *
- * If Status[FR] is zero (32-bit FPU), the upper 32-bits of the FPRs
- * are zero filled.
*/
struct kvm_fpu {
- __u64 fpr[32];
- __u32 fir;
- __u32 fccr;
- __u32 fexr;
- __u32 fenr;
- __u32 fcsr;
- __u32 pad;
};
/*
- * For MIPS, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access CP0
+ * For MIPS, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access various
* registers. The id field is broken down as follows:
*
- * bits[2..0] - Register 'sel' index.
- * bits[7..3] - Register 'rd' index.
- * bits[15..8] - Must be zero.
- * bits[31..16] - 1 -> CP0 registers.
- * bits[51..32] - Must be zero.
* bits[63..52] - As per linux/kvm.h
+ * bits[51..32] - Must be zero.
+ * bits[31..16] - Register set.
+ *
+ * Register set = 0: GP registers from kvm_regs (see definitions below).
+ *
+ * Register set = 1: CP0 registers.
+ * bits[15..8] - Must be zero.
+ * bits[7..3] - Register 'rd' index.
+ * bits[2..0] - Register 'sel' index.
+ *
+ * Register set = 2: KVM specific registers (see definitions below).
+ *
+ * Register set = 3: FPU / MSA registers (see definitions below).
*
 * Other register sets may be added in the future. Each set would
* have its own identifier in bits[31..16].
- *
- * The registers defined in struct kvm_regs are also accessible, the
- * id values for these are below.
*/
-#define KVM_REG_MIPS_R0 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0)
-#define KVM_REG_MIPS_R1 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 1)
-#define KVM_REG_MIPS_R2 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 2)
-#define KVM_REG_MIPS_R3 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 3)
-#define KVM_REG_MIPS_R4 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 4)
-#define KVM_REG_MIPS_R5 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 5)
-#define KVM_REG_MIPS_R6 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 6)
-#define KVM_REG_MIPS_R7 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 7)
-#define KVM_REG_MIPS_R8 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 8)
-#define KVM_REG_MIPS_R9 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 9)
-#define KVM_REG_MIPS_R10 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 10)
-#define KVM_REG_MIPS_R11 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 11)
-#define KVM_REG_MIPS_R12 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 12)
-#define KVM_REG_MIPS_R13 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 13)
-#define KVM_REG_MIPS_R14 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 14)
-#define KVM_REG_MIPS_R15 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 15)
-#define KVM_REG_MIPS_R16 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 16)
-#define KVM_REG_MIPS_R17 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 17)
-#define KVM_REG_MIPS_R18 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 18)
-#define KVM_REG_MIPS_R19 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 19)
-#define KVM_REG_MIPS_R20 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 20)
-#define KVM_REG_MIPS_R21 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 21)
-#define KVM_REG_MIPS_R22 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 22)
-#define KVM_REG_MIPS_R23 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 23)
-#define KVM_REG_MIPS_R24 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 24)
-#define KVM_REG_MIPS_R25 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 25)
-#define KVM_REG_MIPS_R26 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 26)
-#define KVM_REG_MIPS_R27 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 27)
-#define KVM_REG_MIPS_R28 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 28)
-#define KVM_REG_MIPS_R29 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 29)
-#define KVM_REG_MIPS_R30 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 30)
-#define KVM_REG_MIPS_R31 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 31)
-
-#define KVM_REG_MIPS_HI (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 32)
-#define KVM_REG_MIPS_LO (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 33)
-#define KVM_REG_MIPS_PC (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 34)
-
-/* KVM specific control registers */
+#define KVM_REG_MIPS_GP (KVM_REG_MIPS | 0x0000000000000000ULL)
+#define KVM_REG_MIPS_CP0 (KVM_REG_MIPS | 0x0000000000010000ULL)
+#define KVM_REG_MIPS_KVM (KVM_REG_MIPS | 0x0000000000020000ULL)
+#define KVM_REG_MIPS_FPU (KVM_REG_MIPS | 0x0000000000030000ULL)
+
+
+/*
+ * KVM_REG_MIPS_GP - General purpose registers from kvm_regs.
+ */
+
+#define KVM_REG_MIPS_R0 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 0)
+#define KVM_REG_MIPS_R1 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 1)
+#define KVM_REG_MIPS_R2 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 2)
+#define KVM_REG_MIPS_R3 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 3)
+#define KVM_REG_MIPS_R4 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 4)
+#define KVM_REG_MIPS_R5 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 5)
+#define KVM_REG_MIPS_R6 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 6)
+#define KVM_REG_MIPS_R7 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 7)
+#define KVM_REG_MIPS_R8 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 8)
+#define KVM_REG_MIPS_R9 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 9)
+#define KVM_REG_MIPS_R10 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 10)
+#define KVM_REG_MIPS_R11 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 11)
+#define KVM_REG_MIPS_R12 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 12)
+#define KVM_REG_MIPS_R13 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 13)
+#define KVM_REG_MIPS_R14 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 14)
+#define KVM_REG_MIPS_R15 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 15)
+#define KVM_REG_MIPS_R16 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 16)
+#define KVM_REG_MIPS_R17 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 17)
+#define KVM_REG_MIPS_R18 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 18)
+#define KVM_REG_MIPS_R19 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 19)
+#define KVM_REG_MIPS_R20 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 20)
+#define KVM_REG_MIPS_R21 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 21)
+#define KVM_REG_MIPS_R22 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 22)
+#define KVM_REG_MIPS_R23 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 23)
+#define KVM_REG_MIPS_R24 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 24)
+#define KVM_REG_MIPS_R25 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 25)
+#define KVM_REG_MIPS_R26 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 26)
+#define KVM_REG_MIPS_R27 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 27)
+#define KVM_REG_MIPS_R28 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 28)
+#define KVM_REG_MIPS_R29 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 29)
+#define KVM_REG_MIPS_R30 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 30)
+#define KVM_REG_MIPS_R31 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 31)
+
+#define KVM_REG_MIPS_HI (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 32)
+#define KVM_REG_MIPS_LO (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 33)
+#define KVM_REG_MIPS_PC (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 34)
+
+
+/*
+ * KVM_REG_MIPS_KVM - KVM specific control registers.
+ */
/*
* CP0_Count control
@@ -118,8 +126,7 @@ struct kvm_fpu {
* safely without losing time or guest timer interrupts.
* Other: Reserved, do not change.
*/
-#define KVM_REG_MIPS_COUNT_CTL (KVM_REG_MIPS | KVM_REG_SIZE_U64 | \
- 0x20000 | 0)
+#define KVM_REG_MIPS_COUNT_CTL (KVM_REG_MIPS_KVM | KVM_REG_SIZE_U64 | 0)
#define KVM_REG_MIPS_COUNT_CTL_DC 0x00000001
/*
@@ -131,15 +138,46 @@ struct kvm_fpu {
* emulated.
* Modifications to times in the future are rejected.
*/
-#define KVM_REG_MIPS_COUNT_RESUME (KVM_REG_MIPS | KVM_REG_SIZE_U64 | \
- 0x20000 | 1)
+#define KVM_REG_MIPS_COUNT_RESUME (KVM_REG_MIPS_KVM | KVM_REG_SIZE_U64 | 1)
/*
* CP0_Count rate in Hz
* Specifies the rate of the CP0_Count timer in Hz. Modifications occur without
* discontinuities in CP0_Count.
*/
-#define KVM_REG_MIPS_COUNT_HZ (KVM_REG_MIPS | KVM_REG_SIZE_U64 | \
- 0x20000 | 2)
+#define KVM_REG_MIPS_COUNT_HZ (KVM_REG_MIPS_KVM | KVM_REG_SIZE_U64 | 2)
+
+
+/*
+ * KVM_REG_MIPS_FPU - Floating Point and MIPS SIMD Architecture (MSA) registers.
+ *
+ * bits[15..8] - Register subset (see definitions below).
+ * bits[7..5] - Must be zero.
+ * bits[4..0] - Register number within register subset.
+ */
+
+#define KVM_REG_MIPS_FPR (KVM_REG_MIPS_FPU | 0x0000000000000000ULL)
+#define KVM_REG_MIPS_FCR (KVM_REG_MIPS_FPU | 0x0000000000000100ULL)
+#define KVM_REG_MIPS_MSACR (KVM_REG_MIPS_FPU | 0x0000000000000200ULL)
+
+/*
+ * KVM_REG_MIPS_FPR - Floating point / Vector registers.
+ */
+#define KVM_REG_MIPS_FPR_32(n) (KVM_REG_MIPS_FPR | KVM_REG_SIZE_U32 | (n))
+#define KVM_REG_MIPS_FPR_64(n) (KVM_REG_MIPS_FPR | KVM_REG_SIZE_U64 | (n))
+#define KVM_REG_MIPS_VEC_128(n) (KVM_REG_MIPS_FPR | KVM_REG_SIZE_U128 | (n))
+
+/*
+ * KVM_REG_MIPS_FCR - Floating point control registers.
+ */
+#define KVM_REG_MIPS_FCR_IR (KVM_REG_MIPS_FCR | KVM_REG_SIZE_U32 | 0)
+#define KVM_REG_MIPS_FCR_CSR (KVM_REG_MIPS_FCR | KVM_REG_SIZE_U32 | 31)
+
+/*
+ * KVM_REG_MIPS_MSACR - MIPS SIMD Architecture (MSA) control registers.
+ */
+#define KVM_REG_MIPS_MSA_IR (KVM_REG_MIPS_MSACR | KVM_REG_SIZE_U32 | 0)
+#define KVM_REG_MIPS_MSA_CSR (KVM_REG_MIPS_MSACR | KVM_REG_SIZE_U32 | 1)
+
/*
* KVM MIPS specific structures and definitions
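
Building and decoding an id under the layout documented above. This sketch
quotes KVM_REG_MIPS and KVM_REG_SIZE_U64 as I understand them from linux/kvm.h
(they should be checked against the real header); the rest comes from this
file:

#include <stdint.h>
#include <stdio.h>

#define KVM_REG_MIPS            0x7000000000000000ULL   /* from linux/kvm.h */
#define KVM_REG_SIZE_U64        0x0030000000000000ULL   /* 3 << 52 */
#define KVM_REG_MIPS_CP0        (KVM_REG_MIPS | 0x0000000000010000ULL)

/* CP0 id: bits[7..3] = rd, bits[2..0] = sel */
#define MIPS_CP0_64(rd, sel) \
        (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U64 | (8 * (rd) + (sel)))

int main(void)
{
        uint64_t id = MIPS_CP0_64(15, 1);               /* CP0_EBase */

        printf("set=%#llx rd=%llu sel=%llu\n",
               (unsigned long long)(id & 0xffff0000ULL),
               (unsigned long long)((id >> 3) & 0x1f),
               (unsigned long long)(id & 0x7));  /* set=0x10000 rd=15 sel=1 */
        return 0;
}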
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 750d67ac41e9..e59fd7cfac9e 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -167,72 +167,6 @@ void output_thread_fpu_defines(void)
OFFSET(THREAD_FPR30, task_struct, thread.fpu.fpr[30]);
OFFSET(THREAD_FPR31, task_struct, thread.fpu.fpr[31]);
- /* the least significant 64 bits of each FP register */
- OFFSET(THREAD_FPR0_LS64, task_struct,
- thread.fpu.fpr[0].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR1_LS64, task_struct,
- thread.fpu.fpr[1].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR2_LS64, task_struct,
- thread.fpu.fpr[2].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR3_LS64, task_struct,
- thread.fpu.fpr[3].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR4_LS64, task_struct,
- thread.fpu.fpr[4].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR5_LS64, task_struct,
- thread.fpu.fpr[5].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR6_LS64, task_struct,
- thread.fpu.fpr[6].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR7_LS64, task_struct,
- thread.fpu.fpr[7].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR8_LS64, task_struct,
- thread.fpu.fpr[8].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR9_LS64, task_struct,
- thread.fpu.fpr[9].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR10_LS64, task_struct,
- thread.fpu.fpr[10].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR11_LS64, task_struct,
- thread.fpu.fpr[11].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR12_LS64, task_struct,
- thread.fpu.fpr[12].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR13_LS64, task_struct,
- thread.fpu.fpr[13].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR14_LS64, task_struct,
- thread.fpu.fpr[14].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR15_LS64, task_struct,
- thread.fpu.fpr[15].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR16_LS64, task_struct,
- thread.fpu.fpr[16].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR17_LS64, task_struct,
- thread.fpu.fpr[17].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR18_LS64, task_struct,
- thread.fpu.fpr[18].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR19_LS64, task_struct,
- thread.fpu.fpr[19].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR20_LS64, task_struct,
- thread.fpu.fpr[20].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR21_LS64, task_struct,
- thread.fpu.fpr[21].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR22_LS64, task_struct,
- thread.fpu.fpr[22].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR23_LS64, task_struct,
- thread.fpu.fpr[23].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR24_LS64, task_struct,
- thread.fpu.fpr[24].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR25_LS64, task_struct,
- thread.fpu.fpr[25].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR26_LS64, task_struct,
- thread.fpu.fpr[26].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR27_LS64, task_struct,
- thread.fpu.fpr[27].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR28_LS64, task_struct,
- thread.fpu.fpr[28].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR29_LS64, task_struct,
- thread.fpu.fpr[29].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR30_LS64, task_struct,
- thread.fpu.fpr[30].val64[FPR_IDX(64, 0)]);
- OFFSET(THREAD_FPR31_LS64, task_struct,
- thread.fpu.fpr[31].val64[FPR_IDX(64, 0)]);
-
OFFSET(THREAD_FCR31, task_struct, thread.fpu.fcr31);
OFFSET(THREAD_MSA_CSR, task_struct, thread.fpu.msacsr);
BLANK();
@@ -470,6 +404,45 @@ void output_kvm_defines(void)
OFFSET(VCPU_LO, kvm_vcpu_arch, lo);
OFFSET(VCPU_HI, kvm_vcpu_arch, hi);
OFFSET(VCPU_PC, kvm_vcpu_arch, pc);
+ BLANK();
+
+ OFFSET(VCPU_FPR0, kvm_vcpu_arch, fpu.fpr[0]);
+ OFFSET(VCPU_FPR1, kvm_vcpu_arch, fpu.fpr[1]);
+ OFFSET(VCPU_FPR2, kvm_vcpu_arch, fpu.fpr[2]);
+ OFFSET(VCPU_FPR3, kvm_vcpu_arch, fpu.fpr[3]);
+ OFFSET(VCPU_FPR4, kvm_vcpu_arch, fpu.fpr[4]);
+ OFFSET(VCPU_FPR5, kvm_vcpu_arch, fpu.fpr[5]);
+ OFFSET(VCPU_FPR6, kvm_vcpu_arch, fpu.fpr[6]);
+ OFFSET(VCPU_FPR7, kvm_vcpu_arch, fpu.fpr[7]);
+ OFFSET(VCPU_FPR8, kvm_vcpu_arch, fpu.fpr[8]);
+ OFFSET(VCPU_FPR9, kvm_vcpu_arch, fpu.fpr[9]);
+ OFFSET(VCPU_FPR10, kvm_vcpu_arch, fpu.fpr[10]);
+ OFFSET(VCPU_FPR11, kvm_vcpu_arch, fpu.fpr[11]);
+ OFFSET(VCPU_FPR12, kvm_vcpu_arch, fpu.fpr[12]);
+ OFFSET(VCPU_FPR13, kvm_vcpu_arch, fpu.fpr[13]);
+ OFFSET(VCPU_FPR14, kvm_vcpu_arch, fpu.fpr[14]);
+ OFFSET(VCPU_FPR15, kvm_vcpu_arch, fpu.fpr[15]);
+ OFFSET(VCPU_FPR16, kvm_vcpu_arch, fpu.fpr[16]);
+ OFFSET(VCPU_FPR17, kvm_vcpu_arch, fpu.fpr[17]);
+ OFFSET(VCPU_FPR18, kvm_vcpu_arch, fpu.fpr[18]);
+ OFFSET(VCPU_FPR19, kvm_vcpu_arch, fpu.fpr[19]);
+ OFFSET(VCPU_FPR20, kvm_vcpu_arch, fpu.fpr[20]);
+ OFFSET(VCPU_FPR21, kvm_vcpu_arch, fpu.fpr[21]);
+ OFFSET(VCPU_FPR22, kvm_vcpu_arch, fpu.fpr[22]);
+ OFFSET(VCPU_FPR23, kvm_vcpu_arch, fpu.fpr[23]);
+ OFFSET(VCPU_FPR24, kvm_vcpu_arch, fpu.fpr[24]);
+ OFFSET(VCPU_FPR25, kvm_vcpu_arch, fpu.fpr[25]);
+ OFFSET(VCPU_FPR26, kvm_vcpu_arch, fpu.fpr[26]);
+ OFFSET(VCPU_FPR27, kvm_vcpu_arch, fpu.fpr[27]);
+ OFFSET(VCPU_FPR28, kvm_vcpu_arch, fpu.fpr[28]);
+ OFFSET(VCPU_FPR29, kvm_vcpu_arch, fpu.fpr[29]);
+ OFFSET(VCPU_FPR30, kvm_vcpu_arch, fpu.fpr[30]);
+ OFFSET(VCPU_FPR31, kvm_vcpu_arch, fpu.fpr[31]);
+
+ OFFSET(VCPU_FCR31, kvm_vcpu_arch, fpu.fcr31);
+ OFFSET(VCPU_MSA_CSR, kvm_vcpu_arch, fpu.msacsr);
+ BLANK();
+
OFFSET(VCPU_COP0, kvm_vcpu_arch, cop0);
OFFSET(VCPU_GUEST_KERNEL_ASID, kvm_vcpu_arch, guest_kernel_asid);
OFFSET(VCPU_GUEST_USER_ASID, kvm_vcpu_arch, guest_user_asid);
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 2ebaabe3af15..af42e7003f12 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -360,12 +360,15 @@ NESTED(nmi_handler, PT_SIZE, sp)
.set mips1
SET_HARDFLOAT
cfc1 a1, fcr31
- li a2, ~(0x3f << 12)
- and a2, a1
- ctc1 a2, fcr31
.set pop
- TRACE_IRQS_ON
- STI
+ CLI
+ TRACE_IRQS_OFF
+ .endm
+
+ .macro __build_clear_msa_fpe
+ _cfcmsa a1, MSA_CSR
+ CLI
+ TRACE_IRQS_OFF
.endm
.macro __build_clear_ade
@@ -426,7 +429,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
BUILD_HANDLER cpu cpu sti silent /* #11 */
BUILD_HANDLER ov ov sti silent /* #12 */
BUILD_HANDLER tr tr sti silent /* #13 */
- BUILD_HANDLER msa_fpe msa_fpe sti silent /* #14 */
+ BUILD_HANDLER msa_fpe msa_fpe msa_fpe silent /* #14 */
BUILD_HANDLER fpe fpe fpe silent /* #15 */
BUILD_HANDLER ftlb ftlb none silent /* #16 */
BUILD_HANDLER msa msa sti silent /* #21 */
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 510452812594..7da6e324dd35 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -46,6 +46,26 @@
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
+static void init_fp_ctx(struct task_struct *target)
+{
+ /* If FP has been used then the target already has context */
+ if (tsk_used_math(target))
+ return;
+
+ /* Begin with data registers set to all 1s... */
+ memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr));
+
+ /* ...and FCSR zeroed */
+ target->thread.fpu.fcr31 = 0;
+
+ /*
+ * Record that the target has "used" math, such that the context
+ * just initialised, and any modifications made by the caller,
+ * aren't discarded.
+ */
+ set_stopped_child_used_math(target);
+}
+
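A userspace model of the state init_fp_ctx() establishes: data registers all
1-bits (a deliberately conspicuous pattern for reads of never-written
registers) and FCSR zeroed:

#include <stdio.h>
#include <string.h>

int main(void)
{
        unsigned long long fpr[32];     /* stand-in for thread.fpu.fpr */
        unsigned int fcr31;

        memset(fpr, ~0, sizeof(fpr));   /* data registers: all 1s */
        fcr31 = 0;                      /* FCSR: zeroed */

        printf("fpr[0]=%#llx fcr31=%#x\n", fpr[0], fcr31);
        return 0;
}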
/*
* Called by kernel/ptrace.c when detaching..
*
@@ -142,6 +162,7 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
if (!access_ok(VERIFY_READ, data, 33 * 8))
return -EIO;
+ init_fp_ctx(child);
fregs = get_fpu_regs(child);
for (i = 0; i < 32; i++) {
@@ -439,6 +460,8 @@ static int fpr_set(struct task_struct *target,
/* XXX fcr31 */
+ init_fp_ctx(target);
+
if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t))
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.fpu,
@@ -660,12 +683,7 @@ long arch_ptrace(struct task_struct *child, long request,
case FPR_BASE ... FPR_BASE + 31: {
union fpureg *fregs = get_fpu_regs(child);
- if (!tsk_used_math(child)) {
- /* FP not yet used */
- memset(&child->thread.fpu, ~0,
- sizeof(child->thread.fpu));
- child->thread.fpu.fcr31 = 0;
- }
+ init_fp_ctx(child);
#ifdef CONFIG_32BIT
if (test_thread_flag(TIF_32BIT_FPREGS)) {
/*
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
index 676c5030a953..1d88af26ba82 100644
--- a/arch/mips/kernel/r4k_fpu.S
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -34,7 +34,6 @@
.endm
.set noreorder
- .set MIPS_ISA_ARCH_LEVEL_RAW
LEAF(_save_fp_context)
.set push
@@ -103,6 +102,7 @@ LEAF(_save_fp_context)
/* Save 32-bit process floating point context */
LEAF(_save_fp_context32)
.set push
+ .set MIPS_ISA_ARCH_LEVEL_RAW
SET_HARDFLOAT
cfc1 t1, fcr31
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 33984c04b60b..5b4d711f878d 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -701,6 +701,13 @@ asmlinkage void do_ov(struct pt_regs *regs)
int process_fpemu_return(int sig, void __user *fault_addr)
{
+ /*
+ * We can't allow the emulated instruction to leave any of the cause
+ * bits set in FCSR. If they were, the kernel would take an FP
+ * exception when restoring FP context.
+ */
+ current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+
if (sig == SIGSEGV || sig == SIGBUS) {
struct siginfo si = {0};
si.si_addr = fault_addr;
@@ -781,6 +788,11 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs),
SIGFPE) == NOTIFY_STOP)
goto out;
+
+ /* Clear FCSR.Cause before enabling interrupts */
+ write_32bit_cp1_register(CP1_STATUS, fcr31 & ~FPU_CSR_ALL_X);
+ local_irq_enable();
+
die_if_kernel("FP exception in kernel code", regs);
if (fcr31 & FPU_CSR_UNI_X) {
@@ -804,18 +816,12 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
&fault_addr);
- /*
- * We can't allow the emulated instruction to leave any of
- * the cause bit set in $fcr31.
- */
- current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+ /* If something went wrong, signal */
+ process_fpemu_return(sig, fault_addr);
/* Restore the hardware register state */
own_fpu(1); /* Using the FPU again. */
- /* If something went wrong, signal */
- process_fpemu_return(sig, fault_addr);
-
goto out;
} else if (fcr31 & FPU_CSR_INV_X)
info.si_code = FPE_FLTINV;
@@ -1392,13 +1398,22 @@ out:
exception_exit(prev_state);
}
-asmlinkage void do_msa_fpe(struct pt_regs *regs)
+asmlinkage void do_msa_fpe(struct pt_regs *regs, unsigned int msacsr)
{
enum ctx_state prev_state;
prev_state = exception_enter();
+ if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0,
+ regs_to_trapnr(regs), SIGFPE) == NOTIFY_STOP)
+ goto out;
+
+ /* Clear MSACSR.Cause before enabling interrupts */
+ write_msa_csr(msacsr & ~MSA_CSR_CAUSEF);
+ local_irq_enable();
+
die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
force_sig(SIGFPE, current);
+out:
exception_exit(prev_state);
}
diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile
index 401fe027c261..637ebbebd549 100644
--- a/arch/mips/kvm/Makefile
+++ b/arch/mips/kvm/Makefile
@@ -1,13 +1,15 @@
# Makefile for KVM support for MIPS
#
-common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
+common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm
-kvm-objs := $(common-objs) mips.o emulate.o locore.o \
+common-objs-$(CONFIG_CPU_HAS_MSA) += msa.o
+
+kvm-objs := $(common-objs-y) mips.o emulate.o locore.o \
interrupt.o stats.o commpage.o \
- dyntrans.o trap_emul.o
+ dyntrans.o trap_emul.o fpu.o
obj-$(CONFIG_KVM) += kvm.o
obj-y += callback.o tlb.o
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index fb3e8dfd1ff6..6230f376a44e 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -884,6 +884,84 @@ enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
return EMULATE_DONE;
}
+/**
+ * kvm_mips_config1_wrmask() - Find mask of writable bits in guest Config1
+ * @vcpu: Virtual CPU.
+ *
+ * Finds the mask of bits which are writable in the guest's Config1 CP0
+ * register by userland (currently read-only to the guest).
+ */
+unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu)
+{
+ unsigned int mask = 0;
+
+ /* Permit FPU to be present if FPU is supported */
+ if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
+ mask |= MIPS_CONF1_FP;
+
+ return mask;
+}
+
+/**
+ * kvm_mips_config3_wrmask() - Find mask of writable bits in guest Config3
+ * @vcpu: Virtual CPU.
+ *
+ * Finds the mask of bits which are writable in the guest's Config3 CP0
+ * register by userland (currently read-only to the guest).
+ */
+unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu)
+{
+ /* Config4 is optional */
+ unsigned int mask = MIPS_CONF_M;
+
+ /* Permit MSA to be present if MSA is supported */
+ if (kvm_mips_guest_can_have_msa(&vcpu->arch))
+ mask |= MIPS_CONF3_MSA;
+
+ return mask;
+}
+
+/**
+ * kvm_mips_config4_wrmask() - Find mask of writable bits in guest Config4
+ * @vcpu: Virtual CPU.
+ *
+ * Finds the mask of bits which are writable in the guest's Config4 CP0
+ * register by userland (currently read-only to the guest).
+ */
+unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu)
+{
+ /* Config5 is optional */
+ return MIPS_CONF_M;
+}
+
+/**
+ * kvm_mips_config5_wrmask() - Find mask of writable bits in guest Config5
+ * @vcpu: Virtual CPU.
+ *
+ * Finds the mask of bits which are writable in the guest's Config5 CP0
+ * register by the guest itself.
+ */
+unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu)
+{
+ unsigned int mask = 0;
+
+ /* Permit MSAEn changes if MSA supported and enabled */
+ if (kvm_mips_guest_has_msa(&vcpu->arch))
+ mask |= MIPS_CONF5_MSAEN;
+
+ /*
+ * Permit guest FPU mode changes if FPU is enabled and the relevant
+ * feature exists according to FIR register.
+ */
+ if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
+ if (cpu_has_fre)
+ mask |= MIPS_CONF5_FRE;
+ /* We don't support UFR or UFE */
+ }
+
+ return mask;
+}
+
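These wrmask helpers pair with the XOR-based update used in the Config5 case
below: only writable bits that actually differ are flipped, everything else
keeps its old value. The same idiom stand-alone, with made-up values:

#include <stdio.h>

int main(void)
{
        unsigned int old_val = 0x00000000;      /* current guest Config5 */
        unsigned int val     = 0xffffffff;      /* value the guest wrote */
        unsigned int wrmask  = 0x00000100;      /* pretend only MSAEn writable */
        unsigned int change  = (val ^ old_val) & wrmask;

        val = old_val ^ change;                 /* flip only permitted bits */
        printf("%#x\n", val);                   /* 0x100 */
        return 0;
}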
enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
uint32_t cause, struct kvm_run *run,
struct kvm_vcpu *vcpu)
@@ -1021,18 +1099,114 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
kvm_mips_write_compare(vcpu,
vcpu->arch.gprs[rt]);
} else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
- kvm_write_c0_guest_status(cop0,
- vcpu->arch.gprs[rt]);
+ unsigned int old_val, val, change;
+
+ old_val = kvm_read_c0_guest_status(cop0);
+ val = vcpu->arch.gprs[rt];
+ change = val ^ old_val;
+
+ /* Make sure that the NMI bit is never set */
+ val &= ~ST0_NMI;
+
+ /*
+ * Don't allow CU1 or FR to be set unless the
+ * FPU capability is enabled and an FPU exists
+ * in the guest configuration.
+ */
+ if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+ val &= ~(ST0_CU1 | ST0_FR);
+
+ /*
+ * Also don't allow FR to be set if host doesn't
+ * support it.
+ */
+ if (!(current_cpu_data.fpu_id & MIPS_FPIR_F64))
+ val &= ~ST0_FR;
+
+ /* Handle changes in FPU mode */
+ preempt_disable();
+
+ /*
+ * FPU and Vector register state is made
+ * UNPREDICTABLE by a change of FR, so don't
+ * even bother saving it.
+ */
+ if (change & ST0_FR)
+ kvm_drop_fpu(vcpu);
+
+ /*
+ * If MSA state is already live, it is undefined
+ * how it interacts with FR=0 FPU state, and we
+ * don't want to hit reserved instruction
+ * exceptions trying to save the MSA state later
+ * when CU=1 && FR=1, so play it safe and save
+ * it first.
+ */
+ if (change & ST0_CU1 && !(val & ST0_FR) &&
+ vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
+ kvm_lose_fpu(vcpu);
+
/*
- * Make sure that CU1 and NMI bits are
- * never set
+ * Propagate CU1 (FPU enable) changes
+ * immediately if the FPU context is already
+ * loaded. When disabling we leave the context
+ * loaded so it can be quickly enabled again in
+ * the near future.
*/
- kvm_clear_c0_guest_status(cop0,
- (ST0_CU1 | ST0_NMI));
+ if (change & ST0_CU1 &&
+ vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
+ change_c0_status(ST0_CU1, val);
+
+ preempt_enable();
+
+ kvm_write_c0_guest_status(cop0, val);
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
- kvm_mips_trans_mtc0(inst, opc, vcpu);
+ /*
+ * If FPU present, we need CU1/FR bits to take
+ * effect fairly soon.
+ */
+ if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+ kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
+ } else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
+ unsigned int old_val, val, change, wrmask;
+
+ old_val = kvm_read_c0_guest_config5(cop0);
+ val = vcpu->arch.gprs[rt];
+
+ /* Only a few bits are writable in Config5 */
+ wrmask = kvm_mips_config5_wrmask(vcpu);
+ change = (val ^ old_val) & wrmask;
+ val = old_val ^ change;
+
+ /* Handle changes in FPU/MSA modes */
+ preempt_disable();
+
+ /*
+ * Propagate FRE changes immediately if the FPU
+ * context is already loaded.
+ */
+ if (change & MIPS_CONF5_FRE &&
+ vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
+ change_c0_config5(MIPS_CONF5_FRE, val);
+
+ /*
+ * Propagate MSAEn changes immediately if the
+ * MSA context is already loaded. When disabling
+ * we leave the context loaded so it can be
+ * quickly enabled again in the near future.
+ */
+ if (change & MIPS_CONF5_MSAEN &&
+ vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
+ change_c0_config5(MIPS_CONF5_MSAEN,
+ val);
+
+ preempt_enable();
+
+ kvm_write_c0_guest_config5(cop0, val);
} else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
uint32_t old_cause, new_cause;
@@ -1970,6 +2144,146 @@ enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
return er;
}
+enum emulation_result kvm_mips_emulate_trap_exc(unsigned long cause,
+ uint32_t *opc,
+ struct kvm_run *run,
+ struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ enum emulation_result er = EMULATE_DONE;
+
+ if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+ /* save old pc */
+ kvm_write_c0_guest_epc(cop0, arch->pc);
+ kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+ if (cause & CAUSEF_BD)
+ kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+ else
+ kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+ kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc);
+
+ kvm_change_c0_guest_cause(cop0, (0xff),
+ (T_TRAP << CAUSEB_EXCCODE));
+
+ /* Set PC to the exception entry point */
+ arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+ } else {
+ kvm_err("Trying to deliver TRAP when EXL is already set\n");
+ er = EMULATE_FAIL;
+ }
+
+ return er;
+}
+
+enum emulation_result kvm_mips_emulate_msafpe_exc(unsigned long cause,
+ uint32_t *opc,
+ struct kvm_run *run,
+ struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ enum emulation_result er = EMULATE_DONE;
+
+ if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+ /* save old pc */
+ kvm_write_c0_guest_epc(cop0, arch->pc);
+ kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+ if (cause & CAUSEF_BD)
+ kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+ else
+ kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+ kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc);
+
+ kvm_change_c0_guest_cause(cop0, (0xff),
+ (T_MSAFPE << CAUSEB_EXCCODE));
+
+ /* Set PC to the exception entry point */
+ arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+ } else {
+ kvm_err("Trying to deliver MSAFPE when EXL is already set\n");
+ er = EMULATE_FAIL;
+ }
+
+ return er;
+}
+
+enum emulation_result kvm_mips_emulate_fpe_exc(unsigned long cause,
+ uint32_t *opc,
+ struct kvm_run *run,
+ struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ enum emulation_result er = EMULATE_DONE;
+
+ if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+ /* save old pc */
+ kvm_write_c0_guest_epc(cop0, arch->pc);
+ kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+ if (cause & CAUSEF_BD)
+ kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+ else
+ kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+ kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc);
+
+ kvm_change_c0_guest_cause(cop0, (0xff),
+ (T_FPE << CAUSEB_EXCCODE));
+
+ /* Set PC to the exception entry point */
+ arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+ } else {
+ kvm_err("Trying to deliver FPE when EXL is already set\n");
+ er = EMULATE_FAIL;
+ }
+
+ return er;
+}
+
+enum emulation_result kvm_mips_emulate_msadis_exc(unsigned long cause,
+ uint32_t *opc,
+ struct kvm_run *run,
+ struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ enum emulation_result er = EMULATE_DONE;
+
+ if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+ /* save old pc */
+ kvm_write_c0_guest_epc(cop0, arch->pc);
+ kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+ if (cause & CAUSEF_BD)
+ kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+ else
+ kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+ kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc);
+
+ kvm_change_c0_guest_cause(cop0, (0xff),
+ (T_MSADIS << CAUSEB_EXCCODE));
+
+ /* Set PC to the exception entry point */
+ arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+ } else {
+ kvm_err("Trying to deliver MSADIS when EXL is already set\n");
+ er = EMULATE_FAIL;
+ }
+
+ return er;
+}
+
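The four delivery helpers above (trap, MSAFPE, FPE, MSADIS) share one shape, differing only in the EXCCODE written to guest Cause; a hypothetical condensation, not part of the patch, showing the common sequence:

/* Hypothetical common form of the exception-delivery helpers above. */
static enum emulation_result deliver_guest_exc(struct mips_coproc *cop0,
					       struct kvm_vcpu_arch *arch,
					       unsigned long cause,
					       unsigned int exccode)
{
	if (kvm_read_c0_guest_status(cop0) & ST0_EXL)
		return EMULATE_FAIL;		/* can't deliver while EXL is set */

	kvm_write_c0_guest_epc(cop0, arch->pc);	/* save old pc */
	kvm_set_c0_guest_status(cop0, ST0_EXL);
	if (cause & CAUSEF_BD)
		kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
	else
		kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
	kvm_change_c0_guest_cause(cop0, 0xff, exccode << CAUSEB_EXCCODE);
	arch->pc = KVM_GUEST_KSEG0 + 0x180;	/* common exception vector */
	return EMULATE_DONE;
}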
/* ll/sc, rdhwr, sync emulation */
#define OPCODE 0xfc000000
@@ -2176,6 +2490,10 @@ enum emulation_result kvm_mips_check_privilege(unsigned long cause,
case T_SYSCALL:
case T_BREAK:
case T_RES_INST:
+ case T_TRAP:
+ case T_MSAFPE:
+ case T_FPE:
+ case T_MSADIS:
break;
case T_COP_UNUSABLE:
diff --git a/arch/mips/kvm/fpu.S b/arch/mips/kvm/fpu.S
new file mode 100644
index 000000000000..531fbf5131c0
--- /dev/null
+++ b/arch/mips/kvm/fpu.S
@@ -0,0 +1,122 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * FPU context handling code for KVM.
+ *
+ * Copyright (C) 2015 Imagination Technologies Ltd.
+ */
+
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/fpregdef.h>
+#include <asm/mipsregs.h>
+#include <asm/regdef.h>
+
+ .set noreorder
+ .set noat
+
+LEAF(__kvm_save_fpu)
+ .set push
+ .set mips64r2
+ SET_HARDFLOAT
+ mfc0 t0, CP0_STATUS
+ sll t0, t0, 5 # is Status.FR set?
+ bgez t0, 1f # no: skip odd doubles
+ nop
+ sdc1 $f1, VCPU_FPR1(a0)
+ sdc1 $f3, VCPU_FPR3(a0)
+ sdc1 $f5, VCPU_FPR5(a0)
+ sdc1 $f7, VCPU_FPR7(a0)
+ sdc1 $f9, VCPU_FPR9(a0)
+ sdc1 $f11, VCPU_FPR11(a0)
+ sdc1 $f13, VCPU_FPR13(a0)
+ sdc1 $f15, VCPU_FPR15(a0)
+ sdc1 $f17, VCPU_FPR17(a0)
+ sdc1 $f19, VCPU_FPR19(a0)
+ sdc1 $f21, VCPU_FPR21(a0)
+ sdc1 $f23, VCPU_FPR23(a0)
+ sdc1 $f25, VCPU_FPR25(a0)
+ sdc1 $f27, VCPU_FPR27(a0)
+ sdc1 $f29, VCPU_FPR29(a0)
+ sdc1 $f31, VCPU_FPR31(a0)
+1: sdc1 $f0, VCPU_FPR0(a0)
+ sdc1 $f2, VCPU_FPR2(a0)
+ sdc1 $f4, VCPU_FPR4(a0)
+ sdc1 $f6, VCPU_FPR6(a0)
+ sdc1 $f8, VCPU_FPR8(a0)
+ sdc1 $f10, VCPU_FPR10(a0)
+ sdc1 $f12, VCPU_FPR12(a0)
+ sdc1 $f14, VCPU_FPR14(a0)
+ sdc1 $f16, VCPU_FPR16(a0)
+ sdc1 $f18, VCPU_FPR18(a0)
+ sdc1 $f20, VCPU_FPR20(a0)
+ sdc1 $f22, VCPU_FPR22(a0)
+ sdc1 $f24, VCPU_FPR24(a0)
+ sdc1 $f26, VCPU_FPR26(a0)
+ sdc1 $f28, VCPU_FPR28(a0)
+ jr ra
+ sdc1 $f30, VCPU_FPR30(a0)
+ .set pop
+ END(__kvm_save_fpu)
+
+LEAF(__kvm_restore_fpu)
+ .set push
+ .set mips64r2
+ SET_HARDFLOAT
+ mfc0 t0, CP0_STATUS
+ sll t0, t0, 5 # is Status.FR set?
+ bgez t0, 1f # no: skip odd doubles
+ nop
+ ldc1 $f1, VCPU_FPR1(a0)
+ ldc1 $f3, VCPU_FPR3(a0)
+ ldc1 $f5, VCPU_FPR5(a0)
+ ldc1 $f7, VCPU_FPR7(a0)
+ ldc1 $f9, VCPU_FPR9(a0)
+ ldc1 $f11, VCPU_FPR11(a0)
+ ldc1 $f13, VCPU_FPR13(a0)
+ ldc1 $f15, VCPU_FPR15(a0)
+ ldc1 $f17, VCPU_FPR17(a0)
+ ldc1 $f19, VCPU_FPR19(a0)
+ ldc1 $f21, VCPU_FPR21(a0)
+ ldc1 $f23, VCPU_FPR23(a0)
+ ldc1 $f25, VCPU_FPR25(a0)
+ ldc1 $f27, VCPU_FPR27(a0)
+ ldc1 $f29, VCPU_FPR29(a0)
+ ldc1 $f31, VCPU_FPR31(a0)
+1: ldc1 $f0, VCPU_FPR0(a0)
+ ldc1 $f2, VCPU_FPR2(a0)
+ ldc1 $f4, VCPU_FPR4(a0)
+ ldc1 $f6, VCPU_FPR6(a0)
+ ldc1 $f8, VCPU_FPR8(a0)
+ ldc1 $f10, VCPU_FPR10(a0)
+ ldc1 $f12, VCPU_FPR12(a0)
+ ldc1 $f14, VCPU_FPR14(a0)
+ ldc1 $f16, VCPU_FPR16(a0)
+ ldc1 $f18, VCPU_FPR18(a0)
+ ldc1 $f20, VCPU_FPR20(a0)
+ ldc1 $f22, VCPU_FPR22(a0)
+ ldc1 $f24, VCPU_FPR24(a0)
+ ldc1 $f26, VCPU_FPR26(a0)
+ ldc1 $f28, VCPU_FPR28(a0)
+ jr ra
+ ldc1 $f30, VCPU_FPR30(a0)
+ .set pop
+ END(__kvm_restore_fpu)
+
+LEAF(__kvm_restore_fcsr)
+ .set push
+ SET_HARDFLOAT
+ lw t0, VCPU_FCR31(a0)
+ /*
+ * The ctc1 must stay at this offset in __kvm_restore_fcsr.
+ * See kvm_mips_csr_die_notify(), which handles the case where t0 holds
+ * a value that triggers an FP exception; that exception must be stepped
+ * over and ignored since the set cause bits must remain there for the guest.
+ */
+ ctc1 t0, fcr31
+ jr ra
+ nop
+ .set pop
+ END(__kvm_restore_fcsr)
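The FR test in __kvm_save_fpu/__kvm_restore_fpu works because ST0_FR is bit 26 of Status: shifting left by 5 moves it into the sign bit, so bgez (branch on non-negative) skips the odd doubles exactly when FR=0. A tiny self-check of that equivalence (bit position assumed from asm/mipsregs.h):

#include <assert.h>
#include <stdint.h>

#define ST0_FR (1u << 26)	/* Status.FR, assumed as in asm/mipsregs.h */

int main(void)
{
	uint32_t status = ST0_FR;

	/* "sll t0, t0, 5" puts bit 26 into bit 31; bgez tests the sign bit */
	assert((int32_t)(status << 5) < 0);			/* FR=1: save odd doubles */
	assert((int32_t)((status & ~ST0_FR) << 5) >= 0);	/* FR=0: skip them */
	return 0;
}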
diff --git a/arch/mips/kvm/locore.S b/arch/mips/kvm/locore.S
index 4a68b176d6e4..c567240386a0 100644
--- a/arch/mips/kvm/locore.S
+++ b/arch/mips/kvm/locore.S
@@ -36,6 +36,8 @@
#define PT_HOST_USERLOCAL PT_EPC
#define CP0_DDATA_LO $28,3
+#define CP0_CONFIG3 $16,3
+#define CP0_CONFIG5 $16,5
#define CP0_EBASE $15,1
#define CP0_INTCTL $12,1
@@ -353,6 +355,42 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
LONG_L k0, VCPU_HOST_EBASE(k1)
mtc0 k0,CP0_EBASE
+ /*
+ * If FPU is enabled, save FCR31 and clear it so that later ctc1's don't
+ * trigger FPE for pending exceptions.
+ */
+ .set at
+ and v1, v0, ST0_CU1
+ beqz v1, 1f
+ nop
+ .set push
+ SET_HARDFLOAT
+ cfc1 t0, fcr31
+ sw t0, VCPU_FCR31(k1)
+ ctc1 zero,fcr31
+ .set pop
+ .set noat
+1:
+
+#ifdef CONFIG_CPU_HAS_MSA
+ /*
+ * If MSA is enabled, save MSACSR and clear it so that later
+ * instructions don't trigger MSAFPE for pending exceptions.
+ */
+ mfc0 t0, CP0_CONFIG3
+ ext t0, t0, 28, 1 /* MIPS_CONF3_MSAP */
+ beqz t0, 1f
+ nop
+ mfc0 t0, CP0_CONFIG5
+ ext t0, t0, 27, 1 /* MIPS_CONF5_MSAEN */
+ beqz t0, 1f
+ nop
+ _cfcmsa t0, MSA_CSR
+ sw t0, VCPU_MSA_CSR(k1)
+ _ctcmsa MSA_CSR, zero
+1:
+#endif
+
/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
.set at
and v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index c9eccf5df912..bb68e8d520e8 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -11,6 +11,7 @@
#include <linux/errno.h>
#include <linux/err.h>
+#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
@@ -48,6 +49,10 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "syscall", VCPU_STAT(syscall_exits), KVM_STAT_VCPU },
{ "resvd_inst", VCPU_STAT(resvd_inst_exits), KVM_STAT_VCPU },
{ "break_inst", VCPU_STAT(break_inst_exits), KVM_STAT_VCPU },
+ { "trap_inst", VCPU_STAT(trap_inst_exits), KVM_STAT_VCPU },
+ { "msa_fpe", VCPU_STAT(msa_fpe_exits), KVM_STAT_VCPU },
+ { "fpe", VCPU_STAT(fpe_exits), KVM_STAT_VCPU },
+ { "msa_disabled", VCPU_STAT(msa_disabled_exits), KVM_STAT_VCPU },
{ "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU },
{ "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU },
@@ -504,10 +509,13 @@ static u64 kvm_mips_get_one_regs[] = {
KVM_REG_MIPS_CP0_STATUS,
KVM_REG_MIPS_CP0_CAUSE,
KVM_REG_MIPS_CP0_EPC,
+ KVM_REG_MIPS_CP0_PRID,
KVM_REG_MIPS_CP0_CONFIG,
KVM_REG_MIPS_CP0_CONFIG1,
KVM_REG_MIPS_CP0_CONFIG2,
KVM_REG_MIPS_CP0_CONFIG3,
+ KVM_REG_MIPS_CP0_CONFIG4,
+ KVM_REG_MIPS_CP0_CONFIG5,
KVM_REG_MIPS_CP0_CONFIG7,
KVM_REG_MIPS_CP0_ERROREPC,
@@ -520,10 +528,14 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
int ret;
s64 v;
+ s64 vs[2];
+ unsigned int idx;
switch (reg->id) {
+ /* General purpose registers */
case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
break;
@@ -537,6 +549,67 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
v = (long)vcpu->arch.pc;
break;
+ /* Floating point registers */
+ case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
+ if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+ return -EINVAL;
+ idx = reg->id - KVM_REG_MIPS_FPR_32(0);
+ /* Odd singles in top of even double when FR=0 */
+ if (kvm_read_c0_guest_status(cop0) & ST0_FR)
+ v = get_fpr32(&fpu->fpr[idx], 0);
+ else
+ v = get_fpr32(&fpu->fpr[idx & ~1], idx & 1);
+ break;
+ case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
+ if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+ return -EINVAL;
+ idx = reg->id - KVM_REG_MIPS_FPR_64(0);
+ /* Can't access odd doubles in FR=0 mode */
+ if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
+ return -EINVAL;
+ v = get_fpr64(&fpu->fpr[idx], 0);
+ break;
+ case KVM_REG_MIPS_FCR_IR:
+ if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+ return -EINVAL;
+ v = boot_cpu_data.fpu_id;
+ break;
+ case KVM_REG_MIPS_FCR_CSR:
+ if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+ return -EINVAL;
+ v = fpu->fcr31;
+ break;
+
+ /* MIPS SIMD Architecture (MSA) registers */
+ case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
+ if (!kvm_mips_guest_has_msa(&vcpu->arch))
+ return -EINVAL;
+ /* Can't access MSA registers in FR=0 mode */
+ if (!(kvm_read_c0_guest_status(cop0) & ST0_FR))
+ return -EINVAL;
+ idx = reg->id - KVM_REG_MIPS_VEC_128(0);
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ /* least significant byte first */
+ vs[0] = get_fpr64(&fpu->fpr[idx], 0);
+ vs[1] = get_fpr64(&fpu->fpr[idx], 1);
+#else
+ /* most significant byte first */
+ vs[0] = get_fpr64(&fpu->fpr[idx], 1);
+ vs[1] = get_fpr64(&fpu->fpr[idx], 0);
+#endif
+ break;
+ case KVM_REG_MIPS_MSA_IR:
+ if (!kvm_mips_guest_has_msa(&vcpu->arch))
+ return -EINVAL;
+ v = boot_cpu_data.msa_id;
+ break;
+ case KVM_REG_MIPS_MSA_CSR:
+ if (!kvm_mips_guest_has_msa(&vcpu->arch))
+ return -EINVAL;
+ v = fpu->msacsr;
+ break;
+
+ /* Co-processor 0 registers */
case KVM_REG_MIPS_CP0_INDEX:
v = (long)kvm_read_c0_guest_index(cop0);
break;
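With FR=0 the 32 single-precision registers are packed into the 16 even-numbered doubles, each odd single living in the upper half of the preceding even double; the idx & ~1 / idx & 1 arithmetic above selects the double and the half. A simplified model of that mapping (illustrative only, not the kernel's fpr/get_fpr32 types):

/* Simplified model of the FR=0 single-precision register packing. */
struct model_fpr { unsigned int half[2]; };	/* one architectural double */

static unsigned int read_single_fr0(const struct model_fpr *fpr,
				    unsigned int idx)
{
	/* odd single idx is stored in half 1 of even double idx & ~1 */
	return fpr[idx & ~1].half[idx & 1];
}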
@@ -573,8 +646,8 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
case KVM_REG_MIPS_CP0_EPC:
v = (long)kvm_read_c0_guest_epc(cop0);
break;
- case KVM_REG_MIPS_CP0_ERROREPC:
- v = (long)kvm_read_c0_guest_errorepc(cop0);
+ case KVM_REG_MIPS_CP0_PRID:
+ v = (long)kvm_read_c0_guest_prid(cop0);
break;
case KVM_REG_MIPS_CP0_CONFIG:
v = (long)kvm_read_c0_guest_config(cop0);
@@ -588,9 +661,18 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
case KVM_REG_MIPS_CP0_CONFIG3:
v = (long)kvm_read_c0_guest_config3(cop0);
break;
+ case KVM_REG_MIPS_CP0_CONFIG4:
+ v = (long)kvm_read_c0_guest_config4(cop0);
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG5:
+ v = (long)kvm_read_c0_guest_config5(cop0);
+ break;
case KVM_REG_MIPS_CP0_CONFIG7:
v = (long)kvm_read_c0_guest_config7(cop0);
break;
+ case KVM_REG_MIPS_CP0_ERROREPC:
+ v = (long)kvm_read_c0_guest_errorepc(cop0);
+ break;
/* registers to be handled specially */
case KVM_REG_MIPS_CP0_COUNT:
case KVM_REG_MIPS_COUNT_CTL:
@@ -612,6 +694,10 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
u32 v32 = (u32)v;
return put_user(v32, uaddr32);
+ } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
+ void __user *uaddr = (void __user *)(long)reg->addr;
+
+ return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0;
} else {
return -EINVAL;
}
@@ -621,7 +707,10 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
- u64 v;
+ struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
+ s64 v;
+ s64 vs[2];
+ unsigned int idx;
if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
@@ -635,11 +724,16 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
if (get_user(v32, uaddr32) != 0)
return -EFAULT;
v = (s64)v32;
+ } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
+ void __user *uaddr = (void __user *)(long)reg->addr;
+
+ if (copy_from_user(vs, uaddr, 16) != 0)
+ return -EFAULT;
} else {
return -EINVAL;
}
switch (reg->id) {
+ /* General purpose registers */
case KVM_REG_MIPS_R0:
/* Silently ignore requests to set $0 */
break;
@@ -656,6 +750,64 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
vcpu->arch.pc = v;
break;
+ /* Floating point registers */
+ case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
+ if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+ return -EINVAL;
+ idx = reg->id - KVM_REG_MIPS_FPR_32(0);
+ /* Odd singles in top of even double when FR=0 */
+ if (kvm_read_c0_guest_status(cop0) & ST0_FR)
+ set_fpr32(&fpu->fpr[idx], 0, v);
+ else
+ set_fpr32(&fpu->fpr[idx & ~1], idx & 1, v);
+ break;
+ case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
+ if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+ return -EINVAL;
+ idx = reg->id - KVM_REG_MIPS_FPR_64(0);
+ /* Can't access odd doubles in FR=0 mode */
+ if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
+ return -EINVAL;
+ set_fpr64(&fpu->fpr[idx], 0, v);
+ break;
+ case KVM_REG_MIPS_FCR_IR:
+ if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+ return -EINVAL;
+ /* Read-only */
+ break;
+ case KVM_REG_MIPS_FCR_CSR:
+ if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+ return -EINVAL;
+ fpu->fcr31 = v;
+ break;
+
+ /* MIPS SIMD Architecture (MSA) registers */
+ case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
+ if (!kvm_mips_guest_has_msa(&vcpu->arch))
+ return -EINVAL;
+ idx = reg->id - KVM_REG_MIPS_VEC_128(0);
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ /* least significant byte first */
+ set_fpr64(&fpu->fpr[idx], 0, vs[0]);
+ set_fpr64(&fpu->fpr[idx], 1, vs[1]);
+#else
+ /* most significant byte first */
+ set_fpr64(&fpu->fpr[idx], 1, vs[0]);
+ set_fpr64(&fpu->fpr[idx], 0, vs[1]);
+#endif
+ break;
+ case KVM_REG_MIPS_MSA_IR:
+ if (!kvm_mips_guest_has_msa(&vcpu->arch))
+ return -EINVAL;
+ /* Read-only */
+ break;
+ case KVM_REG_MIPS_MSA_CSR:
+ if (!kvm_mips_guest_has_msa(&vcpu->arch))
+ return -EINVAL;
+ fpu->msacsr = v;
+ break;
+
+ /* Co-processor 0 registers */
case KVM_REG_MIPS_CP0_INDEX:
kvm_write_c0_guest_index(cop0, v);
break;
@@ -686,6 +838,9 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
case KVM_REG_MIPS_CP0_EPC:
kvm_write_c0_guest_epc(cop0, v);
break;
+ case KVM_REG_MIPS_CP0_PRID:
+ kvm_write_c0_guest_prid(cop0, v);
+ break;
case KVM_REG_MIPS_CP0_ERROREPC:
kvm_write_c0_guest_errorepc(cop0, v);
break;
@@ -693,6 +848,12 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
case KVM_REG_MIPS_CP0_COUNT:
case KVM_REG_MIPS_CP0_COMPARE:
case KVM_REG_MIPS_CP0_CAUSE:
+ case KVM_REG_MIPS_CP0_CONFIG:
+ case KVM_REG_MIPS_CP0_CONFIG1:
+ case KVM_REG_MIPS_CP0_CONFIG2:
+ case KVM_REG_MIPS_CP0_CONFIG3:
+ case KVM_REG_MIPS_CP0_CONFIG4:
+ case KVM_REG_MIPS_CP0_CONFIG5:
case KVM_REG_MIPS_COUNT_CTL:
case KVM_REG_MIPS_COUNT_RESUME:
case KVM_REG_MIPS_COUNT_HZ:
@@ -703,6 +864,33 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
return 0;
}
+static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
+ struct kvm_enable_cap *cap)
+{
+ int r = 0;
+
+ if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap))
+ return -EINVAL;
+ if (cap->flags)
+ return -EINVAL;
+ if (cap->args[0])
+ return -EINVAL;
+
+ switch (cap->cap) {
+ case KVM_CAP_MIPS_FPU:
+ vcpu->arch.fpu_enabled = true;
+ break;
+ case KVM_CAP_MIPS_MSA:
+ vcpu->arch.msa_enabled = true;
+ break;
+ default:
+ r = -EINVAL;
+ break;
+ }
+
+ return r;
+}
+
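From userspace, the new capabilities are enabled per vcpu with the standard KVM_ENABLE_CAP ioctl; a minimal sketch of how a VMM might do it (error handling trimmed, vcpu_fd assumed to be an already-created vcpu file descriptor):

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

/* Enable guest FPU support on a vcpu; returns 0 on success, -1 on error. */
static int enable_mips_fpu(int vcpu_fd)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));	/* flags and args[0] must be zero */
	cap.cap = KVM_CAP_MIPS_FPU;
	return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
}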
long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
unsigned long arg)
{
@@ -760,6 +948,15 @@ long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
break;
}
+ case KVM_ENABLE_CAP: {
+ struct kvm_enable_cap cap;
+
+ r = -EFAULT;
+ if (copy_from_user(&cap, argp, sizeof(cap)))
+ goto out;
+ r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
+ break;
+ }
default:
r = -ENOIOCTLCMD;
}
@@ -868,11 +1065,30 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
switch (ext) {
case KVM_CAP_ONE_REG:
+ case KVM_CAP_ENABLE_CAP:
r = 1;
break;
case KVM_CAP_COALESCED_MMIO:
r = KVM_COALESCED_MMIO_PAGE_OFFSET;
break;
+ case KVM_CAP_MIPS_FPU:
+ r = !!cpu_has_fpu;
+ break;
+ case KVM_CAP_MIPS_MSA:
+ /*
+ * We don't support MSA vector partitioning yet:
+ * 1) It would require explicit support which can't be tested
+ * yet due to lack of support in current hardware.
+ * 2) It extends the state that would need to be saved/restored
+ * by e.g. QEMU for migration.
+ *
+ * When vector partitioning hardware becomes available, support
+ * could be added by requiring a flag when enabling
+ * KVM_CAP_MIPS_MSA capability to indicate that userland knows
+ * to save/restore the appropriate extra state.
+ */
+ r = cpu_has_msa && !(boot_cpu_data.msa_id & MSA_IR_WRPF);
+ break;
default:
r = 0;
break;
@@ -1119,6 +1335,30 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
ret = kvm_mips_callbacks->handle_break(vcpu);
break;
+ case T_TRAP:
+ ++vcpu->stat.trap_inst_exits;
+ trace_kvm_exit(vcpu, TRAP_INST_EXITS);
+ ret = kvm_mips_callbacks->handle_trap(vcpu);
+ break;
+
+ case T_MSAFPE:
+ ++vcpu->stat.msa_fpe_exits;
+ trace_kvm_exit(vcpu, MSA_FPE_EXITS);
+ ret = kvm_mips_callbacks->handle_msa_fpe(vcpu);
+ break;
+
+ case T_FPE:
+ ++vcpu->stat.fpe_exits;
+ trace_kvm_exit(vcpu, FPE_EXITS);
+ ret = kvm_mips_callbacks->handle_fpe(vcpu);
+ break;
+
+ case T_MSADIS:
+ ++vcpu->stat.msa_disabled_exits;
+ trace_kvm_exit(vcpu, MSA_DISABLED_EXITS);
+ ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
+ break;
+
default:
kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
@@ -1146,12 +1386,233 @@ skip_emul:
}
}
+ if (ret == RESUME_GUEST) {
+ /*
+ * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
+ * is live), restore FCR31 / MSACSR.
+ *
+ * This should be before returning to the guest exception
+ * vector, as it may well cause an [MSA] FP exception if there
+ * are pending exception bits unmasked. (see
+ * kvm_mips_csr_die_notifier() for how that is handled).
+ */
+ if (kvm_mips_guest_has_fpu(&vcpu->arch) &&
+ read_c0_status() & ST0_CU1)
+ __kvm_restore_fcsr(&vcpu->arch);
+
+ if (kvm_mips_guest_has_msa(&vcpu->arch) &&
+ read_c0_config5() & MIPS_CONF5_MSAEN)
+ __kvm_restore_msacsr(&vcpu->arch);
+ }
+
/* Disable HTW before returning to guest or host */
htw_stop();
return ret;
}
+/* Enable FPU for guest and restore context */
+void kvm_own_fpu(struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ unsigned int sr, cfg5;
+
+ preempt_disable();
+
+ sr = kvm_read_c0_guest_status(cop0);
+
+ /*
+ * If MSA state is already live, it is undefined how it interacts with
+ * FR=0 FPU state, and we don't want to hit reserved instruction
+ * exceptions trying to save the MSA state later when CU=1 && FR=1, so
+ * play it safe and save it first.
+ *
+ * In theory we shouldn't ever hit this case since kvm_lose_fpu() should
+ * get called when guest CU1 is set, however we can't trust the guest
+ * not to clobber the status register directly via the commpage.
+ */
+ if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) &&
+ vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
+ kvm_lose_fpu(vcpu);
+
+ /*
+ * Enable FPU for guest
+ * We set FR and FRE according to guest context
+ */
+ change_c0_status(ST0_CU1 | ST0_FR, sr);
+ if (cpu_has_fre) {
+ cfg5 = kvm_read_c0_guest_config5(cop0);
+ change_c0_config5(MIPS_CONF5_FRE, cfg5);
+ }
+ enable_fpu_hazard();
+
+ /* If guest FPU state not active, restore it now */
+ if (!(vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)) {
+ __kvm_restore_fpu(&vcpu->arch);
+ vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU;
+ }
+
+ preempt_enable();
+}
+
+#ifdef CONFIG_CPU_HAS_MSA
+/* Enable MSA for guest and restore context */
+void kvm_own_msa(struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ unsigned int sr, cfg5;
+
+ preempt_disable();
+
+ /*
+ * Enable FPU if enabled in guest, since we're restoring FPU context
+ * anyway. We set FR and FRE according to guest context.
+ */
+ if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
+ sr = kvm_read_c0_guest_status(cop0);
+
+ /*
+ * If FR=0 FPU state is already live, it is undefined how it
+ * interacts with MSA state, so play it safe and save it first.
+ */
+ if (!(sr & ST0_FR) &&
+ (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU |
+ KVM_MIPS_FPU_MSA)) == KVM_MIPS_FPU_FPU)
+ kvm_lose_fpu(vcpu);
+
+ change_c0_status(ST0_CU1 | ST0_FR, sr);
+ if (sr & ST0_CU1 && cpu_has_fre) {
+ cfg5 = kvm_read_c0_guest_config5(cop0);
+ change_c0_config5(MIPS_CONF5_FRE, cfg5);
+ }
+ }
+
+ /* Enable MSA for guest */
+ set_c0_config5(MIPS_CONF5_MSAEN);
+ enable_fpu_hazard();
+
+ switch (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA)) {
+ case KVM_MIPS_FPU_FPU:
+ /*
+ * Guest FPU state already loaded, only restore upper MSA state
+ */
+ __kvm_restore_msa_upper(&vcpu->arch);
+ vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA;
+ break;
+ case 0:
+ /* Neither FPU nor MSA already active, restore full MSA state */
+ __kvm_restore_msa(&vcpu->arch);
+ vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA;
+ if (kvm_mips_guest_has_fpu(&vcpu->arch))
+ vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU;
+ break;
+ default:
+ break;
+ }
+
+ preempt_enable();
+}
+#endif
+
+/* Drop FPU & MSA without saving it */
+void kvm_drop_fpu(struct kvm_vcpu *vcpu)
+{
+ preempt_disable();
+ if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) {
+ disable_msa();
+ vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_MSA;
+ }
+ if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
+ clear_c0_status(ST0_CU1 | ST0_FR);
+ vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU;
+ }
+ preempt_enable();
+}
+
+/* Save and disable FPU & MSA */
+void kvm_lose_fpu(struct kvm_vcpu *vcpu)
+{
+ /*
+ * FPU & MSA get disabled in root context (hardware) when they are disabled
+ * in guest context (software), but the register state in the hardware
+ * may still be in use. This is why we explicitly re-enable the hardware
+ * before saving.
+ */
+
+ preempt_disable();
+ if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) {
+ set_c0_config5(MIPS_CONF5_MSAEN);
+ enable_fpu_hazard();
+
+ __kvm_save_msa(&vcpu->arch);
+
+ /* Disable MSA & FPU */
+ disable_msa();
+ if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
+ clear_c0_status(ST0_CU1 | ST0_FR);
+ vcpu->arch.fpu_inuse &= ~(KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA);
+ } else if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
+ set_c0_status(ST0_CU1);
+ enable_fpu_hazard();
+
+ __kvm_save_fpu(&vcpu->arch);
+ vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU;
+
+ /* Disable FPU */
+ clear_c0_status(ST0_CU1 | ST0_FR);
+ }
+ preempt_enable();
+}
+
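Taken together, kvm_own_fpu/kvm_own_msa/kvm_drop_fpu/kvm_lose_fpu implement a small lazy-context state machine over vcpu->arch.fpu_inuse; an illustrative summary (not part of the patch):

/*
 * fpu_inuse state      event            new state and action
 * ----------------     --------------   --------------------------------
 * 0                    kvm_own_fpu()    FPU      (restore FPU regs)
 * 0                    kvm_own_msa()    FPU|MSA  (restore full MSA state)
 * FPU                  kvm_own_msa()    FPU|MSA  (restore upper halves only)
 * FPU and/or MSA       kvm_lose_fpu()   0        (save state, then disable)
 * FPU and/or MSA       kvm_drop_fpu()   0        (discard state, no save)
 */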
+/*
+ * Step over a specific ctc1 to FCSR and a specific ctcmsa to MSACSR which are
+ * used to restore guest FCSR/MSACSR state and may trigger a "harmless" FP/MSAFP
+ * exception if cause bits are set in the value being written.
+ */
+static int kvm_mips_csr_die_notify(struct notifier_block *self,
+ unsigned long cmd, void *ptr)
+{
+ struct die_args *args = (struct die_args *)ptr;
+ struct pt_regs *regs = args->regs;
+ unsigned long pc;
+
+ /* Only interested in FPE and MSAFPE */
+ if (cmd != DIE_FP && cmd != DIE_MSAFP)
+ return NOTIFY_DONE;
+
+ /* Return immediately if guest context isn't active */
+ if (!(current->flags & PF_VCPU))
+ return NOTIFY_DONE;
+
+ /* Should never get here from user mode */
+ BUG_ON(user_mode(regs));
+
+ pc = instruction_pointer(regs);
+ switch (cmd) {
+ case DIE_FP:
+ /* match 2nd instruction in __kvm_restore_fcsr */
+ if (pc != (unsigned long)&__kvm_restore_fcsr + 4)
+ return NOTIFY_DONE;
+ break;
+ case DIE_MSAFP:
+ /* match 2nd/3rd instruction in __kvm_restore_msacsr */
+ if (!cpu_has_msa ||
+ pc < (unsigned long)&__kvm_restore_msacsr + 4 ||
+ pc > (unsigned long)&__kvm_restore_msacsr + 8)
+ return NOTIFY_DONE;
+ break;
+ }
+
+ /* Move PC forward a little and continue executing */
+ instruction_pointer(regs) += 4;
+
+ return NOTIFY_STOP;
+}
+
+static struct notifier_block kvm_mips_csr_die_notifier = {
+ .notifier_call = kvm_mips_csr_die_notify,
+};
+
int __init kvm_mips_init(void)
{
int ret;
@@ -1161,6 +1622,8 @@ int __init kvm_mips_init(void)
if (ret)
return ret;
+ register_die_notifier(&kvm_mips_csr_die_notifier);
+
/*
* On MIPS, kernel modules are executed from "mapped space", which
* requires TLBs. The TLB handling code is statically linked with
@@ -1173,7 +1636,6 @@ int __init kvm_mips_init(void)
kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
kvm_mips_is_error_pfn = is_error_pfn;
- pr_info("KVM/MIPS Initialized\n");
return 0;
}
@@ -1185,7 +1647,7 @@ void __exit kvm_mips_exit(void)
kvm_mips_release_pfn_clean = NULL;
kvm_mips_is_error_pfn = NULL;
- pr_info("KVM/MIPS unloaded\n");
+ unregister_die_notifier(&kvm_mips_csr_die_notifier);
}
module_init(kvm_mips_init);
diff --git a/arch/mips/kvm/msa.S b/arch/mips/kvm/msa.S
new file mode 100644
index 000000000000..d02f0c6cc2cc
--- /dev/null
+++ b/arch/mips/kvm/msa.S
@@ -0,0 +1,161 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * MIPS SIMD Architecture (MSA) context handling code for KVM.
+ *
+ * Copyright (C) 2015 Imagination Technologies Ltd.
+ */
+
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/asmmacro.h>
+#include <asm/regdef.h>
+
+ .set noreorder
+ .set noat
+
+LEAF(__kvm_save_msa)
+ st_d 0, VCPU_FPR0, a0
+ st_d 1, VCPU_FPR1, a0
+ st_d 2, VCPU_FPR2, a0
+ st_d 3, VCPU_FPR3, a0
+ st_d 4, VCPU_FPR4, a0
+ st_d 5, VCPU_FPR5, a0
+ st_d 6, VCPU_FPR6, a0
+ st_d 7, VCPU_FPR7, a0
+ st_d 8, VCPU_FPR8, a0
+ st_d 9, VCPU_FPR9, a0
+ st_d 10, VCPU_FPR10, a0
+ st_d 11, VCPU_FPR11, a0
+ st_d 12, VCPU_FPR12, a0
+ st_d 13, VCPU_FPR13, a0
+ st_d 14, VCPU_FPR14, a0
+ st_d 15, VCPU_FPR15, a0
+ st_d 16, VCPU_FPR16, a0
+ st_d 17, VCPU_FPR17, a0
+ st_d 18, VCPU_FPR18, a0
+ st_d 19, VCPU_FPR19, a0
+ st_d 20, VCPU_FPR20, a0
+ st_d 21, VCPU_FPR21, a0
+ st_d 22, VCPU_FPR22, a0
+ st_d 23, VCPU_FPR23, a0
+ st_d 24, VCPU_FPR24, a0
+ st_d 25, VCPU_FPR25, a0
+ st_d 26, VCPU_FPR26, a0
+ st_d 27, VCPU_FPR27, a0
+ st_d 28, VCPU_FPR28, a0
+ st_d 29, VCPU_FPR29, a0
+ st_d 30, VCPU_FPR30, a0
+ st_d 31, VCPU_FPR31, a0
+ jr ra
+ nop
+ END(__kvm_save_msa)
+
+LEAF(__kvm_restore_msa)
+ ld_d 0, VCPU_FPR0, a0
+ ld_d 1, VCPU_FPR1, a0
+ ld_d 2, VCPU_FPR2, a0
+ ld_d 3, VCPU_FPR3, a0
+ ld_d 4, VCPU_FPR4, a0
+ ld_d 5, VCPU_FPR5, a0
+ ld_d 6, VCPU_FPR6, a0
+ ld_d 7, VCPU_FPR7, a0
+ ld_d 8, VCPU_FPR8, a0
+ ld_d 9, VCPU_FPR9, a0
+ ld_d 10, VCPU_FPR10, a0
+ ld_d 11, VCPU_FPR11, a0
+ ld_d 12, VCPU_FPR12, a0
+ ld_d 13, VCPU_FPR13, a0
+ ld_d 14, VCPU_FPR14, a0
+ ld_d 15, VCPU_FPR15, a0
+ ld_d 16, VCPU_FPR16, a0
+ ld_d 17, VCPU_FPR17, a0
+ ld_d 18, VCPU_FPR18, a0
+ ld_d 19, VCPU_FPR19, a0
+ ld_d 20, VCPU_FPR20, a0
+ ld_d 21, VCPU_FPR21, a0
+ ld_d 22, VCPU_FPR22, a0
+ ld_d 23, VCPU_FPR23, a0
+ ld_d 24, VCPU_FPR24, a0
+ ld_d 25, VCPU_FPR25, a0
+ ld_d 26, VCPU_FPR26, a0
+ ld_d 27, VCPU_FPR27, a0
+ ld_d 28, VCPU_FPR28, a0
+ ld_d 29, VCPU_FPR29, a0
+ ld_d 30, VCPU_FPR30, a0
+ ld_d 31, VCPU_FPR31, a0
+ jr ra
+ nop
+ END(__kvm_restore_msa)
+
+ .macro kvm_restore_msa_upper wr, off, base
+ .set push
+ .set noat
+#ifdef CONFIG_64BIT
+ ld $1, \off(\base)
+ insert_d \wr, 1
+#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
+ lw $1, \off(\base)
+ insert_w \wr, 2
+ lw $1, (\off+4)(\base)
+ insert_w \wr, 3
+#else /* CONFIG_CPU_BIG_ENDIAN */
+ lw $1, (\off+4)(\base)
+ insert_w \wr, 2
+ lw $1, \off(\base)
+ insert_w \wr, 3
+#endif
+ .set pop
+ .endm
+
+LEAF(__kvm_restore_msa_upper)
+ kvm_restore_msa_upper 0, VCPU_FPR0 +8, a0
+ kvm_restore_msa_upper 1, VCPU_FPR1 +8, a0
+ kvm_restore_msa_upper 2, VCPU_FPR2 +8, a0
+ kvm_restore_msa_upper 3, VCPU_FPR3 +8, a0
+ kvm_restore_msa_upper 4, VCPU_FPR4 +8, a0
+ kvm_restore_msa_upper 5, VCPU_FPR5 +8, a0
+ kvm_restore_msa_upper 6, VCPU_FPR6 +8, a0
+ kvm_restore_msa_upper 7, VCPU_FPR7 +8, a0
+ kvm_restore_msa_upper 8, VCPU_FPR8 +8, a0
+ kvm_restore_msa_upper 9, VCPU_FPR9 +8, a0
+ kvm_restore_msa_upper 10, VCPU_FPR10+8, a0
+ kvm_restore_msa_upper 11, VCPU_FPR11+8, a0
+ kvm_restore_msa_upper 12, VCPU_FPR12+8, a0
+ kvm_restore_msa_upper 13, VCPU_FPR13+8, a0
+ kvm_restore_msa_upper 14, VCPU_FPR14+8, a0
+ kvm_restore_msa_upper 15, VCPU_FPR15+8, a0
+ kvm_restore_msa_upper 16, VCPU_FPR16+8, a0
+ kvm_restore_msa_upper 17, VCPU_FPR17+8, a0
+ kvm_restore_msa_upper 18, VCPU_FPR18+8, a0
+ kvm_restore_msa_upper 19, VCPU_FPR19+8, a0
+ kvm_restore_msa_upper 20, VCPU_FPR20+8, a0
+ kvm_restore_msa_upper 21, VCPU_FPR21+8, a0
+ kvm_restore_msa_upper 22, VCPU_FPR22+8, a0
+ kvm_restore_msa_upper 23, VCPU_FPR23+8, a0
+ kvm_restore_msa_upper 24, VCPU_FPR24+8, a0
+ kvm_restore_msa_upper 25, VCPU_FPR25+8, a0
+ kvm_restore_msa_upper 26, VCPU_FPR26+8, a0
+ kvm_restore_msa_upper 27, VCPU_FPR27+8, a0
+ kvm_restore_msa_upper 28, VCPU_FPR28+8, a0
+ kvm_restore_msa_upper 29, VCPU_FPR29+8, a0
+ kvm_restore_msa_upper 30, VCPU_FPR30+8, a0
+ kvm_restore_msa_upper 31, VCPU_FPR31+8, a0
+ jr ra
+ nop
+ END(__kvm_restore_msa_upper)
+
+LEAF(__kvm_restore_msacsr)
+ lw t0, VCPU_MSA_CSR(a0)
+ /*
+ * The ctcmsa must stay at this offset in __kvm_restore_msacsr.
+ * See kvm_mips_csr_die_notify(), which handles the case where t0 holds
+ * a value that triggers an MSA FP exception; that exception must be stepped
+ * over and ignored since the set cause bits must remain there for the guest.
+ */
+ _ctcmsa MSA_CSR, t0
+ jr ra
+ nop
+ END(__kvm_restore_msacsr)
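__kvm_restore_msa_upper touches only the upper 8 bytes of each 16-byte vector slot, so the lower half shared with the scalar FPU registers is preserved; a sketch of the assumed layout (illustrative):

#include <stdint.h>

/* Illustrative layout of one saved vector register in the vcpu: the FPU
 * save/restore paths use bytes 0..7, __kvm_restore_msa_upper bytes 8..15. */
struct saved_vreg {
	uint64_t fpu_half;	/* offset +0: doubleword shared with the FPR */
	uint64_t msa_upper;	/* offset +8: MSA-only upper half (VCPU_FPRn+8) */
};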
diff --git a/arch/mips/kvm/stats.c b/arch/mips/kvm/stats.c
index a74d6024c5ad..888bb67070ac 100644
--- a/arch/mips/kvm/stats.c
+++ b/arch/mips/kvm/stats.c
@@ -25,6 +25,10 @@ char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES] = {
"System Call",
"Reserved Inst",
"Break Inst",
+ "Trap Inst",
+ "MSA FPE",
+ "FPE",
+ "MSA Disabled",
"D-Cache Flushes",
};
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
index b6beb0e07b1b..aed0ac2a4972 100644
--- a/arch/mips/kvm/tlb.c
+++ b/arch/mips/kvm/tlb.c
@@ -733,6 +733,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
}
}
+ /* restore guest state to registers */
+ kvm_mips_callbacks->vcpu_set_regs(vcpu);
+
local_irq_restore(flags);
}
@@ -751,6 +754,9 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
vcpu->arch.preempt_entryhi = read_c0_entryhi();
vcpu->arch.last_sched_cpu = cpu;
+ /* save guest state in registers */
+ kvm_mips_callbacks->vcpu_get_regs(vcpu);
+
if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
ASID_VERSION_MASK)) {
kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index fd7257b70e65..d836ed5b0bc7 100644
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -39,16 +39,30 @@ static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_run *run = vcpu->run;
uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
unsigned long cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
- if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1)
- er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
- else
+ if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
+ /* FPU Unusable */
+ if (!kvm_mips_guest_has_fpu(&vcpu->arch) ||
+ (kvm_read_c0_guest_status(cop0) & ST0_CU1) == 0) {
+ /*
+ * Unusable/no FPU in guest:
+ * deliver guest COP1 Unusable Exception
+ */
+ er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
+ } else {
+ /* Restore FPU state */
+ kvm_own_fpu(vcpu);
+ er = EMULATE_DONE;
+ }
+ } else {
er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
+ }
switch (er) {
case EMULATE_DONE:
@@ -330,6 +344,107 @@ static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
return ret;
}
+static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
+{
+ struct kvm_run *run = vcpu->run;
+ uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc;
+ unsigned long cause = vcpu->arch.host_cp0_cause;
+ enum emulation_result er = EMULATE_DONE;
+ int ret = RESUME_GUEST;
+
+ er = kvm_mips_emulate_trap_exc(cause, opc, run, vcpu);
+ if (er == EMULATE_DONE) {
+ ret = RESUME_GUEST;
+ } else {
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ return ret;
+}
+
+static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
+{
+ struct kvm_run *run = vcpu->run;
+ uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc;
+ unsigned long cause = vcpu->arch.host_cp0_cause;
+ enum emulation_result er = EMULATE_DONE;
+ int ret = RESUME_GUEST;
+
+ er = kvm_mips_emulate_msafpe_exc(cause, opc, run, vcpu);
+ if (er == EMULATE_DONE) {
+ ret = RESUME_GUEST;
+ } else {
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ return ret;
+}
+
+static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
+{
+ struct kvm_run *run = vcpu->run;
+ uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc;
+ unsigned long cause = vcpu->arch.host_cp0_cause;
+ enum emulation_result er = EMULATE_DONE;
+ int ret = RESUME_GUEST;
+
+ er = kvm_mips_emulate_fpe_exc(cause, opc, run, vcpu);
+ if (er == EMULATE_DONE) {
+ ret = RESUME_GUEST;
+ } else {
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ return ret;
+}
+
+/**
+ * kvm_trap_emul_handle_msa_disabled() - Guest used MSA while disabled in root.
+ * @vcpu: Virtual CPU context.
+ *
+ * Handle when the guest attempts to use MSA when it is disabled.
+ */
+static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_run *run = vcpu->run;
+ uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+ unsigned long cause = vcpu->arch.host_cp0_cause;
+ enum emulation_result er = EMULATE_DONE;
+ int ret = RESUME_GUEST;
+
+ if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
+ (kvm_read_c0_guest_status(cop0) & (ST0_CU1 | ST0_FR)) == ST0_CU1) {
+ /*
+ * No MSA in guest, or FPU enabled but not in FR=1 mode:
+ * deliver a guest Reserved Instruction exception
+ */
+ er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
+ } else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) {
+ /* MSA disabled by guest, guest MSA disabled exception */
+ er = kvm_mips_emulate_msadis_exc(cause, opc, run, vcpu);
+ } else {
+ /* Restore MSA/FPU state */
+ kvm_own_msa(vcpu);
+ er = EMULATE_DONE;
+ }
+
+ switch (er) {
+ case EMULATE_DONE:
+ ret = RESUME_GUEST;
+ break;
+
+ case EMULATE_FAIL:
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ break;
+
+ default:
+ BUG();
+ }
+ return ret;
+}
+
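For reference, the routing this handler performs, condensed (illustration only):

/*
 * Guest MSA-disabled exit routing:
 *   no MSA in guest, or CU1=1 with FR=0  ->  emulate Reserved Instruction exc.
 *   guest Config5.MSAEn clear            ->  emulate guest MSA Disabled exc.
 *   otherwise                            ->  kvm_own_msa(): restore and resume
 */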
static int kvm_trap_emul_vm_init(struct kvm *kvm)
{
return 0;
@@ -351,8 +466,9 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
* guest will come up as expected, for now we simulate a MIPS 24kc
*/
kvm_write_c0_guest_prid(cop0, 0x00019300);
- kvm_write_c0_guest_config(cop0,
- MIPS_CONFIG0 | (0x1 << CP0C0_AR) |
+ /* Have config1, Cacheable, noncoherent, write-back, write allocate */
+ kvm_write_c0_guest_config(cop0, MIPS_CONF_M | (0x3 << CP0C0_K0) |
+ (0x1 << CP0C0_AR) |
(MMU_TYPE_R4000 << CP0C0_MT));
/* Read the cache characteristics from the host Config1 Register */
@@ -368,10 +484,18 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
(1 << CP0C1_WR) | (1 << CP0C1_CA));
kvm_write_c0_guest_config1(cop0, config1);
- kvm_write_c0_guest_config2(cop0, MIPS_CONFIG2);
- /* MIPS_CONFIG2 | (read_c0_config2() & 0xfff) */
- kvm_write_c0_guest_config3(cop0, MIPS_CONFIG3 | (0 << CP0C3_VInt) |
- (1 << CP0C3_ULRI));
+ /* Have config3, no tertiary/secondary caches implemented */
+ kvm_write_c0_guest_config2(cop0, MIPS_CONF_M);
+ /* MIPS_CONF_M | (read_c0_config2() & 0xfff) */
+
+ /* Have config4, UserLocal */
+ kvm_write_c0_guest_config3(cop0, MIPS_CONF_M | MIPS_CONF3_ULRI);
+
+ /* Have config5 */
+ kvm_write_c0_guest_config4(cop0, MIPS_CONF_M);
+
+ /* No config6 */
+ kvm_write_c0_guest_config5(cop0, 0);
/* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));
@@ -416,6 +540,7 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
int ret = 0;
+ unsigned int cur, change;
switch (reg->id) {
case KVM_REG_MIPS_CP0_COUNT:
@@ -444,6 +569,44 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
kvm_write_c0_guest_cause(cop0, v);
}
break;
+ case KVM_REG_MIPS_CP0_CONFIG:
+ /* read-only for now */
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG1:
+ cur = kvm_read_c0_guest_config1(cop0);
+ change = (cur ^ v) & kvm_mips_config1_wrmask(vcpu);
+ if (change) {
+ v = cur ^ change;
+ kvm_write_c0_guest_config1(cop0, v);
+ }
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG2:
+ /* read-only for now */
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG3:
+ cur = kvm_read_c0_guest_config3(cop0);
+ change = (cur ^ v) & kvm_mips_config3_wrmask(vcpu);
+ if (change) {
+ v = cur ^ change;
+ kvm_write_c0_guest_config3(cop0, v);
+ }
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG4:
+ cur = kvm_read_c0_guest_config4(cop0);
+ change = (cur ^ v) & kvm_mips_config4_wrmask(vcpu);
+ if (change) {
+ v = cur ^ change;
+ kvm_write_c0_guest_config4(cop0, v);
+ }
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG5:
+ cur = kvm_read_c0_guest_config5(cop0);
+ change = (cur ^ v) & kvm_mips_config5_wrmask(vcpu);
+ if (change) {
+ v = cur ^ change;
+ kvm_write_c0_guest_config5(cop0, v);
+ }
+ break;
case KVM_REG_MIPS_COUNT_CTL:
ret = kvm_mips_set_count_ctl(vcpu, v);
break;
@@ -459,6 +622,18 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
return ret;
}
+static int kvm_trap_emul_vcpu_get_regs(struct kvm_vcpu *vcpu)
+{
+ kvm_lose_fpu(vcpu);
+
+ return 0;
+}
+
+static int kvm_trap_emul_vcpu_set_regs(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+
static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
/* exit handlers */
.handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
@@ -470,6 +645,10 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
.handle_syscall = kvm_trap_emul_handle_syscall,
.handle_res_inst = kvm_trap_emul_handle_res_inst,
.handle_break = kvm_trap_emul_handle_break,
+ .handle_trap = kvm_trap_emul_handle_trap,
+ .handle_msa_fpe = kvm_trap_emul_handle_msa_fpe,
+ .handle_fpe = kvm_trap_emul_handle_fpe,
+ .handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,
.vm_init = kvm_trap_emul_vm_init,
.vcpu_init = kvm_trap_emul_vcpu_init,
@@ -483,6 +662,8 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
.irq_clear = kvm_mips_irq_clear_cb,
.get_one_reg = kvm_trap_emul_get_one_reg,
.set_one_reg = kvm_trap_emul_set_one_reg,
+ .vcpu_get_regs = kvm_trap_emul_vcpu_get_regs,
+ .vcpu_set_regs = kvm_trap_emul_vcpu_set_regs,
};
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
diff --git a/arch/nios2/include/asm/thread_info.h b/arch/nios2/include/asm/thread_info.h
index 1f266575beb5..a16e55cbd8ad 100644
--- a/arch/nios2/include/asm/thread_info.h
+++ b/arch/nios2/include/asm/thread_info.h
@@ -47,7 +47,6 @@ struct thread_info {
0-0x7FFFFFFF for user-thread
0-0xFFFFFFFF for kernel-thread
*/
- struct restart_block restart_block;
struct pt_regs *regs;
};
@@ -64,9 +63,6 @@ struct thread_info {
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
}
#define init_thread_info (init_thread_union.thread_info)
diff --git a/arch/nios2/include/uapi/asm/ptrace.h b/arch/nios2/include/uapi/asm/ptrace.h
index 71a330597adf..eff00e67c0a2 100644
--- a/arch/nios2/include/uapi/asm/ptrace.h
+++ b/arch/nios2/include/uapi/asm/ptrace.h
@@ -60,12 +60,17 @@
#define PTR_IPENDING 37
#define PTR_CPUID 38
#define PTR_CTL6 39
-#define PTR_CTL7 40
+#define PTR_EXCEPTION 40
#define PTR_PTEADDR 41
#define PTR_TLBACC 42
#define PTR_TLBMISC 43
+#define PTR_ECCINJ 44
+#define PTR_BADADDR 45
+#define PTR_CONFIG 46
+#define PTR_MPUBASE 47
+#define PTR_MPUACC 48
-#define NUM_PTRACE_REG (PTR_TLBMISC + 1)
+#define NUM_PTRACE_REG (PTR_MPUACC + 1)
/* User structures for general purpose registers. */
struct user_pt_regs {
diff --git a/arch/nios2/kernel/entry.S b/arch/nios2/kernel/entry.S
index 7729bd3f2e79..27b006c52e12 100644
--- a/arch/nios2/kernel/entry.S
+++ b/arch/nios2/kernel/entry.S
@@ -161,7 +161,7 @@ ENTRY(inthandler)
***********************************************************************
*/
ENTRY(handle_trap)
- ldw r24, -4(ea) /* instruction that caused the exception */
+ ldwio r24, -4(ea) /* instruction that caused the exception */
srli r24, r24, 4
andi r24, r24, 0x7c
movia r9,trap_table
diff --git a/arch/nios2/kernel/signal.c b/arch/nios2/kernel/signal.c
index dda41e4fe707..20662b0f6c9e 100644
--- a/arch/nios2/kernel/signal.c
+++ b/arch/nios2/kernel/signal.c
@@ -43,7 +43,7 @@ static inline int rt_restore_ucontext(struct pt_regs *regs,
int err;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
err = __get_user(temp, &uc->uc_mcontext.version);
if (temp != MCONTEXT_VERSION)
diff --git a/arch/nios2/mm/cacheflush.c b/arch/nios2/mm/cacheflush.c
index 2ae482b42669..796642932e2e 100644
--- a/arch/nios2/mm/cacheflush.c
+++ b/arch/nios2/mm/cacheflush.c
@@ -23,9 +23,6 @@ static void __flush_dcache(unsigned long start, unsigned long end)
end += (cpuinfo.dcache_line_size - 1);
end &= ~(cpuinfo.dcache_line_size - 1);
- if (end > start + cpuinfo.dcache_size)
- end = start + cpuinfo.dcache_size;
-
for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {
__asm__ __volatile__ (" flushda 0(%0)\n"
: /* Outputs */
diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
index f213f5b4c423..d17437238a2c 100644
--- a/arch/parisc/include/asm/pgalloc.h
+++ b/arch/parisc/include/asm/pgalloc.h
@@ -26,7 +26,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
if (likely(pgd != NULL)) {
memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER);
-#ifdef CONFIG_64BIT
+#if PT_NLEVELS == 3
actual_pgd += PTRS_PER_PGD;
/* Populate first pmd with allocated memory. We mark it
* with PxD_FLAG_ATTACHED as a signal to the system that this
@@ -45,7 +45,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
-#ifdef CONFIG_64BIT
+#if PT_NLEVELS == 3
pgd -= PTRS_PER_PGD;
#endif
free_pages((unsigned long)pgd, PGD_ALLOC_ORDER);
@@ -72,12 +72,15 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
-#ifdef CONFIG_64BIT
if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
- /* This is the permanent pmd attached to the pgd;
- * cannot free it */
+ /*
+ * This is the permanent pmd attached to the pgd;
+ * cannot free it.
+ * Increment the counter to compensate for the decrement
+ * done by generic mm code.
+ */
+ mm_inc_nr_pmds(mm);
return;
-#endif
free_pages((unsigned long)pmd, PMD_ORDER);
}
@@ -99,7 +102,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
-#ifdef CONFIG_64BIT
+#if PT_NLEVELS == 3
/* preserve the gateway marker if this is the beginning of
* the permanent pmd */
if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 5a8997d63899..8eefb12d1d33 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -55,8 +55,8 @@
#define ENTRY_COMP(_name_) .word sys_##_name_
#endif
- ENTRY_SAME(restart_syscall) /* 0 */
- ENTRY_SAME(exit)
+90: ENTRY_SAME(restart_syscall) /* 0 */
+91: ENTRY_SAME(exit)
ENTRY_SAME(fork_wrapper)
ENTRY_SAME(read)
ENTRY_SAME(write)
@@ -439,7 +439,10 @@
ENTRY_SAME(bpf)
ENTRY_COMP(execveat)
- /* Nothing yet */
+
+.ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))
+.error "size of syscall table does not fit value of __NR_Linux_syscalls"
+.endif
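Here (91b - 90b) is the size of one table entry, so the .ifne expression is nonzero exactly when the assembled table length disagrees with __NR_Linux_syscalls. The C analogue is a static assertion; a minimal sketch with hypothetical names:

/* C analogue of the assembler-time size check above (hypothetical table) */
#define NUM_ENTRIES 5
static const int table[] = { 0, 1, 2, 3, 4 };

/* fails to compile if the initializer count and NUM_ENTRIES ever diverge */
_Static_assert(sizeof(table) / sizeof(table[0]) == NUM_ENTRIES,
	       "size of table does not fit NUM_ENTRIES");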
#undef ENTRY_SAME
#undef ENTRY_DIFF
diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h
index 2bf8e9307be9..4c8ad592ae33 100644
--- a/arch/powerpc/include/asm/cputhreads.h
+++ b/arch/powerpc/include/asm/cputhreads.h
@@ -55,7 +55,7 @@ static inline cpumask_t cpu_thread_mask_to_cores(const struct cpumask *threads)
static inline int cpu_nr_cores(void)
{
- return NR_CPUS >> threads_shift;
+ return nr_cpu_ids >> threads_shift;
}
static inline cpumask_t cpu_online_cores_map(void)
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 03cd858a401c..4cbe23af400a 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -153,6 +153,7 @@
#define PPC_INST_MFSPR_PVR_MASK 0xfc1fffff
#define PPC_INST_MFTMR 0x7c0002dc
#define PPC_INST_MSGSND 0x7c00019c
+#define PPC_INST_MSGCLR 0x7c0001dc
#define PPC_INST_MSGSNDP 0x7c00011c
#define PPC_INST_MTTMR 0x7c0003dc
#define PPC_INST_NOP 0x60000000
@@ -309,6 +310,8 @@
___PPC_RB(b) | __PPC_EH(eh))
#define PPC_MSGSND(b) stringify_in_c(.long PPC_INST_MSGSND | \
___PPC_RB(b))
+#define PPC_MSGCLR(b) stringify_in_c(.long PPC_INST_MSGCLR | \
+ ___PPC_RB(b))
#define PPC_MSGSNDP(b) stringify_in_c(.long PPC_INST_MSGSNDP | \
___PPC_RB(b))
#define PPC_POPCNTB(a, s) stringify_in_c(.long PPC_INST_POPCNTB | \
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 1c874fb533bb..af56b5c6c81a 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -608,13 +608,16 @@
#define SRR1_ISI_N_OR_G 0x10000000 /* ISI: Access is no-exec or G */
#define SRR1_ISI_PROT 0x08000000 /* ISI: Other protection fault */
#define SRR1_WAKEMASK 0x00380000 /* reason for wakeup */
+#define SRR1_WAKEMASK_P8 0x003c0000 /* reason for wakeup on POWER8 */
#define SRR1_WAKESYSERR 0x00300000 /* System error */
#define SRR1_WAKEEE 0x00200000 /* External interrupt */
#define SRR1_WAKEMT 0x00280000 /* mtctrl */
#define SRR1_WAKEHMI 0x00280000 /* Hypervisor maintenance */
#define SRR1_WAKEDEC 0x00180000 /* Decrementer interrupt */
+#define SRR1_WAKEDBELL 0x00140000 /* Privileged doorbell on P8 */
#define SRR1_WAKETHERM 0x00100000 /* Thermal management interrupt */
#define SRR1_WAKERESET 0x00100000 /* System reset */
+#define SRR1_WAKEHDBELL 0x000c0000 /* Hypervisor doorbell on P8 */
#define SRR1_WAKESTATE 0x00030000 /* Powersave exit mask [46:47] */
#define SRR1_WS_DEEPEST 0x00030000 /* Some resources not maintained,
* may not be recoverable */
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index f337666768a7..f83046878336 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -437,6 +437,26 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check_early = __machine_check_early_realmode_p8,
.platform = "power8",
},
+ { /* Power8NVL */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x004c0000,
+ .cpu_name = "POWER8NVL (raw)",
+ .cpu_features = CPU_FTRS_POWER8,
+ .cpu_user_features = COMMON_USER_POWER8,
+ .cpu_user_features2 = COMMON_USER2_POWER8,
+ .mmu_features = MMU_FTRS_POWER8,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .num_pmcs = 6,
+ .pmc_type = PPC_PMC_IBM,
+ .oprofile_cpu_type = "ppc64/power8",
+ .oprofile_type = PPC_OPROFILE_INVALID,
+ .cpu_setup = __setup_cpu_power8,
+ .cpu_restore = __restore_cpu_power8,
+ .flush_tlb = __flush_tlb_power8,
+ .machine_check_early = __machine_check_early_realmode_p8,
+ .platform = "power8",
+ },
{ /* Power8 DD1: Does not support doorbell IPIs */
.pvr_mask = 0xffffff00,
.pvr_value = 0x004d0100,
diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c
index f4217819cc31..2128f3a96c32 100644
--- a/arch/powerpc/kernel/dbell.c
+++ b/arch/powerpc/kernel/dbell.c
@@ -17,6 +17,7 @@
#include <asm/dbell.h>
#include <asm/irq_regs.h>
+#include <asm/kvm_ppc.h>
#ifdef CONFIG_SMP
void doorbell_setup_this_cpu(void)
@@ -41,6 +42,7 @@ void doorbell_exception(struct pt_regs *regs)
may_hard_irq_enable();
+ kvmppc_set_host_ipi(smp_processor_id(), 0);
__this_cpu_inc(irq_stat.doorbell_irqs);
smp_ipi_demux();
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index c2df8150bd7a..9519e6bdc6d7 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1408,7 +1408,7 @@ machine_check_handle_early:
bne 9f /* continue in V mode if we are. */
5:
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
/*
* We are coming from kernel context. Check if we are coming from
* guest. if yes, then we can continue. We will fall through
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index de4018a1bc4b..de747563d29d 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -636,7 +636,7 @@ static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu)
spin_lock(&vcpu->arch.vpa_update_lock);
lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr;
if (lppaca)
- yield_count = lppaca->yield_count;
+ yield_count = be32_to_cpu(lppaca->yield_count);
spin_unlock(&vcpu->arch.vpa_update_lock);
return yield_count;
}
@@ -942,20 +942,20 @@ static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
bool preserve_top32)
{
+ struct kvm *kvm = vcpu->kvm;
struct kvmppc_vcore *vc = vcpu->arch.vcore;
u64 mask;
+ mutex_lock(&kvm->lock);
spin_lock(&vc->lock);
/*
* If ILE (interrupt little-endian) has changed, update the
* MSR_LE bit in the intr_msr for each vcpu in this vcore.
*/
if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) {
- struct kvm *kvm = vcpu->kvm;
struct kvm_vcpu *vcpu;
int i;
- mutex_lock(&kvm->lock);
kvm_for_each_vcpu(i, vcpu, kvm) {
if (vcpu->arch.vcore != vc)
continue;
@@ -964,7 +964,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
else
vcpu->arch.intr_msr &= ~MSR_LE;
}
- mutex_unlock(&kvm->lock);
}
/*
@@ -981,6 +980,7 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
mask &= 0xFFFFFFFF;
vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
spin_unlock(&vc->lock);
+ mutex_unlock(&kvm->lock);
}
static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index bb94e6f20c81..6cbf1630cb70 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1005,6 +1005,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
/* Save HEIR (HV emulation assist reg) in emul_inst
if this is an HEI (HV emulation interrupt, e40) */
li r3,KVM_INST_FETCH_FAILED
+ stw r3,VCPU_LAST_INST(r9)
cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
bne 11f
mfspr r3,SPRN_HEIR
diff --git a/arch/powerpc/kvm/mpic.c b/arch/powerpc/kvm/mpic.c
index 39b3a8f816f2..6249cdc834d1 100644
--- a/arch/powerpc/kvm/mpic.c
+++ b/arch/powerpc/kvm/mpic.c
@@ -34,7 +34,7 @@
#include <asm/kvm_para.h>
#include <asm/kvm_host.h>
#include <asm/kvm_ppc.h>
-#include "iodev.h"
+#include <kvm/iodev.h>
#define MAX_CPU 32
#define MAX_SRC 256
@@ -289,11 +289,6 @@ static inline void IRQ_resetbit(struct irq_queue *q, int n_IRQ)
clear_bit(n_IRQ, q->queue);
}
-static inline int IRQ_testbit(struct irq_queue *q, int n_IRQ)
-{
- return test_bit(n_IRQ, q->queue);
-}
-
static void IRQ_check(struct openpic *opp, struct irq_queue *q)
{
int irq = -1;
@@ -1374,8 +1369,9 @@ static int kvm_mpic_write_internal(struct openpic *opp, gpa_t addr, u32 val)
return -ENXIO;
}
-static int kvm_mpic_read(struct kvm_io_device *this, gpa_t addr,
- int len, void *ptr)
+static int kvm_mpic_read(struct kvm_vcpu *vcpu,
+ struct kvm_io_device *this,
+ gpa_t addr, int len, void *ptr)
{
struct openpic *opp = container_of(this, struct openpic, mmio);
int ret;
@@ -1415,8 +1411,9 @@ static int kvm_mpic_read(struct kvm_io_device *this, gpa_t addr,
return ret;
}
-static int kvm_mpic_write(struct kvm_io_device *this, gpa_t addr,
- int len, const void *ptr)
+static int kvm_mpic_write(struct kvm_vcpu *vcpu,
+ struct kvm_io_device *this,
+ gpa_t addr, int len, const void *ptr)
{
struct openpic *opp = container_of(this, struct openpic, mmio);
int ret;
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 27c0face86f4..24bfe401373e 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -807,7 +807,7 @@ int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
idx = srcu_read_lock(&vcpu->kvm->srcu);
- ret = kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
+ ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
bytes, &run->mmio.data);
srcu_read_unlock(&vcpu->kvm->srcu, idx);
@@ -880,7 +880,7 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
idx = srcu_read_lock(&vcpu->kvm->srcu);
- ret = kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
+ ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
bytes, &run->mmio.data);
srcu_read_unlock(&vcpu->kvm->srcu, idx);
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index 0509bca5e830..fcbe899fe299 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -9,11 +9,11 @@
* 2 of the License, or (at your option) any later version.
*/
+#include <linux/jump_label.h>
#include <asm/ppc_asm.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/opal.h>
-#include <asm/jump_label.h>
.section ".text"
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index fc34025ef822..38a45088f633 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -33,6 +33,8 @@
#include <asm/runlatch.h>
#include <asm/code-patching.h>
#include <asm/dbell.h>
+#include <asm/kvm_ppc.h>
+#include <asm/ppc-opcode.h>
#include "powernv.h"
@@ -149,7 +151,7 @@ static int pnv_smp_cpu_disable(void)
static void pnv_smp_cpu_kill_self(void)
{
unsigned int cpu;
- unsigned long srr1;
+ unsigned long srr1, wmask;
u32 idle_states;
/* Standard hot unplug procedure */
@@ -161,6 +163,10 @@ static void pnv_smp_cpu_kill_self(void)
generic_set_cpu_dead(cpu);
smp_wmb();
+ wmask = SRR1_WAKEMASK;
+ if (cpu_has_feature(CPU_FTR_ARCH_207S))
+ wmask = SRR1_WAKEMASK_P8;
+
idle_states = pnv_get_supported_cpuidle_states();
/* We don't want to take decrementer interrupts while we are offline,
* so clear LPCR:PECE1. We keep PECE2 enabled.
@@ -191,10 +197,14 @@ static void pnv_smp_cpu_kill_self(void)
* having finished executing in a KVM guest, then srr1
* contains 0.
*/
- if ((srr1 & SRR1_WAKEMASK) == SRR1_WAKEEE) {
+ if ((srr1 & wmask) == SRR1_WAKEEE) {
icp_native_flush_interrupt();
local_paca->irq_happened &= PACA_IRQ_HARD_DIS;
smp_mb();
+ } else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
+ unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
+ asm volatile(PPC_MSGCLR(%0) : : "r" (msg));
+ kvmppc_set_host_ipi(cpu, 0);
}
if (cpu_core_split_required())
diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S
index ccd53f91e8aa..74b5b8e239c8 100644
--- a/arch/powerpc/platforms/pseries/hvCall.S
+++ b/arch/powerpc/platforms/pseries/hvCall.S
@@ -7,12 +7,12 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
+#include <linux/jump_label.h>
#include <asm/hvcall.h>
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
-#include <asm/jump_label.h>
.section ".text"
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index b5682fd6c984..b7a67e3d2201 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -26,7 +26,7 @@
#include <linux/dma-mapping.h>
#include <linux/console.h>
#include <linux/export.h>
-#include <linux/static_key.h>
+#include <linux/jump_label.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/page.h>
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
index 90cf3dcbd9f2..8f35d525cede 100644
--- a/arch/powerpc/platforms/pseries/mobility.c
+++ b/arch/powerpc/platforms/pseries/mobility.c
@@ -25,10 +25,10 @@
static struct kobject *mobility_kobj;
struct update_props_workarea {
- u32 phandle;
- u32 state;
- u64 reserved;
- u32 nprops;
+ __be32 phandle;
+ __be32 state;
+ __be64 reserved;
+ __be32 nprops;
} __packed;
#define NODE_ACTION_MASK 0xff000000
@@ -54,11 +54,11 @@ static int mobility_rtas_call(int token, char *buf, s32 scope)
return rc;
}
-static int delete_dt_node(u32 phandle)
+static int delete_dt_node(__be32 phandle)
{
struct device_node *dn;
- dn = of_find_node_by_phandle(phandle);
+ dn = of_find_node_by_phandle(be32_to_cpu(phandle));
if (!dn)
return -ENOENT;
@@ -127,7 +127,7 @@ static int update_dt_property(struct device_node *dn, struct property **prop,
return 0;
}
-static int update_dt_node(u32 phandle, s32 scope)
+static int update_dt_node(__be32 phandle, s32 scope)
{
struct update_props_workarea *upwa;
struct device_node *dn;
@@ -136,6 +136,7 @@ static int update_dt_node(u32 phandle, s32 scope)
char *prop_data;
char *rtas_buf;
int update_properties_token;
+ u32 nprops;
u32 vd;
update_properties_token = rtas_token("ibm,update-properties");
@@ -146,7 +147,7 @@ static int update_dt_node(u32 phandle, s32 scope)
if (!rtas_buf)
return -ENOMEM;
- dn = of_find_node_by_phandle(phandle);
+ dn = of_find_node_by_phandle(be32_to_cpu(phandle));
if (!dn) {
kfree(rtas_buf);
return -ENOENT;
@@ -162,6 +163,7 @@ static int update_dt_node(u32 phandle, s32 scope)
break;
prop_data = rtas_buf + sizeof(*upwa);
+ nprops = be32_to_cpu(upwa->nprops);
/* On the first call to ibm,update-properties for a node
* the first property value descriptor contains an empty
@@ -170,17 +172,17 @@ static int update_dt_node(u32 phandle, s32 scope)
*/
if (*prop_data == 0) {
prop_data++;
- vd = *(u32 *)prop_data;
+ vd = be32_to_cpu(*(__be32 *)prop_data);
prop_data += vd + sizeof(vd);
- upwa->nprops--;
+ nprops--;
}
- for (i = 0; i < upwa->nprops; i++) {
+ for (i = 0; i < nprops; i++) {
char *prop_name;
prop_name = prop_data;
prop_data += strlen(prop_name) + 1;
- vd = *(u32 *)prop_data;
+ vd = be32_to_cpu(*(__be32 *)prop_data);
prop_data += sizeof(vd);
switch (vd) {
@@ -212,13 +214,13 @@ static int update_dt_node(u32 phandle, s32 scope)
return 0;
}
-static int add_dt_node(u32 parent_phandle, u32 drc_index)
+static int add_dt_node(__be32 parent_phandle, __be32 drc_index)
{
struct device_node *dn;
struct device_node *parent_dn;
int rc;
- parent_dn = of_find_node_by_phandle(parent_phandle);
+ parent_dn = of_find_node_by_phandle(be32_to_cpu(parent_phandle));
if (!parent_dn)
return -ENOENT;
@@ -237,7 +239,7 @@ static int add_dt_node(u32 parent_phandle, u32 drc_index)
int pseries_devicetree_update(s32 scope)
{
char *rtas_buf;
- u32 *data;
+ __be32 *data;
int update_nodes_token;
int rc;
@@ -254,17 +256,17 @@ int pseries_devicetree_update(s32 scope)
if (rc && rc != 1)
break;
- data = (u32 *)rtas_buf + 4;
- while (*data & NODE_ACTION_MASK) {
+ data = (__be32 *)rtas_buf + 4;
+ while (be32_to_cpu(*data) & NODE_ACTION_MASK) {
int i;
- u32 action = *data & NODE_ACTION_MASK;
- int node_count = *data & NODE_COUNT_MASK;
+ u32 action = be32_to_cpu(*data) & NODE_ACTION_MASK;
+ u32 node_count = be32_to_cpu(*data) & NODE_COUNT_MASK;
data++;
for (i = 0; i < node_count; i++) {
- u32 phandle = *data++;
- u32 drc_index;
+ __be32 phandle = *data++;
+ __be32 drc_index;
switch (action) {
case DELETE_DT_NODE:
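[Note: the mobility.c conversion keeps RTAS work-area words in their on-wire big-endian form (__be32/__be64) and byte-swaps only at the point of use. A sketch of the resulting parsing pattern, assuming a populated rtas_buf as in the hunk above:]

	__be32 *data = (__be32 *)rtas_buf + 4;	/* skip the header words */
	u32 word = be32_to_cpu(*data);		/* swap once, at the point of use */
	u32 action = word & NODE_ACTION_MASK;
	u32 node_count = word & NODE_COUNT_MASK;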
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index c9df40b5c0ac..c9c875d9ed31 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -211,7 +211,7 @@ do { \
extern unsigned long mmap_rnd_mask;
-#define STACK_RND_MASK (mmap_rnd_mask)
+#define STACK_RND_MASK (test_thread_flag(TIF_31BIT) ? 0x7ff : mmap_rnd_mask)
#define ARCH_DLINFO \
do { \
diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h
index 58642fd29c87..2b77e235b5fb 100644
--- a/arch/s390/include/asm/jump_label.h
+++ b/arch/s390/include/asm/jump_label.h
@@ -1,6 +1,8 @@
#ifndef _ASM_S390_JUMP_LABEL_H
#define _ASM_S390_JUMP_LABEL_H
+#ifndef __ASSEMBLY__
+
#include <linux/types.h>
#define JUMP_LABEL_NOP_SIZE 6
@@ -39,4 +41,5 @@ struct jump_entry {
jump_label_t key;
};
+#endif /* __ASSEMBLY__ */
#endif
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index f407bbf5ee94..d01fc588b5c3 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -172,7 +172,9 @@ struct kvm_s390_sie_block {
__u32 fac; /* 0x01a0 */
__u8 reserved1a4[20]; /* 0x01a4 */
__u64 cbrlo; /* 0x01b8 */
- __u8 reserved1c0[30]; /* 0x01c0 */
+ __u8 reserved1c0[8]; /* 0x01c0 */
+ __u32 ecd; /* 0x01c8 */
+ __u8 reserved1cc[18]; /* 0x01cc */
__u64 pp; /* 0x01de */
__u8 reserved1e6[2]; /* 0x01e6 */
__u64 itdba; /* 0x01e8 */
@@ -183,11 +185,17 @@ struct kvm_s390_itdb {
__u8 data[256];
} __packed;
+struct kvm_s390_vregs {
+ __vector128 vrs[32];
+ __u8 reserved200[512]; /* for future vector expansion */
+} __packed;
+
struct sie_page {
struct kvm_s390_sie_block sie_block;
__u8 reserved200[1024]; /* 0x0200 */
struct kvm_s390_itdb itdb; /* 0x0600 */
- __u8 reserved700[2304]; /* 0x0700 */
+ __u8 reserved700[1280]; /* 0x0700 */
+ struct kvm_s390_vregs vregs; /* 0x0c00 */
} __packed;
struct kvm_vcpu_stat {
@@ -238,6 +246,7 @@ struct kvm_vcpu_stat {
u32 instruction_sigp_stop;
u32 instruction_sigp_stop_store_status;
u32 instruction_sigp_store_status;
+ u32 instruction_sigp_store_adtl_status;
u32 instruction_sigp_arch;
u32 instruction_sigp_prefix;
u32 instruction_sigp_restart;
@@ -270,6 +279,7 @@ struct kvm_vcpu_stat {
#define PGM_SPECIAL_OPERATION 0x13
#define PGM_OPERAND 0x15
#define PGM_TRACE_TABEL 0x16
+#define PGM_VECTOR_PROCESSING 0x1b
#define PGM_SPACE_SWITCH 0x1c
#define PGM_HFP_SQUARE_ROOT 0x1d
#define PGM_PC_TRANSLATION_SPEC 0x1f
@@ -334,6 +344,11 @@ enum irq_types {
IRQ_PEND_COUNT
};
+/* We have 2M for virtio device descriptor pages. The smallest amount of
+ * memory used per interrupt is 24 bytes (1 queue), so (2048*1024) / 24 = 87381
+ */
+#define KVM_S390_MAX_VIRTIO_IRQS 87381
+
/*
* Repressible (non-floating) machine check interrupts
* subclass bits in MCIC
@@ -411,13 +426,32 @@ struct kvm_s390_local_interrupt {
unsigned long pending_irqs;
};
+#define FIRQ_LIST_IO_ISC_0 0
+#define FIRQ_LIST_IO_ISC_1 1
+#define FIRQ_LIST_IO_ISC_2 2
+#define FIRQ_LIST_IO_ISC_3 3
+#define FIRQ_LIST_IO_ISC_4 4
+#define FIRQ_LIST_IO_ISC_5 5
+#define FIRQ_LIST_IO_ISC_6 6
+#define FIRQ_LIST_IO_ISC_7 7
+#define FIRQ_LIST_PFAULT 8
+#define FIRQ_LIST_VIRTIO 9
+#define FIRQ_LIST_COUNT 10
+#define FIRQ_CNTR_IO 0
+#define FIRQ_CNTR_SERVICE 1
+#define FIRQ_CNTR_VIRTIO 2
+#define FIRQ_CNTR_PFAULT 3
+#define FIRQ_MAX_COUNT 4
+
struct kvm_s390_float_interrupt {
+ unsigned long pending_irqs;
spinlock_t lock;
- struct list_head list;
- atomic_t active;
+ struct list_head lists[FIRQ_LIST_COUNT];
+ int counters[FIRQ_MAX_COUNT];
+ struct kvm_s390_mchk_info mchk;
+ struct kvm_s390_ext_info srv_signal;
int next_rr_cpu;
unsigned long idle_mask[BITS_TO_LONGS(KVM_MAX_VCPUS)];
- unsigned int irq_count;
};
struct kvm_hw_wp_info_arch {
@@ -465,6 +499,7 @@ struct kvm_vcpu_arch {
s390_fp_regs host_fpregs;
unsigned int host_acrs[NUM_ACRS];
s390_fp_regs guest_fpregs;
+ struct kvm_s390_vregs *host_vregs;
struct kvm_s390_local_interrupt local_int;
struct hrtimer ckc_timer;
struct kvm_s390_pgm_info pgm;
@@ -553,6 +588,7 @@ struct kvm_arch{
int use_cmma;
int user_cpu_state_ctrl;
int user_sigp;
+ int user_stsi;
struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS];
wait_queue_head_t ipte_wq;
int ipte_lock_count;
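[Note: the reworked kvm_s390_float_interrupt above replaces the single list plus irq_count with a pending_irqs bitmap, one list per interrupt source (FIRQ_LIST_*) and per-type counters (FIRQ_CNTR_*), so injection limits can be checked per type. A sketch of such a check, assuming the limit macro from the hunk above; actual enforcement lives in the injection paths, not shown here:]

	static bool virtio_inject_allowed(struct kvm_s390_float_interrupt *fi)
	{
		/* one counter per source type bounds queue growth */
		return fi->counters[FIRQ_CNTR_VIRTIO] < KVM_S390_MAX_VIRTIO_IRQS;
	}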
diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h
index 9c77e60b9a26..ef1a5fcc6c66 100644
--- a/arch/s390/include/uapi/asm/kvm.h
+++ b/arch/s390/include/uapi/asm/kvm.h
@@ -150,6 +150,7 @@ struct kvm_guest_debug_arch {
#define KVM_SYNC_CRS (1UL << 3)
#define KVM_SYNC_ARCH0 (1UL << 4)
#define KVM_SYNC_PFAULT (1UL << 5)
+#define KVM_SYNC_VRS (1UL << 6)
/* definition of registers in kvm_run */
struct kvm_sync_regs {
__u64 prefix; /* prefix register */
@@ -164,6 +165,9 @@ struct kvm_sync_regs {
__u64 pft; /* pfault token [PFAULT] */
__u64 pfs; /* pfault select [PFAULT] */
__u64 pfc; /* pfault compare [PFAULT] */
+ __u64 vrs[32][2]; /* vector registers */
+ __u8 reserved[512]; /* for future vector expansion */
+ __u32 fpc; /* only valid with vector registers */
};
#define KVM_REG_S390_TODPR (KVM_REG_S390 | KVM_REG_SIZE_U32 | 0x1)
diff --git a/arch/s390/include/uapi/asm/sie.h b/arch/s390/include/uapi/asm/sie.h
index d4096fdfc6ab..ee69c0854c88 100644
--- a/arch/s390/include/uapi/asm/sie.h
+++ b/arch/s390/include/uapi/asm/sie.h
@@ -230,7 +230,7 @@
* and returns a key, which can be used to find a mnemonic name
* of the instruction in the icpt_insn_codes table.
*/
-#define icpt_insn_decoder(insn) \
+#define icpt_insn_decoder(insn) ( \
INSN_DECODE_IPA0(0x01, insn, 48, 0xff) \
INSN_DECODE_IPA0(0xaa, insn, 48, 0x0f) \
INSN_DECODE_IPA0(0xb2, insn, 48, 0xff) \
@@ -239,6 +239,6 @@
INSN_DECODE_IPA0(0xe5, insn, 48, 0xff) \
INSN_DECODE_IPA0(0xeb, insn, 16, 0xff) \
INSN_DECODE_IPA0(0xc8, insn, 48, 0x0f) \
- INSN_DECODE(insn)
+ INSN_DECODE(insn))
#endif /* _UAPI_ASM_S390_SIE_H */
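[Note: wrapping icpt_insn_decoder() in parentheses matters because the macro expands to a single expression assembled from the INSN_DECODE* fragments; without the outer pair, an operator at the call site can bind into the expansion. A hypothetical two-macro illustration of the hazard, not taken from the source:]

	#define BAD(x)	 (x) ? 1 : 2	/* unparenthesized expression */
	#define GOOD(x)	((x) ? 1 : 2)	/* with the added outer parentheses */

	int a = BAD(v) + 1;	/* parses as (v) ? 1 : (2 + 1) -- wrong */
	int b = GOOD(v) + 1;	/* parses as ((v) ? 1 : 2) + 1 -- intended */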
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index e07e91605353..8dc4db10d160 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -171,6 +171,7 @@ int main(void)
#else /* CONFIG_32BIT */
DEFINE(__LC_DATA_EXC_CODE, offsetof(struct _lowcore, data_exc_code));
DEFINE(__LC_MCCK_FAIL_STOR_ADDR, offsetof(struct _lowcore, failing_storage_address));
+ DEFINE(__LC_VX_SAVE_AREA_ADDR, offsetof(struct _lowcore, vector_save_area_addr));
DEFINE(__LC_EXT_PARAMS2, offsetof(struct _lowcore, ext_params2));
DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, floating_pt_save_area));
DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste));
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 82c19899574f..6c79f1b44fe7 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -57,6 +57,44 @@
unsigned long ftrace_plt;
+static inline void ftrace_generate_orig_insn(struct ftrace_insn *insn)
+{
+#ifdef CC_USING_HOTPATCH
+ /* brcl 0,0 */
+ insn->opc = 0xc004;
+ insn->disp = 0;
+#else
+ /* stg r14,8(r15) */
+ insn->opc = 0xe3e0;
+ insn->disp = 0xf0080024;
+#endif
+}
+
+static inline int is_kprobe_on_ftrace(struct ftrace_insn *insn)
+{
+#ifdef CONFIG_KPROBES
+ if (insn->opc == BREAKPOINT_INSTRUCTION)
+ return 1;
+#endif
+ return 0;
+}
+
+static inline void ftrace_generate_kprobe_nop_insn(struct ftrace_insn *insn)
+{
+#ifdef CONFIG_KPROBES
+ insn->opc = BREAKPOINT_INSTRUCTION;
+ insn->disp = KPROBE_ON_FTRACE_NOP;
+#endif
+}
+
+static inline void ftrace_generate_kprobe_call_insn(struct ftrace_insn *insn)
+{
+#ifdef CONFIG_KPROBES
+ insn->opc = BREAKPOINT_INSTRUCTION;
+ insn->disp = KPROBE_ON_FTRACE_CALL;
+#endif
+}
+
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long addr)
{
@@ -72,16 +110,9 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
return -EFAULT;
if (addr == MCOUNT_ADDR) {
/* Initial code replacement */
-#ifdef CC_USING_HOTPATCH
- /* We expect to see brcl 0,0 */
- ftrace_generate_nop_insn(&orig);
-#else
- /* We expect to see stg r14,8(r15) */
- orig.opc = 0xe3e0;
- orig.disp = 0xf0080024;
-#endif
+ ftrace_generate_orig_insn(&orig);
ftrace_generate_nop_insn(&new);
- } else if (old.opc == BREAKPOINT_INSTRUCTION) {
+ } else if (is_kprobe_on_ftrace(&old)) {
/*
* If we find a breakpoint instruction, a kprobe has been
* placed at the beginning of the function. We write the
@@ -89,9 +120,8 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
* bytes of the original instruction so that the kprobes
* handler can execute a nop, if it reaches this breakpoint.
*/
- new.opc = orig.opc = BREAKPOINT_INSTRUCTION;
- orig.disp = KPROBE_ON_FTRACE_CALL;
- new.disp = KPROBE_ON_FTRACE_NOP;
+ ftrace_generate_kprobe_call_insn(&orig);
+ ftrace_generate_kprobe_nop_insn(&new);
} else {
/* Replace ftrace call with a nop. */
ftrace_generate_call_insn(&orig, rec->ip);
@@ -111,7 +141,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
return -EFAULT;
- if (old.opc == BREAKPOINT_INSTRUCTION) {
+ if (is_kprobe_on_ftrace(&old)) {
/*
* If we find a breakpoint instruction, a kprobe has been
* placed at the beginning of the function. We write the
@@ -119,9 +149,8 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
* bytes of the original instruction so that the kprobes
* handler can execute a brasl if it reaches this breakpoint.
*/
- new.opc = orig.opc = BREAKPOINT_INSTRUCTION;
- orig.disp = KPROBE_ON_FTRACE_NOP;
- new.disp = KPROBE_ON_FTRACE_CALL;
+ ftrace_generate_kprobe_nop_insn(&orig);
+ ftrace_generate_kprobe_call_insn(&new);
} else {
/* Replace nop with an ftrace call. */
ftrace_generate_nop_insn(&orig);
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index c3f8d157cb0d..e6a1578fc000 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -1415,7 +1415,7 @@ CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC_DIAG, PERF_EVENT_CPUM_SF_DIAG);
static struct attribute *cpumsf_pmu_events_attr[] = {
CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC),
- CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG),
+ NULL,
NULL,
};
@@ -1606,8 +1606,11 @@ static int __init init_cpum_sampling_pmu(void)
return -EINVAL;
}
- if (si.ad)
+ if (si.ad) {
sfb_set_limits(CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB);
+ cpumsf_pmu_events_attr[1] =
+ CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG);
+ }
sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80);
if (!sfdbg)
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S
index 6b09fdffbd2f..ca6294645dd3 100644
--- a/arch/s390/kernel/swsusp_asm64.S
+++ b/arch/s390/kernel/swsusp_asm64.S
@@ -177,6 +177,17 @@ restart_entry:
lhi %r1,1
sigp %r1,%r0,SIGP_SET_ARCHITECTURE
sam64
+#ifdef CONFIG_SMP
+ larl %r1,smp_cpu_mt_shift
+ icm %r1,15,0(%r1)
+ jz smt_done
+ llgfr %r1,%r1
+smt_loop:
+ sigp %r1,%r0,SIGP_SET_MULTI_THREADING
+ brc 8,smt_done /* accepted */
+ brc 2,smt_loop /* busy, try again */
+smt_done:
+#endif
larl %r1,.Lnew_pgm_check_psw
lpswe 0(%r1)
pgm_check_entry:
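[Note: the resume path added above re-issues SIGP SET_MULTI_THREADING until the order is accepted: condition code 0 (brc 8) proceeds, busy (brc 2) retries. A C-level sketch of the same loop; sigp_cc() is a hypothetical helper returning the condition code:]

	/* assumption: sigp_cc() issues the order and returns the cc */
	int cc;
	do {
		cc = sigp_cc(smp_cpu_mt_shift, SIGP_SET_MULTI_THREADING);
	} while (cc == 2);	/* busy -> try again; 0 means accepted */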
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index 9254afff250c..fc7ec95848c3 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -77,7 +77,7 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
if (vcpu->run->s.regs.gprs[rx] & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], &parm, sizeof(parm));
+ rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], rx, &parm, sizeof(parm));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
if (parm.parm_version != 2 || parm.parm_len < 5 || parm.code != 0x258)
@@ -213,7 +213,7 @@ static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
* - gpr 3 contains the virtqueue index (passed as datamatch)
* - gpr 4 contains the index on the bus (optionally)
*/
- ret = kvm_io_bus_write_cookie(vcpu->kvm, KVM_VIRTIO_CCW_NOTIFY_BUS,
+ ret = kvm_io_bus_write_cookie(vcpu, KVM_VIRTIO_CCW_NOTIFY_BUS,
vcpu->run->s.regs.gprs[2] & 0xffffffff,
8, &vcpu->run->s.regs.gprs[3],
vcpu->run->s.regs.gprs[4]);
@@ -230,7 +230,7 @@ static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
{
- int code = kvm_s390_get_base_disp_rs(vcpu) & 0xffff;
+ int code = kvm_s390_get_base_disp_rs(vcpu, NULL) & 0xffff;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 267523cac6de..a7559f7207df 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -10,6 +10,7 @@
#include <asm/pgtable.h>
#include "kvm-s390.h"
#include "gaccess.h"
+#include <asm/switch_to.h>
union asce {
unsigned long val;
@@ -207,6 +208,54 @@ union raddress {
unsigned long pfra : 52; /* Page-Frame Real Address */
};
+union alet {
+ u32 val;
+ struct {
+ u32 reserved : 7;
+ u32 p : 1;
+ u32 alesn : 8;
+ u32 alen : 16;
+ };
+};
+
+union ald {
+ u32 val;
+ struct {
+ u32 : 1;
+ u32 alo : 24;
+ u32 all : 7;
+ };
+};
+
+struct ale {
+ unsigned long i : 1; /* ALEN-Invalid Bit */
+ unsigned long : 5;
+ unsigned long fo : 1; /* Fetch-Only Bit */
+ unsigned long p : 1; /* Private Bit */
+ unsigned long alesn : 8; /* Access-List-Entry Sequence Number */
+ unsigned long aleax : 16; /* Access-List-Entry Authorization Index */
+ unsigned long : 32;
+ unsigned long : 1;
+ unsigned long asteo : 25; /* ASN-Second-Table-Entry Origin */
+ unsigned long : 6;
+ unsigned long astesn : 32; /* ASTE Sequence Number */
+} __packed;
+
+struct aste {
+ unsigned long i : 1; /* ASX-Invalid Bit */
+ unsigned long ato : 29; /* Authority-Table Origin */
+ unsigned long : 1;
+ unsigned long b : 1; /* Base-Space Bit */
+ unsigned long ax : 16; /* Authorization Index */
+ unsigned long atl : 12; /* Authority-Table Length */
+ unsigned long : 2;
+ unsigned long ca : 1; /* Controlled-ASN Bit */
+ unsigned long ra : 1; /* Reusable-ASN Bit */
+ unsigned long asce : 64; /* Address-Space-Control Element */
+ unsigned long ald : 32;
+ unsigned long astesn : 32;
+ /* .. more fields follow */
+} __packed;
int ipte_lock_held(struct kvm_vcpu *vcpu)
{
@@ -307,15 +356,157 @@ void ipte_unlock(struct kvm_vcpu *vcpu)
ipte_unlock_simple(vcpu);
}
-static unsigned long get_vcpu_asce(struct kvm_vcpu *vcpu)
+static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, ar_t ar,
+ int write)
+{
+ union alet alet;
+ struct ale ale;
+ struct aste aste;
+ unsigned long ald_addr, authority_table_addr;
+ union ald ald;
+ int eax, rc;
+ u8 authority_table;
+
+ if (ar >= NUM_ACRS)
+ return -EINVAL;
+
+ save_access_regs(vcpu->run->s.regs.acrs);
+ alet.val = vcpu->run->s.regs.acrs[ar];
+
+ if (ar == 0 || alet.val == 0) {
+ asce->val = vcpu->arch.sie_block->gcr[1];
+ return 0;
+ } else if (alet.val == 1) {
+ asce->val = vcpu->arch.sie_block->gcr[7];
+ return 0;
+ }
+
+ if (alet.reserved)
+ return PGM_ALET_SPECIFICATION;
+
+ if (alet.p)
+ ald_addr = vcpu->arch.sie_block->gcr[5];
+ else
+ ald_addr = vcpu->arch.sie_block->gcr[2];
+ ald_addr &= 0x7fffffc0;
+
+ rc = read_guest_real(vcpu, ald_addr + 16, &ald.val, sizeof(union ald));
+ if (rc)
+ return rc;
+
+ if (alet.alen / 8 > ald.all)
+ return PGM_ALEN_TRANSLATION;
+
+ if (0x7fffffff - ald.alo * 128 < alet.alen * 16)
+ return PGM_ADDRESSING;
+
+ rc = read_guest_real(vcpu, ald.alo * 128 + alet.alen * 16, &ale,
+ sizeof(struct ale));
+ if (rc)
+ return rc;
+
+ if (ale.i == 1)
+ return PGM_ALEN_TRANSLATION;
+ if (ale.alesn != alet.alesn)
+ return PGM_ALE_SEQUENCE;
+
+ rc = read_guest_real(vcpu, ale.asteo * 64, &aste, sizeof(struct aste));
+ if (rc)
+ return rc;
+
+ if (aste.i)
+ return PGM_ASTE_VALIDITY;
+ if (aste.astesn != ale.astesn)
+ return PGM_ASTE_SEQUENCE;
+
+ if (ale.p == 1) {
+ eax = (vcpu->arch.sie_block->gcr[8] >> 16) & 0xffff;
+ if (ale.aleax != eax) {
+ if (eax / 16 > aste.atl)
+ return PGM_EXTENDED_AUTHORITY;
+
+ authority_table_addr = aste.ato * 4 + eax / 4;
+
+ rc = read_guest_real(vcpu, authority_table_addr,
+ &authority_table,
+ sizeof(u8));
+ if (rc)
+ return rc;
+
+ if ((authority_table & (0x40 >> ((eax & 3) * 2))) == 0)
+ return PGM_EXTENDED_AUTHORITY;
+ }
+ }
+
+ if (ale.fo == 1 && write)
+ return PGM_PROTECTION;
+
+ asce->val = aste.asce;
+ return 0;
+}
+
+struct trans_exc_code_bits {
+ unsigned long addr : 52; /* Translation-exception Address */
+ unsigned long fsi : 2; /* Access Exception Fetch/Store Indication */
+ unsigned long : 6;
+ unsigned long b60 : 1;
+ unsigned long b61 : 1;
+ unsigned long as : 2; /* ASCE Identifier */
+};
+
+enum {
+ FSI_UNKNOWN = 0, /* Unknown whether fetch or store */
+ FSI_STORE = 1, /* Exception was due to store operation */
+ FSI_FETCH = 2 /* Exception was due to fetch operation */
+};
+
+static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce,
+ ar_t ar, int write)
{
+ int rc;
+ psw_t *psw = &vcpu->arch.sie_block->gpsw;
+ struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
+ struct trans_exc_code_bits *tec_bits;
+
+ memset(pgm, 0, sizeof(*pgm));
+ tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
+ tec_bits->fsi = write ? FSI_STORE : FSI_FETCH;
+ tec_bits->as = psw_bits(*psw).as;
+
+ if (!psw_bits(*psw).t) {
+ asce->val = 0;
+ asce->r = 1;
+ return 0;
+ }
+
switch (psw_bits(vcpu->arch.sie_block->gpsw).as) {
case PSW_AS_PRIMARY:
- return vcpu->arch.sie_block->gcr[1];
+ asce->val = vcpu->arch.sie_block->gcr[1];
+ return 0;
case PSW_AS_SECONDARY:
- return vcpu->arch.sie_block->gcr[7];
+ asce->val = vcpu->arch.sie_block->gcr[7];
+ return 0;
case PSW_AS_HOME:
- return vcpu->arch.sie_block->gcr[13];
+ asce->val = vcpu->arch.sie_block->gcr[13];
+ return 0;
+ case PSW_AS_ACCREG:
+ rc = ar_translation(vcpu, asce, ar, write);
+ switch (rc) {
+ case PGM_ALEN_TRANSLATION:
+ case PGM_ALE_SEQUENCE:
+ case PGM_ASTE_VALIDITY:
+ case PGM_ASTE_SEQUENCE:
+ case PGM_EXTENDED_AUTHORITY:
+ vcpu->arch.pgm.exc_access_id = ar;
+ break;
+ case PGM_PROTECTION:
+ tec_bits->b60 = 1;
+ tec_bits->b61 = 1;
+ break;
+ }
+ if (rc > 0)
+ pgm->code = rc;
+ return rc;
}
return 0;
}
@@ -330,10 +521,11 @@ static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
* @vcpu: virtual cpu
* @gva: guest virtual address
* @gpa: points to where guest physical (absolute) address should be stored
+ * @asce: effective asce
* @write: indicates if access is a write access
*
* Translate a guest virtual address into a guest absolute address by means
- * of dynamic address translation as specified by the architecuture.
+ * of dynamic address translation as specified by the architecture.
* If the resulting absolute address is not available in the configuration
* an addressing exception is indicated and @gpa will not be changed.
*
@@ -345,7 +537,8 @@ static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
* by the architecture
*/
static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
- unsigned long *gpa, int write)
+ unsigned long *gpa, const union asce asce,
+ int write)
{
union vaddress vaddr = {.addr = gva};
union raddress raddr = {.addr = gva};
@@ -354,12 +547,10 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
union ctlreg0 ctlreg0;
unsigned long ptr;
int edat1, edat2;
- union asce asce;
ctlreg0.val = vcpu->arch.sie_block->gcr[0];
edat1 = ctlreg0.edat && test_kvm_facility(vcpu->kvm, 8);
edat2 = edat1 && test_kvm_facility(vcpu->kvm, 78);
- asce.val = get_vcpu_asce(vcpu);
if (asce.r)
goto real_address;
ptr = asce.origin * 4096;
@@ -506,48 +697,30 @@ static inline int is_low_address(unsigned long ga)
return (ga & ~0x11fful) == 0;
}
-static int low_address_protection_enabled(struct kvm_vcpu *vcpu)
+static int low_address_protection_enabled(struct kvm_vcpu *vcpu,
+ const union asce asce)
{
union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};
psw_t *psw = &vcpu->arch.sie_block->gpsw;
- union asce asce;
if (!ctlreg0.lap)
return 0;
- asce.val = get_vcpu_asce(vcpu);
if (psw_bits(*psw).t && asce.p)
return 0;
return 1;
}
-struct trans_exc_code_bits {
- unsigned long addr : 52; /* Translation-exception Address */
- unsigned long fsi : 2; /* Access Exception Fetch/Store Indication */
- unsigned long : 7;
- unsigned long b61 : 1;
- unsigned long as : 2; /* ASCE Identifier */
-};
-
-enum {
- FSI_UNKNOWN = 0, /* Unknown wether fetch or store */
- FSI_STORE = 1, /* Exception was due to store operation */
- FSI_FETCH = 2 /* Exception was due to fetch operation */
-};
-
static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga,
unsigned long *pages, unsigned long nr_pages,
- int write)
+ const union asce asce, int write)
{
struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
psw_t *psw = &vcpu->arch.sie_block->gpsw;
struct trans_exc_code_bits *tec_bits;
int lap_enabled, rc;
- memset(pgm, 0, sizeof(*pgm));
tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
- tec_bits->fsi = write ? FSI_STORE : FSI_FETCH;
- tec_bits->as = psw_bits(*psw).as;
- lap_enabled = low_address_protection_enabled(vcpu);
+ lap_enabled = low_address_protection_enabled(vcpu, asce);
while (nr_pages) {
ga = kvm_s390_logical_to_effective(vcpu, ga);
tec_bits->addr = ga >> PAGE_SHIFT;
@@ -557,7 +730,7 @@ static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga,
}
ga &= PAGE_MASK;
if (psw_bits(*psw).t) {
- rc = guest_translate(vcpu, ga, pages, write);
+ rc = guest_translate(vcpu, ga, pages, asce, write);
if (rc < 0)
return rc;
if (rc == PGM_PROTECTION)
@@ -578,7 +751,7 @@ static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga,
return 0;
}
-int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
+int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
unsigned long len, int write)
{
psw_t *psw = &vcpu->arch.sie_block->gpsw;
@@ -591,20 +764,19 @@ int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
if (!len)
return 0;
- /* Access register mode is not supported yet. */
- if (psw_bits(*psw).t && psw_bits(*psw).as == PSW_AS_ACCREG)
- return -EOPNOTSUPP;
+ rc = get_vcpu_asce(vcpu, &asce, ar, write);
+ if (rc)
+ return rc;
nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1;
pages = pages_array;
if (nr_pages > ARRAY_SIZE(pages_array))
pages = vmalloc(nr_pages * sizeof(unsigned long));
if (!pages)
return -ENOMEM;
- asce.val = get_vcpu_asce(vcpu);
need_ipte_lock = psw_bits(*psw).t && !asce.r;
if (need_ipte_lock)
ipte_lock(vcpu);
- rc = guest_page_range(vcpu, ga, pages, nr_pages, write);
+ rc = guest_page_range(vcpu, ga, pages, nr_pages, asce, write);
for (idx = 0; idx < nr_pages && !rc; idx++) {
gpa = *(pages + idx) + (ga & ~PAGE_MASK);
_len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
@@ -652,7 +824,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
* Note: The IPTE lock is not taken during this function, so the caller
* has to take care of this.
*/
-int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
+int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
unsigned long *gpa, int write)
{
struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
@@ -661,26 +833,21 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
union asce asce;
int rc;
- /* Access register mode is not supported yet. */
- if (psw_bits(*psw).t && psw_bits(*psw).as == PSW_AS_ACCREG)
- return -EOPNOTSUPP;
-
gva = kvm_s390_logical_to_effective(vcpu, gva);
- memset(pgm, 0, sizeof(*pgm));
tec = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
- tec->as = psw_bits(*psw).as;
- tec->fsi = write ? FSI_STORE : FSI_FETCH;
+ rc = get_vcpu_asce(vcpu, &asce, ar, write);
tec->addr = gva >> PAGE_SHIFT;
- if (is_low_address(gva) && low_address_protection_enabled(vcpu)) {
+ if (rc)
+ return rc;
+ if (is_low_address(gva) && low_address_protection_enabled(vcpu, asce)) {
if (write) {
rc = pgm->code = PGM_PROTECTION;
return rc;
}
}
- asce.val = get_vcpu_asce(vcpu);
if (psw_bits(*psw).t && !asce.r) { /* Use DAT? */
- rc = guest_translate(vcpu, gva, gpa, write);
+ rc = guest_translate(vcpu, gva, gpa, asce, write);
if (rc > 0) {
if (rc == PGM_PROTECTION)
tec->b61 = 1;
@@ -697,28 +864,51 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
}
/**
- * kvm_s390_check_low_addr_protection - check for low-address protection
- * @ga: Guest address
+ * check_gva_range - test a range of guest virtual addresses for accessibility
+ */
+int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
+ unsigned long length, int is_write)
+{
+ unsigned long gpa;
+ unsigned long currlen;
+ int rc = 0;
+
+ ipte_lock(vcpu);
+ while (length > 0 && !rc) {
+ currlen = min(length, PAGE_SIZE - (gva % PAGE_SIZE));
+ rc = guest_translate_address(vcpu, gva, ar, &gpa, is_write);
+ gva += currlen;
+ length -= currlen;
+ }
+ ipte_unlock(vcpu);
+
+ return rc;
+}
+
+/**
+ * kvm_s390_check_low_addr_prot_real - check for low-address protection
+ * @gra: Guest real address
*
* Checks whether an address is subject to low-address protection and sets
* up vcpu->arch.pgm accordingly if necessary.
*
* Return: 0 if no protection exception, or PGM_PROTECTION if protected.
*/
-int kvm_s390_check_low_addr_protection(struct kvm_vcpu *vcpu, unsigned long ga)
+int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra)
{
struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
psw_t *psw = &vcpu->arch.sie_block->gpsw;
struct trans_exc_code_bits *tec_bits;
+ union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};
- if (!is_low_address(ga) || !low_address_protection_enabled(vcpu))
+ if (!ctlreg0.lap || !is_low_address(gra))
return 0;
memset(pgm, 0, sizeof(*pgm));
tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
tec_bits->fsi = FSI_STORE;
tec_bits->as = psw_bits(*psw).as;
- tec_bits->addr = ga >> PAGE_SHIFT;
+ tec_bits->addr = gra >> PAGE_SHIFT;
pgm->code = PGM_PROTECTION;
return pgm->code;
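[Note: for orientation, the access-register translation implemented by ar_translation() above walks the following chain, raising the matching PGM_* condition whenever a validity or sequence-number check fails (condensed sketch, error handling elided):]

	/*
	 * ACR value (ALET) --> ALD from CR5 (private) or CR2
	 *                  --> ALE  (alen-indexed, sequence-checked)
	 *                  --> ASTE (validity- and sequence-checked,
	 *                            optional authority-table check)
	 *                  --> ASCE used for DAT
	 */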
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index 0149cf15058a..ef03726cc661 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -156,9 +156,11 @@ int read_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
}
int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
- unsigned long *gpa, int write);
+ ar_t ar, unsigned long *gpa, int write);
+int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
+ unsigned long length, int is_write);
-int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
+int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
unsigned long len, int write);
int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
@@ -168,6 +170,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
* write_guest - copy data from kernel space to guest space
* @vcpu: virtual cpu
* @ga: guest address
+ * @ar: access register
* @data: source address in kernel space
* @len: number of bytes to copy
*
@@ -176,8 +179,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
* If DAT is off data will be copied to guest real or absolute memory.
* If DAT is on data will be copied to the address space as specified by
* the address space bits of the PSW:
- * Primary, secondory or home space (access register mode is currently not
- * implemented).
+ * Primary, secondary, home space or access register mode.
* The addressing mode of the PSW is also inspected, so that address wrap
* around is taken into account for 24-, 31- and 64-bit addressing mode,
* if the to be copied data crosses page boundaries in guest address space.
@@ -210,16 +212,17 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
* if data has been changed in guest space in case of an exception.
*/
static inline __must_check
-int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
+int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
unsigned long len)
{
- return access_guest(vcpu, ga, data, len, 1);
+ return access_guest(vcpu, ga, ar, data, len, 1);
}
/**
* read_guest - copy data from guest space to kernel space
* @vcpu: virtual cpu
* @ga: guest address
+ * @ar: access register
* @data: destination address in kernel space
* @len: number of bytes to copy
*
@@ -229,10 +232,10 @@ int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
* data will be copied from guest space to kernel space.
*/
static inline __must_check
-int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
+int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
unsigned long len)
{
- return access_guest(vcpu, ga, data, len, 0);
+ return access_guest(vcpu, ga, ar, data, len, 0);
}
/**
@@ -330,6 +333,6 @@ int read_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
void ipte_lock(struct kvm_vcpu *vcpu);
void ipte_unlock(struct kvm_vcpu *vcpu);
int ipte_lock_held(struct kvm_vcpu *vcpu);
-int kvm_s390_check_low_addr_protection(struct kvm_vcpu *vcpu, unsigned long ga);
+int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra);
#endif /* __KVM_S390_GACCESS_H */
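[Note: with the ar_t parameter threaded through, instruction handlers now name the access register alongside the guest address; in access-register mode it selects the address space, otherwise it is ignored. A usage sketch modeled on the diag handler earlier in this series:]

	u64 parm;
	int rc = read_guest(vcpu, ga, ar, &parm, sizeof(parm));
	if (rc)	/* may be a PGM_* code or -EFAULT */
		return kvm_s390_inject_prog_cond(vcpu, rc);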
diff --git a/arch/s390/kvm/guestdbg.c b/arch/s390/kvm/guestdbg.c
index 3e8d4092ce30..e97b3455d7e6 100644
--- a/arch/s390/kvm/guestdbg.c
+++ b/arch/s390/kvm/guestdbg.c
@@ -191,8 +191,8 @@ static int __import_wp_info(struct kvm_vcpu *vcpu,
if (!wp_info->old_data)
return -ENOMEM;
/* try to backup the original value */
- ret = read_guest(vcpu, wp_info->phys_addr, wp_info->old_data,
- wp_info->len);
+ ret = read_guest_abs(vcpu, wp_info->phys_addr, wp_info->old_data,
+ wp_info->len);
if (ret) {
kfree(wp_info->old_data);
wp_info->old_data = NULL;
@@ -362,8 +362,8 @@ static struct kvm_hw_wp_info_arch *any_wp_changed(struct kvm_vcpu *vcpu)
continue;
/* refetch the wp data and compare it to the old value */
- if (!read_guest(vcpu, wp_info->phys_addr, temp,
- wp_info->len)) {
+ if (!read_guest_abs(vcpu, wp_info->phys_addr, temp,
+ wp_info->len)) {
if (memcmp(temp, wp_info->old_data, wp_info->len)) {
kfree(temp);
return wp_info;
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index bebd2157edd0..9e3779e3e496 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -165,6 +165,7 @@ static void __extract_prog_irq(struct kvm_vcpu *vcpu,
pgm_info->mon_class_nr = vcpu->arch.sie_block->mcn;
pgm_info->mon_code = vcpu->arch.sie_block->tecmc;
break;
+ case PGM_VECTOR_PROCESSING:
case PGM_DATA:
pgm_info->data_exc_code = vcpu->arch.sie_block->dxc;
break;
@@ -319,7 +320,7 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
/* Make sure that the source is paged-in */
rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg2],
- &srcaddr, 0);
+ reg2, &srcaddr, 0);
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
rc = kvm_arch_fault_in_page(vcpu, srcaddr, 0);
@@ -328,7 +329,7 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
/* Make sure that the destination is paged-in */
rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg1],
- &dstaddr, 1);
+ reg1, &dstaddr, 1);
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
rc = kvm_arch_fault_in_page(vcpu, dstaddr, 1);
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 073b5f387d1d..9de47265ef73 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -1,7 +1,7 @@
/*
* handling kvm guest interrupts
*
- * Copyright IBM Corp. 2008,2014
+ * Copyright IBM Corp. 2008, 2015
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2 only)
@@ -17,9 +17,12 @@
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
+#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
+#include <asm/dis.h>
#include <asm/uaccess.h>
#include <asm/sclp.h>
+#include <asm/isc.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"
@@ -32,11 +35,6 @@
#define PFAULT_DONE 0x0680
#define VIRTIO_PARAM 0x0d00
-static int is_ioint(u64 type)
-{
- return ((type & 0xfffe0000u) != 0xfffe0000u);
-}
-
int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
@@ -72,70 +70,45 @@ static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
return 1;
}
-static u64 int_word_to_isc_bits(u32 int_word)
+static int ckc_irq_pending(struct kvm_vcpu *vcpu)
+{
+ if (!(vcpu->arch.sie_block->ckc <
+ get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
+ return 0;
+ return ckc_interrupts_enabled(vcpu);
+}
+
+static int cpu_timer_interrupts_enabled(struct kvm_vcpu *vcpu)
+{
+ return !psw_extint_disabled(vcpu) &&
+ (vcpu->arch.sie_block->gcr[0] & 0x400ul);
+}
+
+static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu)
+{
+ return (vcpu->arch.sie_block->cputm >> 63) &&
+ cpu_timer_interrupts_enabled(vcpu);
+}
+
+static inline int is_ioirq(unsigned long irq_type)
{
- u8 isc = (int_word & 0x38000000) >> 27;
+ return ((irq_type >= IRQ_PEND_IO_ISC_0) &&
+ (irq_type <= IRQ_PEND_IO_ISC_7));
+}
+static uint64_t isc_to_isc_bits(int isc)
+{
return (0x80 >> isc) << 24;
}
-static int __must_check __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
- struct kvm_s390_interrupt_info *inti)
+static inline u8 int_word_to_isc(u32 int_word)
{
- switch (inti->type) {
- case KVM_S390_INT_EXTERNAL_CALL:
- if (psw_extint_disabled(vcpu))
- return 0;
- if (vcpu->arch.sie_block->gcr[0] & 0x2000ul)
- return 1;
- return 0;
- case KVM_S390_INT_EMERGENCY:
- if (psw_extint_disabled(vcpu))
- return 0;
- if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
- return 1;
- return 0;
- case KVM_S390_INT_CLOCK_COMP:
- return ckc_interrupts_enabled(vcpu);
- case KVM_S390_INT_CPU_TIMER:
- if (psw_extint_disabled(vcpu))
- return 0;
- if (vcpu->arch.sie_block->gcr[0] & 0x400ul)
- return 1;
- return 0;
- case KVM_S390_INT_SERVICE:
- case KVM_S390_INT_PFAULT_INIT:
- case KVM_S390_INT_PFAULT_DONE:
- case KVM_S390_INT_VIRTIO:
- if (psw_extint_disabled(vcpu))
- return 0;
- if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
- return 1;
- return 0;
- case KVM_S390_PROGRAM_INT:
- case KVM_S390_SIGP_STOP:
- case KVM_S390_SIGP_SET_PREFIX:
- case KVM_S390_RESTART:
- return 1;
- case KVM_S390_MCHK:
- if (psw_mchk_disabled(vcpu))
- return 0;
- if (vcpu->arch.sie_block->gcr[14] & inti->mchk.cr14)
- return 1;
- return 0;
- case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
- if (psw_ioint_disabled(vcpu))
- return 0;
- if (vcpu->arch.sie_block->gcr[6] &
- int_word_to_isc_bits(inti->io.io_int_word))
- return 1;
- return 0;
- default:
- printk(KERN_WARNING "illegal interrupt type %llx\n",
- inti->type);
- BUG();
- }
- return 0;
+ return (int_word & 0x38000000) >> 27;
+}
+
+static inline unsigned long pending_floating_irqs(struct kvm_vcpu *vcpu)
+{
+ return vcpu->kvm->arch.float_int.pending_irqs;
}
static inline unsigned long pending_local_irqs(struct kvm_vcpu *vcpu)
@@ -143,12 +116,31 @@ static inline unsigned long pending_local_irqs(struct kvm_vcpu *vcpu)
return vcpu->arch.local_int.pending_irqs;
}
-static unsigned long deliverable_local_irqs(struct kvm_vcpu *vcpu)
+static unsigned long disable_iscs(struct kvm_vcpu *vcpu,
+ unsigned long active_mask)
+{
+ int i;
+
+ for (i = 0; i <= MAX_ISC; i++)
+ if (!(vcpu->arch.sie_block->gcr[6] & isc_to_isc_bits(i)))
+ active_mask &= ~(1UL << (IRQ_PEND_IO_ISC_0 + i));
+
+ return active_mask;
+}
+
+static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
{
- unsigned long active_mask = pending_local_irqs(vcpu);
+ unsigned long active_mask;
+
+ active_mask = pending_local_irqs(vcpu);
+ active_mask |= pending_floating_irqs(vcpu);
if (psw_extint_disabled(vcpu))
active_mask &= ~IRQ_PEND_EXT_MASK;
+ if (psw_ioint_disabled(vcpu))
+ active_mask &= ~IRQ_PEND_IO_MASK;
+ else
+ active_mask = disable_iscs(vcpu, active_mask);
if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul))
__clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul))
@@ -157,8 +149,13 @@ static unsigned long deliverable_local_irqs(struct kvm_vcpu *vcpu)
__clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul))
__clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
+ if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
+ __clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask);
if (psw_mchk_disabled(vcpu))
active_mask &= ~IRQ_PEND_MCHK_MASK;
+ if (!(vcpu->arch.sie_block->gcr[14] &
+ vcpu->kvm->arch.float_int.mchk.cr14))
+ __clear_bit(IRQ_PEND_MCHK_REP, &active_mask);
/*
* STOP irqs will never be actively delivered. They are triggered via
@@ -200,6 +197,16 @@ static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
}
+static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
+{
+ if (!(pending_floating_irqs(vcpu) & IRQ_PEND_IO_MASK))
+ return;
+ else if (psw_ioint_disabled(vcpu))
+ __set_cpuflag(vcpu, CPUSTAT_IO_INT);
+ else
+ vcpu->arch.sie_block->lctl |= LCTL_CR6;
+}
+
static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
{
if (!(pending_local_irqs(vcpu) & IRQ_PEND_EXT_MASK))
@@ -226,47 +233,17 @@ static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu)
__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
}
-/* Set interception request for non-deliverable local interrupts */
-static void set_intercept_indicators_local(struct kvm_vcpu *vcpu)
+/* Set interception request for non-deliverable interrupts */
+static void set_intercept_indicators(struct kvm_vcpu *vcpu)
{
+ set_intercept_indicators_io(vcpu);
set_intercept_indicators_ext(vcpu);
set_intercept_indicators_mchk(vcpu);
set_intercept_indicators_stop(vcpu);
}
-static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
- struct kvm_s390_interrupt_info *inti)
-{
- switch (inti->type) {
- case KVM_S390_INT_SERVICE:
- case KVM_S390_INT_PFAULT_DONE:
- case KVM_S390_INT_VIRTIO:
- if (psw_extint_disabled(vcpu))
- __set_cpuflag(vcpu, CPUSTAT_EXT_INT);
- else
- vcpu->arch.sie_block->lctl |= LCTL_CR0;
- break;
- case KVM_S390_MCHK:
- if (psw_mchk_disabled(vcpu))
- vcpu->arch.sie_block->ictl |= ICTL_LPSW;
- else
- vcpu->arch.sie_block->lctl |= LCTL_CR14;
- break;
- case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
- if (psw_ioint_disabled(vcpu))
- __set_cpuflag(vcpu, CPUSTAT_IO_INT);
- else
- vcpu->arch.sie_block->lctl |= LCTL_CR6;
- break;
- default:
- BUG();
- }
-}
-
static u16 get_ilc(struct kvm_vcpu *vcpu)
{
- const unsigned short table[] = { 2, 4, 4, 6 };
-
switch (vcpu->arch.sie_block->icptcode) {
case ICPT_INST:
case ICPT_INSTPROGI:
@@ -274,7 +251,7 @@ static u16 get_ilc(struct kvm_vcpu *vcpu)
case ICPT_PARTEXEC:
case ICPT_IOINST:
/* last instruction only stored for these icptcodes */
- return table[vcpu->arch.sie_block->ipa >> 14];
+ return insn_length(vcpu->arch.sie_block->ipa >> 8);
case ICPT_PROGI:
return vcpu->arch.sie_block->pgmilc;
default:
@@ -350,38 +327,72 @@ static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
{
+ struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
- struct kvm_s390_mchk_info mchk;
- int rc;
+ struct kvm_s390_mchk_info mchk = {};
+ unsigned long adtl_status_addr;
+ int deliver = 0;
+ int rc = 0;
+ spin_lock(&fi->lock);
spin_lock(&li->lock);
- mchk = li->irq.mchk;
+ if (test_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs) ||
+ test_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs)) {
+ /*
+ * If there was an exigent machine check pending, then any
+ * repressible machine checks that might have been pending
+ * are indicated along with it, so always clear bits for
+ * repressible and exigent interrupts
+ */
+ mchk = li->irq.mchk;
+ clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
+ clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
+ memset(&li->irq.mchk, 0, sizeof(mchk));
+ deliver = 1;
+ }
/*
- * If there was an exigent machine check pending, then any repressible
- * machine checks that might have been pending are indicated along
- * with it, so always clear both bits
+ * We indicate floating repressible conditions along with
+ * other pending conditions. Channel Report Pending and Channel
+ * Subsystem damage are the only two and are indicated by
+ * bits in mcic and masked in cr14.
*/
- clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
- clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
- memset(&li->irq.mchk, 0, sizeof(mchk));
+ if (test_and_clear_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
+ mchk.mcic |= fi->mchk.mcic;
+ mchk.cr14 |= fi->mchk.cr14;
+ memset(&fi->mchk, 0, sizeof(mchk));
+ deliver = 1;
+ }
spin_unlock(&li->lock);
+ spin_unlock(&fi->lock);
- VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
- mchk.mcic);
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK,
- mchk.cr14, mchk.mcic);
-
- rc = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED);
- rc |= put_guest_lc(vcpu, mchk.mcic,
- (u64 __user *) __LC_MCCK_CODE);
- rc |= put_guest_lc(vcpu, mchk.failing_storage_address,
- (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
- rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
- &mchk.fixed_logout, sizeof(mchk.fixed_logout));
- rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ if (deliver) {
+ VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
+ mchk.mcic);
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
+ KVM_S390_MCHK,
+ mchk.cr14, mchk.mcic);
+
+ rc = kvm_s390_vcpu_store_status(vcpu,
+ KVM_S390_STORE_STATUS_PREFIXED);
+ rc |= read_guest_lc(vcpu, __LC_VX_SAVE_AREA_ADDR,
+ &adtl_status_addr,
+ sizeof(unsigned long));
+ rc |= kvm_s390_vcpu_store_adtl_status(vcpu,
+ adtl_status_addr);
+ rc |= put_guest_lc(vcpu, mchk.mcic,
+ (u64 __user *) __LC_MCCK_CODE);
+ rc |= put_guest_lc(vcpu, mchk.failing_storage_address,
+ (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
+ rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
+ &mchk.fixed_logout,
+ sizeof(mchk.fixed_logout));
+ rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw,
+ sizeof(psw_t));
+ rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
+ &vcpu->arch.sie_block->gpsw,
+ sizeof(psw_t));
+ }
return rc ? -EFAULT : 0;
}
@@ -484,7 +495,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
struct kvm_s390_pgm_info pgm_info;
- int rc = 0;
+ int rc = 0, nullifying = false;
u16 ilc = get_ilc(vcpu);
spin_lock(&li->lock);
@@ -509,6 +520,8 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
case PGM_LX_TRANSLATION:
case PGM_PRIMARY_AUTHORITY:
case PGM_SECONDARY_AUTHORITY:
+ nullifying = true;
+ /* fall through */
case PGM_SPACE_SWITCH:
rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
(u64 *)__LC_TRANS_EXC_CODE);
@@ -521,6 +534,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
case PGM_EXTENDED_AUTHORITY:
rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
(u8 *)__LC_EXC_ACCESS_ID);
+ nullifying = true;
break;
case PGM_ASCE_TYPE:
case PGM_PAGE_TRANSLATION:
@@ -534,6 +548,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
(u8 *)__LC_EXC_ACCESS_ID);
rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
(u8 *)__LC_OP_ACCESS_ID);
+ nullifying = true;
break;
case PGM_MONITOR:
rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
@@ -541,6 +556,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
rc |= put_guest_lc(vcpu, pgm_info.mon_code,
(u64 *)__LC_MON_CODE);
break;
+ case PGM_VECTOR_PROCESSING:
case PGM_DATA:
rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
(u32 *)__LC_DATA_EXC_CODE);
@@ -551,6 +567,15 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
(u8 *)__LC_EXC_ACCESS_ID);
break;
+ case PGM_STACK_FULL:
+ case PGM_STACK_EMPTY:
+ case PGM_STACK_SPECIFICATION:
+ case PGM_STACK_TYPE:
+ case PGM_STACK_OPERATION:
+ case PGM_TRACE_TABEL:
+ case PGM_CRYPTO_OPERATION:
+ nullifying = true;
+ break;
}
if (pgm_info.code & PGM_PER) {
@@ -564,7 +589,12 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
(u8 *) __LC_PER_ACCESS_ID);
}
+ if (nullifying && vcpu->arch.sie_block->icptcode == ICPT_INST)
+ kvm_s390_rewind_psw(vcpu, ilc);
+
rc |= put_guest_lc(vcpu, ilc, (u16 *) __LC_PGM_ILC);
+ rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea,
+ (u64 *) __LC_LAST_BREAK);
rc |= put_guest_lc(vcpu, pgm_info.code,
(u16 *)__LC_PGM_INT_CODE);
rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
@@ -574,16 +604,27 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
return rc ? -EFAULT : 0;
}
-static int __must_check __deliver_service(struct kvm_vcpu *vcpu,
- struct kvm_s390_interrupt_info *inti)
+static int __must_check __deliver_service(struct kvm_vcpu *vcpu)
{
- int rc;
+ struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
+ struct kvm_s390_ext_info ext;
+ int rc = 0;
+
+ spin_lock(&fi->lock);
+ if (!(test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs))) {
+ spin_unlock(&fi->lock);
+ return 0;
+ }
+ ext = fi->srv_signal;
+ memset(&fi->srv_signal, 0, sizeof(ext));
+ clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
+ spin_unlock(&fi->lock);
VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
- inti->ext.ext_params);
+ ext.ext_params);
vcpu->stat.deliver_service_signal++;
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
- inti->ext.ext_params, 0);
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
+ ext.ext_params, 0);
rc = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
@@ -591,106 +632,146 @@ static int __must_check __deliver_service(struct kvm_vcpu *vcpu,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= put_guest_lc(vcpu, inti->ext.ext_params,
+ rc |= put_guest_lc(vcpu, ext.ext_params,
(u32 *)__LC_EXT_PARAMS);
+
return rc ? -EFAULT : 0;
}
-static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu,
- struct kvm_s390_interrupt_info *inti)
+static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu)
{
- int rc;
+ struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
+ struct kvm_s390_interrupt_info *inti;
+ int rc = 0;
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
- KVM_S390_INT_PFAULT_DONE, 0,
- inti->ext.ext_params2);
+ spin_lock(&fi->lock);
+ inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_PFAULT],
+ struct kvm_s390_interrupt_info,
+ list);
+ if (inti) {
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
+ KVM_S390_INT_PFAULT_DONE, 0,
+ inti->ext.ext_params2);
+ list_del(&inti->list);
+ fi->counters[FIRQ_CNTR_PFAULT] -= 1;
+ }
+ if (list_empty(&fi->lists[FIRQ_LIST_PFAULT]))
+ clear_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
+ spin_unlock(&fi->lock);
- rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *)__LC_EXT_INT_CODE);
- rc |= put_guest_lc(vcpu, PFAULT_DONE, (u16 *)__LC_EXT_CPU_ADDR);
- rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
- (u64 *)__LC_EXT_PARAMS2);
+ if (inti) {
+ rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
+ (u16 *)__LC_EXT_INT_CODE);
+ rc |= put_guest_lc(vcpu, PFAULT_DONE,
+ (u16 *)__LC_EXT_CPU_ADDR);
+ rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw,
+ sizeof(psw_t));
+ rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
+ &vcpu->arch.sie_block->gpsw,
+ sizeof(psw_t));
+ rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
+ (u64 *)__LC_EXT_PARAMS2);
+ kfree(inti);
+ }
return rc ? -EFAULT : 0;
}
-static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu,
- struct kvm_s390_interrupt_info *inti)
+static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu)
{
- int rc;
+ struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
+ struct kvm_s390_interrupt_info *inti;
+ int rc = 0;
- VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
- inti->ext.ext_params, inti->ext.ext_params2);
- vcpu->stat.deliver_virtio_interrupt++;
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
- inti->ext.ext_params,
- inti->ext.ext_params2);
+ spin_lock(&fi->lock);
+ inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_VIRTIO],
+ struct kvm_s390_interrupt_info,
+ list);
+ if (inti) {
+ VCPU_EVENT(vcpu, 4,
+ "interrupt: virtio parm:%x,parm64:%llx",
+ inti->ext.ext_params, inti->ext.ext_params2);
+ vcpu->stat.deliver_virtio_interrupt++;
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
+ inti->type,
+ inti->ext.ext_params,
+ inti->ext.ext_params2);
+ list_del(&inti->list);
+ fi->counters[FIRQ_CNTR_VIRTIO] -= 1;
+ }
+ if (list_empty(&fi->lists[FIRQ_LIST_VIRTIO]))
+ clear_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
+ spin_unlock(&fi->lock);
- rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *)__LC_EXT_INT_CODE);
- rc |= put_guest_lc(vcpu, VIRTIO_PARAM, (u16 *)__LC_EXT_CPU_ADDR);
- rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= put_guest_lc(vcpu, inti->ext.ext_params,
- (u32 *)__LC_EXT_PARAMS);
- rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
- (u64 *)__LC_EXT_PARAMS2);
+ if (inti) {
+ rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
+ (u16 *)__LC_EXT_INT_CODE);
+ rc |= put_guest_lc(vcpu, VIRTIO_PARAM,
+ (u16 *)__LC_EXT_CPU_ADDR);
+ rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw,
+ sizeof(psw_t));
+ rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
+ &vcpu->arch.sie_block->gpsw,
+ sizeof(psw_t));
+ rc |= put_guest_lc(vcpu, inti->ext.ext_params,
+ (u32 *)__LC_EXT_PARAMS);
+ rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
+ (u64 *)__LC_EXT_PARAMS2);
+ kfree(inti);
+ }
return rc ? -EFAULT : 0;
}
static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
- struct kvm_s390_interrupt_info *inti)
+ unsigned long irq_type)
{
- int rc;
+ struct list_head *isc_list;
+ struct kvm_s390_float_interrupt *fi;
+ struct kvm_s390_interrupt_info *inti = NULL;
+ int rc = 0;
- VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type);
- vcpu->stat.deliver_io_int++;
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
- ((__u32)inti->io.subchannel_id << 16) |
- inti->io.subchannel_nr,
- ((__u64)inti->io.io_int_parm << 32) |
- inti->io.io_int_word);
-
- rc = put_guest_lc(vcpu, inti->io.subchannel_id,
- (u16 *)__LC_SUBCHANNEL_ID);
- rc |= put_guest_lc(vcpu, inti->io.subchannel_nr,
- (u16 *)__LC_SUBCHANNEL_NR);
- rc |= put_guest_lc(vcpu, inti->io.io_int_parm,
- (u32 *)__LC_IO_INT_PARM);
- rc |= put_guest_lc(vcpu, inti->io.io_int_word,
- (u32 *)__LC_IO_INT_WORD);
- rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- return rc ? -EFAULT : 0;
-}
+ fi = &vcpu->kvm->arch.float_int;
-static int __must_check __deliver_mchk_floating(struct kvm_vcpu *vcpu,
- struct kvm_s390_interrupt_info *inti)
-{
- struct kvm_s390_mchk_info *mchk = &inti->mchk;
- int rc;
+ spin_lock(&fi->lock);
+ isc_list = &fi->lists[irq_type - IRQ_PEND_IO_ISC_0];
+ inti = list_first_entry_or_null(isc_list,
+ struct kvm_s390_interrupt_info,
+ list);
+ if (inti) {
+ VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type);
+ vcpu->stat.deliver_io_int++;
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
+ inti->type,
+ ((__u32)inti->io.subchannel_id << 16) |
+ inti->io.subchannel_nr,
+ ((__u64)inti->io.io_int_parm << 32) |
+ inti->io.io_int_word);
+ list_del(&inti->list);
+ fi->counters[FIRQ_CNTR_IO] -= 1;
+ }
+ if (list_empty(isc_list))
+ clear_bit(irq_type, &fi->pending_irqs);
+ spin_unlock(&fi->lock);
+
+ if (inti) {
+ rc = put_guest_lc(vcpu, inti->io.subchannel_id,
+ (u16 *)__LC_SUBCHANNEL_ID);
+ rc |= put_guest_lc(vcpu, inti->io.subchannel_nr,
+ (u16 *)__LC_SUBCHANNEL_NR);
+ rc |= put_guest_lc(vcpu, inti->io.io_int_parm,
+ (u32 *)__LC_IO_INT_PARM);
+ rc |= put_guest_lc(vcpu, inti->io.io_int_word,
+ (u32 *)__LC_IO_INT_WORD);
+ rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw,
+ sizeof(psw_t));
+ rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
+ &vcpu->arch.sie_block->gpsw,
+ sizeof(psw_t));
+ kfree(inti);
+ }
- VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
- mchk->mcic);
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK,
- mchk->cr14, mchk->mcic);
-
- rc = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED);
- rc |= put_guest_lc(vcpu, mchk->mcic,
- (u64 __user *) __LC_MCCK_CODE);
- rc |= put_guest_lc(vcpu, mchk->failing_storage_address,
- (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
- rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
- &mchk->fixed_logout, sizeof(mchk->fixed_logout));
- rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
- rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
- &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
return rc ? -EFAULT : 0;
}
@@ -698,6 +779,7 @@ typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu);
static const deliver_irq_t deliver_irq_funcs[] = {
[IRQ_PEND_MCHK_EX] = __deliver_machine_check,
+ [IRQ_PEND_MCHK_REP] = __deliver_machine_check,
[IRQ_PEND_PROG] = __deliver_prog,
[IRQ_PEND_EXT_EMERGENCY] = __deliver_emergency_signal,
[IRQ_PEND_EXT_EXTERNAL] = __deliver_external_call,
@@ -706,36 +788,11 @@ static const deliver_irq_t deliver_irq_funcs[] = {
[IRQ_PEND_RESTART] = __deliver_restart,
[IRQ_PEND_SET_PREFIX] = __deliver_set_prefix,
[IRQ_PEND_PFAULT_INIT] = __deliver_pfault_init,
+ [IRQ_PEND_EXT_SERVICE] = __deliver_service,
+ [IRQ_PEND_PFAULT_DONE] = __deliver_pfault_done,
+ [IRQ_PEND_VIRTIO] = __deliver_virtio,
};
-static int __must_check __deliver_floating_interrupt(struct kvm_vcpu *vcpu,
- struct kvm_s390_interrupt_info *inti)
-{
- int rc;
-
- switch (inti->type) {
- case KVM_S390_INT_SERVICE:
- rc = __deliver_service(vcpu, inti);
- break;
- case KVM_S390_INT_PFAULT_DONE:
- rc = __deliver_pfault_done(vcpu, inti);
- break;
- case KVM_S390_INT_VIRTIO:
- rc = __deliver_virtio(vcpu, inti);
- break;
- case KVM_S390_MCHK:
- rc = __deliver_mchk_floating(vcpu, inti);
- break;
- case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
- rc = __deliver_io(vcpu, inti);
- break;
- default:
- BUG();
- }
-
- return rc;
-}
-
/* Check whether an external call is pending (deliverable or not) */
int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
{
@@ -751,21 +808,9 @@ int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
{
- struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
- struct kvm_s390_interrupt_info *inti;
int rc;
- rc = !!deliverable_local_irqs(vcpu);
-
- if ((!rc) && atomic_read(&fi->active)) {
- spin_lock(&fi->lock);
- list_for_each_entry(inti, &fi->list, list)
- if (__interrupt_is_deliverable(vcpu, inti)) {
- rc = 1;
- break;
- }
- spin_unlock(&fi->lock);
- }
+ rc = !!deliverable_irqs(vcpu);
if (!rc && kvm_cpu_has_pending_timer(vcpu))
rc = 1;
@@ -784,12 +829,7 @@ int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
- if (!(vcpu->arch.sie_block->ckc <
- get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
- return 0;
- if (!ckc_interrupts_enabled(vcpu))
- return 0;
- return 1;
+ return ckc_irq_pending(vcpu) || cpu_timer_irq_pending(vcpu);
}
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
@@ -884,60 +924,45 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
- struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
- struct kvm_s390_interrupt_info *n, *inti = NULL;
deliver_irq_t func;
- int deliver;
int rc = 0;
unsigned long irq_type;
- unsigned long deliverable_irqs;
+ unsigned long irqs;
__reset_intercept_indicators(vcpu);
/* pending ckc conditions might have been invalidated */
clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
- if (kvm_cpu_has_pending_timer(vcpu))
+ if (ckc_irq_pending(vcpu))
set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
+ /* pending cpu timer conditions might have been invalidated */
+ clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
+ if (cpu_timer_irq_pending(vcpu))
+ set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
+
do {
- deliverable_irqs = deliverable_local_irqs(vcpu);
+ irqs = deliverable_irqs(vcpu);
/* bits are in the order of interrupt priority */
- irq_type = find_first_bit(&deliverable_irqs, IRQ_PEND_COUNT);
+ irq_type = find_first_bit(&irqs, IRQ_PEND_COUNT);
if (irq_type == IRQ_PEND_COUNT)
break;
- func = deliver_irq_funcs[irq_type];
- if (!func) {
- WARN_ON_ONCE(func == NULL);
- clear_bit(irq_type, &li->pending_irqs);
- continue;
+ if (is_ioirq(irq_type)) {
+ rc = __deliver_io(vcpu, irq_type);
+ } else {
+ func = deliver_irq_funcs[irq_type];
+ if (!func) {
+ WARN_ON_ONCE(func == NULL);
+ clear_bit(irq_type, &li->pending_irqs);
+ continue;
+ }
+ rc = func(vcpu);
}
- rc = func(vcpu);
- } while (!rc && irq_type != IRQ_PEND_COUNT);
+ if (rc)
+ break;
+ } while (!rc);
- set_intercept_indicators_local(vcpu);
-
- if (!rc && atomic_read(&fi->active)) {
- do {
- deliver = 0;
- spin_lock(&fi->lock);
- list_for_each_entry_safe(inti, n, &fi->list, list) {
- if (__interrupt_is_deliverable(vcpu, inti)) {
- list_del(&inti->list);
- fi->irq_count--;
- deliver = 1;
- break;
- }
- __set_intercept_indicator(vcpu, inti);
- }
- if (list_empty(&fi->list))
- atomic_set(&fi->active, 0);
- spin_unlock(&fi->lock);
- if (deliver) {
- rc = __deliver_floating_interrupt(vcpu, inti);
- kfree(inti);
- }
- } while (!rc && deliver);
- }
+ set_intercept_indicators(vcpu);
return rc;
}
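/*
 * Editor's sketch: the loop above encodes interrupt priority as bit
 * position, so find_first_bit() always yields the most urgent pending
 * IRQ. Approximated here with __builtin_ctzl() (GCC/Clang builtin).
 */
#include <stdio.h>

int main(void)
{
	unsigned long pending = (1UL << 5) | (1UL << 2); /* two IRQs queued */

	while (pending) {
		/* lowest set bit == highest priority */
		unsigned long irq_type = (unsigned long)__builtin_ctzl(pending);

		printf("delivering irq_type %lu\n", irq_type);
		pending &= ~(1UL << irq_type); /* delivery clears the bit */
	}
	return 0;
}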
@@ -1172,80 +1197,182 @@ static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
return 0;
}
+static struct kvm_s390_interrupt_info *get_io_int(struct kvm *kvm,
+ int isc, u32 schid)
+{
+ struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
+ struct list_head *isc_list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
+ struct kvm_s390_interrupt_info *iter;
+ u16 id = (schid & 0xffff0000U) >> 16;
+ u16 nr = schid & 0x0000ffffU;
+ spin_lock(&fi->lock);
+ list_for_each_entry(iter, isc_list, list) {
+ if (schid && (id != iter->io.subchannel_id ||
+ nr != iter->io.subchannel_nr))
+ continue;
+ /* found an appropriate entry */
+ list_del_init(&iter->list);
+ fi->counters[FIRQ_CNTR_IO] -= 1;
+ if (list_empty(isc_list))
+ clear_bit(IRQ_PEND_IO_ISC_0 + isc, &fi->pending_irqs);
+ spin_unlock(&fi->lock);
+ return iter;
+ }
+ spin_unlock(&fi->lock);
+ return NULL;
+}
+
+/*
+ * Dequeue and return an I/O interrupt matching any of the interruption
+ * subclasses as designated by the isc mask in cr6 and the schid (if != 0).
+ */
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
- u64 cr6, u64 schid)
+ u64 isc_mask, u32 schid)
+{
+ struct kvm_s390_interrupt_info *inti = NULL;
+ int isc;
+
+ for (isc = 0; isc <= MAX_ISC && !inti; isc++) {
+ if (isc_mask & isc_to_isc_bits(isc))
+ inti = get_io_int(kvm, isc, schid);
+ }
+ return inti;
+}
+
+#define SCCB_MASK 0xFFFFFFF8
+#define SCCB_EVENT_PENDING 0x3
+
+static int __inject_service(struct kvm *kvm,
+ struct kvm_s390_interrupt_info *inti)
+{
+ struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
+
+ spin_lock(&fi->lock);
+ fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_EVENT_PENDING;
+ /*
+ * Early versions of the QEMU s390 bios will inject several
+ * service interrupts one after another without handling a
+ * condition code indicating busy.
+ * We will silently ignore those superfluous sccb values.
+ * A future version of QEMU will take care of serializing
+ * servc requests.
+ */
+ if (fi->srv_signal.ext_params & SCCB_MASK)
+ goto out;
+ fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_MASK;
+ set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
+out:
+ spin_unlock(&fi->lock);
+ kfree(inti);
+ return 0;
+}
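/*
 * Editor's sketch of the ext_params split in __inject_service() above:
 * bits 0x3 carry the event-pending indication and are always merged,
 * while the 8-byte-aligned SCCB address under SCCB_MASK is only taken
 * when no earlier SCCB is still pending.
 */
#include <stdint.h>
#include <stdio.h>

#define SCCB_MASK		0xFFFFFFF8u
#define SCCB_EVENT_PENDING	0x3u

int main(void)
{
	uint32_t stored = 0, incoming = 0x12345678;

	stored |= incoming & SCCB_EVENT_PENDING; /* latch pending events */
	if (!(stored & SCCB_MASK))		 /* no sccb queued yet? */
		stored |= incoming & SCCB_MASK;
	printf("stored=%08x\n", stored);
	return 0;
}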
+
+static int __inject_virtio(struct kvm *kvm,
+ struct kvm_s390_interrupt_info *inti)
+{
+ struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
+
+ spin_lock(&fi->lock);
+ if (fi->counters[FIRQ_CNTR_VIRTIO] >= KVM_S390_MAX_VIRTIO_IRQS) {
+ spin_unlock(&fi->lock);
+ return -EBUSY;
+ }
+ fi->counters[FIRQ_CNTR_VIRTIO] += 1;
+ list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_VIRTIO]);
+ set_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
+ spin_unlock(&fi->lock);
+ return 0;
+}
+
+static int __inject_pfault_done(struct kvm *kvm,
+ struct kvm_s390_interrupt_info *inti)
+{
+ struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
+
+ spin_lock(&fi->lock);
+ if (fi->counters[FIRQ_CNTR_PFAULT] >=
+ (ASYNC_PF_PER_VCPU * KVM_MAX_VCPUS)) {
+ spin_unlock(&fi->lock);
+ return -EBUSY;
+ }
+ fi->counters[FIRQ_CNTR_PFAULT] += 1;
+ list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_PFAULT]);
+ set_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
+ spin_unlock(&fi->lock);
+ return 0;
+}
+
+#define CR_PENDING_SUBCLASS 28
+static int __inject_float_mchk(struct kvm *kvm,
+ struct kvm_s390_interrupt_info *inti)
+{
+ struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
+
+ spin_lock(&fi->lock);
+ fi->mchk.cr14 |= inti->mchk.cr14 & (1UL << CR_PENDING_SUBCLASS);
+ fi->mchk.mcic |= inti->mchk.mcic;
+ set_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs);
+ spin_unlock(&fi->lock);
+ kfree(inti);
+ return 0;
+}
+
+static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
struct kvm_s390_float_interrupt *fi;
- struct kvm_s390_interrupt_info *inti, *iter;
+ struct list_head *list;
+ int isc;
- if ((!schid && !cr6) || (schid && cr6))
- return NULL;
fi = &kvm->arch.float_int;
spin_lock(&fi->lock);
- inti = NULL;
- list_for_each_entry(iter, &fi->list, list) {
- if (!is_ioint(iter->type))
- continue;
- if (cr6 &&
- ((cr6 & int_word_to_isc_bits(iter->io.io_int_word)) == 0))
- continue;
- if (schid) {
- if (((schid & 0x00000000ffff0000) >> 16) !=
- iter->io.subchannel_id)
- continue;
- if ((schid & 0x000000000000ffff) !=
- iter->io.subchannel_nr)
- continue;
- }
- inti = iter;
- break;
- }
- if (inti) {
- list_del_init(&inti->list);
- fi->irq_count--;
+ if (fi->counters[FIRQ_CNTR_IO] >= KVM_S390_MAX_FLOAT_IRQS) {
+ spin_unlock(&fi->lock);
+ return -EBUSY;
}
- if (list_empty(&fi->list))
- atomic_set(&fi->active, 0);
+ fi->counters[FIRQ_CNTR_IO] += 1;
+
+ isc = int_word_to_isc(inti->io.io_int_word);
+ list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
+ list_add_tail(&inti->list, list);
+ set_bit(IRQ_PEND_IO_ISC_0 + isc, &fi->pending_irqs);
spin_unlock(&fi->lock);
- return inti;
+ return 0;
}
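/*
 * Editor's sketch: int_word_to_isc() is not shown in this hunk. The
 * assumption below -- ISC in bits 2-4 of the I/O interruption word --
 * is inferred from the pre-existing int_word_to_isc_bits() helper and
 * should be treated as illustrative, not authoritative.
 */
#include <stdint.h>
#include <stdio.h>

static int int_word_to_isc(uint32_t int_word)
{
	return (int_word & 0x38000000) >> 27; /* assumed bit layout */
}

int main(void)
{
	uint32_t io_int_word = 5u << 27; /* encode ISC 5 */

	printf("isc=%d\n", int_word_to_isc(io_int_word)); /* prints isc=5 */
	return 0;
}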
static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
struct kvm_s390_local_interrupt *li;
struct kvm_s390_float_interrupt *fi;
- struct kvm_s390_interrupt_info *iter;
struct kvm_vcpu *dst_vcpu = NULL;
int sigcpu;
- int rc = 0;
+ u64 type = READ_ONCE(inti->type);
+ int rc;
fi = &kvm->arch.float_int;
- spin_lock(&fi->lock);
- if (fi->irq_count >= KVM_S390_MAX_FLOAT_IRQS) {
+
+ switch (type) {
+ case KVM_S390_MCHK:
+ rc = __inject_float_mchk(kvm, inti);
+ break;
+ case KVM_S390_INT_VIRTIO:
+ rc = __inject_virtio(kvm, inti);
+ break;
+ case KVM_S390_INT_SERVICE:
+ rc = __inject_service(kvm, inti);
+ break;
+ case KVM_S390_INT_PFAULT_DONE:
+ rc = __inject_pfault_done(kvm, inti);
+ break;
+ case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
+ rc = __inject_io(kvm, inti);
+ break;
+ default:
rc = -EINVAL;
- goto unlock_fi;
}
- fi->irq_count++;
- if (!is_ioint(inti->type)) {
- list_add_tail(&inti->list, &fi->list);
- } else {
- u64 isc_bits = int_word_to_isc_bits(inti->io.io_int_word);
+ if (rc)
+ return rc;
- /* Keep I/O interrupts sorted in isc order. */
- list_for_each_entry(iter, &fi->list, list) {
- if (!is_ioint(iter->type))
- continue;
- if (int_word_to_isc_bits(iter->io.io_int_word)
- <= isc_bits)
- continue;
- break;
- }
- list_add_tail(&inti->list, &iter->list);
- }
- atomic_set(&fi->active, 1);
- if (atomic_read(&kvm->online_vcpus) == 0)
- goto unlock_fi;
sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
if (sigcpu == KVM_MAX_VCPUS) {
do {
@@ -1257,7 +1384,7 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
li = &dst_vcpu->arch.local_int;
spin_lock(&li->lock);
- switch (inti->type) {
+ switch (type) {
case KVM_S390_MCHK:
atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
break;
@@ -1270,9 +1397,8 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
}
spin_unlock(&li->lock);
kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu));
-unlock_fi:
- spin_unlock(&fi->lock);
- return rc;
+ return 0;
}
int kvm_s390_inject_vm(struct kvm *kvm,
@@ -1332,10 +1458,10 @@ int kvm_s390_inject_vm(struct kvm *kvm,
return rc;
}
-void kvm_s390_reinject_io_int(struct kvm *kvm,
+int kvm_s390_reinject_io_int(struct kvm *kvm,
struct kvm_s390_interrupt_info *inti)
{
- __inject_vm(kvm, inti);
+ return __inject_vm(kvm, inti);
}
int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
@@ -1388,12 +1514,10 @@ void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
spin_unlock(&li->lock);
}
-int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
+static int do_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
- struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
int rc;
- spin_lock(&li->lock);
switch (irq->type) {
case KVM_S390_PROGRAM_INT:
VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
@@ -1433,83 +1557,130 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
default:
rc = -EINVAL;
}
+
+ return rc;
+}
+
+int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+ int rc;
+
+ spin_lock(&li->lock);
+ rc = do_inject_vcpu(vcpu, irq);
spin_unlock(&li->lock);
if (!rc)
kvm_s390_vcpu_wakeup(vcpu);
return rc;
}
-void kvm_s390_clear_float_irqs(struct kvm *kvm)
+static inline void clear_irq_list(struct list_head *_list)
{
- struct kvm_s390_float_interrupt *fi;
- struct kvm_s390_interrupt_info *n, *inti = NULL;
+ struct kvm_s390_interrupt_info *inti, *n;
- fi = &kvm->arch.float_int;
- spin_lock(&fi->lock);
- list_for_each_entry_safe(inti, n, &fi->list, list) {
+ list_for_each_entry_safe(inti, n, _list, list) {
list_del(&inti->list);
kfree(inti);
}
- fi->irq_count = 0;
- atomic_set(&fi->active, 0);
- spin_unlock(&fi->lock);
}
-static inline int copy_irq_to_user(struct kvm_s390_interrupt_info *inti,
- u8 *addr)
+static void inti_to_irq(struct kvm_s390_interrupt_info *inti,
+ struct kvm_s390_irq *irq)
{
- struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
- struct kvm_s390_irq irq = {0};
-
- irq.type = inti->type;
+ irq->type = inti->type;
switch (inti->type) {
case KVM_S390_INT_PFAULT_INIT:
case KVM_S390_INT_PFAULT_DONE:
case KVM_S390_INT_VIRTIO:
- case KVM_S390_INT_SERVICE:
- irq.u.ext = inti->ext;
+ irq->u.ext = inti->ext;
break;
case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
- irq.u.io = inti->io;
+ irq->u.io = inti->io;
break;
- case KVM_S390_MCHK:
- irq.u.mchk = inti->mchk;
- break;
- default:
- return -EINVAL;
}
+}
- if (copy_to_user(uptr, &irq, sizeof(irq)))
- return -EFAULT;
+void kvm_s390_clear_float_irqs(struct kvm *kvm)
+{
+ struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
+ int i;
- return 0;
-}
+ spin_lock(&fi->lock);
+ for (i = 0; i < FIRQ_LIST_COUNT; i++)
+ clear_irq_list(&fi->lists[i]);
+ for (i = 0; i < FIRQ_MAX_COUNT; i++)
+ fi->counters[i] = 0;
+ spin_unlock(&fi->lock);
+}
-static int get_all_floating_irqs(struct kvm *kvm, __u8 *buf, __u64 len)
+static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
{
struct kvm_s390_interrupt_info *inti;
struct kvm_s390_float_interrupt *fi;
+ struct kvm_s390_irq *buf;
+ struct kvm_s390_irq *irq;
+ int max_irqs;
int ret = 0;
int n = 0;
+ int i;
+
+ if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0)
+ return -EINVAL;
+
+ /*
+ * We are already using -ENOMEM to signal
+ * userspace that it may retry with a bigger buffer,
+ * so we need to use something else for this case.
+ */
+ buf = vzalloc(len);
+ if (!buf)
+ return -ENOBUFS;
+
+ max_irqs = len / sizeof(struct kvm_s390_irq);
fi = &kvm->arch.float_int;
spin_lock(&fi->lock);
-
- list_for_each_entry(inti, &fi->list, list) {
- if (len < sizeof(struct kvm_s390_irq)) {
+ for (i = 0; i < FIRQ_LIST_COUNT; i++) {
+ list_for_each_entry(inti, &fi->lists[i], list) {
+ if (n == max_irqs) {
+ /* signal userspace to try again */
+ ret = -ENOMEM;
+ goto out;
+ }
+ inti_to_irq(inti, &buf[n]);
+ n++;
+ }
+ }
+ if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs)) {
+ if (n == max_irqs) {
/* signal userspace to try again */
ret = -ENOMEM;
- break;
+ goto out;
}
- ret = copy_irq_to_user(inti, buf);
- if (ret)
- break;
- buf += sizeof(struct kvm_s390_irq);
- len -= sizeof(struct kvm_s390_irq);
+ irq = (struct kvm_s390_irq *) &buf[n];
+ irq->type = KVM_S390_INT_SERVICE;
+ irq->u.ext = fi->srv_signal;
n++;
}
+ if (test_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
+ if (n == max_irqs) {
+ /* signal userspace to try again */
+ ret = -ENOMEM;
+ goto out;
+ }
+ irq = (struct kvm_s390_irq *) &buf[n];
+ irq->type = KVM_S390_MCHK;
+ irq->u.mchk = fi->mchk;
+ n++;
+ }
+out:
spin_unlock(&fi->lock);
+ if (!ret && n > 0) {
+ if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n))
+ ret = -EFAULT;
+ }
+ vfree(buf);
return ret < 0 ? ret : n;
}
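/*
 * Editor's sketch (hypothetical userspace): the retry protocol of
 * get_all_floating_irqs() above -- ENOMEM means "retry with a bigger
 * buffer", ENOBUFS means the kernel-side allocation failed. Assumes
 * the FLIC device fd and the s390 UAPI definitions of
 * KVM_DEV_FLIC_GET_ALL_IRQS and struct kvm_s390_irq.
 */
#include <errno.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int read_floating_irqs(int flic_fd, struct kvm_s390_irq **out)
{
	unsigned long len = 64 * sizeof(struct kvm_s390_irq);

	for (;;) {
		struct kvm_s390_irq *buf = realloc(*out, len);
		struct kvm_device_attr attr;
		int r;

		if (!buf)
			return -ENOMEM;
		*out = buf;
		attr = (struct kvm_device_attr) {
			.group = KVM_DEV_FLIC_GET_ALL_IRQS,
			.attr = len,
			.addr = (unsigned long)buf,
		};
		r = ioctl(flic_fd, KVM_GET_DEVICE_ATTR, &attr);
		if (r >= 0 || errno != ENOMEM)
			return r;	/* irq count, or a hard error */
		len *= 2;		/* kernel asked for more room */
	}
}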
@@ -1520,7 +1691,7 @@ static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
switch (attr->group) {
case KVM_DEV_FLIC_GET_ALL_IRQS:
- r = get_all_floating_irqs(dev->kvm, (u8 *) attr->addr,
+ r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr,
attr->attr);
break;
default:
@@ -1952,3 +2123,143 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
{
return -EINVAL;
}
+
+int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, void __user *irqstate, int len)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+ struct kvm_s390_irq *buf;
+ int r = 0;
+ int n;
+
+ buf = vmalloc(len);
+ if (!buf)
+ return -ENOMEM;
+
+ if (copy_from_user((void *) buf, irqstate, len)) {
+ r = -EFAULT;
+ goto out_free;
+ }
+
+ /*
+ * Don't allow setting the interrupt state
+ * when there are already interrupts pending
+ */
+ spin_lock(&li->lock);
+ if (li->pending_irqs) {
+ r = -EBUSY;
+ goto out_unlock;
+ }
+
+ for (n = 0; n < len / sizeof(*buf); n++) {
+ r = do_inject_vcpu(vcpu, &buf[n]);
+ if (r)
+ break;
+ }
+
+out_unlock:
+ spin_unlock(&li->lock);
+out_free:
+ vfree(buf);
+
+ return r;
+}
+
+static void store_local_irq(struct kvm_s390_local_interrupt *li,
+ struct kvm_s390_irq *irq,
+ unsigned long irq_type)
+{
+ switch (irq_type) {
+ case IRQ_PEND_MCHK_EX:
+ case IRQ_PEND_MCHK_REP:
+ irq->type = KVM_S390_MCHK;
+ irq->u.mchk = li->irq.mchk;
+ break;
+ case IRQ_PEND_PROG:
+ irq->type = KVM_S390_PROGRAM_INT;
+ irq->u.pgm = li->irq.pgm;
+ break;
+ case IRQ_PEND_PFAULT_INIT:
+ irq->type = KVM_S390_INT_PFAULT_INIT;
+ irq->u.ext = li->irq.ext;
+ break;
+ case IRQ_PEND_EXT_EXTERNAL:
+ irq->type = KVM_S390_INT_EXTERNAL_CALL;
+ irq->u.extcall = li->irq.extcall;
+ break;
+ case IRQ_PEND_EXT_CLOCK_COMP:
+ irq->type = KVM_S390_INT_CLOCK_COMP;
+ break;
+ case IRQ_PEND_EXT_CPU_TIMER:
+ irq->type = KVM_S390_INT_CPU_TIMER;
+ break;
+ case IRQ_PEND_SIGP_STOP:
+ irq->type = KVM_S390_SIGP_STOP;
+ irq->u.stop = li->irq.stop;
+ break;
+ case IRQ_PEND_RESTART:
+ irq->type = KVM_S390_RESTART;
+ break;
+ case IRQ_PEND_SET_PREFIX:
+ irq->type = KVM_S390_SIGP_SET_PREFIX;
+ irq->u.prefix = li->irq.prefix;
+ break;
+ }
+}
+
+int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
+{
+ uint8_t sigp_ctrl = vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;
+ unsigned long sigp_emerg_pending[BITS_TO_LONGS(KVM_MAX_VCPUS)];
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+ unsigned long pending_irqs;
+ struct kvm_s390_irq irq;
+ unsigned long irq_type;
+ int cpuaddr;
+ int n = 0;
+
+ spin_lock(&li->lock);
+ pending_irqs = li->pending_irqs;
+ memcpy(&sigp_emerg_pending, &li->sigp_emerg_pending,
+ sizeof(sigp_emerg_pending));
+ spin_unlock(&li->lock);
+
+ for_each_set_bit(irq_type, &pending_irqs, IRQ_PEND_COUNT) {
+ memset(&irq, 0, sizeof(irq));
+ if (irq_type == IRQ_PEND_EXT_EMERGENCY)
+ continue;
+ if (n + sizeof(irq) > len)
+ return -ENOBUFS;
+ store_local_irq(&vcpu->arch.local_int, &irq, irq_type);
+ if (copy_to_user(&buf[n], &irq, sizeof(irq)))
+ return -EFAULT;
+ n += sizeof(irq);
+ }
+
+ if (test_bit(IRQ_PEND_EXT_EMERGENCY, &pending_irqs)) {
+ for_each_set_bit(cpuaddr, sigp_emerg_pending, KVM_MAX_VCPUS) {
+ memset(&irq, 0, sizeof(irq));
+ if (n + sizeof(irq) > len)
+ return -ENOBUFS;
+ irq.type = KVM_S390_INT_EMERGENCY;
+ irq.u.emerg.code = cpuaddr;
+ if (copy_to_user(&buf[n], &irq, sizeof(irq)))
+ return -EFAULT;
+ n += sizeof(irq);
+ }
+ }
+
+ if ((sigp_ctrl & SIGP_CTRL_C) &&
+ (atomic_read(&vcpu->arch.sie_block->cpuflags) &
+ CPUSTAT_ECALL_PEND)) {
+ if (n + sizeof(irq) > len)
+ return -ENOBUFS;
+ memset(&irq, 0, sizeof(irq));
+ irq.type = KVM_S390_INT_EXTERNAL_CALL;
+ irq.u.extcall.code = sigp_ctrl & SIGP_CTRL_SCN_MASK;
+ if (copy_to_user(&buf[n], &irq, sizeof(irq)))
+ return -EFAULT;
+ n += sizeof(irq);
+ }
+
+ return n;
+}
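/*
 * Editor's sketch (hypothetical userspace) for the new ioctl pair:
 * KVM_S390_GET_IRQ_STATE fills a byte buffer with kvm_s390_irq
 * entries; per kvm_s390_get_irq_state() above, a non-negative return
 * is assumed to be the number of bytes stored.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int dump_vcpu_irq_state(int vcpu_fd)
{
	unsigned int len = 512 * sizeof(struct kvm_s390_irq);
	struct kvm_s390_irq *irqs = malloc(len);
	struct kvm_s390_irq_state state;
	int n, i;

	if (!irqs)
		return -1;
	state = (struct kvm_s390_irq_state) {
		.buf = (unsigned long)irqs,
		.len = len,
	};
	n = ioctl(vcpu_fd, KVM_S390_GET_IRQ_STATE, &state);
	for (i = 0; n > 0 && i < n / (int)sizeof(*irqs); i++)
		printf("pending irq type 0x%llx\n",
		       (unsigned long long)irqs[i].type);
	free(irqs);
	return n;
}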
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 19e17bd7aec0..afa2bd750ffc 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -25,11 +25,13 @@
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
+#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
+#include <asm/isc.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"
@@ -38,6 +40,11 @@
#include "trace.h"
#include "trace-s390.h"
+#define MEM_OP_MAX_SIZE 65536 /* Maximum transfer size for KVM_S390_MEM_OP */
+#define LOCAL_IRQS 32
+#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
+ (KVM_MAX_VCPUS + LOCAL_IRQS))
+
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
struct kvm_stats_debugfs_item debugfs_entries[] = {
@@ -87,6 +94,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
+ { "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
@@ -101,8 +109,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[] = {
- 0xff82fffbf4fc2000UL,
- 0x005c000000000000UL,
+ 0xffe6fffbfcfdfc40UL,
+ 0x205c800000000000UL,
};
unsigned long kvm_s390_fac_list_mask_size(void)
@@ -171,9 +179,16 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_S390_IRQCHIP:
case KVM_CAP_VM_ATTRIBUTES:
case KVM_CAP_MP_STATE:
+ case KVM_CAP_S390_INJECT_IRQ:
case KVM_CAP_S390_USER_SIGP:
+ case KVM_CAP_S390_USER_STSI:
+ case KVM_CAP_S390_SKEYS:
+ case KVM_CAP_S390_IRQ_STATE:
r = 1;
break;
+ case KVM_CAP_S390_MEM_OP:
+ r = MEM_OP_MAX_SIZE;
+ break;
case KVM_CAP_NR_VCPUS:
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;
@@ -184,6 +199,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_S390_COW:
r = MACHINE_HAS_ESOP;
break;
+ case KVM_CAP_S390_VECTOR_REGISTERS:
+ r = MACHINE_HAS_VX;
+ break;
default:
r = 0;
}
@@ -264,6 +282,18 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
kvm->arch.user_sigp = 1;
r = 0;
break;
+ case KVM_CAP_S390_VECTOR_REGISTERS:
+ if (MACHINE_HAS_VX) {
+ set_kvm_facility(kvm->arch.model.fac->mask, 129);
+ set_kvm_facility(kvm->arch.model.fac->list, 129);
+ r = 0;
+ } else
+ r = -EINVAL;
+ break;
+ case KVM_CAP_S390_USER_STSI:
+ kvm->arch.user_stsi = 1;
+ r = 0;
+ break;
default:
r = -EINVAL;
break;
@@ -708,6 +738,108 @@ static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
return ret;
}
+static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
+{
+ uint8_t *keys;
+ uint64_t hva;
+ unsigned long curkey;
+ int i, r = 0;
+
+ if (args->flags != 0)
+ return -EINVAL;
+
+ /* Is this guest using storage keys? */
+ if (!mm_use_skey(current->mm))
+ return KVM_S390_GET_SKEYS_NONE;
+
+ /* Enforce sane limit on memory allocation */
+ if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
+ return -EINVAL;
+
+ keys = kmalloc_array(args->count, sizeof(uint8_t),
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!keys)
+ keys = vmalloc(sizeof(uint8_t) * args->count);
+ if (!keys)
+ return -ENOMEM;
+
+ for (i = 0; i < args->count; i++) {
+ hva = gfn_to_hva(kvm, args->start_gfn + i);
+ if (kvm_is_error_hva(hva)) {
+ r = -EFAULT;
+ goto out;
+ }
+
+ curkey = get_guest_storage_key(current->mm, hva);
+ if (IS_ERR_VALUE(curkey)) {
+ r = curkey;
+ goto out;
+ }
+ keys[i] = curkey;
+ }
+
+ r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
+ sizeof(uint8_t) * args->count);
+ if (r)
+ r = -EFAULT;
+out:
+ kvfree(keys);
+ return r;
+}
+
+static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
+{
+ uint8_t *keys;
+ uint64_t hva;
+ int i, r = 0;
+
+ if (args->flags != 0)
+ return -EINVAL;
+
+ /* Enforce sane limit on memory allocation */
+ if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
+ return -EINVAL;
+
+ keys = kmalloc_array(args->count, sizeof(uint8_t),
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!keys)
+ keys = vmalloc(sizeof(uint8_t) * args->count);
+ if (!keys)
+ return -ENOMEM;
+
+ r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
+ sizeof(uint8_t) * args->count);
+ if (r) {
+ r = -EFAULT;
+ goto out;
+ }
+
+ /* Enable storage key handling for the guest */
+ s390_enable_skey();
+
+ for (i = 0; i < args->count; i++) {
+ hva = gfn_to_hva(kvm, args->start_gfn + i);
+ if (kvm_is_error_hva(hva)) {
+ r = -EFAULT;
+ goto out;
+ }
+
+ /* Lowest order bit is reserved */
+ if (keys[i] & 0x01) {
+ r = -EINVAL;
+ goto out;
+ }
+
+ r = set_guest_storage_key(current->mm, hva,
+ (unsigned long)keys[i], 0);
+ if (r)
+ goto out;
+ }
+out:
+ kvfree(keys);
+ return r;
+}
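/*
 * Editor's sketch (hypothetical userspace) for the storage-key API
 * added above: one key byte per guest page, flags must be zero, and
 * KVM_S390_GET_SKEYS may return KVM_S390_GET_SKEYS_NONE when the
 * guest is not using storage keys at all.
 */
#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int fetch_skeys(int vm_fd, uint64_t start_gfn, uint64_t count, uint8_t **keys)
{
	struct kvm_s390_skeys args = {
		.start_gfn = start_gfn,
		.count = count,		/* capped by KVM_S390_SKEYS_MAX */
		.flags = 0,		/* reserved, must be zero */
	};

	*keys = malloc(count);		/* one key byte per page */
	if (!*keys)
		return -1;
	args.skeydata_addr = (unsigned long)*keys;
	return ioctl(vm_fd, KVM_S390_GET_SKEYS, &args);
}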
+
long kvm_arch_vm_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@@ -767,6 +899,26 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = kvm_s390_vm_has_attr(kvm, &attr);
break;
}
+ case KVM_S390_GET_SKEYS: {
+ struct kvm_s390_skeys args;
+
+ r = -EFAULT;
+ if (copy_from_user(&args, argp,
+ sizeof(struct kvm_s390_skeys)))
+ break;
+ r = kvm_s390_get_skeys(kvm, &args);
+ break;
+ }
+ case KVM_S390_SET_SKEYS: {
+ struct kvm_s390_skeys args;
+
+ r = -EFAULT;
+ if (copy_from_user(&args, argp,
+ sizeof(struct kvm_s390_skeys)))
+ break;
+ r = kvm_s390_set_skeys(kvm, &args);
+ break;
+ }
default:
r = -ENOTTY;
}
@@ -887,7 +1039,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
if (!kvm->arch.dbf)
- goto out_nodbf;
+ goto out_err;
/*
* The architectural maximum amount of facilities is 16 kbit. To store
@@ -899,7 +1051,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm->arch.model.fac =
(struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!kvm->arch.model.fac)
- goto out_nofac;
+ goto out_err;
/* Populate the facility mask initially. */
memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
@@ -919,10 +1071,11 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff;
if (kvm_s390_crypto_init(kvm) < 0)
- goto out_crypto;
+ goto out_err;
spin_lock_init(&kvm->arch.float_int.lock);
- INIT_LIST_HEAD(&kvm->arch.float_int.list);
+ for (i = 0; i < FIRQ_LIST_COUNT; i++)
+ INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
init_waitqueue_head(&kvm->arch.ipte_wq);
mutex_init(&kvm->arch.ipte_mutex);
@@ -934,7 +1087,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
} else {
kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
if (!kvm->arch.gmap)
- goto out_nogmap;
+ goto out_err;
kvm->arch.gmap->private = kvm;
kvm->arch.gmap->pfault_enabled = 0;
}
@@ -946,15 +1099,11 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
spin_lock_init(&kvm->arch.start_stop_lock);
return 0;
-out_nogmap:
+out_err:
kfree(kvm->arch.crypto.crycb);
-out_crypto:
free_page((unsigned long)kvm->arch.model.fac);
-out_nofac:
debug_unregister(kvm->arch.dbf);
-out_nodbf:
free_page((unsigned long)(kvm->arch.sca));
-out_err:
return rc;
}
@@ -1034,6 +1183,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
KVM_SYNC_CRS |
KVM_SYNC_ARCH0 |
KVM_SYNC_PFAULT;
+ if (test_kvm_facility(vcpu->kvm, 129))
+ vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
if (kvm_is_ucontrol(vcpu->kvm))
return __kvm_ucontrol_vcpu_init(vcpu);
@@ -1044,10 +1195,18 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
- save_fp_regs(vcpu->arch.host_fpregs.fprs);
+ if (test_kvm_facility(vcpu->kvm, 129))
+ save_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
+ else
+ save_fp_regs(vcpu->arch.host_fpregs.fprs);
save_access_regs(vcpu->arch.host_acrs);
- restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
- restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
+ if (test_kvm_facility(vcpu->kvm, 129)) {
+ restore_fp_ctl(&vcpu->run->s.regs.fpc);
+ restore_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
+ } else {
+ restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
+ restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
+ }
restore_access_regs(vcpu->run->s.regs.acrs);
gmap_enable(vcpu->arch.gmap);
atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
@@ -1057,11 +1216,19 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
gmap_disable(vcpu->arch.gmap);
- save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
- save_fp_regs(vcpu->arch.guest_fpregs.fprs);
+ if (test_kvm_facility(vcpu->kvm, 129)) {
+ save_fp_ctl(&vcpu->run->s.regs.fpc);
+ save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
+ } else {
+ save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
+ save_fp_regs(vcpu->arch.guest_fpregs.fprs);
+ }
save_access_regs(vcpu->run->s.regs.acrs);
restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
- restore_fp_regs(vcpu->arch.host_fpregs.fprs);
+ if (test_kvm_facility(vcpu->kvm, 129))
+ restore_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
+ else
+ restore_fp_regs(vcpu->arch.host_fpregs.fprs);
restore_access_regs(vcpu->arch.host_acrs);
}
@@ -1129,6 +1296,15 @@ int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
return 0;
}
+static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
+{
+ struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
+
+ vcpu->arch.cpu_id = model->cpu_id;
+ vcpu->arch.sie_block->ibc = model->ibc;
+ vcpu->arch.sie_block->fac = (int) (long) model->fac->list;
+}
+
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
int rc = 0;
@@ -1137,6 +1313,8 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
CPUSTAT_SM |
CPUSTAT_STOPPED |
CPUSTAT_GED);
+ kvm_s390_vcpu_setup_model(vcpu);
+
vcpu->arch.sie_block->ecb = 6;
if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
vcpu->arch.sie_block->ecb |= 0x10;
@@ -1147,8 +1325,11 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
vcpu->arch.sie_block->eca |= 1;
if (sclp_has_sigpif())
vcpu->arch.sie_block->eca |= 0x10000000U;
- vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
- ICTL_TPROT;
+ if (test_kvm_facility(vcpu->kvm, 129)) {
+ vcpu->arch.sie_block->eca |= 0x00020000;
+ vcpu->arch.sie_block->ecd |= 0x20000000;
+ }
+ vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
if (kvm_s390_cmma_enabled(vcpu->kvm)) {
rc = kvm_s390_vcpu_setup_cmma(vcpu);
@@ -1158,11 +1339,6 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
- mutex_lock(&vcpu->kvm->lock);
- vcpu->arch.cpu_id = vcpu->kvm->arch.model.cpu_id;
- vcpu->arch.sie_block->ibc = vcpu->kvm->arch.model.ibc;
- mutex_unlock(&vcpu->kvm->lock);
-
kvm_s390_vcpu_crypto_setup(vcpu);
return rc;
@@ -1190,6 +1366,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
vcpu->arch.sie_block = &sie_page->sie_block;
vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
+ vcpu->arch.host_vregs = &sie_page->vregs;
vcpu->arch.sie_block->icpua = id;
if (!kvm_is_ucontrol(kvm)) {
@@ -1205,7 +1382,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
}
- vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->list;
spin_lock_init(&vcpu->arch.local_int.lock);
vcpu->arch.local_int.float_int = &kvm->arch.float_int;
@@ -1725,6 +1901,31 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu)
return 0;
}
+static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
+{
+ psw_t *psw = &vcpu->arch.sie_block->gpsw;
+ u8 opcode;
+ int rc;
+
+ VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
+ trace_kvm_s390_sie_fault(vcpu);
+
+ /*
+ * We want to inject an addressing exception, which is defined as a
+ * suppressing or terminating exception. However, since we got here
+ * via a DAT access exception, the PSW still points to the faulting
+ * instruction because DAT exceptions are nullifying. So we've got
+ * to look up the current opcode to get the length of the instruction
+ * to be able to forward the PSW.
+ */
+ rc = read_guest(vcpu, psw->addr, 0, &opcode, 1);
+ if (rc)
+ return kvm_s390_inject_prog_cond(vcpu, rc);
+ psw->addr = __rewind_psw(*psw, -insn_length(opcode));
+
+ return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+}
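/*
 * Editor's sketch: the PSW forwarding above needs the length of the
 * instruction at psw->addr. On s390 the two leftmost bits of the
 * first opcode byte encode the length (00 -> 2, 01/10 -> 4,
 * 11 -> 6 bytes); an insn_length() with exactly that property:
 */
#include <stdio.h>

static int insn_length(unsigned char code)
{
	return ((((int)code + 64) >> 7) + 1) << 1;
}

int main(void)
{
	/* prints: 2 4 4 6 */
	printf("%d %d %d %d\n", insn_length(0x00), insn_length(0x40),
	       insn_length(0x80), insn_length(0xc0));
	return 0;
}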
+
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
int rc = -1;
@@ -1756,11 +1957,8 @@ static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
}
}
- if (rc == -1) {
- VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
- trace_kvm_s390_sie_fault(vcpu);
- rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
- }
+ if (rc == -1)
+ rc = vcpu_post_run_fault_in_sie(vcpu);
memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
@@ -1976,6 +2174,35 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
return kvm_s390_store_status_unloaded(vcpu, addr);
}
+/*
+ * store additional status at address
+ */
+int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
+ unsigned long gpa)
+{
+ /* Only bits 0-53 are used for address formation */
+ if (!(gpa & ~0x3ff))
+ return 0;
+
+ return write_guest_abs(vcpu, gpa & ~0x3ff,
+ (void *)&vcpu->run->s.regs.vrs, 512);
+}
+
+int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
+{
+ if (!test_kvm_facility(vcpu->kvm, 129))
+ return 0;
+
+ /*
+ * The guest VXRS are in the host VXRS due to the lazy
+ * copying in vcpu load/put. Let's update our copies before we save
+ * them into the save area.
+ */
+ save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
+
+ return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
+}
+
static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
@@ -2100,6 +2327,65 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
return r;
}
+static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
+ struct kvm_s390_mem_op *mop)
+{
+ void __user *uaddr = (void __user *)mop->buf;
+ void *tmpbuf = NULL;
+ int r, srcu_idx;
+ const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
+ | KVM_S390_MEMOP_F_CHECK_ONLY;
+
+ if (mop->flags & ~supported_flags)
+ return -EINVAL;
+
+ if (mop->size > MEM_OP_MAX_SIZE)
+ return -E2BIG;
+
+ if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
+ tmpbuf = vmalloc(mop->size);
+ if (!tmpbuf)
+ return -ENOMEM;
+ }
+
+ srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+
+ switch (mop->op) {
+ case KVM_S390_MEMOP_LOGICAL_READ:
+ if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
+ r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, false);
+ break;
+ }
+ r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
+ if (r == 0) {
+ if (copy_to_user(uaddr, tmpbuf, mop->size))
+ r = -EFAULT;
+ }
+ break;
+ case KVM_S390_MEMOP_LOGICAL_WRITE:
+ if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
+ r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, true);
+ break;
+ }
+ if (copy_from_user(tmpbuf, uaddr, mop->size)) {
+ r = -EFAULT;
+ break;
+ }
+ r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
+ break;
+ default:
+ r = -EINVAL;
+ }
+
+ srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
+
+ if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
+ kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
+
+ vfree(tmpbuf);
+ return r;
+}
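/*
 * Editor's sketch (hypothetical userspace) for the MEM_OP ioctl
 * handled by kvm_s390_guest_mem_op() above; field names follow the
 * UAPI struct this series introduces, and size is capped by the
 * KVM_CAP_S390_MEM_OP capability value.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int read_guest_logical(int vcpu_fd, uint64_t gaddr, void *dst, uint32_t size)
{
	struct kvm_s390_mem_op mop;

	memset(&mop, 0, sizeof(mop));
	mop.gaddr = gaddr;		/* guest logical address */
	mop.size = size;
	mop.op = KVM_S390_MEMOP_LOGICAL_READ;
	mop.buf = (unsigned long)dst;	/* userspace destination */
	mop.ar = 0;			/* access register 0 */
	return ioctl(vcpu_fd, KVM_S390_MEM_OP, &mop);
}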
+
long kvm_arch_vcpu_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@@ -2109,6 +2395,15 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
long r;
switch (ioctl) {
+ case KVM_S390_IRQ: {
+ struct kvm_s390_irq s390irq;
+
+ r = -EFAULT;
+ if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
+ break;
+ r = kvm_s390_inject_vcpu(vcpu, &s390irq);
+ break;
+ }
case KVM_S390_INTERRUPT: {
struct kvm_s390_interrupt s390int;
struct kvm_s390_irq s390irq;
@@ -2199,6 +2494,47 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
break;
}
+ case KVM_S390_MEM_OP: {
+ struct kvm_s390_mem_op mem_op;
+
+ if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
+ r = kvm_s390_guest_mem_op(vcpu, &mem_op);
+ else
+ r = -EFAULT;
+ break;
+ }
+ case KVM_S390_SET_IRQ_STATE: {
+ struct kvm_s390_irq_state irq_state;
+
+ r = -EFAULT;
+ if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
+ break;
+ if (irq_state.len > VCPU_IRQS_MAX_BUF ||
+ irq_state.len == 0 ||
+ irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
+ r = -EINVAL;
+ break;
+ }
+ r = kvm_s390_set_irq_state(vcpu,
+ (void __user *) irq_state.buf,
+ irq_state.len);
+ break;
+ }
+ case KVM_S390_GET_IRQ_STATE: {
+ struct kvm_s390_irq_state irq_state;
+
+ r = -EFAULT;
+ if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
+ break;
+ if (irq_state.len == 0) {
+ r = -EINVAL;
+ break;
+ }
+ r = kvm_s390_get_irq_state(vcpu,
+ (__u8 __user *) irq_state.buf,
+ irq_state.len);
+ break;
+ }
default:
r = -ENOTTY;
}
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index c34109aa552d..ca108b90ae56 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -70,16 +70,22 @@ static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix)
kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
}
-static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu)
+typedef u8 __bitwise ar_t;
+
+static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, ar_t *ar)
{
u32 base2 = vcpu->arch.sie_block->ipb >> 28;
u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
+ if (ar)
+ *ar = base2;
+
return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
}
static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
- u64 *address1, u64 *address2)
+ u64 *address1, u64 *address2,
+ ar_t *ar_b1, ar_t *ar_b2)
{
u32 base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
u32 disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16;
@@ -88,6 +94,11 @@ static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
*address1 = (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + disp1;
*address2 = (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
+
+ if (ar_b1)
+ *ar_b1 = base1;
+ if (ar_b2)
+ *ar_b2 = base2;
}
static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2)
@@ -98,7 +109,7 @@ static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2
*r2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;
}
-static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu)
+static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu, ar_t *ar)
{
u32 base2 = vcpu->arch.sie_block->ipb >> 28;
u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
@@ -107,14 +118,20 @@ static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu)
if (disp2 & 0x80000)
disp2+=0xfff00000;
+ if (ar)
+ *ar = base2;
+
return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + (long)(int)disp2;
}
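/*
 * Editor's sketch: the RSY displacement above is a signed 20-bit
 * field; adding 0xfff00000 when bit 19 is set is plain sign
 * extension before the value is widened.
 */
#include <stdint.h>
#include <stdio.h>

static int64_t sext20(uint32_t disp2)
{
	if (disp2 & 0x80000)		/* bit 19 set: negative */
		disp2 += 0xfff00000;	/* fill bits 20..31 with ones */
	return (long)(int)disp2;
}

int main(void)
{
	printf("%lld\n", (long long)sext20(0xfffff)); /* -1 */
	printf("%lld\n", (long long)sext20(0x00001)); /* 1 */
	return 0;
}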
-static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu)
+static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu, ar_t *ar)
{
u32 base2 = vcpu->arch.sie_block->ipb >> 28;
u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
+ if (ar)
+ *ar = base2;
+
return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
}
@@ -125,13 +142,24 @@ static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc)
vcpu->arch.sie_block->gpsw.mask |= cc << 44;
}
-/* test availability of facility in a kvm intance */
+/* test availability of facility in a kvm instance */
static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr)
{
return __test_facility(nr, kvm->arch.model.fac->mask) &&
__test_facility(nr, kvm->arch.model.fac->list);
}
+static inline int set_kvm_facility(u64 *fac_list, unsigned long nr)
+{
+ unsigned char *ptr;
+
+ if (nr >= MAX_FACILITY_BIT)
+ return -EINVAL;
+ ptr = (unsigned char *) fac_list + (nr >> 3);
+ *ptr |= (0x80UL >> (nr & 7));
+ return 0;
+}
+
/* are cpu states controlled by user space */
static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm)
{
@@ -150,9 +178,9 @@ int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
struct kvm_s390_irq *irq);
int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
- u64 cr6, u64 schid);
-void kvm_s390_reinject_io_int(struct kvm *kvm,
- struct kvm_s390_interrupt_info *inti);
+ u64 isc_mask, u32 schid);
+int kvm_s390_reinject_io_int(struct kvm *kvm,
+ struct kvm_s390_interrupt_info *inti);
int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked);
/* implemented in intercept.c */
@@ -177,7 +205,10 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);
/* implemented in kvm-s390.c */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
+int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
+ unsigned long addr);
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
+int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu);
void s390_vcpu_block(struct kvm_vcpu *vcpu);
@@ -241,6 +272,10 @@ int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu);
extern struct kvm_device_ops kvm_flic_ops;
int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu);
void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu);
+int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu,
+ void __user *buf, int len);
+int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu,
+ __u8 __user *buf, int len);
/* implemented in guestdbg.c */
void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu);
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 351116939ea2..d22d8ee1ff9d 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -36,15 +36,16 @@ static int handle_set_clock(struct kvm_vcpu *vcpu)
struct kvm_vcpu *cpup;
s64 hostclk, val;
int i, rc;
+ ar_t ar;
u64 op2;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- op2 = kvm_s390_get_base_disp_s(vcpu);
+ op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
if (op2 & 7) /* Operand must be on a doubleword boundary */
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- rc = read_guest(vcpu, op2, &val, sizeof(val));
+ rc = read_guest(vcpu, op2, ar, &val, sizeof(val));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
@@ -68,20 +69,21 @@ static int handle_set_prefix(struct kvm_vcpu *vcpu)
u64 operand2;
u32 address;
int rc;
+ ar_t ar;
vcpu->stat.instruction_spx++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- operand2 = kvm_s390_get_base_disp_s(vcpu);
+ operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
/* must be word boundary */
if (operand2 & 3)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
/* get the value */
- rc = read_guest(vcpu, operand2, &address, sizeof(address));
+ rc = read_guest(vcpu, operand2, ar, &address, sizeof(address));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
@@ -107,13 +109,14 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu)
u64 operand2;
u32 address;
int rc;
+ ar_t ar;
vcpu->stat.instruction_stpx++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- operand2 = kvm_s390_get_base_disp_s(vcpu);
+ operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
/* must be word boundary */
if (operand2 & 3)
@@ -122,7 +125,7 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu)
address = kvm_s390_get_prefix(vcpu);
/* get the value */
- rc = write_guest(vcpu, operand2, &address, sizeof(address));
+ rc = write_guest(vcpu, operand2, ar, &address, sizeof(address));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
@@ -136,18 +139,19 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
u16 vcpu_id = vcpu->vcpu_id;
u64 ga;
int rc;
+ ar_t ar;
vcpu->stat.instruction_stap++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- ga = kvm_s390_get_base_disp_s(vcpu);
+ ga = kvm_s390_get_base_disp_s(vcpu, &ar);
if (ga & 1)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- rc = write_guest(vcpu, ga, &vcpu_id, sizeof(vcpu_id));
+ rc = write_guest(vcpu, ga, ar, &vcpu_id, sizeof(vcpu_id));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
@@ -207,7 +211,7 @@ static int handle_test_block(struct kvm_vcpu *vcpu)
kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
addr = kvm_s390_logical_to_effective(vcpu, addr);
- if (kvm_s390_check_low_addr_protection(vcpu, addr))
+ if (kvm_s390_check_low_addr_prot_real(vcpu, addr))
return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
addr = kvm_s390_real_to_abs(vcpu, addr);
@@ -229,18 +233,20 @@ static int handle_tpi(struct kvm_vcpu *vcpu)
struct kvm_s390_interrupt_info *inti;
unsigned long len;
u32 tpi_data[3];
- int cc, rc;
+ int rc;
u64 addr;
+ ar_t ar;
- rc = 0;
- addr = kvm_s390_get_base_disp_s(vcpu);
+ addr = kvm_s390_get_base_disp_s(vcpu, &ar);
if (addr & 3)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- cc = 0;
+
inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
- if (!inti)
- goto no_interrupt;
- cc = 1;
+ if (!inti) {
+ kvm_s390_set_psw_cc(vcpu, 0);
+ return 0;
+ }
+
tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
tpi_data[1] = inti->io.io_int_parm;
tpi_data[2] = inti->io.io_int_word;
@@ -250,40 +256,51 @@ static int handle_tpi(struct kvm_vcpu *vcpu)
* provided area.
*/
len = sizeof(tpi_data) - 4;
- rc = write_guest(vcpu, addr, &tpi_data, len);
- if (rc)
- return kvm_s390_inject_prog_cond(vcpu, rc);
+ rc = write_guest(vcpu, addr, ar, &tpi_data, len);
+ if (rc) {
+ rc = kvm_s390_inject_prog_cond(vcpu, rc);
+ goto reinject_interrupt;
+ }
} else {
/*
* Store the three-word I/O interruption code into
* the appropriate lowcore area.
*/
len = sizeof(tpi_data);
- if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len))
+ if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) {
+ /* failed writes to the low core are not recoverable */
rc = -EFAULT;
+ goto reinject_interrupt;
+ }
}
+
+ /* irq was successfully handed to the guest */
+ kfree(inti);
+ kvm_s390_set_psw_cc(vcpu, 1);
+ return 0;
+reinject_interrupt:
/*
* If we encounter a problem storing the interruption code, the
* instruction is suppressed from the guest's view: reinject the
* interrupt.
*/
- if (!rc)
+ if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
kfree(inti);
- else
- kvm_s390_reinject_io_int(vcpu->kvm, inti);
-no_interrupt:
- /* Set condition code and we're done. */
- if (!rc)
- kvm_s390_set_psw_cc(vcpu, cc);
+ rc = -EFAULT;
+ }
+ /* don't set the cc, a pgm irq was injected or we drop to user space */
return rc ? -EFAULT : 0;
}
static int handle_tsch(struct kvm_vcpu *vcpu)
{
- struct kvm_s390_interrupt_info *inti;
+ struct kvm_s390_interrupt_info *inti = NULL;
+ const u64 isc_mask = 0xffUL << 24; /* all iscs set */
- inti = kvm_s390_get_io_int(vcpu->kvm, 0,
- vcpu->run->s.regs.gprs[1]);
+ /* a valid schid has at least one bit set */
+ if (vcpu->run->s.regs.gprs[1])
+ inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask,
+ vcpu->run->s.regs.gprs[1]);
/*
* Prepare exit to userspace.
@@ -386,15 +403,16 @@ int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
psw_compat_t new_psw;
u64 addr;
int rc;
+ ar_t ar;
if (gpsw->mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- addr = kvm_s390_get_base_disp_s(vcpu);
+ addr = kvm_s390_get_base_disp_s(vcpu, &ar);
if (addr & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- rc = read_guest(vcpu, addr, &new_psw, sizeof(new_psw));
+ rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
if (!(new_psw.mask & PSW32_MASK_BASE))
@@ -412,14 +430,15 @@ static int handle_lpswe(struct kvm_vcpu *vcpu)
psw_t new_psw;
u64 addr;
int rc;
+ ar_t ar;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- addr = kvm_s390_get_base_disp_s(vcpu);
+ addr = kvm_s390_get_base_disp_s(vcpu, &ar);
if (addr & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- rc = read_guest(vcpu, addr, &new_psw, sizeof(new_psw));
+ rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
vcpu->arch.sie_block->gpsw = new_psw;
@@ -433,18 +452,19 @@ static int handle_stidp(struct kvm_vcpu *vcpu)
u64 stidp_data = vcpu->arch.stidp_data;
u64 operand2;
int rc;
+ ar_t ar;
vcpu->stat.instruction_stidp++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- operand2 = kvm_s390_get_base_disp_s(vcpu);
+ operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
if (operand2 & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- rc = write_guest(vcpu, operand2, &stidp_data, sizeof(stidp_data));
+ rc = write_guest(vcpu, operand2, ar, &stidp_data, sizeof(stidp_data));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
@@ -467,6 +487,7 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
for (n = mem->count - 1; n > 0 ; n--)
memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));
+ memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
mem->vm[0].cpus_total = cpus;
mem->vm[0].cpus_configured = cpus;
mem->vm[0].cpus_standby = 0;
@@ -478,6 +499,17 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
ASCEBC(mem->vm[0].cpi, 16);
}
+static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, ar_t ar,
+ u8 fc, u8 sel1, u16 sel2)
+{
+ vcpu->run->exit_reason = KVM_EXIT_S390_STSI;
+ vcpu->run->s390_stsi.addr = addr;
+ vcpu->run->s390_stsi.ar = ar;
+ vcpu->run->s390_stsi.fc = fc;
+ vcpu->run->s390_stsi.sel1 = sel1;
+ vcpu->run->s390_stsi.sel2 = sel2;
+}
+
static int handle_stsi(struct kvm_vcpu *vcpu)
{
int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
@@ -486,6 +518,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
unsigned long mem = 0;
u64 operand2;
int rc = 0;
+ ar_t ar;
vcpu->stat.instruction_stsi++;
VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);
@@ -508,7 +541,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
return 0;
}
- operand2 = kvm_s390_get_base_disp_s(vcpu);
+ operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
if (operand2 & 0xfff)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -532,16 +565,20 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
break;
}
- rc = write_guest(vcpu, operand2, (void *)mem, PAGE_SIZE);
+ rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE);
if (rc) {
rc = kvm_s390_inject_prog_cond(vcpu, rc);
goto out;
}
+ if (vcpu->kvm->arch.user_stsi) {
+ insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2);
+ rc = -EREMOTE;
+ }
trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
free_page(mem);
kvm_s390_set_psw_cc(vcpu, 0);
vcpu->run->s.regs.gprs[0] = 0;
- return 0;
+ return rc;
out_no_data:
kvm_s390_set_psw_cc(vcpu, 3);
out:
@@ -670,7 +707,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
}
if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
- if (kvm_s390_check_low_addr_protection(vcpu, start))
+ if (kvm_s390_check_low_addr_prot_real(vcpu, start))
return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
}
@@ -776,13 +813,14 @@ int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
int reg, rc, nr_regs;
u32 ctl_array[16];
u64 ga;
+ ar_t ar;
vcpu->stat.instruction_lctl++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- ga = kvm_s390_get_base_disp_rs(vcpu);
+ ga = kvm_s390_get_base_disp_rs(vcpu, &ar);
if (ga & 3)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -791,7 +829,7 @@ int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);
nr_regs = ((reg3 - reg1) & 0xf) + 1;
- rc = read_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u32));
+ rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
reg = reg1;
@@ -814,13 +852,14 @@ int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
int reg, rc, nr_regs;
u32 ctl_array[16];
u64 ga;
+ ar_t ar;
vcpu->stat.instruction_stctl++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- ga = kvm_s390_get_base_disp_rs(vcpu);
+ ga = kvm_s390_get_base_disp_rs(vcpu, &ar);
if (ga & 3)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -836,7 +875,7 @@ int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
break;
reg = (reg + 1) % 16;
} while (1);
- rc = write_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u32));
+ rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}
@@ -847,13 +886,14 @@ static int handle_lctlg(struct kvm_vcpu *vcpu)
int reg, rc, nr_regs;
u64 ctl_array[16];
u64 ga;
+ ar_t ar;
vcpu->stat.instruction_lctlg++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- ga = kvm_s390_get_base_disp_rsy(vcpu);
+ ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);
if (ga & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -862,7 +902,7 @@ static int handle_lctlg(struct kvm_vcpu *vcpu)
trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);
nr_regs = ((reg3 - reg1) & 0xf) + 1;
- rc = read_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u64));
+ rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
reg = reg1;
@@ -884,13 +924,14 @@ static int handle_stctg(struct kvm_vcpu *vcpu)
int reg, rc, nr_regs;
u64 ctl_array[16];
u64 ga;
+ ar_t ar;
vcpu->stat.instruction_stctg++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- ga = kvm_s390_get_base_disp_rsy(vcpu);
+ ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);
if (ga & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -906,7 +947,7 @@ static int handle_stctg(struct kvm_vcpu *vcpu)
break;
reg = (reg + 1) % 16;
} while (1);
- rc = write_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u64));
+ rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}
@@ -931,13 +972,14 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
unsigned long hva, gpa;
int ret = 0, cc = 0;
bool writable;
+ ar_t ar;
vcpu->stat.instruction_tprot++;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- kvm_s390_get_base_disp_sse(vcpu, &address1, &address2);
+ kvm_s390_get_base_disp_sse(vcpu, &address1, &address2, &ar, NULL);
/* we only handle the Linux memory detection case:
* access key == 0
@@ -946,11 +988,11 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
return -EOPNOTSUPP;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
ipte_lock(vcpu);
- ret = guest_translate_address(vcpu, address1, &gpa, 1);
+ ret = guest_translate_address(vcpu, address1, ar, &gpa, 1);
if (ret == PGM_PROTECTION) {
/* Write protected? Try again with read-only... */
cc = 1;
- ret = guest_translate_address(vcpu, address1, &gpa, 0);
+ ret = guest_translate_address(vcpu, address1, ar, &gpa, 0);
}
if (ret) {
if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) {
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 23b1e86b2122..72e58bd2bee7 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -393,6 +393,9 @@ static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu, u8 order_code)
case SIGP_STORE_STATUS_AT_ADDRESS:
vcpu->stat.instruction_sigp_store_status++;
break;
+ case SIGP_STORE_ADDITIONAL_STATUS:
+ vcpu->stat.instruction_sigp_store_adtl_status++;
+ break;
case SIGP_SET_PREFIX:
vcpu->stat.instruction_sigp_prefix++;
break;
@@ -431,7 +434,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- order_code = kvm_s390_get_base_disp_rs(vcpu);
+ order_code = kvm_s390_get_base_disp_rs(vcpu, NULL);
if (handle_sigp_order_in_user_space(vcpu, order_code))
return -EOPNOTSUPP;
@@ -473,7 +476,7 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
int r3 = vcpu->arch.sie_block->ipa & 0x000f;
u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
struct kvm_vcpu *dest_vcpu;
- u8 order_code = kvm_s390_get_base_disp_rs(vcpu);
+ u8 order_code = kvm_s390_get_base_disp_rs(vcpu, NULL);
trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr);
diff --git a/arch/sparc/include/asm/hypervisor.h b/arch/sparc/include/asm/hypervisor.h
index 4f6725ff4c33..f5b6537306f0 100644
--- a/arch/sparc/include/asm/hypervisor.h
+++ b/arch/sparc/include/asm/hypervisor.h
@@ -2957,6 +2957,17 @@ unsigned long sun4v_t5_set_perfreg(unsigned long reg_num,
unsigned long reg_val);
#endif
+
+#define HV_FAST_M7_GET_PERFREG 0x43
+#define HV_FAST_M7_SET_PERFREG 0x44
+
+#ifndef __ASSEMBLY__
+unsigned long sun4v_m7_get_perfreg(unsigned long reg_num,
+ unsigned long *reg_val);
+unsigned long sun4v_m7_set_perfreg(unsigned long reg_num,
+ unsigned long reg_val);
+#endif
+
/* Function numbers for HV_CORE_TRAP. */
#define HV_CORE_SET_VER 0x00
#define HV_CORE_PUTCHAR 0x01
@@ -2981,6 +2992,7 @@ unsigned long sun4v_t5_set_perfreg(unsigned long reg_num,
#define HV_GRP_SDIO 0x0108
#define HV_GRP_SDIO_ERR 0x0109
#define HV_GRP_REBOOT_DATA 0x0110
+#define HV_GRP_M7_PERF 0x0114
#define HV_GRP_NIAG_PERF 0x0200
#define HV_GRP_FIRE_PERF 0x0201
#define HV_GRP_N2_CPU 0x0202
diff --git a/arch/sparc/include/asm/jump_label.h b/arch/sparc/include/asm/jump_label.h
index ec2e2e2aba7d..cc9b04a2b11b 100644
--- a/arch/sparc/include/asm/jump_label.h
+++ b/arch/sparc/include/asm/jump_label.h
@@ -1,7 +1,7 @@
#ifndef _ASM_SPARC_JUMP_LABEL_H
#define _ASM_SPARC_JUMP_LABEL_H
-#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
#include <linux/types.h>
@@ -22,8 +22,6 @@ l_yes:
return true;
}
-#endif /* __KERNEL__ */
-
typedef u32 jump_label_t;
struct jump_entry {
@@ -32,4 +30,5 @@ struct jump_entry {
jump_label_t key;
};
+#endif /* __ASSEMBLY__ */
#endif
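The sparc hunk above (and the matching x86 jump_label.h hunk further down) swaps the __KERNEL__ guard for __ASSEMBLY__: __KERNEL__ is defined when preprocessing .S files too, so it never kept the C typedefs and struct jump_entry out of assembly translation units. A minimal sketch of the resulting guard pattern, with an illustrative header name:

#ifndef _EXAMPLE_JUMP_LABEL_H
#define _EXAMPLE_JUMP_LABEL_H

#ifndef __ASSEMBLY__		/* hide C-only declarations from .S files */
#include <linux/types.h>

typedef u32 jump_label_t;	/* the assembler never sees these */

struct jump_entry {
	jump_label_t code;
	jump_label_t target;
	jump_label_t key;
};
#endif /* __ASSEMBLY__ */

#endif /* _EXAMPLE_JUMP_LABEL_H */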
diff --git a/arch/sparc/kernel/hvapi.c b/arch/sparc/kernel/hvapi.c
index 5c55145bfbf0..662500fa555f 100644
--- a/arch/sparc/kernel/hvapi.c
+++ b/arch/sparc/kernel/hvapi.c
@@ -48,6 +48,7 @@ static struct api_info api_table[] = {
{ .group = HV_GRP_VT_CPU, },
{ .group = HV_GRP_T5_CPU, },
{ .group = HV_GRP_DIAG, .flags = FLAG_PRE_API },
+ { .group = HV_GRP_M7_PERF, },
};
static DEFINE_SPINLOCK(hvapi_lock);
diff --git a/arch/sparc/kernel/hvcalls.S b/arch/sparc/kernel/hvcalls.S
index caedf8320416..afbaba52d2f1 100644
--- a/arch/sparc/kernel/hvcalls.S
+++ b/arch/sparc/kernel/hvcalls.S
@@ -837,3 +837,19 @@ ENTRY(sun4v_t5_set_perfreg)
retl
nop
ENDPROC(sun4v_t5_set_perfreg)
+
+ENTRY(sun4v_m7_get_perfreg)
+ mov %o1, %o4
+ mov HV_FAST_M7_GET_PERFREG, %o5
+ ta HV_FAST_TRAP
+ stx %o1, [%o4]
+ retl
+ nop
+ENDPROC(sun4v_m7_get_perfreg)
+
+ENTRY(sun4v_m7_set_perfreg)
+ mov HV_FAST_M7_SET_PERFREG, %o5
+ ta HV_FAST_TRAP
+ retl
+ nop
+ENDPROC(sun4v_m7_set_perfreg)
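These stubs follow the usual sun4v fast-trap convention visible above: the function number goes in %o5, arguments sit in %o0/%o1, ta HV_FAST_TRAP enters the hypervisor, the status comes back in %o0 and the first return value in %o1; that is why sun4v_m7_get_perfreg parks the caller's pointer in %o4 and stores %o1 through it after the trap. A hedged C-level sketch of a caller, mirroring m7_pcr_read in the pcr.c hunk below (treating any nonzero status as failure is an assumption about the caller, not hypervisor policy):

static u64 example_read_m7_pcr(unsigned long reg_num)
{
	unsigned long val = 0;

	/* nonzero status in %o0 means the call failed; falling back
	 * to 0 here is illustrative only */
	if (sun4v_m7_get_perfreg(reg_num, &val) != 0)
		return 0;
	return val;
}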
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 9ce5afe167ff..b36365f49478 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -639,10 +639,7 @@ static void pci_claim_bus_resources(struct pci_bus *bus)
(unsigned long long)r->end,
(unsigned int)r->flags);
- if (pci_claim_resource(dev, i) == 0)
- continue;
-
- pci_claim_bridge_resource(dev, i);
+ pci_claim_resource(dev, i);
}
}
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
index 7e967c8018c8..eb978c77c76a 100644
--- a/arch/sparc/kernel/pcr.c
+++ b/arch/sparc/kernel/pcr.c
@@ -217,6 +217,31 @@ static const struct pcr_ops n5_pcr_ops = {
.pcr_nmi_disable = PCR_N4_PICNPT,
};
+static u64 m7_pcr_read(unsigned long reg_num)
+{
+ unsigned long val;
+
+ (void) sun4v_m7_get_perfreg(reg_num, &val);
+
+ return val;
+}
+
+static void m7_pcr_write(unsigned long reg_num, u64 val)
+{
+ (void) sun4v_m7_set_perfreg(reg_num, val);
+}
+
+static const struct pcr_ops m7_pcr_ops = {
+ .read_pcr = m7_pcr_read,
+ .write_pcr = m7_pcr_write,
+ .read_pic = n4_pic_read,
+ .write_pic = n4_pic_write,
+ .nmi_picl_value = n4_picl_value,
+ .pcr_nmi_enable = (PCR_N4_PICNPT | PCR_N4_STRACE |
+ PCR_N4_UTRACE | PCR_N4_TOE |
+ (26 << PCR_N4_SL_SHIFT)),
+ .pcr_nmi_disable = PCR_N4_PICNPT,
+};
static unsigned long perf_hsvc_group;
static unsigned long perf_hsvc_major;
@@ -248,6 +273,10 @@ static int __init register_perf_hsvc(void)
perf_hsvc_group = HV_GRP_T5_CPU;
break;
+ case SUN4V_CHIP_SPARC_M7:
+ perf_hsvc_group = HV_GRP_M7_PERF;
+ break;
+
default:
return -ENODEV;
}
@@ -293,6 +322,10 @@ static int __init setup_sun4v_pcr_ops(void)
pcr_ops = &n5_pcr_ops;
break;
+ case SUN4V_CHIP_SPARC_M7:
+ pcr_ops = &m7_pcr_ops;
+ break;
+
default:
ret = -ENODEV;
break;
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 46a5e4508752..86eebfa3b158 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -792,6 +792,42 @@ static const struct sparc_pmu niagara4_pmu = {
.num_pic_regs = 4,
};
+static void sparc_m7_write_pmc(int idx, u64 val)
+{
+ u64 pcr;
+
+ pcr = pcr_ops->read_pcr(idx);
+ /* ensure ov and ntc are reset */
+ pcr &= ~(PCR_N4_OV | PCR_N4_NTC);
+
+ pcr_ops->write_pic(idx, val & 0xffffffff);
+
+ pcr_ops->write_pcr(idx, pcr);
+}
+
+static const struct sparc_pmu sparc_m7_pmu = {
+ .event_map = niagara4_event_map,
+ .cache_map = &niagara4_cache_map,
+ .max_events = ARRAY_SIZE(niagara4_perfmon_event_map),
+ .read_pmc = sparc_vt_read_pmc,
+ .write_pmc = sparc_m7_write_pmc,
+ .upper_shift = 5,
+ .lower_shift = 5,
+ .event_mask = 0x7ff,
+ .user_bit = PCR_N4_UTRACE,
+ .priv_bit = PCR_N4_STRACE,
+
+ /* We explicitly don't support hypervisor tracing. */
+ .hv_bit = 0,
+
+ .irq_bit = PCR_N4_TOE,
+ .upper_nop = 0,
+ .lower_nop = 0,
+ .flags = 0,
+ .max_hw_events = 4,
+ .num_pcrs = 4,
+ .num_pic_regs = 4,
+};
static const struct sparc_pmu *sparc_pmu __read_mostly;
static u64 event_encoding(u64 event_id, int idx)
@@ -960,6 +996,8 @@ out:
cpuc->pcr[0] |= cpuc->event[0]->hw.config_base;
}
+static void sparc_pmu_start(struct perf_event *event, int flags);
+
/* On this PMU each PIC has it's own PCR control register. */
static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
{
@@ -972,20 +1010,13 @@ static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
struct perf_event *cp = cpuc->event[i];
struct hw_perf_event *hwc = &cp->hw;
int idx = hwc->idx;
- u64 enc;
if (cpuc->current_idx[i] != PIC_NO_INDEX)
continue;
- sparc_perf_event_set_period(cp, hwc, idx);
cpuc->current_idx[i] = idx;
- enc = perf_event_get_enc(cpuc->events[i]);
- cpuc->pcr[idx] &= ~mask_for_index(idx);
- if (hwc->state & PERF_HES_STOPPED)
- cpuc->pcr[idx] |= nop_for_index(idx);
- else
- cpuc->pcr[idx] |= event_encoding(enc, idx);
+ sparc_pmu_start(cp, PERF_EF_RELOAD);
}
out:
for (i = 0; i < cpuc->n_events; i++) {
@@ -1101,7 +1132,6 @@ static void sparc_pmu_del(struct perf_event *event, int _flags)
int i;
local_irq_save(flags);
- perf_pmu_disable(event->pmu);
for (i = 0; i < cpuc->n_events; i++) {
if (event == cpuc->event[i]) {
@@ -1127,7 +1157,6 @@ static void sparc_pmu_del(struct perf_event *event, int _flags)
}
}
- perf_pmu_enable(event->pmu);
local_irq_restore(flags);
}
@@ -1361,7 +1390,6 @@ static int sparc_pmu_add(struct perf_event *event, int ef_flags)
unsigned long flags;
local_irq_save(flags);
- perf_pmu_disable(event->pmu);
n0 = cpuc->n_events;
if (n0 >= sparc_pmu->max_hw_events)
@@ -1394,7 +1422,6 @@ nocheck:
ret = 0;
out:
- perf_pmu_enable(event->pmu);
local_irq_restore(flags);
return ret;
}
@@ -1667,6 +1694,10 @@ static bool __init supported_pmu(void)
sparc_pmu = &niagara4_pmu;
return true;
}
+ if (!strcmp(sparc_pmu_type, "sparc-m7")) {
+ sparc_pmu = &sparc_m7_pmu;
+ return true;
+ }
return false;
}
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 0be7bf978cb1..46a59643bb1c 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -287,6 +287,8 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
printk(" TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n",
gp->tpc, gp->o7, gp->i7, gp->rpc);
}
+
+ touch_nmi_watchdog();
}
memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
@@ -362,6 +364,8 @@ static void pmu_snapshot_all_cpus(void)
(cpu == this_cpu ? '*' : ' '), cpu,
pp->pcr[0], pp->pcr[1], pp->pcr[2], pp->pcr[3],
pp->pic[0], pp->pic[1], pp->pic[2], pp->pic[3]);
+
+ touch_nmi_watchdog();
}
memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
diff --git a/arch/sparc/lib/memmove.S b/arch/sparc/lib/memmove.S
index b7f6334e159f..857ad4f8905f 100644
--- a/arch/sparc/lib/memmove.S
+++ b/arch/sparc/lib/memmove.S
@@ -8,9 +8,11 @@
.text
ENTRY(memmove) /* o0=dst o1=src o2=len */
- mov %o0, %g1
+ brz,pn %o2, 99f
+ mov %o0, %g1
+
cmp %o0, %o1
- bleu,pt %xcc, memcpy
+ bleu,pt %xcc, 2f
add %o1, %o2, %g7
cmp %g7, %o0
bleu,pt %xcc, memcpy
@@ -24,7 +26,34 @@ ENTRY(memmove) /* o0=dst o1=src o2=len */
stb %g7, [%o0]
bne,pt %icc, 1b
sub %o0, 1, %o0
-
+99:
retl
mov %g1, %o0
+
+ /* We can't just call memcpy for these memmove cases. On some
+ * chips the memcpy uses cache-initializing stores, and when dst
+ * and src are close enough, those can clobber the source data
+ * before we've loaded it in.
+ */
+2: or %o0, %o1, %g7
+ or %o2, %g7, %g7
+ andcc %g7, 0x7, %g0
+ bne,pn %xcc, 4f
+ nop
+
+3: ldx [%o1], %g7
+ add %o1, 8, %o1
+ subcc %o2, 8, %o2
+ add %o0, 8, %o0
+ bne,pt %icc, 3b
+ stx %g7, [%o0 - 0x8]
+ ba,a,pt %xcc, 99b
+
+4: ldub [%o1], %g7
+ add %o1, 1, %o1
+ subcc %o2, 1, %o2
+ add %o0, 1, %o0
+ bne,pt %icc, 4b
+ stb %g7, [%o0 - 0x1]
+ ba,a,pt %xcc, 99b
ENDPROC(memmove)
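The new forward path dispatches on a single alignment test: OR-ing dst, src, and len preserves every low bit that is set in any of the three, so one andcc against 0x7 proves all of them are 8-byte aligned at once. A user-space C sketch of that dispatch (not the kernel routine, and without its overlap analysis):

#include <stddef.h>
#include <stdint.h>

static void forward_copy(void *dst, const void *src, size_t len)
{
	if ((((uintptr_t)dst | (uintptr_t)src | len) & 0x7) == 0) {
		uint64_t *d = dst;
		const uint64_t *s = src;

		for (; len; len -= 8)	/* len is a multiple of 8 here */
			*d++ = *s++;
	} else {
		unsigned char *d = dst;
		const unsigned char *s = src;

		for (; len; len--)
			*d++ = *s++;
	}
}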
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 25bce45c6fc4..3738b138b843 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -2,6 +2,8 @@
#define _ASM_X86_EFI_H
#include <asm/i387.h>
+#include <asm/pgtable.h>
+
/*
* We map the EFI regions needed for runtime services non-contiguously,
* with preserved alignment on virtual addresses starting from -4G down
@@ -89,8 +91,8 @@ extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
extern struct efi_scratch efi_scratch;
extern void __init efi_set_executable(efi_memory_desc_t *md, bool executable);
extern int __init efi_memblock_x86_reserve_range(void);
-extern void __init efi_call_phys_prolog(void);
-extern void __init efi_call_phys_epilog(void);
+extern pgd_t * __init efi_call_phys_prolog(void);
+extern void __init efi_call_phys_epilog(pgd_t *save_pgd);
extern void __init efi_unmap_memmap(void);
extern void __init efi_memory_uc(u64 addr, unsigned long size);
extern void __init efi_map_region(efi_memory_desc_t *md);
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index 6a2cefb4395a..a4c1cf7e93f8 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -1,7 +1,7 @@
#ifndef _ASM_X86_JUMP_LABEL_H
#define _ASM_X86_JUMP_LABEL_H
-#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
#include <linux/stringify.h>
#include <linux/types.h>
@@ -30,8 +30,6 @@ l_yes:
return true;
}
-#endif /* __KERNEL__ */
-
#ifdef CONFIG_X86_64
typedef u64 jump_label_t;
#else
@@ -44,4 +42,5 @@ struct jump_entry {
jump_label_t key;
};
+#endif /* __ASSEMBLY__ */
#endif
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index a236e39cc385..dea2e7e962e3 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -81,11 +81,6 @@ static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
(base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}
-#define SELECTOR_TI_MASK (1 << 2)
-#define SELECTOR_RPL_MASK 0x03
-
-#define IOPL_SHIFT 12
-
#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64
#define KVM_MMU_HASH_SHIFT 10
@@ -345,6 +340,7 @@ struct kvm_pmu {
enum {
KVM_DEBUGREG_BP_ENABLED = 1,
KVM_DEBUGREG_WONT_EXIT = 2,
+ KVM_DEBUGREG_RELOAD = 4,
};
struct kvm_vcpu_arch {
@@ -431,6 +427,9 @@ struct kvm_vcpu_arch {
int cpuid_nent;
struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];
+
+ int maxphyaddr;
+
/* emulate context */
struct x86_emulate_ctxt emulate_ctxt;
@@ -550,11 +549,20 @@ struct kvm_arch_memory_slot {
struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
};
+/*
+ * As the mode we use the number of bits allocated in the LDR for the
+ * logical processor ID. It happens that these are all powers of two,
+ * which makes it very easy to detect cases where the APICs are
+ * configured for multiple modes; in that case, we cannot use the map and
+ * hence cannot use kvm_irq_delivery_to_apic_fast either.
+ */
+#define KVM_APIC_MODE_XAPIC_CLUSTER 4
+#define KVM_APIC_MODE_XAPIC_FLAT 8
+#define KVM_APIC_MODE_X2APIC 16
+
struct kvm_apic_map {
struct rcu_head rcu;
- u8 ldr_bits;
- /* fields bellow are used to decode ldr values in different modes */
- u32 cid_shift, cid_mask, lid_mask, broadcast;
+ u8 mode;
struct kvm_lapic *phys_map[256];
/* first index is cluster id second is cpu id in a cluster */
struct kvm_lapic *logical_map[16][16];
@@ -859,6 +867,8 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
struct kvm_memory_slot *memslot);
+void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
+ struct kvm_memory_slot *memslot);
void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
struct kvm_memory_slot *memslot);
void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
@@ -933,6 +943,7 @@ struct x86_emulate_ctxt;
int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port);
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
+int kvm_vcpu_halt(struct kvm_vcpu *vcpu);
int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);
void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
@@ -1128,7 +1139,6 @@ int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
-int cpuid_maxphyaddr(struct kvm_vcpu *vcpu);
int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
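The KVM_APIC_MODE_* encoding makes the mixed-mode test cheap: OR-ing every vCPU's mode into one byte stays a power of two only while all APICs agree. A self-contained sketch of the validity check and LDR decode that kvm_apic_logical_map_valid()/apic_logical_id() in the lapic.c hunk below build on (ex_/EX_ names are illustrative):

#include <stdint.h>
#include <stdio.h>

#define EX_MODE_XAPIC_CLUSTER	4	/* 4-bit logical IDs  */
#define EX_MODE_XAPIC_FLAT	8	/* 8-bit logical IDs  */
#define EX_MODE_X2APIC		16	/* 16-bit logical IDs */

/* mode == 0 (no logical IDs programmed) also passes, as in KVM */
static int ex_logical_map_valid(uint8_t mode)
{
	return !(mode & (mode - 1));	/* at most one mode bit set */
}

static void ex_logical_id(uint8_t mode, uint32_t ldr,
			  uint16_t *cid, uint16_t *lid)
{
	*cid = ldr >> mode;		 /* cluster ID above the LID bits */
	*lid = ldr & ((1u << mode) - 1); /* logical ID in the low bits   */
}

int main(void)
{
	uint16_t cid, lid;
	uint8_t mode = EX_MODE_XAPIC_CLUSTER;

	ex_logical_id(mode, 0x23, &cid, &lid);
	printf("valid=%d cid=%u lid=0x%x\n",
	       ex_logical_map_valid(mode), cid, lid);	/* valid=1 cid=2 lid=0x3 */

	mode |= EX_MODE_X2APIC;		/* one vCPU switched modes */
	printf("mixed -> valid=%d\n", ex_logical_map_valid(mode));	/* 0 */
	return 0;
}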
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index e62cf897f781..c1adf33fdd0d 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -115,7 +115,7 @@ static inline void kvm_spinlock_init(void)
static inline bool kvm_para_available(void)
{
- return 0;
+ return false;
}
static inline unsigned int kvm_arch_para_features(void)
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index d6b078e9fa28..25b1cc07d496 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -95,6 +95,7 @@ unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src,
struct pvclock_vsyscall_time_info {
struct pvclock_vcpu_time_info pvti;
+ u32 migrate_count;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
#define PVTI_SIZE sizeof(struct pvclock_vsyscall_time_info)
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 12a26b979bf1..f2f9b39b274a 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -231,6 +231,6 @@ __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
}
unsigned long
-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
+copy_user_handle_tail(char *to, char *from, unsigned len);
#endif /* _ASM_X86_UACCESS_64_H */
diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h
index c5f1a1deb91a..1fe92181ee9e 100644
--- a/arch/x86/include/uapi/asm/vmx.h
+++ b/arch/x86/include/uapi/asm/vmx.h
@@ -67,6 +67,7 @@
#define EXIT_REASON_EPT_VIOLATION 48
#define EXIT_REASON_EPT_MISCONFIG 49
#define EXIT_REASON_INVEPT 50
+#define EXIT_REASON_RDTSCP 51
#define EXIT_REASON_PREEMPTION_TIMER 52
#define EXIT_REASON_INVVPID 53
#define EXIT_REASON_WBINVD 54
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 498b6d967138..258990688a5e 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -212,11 +212,11 @@ static struct event_constraint intel_hsw_event_constraints[] = {
INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
/* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
- INTEL_EVENT_CONSTRAINT(0x08a3, 0x4),
+ INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
/* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
- INTEL_EVENT_CONSTRAINT(0x0ca3, 0x4),
+ INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
/* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
- INTEL_EVENT_CONSTRAINT(0x04a3, 0xf),
+ INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
EVENT_CONSTRAINT_END
};
@@ -1649,11 +1649,11 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event
if (c)
return c;
- c = intel_pebs_constraints(event);
+ c = intel_shared_regs_constraints(cpuc, event);
if (c)
return c;
- c = intel_shared_regs_constraints(cpuc, event);
+ c = intel_pebs_constraints(event);
if (c)
return c;
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 1d74d161687c..f0095a76c182 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -364,12 +364,21 @@ system_call_fastpath:
* Has incomplete stack frame and undefined top of stack.
*/
ret_from_sys_call:
- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
- jnz int_ret_from_sys_call_fixup /* Go the the slow path */
-
LOCKDEP_SYS_EXIT
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
+
+ /*
+ * We must check ti flags with interrupts (or at least preemption)
+ * off because we must *never* return to userspace without
+ * processing exit work that is enqueued if we're preempted here.
+ * In particular, returning to userspace with any of the one-shot
+ * flags (TIF_NOTIFY_RESUME, TIF_USER_RETURN_NOTIFY, etc) set is
+ * very bad.
+ */
+ testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+ jnz int_ret_from_sys_call_fixup /* Go to the slow path */
+
CFI_REMEMBER_STATE
/*
* sysretq will re-enable interrupts:
@@ -386,7 +395,7 @@ ret_from_sys_call:
int_ret_from_sys_call_fixup:
FIXUP_TOP_OF_STACK %r11, -ARGOFFSET
- jmp int_ret_from_sys_call
+ jmp int_ret_from_sys_call_irqs_off
/* Do syscall tracing */
tracesys:
@@ -432,6 +441,7 @@ tracesys_phase2:
GLOBAL(int_ret_from_sys_call)
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
+int_ret_from_sys_call_irqs_off:
movl $_TIF_ALLWORK_MASK,%edi
/* edi: mask to check */
GLOBAL(int_with_check)
@@ -789,7 +799,21 @@ retint_swapgs: /* return to user-space */
cmpq %r11,(EFLAGS-ARGOFFSET)(%rsp) /* R11 == RFLAGS */
jne opportunistic_sysret_failed
- testq $X86_EFLAGS_RF,%r11 /* sysret can't restore RF */
+ /*
+ * SYSRET can't restore RF. SYSRET can restore TF, but unlike IRET,
+ * restoring TF results in a trap from userspace immediately after
+ * SYSRET. This would cause an infinite loop whenever #DB happens
+ * with register state that satisfies the opportunistic SYSRET
+ * conditions. For example, single-stepping this user code:
+ *
+ * movq $stuck_here,%rcx
+ * pushfq
+ * popq %r11
+ * stuck_here:
+ *
+ * would never get past 'stuck_here'.
+ */
+ testq $(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11
jnz opportunistic_sysret_failed
/* nothing to check for RSP */
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index 7ec1d5f8d283..25ecd56cefa8 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -72,7 +72,7 @@ struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
{ "bx", 8, offsetof(struct pt_regs, bx) },
{ "cx", 8, offsetof(struct pt_regs, cx) },
{ "dx", 8, offsetof(struct pt_regs, dx) },
- { "si", 8, offsetof(struct pt_regs, dx) },
+ { "si", 8, offsetof(struct pt_regs, si) },
{ "di", 8, offsetof(struct pt_regs, di) },
{ "bp", 8, offsetof(struct pt_regs, bp) },
{ "sp", 8, offsetof(struct pt_regs, sp) },
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
index 2f355d229a58..e5ecd20e72dd 100644
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -141,7 +141,46 @@ void pvclock_read_wallclock(struct pvclock_wall_clock *wall_clock,
set_normalized_timespec(ts, now.tv_sec, now.tv_nsec);
}
+static struct pvclock_vsyscall_time_info *pvclock_vdso_info;
+
+static struct pvclock_vsyscall_time_info *
+pvclock_get_vsyscall_user_time_info(int cpu)
+{
+ if (!pvclock_vdso_info) {
+ BUG();
+ return NULL;
+ }
+
+ return &pvclock_vdso_info[cpu];
+}
+
+struct pvclock_vcpu_time_info *pvclock_get_vsyscall_time_info(int cpu)
+{
+ return &pvclock_get_vsyscall_user_time_info(cpu)->pvti;
+}
+
#ifdef CONFIG_X86_64
+static int pvclock_task_migrate(struct notifier_block *nb, unsigned long l,
+ void *v)
+{
+ struct task_migration_notifier *mn = v;
+ struct pvclock_vsyscall_time_info *pvti;
+
+ pvti = pvclock_get_vsyscall_user_time_info(mn->from_cpu);
+
+ /* this is NULL when pvclock vsyscall is not initialized */
+ if (unlikely(pvti == NULL))
+ return NOTIFY_DONE;
+
+ pvti->migrate_count++;
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block pvclock_migrate = {
+ .notifier_call = pvclock_task_migrate,
+};
+
/*
* Initialize the generic pvclock vsyscall state. This will allocate
* a/some page(s) for the per-vcpu pvclock information, set up a
@@ -155,12 +194,17 @@ int __init pvclock_init_vsyscall(struct pvclock_vsyscall_time_info *i,
WARN_ON (size != PVCLOCK_VSYSCALL_NR_PAGES*PAGE_SIZE);
+ pvclock_vdso_info = i;
+
for (idx = 0; idx <= (PVCLOCK_FIXMAP_END-PVCLOCK_FIXMAP_BEGIN); idx++) {
__set_fixmap(PVCLOCK_FIXMAP_BEGIN + idx,
__pa(i) + (idx*PAGE_SIZE),
PAGE_KERNEL_VVAR);
}
+
+ register_task_migration_notifier(&pvclock_migrate);
+
return 0;
}
#endif
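migrate_count gives a vDSO reader a cheap way to detect that its task moved CPUs in the middle of a clock read: sample the counter before and after, and retry on a mismatch, so time data from two different per-CPU pvti structures is never mixed. A hedged user-space sketch of that retry loop (field names are stand-ins, and the real reader also checks the version field and uses proper barriers):

#include <stdint.h>

struct ex_pvti {
	volatile uint32_t version;
	volatile uint64_t system_time;
	volatile uint32_t migrate_count;
};

static uint64_t ex_read_clock(struct ex_pvti *p)
{
	uint32_t migrate;
	uint64_t t;

	do {
		migrate = p->migrate_count;
		t = p->system_time;	/* plus the usual version check */
	} while (p->migrate_count != migrate);

	return t;
}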
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index bae6c609888e..86db4bcd7ce5 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -183,6 +183,16 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
},
},
+ /* ASRock */
+ { /* Handle problems with rebooting on ASRock Q1900DC-ITX */
+ .callback = set_pci_reboot,
+ .ident = "ASRock Q1900DC-ITX",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASRock"),
+ DMI_MATCH(DMI_BOARD_NAME, "Q1900DC-ITX"),
+ },
+ },
+
/* ASUS */
{ /* Handle problems with rebooting on ASUS P4S800 */
.callback = set_bios_reboot,
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index 08f790dfadc9..16e8f962eaad 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -1,5 +1,5 @@
-ccflags-y += -Ivirt/kvm -Iarch/x86/kvm
+ccflags-y += -Iarch/x86/kvm
CFLAGS_x86.o := -I.
CFLAGS_svm.o := -I.
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 8a80737ee6e6..59b69f6a2844 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -104,6 +104,9 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
((best->eax & 0xff00) >> 8) != 0)
return -EINVAL;
+ /* Update physical-address width */
+ vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
+
kvm_pmu_cpuid_update(vcpu);
return 0;
}
@@ -135,6 +138,21 @@ static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
}
}
+int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid_entry2 *best;
+
+ best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
+ if (!best || best->eax < 0x80000008)
+ goto not_found;
+ best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
+ if (best)
+ return best->eax & 0xff;
+not_found:
+ return 36;
+}
+EXPORT_SYMBOL_GPL(cpuid_query_maxphyaddr);
+
/* when an old userspace process fills a new kernel module */
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
struct kvm_cpuid *cpuid,
@@ -757,21 +775,6 @@ struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
}
EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
-int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
-{
- struct kvm_cpuid_entry2 *best;
-
- best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
- if (!best || best->eax < 0x80000008)
- goto not_found;
- best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
- if (best)
- return best->eax & 0xff;
-not_found:
- return 36;
-}
-EXPORT_SYMBOL_GPL(cpuid_maxphyaddr);
-
/*
* If no match is found, check whether we exceed the vCPU's limit
* and return the content of the highest valid _standard_ leaf instead.
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index 4452eedfaedd..c3b1ad9fca81 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -20,13 +20,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
struct kvm_cpuid_entry2 __user *entries);
void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);
+int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);
+
+static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.maxphyaddr;
+}
static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
if (!static_cpu_has(X86_FEATURE_XSAVE))
- return 0;
+ return false;
best = kvm_find_cpuid_entry(vcpu, 1, 0);
return best && (best->ecx & bit(X86_FEATURE_XSAVE));
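cpuid_maxphyaddr() used to rescan the guest CPUID entries on every call; it is now computed once in kvm_update_cpuid() and cached in vcpu->arch.maxphyaddr, so the hot-path query collapses to a field read. A typical consumer derives reserved-bit masks from it; a minimal sketch, assuming the architectural 52-bit PTE physical-address ceiling (the helper name is made up):

#include <stdint.h>

static uint64_t ex_rsvd_pa_mask(int maxphyaddr)
{
	/* PTE physical-address bits [51:maxphyaddr] must be zero */
	return ((1ULL << 52) - 1) & ~((1ULL << maxphyaddr) - 1);
}

With the 36-bit fallback from cpuid_query_maxphyaddr(), this yields a mask covering bits 51:36.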
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 106c01557f2b..630bcb0d7a04 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -248,27 +248,7 @@ struct mode_dual {
struct opcode mode64;
};
-/* EFLAGS bit definitions. */
-#define EFLG_ID (1<<21)
-#define EFLG_VIP (1<<20)
-#define EFLG_VIF (1<<19)
-#define EFLG_AC (1<<18)
-#define EFLG_VM (1<<17)
-#define EFLG_RF (1<<16)
-#define EFLG_IOPL (3<<12)
-#define EFLG_NT (1<<14)
-#define EFLG_OF (1<<11)
-#define EFLG_DF (1<<10)
-#define EFLG_IF (1<<9)
-#define EFLG_TF (1<<8)
-#define EFLG_SF (1<<7)
-#define EFLG_ZF (1<<6)
-#define EFLG_AF (1<<4)
-#define EFLG_PF (1<<2)
-#define EFLG_CF (1<<0)
-
#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
-#define EFLG_RESERVED_ONE_MASK 2
enum x86_transfer_type {
X86_TRANSFER_NONE,
@@ -317,7 +297,8 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
* These EFLAGS bits are restored from saved value during emulation, and
* any changes are written back to the saved value after emulation.
*/
-#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
+#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
+ X86_EFLAGS_PF|X86_EFLAGS_CF)
#ifdef CONFIG_X86_64
#define ON64(x) x
@@ -478,6 +459,25 @@ static void assign_masked(ulong *dest, ulong src, ulong mask)
*dest = (*dest & ~mask) | (src & mask);
}
+static void assign_register(unsigned long *reg, u64 val, int bytes)
+{
+ /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
+ switch (bytes) {
+ case 1:
+ *(u8 *)reg = (u8)val;
+ break;
+ case 2:
+ *(u16 *)reg = (u16)val;
+ break;
+ case 4:
+ *reg = (u32)val;
+ break; /* 64b: zero-extend */
+ case 8:
+ *reg = val;
+ break;
+ }
+}
+
static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
return (1UL << (ctxt->ad_bytes << 3)) - 1;
@@ -943,6 +943,22 @@ FASTOP2(xadd);
FASTOP2R(cmp, cmp_r);
+static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
+{
+ /* If src is zero, do not write back, but update flags */
+ if (ctxt->src.val == 0)
+ ctxt->dst.type = OP_NONE;
+ return fastop(ctxt, em_bsf);
+}
+
+static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
+{
+ /* If src is zero, do not write back, but update flags */
+ if (ctxt->src.val == 0)
+ ctxt->dst.type = OP_NONE;
+ return fastop(ctxt, em_bsr);
+}
+
static u8 test_cc(unsigned int condition, unsigned long flags)
{
u8 rc;
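em_bsf_c/em_bsr_c encode an architectural corner case: with a zero source, BSF/BSR set ZF but the destination register is not written (AMD documents it as unchanged; Intel leaves it undefined), so the emulator suppresses the writeback while still running the flag-setting fastop. A standalone sketch of the BSF half:

#include <stdint.h>

struct ex_bsf_result { uint64_t dst; int zf; };

static struct ex_bsf_result ex_bsf(uint64_t dst_before, uint64_t src)
{
	struct ex_bsf_result r = { .dst = dst_before, .zf = 0 };

	if (src == 0) {
		r.zf = 1;			/* flags change, dst does not */
		return r;
	}
	r.dst = __builtin_ctzll(src);		/* index of lowest set bit */
	return r;
}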
@@ -1399,7 +1415,7 @@ static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
unsigned int in_page, n;
unsigned int count = ctxt->rep_prefix ?
address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
- in_page = (ctxt->eflags & EFLG_DF) ?
+ in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
@@ -1412,7 +1428,7 @@ static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
}
if (ctxt->rep_prefix && (ctxt->d & String) &&
- !(ctxt->eflags & EFLG_DF)) {
+ !(ctxt->eflags & X86_EFLAGS_DF)) {
ctxt->dst.data = rc->data + rc->pos;
ctxt->dst.type = OP_MEM_STR;
ctxt->dst.count = (rc->end - rc->pos) / size;
@@ -1691,21 +1707,7 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
static void write_register_operand(struct operand *op)
{
- /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
- switch (op->bytes) {
- case 1:
- *(u8 *)op->addr.reg = (u8)op->val;
- break;
- case 2:
- *(u16 *)op->addr.reg = (u16)op->val;
- break;
- case 4:
- *op->addr.reg = (u32)op->val;
- break; /* 64b: zero-extend */
- case 8:
- *op->addr.reg = op->val;
- break;
- }
+ return assign_register(op->addr.reg, op->val, op->bytes);
}
static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
@@ -1792,32 +1794,34 @@ static int emulate_popf(struct x86_emulate_ctxt *ctxt,
{
int rc;
unsigned long val, change_mask;
- int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
+ int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
int cpl = ctxt->ops->cpl(ctxt);
rc = emulate_pop(ctxt, &val, len);
if (rc != X86EMUL_CONTINUE)
return rc;
- change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
- | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_AC | EFLG_ID;
+ change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
+ X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
+ X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
+ X86_EFLAGS_AC | X86_EFLAGS_ID;
switch(ctxt->mode) {
case X86EMUL_MODE_PROT64:
case X86EMUL_MODE_PROT32:
case X86EMUL_MODE_PROT16:
if (cpl == 0)
- change_mask |= EFLG_IOPL;
+ change_mask |= X86_EFLAGS_IOPL;
if (cpl <= iopl)
- change_mask |= EFLG_IF;
+ change_mask |= X86_EFLAGS_IF;
break;
case X86EMUL_MODE_VM86:
if (iopl < 3)
return emulate_gp(ctxt, 0);
- change_mask |= EFLG_IF;
+ change_mask |= X86_EFLAGS_IF;
break;
default: /* real mode */
- change_mask |= (EFLG_IOPL | EFLG_IF);
+ change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
break;
}
@@ -1918,7 +1922,7 @@ static int em_pusha(struct x86_emulate_ctxt *ctxt)
static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
- ctxt->src.val = (unsigned long)ctxt->eflags & ~EFLG_VM;
+ ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
return em_push(ctxt);
}
@@ -1926,6 +1930,7 @@ static int em_popa(struct x86_emulate_ctxt *ctxt)
{
int rc = X86EMUL_CONTINUE;
int reg = VCPU_REGS_RDI;
+ u32 val;
while (reg >= VCPU_REGS_RAX) {
if (reg == VCPU_REGS_RSP) {
@@ -1933,9 +1938,10 @@ static int em_popa(struct x86_emulate_ctxt *ctxt)
--reg;
}
- rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes);
+ rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
break;
+ assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
--reg;
}
return rc;
@@ -1956,7 +1962,7 @@ static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
if (rc != X86EMUL_CONTINUE)
return rc;
- ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);
+ ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);
ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
rc = em_push(ctxt);
@@ -2022,10 +2028,14 @@ static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
unsigned long temp_eip = 0;
unsigned long temp_eflags = 0;
unsigned long cs = 0;
- unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
- EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
- EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
- unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;
+ unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
+ X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
+ X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
+ X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
+ X86_EFLAGS_AC | X86_EFLAGS_ID |
+ X86_EFLAGS_FIXED;
+ unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
+ X86_EFLAGS_VIP;
/* TODO: Add stack limit check */
@@ -2054,7 +2064,6 @@ static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
ctxt->_eip = temp_eip;
-
if (ctxt->op_bytes == 4)
ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
else if (ctxt->op_bytes == 2) {
@@ -2063,7 +2072,7 @@ static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
}
ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
- ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
+ ctxt->eflags |= X86_EFLAGS_FIXED;
ctxt->ops->set_nmi_mask(ctxt, false);
return rc;
@@ -2145,12 +2154,12 @@ static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
- ctxt->eflags &= ~EFLG_ZF;
+ ctxt->eflags &= ~X86_EFLAGS_ZF;
} else {
ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
(u32) reg_read(ctxt, VCPU_REGS_RBX);
- ctxt->eflags |= EFLG_ZF;
+ ctxt->eflags |= X86_EFLAGS_ZF;
}
return X86EMUL_CONTINUE;
}
@@ -2222,7 +2231,7 @@ static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
ctxt->src.val = ctxt->dst.orig_val;
fastop(ctxt, em_cmp);
- if (ctxt->eflags & EFLG_ZF) {
+ if (ctxt->eflags & X86_EFLAGS_ZF) {
/* Success: write back to memory; no update of EAX */
ctxt->src.type = OP_NONE;
ctxt->dst.val = ctxt->src.orig_val;
@@ -2381,14 +2390,14 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt)
ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
ctxt->eflags &= ~msr_data;
- ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
+ ctxt->eflags |= X86_EFLAGS_FIXED;
#endif
} else {
/* legacy mode */
ops->get_msr(ctxt, MSR_STAR, &msr_data);
ctxt->_eip = (u32)msr_data;
- ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
+ ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
}
return X86EMUL_CONTINUE;
@@ -2425,8 +2434,8 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
if ((msr_data & 0xfffc) == 0x0)
return emulate_gp(ctxt, 0);
- ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
- cs_sel = (u16)msr_data & ~SELECTOR_RPL_MASK;
+ ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
+ cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
ss_sel = cs_sel + 8;
if (efer & EFER_LMA) {
cs.d = 0;
@@ -2493,8 +2502,8 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
return emulate_gp(ctxt, 0);
break;
}
- cs_sel |= SELECTOR_RPL_MASK;
- ss_sel |= SELECTOR_RPL_MASK;
+ cs_sel |= SEGMENT_RPL_MASK;
+ ss_sel |= SEGMENT_RPL_MASK;
ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
@@ -2512,7 +2521,7 @@ static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
return false;
if (ctxt->mode == X86EMUL_MODE_VM86)
return true;
- iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
+ iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
return ctxt->ops->cpl(ctxt) > iopl;
}
@@ -2782,10 +2791,8 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
return ret;
ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
X86_TRANSFER_TASK_SWITCH, NULL);
- if (ret != X86EMUL_CONTINUE)
- return ret;
- return X86EMUL_CONTINUE;
+ return ret;
}
static int task_switch_32(struct x86_emulate_ctxt *ctxt,
@@ -2954,7 +2961,7 @@ int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
struct operand *op)
{
- int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;
+ int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;
register_address_increment(ctxt, reg, df * op->bytes);
op->addr.mem.ea = register_address(ctxt, reg);
@@ -3323,7 +3330,7 @@ static int em_clts(struct x86_emulate_ctxt *ctxt)
return X86EMUL_CONTINUE;
}
-static int em_vmcall(struct x86_emulate_ctxt *ctxt)
+static int em_hypercall(struct x86_emulate_ctxt *ctxt)
{
int rc = ctxt->ops->fix_hypercall(ctxt);
@@ -3395,17 +3402,6 @@ static int em_lgdt(struct x86_emulate_ctxt *ctxt)
return em_lgdt_lidt(ctxt, true);
}
-static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
-{
- int rc;
-
- rc = ctxt->ops->fix_hypercall(ctxt);
-
- /* Disable writeback. */
- ctxt->dst.type = OP_NONE;
- return rc;
-}
-
static int em_lidt(struct x86_emulate_ctxt *ctxt)
{
return em_lgdt_lidt(ctxt, false);
@@ -3504,7 +3500,8 @@ static int em_sahf(struct x86_emulate_ctxt *ctxt)
{
u32 flags;
- flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF;
+ flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
+ X86_EFLAGS_SF;
flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
ctxt->eflags &= ~0xffUL;
@@ -3769,7 +3766,7 @@ static int check_perm_out(struct x86_emulate_ctxt *ctxt)
static const struct opcode group7_rm0[] = {
N,
- I(SrcNone | Priv | EmulateOnUD, em_vmcall),
+ I(SrcNone | Priv | EmulateOnUD, em_hypercall),
N, N, N, N, N, N,
};
@@ -3781,7 +3778,7 @@ static const struct opcode group7_rm1[] = {
static const struct opcode group7_rm3[] = {
DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
- II(SrcNone | Prot | EmulateOnUD, em_vmmcall, vmmcall),
+ II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall),
DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
DIP(SrcNone | Prot | Priv, stgi, check_svme),
@@ -4192,7 +4189,8 @@ static const struct opcode twobyte_table[256] = {
N, N,
G(BitOp, group8),
F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
- F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr),
+ I(DstReg | SrcMem | ModRM, em_bsf_c),
+ I(DstReg | SrcMem | ModRM, em_bsr_c),
D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
/* 0xC0 - 0xC7 */
F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
@@ -4759,9 +4757,9 @@ static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
(ctxt->b == 0xae) || (ctxt->b == 0xaf))
&& (((ctxt->rep_prefix == REPE_PREFIX) &&
- ((ctxt->eflags & EFLG_ZF) == 0))
+ ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
- ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
+ ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
return true;
return false;
@@ -4913,7 +4911,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
/* All REP prefixes have the same first termination condition */
if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
ctxt->eip = ctxt->_eip;
- ctxt->eflags &= ~EFLG_RF;
+ ctxt->eflags &= ~X86_EFLAGS_RF;
goto done;
}
}
@@ -4963,9 +4961,9 @@ special_insn:
}
if (ctxt->rep_prefix && (ctxt->d & String))
- ctxt->eflags |= EFLG_RF;
+ ctxt->eflags |= X86_EFLAGS_RF;
else
- ctxt->eflags &= ~EFLG_RF;
+ ctxt->eflags &= ~X86_EFLAGS_RF;
if (ctxt->execute) {
if (ctxt->d & Fastop) {
@@ -5014,7 +5012,7 @@ special_insn:
rc = emulate_int(ctxt, ctxt->src.val);
break;
case 0xce: /* into */
- if (ctxt->eflags & EFLG_OF)
+ if (ctxt->eflags & X86_EFLAGS_OF)
rc = emulate_int(ctxt, 4);
break;
case 0xe9: /* jmp rel */
@@ -5027,19 +5025,19 @@ special_insn:
break;
case 0xf5: /* cmc */
/* complement carry flag from eflags reg */
- ctxt->eflags ^= EFLG_CF;
+ ctxt->eflags ^= X86_EFLAGS_CF;
break;
case 0xf8: /* clc */
- ctxt->eflags &= ~EFLG_CF;
+ ctxt->eflags &= ~X86_EFLAGS_CF;
break;
case 0xf9: /* stc */
- ctxt->eflags |= EFLG_CF;
+ ctxt->eflags |= X86_EFLAGS_CF;
break;
case 0xfc: /* cld */
- ctxt->eflags &= ~EFLG_DF;
+ ctxt->eflags &= ~X86_EFLAGS_DF;
break;
case 0xfd: /* std */
- ctxt->eflags |= EFLG_DF;
+ ctxt->eflags |= X86_EFLAGS_DF;
break;
default:
goto cannot_emulate;
@@ -5100,7 +5098,7 @@ writeback:
}
goto done; /* skip rip writeback */
}
- ctxt->eflags &= ~EFLG_RF;
+ ctxt->eflags &= ~X86_EFLAGS_RF;
}
ctxt->eip = ctxt->_eip;
@@ -5137,8 +5135,7 @@ twobyte_insn:
case 0x40 ... 0x4f: /* cmov */
if (test_cc(ctxt->b, ctxt->eflags))
ctxt->dst.val = ctxt->src.val;
- else if (ctxt->mode != X86EMUL_MODE_PROT64 ||
- ctxt->op_bytes != 4)
+ else if (ctxt->op_bytes != 4)
ctxt->dst.type = OP_NONE; /* no writeback */
break;
case 0x80 ... 0x8f: /* jnz rel, etc*/
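The cmov hunk above drops the mode check: a 32-bit CMOV whose condition is false still performs a destination write, and in 64-bit mode that write zero-extends, clearing bits 63:32 of the register. A sketch of the resulting semantics:

#include <stdint.h>

static uint64_t ex_cmov32(uint64_t dst, uint32_t src, int cond)
{
	uint32_t low = cond ? src : (uint32_t)dst;

	return (uint64_t)low;	/* the high half is cleared either way */
}

For example, ex_cmov32(0xffffffff00000001ull, 7, 0) returns 0x1, not the untouched 64-bit value.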
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 298781d4cfb4..4dce6f8b6129 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -443,7 +443,8 @@ static inline int pit_in_range(gpa_t addr)
(addr < KVM_PIT_BASE_ADDRESS + KVM_PIT_MEM_LENGTH));
}
-static int pit_ioport_write(struct kvm_io_device *this,
+static int pit_ioport_write(struct kvm_vcpu *vcpu,
+ struct kvm_io_device *this,
gpa_t addr, int len, const void *data)
{
struct kvm_pit *pit = dev_to_pit(this);
@@ -519,7 +520,8 @@ static int pit_ioport_write(struct kvm_io_device *this,
return 0;
}
-static int pit_ioport_read(struct kvm_io_device *this,
+static int pit_ioport_read(struct kvm_vcpu *vcpu,
+ struct kvm_io_device *this,
gpa_t addr, int len, void *data)
{
struct kvm_pit *pit = dev_to_pit(this);
@@ -589,7 +591,8 @@ static int pit_ioport_read(struct kvm_io_device *this,
return 0;
}
-static int speaker_ioport_write(struct kvm_io_device *this,
+static int speaker_ioport_write(struct kvm_vcpu *vcpu,
+ struct kvm_io_device *this,
gpa_t addr, int len, const void *data)
{
struct kvm_pit *pit = speaker_to_pit(this);
@@ -606,8 +609,9 @@ static int speaker_ioport_write(struct kvm_io_device *this,
return 0;
}
-static int speaker_ioport_read(struct kvm_io_device *this,
- gpa_t addr, int len, void *data)
+static int speaker_ioport_read(struct kvm_vcpu *vcpu,
+ struct kvm_io_device *this,
+ gpa_t addr, int len, void *data)
{
struct kvm_pit *pit = speaker_to_pit(this);
struct kvm_kpit_state *pit_state = &pit->pit_state;
diff --git a/arch/x86/kvm/i8254.h b/arch/x86/kvm/i8254.h
index dd1b16b611b0..c84990b42b5b 100644
--- a/arch/x86/kvm/i8254.h
+++ b/arch/x86/kvm/i8254.h
@@ -3,7 +3,7 @@
#include <linux/kthread.h>
-#include "iodev.h"
+#include <kvm/iodev.h>
struct kvm_kpit_channel_state {
u32 count; /* can be 65536 */
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index 9541ba34126b..fef922ff2635 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -529,42 +529,42 @@ static int picdev_read(struct kvm_pic *s,
return 0;
}
-static int picdev_master_write(struct kvm_io_device *dev,
+static int picdev_master_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
gpa_t addr, int len, const void *val)
{
return picdev_write(container_of(dev, struct kvm_pic, dev_master),
addr, len, val);
}
-static int picdev_master_read(struct kvm_io_device *dev,
+static int picdev_master_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
gpa_t addr, int len, void *val)
{
return picdev_read(container_of(dev, struct kvm_pic, dev_master),
addr, len, val);
}
-static int picdev_slave_write(struct kvm_io_device *dev,
+static int picdev_slave_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
gpa_t addr, int len, const void *val)
{
return picdev_write(container_of(dev, struct kvm_pic, dev_slave),
addr, len, val);
}
-static int picdev_slave_read(struct kvm_io_device *dev,
+static int picdev_slave_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
gpa_t addr, int len, void *val)
{
return picdev_read(container_of(dev, struct kvm_pic, dev_slave),
addr, len, val);
}
-static int picdev_eclr_write(struct kvm_io_device *dev,
+static int picdev_eclr_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
gpa_t addr, int len, const void *val)
{
return picdev_write(container_of(dev, struct kvm_pic, dev_eclr),
addr, len, val);
}
-static int picdev_eclr_read(struct kvm_io_device *dev,
+static int picdev_eclr_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
gpa_t addr, int len, void *val)
{
return picdev_read(container_of(dev, struct kvm_pic, dev_eclr),
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
index b1947e0f3e10..28146f03c514 100644
--- a/arch/x86/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -206,6 +206,8 @@ static int ioapic_set_irq(struct kvm_ioapic *ioapic, unsigned int irq,
old_irr = ioapic->irr;
ioapic->irr |= mask;
+ if (edge)
+ ioapic->irr_delivered &= ~mask;
if ((edge && old_irr == ioapic->irr) ||
(!edge && entry.fields.remote_irr)) {
ret = 0;
@@ -349,7 +351,7 @@ static int ioapic_service(struct kvm_ioapic *ioapic, int irq, bool line_status)
irqe.shorthand = 0;
if (irqe.trig_mode == IOAPIC_EDGE_TRIG)
- ioapic->irr &= ~(1 << irq);
+ ioapic->irr_delivered |= 1 << irq;
if (irq == RTC_GSI && line_status) {
/*
@@ -422,6 +424,7 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
struct kvm_ioapic *ioapic, int vector, int trigger_mode)
{
int i;
+ struct kvm_lapic *apic = vcpu->arch.apic;
for (i = 0; i < IOAPIC_NUM_PINS; i++) {
union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];
@@ -443,7 +446,8 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i);
spin_lock(&ioapic->lock);
- if (trigger_mode != IOAPIC_LEVEL_TRIG)
+ if (trigger_mode != IOAPIC_LEVEL_TRIG ||
+ kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI)
continue;
ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
@@ -471,13 +475,6 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
}
}
-bool kvm_ioapic_handles_vector(struct kvm *kvm, int vector)
-{
- struct kvm_ioapic *ioapic = kvm->arch.vioapic;
- smp_rmb();
- return test_bit(vector, ioapic->handled_vectors);
-}
-
void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector, int trigger_mode)
{
struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
@@ -498,8 +495,8 @@ static inline int ioapic_in_range(struct kvm_ioapic *ioapic, gpa_t addr)
(addr < ioapic->base_address + IOAPIC_MEM_LENGTH)));
}
-static int ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len,
- void *val)
+static int ioapic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
+ gpa_t addr, int len, void *val)
{
struct kvm_ioapic *ioapic = to_ioapic(this);
u32 result;
@@ -541,8 +538,8 @@ static int ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len,
return 0;
}
-static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
- const void *val)
+static int ioapic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
+ gpa_t addr, int len, const void *val)
{
struct kvm_ioapic *ioapic = to_ioapic(this);
u32 data;
@@ -597,6 +594,7 @@ static void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
ioapic->base_address = IOAPIC_DEFAULT_BASE_ADDRESS;
ioapic->ioregsel = 0;
ioapic->irr = 0;
+ ioapic->irr_delivered = 0;
ioapic->id = 0;
memset(ioapic->irq_eoi, 0x00, IOAPIC_NUM_PINS);
rtc_irq_eoi_tracking_reset(ioapic);
@@ -654,6 +652,7 @@ int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
spin_lock(&ioapic->lock);
memcpy(state, ioapic, sizeof(struct kvm_ioapic_state));
+ state->irr &= ~ioapic->irr_delivered;
spin_unlock(&ioapic->lock);
return 0;
}
@@ -667,6 +666,7 @@ int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
spin_lock(&ioapic->lock);
memcpy(ioapic, state, sizeof(struct kvm_ioapic_state));
ioapic->irr = 0;
+ ioapic->irr_delivered = 0;
update_handled_vectors(ioapic);
kvm_vcpu_request_scan_ioapic(kvm);
kvm_ioapic_inject_all(ioapic, state->irr);
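The new irr_delivered bitmap lets edge-triggered IRR bits stay set after delivery while keeping KVM_GET_IOAPIC honest: delivered bits are masked out of the reported irr, so a migrated guest does not get already-handled edges re-injected, and a fresh edge on the pin re-arms it. A minimal model of that bookkeeping (a single trigger mode is assumed; the real code tracks it per pin):

#include <stdint.h>

struct ex_ioapic {
	uint32_t irr;		/* raised pins                 */
	uint32_t irr_delivered;	/* edges already sent to a CPU */
};

static void ex_raise_edge(struct ex_ioapic *io, int pin)
{
	io->irr |= 1u << pin;
	io->irr_delivered &= ~(1u << pin);	/* a new edge re-arms the pin */
}

static void ex_service(struct ex_ioapic *io, int pin)
{
	io->irr_delivered |= 1u << pin;		/* irr stays set */
}

static uint32_t ex_saved_irr(const struct ex_ioapic *io)
{
	return io->irr & ~io->irr_delivered;	/* what migration sees */
}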
diff --git a/arch/x86/kvm/ioapic.h b/arch/x86/kvm/ioapic.h
index c2e36d934af4..ca0b0b4e6256 100644
--- a/arch/x86/kvm/ioapic.h
+++ b/arch/x86/kvm/ioapic.h
@@ -3,7 +3,7 @@
#include <linux/kvm_host.h>
-#include "iodev.h"
+#include <kvm/iodev.h>
struct kvm;
struct kvm_vcpu;
@@ -77,6 +77,7 @@ struct kvm_ioapic {
struct rtc_status rtc_status;
struct delayed_work eoi_inject;
u32 irq_eoi[IOAPIC_NUM_PINS];
+ u32 irr_delivered;
};
#ifdef DEBUG
@@ -97,13 +98,19 @@ static inline struct kvm_ioapic *ioapic_irqchip(struct kvm *kvm)
return kvm->arch.vioapic;
}
+static inline bool kvm_ioapic_handles_vector(struct kvm *kvm, int vector)
+{
+ struct kvm_ioapic *ioapic = kvm->arch.vioapic;
+ smp_rmb();
+ return test_bit(vector, ioapic->handled_vectors);
+}
+
void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu);
bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
int short_hand, unsigned int dest, int dest_mode);
int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2);
void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector,
int trigger_mode);
-bool kvm_ioapic_handles_vector(struct kvm *kvm, int vector);
int kvm_ioapic_init(struct kvm *kvm);
void kvm_ioapic_destroy(struct kvm *kvm);
int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h
index 2d03568e9498..ad68c73008c5 100644
--- a/arch/x86/kvm/irq.h
+++ b/arch/x86/kvm/irq.h
@@ -27,7 +27,7 @@
#include <linux/kvm_host.h>
#include <linux/spinlock.h>
-#include "iodev.h"
+#include <kvm/iodev.h>
#include "ioapic.h"
#include "lapic.h"
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index bd4e34de24c7..d67206a7b99a 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -133,6 +133,28 @@ static inline int kvm_apic_id(struct kvm_lapic *apic)
return (kvm_apic_get_reg(apic, APIC_ID) >> 24) & 0xff;
}
+/* The logical map is definitely wrong if we have multiple
+ * modes at the same time. (Physical map is always right.)
+ */
+static inline bool kvm_apic_logical_map_valid(struct kvm_apic_map *map)
+{
+ return !(map->mode & (map->mode - 1));
+}
+
+static inline void
+apic_logical_id(struct kvm_apic_map *map, u32 dest_id, u16 *cid, u16 *lid)
+{
+ unsigned lid_bits;
+
+ BUILD_BUG_ON(KVM_APIC_MODE_XAPIC_CLUSTER != 4);
+ BUILD_BUG_ON(KVM_APIC_MODE_XAPIC_FLAT != 8);
+ BUILD_BUG_ON(KVM_APIC_MODE_X2APIC != 16);
+ lid_bits = map->mode;
+
+ *cid = dest_id >> lid_bits;
+ *lid = dest_id & ((1 << lid_bits) - 1);
+}
+
static void recalculate_apic_map(struct kvm *kvm)
{
struct kvm_apic_map *new, *old = NULL;
@@ -146,48 +168,6 @@ static void recalculate_apic_map(struct kvm *kvm)
if (!new)
goto out;
- new->ldr_bits = 8;
- /* flat mode is default */
- new->cid_shift = 8;
- new->cid_mask = 0;
- new->lid_mask = 0xff;
- new->broadcast = APIC_BROADCAST;
-
- kvm_for_each_vcpu(i, vcpu, kvm) {
- struct kvm_lapic *apic = vcpu->arch.apic;
-
- if (!kvm_apic_present(vcpu))
- continue;
-
- if (apic_x2apic_mode(apic)) {
- new->ldr_bits = 32;
- new->cid_shift = 16;
- new->cid_mask = new->lid_mask = 0xffff;
- new->broadcast = X2APIC_BROADCAST;
- } else if (kvm_apic_get_reg(apic, APIC_LDR)) {
- if (kvm_apic_get_reg(apic, APIC_DFR) ==
- APIC_DFR_CLUSTER) {
- new->cid_shift = 4;
- new->cid_mask = 0xf;
- new->lid_mask = 0xf;
- } else {
- new->cid_shift = 8;
- new->cid_mask = 0;
- new->lid_mask = 0xff;
- }
- }
-
- /*
- * All APICs have to be configured in the same mode by an OS.
- * We take advatage of this while building logical id loockup
- * table. After reset APICs are in software disabled mode, so if
- * we find apic with different setting we assume this is the mode
- * OS wants all apics to be in; build lookup table accordingly.
- */
- if (kvm_apic_sw_enabled(apic))
- break;
- }
-
kvm_for_each_vcpu(i, vcpu, kvm) {
struct kvm_lapic *apic = vcpu->arch.apic;
u16 cid, lid;
@@ -198,11 +178,25 @@ static void recalculate_apic_map(struct kvm *kvm)
aid = kvm_apic_id(apic);
ldr = kvm_apic_get_reg(apic, APIC_LDR);
- cid = apic_cluster_id(new, ldr);
- lid = apic_logical_id(new, ldr);
if (aid < ARRAY_SIZE(new->phys_map))
new->phys_map[aid] = apic;
+
+ if (apic_x2apic_mode(apic)) {
+ new->mode |= KVM_APIC_MODE_X2APIC;
+ } else if (ldr) {
+ ldr = GET_APIC_LOGICAL_ID(ldr);
+ if (kvm_apic_get_reg(apic, APIC_DFR) == APIC_DFR_FLAT)
+ new->mode |= KVM_APIC_MODE_XAPIC_FLAT;
+ else
+ new->mode |= KVM_APIC_MODE_XAPIC_CLUSTER;
+ }
+
+ if (!kvm_apic_logical_map_valid(new))
+ continue;
+
+ apic_logical_id(new, ldr, &cid, &lid);
+
if (lid && cid < ARRAY_SIZE(new->logical_map))
new->logical_map[cid][ffs(lid) - 1] = apic;
}
@@ -588,15 +582,23 @@ static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
apic_update_ppr(apic);
}
-static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 dest)
+static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 mda)
{
- return dest == (apic_x2apic_mode(apic) ?
- X2APIC_BROADCAST : APIC_BROADCAST);
+ if (apic_x2apic_mode(apic))
+ return mda == X2APIC_BROADCAST;
+
+ return GET_APIC_DEST_FIELD(mda) == APIC_BROADCAST;
}
-static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 dest)
+static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 mda)
{
- return kvm_apic_id(apic) == dest || kvm_apic_broadcast(apic, dest);
+ if (kvm_apic_broadcast(apic, mda))
+ return true;
+
+ if (apic_x2apic_mode(apic))
+ return mda == kvm_apic_id(apic);
+
+ return mda == SET_APIC_DEST_FIELD(kvm_apic_id(apic));
}
static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
@@ -613,6 +615,7 @@ static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
&& (logical_id & mda & 0xffff) != 0;
logical_id = GET_APIC_LOGICAL_ID(logical_id);
+ mda = GET_APIC_DEST_FIELD(mda);
switch (kvm_apic_get_reg(apic, APIC_DFR)) {
case APIC_DFR_FLAT:
@@ -627,10 +630,27 @@ static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
}
}
+/* The KVM APIC implementation has two quirks:
+ * - the dest field always begins at bit 0, while the xAPIC MDA occupies bits 31:24,
+ * - IOxAPIC messages have to be delivered (directly) to x2APIC.
+ */
+static u32 kvm_apic_mda(unsigned int dest_id, struct kvm_lapic *source,
+ struct kvm_lapic *target)
+{
+ bool ipi = source != NULL;
+ bool x2apic_mda = apic_x2apic_mode(ipi ? source : target);
+
+ if (!ipi && dest_id == APIC_BROADCAST && x2apic_mda)
+ return X2APIC_BROADCAST;
+
+ return x2apic_mda ? dest_id : SET_APIC_DEST_FIELD(dest_id);
+}
+
bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
int short_hand, unsigned int dest, int dest_mode)
{
struct kvm_lapic *target = vcpu->arch.apic;
+ u32 mda = kvm_apic_mda(dest, source, target);
apic_debug("target %p, source %p, dest 0x%x, "
"dest_mode 0x%x, short_hand 0x%x\n",
@@ -640,9 +660,9 @@ bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
switch (short_hand) {
case APIC_DEST_NOSHORT:
if (dest_mode == APIC_DEST_PHYSICAL)
- return kvm_apic_match_physical_addr(target, dest);
+ return kvm_apic_match_physical_addr(target, mda);
else
- return kvm_apic_match_logical_addr(target, dest);
+ return kvm_apic_match_logical_addr(target, mda);
case APIC_DEST_SELF:
return target == source;
case APIC_DEST_ALLINC:
@@ -664,6 +684,7 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
struct kvm_lapic **dst;
int i;
bool ret = false;
+ bool x2apic_ipi = src && apic_x2apic_mode(src);
*r = -1;
@@ -675,15 +696,15 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
if (irq->shorthand)
return false;
+ if (irq->dest_id == (x2apic_ipi ? X2APIC_BROADCAST : APIC_BROADCAST))
+ return false;
+
rcu_read_lock();
map = rcu_dereference(kvm->arch.apic_map);
if (!map)
goto out;
- if (irq->dest_id == map->broadcast)
- goto out;
-
ret = true;
if (irq->dest_mode == APIC_DEST_PHYSICAL) {
@@ -692,16 +713,20 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
dst = &map->phys_map[irq->dest_id];
} else {
- u32 mda = irq->dest_id << (32 - map->ldr_bits);
- u16 cid = apic_cluster_id(map, mda);
+ u16 cid;
+
+ if (!kvm_apic_logical_map_valid(map)) {
+ ret = false;
+ goto out;
+ }
+
+ apic_logical_id(map, irq->dest_id, &cid, (u16 *)&bitmap);
if (cid >= ARRAY_SIZE(map->logical_map))
goto out;
dst = map->logical_map[cid];
- bitmap = apic_logical_id(map, mda);
-
if (irq->delivery_mode == APIC_DM_LOWEST) {
int l = -1;
for_each_set_bit(i, &bitmap, 16) {
@@ -833,8 +858,7 @@ int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
{
- if (!(kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI) &&
- kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) {
+ if (kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) {
int trigger_mode;
if (apic_test_vector(vector, apic->regs + APIC_TMR))
trigger_mode = IOAPIC_LEVEL_TRIG;
@@ -1038,7 +1062,7 @@ static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
addr < apic->base_address + LAPIC_MMIO_LENGTH;
}
-static int apic_mmio_read(struct kvm_io_device *this,
+static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
gpa_t address, int len, void *data)
{
struct kvm_lapic *apic = to_lapic(this);
@@ -1358,7 +1382,7 @@ static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
return ret;
}
-static int apic_mmio_write(struct kvm_io_device *this,
+static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
gpa_t address, int len, const void *data)
{
struct kvm_lapic *apic = to_lapic(this);
@@ -1498,8 +1522,6 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
return;
}
- if (!kvm_vcpu_is_bsp(apic->vcpu))
- value &= ~MSR_IA32_APICBASE_BSP;
vcpu->arch.apic_base = value;
/* update jump label if enable bit changes */
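kvm_apic_mda() papers over the first quirk by shifting KVM's bit-0-based destination IDs into the xAPIC MDA position (bits 31:24, i.e. SET_APIC_DEST_FIELD), and over the second by promoting an IOAPIC broadcast aimed at an x2APIC CPU to X2APIC_BROADCAST. A compact sketch of that normalization (the EX_* constants mirror APIC_BROADCAST/X2APIC_BROADCAST):

#include <stdint.h>

#define EX_APIC_BROADCAST	0xffu
#define EX_X2APIC_BROADCAST	0xffffffffu

/* x2apic_mda follows the source APIC for IPIs and the target APIC
 * for IOAPIC-originated messages, as in kvm_apic_mda() */
static uint32_t ex_mda(uint32_t dest_id, int is_ipi, int x2apic_mda)
{
	if (!is_ipi && dest_id == EX_APIC_BROADCAST && x2apic_mda)
		return EX_X2APIC_BROADCAST;

	return x2apic_mda ? dest_id : dest_id << 24;
}

So an xAPIC-directed dest_id of 0x3 becomes an MDA of 0x03000000, which the match helpers then compare in the register's own layout.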
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 0bc6c656625b..9d28383fc1e7 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -1,7 +1,7 @@
#ifndef __KVM_X86_LAPIC_H
#define __KVM_X86_LAPIC_H
-#include "iodev.h"
+#include <kvm/iodev.h>
#include <linux/kvm_host.h>
@@ -148,21 +148,6 @@ static inline bool kvm_apic_vid_enabled(struct kvm *kvm)
return kvm_x86_ops->vm_has_apicv(kvm);
}
-static inline u16 apic_cluster_id(struct kvm_apic_map *map, u32 ldr)
-{
- u16 cid;
- ldr >>= 32 - map->ldr_bits;
- cid = (ldr >> map->cid_shift) & map->cid_mask;
-
- return cid;
-}
-
-static inline u16 apic_logical_id(struct kvm_apic_map *map, u32 ldr)
-{
- ldr >>= (32 - map->ldr_bits);
- return ldr & map->lid_mask;
-}
-
static inline bool kvm_apic_has_events(struct kvm_vcpu *vcpu)
{
return vcpu->arch.apic->pending_events;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index cee759299a35..146f295ee322 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4465,6 +4465,79 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
kvm_flush_remote_tlbs(kvm);
}
+static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
+ unsigned long *rmapp)
+{
+ u64 *sptep;
+ struct rmap_iterator iter;
+ int need_tlb_flush = 0;
+ pfn_t pfn;
+ struct kvm_mmu_page *sp;
+
+ for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
+ BUG_ON(!(*sptep & PT_PRESENT_MASK));
+
+ sp = page_header(__pa(sptep));
+ pfn = spte_to_pfn(*sptep);
+
+ /*
+ * Only EPT is supported for now; otherwise, one would need to
+ * find out efficiently whether the guest page tables are
+ * also using huge pages.
+ */
+ if (sp->role.direct &&
+ !kvm_is_reserved_pfn(pfn) &&
+ PageTransCompound(pfn_to_page(pfn))) {
+ drop_spte(kvm, sptep);
+ sptep = rmap_get_first(*rmapp, &iter);
+ need_tlb_flush = 1;
+ } else
+ sptep = rmap_get_next(&iter);
+ }
+
+ return need_tlb_flush;
+}
+
+void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
+ struct kvm_memory_slot *memslot)
+{
+ bool flush = false;
+ unsigned long *rmapp;
+ unsigned long last_index, index;
+ gfn_t gfn_start, gfn_end;
+
+ spin_lock(&kvm->mmu_lock);
+
+ gfn_start = memslot->base_gfn;
+ gfn_end = memslot->base_gfn + memslot->npages - 1;
+
+ if (gfn_start >= gfn_end)
+ goto out;
+
+ rmapp = memslot->arch.rmap[0];
+ last_index = gfn_to_index(gfn_end, memslot->base_gfn,
+ PT_PAGE_TABLE_LEVEL);
+
+ for (index = 0; index <= last_index; ++index, ++rmapp) {
+ if (*rmapp)
+ flush |= kvm_mmu_zap_collapsible_spte(kvm, rmapp);
+
+ if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
+ if (flush) {
+ kvm_flush_remote_tlbs(kvm);
+ flush = false;
+ }
+ cond_resched_lock(&kvm->mmu_lock);
+ }
+ }
+
+ if (flush)
+ kvm_flush_remote_tlbs(kvm);
+
+out:
+ spin_unlock(&kvm->mmu_lock);
+}
+
void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
struct kvm_memory_slot *memslot)
{
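The zap loop in kvm_mmu_zap_collapsible_spte() above restarts from rmap_get_first() after every drop_spte(), because removing the current entry invalidates the iterator. The same restart-on-delete discipline on a toy singly linked list — everything here is illustrative, not kernel API:

#include <stdio.h>
#include <stdlib.h>

struct node { int val; struct node *next; };

static int is_even(int v) { return (v & 1) == 0; }

/* Delete every matching node; after each deletion the walk restarts
 * from the head, mirroring the re-call of rmap_get_first() after
 * drop_spte() in the patch. */
static int zap_matching(struct node **head, int (*pred)(int))
{
    int zapped = 0;
    struct node **pp = head;

    while (*pp) {
        if (pred((*pp)->val)) {
            struct node *dead = *pp;
            *pp = dead->next;
            free(dead);
            zapped++;
            pp = head;              /* restart: iterator is now stale */
        } else {
            pp = &(*pp)->next;      /* advance, like rmap_get_next() */
        }
    }
    return zapped;
}

int main(void)
{
    struct node *head = NULL, **tail = &head;

    for (int i = 1; i <= 6; i++) {
        *tail = calloc(1, sizeof(**tail));
        (*tail)->val = i;
        tail = &(*tail)->next;
    }
    printf("zapped %d\n", zap_matching(&head, is_even)); /* zapped 3 */
    while (head) { struct node *n = head; head = head->next; free(n); }
    return 0;
}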
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 8e6b7d869d2f..29fbf9dfdc54 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -38,7 +38,7 @@ static struct kvm_arch_event_perf_mapping {
};
/* mapping between fixed pmc index and arch_events array */
-int fixed_pmc_events[] = {1, 0, 7};
+static int fixed_pmc_events[] = {1, 0, 7};
static bool pmc_is_gp(struct kvm_pmc *pmc)
{
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index cc618c882f90..ce741b8650f6 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1261,7 +1261,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
MSR_IA32_APICBASE_ENABLE;
- if (kvm_vcpu_is_bsp(&svm->vcpu))
+ if (kvm_vcpu_is_reset_bsp(&svm->vcpu))
svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
svm_init_osvw(&svm->vcpu);
@@ -1929,14 +1929,12 @@ static int nop_on_interception(struct vcpu_svm *svm)
static int halt_interception(struct vcpu_svm *svm)
{
svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
- skip_emulated_instruction(&svm->vcpu);
return kvm_emulate_halt(&svm->vcpu);
}
static int vmmcall_interception(struct vcpu_svm *svm)
{
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
- skip_emulated_instruction(&svm->vcpu);
kvm_emulate_hypercall(&svm->vcpu);
return 1;
}
@@ -2757,11 +2755,11 @@ static int invlpga_interception(struct vcpu_svm *svm)
{
struct kvm_vcpu *vcpu = &svm->vcpu;
- trace_kvm_invlpga(svm->vmcb->save.rip, vcpu->arch.regs[VCPU_REGS_RCX],
- vcpu->arch.regs[VCPU_REGS_RAX]);
+ trace_kvm_invlpga(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RCX),
+ kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
/* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
- kvm_mmu_invlpg(vcpu, vcpu->arch.regs[VCPU_REGS_RAX]);
+ kvm_mmu_invlpg(vcpu, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
skip_emulated_instruction(&svm->vcpu);
@@ -2770,12 +2768,18 @@ static int invlpga_interception(struct vcpu_svm *svm)
static int skinit_interception(struct vcpu_svm *svm)
{
- trace_kvm_skinit(svm->vmcb->save.rip, svm->vcpu.arch.regs[VCPU_REGS_RAX]);
+ trace_kvm_skinit(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
kvm_queue_exception(&svm->vcpu, UD_VECTOR);
return 1;
}
+static int wbinvd_interception(struct vcpu_svm *svm)
+{
+ kvm_emulate_wbinvd(&svm->vcpu);
+ return 1;
+}
+
static int xsetbv_interception(struct vcpu_svm *svm)
{
u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
@@ -2902,7 +2906,8 @@ static int rdpmc_interception(struct vcpu_svm *svm)
return 1;
}
-bool check_selective_cr0_intercepted(struct vcpu_svm *svm, unsigned long val)
+static bool check_selective_cr0_intercepted(struct vcpu_svm *svm,
+ unsigned long val)
{
unsigned long cr0 = svm->vcpu.arch.cr0;
bool ret = false;
@@ -2940,7 +2945,10 @@ static int cr_interception(struct vcpu_svm *svm)
return emulate_on_interception(svm);
reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
- cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
+ if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE)
+ cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0;
+ else
+ cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
err = 0;
if (cr >= 16) { /* mov to cr */
@@ -3133,7 +3141,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
static int rdmsr_interception(struct vcpu_svm *svm)
{
- u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
+ u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
u64 data;
if (svm_get_msr(&svm->vcpu, ecx, &data)) {
@@ -3142,8 +3150,8 @@ static int rdmsr_interception(struct vcpu_svm *svm)
} else {
trace_kvm_msr_read(ecx, data);
- svm->vcpu.arch.regs[VCPU_REGS_RAX] = data & 0xffffffff;
- svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
+ kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, data & 0xffffffff);
+ kvm_register_write(&svm->vcpu, VCPU_REGS_RDX, data >> 32);
svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
skip_emulated_instruction(&svm->vcpu);
}
@@ -3246,9 +3254,8 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
static int wrmsr_interception(struct vcpu_svm *svm)
{
struct msr_data msr;
- u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
- u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
- | ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);
+ u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
+ u64 data = kvm_read_edx_eax(&svm->vcpu);
msr.data = data;
msr.index = ecx;
@@ -3325,7 +3332,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
[SVM_EXIT_READ_CR3] = cr_interception,
[SVM_EXIT_READ_CR4] = cr_interception,
[SVM_EXIT_READ_CR8] = cr_interception,
- [SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception,
+ [SVM_EXIT_CR0_SEL_WRITE] = cr_interception,
[SVM_EXIT_WRITE_CR0] = cr_interception,
[SVM_EXIT_WRITE_CR3] = cr_interception,
[SVM_EXIT_WRITE_CR4] = cr_interception,
@@ -3376,7 +3383,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
[SVM_EXIT_STGI] = stgi_interception,
[SVM_EXIT_CLGI] = clgi_interception,
[SVM_EXIT_SKINIT] = skinit_interception,
- [SVM_EXIT_WBINVD] = emulate_on_interception,
+ [SVM_EXIT_WBINVD] = wbinvd_interception,
[SVM_EXIT_MONITOR] = monitor_interception,
[SVM_EXIT_MWAIT] = mwait_interception,
[SVM_EXIT_XSETBV] = xsetbv_interception,
@@ -3555,7 +3562,7 @@ static int handle_exit(struct kvm_vcpu *vcpu)
if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
|| !svm_exit_handlers[exit_code]) {
- WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_code);
+ WARN_ONCE(1, "svm: unexpected exit reason 0x%x\n", exit_code);
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
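Routing SVM_EXIT_CR0_SEL_WRITE into cr_interception() above relies on the decoder deriving the CR number from the exit-code offset, where an offset of 16 or more means "mov to cr". A standalone sketch of that decode; the exit-code values are copied from the kernel's svm.h and should be treated as assumptions here:

#include <stdio.h>

/* Exit-code values as found in svm.h (assumed for this sketch): */
#define SVM_EXIT_READ_CR0       0x000
#define SVM_EXIT_WRITE_CR0      0x010
#define SVM_EXIT_CR0_SEL_WRITE  0x065

static int exit_code_to_cr(unsigned int exit_code)
{
    if (exit_code == SVM_EXIT_CR0_SEL_WRITE)
        return SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0; /* 16: write CR0 */
    return exit_code - SVM_EXIT_READ_CR0;
}

int main(void)
{
    unsigned int codes[] = { SVM_EXIT_READ_CR0, SVM_EXIT_WRITE_CR0,
                             SVM_EXIT_CR0_SEL_WRITE };

    for (int i = 0; i < 3; i++) {
        int cr = exit_code_to_cr(codes[i]);

        printf("exit 0x%03x -> cr%d, %s\n", codes[i], cr & 15,
               cr >= 16 ? "mov to cr" : "mov from cr");
    }
    return 0;
}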
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 10a481b7674d..f5e8dce8046c 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2470,6 +2470,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
vmx->nested.nested_vmx_secondary_ctls_low = 0;
vmx->nested.nested_vmx_secondary_ctls_high &=
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
+ SECONDARY_EXEC_RDTSCP |
SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
SECONDARY_EXEC_APIC_REGISTER_VIRT |
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
@@ -2479,8 +2480,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
if (enable_ept) {
/* nested EPT: emulate EPT also to L1 */
vmx->nested.nested_vmx_secondary_ctls_high |=
- SECONDARY_EXEC_ENABLE_EPT |
- SECONDARY_EXEC_UNRESTRICTED_GUEST;
+ SECONDARY_EXEC_ENABLE_EPT;
vmx->nested.nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT |
VMX_EPT_INVEPT_BIT;
@@ -2494,6 +2494,10 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
} else
vmx->nested.nested_vmx_ept_caps = 0;
+ if (enable_unrestricted_guest)
+ vmx->nested.nested_vmx_secondary_ctls_high |=
+ SECONDARY_EXEC_UNRESTRICTED_GUEST;
+
/* miscellaneous data */
rdmsr(MSR_IA32_VMX_MISC,
vmx->nested.nested_vmx_misc_low,
@@ -3265,8 +3269,8 @@ static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg,
* default value.
*/
if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS)
- save->selector &= ~SELECTOR_RPL_MASK;
- save->dpl = save->selector & SELECTOR_RPL_MASK;
+ save->selector &= ~SEGMENT_RPL_MASK;
+ save->dpl = save->selector & SEGMENT_RPL_MASK;
save->s = 1;
}
vmx_set_segment(vcpu, save, seg);
@@ -3839,7 +3843,7 @@ static bool code_segment_valid(struct kvm_vcpu *vcpu)
unsigned int cs_rpl;
vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
- cs_rpl = cs.selector & SELECTOR_RPL_MASK;
+ cs_rpl = cs.selector & SEGMENT_RPL_MASK;
if (cs.unusable)
return false;
@@ -3867,7 +3871,7 @@ static bool stack_segment_valid(struct kvm_vcpu *vcpu)
unsigned int ss_rpl;
vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
- ss_rpl = ss.selector & SELECTOR_RPL_MASK;
+ ss_rpl = ss.selector & SEGMENT_RPL_MASK;
if (ss.unusable)
return true;
@@ -3889,7 +3893,7 @@ static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg)
unsigned int rpl;
vmx_get_segment(vcpu, &var, seg);
- rpl = var.selector & SELECTOR_RPL_MASK;
+ rpl = var.selector & SEGMENT_RPL_MASK;
if (var.unusable)
return true;
@@ -3916,7 +3920,7 @@ static bool tr_valid(struct kvm_vcpu *vcpu)
if (tr.unusable)
return false;
- if (tr.selector & SELECTOR_TI_MASK) /* TI = 1 */
+ if (tr.selector & SEGMENT_TI_MASK) /* TI = 1 */
return false;
if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */
return false;
@@ -3934,7 +3938,7 @@ static bool ldtr_valid(struct kvm_vcpu *vcpu)
if (ldtr.unusable)
return true;
- if (ldtr.selector & SELECTOR_TI_MASK) /* TI = 1 */
+ if (ldtr.selector & SEGMENT_TI_MASK) /* TI = 1 */
return false;
if (ldtr.type != 2)
return false;
@@ -3951,8 +3955,8 @@ static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
- return ((cs.selector & SELECTOR_RPL_MASK) ==
- (ss.selector & SELECTOR_RPL_MASK));
+ return ((cs.selector & SEGMENT_RPL_MASK) ==
+ (ss.selector & SEGMENT_RPL_MASK));
}
/*
@@ -4708,7 +4712,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu)
vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
kvm_set_cr8(&vmx->vcpu, 0);
apic_base_msr.data = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE;
- if (kvm_vcpu_is_bsp(&vmx->vcpu))
+ if (kvm_vcpu_is_reset_bsp(&vmx->vcpu))
apic_base_msr.data |= MSR_IA32_APICBASE_BSP;
apic_base_msr.host_initiated = true;
kvm_set_apic_base(&vmx->vcpu, &apic_base_msr);
@@ -5003,7 +5007,7 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
if (emulate_instruction(vcpu, 0) == EMULATE_DONE) {
if (vcpu->arch.halt_request) {
vcpu->arch.halt_request = 0;
- return kvm_emulate_halt(vcpu);
+ return kvm_vcpu_halt(vcpu);
}
return 1;
}
@@ -5068,6 +5072,10 @@ static int handle_exception(struct kvm_vcpu *vcpu)
}
if (is_invalid_opcode(intr_info)) {
+ if (is_guest_mode(vcpu)) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+ }
er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD);
if (er != EMULATE_DONE)
kvm_queue_exception(vcpu, UD_VECTOR);
@@ -5087,9 +5095,10 @@ static int handle_exception(struct kvm_vcpu *vcpu)
!(is_page_fault(intr_info) && !(error_code & PFERR_RSVD_MASK))) {
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX;
- vcpu->run->internal.ndata = 2;
+ vcpu->run->internal.ndata = 3;
vcpu->run->internal.data[0] = vect_info;
vcpu->run->internal.data[1] = intr_info;
+ vcpu->run->internal.data[2] = error_code;
return 0;
}
@@ -5530,13 +5539,11 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu)
static int handle_halt(struct kvm_vcpu *vcpu)
{
- skip_emulated_instruction(vcpu);
return kvm_emulate_halt(vcpu);
}
static int handle_vmcall(struct kvm_vcpu *vcpu)
{
- skip_emulated_instruction(vcpu);
kvm_emulate_hypercall(vcpu);
return 1;
}
@@ -5567,7 +5574,6 @@ static int handle_rdpmc(struct kvm_vcpu *vcpu)
static int handle_wbinvd(struct kvm_vcpu *vcpu)
{
- skip_emulated_instruction(vcpu);
kvm_emulate_wbinvd(vcpu);
return 1;
}
@@ -5825,7 +5831,7 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
gpa_t gpa;
gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
- if (!kvm_io_bus_write(vcpu->kvm, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) {
+ if (!kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) {
skip_emulated_instruction(vcpu);
return 1;
}
@@ -5906,7 +5912,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
if (vcpu->arch.halt_request) {
vcpu->arch.halt_request = 0;
- ret = kvm_emulate_halt(vcpu);
+ ret = kvm_vcpu_halt(vcpu);
goto out;
}
@@ -7315,21 +7321,21 @@ static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
else if (port < 0x10000)
bitmap = vmcs12->io_bitmap_b;
else
- return 1;
+ return true;
bitmap += (port & 0x7fff) / 8;
if (last_bitmap != bitmap)
if (kvm_read_guest(vcpu->kvm, bitmap, &b, 1))
- return 1;
+ return true;
if (b & (1 << (port & 7)))
- return 1;
+ return true;
port++;
size--;
last_bitmap = bitmap;
}
- return 0;
+ return false;
}
/*
@@ -7345,7 +7351,7 @@ static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
gpa_t bitmap;
if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
- return 1;
+ return true;
/*
* The MSR_BITMAP page is divided into four 1024-byte bitmaps,
@@ -7364,10 +7370,10 @@ static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
if (msr_index < 1024*8) {
unsigned char b;
if (kvm_read_guest(vcpu->kvm, bitmap + msr_index/8, &b, 1))
- return 1;
+ return true;
return 1 & (b >> (msr_index & 7));
} else
- return 1; /* let L1 handle the wrong parameter */
+ return true; /* let L1 handle the wrong parameter */
}
/*
@@ -7389,7 +7395,7 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
case 0:
if (vmcs12->cr0_guest_host_mask &
(val ^ vmcs12->cr0_read_shadow))
- return 1;
+ return true;
break;
case 3:
if ((vmcs12->cr3_target_count >= 1 &&
@@ -7400,37 +7406,37 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
vmcs12->cr3_target_value2 == val) ||
(vmcs12->cr3_target_count >= 4 &&
vmcs12->cr3_target_value3 == val))
- return 0;
+ return false;
if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING))
- return 1;
+ return true;
break;
case 4:
if (vmcs12->cr4_guest_host_mask &
(vmcs12->cr4_read_shadow ^ val))
- return 1;
+ return true;
break;
case 8:
if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING))
- return 1;
+ return true;
break;
}
break;
case 2: /* clts */
if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) &&
(vmcs12->cr0_read_shadow & X86_CR0_TS))
- return 1;
+ return true;
break;
case 1: /* mov from cr */
switch (cr) {
case 3:
if (vmcs12->cpu_based_vm_exec_control &
CPU_BASED_CR3_STORE_EXITING)
- return 1;
+ return true;
break;
case 8:
if (vmcs12->cpu_based_vm_exec_control &
CPU_BASED_CR8_STORE_EXITING)
- return 1;
+ return true;
break;
}
break;
@@ -7441,14 +7447,14 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
*/
if (vmcs12->cr0_guest_host_mask & 0xe &
(val ^ vmcs12->cr0_read_shadow))
- return 1;
+ return true;
if ((vmcs12->cr0_guest_host_mask & 0x1) &&
!(vmcs12->cr0_read_shadow & 0x1) &&
(val & 0x1))
- return 1;
+ return true;
break;
}
- return 0;
+ return false;
}
/*
@@ -7471,48 +7477,48 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
KVM_ISA_VMX);
if (vmx->nested.nested_run_pending)
- return 0;
+ return false;
if (unlikely(vmx->fail)) {
pr_info_ratelimited("%s failed vm entry %x\n", __func__,
vmcs_read32(VM_INSTRUCTION_ERROR));
- return 1;
+ return true;
}
switch (exit_reason) {
case EXIT_REASON_EXCEPTION_NMI:
if (!is_exception(intr_info))
- return 0;
+ return false;
else if (is_page_fault(intr_info))
return enable_ept;
else if (is_no_device(intr_info) &&
!(vmcs12->guest_cr0 & X86_CR0_TS))
- return 0;
+ return false;
return vmcs12->exception_bitmap &
(1u << (intr_info & INTR_INFO_VECTOR_MASK));
case EXIT_REASON_EXTERNAL_INTERRUPT:
- return 0;
+ return false;
case EXIT_REASON_TRIPLE_FAULT:
- return 1;
+ return true;
case EXIT_REASON_PENDING_INTERRUPT:
return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_INTR_PENDING);
case EXIT_REASON_NMI_WINDOW:
return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING);
case EXIT_REASON_TASK_SWITCH:
- return 1;
+ return true;
case EXIT_REASON_CPUID:
if (kvm_register_read(vcpu, VCPU_REGS_RAX) == 0xa)
- return 0;
- return 1;
+ return false;
+ return true;
case EXIT_REASON_HLT:
return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING);
case EXIT_REASON_INVD:
- return 1;
+ return true;
case EXIT_REASON_INVLPG:
return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
case EXIT_REASON_RDPMC:
return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING);
- case EXIT_REASON_RDTSC:
+ case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP:
return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING);
case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR:
case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD:
@@ -7524,7 +7530,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
* VMX instructions trap unconditionally. This allows L1 to
* emulate them for its L2 guest, i.e., allows 3-level nesting!
*/
- return 1;
+ return true;
case EXIT_REASON_CR_ACCESS:
return nested_vmx_exit_handled_cr(vcpu, vmcs12);
case EXIT_REASON_DR_ACCESS:
@@ -7535,7 +7541,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
case EXIT_REASON_MSR_WRITE:
return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
case EXIT_REASON_INVALID_STATE:
- return 1;
+ return true;
case EXIT_REASON_MWAIT_INSTRUCTION:
return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
case EXIT_REASON_MONITOR_INSTRUCTION:
@@ -7545,7 +7551,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
nested_cpu_has2(vmcs12,
SECONDARY_EXEC_PAUSE_LOOP_EXITING);
case EXIT_REASON_MCE_DURING_VMENTRY:
- return 0;
+ return false;
case EXIT_REASON_TPR_BELOW_THRESHOLD:
return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW);
case EXIT_REASON_APIC_ACCESS:
@@ -7554,7 +7560,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
case EXIT_REASON_APIC_WRITE:
case EXIT_REASON_EOI_INDUCED:
/* apic_write and eoi_induced should exit unconditionally. */
- return 1;
+ return true;
case EXIT_REASON_EPT_VIOLATION:
/*
* L0 always deals with the EPT violation. If nested EPT is
@@ -7562,7 +7568,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
* missing in the guest EPT table (EPT12), the EPT violation
* will be injected with nested_ept_inject_page_fault()
*/
- return 0;
+ return false;
case EXIT_REASON_EPT_MISCONFIG:
/*
* L2 never uses directly L1's EPT, but rather L0's own EPT
@@ -7570,11 +7576,11 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
* (EPT on EPT). So any problems with the structure of the
* table is L0's fault.
*/
- return 0;
+ return false;
case EXIT_REASON_WBINVD:
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
case EXIT_REASON_XSETBV:
- return 1;
+ return true;
case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS:
/*
* This should never happen, since it is not possible to
@@ -7584,7 +7590,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
*/
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
default:
- return 1;
+ return true;
}
}
@@ -8519,6 +8525,9 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
exec_control);
}
}
+ if (nested && !vmx->rdtscp_enabled)
+ vmx->nested.nested_vmx_secondary_ctls_high &=
+ ~SECONDARY_EXEC_RDTSCP;
}
/* Exposing INVPCID only when PCID is exposed */
@@ -8619,10 +8628,11 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
+ int maxphyaddr = cpuid_maxphyaddr(vcpu);
if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
- /* TODO: Also verify bits beyond physical address width are 0 */
- if (!PAGE_ALIGNED(vmcs12->apic_access_addr))
+ if (!PAGE_ALIGNED(vmcs12->apic_access_addr) ||
+ vmcs12->apic_access_addr >> maxphyaddr)
return false;
/*
@@ -8638,8 +8648,8 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu,
}
if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
- /* TODO: Also verify bits beyond physical address width are 0 */
- if (!PAGE_ALIGNED(vmcs12->virtual_apic_page_addr))
+ if (!PAGE_ALIGNED(vmcs12->virtual_apic_page_addr) ||
+ vmcs12->virtual_apic_page_addr >> maxphyaddr)
return false;
if (vmx->nested.virtual_apic_page) /* shouldn't happen */
@@ -8662,7 +8672,8 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu,
}
if (nested_cpu_has_posted_intr(vmcs12)) {
- if (!IS_ALIGNED(vmcs12->posted_intr_desc_addr, 64))
+ if (!IS_ALIGNED(vmcs12->posted_intr_desc_addr, 64) ||
+ vmcs12->posted_intr_desc_addr >> maxphyaddr)
return false;
if (vmx->nested.pi_desc_page) { /* shouldn't happen */
@@ -8861,9 +8872,9 @@ static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
unsigned long count_field,
- unsigned long addr_field,
- int maxphyaddr)
+ unsigned long addr_field)
{
+ int maxphyaddr;
u64 count, addr;
if (vmcs12_read_any(vcpu, count_field, &count) ||
@@ -8873,6 +8884,7 @@ static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
}
if (count == 0)
return 0;
+ maxphyaddr = cpuid_maxphyaddr(vcpu);
if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr ||
(addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr) {
pr_warn_ratelimited(
@@ -8886,19 +8898,16 @@ static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
static int nested_vmx_check_msr_switch_controls(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
- int maxphyaddr;
-
if (vmcs12->vm_exit_msr_load_count == 0 &&
vmcs12->vm_exit_msr_store_count == 0 &&
vmcs12->vm_entry_msr_load_count == 0)
return 0; /* Fast path */
- maxphyaddr = cpuid_maxphyaddr(vcpu);
if (nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_LOAD_COUNT,
- VM_EXIT_MSR_LOAD_ADDR, maxphyaddr) ||
+ VM_EXIT_MSR_LOAD_ADDR) ||
nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_STORE_COUNT,
- VM_EXIT_MSR_STORE_ADDR, maxphyaddr) ||
+ VM_EXIT_MSR_STORE_ADDR) ||
nested_vmx_check_msr_switch(vcpu, VM_ENTRY_MSR_LOAD_COUNT,
- VM_ENTRY_MSR_LOAD_ADDR, maxphyaddr))
+ VM_ENTRY_MSR_LOAD_ADDR))
return -EINVAL;
return 0;
}
@@ -9148,8 +9157,9 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
exec_control &= ~SECONDARY_EXEC_RDTSCP;
/* Take the following fields only from vmcs12 */
exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
+ SECONDARY_EXEC_RDTSCP |
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
- SECONDARY_EXEC_APIC_REGISTER_VIRT);
+ SECONDARY_EXEC_APIC_REGISTER_VIRT);
if (nested_cpu_has(vmcs12,
CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
exec_control |= vmcs12->secondary_vm_exec_control;
@@ -9382,7 +9392,6 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
}
if (!nested_get_vmcs12_pages(vcpu, vmcs12)) {
- /*TODO: Also verify bits beyond physical address width are 0*/
nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
return 1;
}
@@ -9521,7 +9530,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
vmcs12->launch_state = 1;
if (vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT)
- return kvm_emulate_halt(vcpu);
+ return kvm_vcpu_halt(vcpu);
vmx->nested.nested_run_pending = 1;
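Several vmx.c hunks above replace the old "TODO: verify bits beyond physical address width" with an explicit check: a vmcs12 page address is usable only if it is page aligned and fits within the guest's reported physical-address width. A minimal sketch of the two tests, with PAGE_SIZE hard-coded for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL   /* illustrative */

/* A guest-physical address passes iff it is page aligned and has no
 * bits set at or above the vCPU's physical-address width. */
static bool gpa_usable(uint64_t gpa, int maxphyaddr)
{
    if (gpa & (PAGE_SIZE - 1))
        return false;
    if (gpa >> maxphyaddr)
        return false;
    return true;
}

int main(void)
{
    printf("%d\n", gpa_usable(0x1000, 36));        /* 1 */
    printf("%d\n", gpa_usable(0x1001, 36));        /* 0: unaligned */
    printf("%d\n", gpa_usable(1ULL << 40, 36));    /* 0: beyond width */
    return 0;
}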
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 32bf19ef3115..2b2dd030ea3b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -801,6 +801,17 @@ unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);
+static void kvm_update_dr0123(struct kvm_vcpu *vcpu)
+{
+ int i;
+
+ if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
+ for (i = 0; i < KVM_NR_DB_REGS; i++)
+ vcpu->arch.eff_db[i] = vcpu->arch.db[i];
+ vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD;
+ }
+}
+
static void kvm_update_dr6(struct kvm_vcpu *vcpu)
{
if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
@@ -3149,6 +3160,7 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
return -EINVAL;
memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
+ kvm_update_dr0123(vcpu);
vcpu->arch.dr6 = dbgregs->dr6;
kvm_update_dr6(vcpu);
vcpu->arch.dr7 = dbgregs->dr7;
@@ -4114,8 +4126,8 @@ static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
do {
n = min(len, 8);
if (!(vcpu->arch.apic &&
- !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, n, v))
- && kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, n, v))
+ !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v))
+ && kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, n, v))
break;
handled += n;
addr += n;
@@ -4134,8 +4146,9 @@ static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
do {
n = min(len, 8);
if (!(vcpu->arch.apic &&
- !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, n, v))
- && kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, n, v))
+ !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev,
+ addr, n, v))
+ && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v))
break;
trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v);
handled += n;
@@ -4475,7 +4488,8 @@ mmio:
return X86EMUL_CONTINUE;
}
-int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr,
+static int emulator_read_write(struct x86_emulate_ctxt *ctxt,
+ unsigned long addr,
void *val, unsigned int bytes,
struct x86_exception *exception,
const struct read_write_emulator_ops *ops)
@@ -4538,7 +4552,7 @@ static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
exception, &read_emultor);
}
-int emulator_write_emulated(struct x86_emulate_ctxt *ctxt,
+static int emulator_write_emulated(struct x86_emulate_ctxt *ctxt,
unsigned long addr,
const void *val,
unsigned int bytes,
@@ -4629,10 +4643,10 @@ static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
int r;
if (vcpu->arch.pio.in)
- r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port,
+ r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port,
vcpu->arch.pio.size, pd);
else
- r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
+ r = kvm_io_bus_write(vcpu, KVM_PIO_BUS,
vcpu->arch.pio.port, vcpu->arch.pio.size,
pd);
return r;
@@ -4705,7 +4719,7 @@ static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address)
kvm_mmu_invlpg(emul_to_vcpu(ctxt), address);
}
-int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
+int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu)
{
if (!need_emulate_wbinvd(vcpu))
return X86EMUL_CONTINUE;
@@ -4722,19 +4736,29 @@ int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
wbinvd();
return X86EMUL_CONTINUE;
}
+
+int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
+{
+ kvm_x86_ops->skip_emulated_instruction(vcpu);
+ return kvm_emulate_wbinvd_noskip(vcpu);
+}
EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
+
+
static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt)
{
- kvm_emulate_wbinvd(emul_to_vcpu(ctxt));
+ kvm_emulate_wbinvd_noskip(emul_to_vcpu(ctxt));
}
-int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
+static int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
+ unsigned long *dest)
{
return kvm_get_dr(emul_to_vcpu(ctxt), dr, dest);
}
-int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
+static int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
+ unsigned long value)
{
return __kvm_set_dr(emul_to_vcpu(ctxt), dr, value);
@@ -5816,7 +5840,7 @@ void kvm_arch_exit(void)
free_percpu(shared_msrs);
}
-int kvm_emulate_halt(struct kvm_vcpu *vcpu)
+int kvm_vcpu_halt(struct kvm_vcpu *vcpu)
{
++vcpu->stat.halt_exits;
if (irqchip_in_kernel(vcpu->kvm)) {
@@ -5827,6 +5851,13 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
return 0;
}
}
+EXPORT_SYMBOL_GPL(kvm_vcpu_halt);
+
+int kvm_emulate_halt(struct kvm_vcpu *vcpu)
+{
+ kvm_x86_ops->skip_emulated_instruction(vcpu);
+ return kvm_vcpu_halt(vcpu);
+}
EXPORT_SYMBOL_GPL(kvm_emulate_halt);
int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
@@ -5903,7 +5934,7 @@ static void kvm_pv_kick_cpu_op(struct kvm *kvm, unsigned long flags, int apicid)
lapic_irq.dest_id = apicid;
lapic_irq.delivery_mode = APIC_DM_REMRD;
- kvm_irq_delivery_to_apic(kvm, 0, &lapic_irq, NULL);
+ kvm_irq_delivery_to_apic(kvm, NULL, &lapic_irq, NULL);
}
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
@@ -5911,6 +5942,8 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
unsigned long nr, a0, a1, a2, a3, ret;
int op_64_bit, r = 1;
+ kvm_x86_ops->skip_emulated_instruction(vcpu);
+
if (kvm_hv_hypercall_enabled(vcpu->kvm))
return kvm_hv_hypercall(vcpu);
@@ -6164,7 +6197,7 @@ void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
}
/*
- * Returns 1 to let __vcpu_run() continue the guest execution loop without
+ * Returns 1 to let vcpu_run() continue the guest execution loop without
* exiting to the userspace. Otherwise, the value will be returned to the
* userspace.
*/
@@ -6301,6 +6334,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
set_debugreg(vcpu->arch.eff_db[2], 2);
set_debugreg(vcpu->arch.eff_db[3], 3);
set_debugreg(vcpu->arch.dr6, 6);
+ vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
}
trace_kvm_entry(vcpu->vcpu_id);
@@ -6382,42 +6416,47 @@ out:
return r;
}
+static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
+{
+ if (!kvm_arch_vcpu_runnable(vcpu)) {
+ srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
+ kvm_vcpu_block(vcpu);
+ vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
+ if (!kvm_check_request(KVM_REQ_UNHALT, vcpu))
+ return 1;
+ }
+
+ kvm_apic_accept_events(vcpu);
+ switch(vcpu->arch.mp_state) {
+ case KVM_MP_STATE_HALTED:
+ vcpu->arch.pv.pv_unhalted = false;
+ vcpu->arch.mp_state =
+ KVM_MP_STATE_RUNNABLE;
+ case KVM_MP_STATE_RUNNABLE:
+ vcpu->arch.apf.halted = false;
+ break;
+ case KVM_MP_STATE_INIT_RECEIVED:
+ break;
+ default:
+ return -EINTR;
+ break;
+ }
+ return 1;
+}
-static int __vcpu_run(struct kvm_vcpu *vcpu)
+static int vcpu_run(struct kvm_vcpu *vcpu)
{
int r;
struct kvm *kvm = vcpu->kvm;
vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
- r = 1;
- while (r > 0) {
+ for (;;) {
if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
!vcpu->arch.apf.halted)
r = vcpu_enter_guest(vcpu);
- else {
- srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
- kvm_vcpu_block(vcpu);
- vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
- if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
- kvm_apic_accept_events(vcpu);
- switch(vcpu->arch.mp_state) {
- case KVM_MP_STATE_HALTED:
- vcpu->arch.pv.pv_unhalted = false;
- vcpu->arch.mp_state =
- KVM_MP_STATE_RUNNABLE;
- case KVM_MP_STATE_RUNNABLE:
- vcpu->arch.apf.halted = false;
- break;
- case KVM_MP_STATE_INIT_RECEIVED:
- break;
- default:
- r = -EINTR;
- break;
- }
- }
- }
-
+ else
+ r = vcpu_block(kvm, vcpu);
if (r <= 0)
break;
@@ -6429,6 +6468,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
r = -EINTR;
vcpu->run->exit_reason = KVM_EXIT_INTR;
++vcpu->stat.request_irq_exits;
+ break;
}
kvm_check_async_pf_completion(vcpu);
@@ -6437,6 +6477,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
r = -EINTR;
vcpu->run->exit_reason = KVM_EXIT_INTR;
++vcpu->stat.signal_exits;
+ break;
}
if (need_resched()) {
srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
@@ -6568,7 +6609,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
} else
WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed);
- r = __vcpu_run(vcpu);
+ r = vcpu_run(vcpu);
out:
post_kvm_run_save(vcpu);
@@ -7075,11 +7116,14 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu)
kvm_clear_exception_queue(vcpu);
memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
+ kvm_update_dr0123(vcpu);
vcpu->arch.dr6 = DR6_INIT;
kvm_update_dr6(vcpu);
vcpu->arch.dr7 = DR7_FIXED_1;
kvm_update_dr7(vcpu);
+ vcpu->arch.cr2 = 0;
+
kvm_make_request(KVM_REQ_EVENT, vcpu);
vcpu->arch.apf.msr_val = 0;
vcpu->arch.st.msr_val = 0;
@@ -7240,7 +7284,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
vcpu->arch.pv.pv_unhalted = false;
vcpu->arch.emulate_ctxt.ops = &emulate_ops;
- if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
+ if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_reset_bsp(vcpu))
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
else
vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
@@ -7288,6 +7332,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
vcpu->arch.guest_supported_xcr0 = 0;
vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
+ vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
+
kvm_async_pf_hash_reset(vcpu);
kvm_pmu_init(vcpu);
@@ -7428,7 +7474,7 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
if (!dont || free->arch.rmap[i] != dont->arch.rmap[i]) {
- kvm_kvfree(free->arch.rmap[i]);
+ kvfree(free->arch.rmap[i]);
free->arch.rmap[i] = NULL;
}
if (i == 0)
@@ -7436,7 +7482,7 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
if (!dont || free->arch.lpage_info[i - 1] !=
dont->arch.lpage_info[i - 1]) {
- kvm_kvfree(free->arch.lpage_info[i - 1]);
+ kvfree(free->arch.lpage_info[i - 1]);
free->arch.lpage_info[i - 1] = NULL;
}
}
@@ -7490,12 +7536,12 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
out_free:
for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
- kvm_kvfree(slot->arch.rmap[i]);
+ kvfree(slot->arch.rmap[i]);
slot->arch.rmap[i] = NULL;
if (i == 0)
continue;
- kvm_kvfree(slot->arch.lpage_info[i - 1]);
+ kvfree(slot->arch.lpage_info[i - 1]);
slot->arch.lpage_info[i - 1] = NULL;
}
return -ENOMEM;
@@ -7618,6 +7664,23 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
new = id_to_memslot(kvm->memslots, mem->slot);
/*
+ * Dirty logging tracks sptes in 4k granularity, meaning that large
+ * sptes have to be split. If live migration is successful, the guest
+ * in the source machine will be destroyed and large sptes will be
+ * created in the destination. However, if the guest continues to run
+ * in the source machine (for example if live migration fails), small
+ * sptes will remain around and cause bad performance.
+ *
+ * Scan sptes if dirty logging has been stopped, dropping those
+ * which can be collapsed into a single large-page spte. Later
+ * page faults will create the large-page sptes.
+ */
+ if ((change != KVM_MR_DELETE) &&
+ (old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
+ !(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
+ kvm_mmu_zap_collapsible_sptes(kvm, new);
+
+ /*
* Set up write protection and/or dirty logging for the new slot.
*
* For KVM_MR_DELETE and KVM_MR_MOVE, the shadow pages of old slot have
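The kvm_vcpu_halt()/kvm_emulate_halt() split above (and the matching hypercall and wbinvd changes) moves the RIP advance into the wrapper: exit handlers reached through hardware-decoded exits skip the instruction exactly once, while the x86 emulator — which advances RIP itself — calls the noskip core directly. A toy model of that layering, with made-up types and a fixed instruction length:

#include <stdio.h>

struct vcpu { unsigned long rip; int halted; };

/* Stand-in for the hardware-assisted skip the real code asks VMX/SVM for. */
static void skip_emulated_instruction(struct vcpu *v, int insn_len)
{
    v->rip += insn_len;
}

/* kvm_vcpu_halt() analogue: state change only, no RIP adjustment. */
static int vcpu_halt(struct vcpu *v)
{
    v->halted = 1;
    return 1;                       /* stay in the in-kernel run loop */
}

/* kvm_emulate_halt() analogue: skip first, then do the work. */
static int emulate_halt(struct vcpu *v, int insn_len)
{
    skip_emulated_instruction(v, insn_len);
    return vcpu_halt(v);
}

int main(void)
{
    struct vcpu v = { .rip = 0x1000, .halted = 0 };

    emulate_halt(&v, 1);            /* HLT is a one-byte opcode */
    printf("rip=0x%lx halted=%d\n", v.rip, v.halted);
    return 0;
}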
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index c905e89e19fe..1f33b3d1fd68 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -69,21 +69,20 @@ EXPORT_SYMBOL(copy_in_user);
* it is not necessary to optimize tail handling.
*/
__visible unsigned long
-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
+copy_user_handle_tail(char *to, char *from, unsigned len)
{
- char c;
- unsigned zero_len;
-
for (; len; --len, to++) {
+ char c;
+
if (__get_user_nocheck(c, from++, sizeof(char)))
break;
if (__put_user_nocheck(c, to, sizeof(char)))
break;
}
-
- for (c = 0, zero_len = len; zerorest && zero_len; --zero_len)
- if (__put_user_nocheck(c, to++, sizeof(char)))
- break;
clac();
+
+ /* If the destination is a kernel buffer, we always clear the end */
+ if ((unsigned long)to >= TASK_SIZE_MAX)
+ memset(to, 0, len);
return len;
}
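The rewritten copy_user_handle_tail() above drops the zerorest parameter and keys the zeroing off the destination address instead: anything at or above TASK_SIZE_MAX is a kernel buffer, so the unwritten tail is cleared and no stale data can leak. A userspace mirror of that predicate — the TASK_SIZE_MAX value is the x86-64 figure, used here purely for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TASK_SIZE_MAX 0x7ffffffff000ULL  /* x86-64 figure, illustrative */

/* After a faulting copy, zero the remainder only when the destination
 * lies in the kernel half of the address space. */
static unsigned handle_tail(char *to, unsigned copied, unsigned len)
{
    unsigned remaining = len - copied;

    if ((uintptr_t)(to + copied) >= TASK_SIZE_MAX)
        memset(to + copied, 0, remaining);
    return remaining;
}

int main(void)
{
    char buf[8] = "XXXXXXX";

    /* A stack buffer is a user-half address here, so nothing is zeroed;
     * the caller just learns how many bytes were left over. */
    printf("left=%u buf=%s\n", handle_tail(buf, 3, 7), buf);
    return 0;
}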
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index dbc8627a5cdf..02744df576d5 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -85,12 +85,20 @@ static efi_status_t __init phys_efi_set_virtual_address_map(
efi_memory_desc_t *virtual_map)
{
efi_status_t status;
+ unsigned long flags;
+ pgd_t *save_pgd;
- efi_call_phys_prolog();
+ save_pgd = efi_call_phys_prolog();
+
+ /* Disable interrupts around EFI calls: */
+ local_irq_save(flags);
status = efi_call_phys(efi_phys.set_virtual_address_map,
memory_map_size, descriptor_size,
descriptor_version, virtual_map);
- efi_call_phys_epilog();
+ local_irq_restore(flags);
+
+ efi_call_phys_epilog(save_pgd);
+
return status;
}
@@ -491,7 +499,8 @@ void __init efi_init(void)
if (efi_memmap_init())
return;
- print_efi_memmap();
+ if (efi_enabled(EFI_DBG))
+ print_efi_memmap();
}
void __init efi_late_init(void)
@@ -939,6 +948,8 @@ static int __init arch_parse_efi_cmdline(char *str)
{
if (parse_option_str(str, "old_map"))
set_bit(EFI_OLD_MEMMAP, &efi.flags);
+ if (parse_option_str(str, "debug"))
+ set_bit(EFI_DBG, &efi.flags);
return 0;
}
diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
index 40e7cda52936..ed5b67338294 100644
--- a/arch/x86/platform/efi/efi_32.c
+++ b/arch/x86/platform/efi/efi_32.c
@@ -33,11 +33,10 @@
/*
* To make EFI call EFI runtime service in physical addressing mode we need
- * prolog/epilog before/after the invocation to disable interrupt, to
- * claim EFI runtime service handler exclusively and to duplicate a memory in
- * low memory space say 0 - 3G.
+ * prolog/epilog before/after the invocation to claim the EFI runtime service
+ * handler exclusively and to duplicate a memory mapping in low memory space,
+ * say 0 - 3G.
*/
-static unsigned long efi_rt_eflags;
void efi_sync_low_kernel_mappings(void) {}
void __init efi_dump_pagetable(void) {}
@@ -57,21 +56,24 @@ void __init efi_map_region(efi_memory_desc_t *md)
void __init efi_map_region_fixed(efi_memory_desc_t *md) {}
void __init parse_efi_setup(u64 phys_addr, u32 data_len) {}
-void __init efi_call_phys_prolog(void)
+pgd_t * __init efi_call_phys_prolog(void)
{
struct desc_ptr gdt_descr;
+ pgd_t *save_pgd;
- local_irq_save(efi_rt_eflags);
-
+ /* Current pgd is swapper_pg_dir, we'll restore it later: */
+ save_pgd = swapper_pg_dir;
load_cr3(initial_page_table);
__flush_tlb_all();
gdt_descr.address = __pa(get_cpu_gdt_table(0));
gdt_descr.size = GDT_SIZE - 1;
load_gdt(&gdt_descr);
+
+ return save_pgd;
}
-void __init efi_call_phys_epilog(void)
+void __init efi_call_phys_epilog(pgd_t *save_pgd)
{
struct desc_ptr gdt_descr;
@@ -79,10 +81,8 @@ void __init efi_call_phys_epilog(void)
gdt_descr.size = GDT_SIZE - 1;
load_gdt(&gdt_descr);
- load_cr3(swapper_pg_dir);
+ load_cr3(save_pgd);
__flush_tlb_all();
-
- local_irq_restore(efi_rt_eflags);
}
void __init efi_runtime_mkexec(void)
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 17e80d829df0..a0ac0f9c307f 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -41,9 +41,6 @@
#include <asm/realmode.h>
#include <asm/time.h>
-static pgd_t *save_pgd __initdata;
-static unsigned long efi_flags __initdata;
-
/*
* We allocate runtime services regions bottom-up, starting from -4G, i.e.
* 0xffff_ffff_0000_0000 and limit EFI VA mapping space to 64G.
@@ -78,17 +75,18 @@ static void __init early_code_mapping_set_exec(int executable)
}
}
-void __init efi_call_phys_prolog(void)
+pgd_t * __init efi_call_phys_prolog(void)
{
unsigned long vaddress;
+ pgd_t *save_pgd;
+
int pgd;
int n_pgds;
if (!efi_enabled(EFI_OLD_MEMMAP))
- return;
+ return NULL;
early_code_mapping_set_exec(1);
- local_irq_save(efi_flags);
n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
save_pgd = kmalloc(n_pgds * sizeof(pgd_t), GFP_KERNEL);
@@ -99,24 +97,29 @@ void __init efi_call_phys_prolog(void)
set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
}
__flush_tlb_all();
+
+ return save_pgd;
}
-void __init efi_call_phys_epilog(void)
+void __init efi_call_phys_epilog(pgd_t *save_pgd)
{
/*
* After the lock is released, the original page table is restored.
*/
- int pgd;
- int n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE);
+ int pgd_idx;
+ int nr_pgds;
- if (!efi_enabled(EFI_OLD_MEMMAP))
+ if (!save_pgd)
return;
- for (pgd = 0; pgd < n_pgds; pgd++)
- set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
+ nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE);
+
+ for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++)
+ set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]);
+
kfree(save_pgd);
+
__flush_tlb_all();
- local_irq_restore(efi_flags);
early_code_mapping_set_exec(0);
}
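Both efi_32.c and efi_64.c now have efi_call_phys_prolog() return the saved page-table pointer instead of stashing it (along with the saved eflags) in static variables; efi_call_phys_epilog() takes the token back and treats NULL as "nothing to restore". The generic shape of that save/restore-token pattern, with a stand-in payload:

#include <stdio.h>
#include <stdlib.h>

struct saved_state { int payload; };   /* stand-in for the saved pgd array */

static struct saved_state *prolog(int need_switch)
{
    struct saved_state *s;

    if (!need_switch)
        return NULL;                   /* epilog becomes a no-op */
    s = malloc(sizeof(*s));
    if (s)
        s->payload = 42;               /* pretend we saved something */
    return s;
}

static void epilog(struct saved_state *s)
{
    if (!s)
        return;                        /* mirrors: if (!save_pgd) return; */
    /* ... restore from s->payload ... */
    free(s);
}

int main(void)
{
    struct saved_state *tok = prolog(1);

    /* ... the physical-mode call happens here, IRQs off in the caller ... */
    epilog(tok);
    return 0;
}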
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index 9793322751e0..40d2473836c9 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -82,18 +82,15 @@ static notrace cycle_t vread_pvclock(int *mode)
cycle_t ret;
u64 last;
u32 version;
+ u32 migrate_count;
u8 flags;
unsigned cpu, cpu1;
/*
- * Note: hypervisor must guarantee that:
- * 1. cpu ID number maps 1:1 to per-CPU pvclock time info.
- * 2. that per-CPU pvclock time info is updated if the
- * underlying CPU changes.
- * 3. that version is increased whenever underlying CPU
- * changes.
- *
+ * When looping to get a consistent (time-info, tsc) pair, we
+ * also need to deal with the possibility we can switch vcpus,
+ * so make sure we always re-fetch time-info for the current vcpu.
*/
do {
cpu = __getcpu() & VGETCPU_CPU_MASK;
@@ -102,20 +99,27 @@ static notrace cycle_t vread_pvclock(int *mode)
* __getcpu() calls (Gleb).
*/
- pvti = get_pvti(cpu);
+ /* Make sure migrate_count will change if we leave the VCPU. */
+ do {
+ pvti = get_pvti(cpu);
+ migrate_count = pvti->migrate_count;
+
+ cpu1 = cpu;
+ cpu = __getcpu() & VGETCPU_CPU_MASK;
+ } while (unlikely(cpu != cpu1));
version = __pvclock_read_cycles(&pvti->pvti, &ret, &flags);
/*
* Test we're still on the cpu as well as the version.
- * We could have been migrated just after the first
- * vgetcpu but before fetching the version, so we
- * wouldn't notice a version change.
+ * - We must read TSC of pvti's VCPU.
+ * - KVM doesn't follow the versioning protocol, so data could
+ * change before version if we left the VCPU.
*/
- cpu1 = __getcpu() & VGETCPU_CPU_MASK;
- } while (unlikely(cpu != cpu1 ||
- (pvti->pvti.version & 1) ||
- pvti->pvti.version != version));
+ smp_rmb();
+ } while (unlikely((pvti->pvti.version & 1) ||
+ pvti->pvti.version != version ||
+ pvti->migrate_count != migrate_count));
if (unlikely(!(flags & PVCLOCK_TSC_STABLE_BIT)))
*mode = VCLOCK_NONE;
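The vDSO change above layers a migrate_count re-check and an smp_rmb() on top of the usual even/odd version protocol, because KVM's pvclock writer does not keep the data stable between version bumps when the task migrates between vCPUs. The underlying seqcount-style read loop, sketched in portable C with compiler fences standing in for the kernel barriers:

#include <stdint.h>
#include <stdio.h>

struct time_info {
    volatile uint32_t version;   /* odd while the writer is mid-update */
    uint64_t system_time;
};

/* Retry until system_time is read under a stable, even version.  The
 * real vDSO loop additionally re-fetches the per-vCPU structure and
 * compares migrate_count before trusting the result. */
static uint64_t read_consistent(struct time_info *ti)
{
    uint32_t v;
    uint64_t t;

    do {
        v = ti->version;
        __atomic_thread_fence(__ATOMIC_ACQUIRE);
        t = ti->system_time;
        __atomic_thread_fence(__ATOMIC_ACQUIRE);  /* like smp_rmb() */
    } while ((v & 1) || v != ti->version);

    return t;
}

int main(void)
{
    struct time_info ti = { .version = 2, .system_time = 123456789 };

    printf("%llu\n", (unsigned long long)read_consistent(&ti));
    return 0;
}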
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 9f93af56a5fc..b47124d4cd67 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -91,6 +91,12 @@ EXPORT_SYMBOL_GPL(xen_p2m_size);
unsigned long xen_max_p2m_pfn __read_mostly;
EXPORT_SYMBOL_GPL(xen_max_p2m_pfn);
+#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
+#define P2M_LIMIT CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
+#else
+#define P2M_LIMIT 0
+#endif
+
static DEFINE_SPINLOCK(p2m_update_lock);
static unsigned long *p2m_mid_missing_mfn;
@@ -385,9 +391,11 @@ static void __init xen_rebuild_p2m_list(unsigned long *p2m)
void __init xen_vmalloc_p2m_tree(void)
{
static struct vm_struct vm;
+ unsigned long p2m_limit;
+ p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE;
vm.flags = VM_ALLOC;
- vm.size = ALIGN(sizeof(unsigned long) * xen_max_p2m_pfn,
+ vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit),
PMD_SIZE * PMDS_PER_MID_PAGE);
vm_area_register_early(&vm, PMD_SIZE * PMDS_PER_MID_PAGE);
pr_notice("p2m virtual area at %p, size is %lx\n", vm.addr, vm.size);
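The P2M_LIMIT change sizes the p2m virtual area for the balloon hotplug ceiling rather than just the boot-time maximum pfn, converting a limit in GiB into a pfn count. Back-of-the-envelope arithmetic for a hypothetical 512 GiB limit and 4 KiB pages:

#include <stdio.h>

int main(void)
{
    /* Hypothetical CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT of 512 (GiB). */
    unsigned long long limit_gib = 512;
    unsigned long long page_size = 4096;
    unsigned long long pfns = limit_gib * 1024 * 1024 * 1024 / page_size;

    /* One unsigned long p2m entry per pfn on a 64-bit build: */
    printf("pfns=%llu -> p2m array ~%llu MiB\n",
           pfns, pfns * sizeof(unsigned long) / (1024 * 1024));
    return 0;
}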
diff --git a/block/blk-merge.c b/block/blk-merge.c
index fc1ff3b1ea1f..fd3fee81c23c 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -592,7 +592,7 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS)) {
struct bio_vec *bprev;
- bprev = &rq->biotail->bi_io_vec[bio->bi_vcnt - 1];
+ bprev = &rq->biotail->bi_io_vec[rq->biotail->bi_vcnt - 1];
if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset))
return false;
}
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index d53a764b05ea..be3290cc0644 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -278,9 +278,11 @@ static int bt_get(struct blk_mq_alloc_data *data,
/*
* We're out of tags on this hardware queue, kick any
* pending IO submits before going to sleep waiting for
- * some to complete.
+ * some to complete. Note that hctx can be NULL here for
+ * reserved tag allocation.
*/
- blk_mq_run_hw_queue(hctx, false);
+ if (hctx)
+ blk_mq_run_hw_queue(hctx, false);
/*
* Retry tag allocation after running the hardware queue,
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 4f4bea21052e..33c428530193 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1457,7 +1457,7 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
do {
page = alloc_pages_node(set->numa_node,
- GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
+ GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
this_order);
if (page)
break;
@@ -1479,8 +1479,6 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
left -= to_do * rq_size;
for (j = 0; j < to_do; j++) {
tags->rqs[i] = p;
- tags->rqs[i]->atomic_flags = 0;
- tags->rqs[i]->cmd_flags = 0;
if (set->ops->init_request) {
if (set->ops->init_request(set->driver_data,
tags->rqs[i], hctx_idx, i,
@@ -1938,7 +1936,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
*/
if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
- goto err_map;
+ goto err_mq_usage;
setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
blk_queue_rq_timeout(q, 30000);
@@ -1981,7 +1979,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
blk_mq_init_cpu_queues(q, set->nr_hw_queues);
if (blk_mq_init_hw_queues(q, set))
- goto err_hw;
+ goto err_mq_usage;
mutex_lock(&all_q_mutex);
list_add_tail(&q->all_q_node, &all_q_list);
@@ -1993,7 +1991,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
return q;
-err_hw:
+err_mq_usage:
blk_cleanup_queue(q);
err_hctxs:
kfree(map);
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 6ed2cbe5e8c9..12600bfffca9 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -585,7 +585,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
b->physical_block_size);
t->io_min = max(t->io_min, b->io_min);
- t->io_opt = lcm(t->io_opt, b->io_opt);
+ t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
t->cluster &= b->cluster;
t->discard_zeroes_data &= b->discard_zeroes_data;
@@ -616,7 +616,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
b->raid_partial_stripes_expensive);
/* Find lowest common alignment_offset */
- t->alignment_offset = lcm(t->alignment_offset, alignment)
+ t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
% max(t->physical_block_size, t->io_min);
/* Verify that new alignment_offset is on a logical block boundary */
@@ -643,7 +643,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
b->max_discard_sectors);
t->discard_granularity = max(t->discard_granularity,
b->discard_granularity);
- t->discard_alignment = lcm(t->discard_alignment, alignment) %
+ t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
t->discard_granularity;
}
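The blk-settings hunks switch lcm() to lcm_not_zero() because a plain lcm collapses to 0 whenever one queue has not set io_opt (or an alignment), wiping out the other queue's value when limits are stacked. A self-contained sketch of both helpers; the lcm_not_zero semantics below follow the kernel's lib/lcm.c helper and should be treated as an assumption:

#include <stdio.h>

static unsigned long gcd(unsigned long a, unsigned long b)
{
    while (b) {
        unsigned long t = a % b;

        a = b;
        b = t;
    }
    return a;
}

static unsigned long lcm(unsigned long a, unsigned long b)
{
    return (a && b) ? (a / gcd(a, b)) * b : 0;
}

/* Treat 0 as "limit not set": fall back to the non-zero operand
 * instead of collapsing the stacked limit to 0. */
static unsigned long lcm_not_zero(unsigned long a, unsigned long b)
{
    unsigned long l = lcm(a, b);

    return l ? l : (a ? a : b);
}

int main(void)
{
    printf("lcm(0,512)=%lu  lcm_not_zero(0,512)=%lu\n",
           lcm(0, 512), lcm_not_zero(0, 512));
    return 0;
}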
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index c6bb9f1257c9..f98db0b50551 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -922,7 +922,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
return -EINVAL;
drv->safe_state_index = -1;
- for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
+ for (i = CPUIDLE_DRIVER_STATE_START; i < CPUIDLE_STATE_MAX; i++) {
drv->states[i].name[0] = '\0';
drv->states[i].desc[0] = '\0';
}
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 4c35f0822d06..23dac3babfe3 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4204,9 +4204,18 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
/* devices that don't properly handle queued TRIM commands */
- { "Micron_M[56]*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
+ { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
+ ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
+ ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ { "Micron_M5[15]0*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
+ ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ { "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
+ ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
+ ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ { "Samsung SSD 850 PRO*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
- { "Crucial_CT*SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
/*
* As defined, the DRAT (Deterministic Read After Trim) and RZAT
@@ -4226,6 +4235,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
*/
{ "INTEL*SSDSC2MH*", NULL, 0, },
+ { "Micron*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ { "Crucial*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "INTEL*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
@@ -4737,7 +4748,7 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag)
return NULL;
/* libsas case */
- if (!ap->scsi_host) {
+ if (ap->flags & ATA_FLAG_SAS_HOST) {
tag = ata_sas_allocate_tag(ap);
if (tag < 0)
return NULL;
@@ -4776,7 +4787,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
tag = qc->tag;
if (likely(ata_tag_valid(tag))) {
qc->tag = ATA_TAG_POISON;
- if (!ap->scsi_host)
+ if (ap->flags & ATA_FLAG_SAS_HOST)
ata_sas_free_tag(tag, ap);
}
}
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index beb8b27d4621..a13587b5c2be 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -243,4 +243,12 @@ extern struct regcache_ops regcache_rbtree_ops;
extern struct regcache_ops regcache_lzo_ops;
extern struct regcache_ops regcache_flat_ops;
+static inline const char *regmap_name(const struct regmap *map)
+{
+ if (map->dev)
+ return dev_name(map->dev);
+
+ return map->name;
+}
+
#endif
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index da84f544c544..87db9893b463 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -218,7 +218,7 @@ int regcache_read(struct regmap *map,
ret = map->cache_ops->read(map, reg, value);
if (ret == 0)
- trace_regmap_reg_read_cache(map->dev, reg, *value);
+ trace_regmap_reg_read_cache(map, reg, *value);
return ret;
}
@@ -311,7 +311,7 @@ int regcache_sync(struct regmap *map)
dev_dbg(map->dev, "Syncing %s cache\n",
map->cache_ops->name);
name = map->cache_ops->name;
- trace_regcache_sync(map->dev, name, "start");
+ trace_regcache_sync(map, name, "start");
if (!map->cache_dirty)
goto out;
@@ -346,7 +346,7 @@ out:
regmap_async_complete(map);
- trace_regcache_sync(map->dev, name, "stop");
+ trace_regcache_sync(map, name, "stop");
return ret;
}
@@ -381,7 +381,7 @@ int regcache_sync_region(struct regmap *map, unsigned int min,
name = map->cache_ops->name;
dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max);
- trace_regcache_sync(map->dev, name, "start region");
+ trace_regcache_sync(map, name, "start region");
if (!map->cache_dirty)
goto out;
@@ -401,7 +401,7 @@ out:
regmap_async_complete(map);
- trace_regcache_sync(map->dev, name, "stop region");
+ trace_regcache_sync(map, name, "stop region");
return ret;
}
@@ -428,7 +428,7 @@ int regcache_drop_region(struct regmap *map, unsigned int min,
map->lock(map->lock_arg);
- trace_regcache_drop_region(map->dev, min, max);
+ trace_regcache_drop_region(map, min, max);
ret = map->cache_ops->drop(map, min, max);
@@ -455,7 +455,7 @@ void regcache_cache_only(struct regmap *map, bool enable)
map->lock(map->lock_arg);
WARN_ON(map->cache_bypass && enable);
map->cache_only = enable;
- trace_regmap_cache_only(map->dev, enable);
+ trace_regmap_cache_only(map, enable);
map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_only);
@@ -493,7 +493,7 @@ void regcache_cache_bypass(struct regmap *map, bool enable)
map->lock(map->lock_arg);
WARN_ON(map->cache_only && enable);
map->cache_bypass = enable;
- trace_regmap_cache_bypass(map->dev, enable);
+ trace_regmap_cache_bypass(map, enable);
map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_bypass);
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index f99b098ddabf..dbfe6a69c3da 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -1281,7 +1281,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
if (map->async && map->bus->async_write) {
struct regmap_async *async;
- trace_regmap_async_write_start(map->dev, reg, val_len);
+ trace_regmap_async_write_start(map, reg, val_len);
spin_lock_irqsave(&map->async_lock, flags);
async = list_first_entry_or_null(&map->async_free,
@@ -1339,8 +1339,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
return ret;
}
- trace_regmap_hw_write_start(map->dev, reg,
- val_len / map->format.val_bytes);
+ trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);
/* If we're doing a single register write we can probably just
* send the work_buf directly, otherwise try to do a gather
@@ -1372,8 +1371,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
kfree(buf);
}
- trace_regmap_hw_write_done(map->dev, reg,
- val_len / map->format.val_bytes);
+ trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);
return ret;
}
@@ -1407,12 +1405,12 @@ static int _regmap_bus_formatted_write(void *context, unsigned int reg,
map->format.format_write(map, reg, val);
- trace_regmap_hw_write_start(map->dev, reg, 1);
+ trace_regmap_hw_write_start(map, reg, 1);
ret = map->bus->write(map->bus_context, map->work_buf,
map->format.buf_size);
- trace_regmap_hw_write_done(map->dev, reg, 1);
+ trace_regmap_hw_write_done(map, reg, 1);
return ret;
}
@@ -1470,7 +1468,7 @@ int _regmap_write(struct regmap *map, unsigned int reg,
dev_info(map->dev, "%x <= %x\n", reg, val);
#endif
- trace_regmap_reg_write(map->dev, reg, val);
+ trace_regmap_reg_write(map, reg, val);
return map->reg_write(context, reg, val);
}
@@ -1773,7 +1771,7 @@ static int _regmap_raw_multi_reg_write(struct regmap *map,
for (i = 0; i < num_regs; i++) {
int reg = regs[i].reg;
int val = regs[i].def;
- trace_regmap_hw_write_start(map->dev, reg, 1);
+ trace_regmap_hw_write_start(map, reg, 1);
map->format.format_reg(u8, reg, map->reg_shift);
u8 += reg_bytes + pad_bytes;
map->format.format_val(u8, val, 0);
@@ -1788,7 +1786,7 @@ static int _regmap_raw_multi_reg_write(struct regmap *map,
for (i = 0; i < num_regs; i++) {
int reg = regs[i].reg;
- trace_regmap_hw_write_done(map->dev, reg, 1);
+ trace_regmap_hw_write_done(map, reg, 1);
}
return ret;
}
@@ -2059,15 +2057,13 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
*/
u8[0] |= map->read_flag_mask;
- trace_regmap_hw_read_start(map->dev, reg,
- val_len / map->format.val_bytes);
+ trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);
ret = map->bus->read(map->bus_context, map->work_buf,
map->format.reg_bytes + map->format.pad_bytes,
val, val_len);
- trace_regmap_hw_read_done(map->dev, reg,
- val_len / map->format.val_bytes);
+ trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);
return ret;
}
@@ -2123,7 +2119,7 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
dev_info(map->dev, "%x => %x\n", reg, *val);
#endif
- trace_regmap_reg_read(map->dev, reg, *val);
+ trace_regmap_reg_read(map, reg, *val);
if (!map->cache_bypass)
regcache_write(map, reg, *val);
@@ -2480,7 +2476,7 @@ void regmap_async_complete_cb(struct regmap_async *async, int ret)
struct regmap *map = async->map;
bool wake;
- trace_regmap_async_io_complete(map->dev);
+ trace_regmap_async_io_complete(map);
spin_lock(&map->async_lock);
list_move(&async->list, &map->async_free);
@@ -2525,7 +2521,7 @@ int regmap_async_complete(struct regmap *map)
if (!map->bus || !map->bus->async_write)
return 0;
- trace_regmap_async_complete_start(map->dev);
+ trace_regmap_async_complete_start(map);
wait_event(map->async_waitq, regmap_async_is_done(map));
@@ -2534,7 +2530,7 @@ int regmap_async_complete(struct regmap *map)
map->async_ret = 0;
spin_unlock_irqrestore(&map->async_lock, flags);
- trace_regmap_async_complete_done(map->dev);
+ trace_regmap_async_complete_done(map);
return ret;
}
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 4bc2a5cb9935..a98c41f72c63 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -803,10 +803,6 @@ static int __init nbd_init(void)
return -EINVAL;
}
- nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
- if (!nbd_dev)
- return -ENOMEM;
-
part_shift = 0;
if (max_part > 0) {
part_shift = fls(max_part);
@@ -828,6 +824,10 @@ static int __init nbd_init(void)
if (nbds_max > 1UL << (MINORBITS - part_shift))
return -EINVAL;
+ nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
+ if (!nbd_dev)
+ return -ENOMEM;
+
for (i = 0; i < nbds_max; i++) {
struct gendisk *disk = alloc_disk(1 << part_shift);
if (!disk)
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index ceb32dd52a6c..e23be20a3417 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -3003,6 +3003,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
get_device(dev->device);
+ INIT_LIST_HEAD(&dev->node);
INIT_WORK(&dev->probe_work, nvme_async_probe);
schedule_work(&dev->probe_work);
return 0;
diff --git a/drivers/char/ipmi/ipmi_powernv.c b/drivers/char/ipmi/ipmi_powernv.c
index 79524ed2a3cb..8753b0f6a317 100644
--- a/drivers/char/ipmi/ipmi_powernv.c
+++ b/drivers/char/ipmi/ipmi_powernv.c
@@ -125,6 +125,7 @@ static int ipmi_powernv_recv(struct ipmi_smi_powernv *smi)
spin_lock_irqsave(&smi->msg_lock, flags);
if (!smi->cur_msg) {
+ spin_unlock_irqrestore(&smi->msg_lock, flags);
pr_warn("no current message?\n");
return 0;
}
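
The ipmi_powernv fix adds the missing spin_unlock_irqrestore() before the early return, so the "no current message" path no longer exits with the lock held. The rule it restores, sketched with a pthread mutex (hypothetical names; the kernel code uses a spinlock with saved irq flags):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t msg_lock = PTHREAD_MUTEX_INITIALIZER;
static void *cur_msg;			/* NULL models "no current message" */

static int recv_one(void)
{
	pthread_mutex_lock(&msg_lock);
	if (!cur_msg) {
		/* every return path must release the lock */
		pthread_mutex_unlock(&msg_lock);
		fprintf(stderr, "no current message?\n");
		return 0;
	}
	/* ... consume cur_msg ... */
	pthread_mutex_unlock(&msg_lock);
	return 1;
}

int main(void)
{
	return recv_one();
}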
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index f6646ed3047e..518585c1ce94 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -263,6 +263,11 @@ struct smi_info {
bool supports_event_msg_buff;
/*
+ * Can we clear the global enables receive irq bit?
+ */
+ bool cannot_clear_recv_irq_bit;
+
+ /*
* Did we get an attention that we did not handle?
*/
bool got_attn;
@@ -461,6 +466,9 @@ static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
* allocate messages, we just leave them in the BMC and run the system
* polled until we can allocate some memory. Once we have some
* memory, we will re-enable the interrupt.
+ *
+ * Note that we cannot just use disable_irq(), since the interrupt may
+ * be shared.
*/
static inline bool disable_si_irq(struct smi_info *smi_info)
{
@@ -549,20 +557,15 @@ static u8 current_global_enables(struct smi_info *smi_info, u8 base,
if (smi_info->supports_event_msg_buff)
enables |= IPMI_BMC_EVT_MSG_BUFF;
- else
- enables &= ~IPMI_BMC_EVT_MSG_BUFF;
- if (smi_info->irq && !smi_info->interrupt_disabled)
+ if ((smi_info->irq && !smi_info->interrupt_disabled) ||
+ smi_info->cannot_clear_recv_irq_bit)
enables |= IPMI_BMC_RCV_MSG_INTR;
- else
- enables &= ~IPMI_BMC_RCV_MSG_INTR;
if (smi_info->supports_event_msg_buff &&
smi_info->irq && !smi_info->interrupt_disabled)
enables |= IPMI_BMC_EVT_MSG_INTR;
- else
- enables &= ~IPMI_BMC_EVT_MSG_INTR;
*irq_on = enables & (IPMI_BMC_EVT_MSG_INTR | IPMI_BMC_RCV_MSG_INTR);
@@ -2900,6 +2903,96 @@ static int try_get_dev_id(struct smi_info *smi_info)
return rv;
}
+/*
+ * Some BMCs do not support clearing the receive irq bit in the global
+ * enables (even if they don't support interrupts on the BMC). Check
+ * for this and handle it properly.
+ */
+static void check_clr_rcv_irq(struct smi_info *smi_info)
+{
+ unsigned char msg[3];
+ unsigned char *resp;
+ unsigned long resp_len;
+ int rv;
+
+ resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
+ if (!resp) {
+ printk(KERN_WARNING PFX "Out of memory allocating response for"
+ " global enables command, cannot check recv irq bit"
+ " handling.\n");
+ return;
+ }
+
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
+ smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
+
+ rv = wait_for_msg_done(smi_info);
+ if (rv) {
+ printk(KERN_WARNING PFX "Error getting response from get"
+ " global enables command, cannot check recv irq bit"
+ " handling.\n");
+ goto out;
+ }
+
+ resp_len = smi_info->handlers->get_result(smi_info->si_sm,
+ resp, IPMI_MAX_MSG_LENGTH);
+
+ if (resp_len < 4 ||
+ resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
+ resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD ||
+ resp[2] != 0) {
+ printk(KERN_WARNING PFX "Invalid return from get global"
+ " enables command, cannot check recv irq bit"
+ " handling.\n");
+ rv = -EINVAL;
+ goto out;
+ }
+
+ if ((resp[3] & IPMI_BMC_RCV_MSG_INTR) == 0)
+ /* Already clear, should work ok. */
+ goto out;
+
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
+ msg[2] = resp[3] & ~IPMI_BMC_RCV_MSG_INTR;
+ smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
+
+ rv = wait_for_msg_done(smi_info);
+ if (rv) {
+ printk(KERN_WARNING PFX "Error getting response from set"
+ " global enables command, cannot check recv irq bit"
+ " handling.\n");
+ goto out;
+ }
+
+ resp_len = smi_info->handlers->get_result(smi_info->si_sm,
+ resp, IPMI_MAX_MSG_LENGTH);
+
+ if (resp_len < 3 ||
+ resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
+ resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
+ printk(KERN_WARNING PFX "Invalid return from get global"
+ " enables command, cannot check recv irq bit"
+ " handling.\n");
+ rv = -EINVAL;
+ goto out;
+ }
+
+ if (resp[2] != 0) {
+ /*
+ * An error from the set command means the BMC does not
+ * support clearing the receive irq bit.
+ */
+ printk(KERN_WARNING PFX "The BMC does not support clearing"
+ " the recv irq bit, compensating, but the BMC needs to"
+ " be fixed.\n");
+ smi_info->cannot_clear_recv_irq_bit = true;
+ }
+ out:
+ kfree(resp);
+}
+
static int try_enable_event_buffer(struct smi_info *smi_info)
{
unsigned char msg[3];
@@ -3395,6 +3488,8 @@ static int try_smi_init(struct smi_info *new_smi)
goto out_err;
}
+ check_clr_rcv_irq(new_smi);
+
setup_oem_data_handler(new_smi);
setup_xaction_handlers(new_smi);
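
check_clr_rcv_irq() round-trips the Get/Set BMC Global Enables commands and inspects the raw response framing: byte 0 must carry the request netfn with the response bit set ((netfn | 1) << 2), byte 1 echoes the command, byte 2 is the completion code. A standalone sketch of that check (the netfn/cmd values in main() are illustrative):

#include <stdio.h>

/* Return 0 if a raw response matches the request (netfn, cmd) we sent. */
static int check_resp(const unsigned char *resp, unsigned long len,
		      unsigned char netfn, unsigned char cmd)
{
	if (len < 3)
		return -1;
	if (resp[0] != (unsigned char)((netfn | 1) << 2))	/* response netfn */
		return -1;
	if (resp[1] != cmd)					/* echoed command */
		return -1;
	return resp[2] ? -1 : 0;				/* completion code */
}

int main(void)
{
	unsigned char netfn = 0x06, cmd = 0x2f;	/* app netfn, illustrative cmd */
	unsigned char ok[] = { (unsigned char)((netfn | 1) << 2), 0x2f, 0x00, 0x42 };

	printf("%s\n", check_resp(ok, sizeof(ok), netfn, cmd) ? "bad" : "ok");
	return 0;
}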
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index f6e378dac5f5..f40e3bd2c69c 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -468,11 +468,13 @@ static int ipmi_ssif_thread(void *data)
int result;
/* Wait for something to do */
- wait_for_completion(&ssif_info->wake_thread);
- init_completion(&ssif_info->wake_thread);
-
+ result = wait_for_completion_interruptible(
+ &ssif_info->wake_thread);
if (ssif_info->stopping)
break;
+ if (result == -ERESTARTSYS)
+ continue;
+ init_completion(&ssif_info->wake_thread);
if (ssif_info->i2c_read_write == I2C_SMBUS_WRITE) {
result = i2c_smbus_write_block_data(
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 68161f7a07d6..a0b036ccb118 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -192,6 +192,7 @@ config SYS_SUPPORTS_EM_STI
config SH_TIMER_CMT
bool "Renesas CMT timer driver" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
+ depends on HAS_IOMEM
default SYS_SUPPORTS_SH_CMT
help
This enables build of a clocksource and clockevent driver for
@@ -201,6 +202,7 @@ config SH_TIMER_CMT
config SH_TIMER_MTU2
bool "Renesas MTU2 timer driver" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
+ depends on HAS_IOMEM
default SYS_SUPPORTS_SH_MTU2
help
This enables build of a clockevent driver for the Multi-Function
@@ -210,6 +212,7 @@ config SH_TIMER_MTU2
config SH_TIMER_TMU
bool "Renesas TMU timer driver" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
+ depends on HAS_IOMEM
default SYS_SUPPORTS_SH_TMU
help
This enables build of a clocksource and clockevent driver for
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
index 5dcbf90b8015..58597fbcc046 100644
--- a/drivers/clocksource/timer-sun5i.c
+++ b/drivers/clocksource/timer-sun5i.c
@@ -17,7 +17,6 @@
#include <linux/irq.h>
#include <linux/irqreturn.h>
#include <linux/reset.h>
-#include <linux/sched_clock.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
@@ -137,11 +136,6 @@ static struct irqaction sun5i_timer_irq = {
.dev_id = &sun5i_clockevent,
};
-static u64 sun5i_timer_sched_read(void)
-{
- return ~readl(timer_base + TIMER_CNTVAL_LO_REG(1));
-}
-
static void __init sun5i_timer_init(struct device_node *node)
{
struct reset_control *rstc;
@@ -172,7 +166,6 @@ static void __init sun5i_timer_init(struct device_node *node)
writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD,
timer_base + TIMER_CTL_REG(1));
- sched_clock_register(sun5i_timer_sched_read, 32, rate);
clocksource_mmio_init(timer_base + TIMER_CNTVAL_LO_REG(1), node->name,
rate, 340, 32, clocksource_mmio_readl_down);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 28e59a48b35f..8ae655c364f4 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1698,15 +1698,18 @@ void cpufreq_resume(void)
|| __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
pr_err("%s: Failed to start governor for policy: %p\n",
__func__, policy);
-
- /*
- * schedule call cpufreq_update_policy() for boot CPU, i.e. last
- * policy in list. It will verify that the current freq is in
- * sync with what we believe it to be.
- */
- if (list_is_last(&policy->policy_list, &cpufreq_policy_list))
- schedule_work(&policy->update);
}
+
+ /*
+ * Schedule a call to cpufreq_update_policy() for the first online CPU,
+ * as that one won't be hotplugged out on suspend. It will verify that the
+ * current freq is in sync with what we believe it to be.
+ */
+ policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask));
+ if (WARN_ON(!policy))
+ return;
+
+ schedule_work(&policy->update);
}
/**
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 080bd2dbde4b..7a73a279e179 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -330,9 +330,6 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
if (!dev->registered)
return -EINVAL;
- if (!dev->state_count)
- dev->state_count = drv->state_count;
-
ret = cpuidle_add_device_sysfs(dev);
if (ret)
return ret;
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 97c5903b4606..832a2c3f01ff 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -401,7 +401,7 @@ static int cpuidle_add_state_sysfs(struct cpuidle_device *device)
struct cpuidle_driver *drv = cpuidle_get_cpu_driver(device);
/* state statistics */
- for (i = 0; i < device->state_count; i++) {
+ for (i = 0; i < drv->state_count; i++) {
kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL);
if (!kobj)
goto error_state;
@@ -433,9 +433,10 @@ error_state:
*/
static void cpuidle_remove_state_sysfs(struct cpuidle_device *device)
{
+ struct cpuidle_driver *drv = cpuidle_get_cpu_driver(device);
int i;
- for (i = 0; i < device->state_count; i++)
+ for (i = 0; i < drv->state_count; i++)
cpuidle_free_state_kobj(device, i);
}
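
The cpuidle change drops the per-device state_count copy; both cpuidle_add_state_sysfs() and cpuidle_remove_state_sysfs() now iterate the driver's state_count, making creation and teardown symmetric by construction. A toy illustration of that invariant (all names invented):

#include <stdio.h>
#include <stdlib.h>

struct drv { int state_count; };
struct dev { void *kobj[8]; };

static void add_state_sysfs(struct dev *d, const struct drv *drv)
{
	int i;

	/* Create one object per driver state... */
	for (i = 0; i < drv->state_count; i++)
		d->kobj[i] = malloc(16);
}

static void remove_state_sysfs(struct dev *d, const struct drv *drv)
{
	int i;

	/* ...and tear down over the same bound, so nothing is leaked
	 * or double-freed even if a stale per-device count differs. */
	for (i = 0; i < drv->state_count; i++)
		free(d->kobj[i]);
}

int main(void)
{
	struct drv drv = { .state_count = 4 };
	struct dev dev = { 0 };

	add_state_sysfs(&dev, &drv);
	remove_state_sysfs(&dev, &drv);
	puts("symmetric");
	return 0;
}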
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 0723096fb50a..c92d6a70ccf3 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -475,6 +475,7 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
* c->desc is NULL and exit.)
*/
if (c->desc) {
+ bcm2835_dma_desc_free(&c->desc->vd);
c->desc = NULL;
bcm2835_dma_abort(c->chan_base);
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index 512cb8e2805e..ceedafbd23e0 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -903,6 +903,11 @@ static const struct cppi_glue_infos *get_glue_info(struct device *dev)
return of_id->data;
}
+#define CPPI41_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
+
static int cppi41_dma_probe(struct platform_device *pdev)
{
struct cppi41_dd *cdd;
@@ -926,6 +931,10 @@ static int cppi41_dma_probe(struct platform_device *pdev)
cdd->ddev.device_issue_pending = cppi41_dma_issue_pending;
cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg;
cdd->ddev.device_terminate_all = cppi41_stop_chan;
+ cdd->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ cdd->ddev.src_addr_widths = CPPI41_DMA_BUSWIDTHS;
+ cdd->ddev.dst_addr_widths = CPPI41_DMA_BUSWIDTHS;
+ cdd->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
cdd->ddev.dev = dev;
INIT_LIST_HEAD(&cdd->ddev.channels);
cpp41_dma_info.dma_cap = cdd->ddev.cap_mask;
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c
index 4527a3ebeac4..84884418fd30 100644
--- a/drivers/dma/dma-jz4740.c
+++ b/drivers/dma/dma-jz4740.c
@@ -511,6 +511,9 @@ static void jz4740_dma_desc_free(struct virt_dma_desc *vdesc)
kfree(container_of(vdesc, struct jz4740_dma_desc, vdesc));
}
+#define JZ4740_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
+
static int jz4740_dma_probe(struct platform_device *pdev)
{
struct jz4740_dmaengine_chan *chan;
@@ -548,6 +551,10 @@ static int jz4740_dma_probe(struct platform_device *pdev)
dd->device_prep_dma_cyclic = jz4740_dma_prep_dma_cyclic;
dd->device_config = jz4740_dma_slave_config;
dd->device_terminate_all = jz4740_dma_terminate_all;
+ dd->src_addr_widths = JZ4740_DMA_BUSWIDTHS;
+ dd->dst_addr_widths = JZ4740_DMA_BUSWIDTHS;
+ dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
dd->dev = &pdev->dev;
INIT_LIST_HEAD(&dd->channels);
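
The cppi41 and jz4740 hunks exist because dmaengine now expects slave drivers to report their capabilities (supported bus widths, directions, residue granularity) instead of triggering the WARN that the dmaengine.c hunk below removes. The bus-width mask is plain BIT() composition; a standalone sketch (the enum values mirror dma_slave_buswidth, where the value is the width in bytes):

#include <stdio.h>

#define BIT(n) (1UL << (n))

enum buswidth { W1 = 1, W2 = 2, W4 = 4 };

#define DMA_BUSWIDTHS (BIT(W1) | BIT(W2) | BIT(W4))

int main(void)
{
	/* A consumer asks: can the controller do 2-byte accesses? */
	printf("2-byte ok: %s\n", (DMA_BUSWIDTHS & BIT(W2)) ? "yes" : "no");
	printf("3-byte ok: %s\n", (DMA_BUSWIDTHS & BIT(3)) ? "yes" : "no");
	return 0;
}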
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index f15712f2fec6..ac336a961dea 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -859,9 +859,6 @@ int dma_async_device_register(struct dma_device *device)
BUG_ON(!device->device_issue_pending);
BUG_ON(!device->dev);
- WARN(dma_has_cap(DMA_SLAVE, device->cap_mask) && !device->directions,
- "this driver doesn't support generic slave capabilities reporting\n");
-
/* note: this only matters in the
* CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
*/
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 276157f22612..53dbd3b3384c 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -260,6 +260,13 @@ static int edma_terminate_all(struct dma_chan *chan)
*/
if (echan->edesc) {
int cyclic = echan->edesc->cyclic;
+
+ /*
+ * free the running request descriptor
+ * since it is not in any of the vdesc lists
+ */
+ edma_desc_free(&echan->edesc->vdesc);
+
echan->edesc = NULL;
edma_stop(echan->ch_num);
/* Move the cyclic channel back to default queue */
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
index 15cab7d79525..b4634109e010 100644
--- a/drivers/dma/moxart-dma.c
+++ b/drivers/dma/moxart-dma.c
@@ -193,8 +193,10 @@ static int moxart_terminate_all(struct dma_chan *chan)
spin_lock_irqsave(&ch->vc.lock, flags);
- if (ch->desc)
+ if (ch->desc) {
+ moxart_dma_desc_free(&ch->desc->vd);
ch->desc = NULL;
+ }
ctrl = readl(ch->base + REG_OFF_CTRL);
ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 7dd6dd121681..167dbaf65742 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -981,6 +981,7 @@ static int omap_dma_terminate_all(struct dma_chan *chan)
* c->desc is NULL and exit.)
*/
if (c->desc) {
+ omap_dma_desc_free(&c->desc->vd);
c->desc = NULL;
/* Avoid stopping the dma twice */
if (!c->paused)
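
The four dmaengine hunks above (bcm2835, edma, moxart, omap) fix the same leak: the descriptor currently being executed has already been taken off the virt-dma lists, so the generic descriptor reclaim in terminate_all never sees it and it must be freed explicitly before the pointer is cleared. The shape of the fix, as a sketch with invented types:

#include <stdio.h>
#include <stdlib.h>

struct desc { int id; };
struct chan { struct desc *desc; };	/* in-flight descriptor, off all lists */

static void desc_free(struct desc *d)
{
	free(d);
}

static void terminate_all(struct chan *c)
{
	if (c->desc) {
		desc_free(c->desc);	/* not on any list: free it here */
		c->desc = NULL;		/* then forget it before aborting HW */
	}
	/* ... stop the hardware, reclaim queued descriptors ... */
}

int main(void)
{
	struct chan c = { .desc = malloc(sizeof(struct desc)) };

	terminate_all(&c);
	puts(c.desc ? "leak" : "freed");
	return 0;
}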
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 69fac068669f..6e45a43ffe84 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -17,7 +17,9 @@
*/
static const char dmi_empty_string[] = " ";
-static u16 __initdata dmi_ver;
+static u32 dmi_ver __initdata;
+static u32 dmi_len;
+static u16 dmi_num;
/*
* Catch too early calls to dmi_check_system():
*/
@@ -78,7 +80,7 @@ static const char * __init dmi_string(const struct dmi_header *dm, u8 s)
* We have to be cautious here. We have seen BIOSes with DMI pointers
* pointing to completely the wrong place for example
*/
-static void dmi_table(u8 *buf, u32 len, int num,
+static void dmi_table(u8 *buf,
void (*decode)(const struct dmi_header *, void *),
void *private_data)
{
@@ -86,10 +88,13 @@ static void dmi_table(u8 *buf, u32 len, int num,
int i = 0;
/*
- * Stop when we see all the items the table claimed to have
- * OR we run off the end of the table (also happens)
+ * Stop when we have seen all the items the table claimed to have
+ * (SMBIOS < 3.0 only) OR we reach an end-of-table marker OR we run
+ * off the end of the table (should never happen but sometimes does
+ * on bogus implementations.)
*/
- while ((i < num) && (data - buf + sizeof(struct dmi_header)) <= len) {
+ while ((!dmi_num || i < dmi_num) &&
+ (data - buf + sizeof(struct dmi_header)) <= dmi_len) {
const struct dmi_header *dm = (const struct dmi_header *)data;
/*
@@ -98,9 +103,9 @@ static void dmi_table(u8 *buf, u32 len, int num,
* table in dmi_decode or dmi_string
*/
data += dm->length;
- while ((data - buf < len - 1) && (data[0] || data[1]))
+ while ((data - buf < dmi_len - 1) && (data[0] || data[1]))
data++;
- if (data - buf < len - 1)
+ if (data - buf < dmi_len - 1)
decode(dm, private_data);
/*
@@ -115,8 +120,6 @@ static void dmi_table(u8 *buf, u32 len, int num,
}
static phys_addr_t dmi_base;
-static u32 dmi_len;
-static u16 dmi_num;
static int __init dmi_walk_early(void (*decode)(const struct dmi_header *,
void *))
@@ -127,7 +130,7 @@ static int __init dmi_walk_early(void (*decode)(const struct dmi_header *,
if (buf == NULL)
return -1;
- dmi_table(buf, dmi_len, dmi_num, decode, NULL);
+ dmi_table(buf, decode, NULL);
add_device_randomness(buf, dmi_len);
@@ -198,7 +201,7 @@ static void __init dmi_save_uuid(const struct dmi_header *dm, int slot,
* the UUID are supposed to be little-endian encoded. The specification
* says that this is the de facto standard.
*/
- if (dmi_ver >= 0x0206)
+ if (dmi_ver >= 0x020600)
sprintf(s, "%pUL", d);
else
sprintf(s, "%pUB", d);
@@ -470,7 +473,7 @@ static void __init dmi_format_ids(char *buf, size_t len)
*/
static int __init dmi_present(const u8 *buf)
{
- int smbios_ver;
+ u32 smbios_ver;
if (memcmp(buf, "_SM_", 4) == 0 &&
buf[5] < 32 && dmi_checksum(buf, buf[5])) {
@@ -503,14 +506,16 @@ static int __init dmi_present(const u8 *buf)
if (dmi_walk_early(dmi_decode) == 0) {
if (smbios_ver) {
dmi_ver = smbios_ver;
- pr_info("SMBIOS %d.%d present.\n",
- dmi_ver >> 8, dmi_ver & 0xFF);
+ pr_info("SMBIOS %d.%d%s present.\n",
+ dmi_ver >> 8, dmi_ver & 0xFF,
+ (dmi_ver < 0x0300) ? "" : ".x");
} else {
dmi_ver = (buf[14] & 0xF0) << 4 |
(buf[14] & 0x0F);
pr_info("Legacy DMI %d.%d present.\n",
dmi_ver >> 8, dmi_ver & 0xFF);
}
+ dmi_ver <<= 8;
dmi_format_ids(dmi_ids_string, sizeof(dmi_ids_string));
printk(KERN_DEBUG "DMI: %s\n", dmi_ids_string);
return 0;
@@ -528,25 +533,16 @@ static int __init dmi_smbios3_present(const u8 *buf)
{
if (memcmp(buf, "_SM3_", 5) == 0 &&
buf[6] < 32 && dmi_checksum(buf, buf[6])) {
- dmi_ver = get_unaligned_be16(buf + 7);
+ dmi_ver = get_unaligned_be32(buf + 6);
+ dmi_ver &= 0xFFFFFF;
+ dmi_num = 0; /* No longer specified */
dmi_len = get_unaligned_le32(buf + 12);
dmi_base = get_unaligned_le64(buf + 16);
- /*
- * The 64-bit SMBIOS 3.0 entry point no longer has a field
- * containing the number of structures present in the table.
- * Instead, it defines the table size as a maximum size, and
- * relies on the end-of-table structure type (#127) to be used
- * to signal the end of the table.
- * So let's define dmi_num as an upper bound as well: each
- * structure has a 4 byte header, so dmi_len / 4 is an upper
- * bound for the number of structures in the table.
- */
- dmi_num = dmi_len / 4;
-
if (dmi_walk_early(dmi_decode) == 0) {
- pr_info("SMBIOS %d.%d present.\n",
- dmi_ver >> 8, dmi_ver & 0xFF);
+ pr_info("SMBIOS %d.%d.%d present.\n",
+ dmi_ver >> 16, (dmi_ver >> 8) & 0xFF,
+ dmi_ver & 0xFF);
dmi_format_ids(dmi_ids_string, sizeof(dmi_ids_string));
pr_debug("DMI: %s\n", dmi_ids_string);
return 0;
@@ -901,7 +897,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
if (buf == NULL)
return -1;
- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
+ dmi_table(buf, decode, private_data);
dmi_unmap(buf);
return 0;
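
After this patch dmi_ver is kept as a packed 24-bit value, major << 16 | minor << 8 | docrev: the SMBIOS 3.0 entry point supplies all three bytes directly (get_unaligned_be32(buf + 6) & 0xFFFFFF), while the legacy paths compute major.minor and shift left by 8 so later comparisons such as dmi_ver >= 0x020600 work on one encoding. A small decode sketch:

#include <stdio.h>
#include <stdint.h>

static void print_smbios_ver(uint32_t dmi_ver)
{
	printf("SMBIOS %u.%u.%u\n",
	       dmi_ver >> 16,		/* major */
	       (dmi_ver >> 8) & 0xFF,	/* minor */
	       dmi_ver & 0xFF);		/* docrev (0 on pre-3.0 entry points) */
}

int main(void)
{
	print_smbios_ver(0x020600);	/* 2.6, as tested in dmi_save_uuid() */
	print_smbios_ver(0x030000);	/* 3.0.0 from a _SM3_ entry point */
	return 0;
}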
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index dcae482a9a17..e29560e6b40b 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -175,7 +175,7 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
unsigned long initrd_addr;
u64 initrd_size = 0;
unsigned long fdt_addr = 0; /* Original DTB */
- u64 fdt_size = 0; /* We don't get size from configuration table */
+ unsigned long fdt_size = 0;
char *cmdline_ptr = NULL;
int cmdline_size = 0;
unsigned long new_fdt_addr;
@@ -239,8 +239,7 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
} else {
status = handle_cmdline_files(sys_table, image, cmdline_ptr,
"dtb=",
- ~0UL, (unsigned long *)&fdt_addr,
- (unsigned long *)&fdt_size);
+ ~0UL, &fdt_addr, &fdt_size);
if (status != EFI_SUCCESS) {
pr_efi_err(sys_table, "Failed to load device tree!\n");
@@ -252,7 +251,7 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
pr_efi(sys_table, "Using DTB from command line\n");
} else {
/* Look for a device tree configuration table entry. */
- fdt_addr = (uintptr_t)get_fdt(sys_table);
+ fdt_addr = (uintptr_t)get_fdt(sys_table, &fdt_size);
if (fdt_addr)
pr_efi(sys_table, "Using DTB from configuration table\n");
}
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index 47437b16b186..e334a01cf92f 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -41,7 +41,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
unsigned long fdt_addr,
unsigned long fdt_size);
-void *get_fdt(efi_system_table_t *sys_table);
+void *get_fdt(efi_system_table_t *sys_table, unsigned long *fdt_size);
void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
unsigned long desc_size, efi_memory_desc_t *runtime_map,
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index 91da56c4fd54..ef5d764e2a27 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -323,7 +323,7 @@ fail:
return EFI_LOAD_ERROR;
}
-void *get_fdt(efi_system_table_t *sys_table)
+void *get_fdt(efi_system_table_t *sys_table, unsigned long *fdt_size)
{
efi_guid_t fdt_guid = DEVICE_TREE_GUID;
efi_config_table_t *tables;
@@ -336,6 +336,11 @@ void *get_fdt(efi_system_table_t *sys_table)
for (i = 0; i < sys_table->nr_tables; i++)
if (efi_guidcmp(tables[i].guid, fdt_guid) == 0) {
fdt = (void *) tables[i].table;
+ if (fdt_check_header(fdt) != 0) {
+ pr_efi_err(sys_table, "Invalid header detected on UEFI supplied FDT, ignoring ...\n");
+ return NULL;
+ }
+ *fdt_size = fdt_totalsize(fdt);
break;
}
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index a6952ba343a8..a65b75161aa4 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -334,7 +334,7 @@ static struct irq_domain_ops mpc8xxx_gpio_irq_ops = {
.xlate = irq_domain_xlate_twocell,
};
-static struct of_device_id mpc8xxx_gpio_ids[] __initdata = {
+static struct of_device_id mpc8xxx_gpio_ids[] = {
{ .compatible = "fsl,mpc8349-gpio", },
{ .compatible = "fsl,mpc8572-gpio", },
{ .compatible = "fsl,mpc8610-gpio", },
diff --git a/drivers/gpio/gpio-syscon.c b/drivers/gpio/gpio-syscon.c
index 257e2989215c..045a952576c7 100644
--- a/drivers/gpio/gpio-syscon.c
+++ b/drivers/gpio/gpio-syscon.c
@@ -219,7 +219,7 @@ static int syscon_gpio_probe(struct platform_device *pdev)
ret = of_property_read_u32_index(np, "gpio,syscon-dev", 2,
&priv->dir_reg_offset);
if (ret)
- dev_err(dev, "can't read the dir register offset!\n");
+ dev_dbg(dev, "can't read the dir register offset!\n");
priv->dir_reg_offset <<= 3;
}
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index c0929d938ced..df990f29757a 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -201,6 +201,10 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
if (!handler)
return AE_BAD_PARAMETER;
+ pin = acpi_gpiochip_pin_to_gpio_offset(chip, pin);
+ if (pin < 0)
+ return AE_BAD_PARAMETER;
+
desc = gpiochip_request_own_desc(chip, pin, "ACPI:Event");
if (IS_ERR(desc)) {
dev_err(chip->dev, "Failed to request GPIO\n");
@@ -551,6 +555,12 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
struct gpio_desc *desc;
bool found;
+ pin = acpi_gpiochip_pin_to_gpio_offset(chip, pin);
+ if (pin < 0) {
+ status = AE_BAD_PARAMETER;
+ goto out;
+ }
+
mutex_lock(&achip->conn_lock);
found = false;
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index f6d04c7b5115..b6f076b213bc 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -525,17 +525,6 @@ void drm_framebuffer_reference(struct drm_framebuffer *fb)
}
EXPORT_SYMBOL(drm_framebuffer_reference);
-static void drm_framebuffer_free_bug(struct kref *kref)
-{
- BUG();
-}
-
-static void __drm_framebuffer_unreference(struct drm_framebuffer *fb)
-{
- DRM_DEBUG("%p: FB ID: %d (%d)\n", fb, fb->base.id, atomic_read(&fb->refcount.refcount));
- kref_put(&fb->refcount, drm_framebuffer_free_bug);
-}
-
/**
* drm_framebuffer_unregister_private - unregister a private fb from the lookup idr
* @fb: fb to unregister
@@ -1320,7 +1309,7 @@ void drm_plane_force_disable(struct drm_plane *plane)
return;
}
/* disconnect the plane from the fb and crtc: */
- __drm_framebuffer_unreference(plane->old_fb);
+ drm_framebuffer_unreference(plane->old_fb);
plane->old_fb = NULL;
plane->fb = NULL;
plane->crtc = NULL;
@@ -2132,7 +2121,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
connector = drm_connector_find(dev, out_resp->connector_id);
if (!connector) {
ret = -ENOENT;
- goto out;
+ goto out_unlock;
}
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++)
@@ -2212,6 +2201,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
out:
drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+out_unlock:
mutex_unlock(&dev->mode_config.mutex);
return ret;
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 732cb6f8e653..4c0aa97aaf03 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -287,6 +287,7 @@ int drm_load_edid_firmware(struct drm_connector *connector)
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
+ drm_edid_to_eld(connector, edid);
kfree(edid);
return ret;
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 6591d48c1b9d..3fee587bc284 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -174,6 +174,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
struct edid *edid = (struct edid *) connector->edid_blob_ptr->data;
count = drm_add_edid_modes(connector, edid);
+ drm_edid_to_eld(connector, edid);
} else
count = (*connector_funcs->get_modes)(connector);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index c300e22da8ac..33a10ce967ea 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -147,6 +147,7 @@ struct fimd_win_data {
unsigned int ovl_height;
unsigned int fb_width;
unsigned int fb_height;
+ unsigned int fb_pitch;
unsigned int bpp;
unsigned int pixel_format;
dma_addr_t dma_addr;
@@ -532,13 +533,14 @@ static void fimd_win_mode_set(struct exynos_drm_crtc *crtc,
win_data->offset_y = plane->crtc_y;
win_data->ovl_width = plane->crtc_width;
win_data->ovl_height = plane->crtc_height;
+ win_data->fb_pitch = plane->pitch;
win_data->fb_width = plane->fb_width;
win_data->fb_height = plane->fb_height;
win_data->dma_addr = plane->dma_addr[0] + offset;
win_data->bpp = plane->bpp;
win_data->pixel_format = plane->pixel_format;
- win_data->buf_offsize = (plane->fb_width - plane->crtc_width) *
- (plane->bpp >> 3);
+ win_data->buf_offsize =
+ plane->pitch - (plane->crtc_width * (plane->bpp >> 3));
win_data->line_size = plane->crtc_width * (plane->bpp >> 3);
DRM_DEBUG_KMS("offset_x = %d, offset_y = %d\n",
@@ -704,7 +706,7 @@ static void fimd_win_commit(struct exynos_drm_crtc *crtc, int zpos)
writel(val, ctx->regs + VIDWx_BUF_START(win, 0));
/* buffer end address */
- size = win_data->fb_width * win_data->ovl_height * (win_data->bpp >> 3);
+ size = win_data->fb_pitch * win_data->ovl_height * (win_data->bpp >> 3);
val = (unsigned long)(win_data->dma_addr + size);
writel(val, ctx->regs + VIDWx_BUF_END(win, 0));
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 3518bc4654c5..2e3bc57ea50e 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -55,6 +55,7 @@ struct hdmi_win_data {
unsigned int fb_x;
unsigned int fb_y;
unsigned int fb_width;
+ unsigned int fb_pitch;
unsigned int fb_height;
unsigned int src_width;
unsigned int src_height;
@@ -438,7 +439,7 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
} else {
luma_addr[0] = win_data->dma_addr;
chroma_addr[0] = win_data->dma_addr
- + (win_data->fb_width * win_data->fb_height);
+ + (win_data->fb_pitch * win_data->fb_height);
}
if (win_data->scan_flags & DRM_MODE_FLAG_INTERLACE) {
@@ -447,8 +448,8 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
luma_addr[1] = luma_addr[0] + 0x40;
chroma_addr[1] = chroma_addr[0] + 0x40;
} else {
- luma_addr[1] = luma_addr[0] + win_data->fb_width;
- chroma_addr[1] = chroma_addr[0] + win_data->fb_width;
+ luma_addr[1] = luma_addr[0] + win_data->fb_pitch;
+ chroma_addr[1] = chroma_addr[0] + win_data->fb_pitch;
}
} else {
ctx->interlace = false;
@@ -469,10 +470,10 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK);
/* setting size of input image */
- vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(win_data->fb_width) |
+ vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(win_data->fb_pitch) |
VP_IMG_VSIZE(win_data->fb_height));
/* chroma height has to be reduced by 2 to avoid chroma distortions */
- vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(win_data->fb_width) |
+ vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(win_data->fb_pitch) |
VP_IMG_VSIZE(win_data->fb_height / 2));
vp_reg_write(res, VP_SRC_WIDTH, win_data->src_width);
@@ -559,7 +560,7 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
/* converting dma address base and source offset */
dma_addr = win_data->dma_addr
+ (win_data->fb_x * win_data->bpp >> 3)
- + (win_data->fb_y * win_data->fb_width * win_data->bpp >> 3);
+ + (win_data->fb_y * win_data->fb_pitch);
src_x_offset = 0;
src_y_offset = 0;
@@ -576,7 +577,8 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
MXR_GRP_CFG_FORMAT_VAL(fmt), MXR_GRP_CFG_FORMAT_MASK);
/* setup geometry */
- mixer_reg_write(res, MXR_GRAPHIC_SPAN(win), win_data->fb_width);
+ mixer_reg_write(res, MXR_GRAPHIC_SPAN(win),
+ win_data->fb_pitch / (win_data->bpp >> 3));
/* setup display size */
if (ctx->mxr_ver == MXR_VER_128_0_0_184 &&
@@ -961,6 +963,7 @@ static void mixer_win_mode_set(struct exynos_drm_crtc *crtc,
win_data->fb_y = plane->fb_y;
win_data->fb_width = plane->fb_width;
win_data->fb_height = plane->fb_height;
+ win_data->fb_pitch = plane->pitch;
win_data->src_width = plane->src_width;
win_data->src_height = plane->src_height;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index cc6ea53d2b81..5c66b568bb81 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1095,6 +1095,7 @@ static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
/* Gunit-Display CZ domain, 0x182028-0x1821CF */
s->gu_ctl0 = I915_READ(VLV_GU_CTL0);
s->gu_ctl1 = I915_READ(VLV_GU_CTL1);
+ s->pcbr = I915_READ(VLV_PCBR);
s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2);
/*
@@ -1189,6 +1190,7 @@ static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
/* Gunit-Display CZ domain, 0x182028-0x1821CF */
I915_WRITE(VLV_GU_CTL0, s->gu_ctl0);
I915_WRITE(VLV_GU_CTL1, s->gu_ctl1);
+ I915_WRITE(VLV_PCBR, s->pcbr);
I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
}
@@ -1197,19 +1199,7 @@ int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
u32 val;
int err;
- val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
- WARN_ON(!!(val & VLV_GFX_CLK_FORCE_ON_BIT) == force_on);
-
#define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)
- /* Wait for a previous force-off to settle */
- if (force_on) {
- err = wait_for(!COND, 20);
- if (err) {
- DRM_ERROR("timeout waiting for GFX clock force-off (%08x)\n",
- I915_READ(VLV_GTLC_SURVIVABILITY_REG));
- return err;
- }
- }
val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8727086cf48c..b4faa2df9d3d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1094,6 +1094,7 @@ struct vlv_s0ix_state {
/* Display 2 CZ domain */
u32 gu_ctl0;
u32 gu_ctl1;
+ u32 pcbr;
u32 clock_gate_dis2;
};
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 5b205863b659..27ea6bdebce7 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2737,24 +2737,11 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
WARN_ON(i915_verify_lists(ring->dev));
- /* Move any buffers on the active list that are no longer referenced
- * by the ringbuffer to the flushing/inactive lists as appropriate,
- * before we free the context associated with the requests.
+ /* Retire requests first as we use it above for the early return.
+ * If we retire requests last, we may use a later seqno and so clear
+ * the requests lists without clearing the active list, leading to
+ * confusion.
*/
- while (!list_empty(&ring->active_list)) {
- struct drm_i915_gem_object *obj;
-
- obj = list_first_entry(&ring->active_list,
- struct drm_i915_gem_object,
- ring_list);
-
- if (!i915_gem_request_completed(obj->last_read_req, true))
- break;
-
- i915_gem_object_move_to_inactive(obj);
- }
-
-
while (!list_empty(&ring->request_list)) {
struct drm_i915_gem_request *request;
struct intel_ringbuffer *ringbuf;
@@ -2789,6 +2776,23 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
i915_gem_free_request(request);
}
+ /* Move any buffers on the active list that are no longer referenced
+ * by the ringbuffer to the flushing/inactive lists as appropriate,
+ * before we free the context associated with the requests.
+ */
+ while (!list_empty(&ring->active_list)) {
+ struct drm_i915_gem_object *obj;
+
+ obj = list_first_entry(&ring->active_list,
+ struct drm_i915_gem_object,
+ ring_list);
+
+ if (!i915_gem_request_completed(obj->last_read_req, true))
+ break;
+
+ i915_gem_object_move_to_inactive(obj);
+ }
+
if (unlikely(ring->trace_irq_req &&
i915_gem_request_completed(ring->trace_irq_req, true))) {
ring->irq_put(ring);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index b773368fc62c..38a742532c4f 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1487,7 +1487,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;
}
- if (i915_needs_cmd_parser(ring)) {
+ if (i915_needs_cmd_parser(ring) && args->batch_len) {
batch_obj = i915_gem_execbuffer_parse(ring,
&shadow_exec_entry,
eb,
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 6d22128d97b1..f75173c20f47 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2438,8 +2438,15 @@ intel_find_plane_obj(struct intel_crtc *intel_crtc,
if (!intel_crtc->base.primary->fb)
return;
- if (intel_alloc_plane_obj(intel_crtc, plane_config))
+ if (intel_alloc_plane_obj(intel_crtc, plane_config)) {
+ struct drm_plane *primary = intel_crtc->base.primary;
+
+ primary->state->crtc = &intel_crtc->base;
+ primary->crtc = &intel_crtc->base;
+ update_state_fb(primary);
+
return;
+ }
kfree(intel_crtc->base.primary->fb);
intel_crtc->base.primary->fb = NULL;
@@ -2462,11 +2469,15 @@ intel_find_plane_obj(struct intel_crtc *intel_crtc,
continue;
if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
+ struct drm_plane *primary = intel_crtc->base.primary;
+
if (obj->tiling_mode != I915_TILING_NONE)
dev_priv->preserve_bios_swizzle = true;
drm_framebuffer_reference(c->primary->fb);
- intel_crtc->base.primary->fb = c->primary->fb;
+ primary->fb = c->primary->fb;
+ primary->state->crtc = &intel_crtc->base;
+ primary->crtc = &intel_crtc->base;
obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
break;
}
@@ -6663,7 +6674,6 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
plane_config->size);
crtc->base.primary->fb = fb;
- update_state_fb(crtc->base.primary);
}
static void chv_crtc_clock_get(struct intel_crtc *crtc,
@@ -7704,7 +7714,6 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
plane_config->size);
crtc->base.primary->fb = fb;
- update_state_fb(crtc->base.primary);
return;
error:
@@ -7798,7 +7807,6 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
plane_config->size);
crtc->base.primary->fb = fb;
- update_state_fb(crtc->base.primary);
}
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 0a52c44ad03d..9c5451c97942 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -1322,7 +1322,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
drm_modeset_lock_all(dev);
plane = drm_plane_find(dev, set->plane_id);
- if (!plane) {
+ if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) {
ret = -ENOENT;
goto out_unlock;
}
@@ -1349,7 +1349,7 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
drm_modeset_lock_all(dev);
plane = drm_plane_find(dev, get->plane_id);
- if (!plane) {
+ if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) {
ret = -ENOENT;
goto out_unlock;
}
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index c648e1996dab..243a36c93b8f 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -2129,6 +2129,7 @@
#define VCE_UENC_REG_CLOCK_GATING 0x207c0
#define VCE_SYS_INT_EN 0x21300
# define VCE_SYS_INT_TRAP_INTERRUPT_EN (1 << 3)
+#define VCE_LMI_VCPU_CACHE_40BIT_BAR 0x2145c
#define VCE_LMI_CTRL2 0x21474
#define VCE_LMI_CTRL 0x21498
#define VCE_LMI_VM_CTRL 0x214a0
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 5587603b4a89..33d5a4f4eebd 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1565,6 +1565,7 @@ struct radeon_dpm {
int new_active_crtc_count;
u32 current_active_crtcs;
int current_active_crtc_count;
+ bool single_display;
struct radeon_dpm_dynamic_state dyn_state;
struct radeon_dpm_fan fan;
u32 tdp_limit;
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 63ccb8fa799c..d27e4ccb848c 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -76,7 +76,7 @@ static bool igp_read_bios_from_vram(struct radeon_device *rdev)
static bool radeon_read_bios(struct radeon_device *rdev)
{
- uint8_t __iomem *bios;
+ uint8_t __iomem *bios, val1, val2;
size_t size;
rdev->bios = NULL;
@@ -86,15 +86,19 @@ static bool radeon_read_bios(struct radeon_device *rdev)
return false;
}
- if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
+ val1 = readb(&bios[0]);
+ val2 = readb(&bios[1]);
+
+ if (size == 0 || val1 != 0x55 || val2 != 0xaa) {
pci_unmap_rom(rdev->pdev, bios);
return false;
}
- rdev->bios = kmemdup(bios, size, GFP_KERNEL);
+ rdev->bios = kzalloc(size, GFP_KERNEL);
if (rdev->bios == NULL) {
pci_unmap_rom(rdev->pdev, bios);
return false;
}
+ memcpy_fromio(rdev->bios, bios, size);
pci_unmap_rom(rdev->pdev, bios);
return true;
}
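
The radeon_bios fix is about the __iomem annotation: pci_map_rom() returns I/O memory, which must be read with readb()/memcpy_fromio() rather than dereferenced directly or handed to kmemdup(), since on some architectures plain loads from such mappings are not equivalent to the accessor functions. The validate-then-copy shape, modeled in userspace with an explicit accessor (rom_readb() here is a stand-in, not the kernel helper):

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <stdlib.h>

/* Stand-in for the kernel's readb(): all ROM reads funnel through it. */
static uint8_t rom_readb(const volatile uint8_t *p)
{
	return *p;
}

static uint8_t *copy_rom(const volatile uint8_t *rom, size_t size)
{
	if (size < 2 || rom_readb(&rom[0]) != 0x55 || rom_readb(&rom[1]) != 0xaa)
		return NULL;			/* no PCI ROM signature */

	uint8_t *copy = malloc(size);
	if (copy)
		memcpy(copy, (const void *)rom, size);	/* kernel: memcpy_fromio() */
	return copy;
}

int main(void)
{
	uint8_t rom[4] = { 0x55, 0xaa, 0x12, 0x34 };
	uint8_t *bios = copy_rom(rom, sizeof(rom));

	printf("%s\n", bios ? "valid ROM image" : "rejected");
	free(bios);
	return 0;
}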
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index a69bd441dd2d..572b4dbec186 100644
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -122,7 +122,6 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
it = interval_tree_iter_first(&rmn->objects, start, end);
while (it) {
struct radeon_bo *bo;
- struct fence *fence;
int r;
bo = container_of(it, struct radeon_bo, mn_it);
@@ -134,12 +133,10 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
continue;
}
- fence = reservation_object_get_excl(bo->tbo.resv);
- if (fence) {
- r = radeon_fence_wait((struct radeon_fence *)fence, false);
- if (r)
- DRM_ERROR("(%d) failed to wait for user bo\n", r);
- }
+ r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true,
+ false, MAX_SCHEDULE_TIMEOUT);
+ if (r)
+ DRM_ERROR("(%d) failed to wait for user bo\n", r);
radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 33cf4108386d..c1ba83a8dd8c 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -837,12 +837,8 @@ static void radeon_dpm_thermal_work_handler(struct work_struct *work)
radeon_pm_compute_clocks(rdev);
}
-static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
- enum radeon_pm_state_type dpm_state)
+static bool radeon_dpm_single_display(struct radeon_device *rdev)
{
- int i;
- struct radeon_ps *ps;
- u32 ui_class;
bool single_display = (rdev->pm.dpm.new_active_crtc_count < 2) ?
true : false;
@@ -858,6 +854,17 @@ static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
if (single_display && (r600_dpm_get_vrefresh(rdev) >= 120))
single_display = false;
+ return single_display;
+}
+
+static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
+ enum radeon_pm_state_type dpm_state)
+{
+ int i;
+ struct radeon_ps *ps;
+ u32 ui_class;
+ bool single_display = radeon_dpm_single_display(rdev);
+
/* certain older asics have a separate 3D performance state,
* so try that first if the user selected performance
*/
@@ -983,6 +990,7 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
struct radeon_ps *ps;
enum radeon_pm_state_type dpm_state;
int ret;
+ bool single_display = radeon_dpm_single_display(rdev);
/* if dpm init failed */
if (!rdev->pm.dpm_enabled)
@@ -1007,6 +1015,9 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
/* vce just modifies an existing state so force a change */
if (ps->vce_active != rdev->pm.dpm.vce_active)
goto force;
+ /* user has made a display change (such as timing) */
+ if (rdev->pm.dpm.single_display != single_display)
+ goto force;
if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) {
/* for pre-BTC and APUs if the num crtcs changed but state is the same,
* all we need to do is update the display configuration.
@@ -1069,6 +1080,7 @@ force:
rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
+ rdev->pm.dpm.single_display = single_display;
/* wait for the rings to drain */
for (i = 0; i < RADEON_NUM_RINGS; i++) {
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 2456f69efd23..8c7872339c2a 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -495,7 +495,7 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
seq_printf(m, "%u dwords in ring\n", count);
- if (!ring->ready)
+ if (!ring->ring)
return 0;
/* print 8 dw before current rptr as often it's the last executed
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index d02aa1d0f588..b292aca0f342 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -598,6 +598,10 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
enum dma_data_direction direction = write ?
DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+ /* double check that we don't free the table twice */
+ if (!ttm->sg->sgl)
+ return;
+
/* free the sg table and pages again */
dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
diff --git a/drivers/gpu/drm/radeon/vce_v2_0.c b/drivers/gpu/drm/radeon/vce_v2_0.c
index 1ac7bb825a1b..fbbe78fbd087 100644
--- a/drivers/gpu/drm/radeon/vce_v2_0.c
+++ b/drivers/gpu/drm/radeon/vce_v2_0.c
@@ -156,6 +156,9 @@ int vce_v2_0_resume(struct radeon_device *rdev)
WREG32(VCE_LMI_SWAP_CNTL1, 0);
WREG32(VCE_LMI_VM_CTRL, 0);
+ WREG32(VCE_LMI_VCPU_CACHE_40BIT_BAR, addr >> 8);
+
+ addr &= 0xff;
size = RADEON_GPU_PAGE_ALIGN(rdev->vce_fw->size);
WREG32(VCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff);
WREG32(VCE_VCPU_CACHE_SIZE0, size);
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index 1096da327130..75c6d2103e07 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -659,7 +659,7 @@ static irqreturn_t bma180_trigger_handler(int irq, void *p)
mutex_lock(&data->mutex);
- for_each_set_bit(bit, indio_dev->buffer->scan_mask,
+ for_each_set_bit(bit, indio_dev->active_scan_mask,
indio_dev->masklength) {
ret = bma180_get_data_reg(data, bit);
if (ret < 0) {
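
This bma180 hunk, and the matching bmc150, kxcjk-1013, bmg160, kmx61, at91 and ti_am335x hunks below, all make the same switch: trigger handlers must walk indio_dev->active_scan_mask rather than buffer->scan_mask, since with buffer demuxing the latter no longer reflects which channels are actually enabled. Iterating the set bits is the whole trick; a userspace model of for_each_set_bit():

#include <stdio.h>

/* Userspace stand-in for the kernel's for_each_set_bit(). */
#define for_each_set_bit(bit, mask, nbits) \
	for ((bit) = 0; (bit) < (nbits); (bit)++) \
		if (!((mask) & (1UL << (bit)))) ; else

int main(void)
{
	unsigned long active_scan_mask = 0x0b;	/* channels 0, 1 and 3 enabled */
	int bit;

	for_each_set_bit(bit, active_scan_mask, 8)
		printf("read channel %d\n", bit);  /* driver reads one reg per bit */
	return 0;
}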
diff --git a/drivers/iio/accel/bmc150-accel.c b/drivers/iio/accel/bmc150-accel.c
index 066d0c04072c..75567fd457dc 100644
--- a/drivers/iio/accel/bmc150-accel.c
+++ b/drivers/iio/accel/bmc150-accel.c
@@ -168,14 +168,14 @@ static const struct {
int val;
int val2;
u8 bw_bits;
-} bmc150_accel_samp_freq_table[] = { {7, 810000, 0x08},
- {15, 630000, 0x09},
- {31, 250000, 0x0A},
- {62, 500000, 0x0B},
- {125, 0, 0x0C},
- {250, 0, 0x0D},
- {500, 0, 0x0E},
- {1000, 0, 0x0F} };
+} bmc150_accel_samp_freq_table[] = { {15, 620000, 0x08},
+ {31, 260000, 0x09},
+ {62, 500000, 0x0A},
+ {125, 0, 0x0B},
+ {250, 0, 0x0C},
+ {500, 0, 0x0D},
+ {1000, 0, 0x0E},
+ {2000, 0, 0x0F} };
static const struct {
int bw_bits;
@@ -840,7 +840,7 @@ static int bmc150_accel_validate_trigger(struct iio_dev *indio_dev,
}
static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(
- "7.810000 15.630000 31.250000 62.500000 125 250 500 1000");
+ "15.620000 31.260000 62.50000 125 250 500 1000 2000");
static struct attribute *bmc150_accel_attributes[] = {
&iio_const_attr_sampling_frequency_available.dev_attr.attr,
@@ -986,7 +986,7 @@ static irqreturn_t bmc150_accel_trigger_handler(int irq, void *p)
int bit, ret, i = 0;
mutex_lock(&data->mutex);
- for_each_set_bit(bit, indio_dev->buffer->scan_mask,
+ for_each_set_bit(bit, indio_dev->active_scan_mask,
indio_dev->masklength) {
ret = i2c_smbus_read_word_data(data->client,
BMC150_ACCEL_AXIS_TO_REG(bit));
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index 567de269cc00..1a6379525fa4 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -956,7 +956,7 @@ static irqreturn_t kxcjk1013_trigger_handler(int irq, void *p)
mutex_lock(&data->mutex);
- for_each_set_bit(bit, indio_dev->buffer->scan_mask,
+ for_each_set_bit(bit, indio_dev->active_scan_mask,
indio_dev->masklength) {
ret = kxcjk1013_get_acc_reg(data, bit);
if (ret < 0) {
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 202daf889be2..46379b1fb25b 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -137,7 +137,8 @@ config AXP288_ADC
config CC10001_ADC
tristate "Cosmic Circuits 10001 ADC driver"
- depends on HAS_IOMEM || HAVE_CLK || REGULATOR
+ depends on HAVE_CLK || REGULATOR
+ depends on HAS_IOMEM
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index ff61ae55dd3f..8a0eb4a04fb5 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -544,7 +544,6 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
{
struct iio_dev *idev = iio_trigger_get_drvdata(trig);
struct at91_adc_state *st = iio_priv(idev);
- struct iio_buffer *buffer = idev->buffer;
struct at91_adc_reg_desc *reg = st->registers;
u32 status = at91_adc_readl(st, reg->trigger_register);
int value;
@@ -564,7 +563,7 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
at91_adc_writel(st, reg->trigger_register,
status | value);
- for_each_set_bit(bit, buffer->scan_mask,
+ for_each_set_bit(bit, idev->active_scan_mask,
st->num_channels) {
struct iio_chan_spec const *chan = idev->channels + bit;
at91_adc_writel(st, AT91_ADC_CHER,
@@ -579,7 +578,7 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
at91_adc_writel(st, reg->trigger_register,
status & ~value);
- for_each_set_bit(bit, buffer->scan_mask,
+ for_each_set_bit(bit, idev->active_scan_mask,
st->num_channels) {
struct iio_chan_spec const *chan = idev->channels + bit;
at91_adc_writel(st, AT91_ADC_CHDR,
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index 2e5cc4409f78..a0e7161f040c 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -188,12 +188,11 @@ static int tiadc_buffer_preenable(struct iio_dev *indio_dev)
static int tiadc_buffer_postenable(struct iio_dev *indio_dev)
{
struct tiadc_device *adc_dev = iio_priv(indio_dev);
- struct iio_buffer *buffer = indio_dev->buffer;
unsigned int enb = 0;
u8 bit;
tiadc_step_config(indio_dev);
- for_each_set_bit(bit, buffer->scan_mask, adc_dev->channels)
+ for_each_set_bit(bit, indio_dev->active_scan_mask, adc_dev->channels)
enb |= (get_adc_step_bit(adc_dev, bit) << 1);
adc_dev->buffer_en_ch_steps = enb;
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
index 8ec353c01d98..e63b8e76d4c3 100644
--- a/drivers/iio/adc/vf610_adc.c
+++ b/drivers/iio/adc/vf610_adc.c
@@ -141,9 +141,13 @@ struct vf610_adc {
struct regulator *vref;
struct vf610_adc_feature adc_feature;
+ u32 sample_freq_avail[5];
+
struct completion completion;
};
+static const u32 vf610_hw_avgs[] = { 1, 4, 8, 16, 32 };
+
#define VF610_ADC_CHAN(_idx, _chan_type) { \
.type = (_chan_type), \
.indexed = 1, \
@@ -180,35 +184,47 @@ static const struct iio_chan_spec vf610_adc_iio_channels[] = {
/* sentinel */
};
-/*
- * ADC sample frequency, unit is ADCK cycles.
- * ADC clk source is ipg clock, which is the same as bus clock.
- *
- * ADC conversion time = SFCAdder + AverageNum x (BCT + LSTAdder)
- * SFCAdder: fixed to 6 ADCK cycles
- * AverageNum: 1, 4, 8, 16, 32 samples for hardware average.
- * BCT (Base Conversion Time): fixed to 25 ADCK cycles for 12 bit mode
- * LSTAdder(Long Sample Time): fixed to 3 ADCK cycles
- *
- * By default, enable 12 bit resolution mode, clock source
- * set to ipg clock, So get below frequency group:
- */
-static const u32 vf610_sample_freq_avail[5] =
-{1941176, 559332, 286957, 145374, 73171};
+static inline void vf610_adc_calculate_rates(struct vf610_adc *info)
+{
+ unsigned long adck_rate, ipg_rate = clk_get_rate(info->clk);
+ int i;
+
+ /*
+ * Calculate ADC sample frequencies
+ * Sample time unit is ADCK cycles. ADCK clk source is ipg clock,
+ * which is the same as bus clock.
+ *
+ * ADC conversion time = SFCAdder + AverageNum x (BCT + LSTAdder)
+ * SFCAdder: fixed to 6 ADCK cycles
+ * AverageNum: 1, 4, 8, 16, 32 samples for hardware average.
+ * BCT (Base Conversion Time): fixed to 25 ADCK cycles for 12 bit mode
+ * LSTAdder(Long Sample Time): fixed to 3 ADCK cycles
+ */
+ adck_rate = ipg_rate / info->adc_feature.clk_div;
+ for (i = 0; i < ARRAY_SIZE(vf610_hw_avgs); i++)
+ info->sample_freq_avail[i] =
+ adck_rate / (6 + vf610_hw_avgs[i] * (25 + 3));
+}
static inline void vf610_adc_cfg_init(struct vf610_adc *info)
{
+ struct vf610_adc_feature *adc_feature = &info->adc_feature;
+
/* set default Configuration for ADC controller */
- info->adc_feature.clk_sel = VF610_ADCIOC_BUSCLK_SET;
- info->adc_feature.vol_ref = VF610_ADCIOC_VR_VREF_SET;
+ adc_feature->clk_sel = VF610_ADCIOC_BUSCLK_SET;
+ adc_feature->vol_ref = VF610_ADCIOC_VR_VREF_SET;
+
+ adc_feature->calibration = true;
+ adc_feature->ovwren = true;
+
+ adc_feature->res_mode = 12;
+ adc_feature->sample_rate = 1;
+ adc_feature->lpm = true;
- info->adc_feature.calibration = true;
- info->adc_feature.ovwren = true;
+ /* Use a safe ADCK which is below 20 MHz on all devices */
+ adc_feature->clk_div = 8;
- info->adc_feature.clk_div = 1;
- info->adc_feature.res_mode = 12;
- info->adc_feature.sample_rate = 1;
- info->adc_feature.lpm = true;
+ vf610_adc_calculate_rates(info);
}
static void vf610_adc_cfg_post_set(struct vf610_adc *info)
@@ -290,12 +306,10 @@ static void vf610_adc_cfg_set(struct vf610_adc *info)
cfg_data = readl(info->regs + VF610_REG_ADC_CFG);
- /* low power configuration */
cfg_data &= ~VF610_ADC_ADLPC_EN;
if (adc_feature->lpm)
cfg_data |= VF610_ADC_ADLPC_EN;
- /* disable high speed */
cfg_data &= ~VF610_ADC_ADHSC_EN;
writel(cfg_data, info->regs + VF610_REG_ADC_CFG);
@@ -435,10 +449,27 @@ static irqreturn_t vf610_adc_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("1941176, 559332, 286957, 145374, 73171");
+static ssize_t vf610_show_samp_freq_avail(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct vf610_adc *info = iio_priv(dev_to_iio_dev(dev));
+ size_t len = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(info->sample_freq_avail); i++)
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%u ", info->sample_freq_avail[i]);
+
+ /* replace trailing space by newline */
+ buf[len - 1] = '\n';
+
+ return len;
+}
+
+static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(vf610_show_samp_freq_avail);
static struct attribute *vf610_attributes[] = {
- &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ &iio_dev_attr_sampling_frequency_available.dev_attr.attr,
NULL
};
@@ -502,7 +533,7 @@ static int vf610_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_FRACTIONAL_LOG2;
case IIO_CHAN_INFO_SAMP_FREQ:
- *val = vf610_sample_freq_avail[info->adc_feature.sample_rate];
+ *val = info->sample_freq_avail[info->adc_feature.sample_rate];
*val2 = 0;
return IIO_VAL_INT;
@@ -525,9 +556,9 @@ static int vf610_write_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
for (i = 0;
- i < ARRAY_SIZE(vf610_sample_freq_avail);
+ i < ARRAY_SIZE(info->sample_freq_avail);
i++)
- if (val == vf610_sample_freq_avail[i]) {
+ if (val == info->sample_freq_avail[i]) {
info->adc_feature.sample_rate = i;
vf610_adc_sample_set(info);
return 0;
diff --git a/drivers/iio/gyro/bmg160.c b/drivers/iio/gyro/bmg160.c
index 60451b328242..ccf3ea7e1afa 100644
--- a/drivers/iio/gyro/bmg160.c
+++ b/drivers/iio/gyro/bmg160.c
@@ -822,7 +822,7 @@ static irqreturn_t bmg160_trigger_handler(int irq, void *p)
int bit, ret, i = 0;
mutex_lock(&data->mutex);
- for_each_set_bit(bit, indio_dev->buffer->scan_mask,
+ for_each_set_bit(bit, indio_dev->active_scan_mask,
indio_dev->masklength) {
ret = i2c_smbus_read_word_data(data->client,
BMG160_AXIS_TO_REG(bit));
diff --git a/drivers/iio/imu/adis_trigger.c b/drivers/iio/imu/adis_trigger.c
index e0017c22bb9c..f53e9a803a0e 100644
--- a/drivers/iio/imu/adis_trigger.c
+++ b/drivers/iio/imu/adis_trigger.c
@@ -60,7 +60,7 @@ int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev)
iio_trigger_set_drvdata(adis->trig, adis);
ret = iio_trigger_register(adis->trig);
- indio_dev->trig = adis->trig;
+ indio_dev->trig = iio_trigger_get(adis->trig);
if (ret)
goto error_free_irq;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index d8d5bed65e07..ef76afe2643c 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -410,42 +410,46 @@ error_read_raw:
}
}
-static int inv_mpu6050_write_fsr(struct inv_mpu6050_state *st, int fsr)
+static int inv_mpu6050_write_gyro_scale(struct inv_mpu6050_state *st, int val)
{
- int result;
+ int result, i;
u8 d;
- if (fsr < 0 || fsr > INV_MPU6050_MAX_GYRO_FS_PARAM)
- return -EINVAL;
- if (fsr == st->chip_config.fsr)
- return 0;
+ for (i = 0; i < ARRAY_SIZE(gyro_scale_6050); ++i) {
+ if (gyro_scale_6050[i] == val) {
+ d = (i << INV_MPU6050_GYRO_CONFIG_FSR_SHIFT);
+ result = inv_mpu6050_write_reg(st,
+ st->reg->gyro_config, d);
+ if (result)
+ return result;
- d = (fsr << INV_MPU6050_GYRO_CONFIG_FSR_SHIFT);
- result = inv_mpu6050_write_reg(st, st->reg->gyro_config, d);
- if (result)
- return result;
- st->chip_config.fsr = fsr;
+ st->chip_config.fsr = i;
+ return 0;
+ }
+ }
- return 0;
+ return -EINVAL;
}
-static int inv_mpu6050_write_accel_fs(struct inv_mpu6050_state *st, int fs)
+static int inv_mpu6050_write_accel_scale(struct inv_mpu6050_state *st, int val)
{
- int result;
+ int result, i;
u8 d;
- if (fs < 0 || fs > INV_MPU6050_MAX_ACCL_FS_PARAM)
- return -EINVAL;
- if (fs == st->chip_config.accl_fs)
- return 0;
+ for (i = 0; i < ARRAY_SIZE(accel_scale); ++i) {
+ if (accel_scale[i] == val) {
+ d = (i << INV_MPU6050_ACCL_CONFIG_FSR_SHIFT);
+ result = inv_mpu6050_write_reg(st,
+ st->reg->accl_config, d);
+ if (result)
+ return result;
- d = (fs << INV_MPU6050_ACCL_CONFIG_FSR_SHIFT);
- result = inv_mpu6050_write_reg(st, st->reg->accl_config, d);
- if (result)
- return result;
- st->chip_config.accl_fs = fs;
+ st->chip_config.accl_fs = i;
+ return 0;
+ }
+ }
- return 0;
+ return -EINVAL;
}
static int inv_mpu6050_write_raw(struct iio_dev *indio_dev,
@@ -471,10 +475,10 @@ static int inv_mpu6050_write_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_ANGL_VEL:
- result = inv_mpu6050_write_fsr(st, val);
+ result = inv_mpu6050_write_gyro_scale(st, val2);
break;
case IIO_ACCEL:
- result = inv_mpu6050_write_accel_fs(st, val);
+ result = inv_mpu6050_write_accel_scale(st, val2);
break;
default:
result = -EINVAL;
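
The renamed helpers also change which half of the value they match: IIO hands SCALE to write_raw() as an integer/micro pair (val + val2/1000000), and since the supported gyro/accel scales are all fractional, the table lookup now uses val2. A sketch of that decomposition, with illustrative micro-unit table values:

#include <linux/kernel.h>

/* Sketch: match the fractional part of an IIO scale write.
 * Table values are illustrative micro-units, not driver data.
 */
static const int scale_micro[] = { 133090, 266181, 532362, 1064724 };

static int scale_to_index(int val, int val2)
{
	int i;

	if (val != 0)		/* every supported scale is < 1 */
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(scale_micro); i++)
		if (scale_micro[i] == val2)
			return i;

	return -EINVAL;
}
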
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
index 0cd306a72a6e..ba27e277511f 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
@@ -24,6 +24,16 @@
#include <linux/poll.h>
#include "inv_mpu_iio.h"
+static void inv_clear_kfifo(struct inv_mpu6050_state *st)
+{
+ unsigned long flags;
+
+ /* take the spin lock sem to avoid interrupt kick in */
+ spin_lock_irqsave(&st->time_stamp_lock, flags);
+ kfifo_reset(&st->timestamps);
+ spin_unlock_irqrestore(&st->time_stamp_lock, flags);
+}
+
int inv_reset_fifo(struct iio_dev *indio_dev)
{
int result;
@@ -50,6 +60,10 @@ int inv_reset_fifo(struct iio_dev *indio_dev)
INV_MPU6050_BIT_FIFO_RST);
if (result)
goto reset_fifo_fail;
+
+ /* clear timestamps fifo */
+ inv_clear_kfifo(st);
+
/* enable interrupt */
if (st->chip_config.accl_fifo_enable ||
st->chip_config.gyro_fifo_enable) {
@@ -83,16 +97,6 @@ reset_fifo_fail:
return result;
}
-static void inv_clear_kfifo(struct inv_mpu6050_state *st)
-{
- unsigned long flags;
-
- /* take the spin lock sem to avoid interrupt kick in */
- spin_lock_irqsave(&st->time_stamp_lock, flags);
- kfifo_reset(&st->timestamps);
- spin_unlock_irqrestore(&st->time_stamp_lock, flags);
-}
-
/**
* inv_mpu6050_irq_handler() - Cache a timestamp at each data ready interrupt.
*/
@@ -184,7 +188,6 @@ end_session:
flush_fifo:
/* Flush HW and SW FIFOs. */
inv_reset_fifo(indio_dev);
- inv_clear_kfifo(st);
mutex_unlock(&indio_dev->mlock);
iio_trigger_notify_done(indio_dev->trig);
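
Moving inv_clear_kfifo() into inv_reset_fifo() means the timestamp fifo is always flushed together with the hardware FIFO, under the lock the interrupt handler also takes. A minimal sketch of resetting a kfifo against an IRQ-context producer:

#include <linux/kfifo.h>
#include <linux/spinlock.h>

/* Sketch: the IRQ handler enqueues timestamps under the same lock,
 * so holding it across kfifo_reset() keeps the flush atomic with
 * respect to the producer. INIT_KFIFO() is assumed done at probe.
 */
struct ts_state {
	spinlock_t lock;
	DECLARE_KFIFO(timestamps, s64, 16);
};

static void ts_clear(struct ts_state *ts)
{
	unsigned long flags;

	spin_lock_irqsave(&ts->lock, flags);
	kfifo_reset(&ts->timestamps);
	spin_unlock_irqrestore(&ts->lock, flags);
}
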
diff --git a/drivers/iio/imu/kmx61.c b/drivers/iio/imu/kmx61.c
index 5cc3692acf37..b3a36376c719 100644
--- a/drivers/iio/imu/kmx61.c
+++ b/drivers/iio/imu/kmx61.c
@@ -1227,7 +1227,7 @@ static irqreturn_t kmx61_trigger_handler(int irq, void *p)
base = KMX61_MAG_XOUT_L;
mutex_lock(&data->lock);
- for_each_set_bit(bit, indio_dev->buffer->scan_mask,
+ for_each_set_bit(bit, indio_dev->active_scan_mask,
indio_dev->masklength) {
ret = kmx61_read_measurement(data, base, bit);
if (ret < 0) {
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index aaba9d3d980e..4df97f650e44 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -847,8 +847,7 @@ static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
* @attr_list: List of IIO device attributes
*
* This function frees the memory allocated for each of the IIO device
- * attributes in the list. Note: if you want to reuse the list after calling
- * this function you have to reinitialize it using INIT_LIST_HEAD().
+ * attributes in the list.
*/
void iio_free_chan_devattr_list(struct list_head *attr_list)
{
@@ -856,6 +855,7 @@ void iio_free_chan_devattr_list(struct list_head *attr_list)
list_for_each_entry_safe(p, n, attr_list, l) {
kfree(p->dev_attr.attr.name);
+ list_del(&p->l);
kfree(p);
}
}
@@ -936,6 +936,7 @@ static void iio_device_unregister_sysfs(struct iio_dev *indio_dev)
iio_free_chan_devattr_list(&indio_dev->channel_attr_list);
kfree(indio_dev->chan_attr_group.attrs);
+ indio_dev->chan_attr_group.attrs = NULL;
}
static void iio_dev_release(struct device *device)
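
With list_del() added, the free helper leaves the list head empty instead of dangling, which is why the "reinitialize with INIT_LIST_HEAD()" caveat could be dropped from the kerneldoc. The generic idiom, as a sketch:

#include <linux/list.h>
#include <linux/slab.h>

struct attr_entry {		/* stand-in for the IIO attribute node */
	struct list_head l;
	char *name;
};

/* Sketch: unlink every node before freeing it, so the head is left
 * empty and immediately reusable.
 */
static void free_attr_list(struct list_head *attr_list)
{
	struct attr_entry *p, *n;

	list_for_each_entry_safe(p, n, attr_list, l) {
		kfree(p->name);
		list_del(&p->l);
		kfree(p);
	}
}
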
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
index a4b397048f71..a99692ba91bc 100644
--- a/drivers/iio/industrialio-event.c
+++ b/drivers/iio/industrialio-event.c
@@ -500,6 +500,7 @@ int iio_device_register_eventset(struct iio_dev *indio_dev)
error_free_setup_event_lines:
iio_free_chan_devattr_list(&indio_dev->event_interface->dev_attr_list);
kfree(indio_dev->event_interface);
+ indio_dev->event_interface = NULL;
return ret;
}
diff --git a/drivers/iio/proximity/sx9500.c b/drivers/iio/proximity/sx9500.c
index 74dff4e4a11a..89fca3a70750 100644
--- a/drivers/iio/proximity/sx9500.c
+++ b/drivers/iio/proximity/sx9500.c
@@ -494,7 +494,7 @@ static irqreturn_t sx9500_trigger_handler(int irq, void *private)
mutex_lock(&data->mutex);
- for_each_set_bit(bit, indio_dev->buffer->scan_mask,
+ for_each_set_bit(bit, indio_dev->active_scan_mask,
indio_dev->masklength) {
ret = sx9500_read_proximity(data, &indio_dev->channels[bit],
&val);
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index aec7a6aa2951..8c014b5dab4c 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -99,6 +99,14 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
if (dmasync)
dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
+ /*
+ * If the combination of the addr and size requested for this memory
+ * region causes an integer overflow, return an error.
+ */
+ if ((PAGE_ALIGN(addr + size) <= size) ||
+ (PAGE_ALIGN(addr + size) <= addr))
+ return ERR_PTR(-EINVAL);
+
if (!can_do_mlock())
return ERR_PTR(-EPERM);
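
The new check exploits the fact that PAGE_ALIGN() rounds up: if the unsigned sum addr + size wrapped around, its aligned value cannot exceed both operands. A sketch of the same test in isolation:

#include <linux/mm.h>

/* Sketch: detect wraparound of the page-aligned end of a region.
 * A non-overflowing end is strictly greater than both addr and size.
 */
static bool region_overflows(unsigned long addr, size_t size)
{
	return PAGE_ALIGN(addr + size) <= size ||
	       PAGE_ALIGN(addr + size) <= addr;
}
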
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 1bd15ebc01f2..27bcdbc950c9 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -1154,10 +1154,28 @@ out:
mutex_unlock(&alps_mutex);
}
-static void alps_report_bare_ps2_packet(struct input_dev *dev,
+static void alps_report_bare_ps2_packet(struct psmouse *psmouse,
unsigned char packet[],
bool report_buttons)
{
+ struct alps_data *priv = psmouse->private;
+ struct input_dev *dev;
+
+ /* Figure out which device to use to report the bare packet */
+ if (priv->proto_version == ALPS_PROTO_V2 &&
+ (priv->flags & ALPS_DUALPOINT)) {
+ /* On V2 devices the DualPoint Stick reports bare packets */
+ dev = priv->dev2;
+ } else if (unlikely(IS_ERR_OR_NULL(priv->dev3))) {
+ /* Register the dev3 mouse the first time we receive a PS/2 packet */
+ if (!IS_ERR(priv->dev3))
+ psmouse_queue_work(psmouse, &priv->dev3_register_work,
+ 0);
+ return;
+ } else {
+ dev = priv->dev3;
+ }
+
if (report_buttons)
alps_report_buttons(dev, NULL,
packet[0] & 1, packet[0] & 2, packet[0] & 4);
@@ -1232,8 +1250,8 @@ static psmouse_ret_t alps_handle_interleaved_ps2(struct psmouse *psmouse)
* de-synchronization.
*/
- alps_report_bare_ps2_packet(priv->dev2,
- &psmouse->packet[3], false);
+ alps_report_bare_ps2_packet(psmouse, &psmouse->packet[3],
+ false);
/*
* Continue with the standard ALPS protocol handling,
@@ -1289,18 +1307,9 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
* properly we only do this if the device is fully synchronized.
*/
if (!psmouse->out_of_sync_cnt && (psmouse->packet[0] & 0xc8) == 0x08) {
-
- /* Register dev3 mouse if we received PS/2 packet first time */
- if (unlikely(!priv->dev3))
- psmouse_queue_work(psmouse,
- &priv->dev3_register_work, 0);
-
if (psmouse->pktcnt == 3) {
- /* Once dev3 mouse device is registered report data */
- if (likely(!IS_ERR_OR_NULL(priv->dev3)))
- alps_report_bare_ps2_packet(priv->dev3,
- psmouse->packet,
- true);
+ alps_report_bare_ps2_packet(psmouse, psmouse->packet,
+ true);
return PSMOUSE_FULL_PACKET;
}
return PSMOUSE_GOOD_DATA;
@@ -2281,10 +2290,12 @@ static int alps_set_protocol(struct psmouse *psmouse,
priv->set_abs_params = alps_set_abs_params_mt;
priv->nibble_commands = alps_v3_nibble_commands;
priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
- priv->x_max = 1360;
- priv->y_max = 660;
priv->x_bits = 23;
priv->y_bits = 12;
+
+ if (alps_dolphin_get_device_area(psmouse, priv))
+ return -EIO;
+
break;
case ALPS_PROTO_V6:
@@ -2303,9 +2314,8 @@ static int alps_set_protocol(struct psmouse *psmouse,
priv->set_abs_params = alps_set_abs_params_mt;
priv->nibble_commands = alps_v3_nibble_commands;
priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
-
- if (alps_dolphin_get_device_area(psmouse, priv))
- return -EIO;
+ priv->x_max = 0xfff;
+ priv->y_max = 0x7ff;
if (priv->fw_ver[1] != 0xba)
priv->flags |= ALPS_BUTTONPAD;
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index dda605836546..3b06c8a360b6 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -154,6 +154,11 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
},
{
(const char * const []){"LEN2006", NULL},
+ {2691, 2691},
+ 1024, 5045, 2457, 4832
+ },
+ {
+ (const char * const []){"LEN2006", NULL},
{ANY_BOARD_ID, ANY_BOARD_ID},
1264, 5675, 1171, 4688
},
@@ -189,7 +194,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
"LEN2003",
"LEN2004", /* L440 */
"LEN2005",
- "LEN2006",
+ "LEN2006", /* Edge E440/E540 */
"LEN2007",
"LEN2008",
"LEN2009",
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index fc13dd56953e..a3adde6519f0 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -1288,10 +1288,13 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
return 0;
spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
- if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS)
+ if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
+ smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
ret = arm_smmu_iova_to_phys_hard(domain, iova);
- else
+ } else {
ret = ops->iova_to_phys(ops, iova);
+ }
+
spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
return ret;
@@ -1556,7 +1559,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
return -ENODEV;
}
- if (smmu->version == 1 || (!(id & ID0_ATOSNS) && (id & ID0_S1TS))) {
+ if ((id & ID0_S1TS) && ((smmu->version == 1) || (id & ID0_ATOSNS))) {
smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
dev_notice(smmu->dev, "\taddress translation ops\n");
}
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index ae4c1a854e57..2d1e05bdbb53 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1742,9 +1742,8 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
static void domain_exit(struct dmar_domain *domain)
{
- struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu;
struct page *freelist = NULL;
+ int i;
/* Domain 0 is reserved, so don't process it */
if (!domain)
@@ -1764,8 +1763,8 @@ static void domain_exit(struct dmar_domain *domain)
/* clear attached or cached domains */
rcu_read_lock();
- for_each_active_iommu(iommu, drhd)
- iommu_detach_domain(domain, iommu);
+ for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus)
+ iommu_detach_domain(domain, g_iommus[i]);
rcu_read_unlock();
dma_free_pagelist(freelist);
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 10186cac7716..bc39bdf7b99b 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -851,6 +851,7 @@ static int ipmmu_remove(struct platform_device *pdev)
static const struct of_device_id ipmmu_of_ids[] = {
{ .compatible = "renesas,ipmmu-vmsa", },
+ { }
};
static struct platform_driver ipmmu_driver = {
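
The one-line ipmmu fix adds the mandatory sentinel: of_match helpers walk an of_device_id table until they reach an all-zero entry, so a table without one runs off the end. Sketch of a correctly terminated table:

#include <linux/mod_devicetable.h>

/* Sketch: the empty entry terminates the scan. The compatible
 * string is a placeholder.
 */
static const struct of_device_id example_of_ids[] = {
	{ .compatible = "vendor,example-device" },
	{ /* sentinel */ }
};
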
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 596b0a9eee99..9687f8afebff 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -169,7 +169,7 @@ static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
{
- cmd->raw_cmd[0] &= ~(0xffffUL << 32);
+ cmd->raw_cmd[0] &= BIT_ULL(32) - 1;
cmd->raw_cmd[0] |= ((u64)devid) << 32;
}
@@ -802,6 +802,7 @@ static int its_alloc_tables(struct its_node *its)
int i;
int psz = SZ_64K;
u64 shr = GITS_BASER_InnerShareable;
+ u64 cache = GITS_BASER_WaWb;
for (i = 0; i < GITS_BASER_NR_REGS; i++) {
u64 val = readq_relaxed(its->base + GITS_BASER + i * 8);
@@ -848,7 +849,7 @@ retry_baser:
val = (virt_to_phys(base) |
(type << GITS_BASER_TYPE_SHIFT) |
((entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
- GITS_BASER_WaWb |
+ cache |
shr |
GITS_BASER_VALID);
@@ -874,9 +875,12 @@ retry_baser:
* Shareability didn't stick. Just use
* whatever the read reported, which is likely
* to be the only thing this redistributor
- * supports.
+ * supports. If that's zero, make it
+ * non-cacheable as well.
*/
shr = tmp & GITS_BASER_SHAREABILITY_MASK;
+ if (!shr)
+ cache = GITS_BASER_nC;
goto retry_baser;
}
@@ -980,16 +984,39 @@ static void its_cpu_init_lpis(void)
tmp = readq_relaxed(rbase + GICR_PROPBASER);
if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
+ if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
+ /*
+ * The HW reports non-shareable, so we must
+ * remove the cacheability attributes as
+ * well.
+ */
+ val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
+ GICR_PROPBASER_CACHEABILITY_MASK);
+ val |= GICR_PROPBASER_nC;
+ writeq_relaxed(val, rbase + GICR_PROPBASER);
+ }
pr_info_once("GIC: using cache flushing for LPI property table\n");
gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
}
/* set PENDBASE */
val = (page_to_phys(pend_page) |
- GICR_PROPBASER_InnerShareable |
- GICR_PROPBASER_WaWb);
+ GICR_PENDBASER_InnerShareable |
+ GICR_PENDBASER_WaWb);
writeq_relaxed(val, rbase + GICR_PENDBASER);
+ tmp = readq_relaxed(rbase + GICR_PENDBASER);
+
+ if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
+ /*
+ * The HW reports non-shareable, so we must remove the
+ * cacheability attributes as well.
+ */
+ val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
+ GICR_PENDBASER_CACHEABILITY_MASK);
+ val |= GICR_PENDBASER_nC;
+ writeq_relaxed(val, rbase + GICR_PENDBASER);
+ }
/* Enable LPIs */
val = readl_relaxed(rbase + GICR_CTLR);
@@ -1026,7 +1053,7 @@ static void its_cpu_init_collection(void)
* This ITS wants a linear CPU number.
*/
target = readq_relaxed(gic_data_rdist_rd_base() + GICR_TYPER);
- target = GICR_TYPER_CPU_NUMBER(target);
+ target = GICR_TYPER_CPU_NUMBER(target) << 16;
}
/* Perform collection mapping */
@@ -1422,14 +1449,26 @@ static int its_probe(struct device_node *node, struct irq_domain *parent)
writeq_relaxed(baser, its->base + GITS_CBASER);
tmp = readq_relaxed(its->base + GITS_CBASER);
- writeq_relaxed(0, its->base + GITS_CWRITER);
- writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR);
- if ((tmp ^ baser) & GITS_BASER_SHAREABILITY_MASK) {
+ if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
+ if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
+ /*
+ * The HW reports non-shareable, so we must
+ * remove the cacheability attributes as
+ * well.
+ */
+ baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
+ GITS_CBASER_CACHEABILITY_MASK);
+ baser |= GITS_CBASER_nC;
+ writeq_relaxed(baser, its->base + GITS_CBASER);
+ }
pr_info("ITS: using cache flushing for cmd queue\n");
its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
}
+ writeq_relaxed(0, its->base + GITS_CWRITER);
+ writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR);
+
if (of_property_read_bool(its->msi_chip.of_node, "msi-controller")) {
its->domain = irq_domain_add_tree(NULL, &its_domain_ops, its);
if (!its->domain) {
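
All three ITS/redistributor hunks above share one shape: write the desired attributes, read the register back, and if the implementation refused shareability, drop the cacheability attributes too and fall back to explicit flushing. A condensed sketch of that dance, with the masks standing in for the GITS_ and GICR_ definitions:

#include <linux/io.h>

/* Sketch: program a table base register defensively. Accessor args
 * and masks are placeholders for the driver's register definitions.
 */
static u64 program_baser(void __iomem *reg, u64 want,
			 u64 share_mask, u64 cache_mask, u64 cache_nc)
{
	u64 got;

	writeq_relaxed(want, reg);
	got = readq_relaxed(reg);

	if (!(got & share_mask)) {
		/* HW forced non-shareable; cacheable attributes are
		 * then meaningless, so request non-cacheable and let
		 * the caller flush by hand.
		 */
		want &= ~(share_mask | cache_mask);
		want |= cache_nc;
		writeq_relaxed(want, reg);
	}

	return want;	/* caller checks whether flushing is needed */
}
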
diff --git a/drivers/lguest/Kconfig b/drivers/lguest/Kconfig
index ee035ec4526b..169172d2ba05 100644
--- a/drivers/lguest/Kconfig
+++ b/drivers/lguest/Kconfig
@@ -1,6 +1,6 @@
config LGUEST
tristate "Linux hypervisor example code"
- depends on X86_32 && EVENTFD && TTY
+ depends on X86_32 && EVENTFD && TTY && PCI_DIRECT
select HVC_DRIVER
---help---
This is a very simple module which allows you to run
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 9b641b38b857..8001fe9e3434 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -433,7 +433,6 @@ static int dm_blk_open(struct block_device *bdev, fmode_t mode)
dm_get(md);
atomic_inc(&md->open_count);
-
out:
spin_unlock(&_minor_lock);
@@ -442,16 +441,20 @@ out:
static void dm_blk_close(struct gendisk *disk, fmode_t mode)
{
- struct mapped_device *md = disk->private_data;
+ struct mapped_device *md;
spin_lock(&_minor_lock);
+ md = disk->private_data;
+ if (WARN_ON(!md))
+ goto out;
+
if (atomic_dec_and_test(&md->open_count) &&
(test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
queue_work(deferred_remove_workqueue, &deferred_remove_work);
dm_put(md);
-
+out:
spin_unlock(&_minor_lock);
}
@@ -2241,7 +2244,6 @@ static void free_dev(struct mapped_device *md)
int minor = MINOR(disk_devt(md->disk));
unlock_fs(md);
- bdput(md->bdev);
destroy_workqueue(md->wq);
if (md->kworker_task)
@@ -2252,19 +2254,22 @@ static void free_dev(struct mapped_device *md)
mempool_destroy(md->rq_pool);
if (md->bs)
bioset_free(md->bs);
- blk_integrity_unregister(md->disk);
- del_gendisk(md->disk);
+
cleanup_srcu_struct(&md->io_barrier);
free_table_devices(&md->table_devices);
- free_minor(minor);
+ dm_stats_cleanup(&md->stats);
spin_lock(&_minor_lock);
md->disk->private_data = NULL;
spin_unlock(&_minor_lock);
-
+ if (blk_get_integrity(md->disk))
+ blk_integrity_unregister(md->disk);
+ del_gendisk(md->disk);
put_disk(md->disk);
blk_cleanup_queue(md->queue);
- dm_stats_cleanup(&md->stats);
+ bdput(md->bdev);
+ free_minor(minor);
+
module_put(THIS_MODULE);
kfree(md);
}
@@ -2642,8 +2647,9 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
might_sleep();
- spin_lock(&_minor_lock);
map = dm_get_live_table(md, &srcu_idx);
+
+ spin_lock(&_minor_lock);
idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
set_bit(DMF_FREEING, &md->flags);
spin_unlock(&_minor_lock);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 717daad71fb1..e6178787ce3d 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -249,6 +249,7 @@ static void md_make_request(struct request_queue *q, struct bio *bio)
const int rw = bio_data_dir(bio);
struct mddev *mddev = q->queuedata;
unsigned int sectors;
+ int cpu;
if (mddev == NULL || mddev->pers == NULL
|| !mddev->ready) {
@@ -284,7 +285,10 @@ static void md_make_request(struct request_queue *q, struct bio *bio)
sectors = bio_sectors(bio);
mddev->pers->make_request(mddev, bio);
- generic_start_io_acct(rw, sectors, &mddev->gendisk->part0);
+ cpu = part_stat_lock();
+ part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+ part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
+ part_stat_unlock();
if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
wake_up(&mddev->sb_wait);
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 3ed9f42ddca6..3b5d7f704aa3 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -313,7 +313,7 @@ static struct strip_zone *find_zone(struct r0conf *conf,
/*
* remaps the bio to the target device. we separate two flows.
- * power 2 flow and a general flow for the sake of perfromance
+ * power 2 flow and a general flow for the sake of performance
*/
static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
sector_t sector, sector_t *sector_offset)
@@ -524,6 +524,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
split = bio;
}
+ sector = bio->bi_iter.bi_sector;
zone = find_zone(mddev->private, &sector);
tmp_dev = map_sector(mddev, zone, sector, &sector);
split->bi_bdev = tmp_dev->bdev;
diff --git a/drivers/media/dvb-frontends/rtl2832.c b/drivers/media/dvb-frontends/rtl2832.c
index 5d2d8f45b4b6..67faa8d6950e 100644
--- a/drivers/media/dvb-frontends/rtl2832.c
+++ b/drivers/media/dvb-frontends/rtl2832.c
@@ -1240,7 +1240,7 @@ static int rtl2832_probe(struct i2c_client *client,
dev->regmap_config.max_register = 5 * 0x100,
dev->regmap_config.ranges = regmap_range_cfg,
dev->regmap_config.num_ranges = ARRAY_SIZE(regmap_range_cfg),
- dev->regmap_config.cache_type = REGCACHE_RBTREE,
+ dev->regmap_config.cache_type = REGCACHE_NONE,
dev->regmap = regmap_init(&client->dev, &regmap_bus, client,
&dev->regmap_config);
if (IS_ERR(dev->regmap)) {
diff --git a/drivers/media/pci/cx23885/cx23885-417.c b/drivers/media/pci/cx23885/cx23885-417.c
index e4901a503c73..63c0ee5d0bf5 100644
--- a/drivers/media/pci/cx23885/cx23885-417.c
+++ b/drivers/media/pci/cx23885/cx23885-417.c
@@ -1339,14 +1339,13 @@ static int vidioc_querycap(struct file *file, void *priv,
strlcpy(cap->driver, dev->name, sizeof(cap->driver));
strlcpy(cap->card, cx23885_boards[tsport->dev->board].name,
sizeof(cap->card));
- sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci));
- cap->capabilities =
- V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_READWRITE |
- V4L2_CAP_STREAMING |
- 0;
+ sprintf(cap->bus_info, "PCIe:%s", pci_name(dev->pci));
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
+ V4L2_CAP_STREAMING;
if (dev->tuner_type != TUNER_ABSENT)
- cap->capabilities |= V4L2_CAP_TUNER;
+ cap->device_caps |= V4L2_CAP_TUNER;
+ cap->capabilities = cap->device_caps | V4L2_CAP_VBI_CAPTURE |
+ V4L2_CAP_AUDIO | V4L2_CAP_DEVICE_CAPS;
return 0;
}
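
The querycap change moves to the device_caps/capabilities split: device_caps describes what this particular node can do, while capabilities covers the whole device and must advertise V4L2_CAP_DEVICE_CAPS. A sketch of the fill order:

#include <linux/videodev2.h>

/* Sketch: derive capabilities from device_caps. The tuner flag is
 * the only conditional here; extra whole-device caps would be OR'd
 * into capabilities, as the hunk above does.
 */
static void fill_querycap(struct v4l2_capability *cap, bool has_tuner)
{
	cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
			   V4L2_CAP_STREAMING;
	if (has_tuner)
		cap->device_caps |= V4L2_CAP_TUNER;

	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
}
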
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index 12f7452edce3..a92ff4249d10 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -1845,6 +1845,9 @@ static void exynos4_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx)
struct s5p_jpeg_addr jpeg_addr;
u32 pix_size, padding_bytes = 0;
+ jpeg_addr.cb = 0;
+ jpeg_addr.cr = 0;
+
pix_size = ctx->cap_q.w * ctx->cap_q.h;
if (ctx->mode == S5P_JPEG_ENCODE) {
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos3250.c b/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos3250.c
index e8c2cad93962..0974b9a7a584 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos3250.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos3250.c
@@ -20,7 +20,7 @@
void exynos3250_jpeg_reset(void __iomem *regs)
{
- u32 reg = 0;
+ u32 reg = 1;
int count = 1000;
writel(1, regs + EXYNOS3250_SW_RESET);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index 8e44a59d8ec2..98374e8bad3e 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -833,6 +833,7 @@ static int s5p_mfc_open(struct file *file)
q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
q->io_modes = VB2_MMAP;
q->drv_priv = &ctx->fh;
+ q->lock = &dev->mfc_mutex;
if (vdev == dev->vfd_dec) {
q->io_modes = VB2_MMAP;
q->ops = get_dec_queue_ops();
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
index 15f7663dd9f5..24262bbb1a35 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
@@ -29,7 +29,7 @@
/* Offset base used to differentiate between CAPTURE and OUTPUT
* while mmaping */
-#define DST_QUEUE_OFF_BASE (TASK_SIZE / 2)
+#define DST_QUEUE_OFF_BASE (1 << 30)
#define MFC_BANK1_ALLOC_CTX 0
#define MFC_BANK2_ALLOC_CTX 1
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr.h b/drivers/media/platform/s5p-mfc/s5p_mfc_opr.h
index de2b8c69daa5..22dfb3effda8 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr.h
@@ -302,7 +302,7 @@ struct s5p_mfc_hw_ops {
void (*write_info)(struct s5p_mfc_ctx *ctx, unsigned int data,
unsigned int ofs);
unsigned int (*read_info)(struct s5p_mfc_ctx *ctx,
- unsigned int ofs);
+ unsigned long ofs);
int (*get_dspl_y_adr)(struct s5p_mfc_dev *dev);
int (*get_dec_y_adr)(struct s5p_mfc_dev *dev);
int (*get_dspl_status)(struct s5p_mfc_dev *dev);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
index 0c4fcf2dfd09..b09bcd140491 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
@@ -263,15 +263,15 @@ static void s5p_mfc_release_dev_context_buffer_v5(struct s5p_mfc_dev *dev)
static void s5p_mfc_write_info_v5(struct s5p_mfc_ctx *ctx, unsigned int data,
unsigned int ofs)
{
- writel(data, (volatile void __iomem *)(ctx->shm.virt + ofs));
+ writel(data, (void *)(ctx->shm.virt + ofs));
wmb();
}
static unsigned int s5p_mfc_read_info_v5(struct s5p_mfc_ctx *ctx,
- unsigned int ofs)
+ unsigned long ofs)
{
rmb();
- return readl((volatile void __iomem *)(ctx->shm.virt + ofs));
+ return readl((void *)(ctx->shm.virt + ofs));
}
static void s5p_mfc_dec_calc_dpb_size_v5(struct s5p_mfc_ctx *ctx)
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
index d826c58b5d53..cefad184fe96 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
@@ -1852,17 +1852,17 @@ static void s5p_mfc_write_info_v6(struct s5p_mfc_ctx *ctx, unsigned int data,
unsigned int ofs)
{
s5p_mfc_clock_on();
- writel(data, (volatile void __iomem *)((unsigned long)ofs));
+ writel(data, (void *)((unsigned long)ofs));
s5p_mfc_clock_off();
}
static unsigned int
-s5p_mfc_read_info_v6(struct s5p_mfc_ctx *ctx, unsigned int ofs)
+s5p_mfc_read_info_v6(struct s5p_mfc_ctx *ctx, unsigned long ofs)
{
int ret;
s5p_mfc_clock_on();
- ret = readl((volatile void __iomem *)((unsigned long)ofs));
+ ret = readl((void *)ofs);
s5p_mfc_clock_off();
return ret;
diff --git a/drivers/media/platform/s5p-tv/Kconfig b/drivers/media/platform/s5p-tv/Kconfig
index 5a1835dd65e8..697aaed42486 100644
--- a/drivers/media/platform/s5p-tv/Kconfig
+++ b/drivers/media/platform/s5p-tv/Kconfig
@@ -20,6 +20,7 @@ if VIDEO_SAMSUNG_S5P_TV
config VIDEO_SAMSUNG_S5P_HDMI
tristate "Samsung HDMI Driver"
depends on VIDEO_V4L2
+ depends on I2C
depends on VIDEO_SAMSUNG_S5P_TV
select VIDEO_SAMSUNG_S5P_HDMIPHY
help
diff --git a/drivers/media/platform/sh_veu.c b/drivers/media/platform/sh_veu.c
index a901b6248557..2554f3719b9e 100644
--- a/drivers/media/platform/sh_veu.c
+++ b/drivers/media/platform/sh_veu.c
@@ -1158,6 +1158,7 @@ static int sh_veu_probe(struct platform_device *pdev)
}
*vdev = sh_veu_videodev;
+ vdev->v4l2_dev = &veu->v4l2_dev;
spin_lock_init(&veu->lock);
mutex_init(&veu->fop_lock);
vdev->lock = &veu->fop_lock;
diff --git a/drivers/media/platform/soc_camera/atmel-isi.c b/drivers/media/platform/soc_camera/atmel-isi.c
index 8526bf5c8429..c835beb2a1a8 100644
--- a/drivers/media/platform/soc_camera/atmel-isi.c
+++ b/drivers/media/platform/soc_camera/atmel-isi.c
@@ -843,6 +843,8 @@ static int isi_camera_set_bus_param(struct soc_camera_device *icd)
if (isi->pdata.full_mode)
cfg1 |= ISI_CFG1_FULL_MODE;
+ cfg1 |= ISI_CFG1_THMASK_BEATS_16;
+
isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS);
isi_writel(isi, ISI_CFG1, cfg1);
diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c
index cee7b56f8404..66634b469c98 100644
--- a/drivers/media/platform/soc_camera/soc_camera.c
+++ b/drivers/media/platform/soc_camera/soc_camera.c
@@ -1665,7 +1665,7 @@ eclkreg:
eaddpdev:
platform_device_put(sasc->pdev);
eallocpdev:
- devm_kfree(ici->v4l2_dev.dev, sasc);
+ devm_kfree(ici->v4l2_dev.dev, info);
dev_err(ici->v4l2_dev.dev, "group probe failed: %d\n", ret);
return ret;
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index 77dcfdf547ac..87fc0fe29ebd 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -780,8 +780,6 @@ static int rtl2832u_frontend_callback(void *adapter_priv, int component,
case TUNER_RTL2832_TUA9001:
return rtl2832u_tua9001_tuner_callback(d, cmd, arg);
}
- default:
- return -EINVAL;
}
return 0;
diff --git a/drivers/media/usb/gspca/Kconfig b/drivers/media/usb/gspca/Kconfig
index 60af3b167f3b..3fd94fe7e1eb 100644
--- a/drivers/media/usb/gspca/Kconfig
+++ b/drivers/media/usb/gspca/Kconfig
@@ -1,6 +1,7 @@
menuconfig USB_GSPCA
tristate "GSPCA based webcams"
depends on VIDEO_V4L2
+ depends on INPUT || INPUT=n
default m
---help---
Say Y here if you want to enable selecting webcams based
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index bc08a829bc13..cc16e76a2493 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -3230,18 +3230,13 @@ int vb2_thread_stop(struct vb2_queue *q)
if (threadio == NULL)
return 0;
- call_void_qop(q, wait_finish, q);
threadio->stop = true;
- vb2_internal_streamoff(q, q->type);
- call_void_qop(q, wait_prepare, q);
+ /* Wake up all pending sleeps in the thread */
+ vb2_queue_error(q);
err = kthread_stop(threadio->thread);
- q->fileio = NULL;
- fileio->req.count = 0;
- vb2_reqbufs(q, &fileio->req);
- kfree(fileio);
+ __vb2_cleanup_fileio(q);
threadio->thread = NULL;
kfree(threadio);
- q->fileio = NULL;
q->threadio = NULL;
return err;
}
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
index b481d20c8372..69e0483adfee 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
@@ -632,8 +632,7 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
}
/* extract page list from userspace mapping */
- ret = vb2_dc_get_user_pages(start, pages, n_pages, vma,
- dma_dir == DMA_FROM_DEVICE);
+ ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, dma_dir);
if (ret) {
unsigned long pfn;
if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) {
diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c
index f38ec424872e..5615522f8d62 100644
--- a/drivers/mfd/kempld-core.c
+++ b/drivers/mfd/kempld-core.c
@@ -739,7 +739,7 @@ static int __init kempld_init(void)
for (id = kempld_dmi_table;
id->matches[0].slot != DMI_NONE; id++)
if (strstr(id->ident, force_device_id))
- if (id->callback && id->callback(id))
+ if (id->callback && !id->callback(id))
break;
if (id->matches[0].slot == DMI_NONE)
return -ENODEV;
diff --git a/drivers/mfd/rtsx_usb.c b/drivers/mfd/rtsx_usb.c
index ede50244f265..dbd907d7170e 100644
--- a/drivers/mfd/rtsx_usb.c
+++ b/drivers/mfd/rtsx_usb.c
@@ -196,18 +196,27 @@ EXPORT_SYMBOL_GPL(rtsx_usb_ep0_write_register);
int rtsx_usb_ep0_read_register(struct rtsx_ucr *ucr, u16 addr, u8 *data)
{
u16 value;
+ u8 *buf;
+ int ret;
if (!data)
return -EINVAL;
- *data = 0;
+
+ buf = kzalloc(sizeof(u8), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
addr |= EP0_READ_REG_CMD << EP0_OP_SHIFT;
value = swab16(addr);
- return usb_control_msg(ucr->pusb_dev,
+ ret = usb_control_msg(ucr->pusb_dev,
usb_rcvctrlpipe(ucr->pusb_dev, 0), RTSX_USB_REQ_REG_OP,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- value, 0, data, 1, 100);
+ value, 0, buf, 1, 100);
+ *data = *buf;
+
+ kfree(buf);
+ return ret;
}
EXPORT_SYMBOL_GPL(rtsx_usb_ep0_read_register);
@@ -288,18 +297,27 @@ static int rtsx_usb_get_status_with_bulk(struct rtsx_ucr *ucr, u16 *status)
int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status)
{
int ret;
+ u16 *buf;
if (!status)
return -EINVAL;
- if (polling_pipe == 0)
+ if (polling_pipe == 0) {
+ buf = kzalloc(sizeof(u16), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
ret = usb_control_msg(ucr->pusb_dev,
usb_rcvctrlpipe(ucr->pusb_dev, 0),
RTSX_USB_REQ_POLL,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- 0, 0, status, 2, 100);
- else
+ 0, 0, buf, 2, 100);
+ *status = *buf;
+
+ kfree(buf);
+ } else {
ret = rtsx_usb_get_status_with_bulk(ucr, status);
+ }
/* usb_control_msg may return positive when success */
if (ret < 0)
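
Both rtsx_usb hunks replace a caller-supplied buffer (potentially on the stack) with a freshly allocated one, because buffers handed to usb_control_msg() must be DMA-able heap memory. A generic sketch of the bounce-buffer idiom, with a hypothetical vendor request code:

#include <linux/slab.h>
#include <linux/usb.h>

/* Sketch: bounce a single byte through heap memory. The request
 * constant 0x05 is a placeholder, not a real device command.
 */
static int vendor_read_byte(struct usb_device *udev, u16 value, u8 *out)
{
	u8 *buf;
	int ret;

	buf = kzalloc(1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x05,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      value, 0, buf, 1, 100);
	*out = *buf;

	kfree(buf);
	return ret < 0 ? ret : 0;
}
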
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index 38552a31304a..65fed7146e9b 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -202,16 +202,17 @@ static void enclosure_remove_links(struct enclosure_component *cdev)
{
char name[ENCLOSURE_NAME_SIZE];
+ enclosure_link_name(cdev, name);
+
/*
* In odd circumstances, like multipath devices, something else may
* already have removed the links, so check for this condition first.
*/
- if (!cdev->dev->kobj.sd)
- return;
+ if (cdev->dev->kobj.sd)
+ sysfs_remove_link(&cdev->dev->kobj, name);
- enclosure_link_name(cdev, name);
- sysfs_remove_link(&cdev->dev->kobj, name);
- sysfs_remove_link(&cdev->cdev.kobj, "device");
+ if (cdev->cdev.kobj.sd)
+ sysfs_remove_link(&cdev->cdev.kobj, "device");
}
static int enclosure_add_links(struct enclosure_component *cdev)
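
The rework checks each kobject's sysfs node (kobj.sd) individually before removing its link, instead of returning early when the first one is already gone; with multipath, either side may disappear first. Sketch of the guarded removal:

#include <linux/kobject.h>
#include <linux/sysfs.h>

/* Sketch: remove each cross-link only while its kobject is still
 * present in sysfs; one side vanishing must not leak the other's.
 */
static void remove_pair_links(struct kobject *dev_kobj,
			      struct kobject *comp_kobj,
			      const char *link_name)
{
	if (dev_kobj->sd)
		sysfs_remove_link(dev_kobj, link_name);
	if (comp_kobj->sd)
		sysfs_remove_link(comp_kobj, "device");
}
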
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index b979c265fc51..089a4028859d 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3850,7 +3850,8 @@ static inline int bond_slave_override(struct bonding *bond,
/* Find out if any slaves have the same mapping as this skb. */
bond_for_each_slave_rcu(bond, slave, iter) {
if (slave->queue_id == skb->queue_mapping) {
- if (bond_slave_can_tx(slave)) {
+ if (bond_slave_is_up(slave) &&
+ slave->link == BOND_LINK_UP) {
bond_dev_queue_xmit(bond, skb, slave->dev);
return 0;
}
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 80c46ad4cee4..ad0a7e8c2c2b 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -592,13 +592,12 @@ static int flexcan_poll_state(struct net_device *dev, u32 reg_esr)
rx_state = unlikely(reg_esr & FLEXCAN_ESR_RX_WRN) ?
CAN_STATE_ERROR_WARNING : CAN_STATE_ERROR_ACTIVE;
new_state = max(tx_state, rx_state);
- } else if (unlikely(flt == FLEXCAN_ESR_FLT_CONF_PASSIVE)) {
+ } else {
__flexcan_get_berr_counter(dev, &bec);
- new_state = CAN_STATE_ERROR_PASSIVE;
+ new_state = flt == FLEXCAN_ESR_FLT_CONF_PASSIVE ?
+ CAN_STATE_ERROR_PASSIVE : CAN_STATE_BUS_OFF;
rx_state = bec.rxerr >= bec.txerr ? new_state : 0;
tx_state = bec.rxerr <= bec.txerr ? new_state : 0;
- } else {
- new_state = CAN_STATE_BUS_OFF;
}
/* state hasn't changed */
@@ -1158,12 +1157,19 @@ static int flexcan_probe(struct platform_device *pdev)
const struct flexcan_devtype_data *devtype_data;
struct net_device *dev;
struct flexcan_priv *priv;
+ struct regulator *reg_xceiver;
struct resource *mem;
struct clk *clk_ipg = NULL, *clk_per = NULL;
void __iomem *base;
int err, irq;
u32 clock_freq = 0;
+ reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver");
+ if (PTR_ERR(reg_xceiver) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ else if (IS_ERR(reg_xceiver))
+ reg_xceiver = NULL;
+
if (pdev->dev.of_node)
of_property_read_u32(pdev->dev.of_node,
"clock-frequency", &clock_freq);
@@ -1224,9 +1230,7 @@ static int flexcan_probe(struct platform_device *pdev)
priv->pdata = dev_get_platdata(&pdev->dev);
priv->devtype_data = devtype_data;
- priv->reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver");
- if (IS_ERR(priv->reg_xceiver))
- priv->reg_xceiver = NULL;
+ priv->reg_xceiver = reg_xceiver;
netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT);
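
Resolving the transceiver regulator before any allocation lets -EPROBE_DEFER propagate before the driver commits resources, while any other lookup failure is treated as "supply not fitted". The probe-ordering idiom, sketched:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

/* Sketch: optional supply lookup. Defer when the provider isn't
 * ready yet; degrade to NULL (run without it) on other errors.
 */
static struct regulator *get_optional_xceiver(struct device *dev)
{
	struct regulator *reg = devm_regulator_get(dev, "xceiver");

	if (PTR_ERR(reg) == -EPROBE_DEFER)
		return ERR_PTR(-EPROBE_DEFER);	/* retry probe later */
	if (IS_ERR(reg))
		return NULL;

	return reg;
}
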
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 009acc8641fc..8b4d3e6875eb 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -901,6 +901,8 @@ static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *
}
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
init_usb_anchor(&dev->rx_submitted);
atomic_set(&dev->active_channels, 0);
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index e97a08ce0b90..57611fd91229 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -25,7 +25,6 @@
#include <linux/can/dev.h>
#include <linux/can/error.h>
-#define MAX_TX_URBS 16
#define MAX_RX_URBS 4
#define START_TIMEOUT 1000 /* msecs */
#define STOP_TIMEOUT 1000 /* msecs */
@@ -443,6 +442,7 @@ struct kvaser_usb_error_summary {
};
};
+/* Context for an outstanding, not yet ACKed, transmission */
struct kvaser_usb_tx_urb_context {
struct kvaser_usb_net_priv *priv;
u32 echo_index;
@@ -456,8 +456,13 @@ struct kvaser_usb {
struct usb_endpoint_descriptor *bulk_in, *bulk_out;
struct usb_anchor rx_submitted;
+	/* @max_tx_urbs: Firmware-reported maximum number of outstanding,
+ * not yet ACKed, transmissions on this device. This value is
+ * also used as a sentinel for marking free tx contexts.
+ */
u32 fw_version;
unsigned int nchannels;
+ unsigned int max_tx_urbs;
enum kvaser_usb_family family;
bool rxinitdone;
@@ -467,19 +472,18 @@ struct kvaser_usb {
struct kvaser_usb_net_priv {
struct can_priv can;
-
- spinlock_t tx_contexts_lock;
- int active_tx_contexts;
- struct kvaser_usb_tx_urb_context tx_contexts[MAX_TX_URBS];
-
- struct usb_anchor tx_submitted;
- struct completion start_comp, stop_comp;
+ struct can_berr_counter bec;
struct kvaser_usb *dev;
struct net_device *netdev;
int channel;
- struct can_berr_counter bec;
+ struct completion start_comp, stop_comp;
+ struct usb_anchor tx_submitted;
+
+ spinlock_t tx_contexts_lock;
+ int active_tx_contexts;
+ struct kvaser_usb_tx_urb_context tx_contexts[];
};
static const struct usb_device_id kvaser_usb_table[] = {
@@ -592,8 +596,8 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id,
* for further details.
*/
if (tmp->len == 0) {
- pos = round_up(pos,
- dev->bulk_in->wMaxPacketSize);
+ pos = round_up(pos, le16_to_cpu(dev->bulk_in->
+ wMaxPacketSize));
continue;
}
@@ -657,9 +661,13 @@ static int kvaser_usb_get_software_info(struct kvaser_usb *dev)
switch (dev->family) {
case KVASER_LEAF:
dev->fw_version = le32_to_cpu(msg.u.leaf.softinfo.fw_version);
+ dev->max_tx_urbs =
+ le16_to_cpu(msg.u.leaf.softinfo.max_outstanding_tx);
break;
case KVASER_USBCAN:
dev->fw_version = le32_to_cpu(msg.u.usbcan.softinfo.fw_version);
+ dev->max_tx_urbs =
+ le16_to_cpu(msg.u.usbcan.softinfo.max_outstanding_tx);
break;
}
@@ -715,7 +723,7 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
stats = &priv->netdev->stats;
- context = &priv->tx_contexts[tid % MAX_TX_URBS];
+ context = &priv->tx_contexts[tid % dev->max_tx_urbs];
/* Sometimes the state change doesn't come after a bus-off event */
if (priv->can.restart_ms &&
@@ -744,7 +752,7 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
spin_lock_irqsave(&priv->tx_contexts_lock, flags);
can_get_echo_skb(priv->netdev, context->echo_index);
- context->echo_index = MAX_TX_URBS;
+ context->echo_index = dev->max_tx_urbs;
--priv->active_tx_contexts;
netif_wake_queue(priv->netdev);
@@ -1329,7 +1337,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
* number of events in case of a heavy rx load on the bus.
*/
if (msg->len == 0) {
- pos = round_up(pos, dev->bulk_in->wMaxPacketSize);
+ pos = round_up(pos, le16_to_cpu(dev->bulk_in->
+ wMaxPacketSize));
continue;
}
@@ -1512,11 +1521,13 @@ error:
static void kvaser_usb_reset_tx_urb_contexts(struct kvaser_usb_net_priv *priv)
{
- int i;
+ int i, max_tx_urbs;
+
+ max_tx_urbs = priv->dev->max_tx_urbs;
priv->active_tx_contexts = 0;
- for (i = 0; i < MAX_TX_URBS; i++)
- priv->tx_contexts[i].echo_index = MAX_TX_URBS;
+ for (i = 0; i < max_tx_urbs; i++)
+ priv->tx_contexts[i].echo_index = max_tx_urbs;
}
/* This method might sleep. Do not call it in the atomic context
@@ -1702,14 +1713,14 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
*msg_tx_can_flags |= MSG_FLAG_REMOTE_FRAME;
spin_lock_irqsave(&priv->tx_contexts_lock, flags);
- for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++) {
- if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) {
+ for (i = 0; i < dev->max_tx_urbs; i++) {
+ if (priv->tx_contexts[i].echo_index == dev->max_tx_urbs) {
context = &priv->tx_contexts[i];
context->echo_index = i;
can_put_echo_skb(skb, netdev, context->echo_index);
++priv->active_tx_contexts;
- if (priv->active_tx_contexts >= MAX_TX_URBS)
+ if (priv->active_tx_contexts >= dev->max_tx_urbs)
netif_stop_queue(netdev);
break;
@@ -1743,7 +1754,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
spin_lock_irqsave(&priv->tx_contexts_lock, flags);
can_free_echo_skb(netdev, context->echo_index);
- context->echo_index = MAX_TX_URBS;
+ context->echo_index = dev->max_tx_urbs;
--priv->active_tx_contexts;
netif_wake_queue(netdev);
@@ -1881,7 +1892,9 @@ static int kvaser_usb_init_one(struct usb_interface *intf,
if (err)
return err;
- netdev = alloc_candev(sizeof(*priv), MAX_TX_URBS);
+ netdev = alloc_candev(sizeof(*priv) +
+ dev->max_tx_urbs * sizeof(*priv->tx_contexts),
+ dev->max_tx_urbs);
if (!netdev) {
dev_err(&intf->dev, "Cannot alloc candev\n");
return -ENOMEM;
@@ -2009,6 +2022,13 @@ static int kvaser_usb_probe(struct usb_interface *intf,
return err;
}
+ dev_dbg(&intf->dev, "Firmware version: %d.%d.%d\n",
+ ((dev->fw_version >> 24) & 0xff),
+ ((dev->fw_version >> 16) & 0xff),
+ (dev->fw_version & 0xffff));
+
+	dev_dbg(&intf->dev, "Max outstanding tx = %d URBs\n", dev->max_tx_urbs);
+
err = kvaser_usb_get_card_info(dev);
if (err) {
dev_err(&intf->dev,
@@ -2016,11 +2036,6 @@ static int kvaser_usb_probe(struct usb_interface *intf,
return err;
}
- dev_dbg(&intf->dev, "Firmware version: %d.%d.%d\n",
- ((dev->fw_version >> 24) & 0xff),
- ((dev->fw_version >> 16) & 0xff),
- (dev->fw_version & 0xffff));
-
for (i = 0; i < dev->nchannels; i++) {
err = kvaser_usb_init_one(intf, id, i);
if (err) {
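
The kvaser change replaces the compile-time MAX_TX_URBS array with a flexible array member sized from the firmware-reported limit, folded into the candev allocation. A sketch of sizing such an allocation (alloc_candev() takes the private-area size directly):

#include <linux/can/dev.h>

struct tx_ctx {
	u32 echo_index;
};

struct chan_priv {
	struct can_priv can;	/* must remain the first member */
	unsigned int max_tx_urbs;
	struct tx_ctx tx_contexts[];	/* sized at allocation time */
};

/* Sketch: size the private area from the device-reported context
 * count instead of a compile-time maximum.
 */
static struct net_device *alloc_chan(unsigned int max_tx_urbs)
{
	return alloc_candev(sizeof(struct chan_priv) +
			    max_tx_urbs * sizeof(struct tx_ctx),
			    max_tx_urbs);
}
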
diff --git a/drivers/net/can/usb/peak_usb/pcan_ucan.h b/drivers/net/can/usb/peak_usb/pcan_ucan.h
index 1ba7c25002e1..e8fc4952c6b0 100644
--- a/drivers/net/can/usb/peak_usb/pcan_ucan.h
+++ b/drivers/net/can/usb/peak_usb/pcan_ucan.h
@@ -26,8 +26,8 @@
#define PUCAN_CMD_FILTER_STD 0x008
#define PUCAN_CMD_TX_ABORT 0x009
#define PUCAN_CMD_WR_ERR_CNT 0x00a
-#define PUCAN_CMD_RX_FRAME_ENABLE 0x00b
-#define PUCAN_CMD_RX_FRAME_DISABLE 0x00c
+#define PUCAN_CMD_SET_EN_OPTION 0x00b
+#define PUCAN_CMD_CLR_DIS_OPTION 0x00c
#define PUCAN_CMD_END_OF_COLLECTION 0x3ff
/* uCAN received messages list */
@@ -101,14 +101,15 @@ struct __packed pucan_wr_err_cnt {
u16 unused;
};
-/* uCAN RX_FRAME_ENABLE command fields */
-#define PUCAN_FLTEXT_ERROR 0x0001
-#define PUCAN_FLTEXT_BUSLOAD 0x0002
+/* uCAN SET_EN/CLR_DIS _OPTION command fields */
+#define PUCAN_OPTION_ERROR 0x0001
+#define PUCAN_OPTION_BUSLOAD 0x0002
+#define PUCAN_OPTION_CANDFDISO 0x0004
-struct __packed pucan_filter_ext {
+struct __packed pucan_options {
__le16 opcode_channel;
- __le16 ext_mask;
+ __le16 options;
u32 unused;
};
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index 0bac0f14edc3..a9221ad9f1a0 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -110,13 +110,13 @@ struct __packed pcan_ufd_led {
u8 unused[5];
};
-/* Extended usage of uCAN commands CMD_RX_FRAME_xxxABLE for PCAN-USB Pro FD */
+/* Extended usage of uCAN commands CMD_xxx_xx_OPTION for PCAN-USB Pro FD */
#define PCAN_UFD_FLTEXT_CALIBRATION 0x8000
-struct __packed pcan_ufd_filter_ext {
+struct __packed pcan_ufd_options {
__le16 opcode_channel;
- __le16 ext_mask;
+ __le16 ucan_mask;
u16 unused;
__le16 usb_mask;
};
@@ -251,6 +251,27 @@ static int pcan_usb_fd_build_restart_cmd(struct peak_usb_device *dev, u8 *buf)
/* moves the pointer forward */
pc += sizeof(struct pucan_wr_err_cnt);
+ /* add command to switch from ISO to non-ISO mode, if fw allows it */
+ if (dev->can.ctrlmode_supported & CAN_CTRLMODE_FD_NON_ISO) {
+ struct pucan_options *puo = (struct pucan_options *)pc;
+
+ puo->opcode_channel =
+ (dev->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) ?
+ pucan_cmd_opcode_channel(dev,
+ PUCAN_CMD_CLR_DIS_OPTION) :
+ pucan_cmd_opcode_channel(dev, PUCAN_CMD_SET_EN_OPTION);
+
+ puo->options = cpu_to_le16(PUCAN_OPTION_CANDFDISO);
+
+ /* to be sure that no other extended bits will be taken into
+ * account
+ */
+ puo->unused = 0;
+
+ /* moves the pointer forward */
+ pc += sizeof(struct pucan_options);
+ }
+
/* next, go back to operational mode */
cmd = (struct pucan_command *)pc;
cmd->opcode_channel = pucan_cmd_opcode_channel(dev,
@@ -321,21 +342,21 @@ static int pcan_usb_fd_set_filter_std(struct peak_usb_device *dev, int idx,
return pcan_usb_fd_send_cmd(dev, cmd);
}
-/* set/unset notifications filter:
+/* set/unset options
*
- * onoff sets(1)/unset(0) notifications
- * mask each bit defines a kind of notification to set/unset
+ * onoff set(1)/unset(0) options
+ * mask	each bit defines a kind of option to set/unset
*/
-static int pcan_usb_fd_set_filter_ext(struct peak_usb_device *dev,
- bool onoff, u16 ext_mask, u16 usb_mask)
+static int pcan_usb_fd_set_options(struct peak_usb_device *dev,
+ bool onoff, u16 ucan_mask, u16 usb_mask)
{
- struct pcan_ufd_filter_ext *cmd = pcan_usb_fd_cmd_buffer(dev);
+ struct pcan_ufd_options *cmd = pcan_usb_fd_cmd_buffer(dev);
cmd->opcode_channel = pucan_cmd_opcode_channel(dev,
- (onoff) ? PUCAN_CMD_RX_FRAME_ENABLE :
- PUCAN_CMD_RX_FRAME_DISABLE);
+ (onoff) ? PUCAN_CMD_SET_EN_OPTION :
+ PUCAN_CMD_CLR_DIS_OPTION);
- cmd->ext_mask = cpu_to_le16(ext_mask);
+ cmd->ucan_mask = cpu_to_le16(ucan_mask);
cmd->usb_mask = cpu_to_le16(usb_mask);
/* send the command */
@@ -770,9 +791,9 @@ static int pcan_usb_fd_start(struct peak_usb_device *dev)
&pcan_usb_pro_fd);
/* enable USB calibration messages */
- err = pcan_usb_fd_set_filter_ext(dev, 1,
- PUCAN_FLTEXT_ERROR,
- PCAN_UFD_FLTEXT_CALIBRATION);
+ err = pcan_usb_fd_set_options(dev, 1,
+ PUCAN_OPTION_ERROR,
+ PCAN_UFD_FLTEXT_CALIBRATION);
}
pdev->usb_if->dev_opened_count++;
@@ -806,9 +827,9 @@ static int pcan_usb_fd_stop(struct peak_usb_device *dev)
/* turn off special msgs for that interface if no other dev opened */
if (pdev->usb_if->dev_opened_count == 1)
- pcan_usb_fd_set_filter_ext(dev, 0,
- PUCAN_FLTEXT_ERROR,
- PCAN_UFD_FLTEXT_CALIBRATION);
+ pcan_usb_fd_set_options(dev, 0,
+ PUCAN_OPTION_ERROR,
+ PCAN_UFD_FLTEXT_CALIBRATION);
pdev->usb_if->dev_opened_count--;
return 0;
@@ -860,8 +881,14 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
pdev->usb_if->fw_info.fw_version[2],
dev->adapter->ctrl_count);
- /* the currently supported hw is non-ISO */
- dev->can.ctrlmode = CAN_CTRLMODE_FD_NON_ISO;
+ /* check for ability to switch between ISO/non-ISO modes */
+ if (pdev->usb_if->fw_info.fw_version[0] >= 2) {
+ /* firmware >= 2.x supports ISO/non-ISO switching */
+ dev->can.ctrlmode_supported |= CAN_CTRLMODE_FD_NON_ISO;
+ } else {
+ /* firmware < 2.x only supports fixed(!) non-ISO */
+ dev->can.ctrlmode |= CAN_CTRLMODE_FD_NON_ISO;
+ }
/* tell the hardware the can driver is running */
err = pcan_usb_fd_drv_loaded(dev, 1);
@@ -937,9 +964,9 @@ static void pcan_usb_fd_exit(struct peak_usb_device *dev)
if (dev->ctrl_idx == 0) {
/* turn off calibration message if any device were opened */
if (pdev->usb_if->dev_opened_count > 0)
- pcan_usb_fd_set_filter_ext(dev, 0,
- PUCAN_FLTEXT_ERROR,
- PCAN_UFD_FLTEXT_CALIBRATION);
+ pcan_usb_fd_set_options(dev, 0,
+ PUCAN_OPTION_ERROR,
+ PCAN_UFD_FLTEXT_CALIBRATION);
/* tell USB adapter that the driver is being unloaded */
pcan_usb_fd_drv_loaded(dev, 0);
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 11d6e6561df1..15a8190a6f75 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1543,7 +1543,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
{
struct pcnet32_private *lp;
int i, media;
- int fdx, mii, fset, dxsuflo;
+ int fdx, mii, fset, dxsuflo, sram;
int chip_version;
char *chipname;
struct net_device *dev;
@@ -1580,7 +1580,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
}
/* initialize variables */
- fdx = mii = fset = dxsuflo = 0;
+ fdx = mii = fset = dxsuflo = sram = 0;
chip_version = (chip_version >> 12) & 0xffff;
switch (chip_version) {
@@ -1613,6 +1613,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
chipname = "PCnet/FAST III 79C973"; /* PCI */
fdx = 1;
mii = 1;
+ sram = 1;
break;
case 0x2626:
chipname = "PCnet/Home 79C978"; /* PCI */
@@ -1636,6 +1637,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
chipname = "PCnet/FAST III 79C975"; /* PCI */
fdx = 1;
mii = 1;
+ sram = 1;
break;
case 0x2628:
chipname = "PCnet/PRO 79C976";
@@ -1664,6 +1666,31 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
dxsuflo = 1;
}
+ /*
+ * The Am79C973/Am79C975 controllers come with 12K of SRAM
+ * which we can use for the Tx/Rx buffers but most importantly,
+	 * the use of SRAM allows us to use the BCR18:NOUFLO bit to avoid
+ * Tx fifo underflows.
+ */
+ if (sram) {
+ /*
+ * The SRAM is being configured in two steps. First we
+ * set the SRAM size in the BCR25:SRAM_SIZE bits. According
+ * to the datasheet, each bit corresponds to a 512-byte
+ * page so we can have at most 24 pages. The SRAM_SIZE
+ * holds the value of the upper 8 bits of the 16-bit SRAM size.
+	 * The low 8 bits start at 0x00 and end at 0xff. So the
+ * address range is from 0x0000 up to 0x17ff. Therefore,
+ * the SRAM_SIZE is set to 0x17. The next step is to set
+ * the BCR26:SRAM_BND midway through so the Tx and Rx
+ * buffers can share the SRAM equally.
+ */
+ a->write_bcr(ioaddr, 25, 0x17);
+ a->write_bcr(ioaddr, 26, 0xc);
+ /* And finally enable the NOUFLO bit */
+ a->write_bcr(ioaddr, 18, a->read_bcr(ioaddr, 18) | (1 << 11));
+ }
+
dev = alloc_etherdev(sizeof(*lp));
if (!dev) {
ret = -ENOMEM;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 756053c028be..4085c4b31047 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1811,7 +1811,7 @@ struct bnx2x {
int stats_state;
/* used for synchronization of concurrent threads statistics handling */
- spinlock_t stats_lock;
+ struct mutex stats_lock;
/* used by dmae command loader */
struct dmae_command stats_dmae;
@@ -1935,8 +1935,6 @@ struct bnx2x {
int fp_array_size;
u32 dump_preset_idx;
- bool stats_started;
- struct semaphore stats_sema;
u8 phys_port_id[ETH_ALEN];
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 996e215fc324..1ec635f54994 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -129,8 +129,8 @@ struct bnx2x_mac_vals {
u32 xmac_val;
u32 emac_addr;
u32 emac_val;
- u32 umac_addr;
- u32 umac_val;
+ u32 umac_addr[2];
+ u32 umac_val[2];
u32 bmac_addr;
u32 bmac_val[2];
};
@@ -7866,6 +7866,20 @@ int bnx2x_init_hw_func_cnic(struct bnx2x *bp)
return 0;
}
+/* A previous driver DMAE transaction may have occurred when the pre-boot stage
+ * ended and boot began, or when the kdump kernel was loaded. Either case would
+ * invalidate the addresses of the transaction, leaving the was-error bit set in
+ * the PCI glue and causing all hw-to-host PCIe transactions to time out. If this
+ * happened, we want to clear both the interrupt which detected it in the pglueb
+ * and the was-done bit.
+ */
+static void bnx2x_clean_pglue_errors(struct bnx2x *bp)
+{
+ if (!CHIP_IS_E1x(bp))
+ REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
+ 1 << BP_ABS_FUNC(bp));
+}
+
static int bnx2x_init_hw_func(struct bnx2x *bp)
{
int port = BP_PORT(bp);
@@ -7958,8 +7972,7 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
- if (!CHIP_IS_E1x(bp))
- REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
+ bnx2x_clean_pglue_errors(bp);
bnx2x_init_block(bp, BLOCK_ATC, init_phase);
bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
@@ -10141,6 +10154,25 @@ static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
return base + (BP_ABS_FUNC(bp)) * stride;
}
+static bool bnx2x_prev_unload_close_umac(struct bnx2x *bp,
+ u8 port, u32 reset_reg,
+ struct bnx2x_mac_vals *vals)
+{
+ u32 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
+ u32 base_addr;
+
+ if (!(mask & reset_reg))
+ return false;
+
+ BNX2X_DEV_INFO("Disable umac Rx %02x\n", port);
+ base_addr = port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
+ vals->umac_addr[port] = base_addr + UMAC_REG_COMMAND_CONFIG;
+ vals->umac_val[port] = REG_RD(bp, vals->umac_addr[port]);
+ REG_WR(bp, vals->umac_addr[port], 0);
+
+ return true;
+}
+
static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
struct bnx2x_mac_vals *vals)
{
@@ -10149,10 +10181,7 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
u8 port = BP_PORT(bp);
/* reset addresses as they also mark which values were changed */
- vals->bmac_addr = 0;
- vals->umac_addr = 0;
- vals->xmac_addr = 0;
- vals->emac_addr = 0;
+ memset(vals, 0, sizeof(*vals));
reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2);
@@ -10201,15 +10230,11 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
REG_WR(bp, vals->xmac_addr, 0);
mac_stopped = true;
}
- mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
- if (mask & reset_reg) {
- BNX2X_DEV_INFO("Disable umac Rx\n");
- base_addr = BP_PORT(bp) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
- vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
- vals->umac_val = REG_RD(bp, vals->umac_addr);
- REG_WR(bp, vals->umac_addr, 0);
- mac_stopped = true;
- }
+
+ mac_stopped |= bnx2x_prev_unload_close_umac(bp, 0,
+ reset_reg, vals);
+ mac_stopped |= bnx2x_prev_unload_close_umac(bp, 1,
+ reset_reg, vals);
}
if (mac_stopped)
@@ -10505,8 +10530,11 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
/* Close the MAC Rx to prevent BRB from filling up */
bnx2x_prev_unload_close_mac(bp, &mac_vals);
- /* close LLH filters towards the BRB */
+ /* close LLH filters for both ports towards the BRB */
bnx2x_set_rx_filter(&bp->link_params, 0);
+ bp->link_params.port ^= 1;
+ bnx2x_set_rx_filter(&bp->link_params, 0);
+ bp->link_params.port ^= 1;
/* Check if the UNDI driver was previously loaded */
if (bnx2x_prev_is_after_undi(bp)) {
@@ -10553,8 +10581,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
if (mac_vals.xmac_addr)
REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val);
- if (mac_vals.umac_addr)
- REG_WR(bp, mac_vals.umac_addr, mac_vals.umac_val);
+ if (mac_vals.umac_addr[0])
+ REG_WR(bp, mac_vals.umac_addr[0], mac_vals.umac_val[0]);
+ if (mac_vals.umac_addr[1])
+ REG_WR(bp, mac_vals.umac_addr[1], mac_vals.umac_val[1]);
if (mac_vals.emac_addr)
REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val);
if (mac_vals.bmac_addr) {
@@ -10571,26 +10601,6 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
return bnx2x_prev_mcp_done(bp);
}
-/* previous driver DMAE transaction may have occurred when pre-boot stage ended
- * and boot began, or when kdump kernel was loaded. Either case would invalidate
- * the addresses of the transaction, resulting in was-error bit set in the pci
- * causing all hw-to-host pcie transactions to timeout. If this happened we want
- * to clear the interrupt which detected this from the pglueb and the was done
- * bit
- */
-static void bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
-{
- if (!CHIP_IS_E1x(bp)) {
- u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
- if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
- DP(BNX2X_MSG_SP,
- "'was error' bit was found to be set in pglueb upon startup. Clearing\n");
- REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
- 1 << BP_FUNC(bp));
- }
- }
-}
-
static int bnx2x_prev_unload(struct bnx2x *bp)
{
int time_counter = 10;
@@ -10600,7 +10610,7 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
/* clear hw from errors which may have resulted from an interrupted
* dmae transaction.
*/
- bnx2x_prev_interrupted_dmae(bp);
+ bnx2x_clean_pglue_errors(bp);
/* Release previously held locks */
hw_lock_reg = (BP_FUNC(bp) <= 5) ?
@@ -12037,9 +12047,8 @@ static int bnx2x_init_bp(struct bnx2x *bp)
mutex_init(&bp->port.phy_mutex);
mutex_init(&bp->fw_mb_mutex);
mutex_init(&bp->drv_info_mutex);
+ mutex_init(&bp->stats_lock);
bp->drv_info_mng_owner = false;
- spin_lock_init(&bp->stats_lock);
- sema_init(&bp->stats_sema, 1);
INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
@@ -13668,9 +13677,9 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
cancel_delayed_work_sync(&bp->sp_task);
cancel_delayed_work_sync(&bp->period_task);
- spin_lock_bh(&bp->stats_lock);
+ mutex_lock(&bp->stats_lock);
bp->stats_state = STATS_STATE_DISABLED;
- spin_unlock_bh(&bp->stats_lock);
+ mutex_unlock(&bp->stats_lock);
bnx2x_save_statistics(bp);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index e5aca2de1871..cfe3c7695455 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -2238,7 +2238,9 @@ int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
cookie.vf = vf;
cookie.state = VF_ACQUIRED;
- bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
+ rc = bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
+ if (rc)
+ goto op_err;
}
DP(BNX2X_MSG_IOV, "set state to acquired\n");
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index d1608297c773..800ab44a07ce 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -123,36 +123,28 @@ static void bnx2x_dp_stats(struct bnx2x *bp)
*/
static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
- if (!bp->stats_pending) {
- int rc;
+ int rc;
- spin_lock_bh(&bp->stats_lock);
-
- if (bp->stats_pending) {
- spin_unlock_bh(&bp->stats_lock);
- return;
- }
-
- bp->fw_stats_req->hdr.drv_stats_counter =
- cpu_to_le16(bp->stats_counter++);
+ if (bp->stats_pending)
+ return;
- DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n",
- le16_to_cpu(bp->fw_stats_req->hdr.drv_stats_counter));
+ bp->fw_stats_req->hdr.drv_stats_counter =
+ cpu_to_le16(bp->stats_counter++);
- /* adjust the ramrod to include VF queues statistics */
- bnx2x_iov_adjust_stats_req(bp);
- bnx2x_dp_stats(bp);
+ DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n",
+ le16_to_cpu(bp->fw_stats_req->hdr.drv_stats_counter));
- /* send FW stats ramrod */
- rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
- U64_HI(bp->fw_stats_req_mapping),
- U64_LO(bp->fw_stats_req_mapping),
- NONE_CONNECTION_TYPE);
- if (rc == 0)
- bp->stats_pending = 1;
+ /* adjust the ramrod to include VF queues statistics */
+ bnx2x_iov_adjust_stats_req(bp);
+ bnx2x_dp_stats(bp);
- spin_unlock_bh(&bp->stats_lock);
- }
+ /* send FW stats ramrod */
+ rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
+ U64_HI(bp->fw_stats_req_mapping),
+ U64_LO(bp->fw_stats_req_mapping),
+ NONE_CONNECTION_TYPE);
+ if (rc == 0)
+ bp->stats_pending = 1;
}
static void bnx2x_hw_stats_post(struct bnx2x *bp)
@@ -221,7 +213,7 @@ static void bnx2x_stats_comp(struct bnx2x *bp)
*/
/* should be called under stats_sema */
-static void __bnx2x_stats_pmf_update(struct bnx2x *bp)
+static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
struct dmae_command *dmae;
u32 opcode;
@@ -519,7 +511,7 @@ static void bnx2x_func_stats_init(struct bnx2x *bp)
}
/* should be called under stats_sema */
-static void __bnx2x_stats_start(struct bnx2x *bp)
+static void bnx2x_stats_start(struct bnx2x *bp)
{
if (IS_PF(bp)) {
if (bp->port.pmf)
@@ -531,34 +523,13 @@ static void __bnx2x_stats_start(struct bnx2x *bp)
bnx2x_hw_stats_post(bp);
bnx2x_storm_stats_post(bp);
}
-
- bp->stats_started = true;
-}
-
-static void bnx2x_stats_start(struct bnx2x *bp)
-{
- if (down_timeout(&bp->stats_sema, HZ/10))
- BNX2X_ERR("Unable to acquire stats lock\n");
- __bnx2x_stats_start(bp);
- up(&bp->stats_sema);
}
static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
- if (down_timeout(&bp->stats_sema, HZ/10))
- BNX2X_ERR("Unable to acquire stats lock\n");
bnx2x_stats_comp(bp);
- __bnx2x_stats_pmf_update(bp);
- __bnx2x_stats_start(bp);
- up(&bp->stats_sema);
-}
-
-static void bnx2x_stats_pmf_update(struct bnx2x *bp)
-{
- if (down_timeout(&bp->stats_sema, HZ/10))
- BNX2X_ERR("Unable to acquire stats lock\n");
- __bnx2x_stats_pmf_update(bp);
- up(&bp->stats_sema);
+ bnx2x_stats_pmf_update(bp);
+ bnx2x_stats_start(bp);
}
static void bnx2x_stats_restart(struct bnx2x *bp)
@@ -568,11 +539,9 @@ static void bnx2x_stats_restart(struct bnx2x *bp)
*/
if (IS_VF(bp))
return;
- if (down_timeout(&bp->stats_sema, HZ/10))
- BNX2X_ERR("Unable to acquire stats lock\n");
+
bnx2x_stats_comp(bp);
- __bnx2x_stats_start(bp);
- up(&bp->stats_sema);
+ bnx2x_stats_start(bp);
}
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
@@ -1246,18 +1215,12 @@ static void bnx2x_stats_update(struct bnx2x *bp)
{
u32 *stats_comp = bnx2x_sp(bp, stats_comp);
- /* we run update from timer context, so give up
- * if somebody is in the middle of transition
- */
- if (down_trylock(&bp->stats_sema))
+ if (bnx2x_edebug_stats_stopped(bp))
return;
- if (bnx2x_edebug_stats_stopped(bp) || !bp->stats_started)
- goto out;
-
if (IS_PF(bp)) {
if (*stats_comp != DMAE_COMP_VAL)
- goto out;
+ return;
if (bp->port.pmf)
bnx2x_hw_stats_update(bp);
@@ -1267,7 +1230,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
BNX2X_ERR("storm stats were not updated for 3 times\n");
bnx2x_panic();
}
- goto out;
+ return;
}
} else {
/* vf doesn't collect HW statistics, and doesn't get completions
@@ -1281,7 +1244,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
/* vf is done */
if (IS_VF(bp))
- goto out;
+ return;
if (netif_msg_timer(bp)) {
struct bnx2x_eth_stats *estats = &bp->eth_stats;
@@ -1292,9 +1255,6 @@ static void bnx2x_stats_update(struct bnx2x *bp)
bnx2x_hw_stats_post(bp);
bnx2x_storm_stats_post(bp);
-
-out:
- up(&bp->stats_sema);
}
static void bnx2x_port_stats_stop(struct bnx2x *bp)
@@ -1358,12 +1318,7 @@ static void bnx2x_port_stats_stop(struct bnx2x *bp)
static void bnx2x_stats_stop(struct bnx2x *bp)
{
- int update = 0;
-
- if (down_timeout(&bp->stats_sema, HZ/10))
- BNX2X_ERR("Unable to acquire stats lock\n");
-
- bp->stats_started = false;
+ bool update = false;
bnx2x_stats_comp(bp);
@@ -1381,8 +1336,6 @@ static void bnx2x_stats_stop(struct bnx2x *bp)
bnx2x_hw_stats_post(bp);
bnx2x_stats_comp(bp);
}
-
- up(&bp->stats_sema);
}
static void bnx2x_stats_do_nothing(struct bnx2x *bp)
@@ -1410,18 +1363,28 @@ static const struct {
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
- enum bnx2x_stats_state state;
- void (*action)(struct bnx2x *bp);
+ enum bnx2x_stats_state state = bp->stats_state;
+
if (unlikely(bp->panic))
return;
- spin_lock_bh(&bp->stats_lock);
- state = bp->stats_state;
+	/* Statistics updates run from timer context, and we don't want to stop
+ * that context in case someone is in the middle of a transition.
+ * For other events, wait a bit until lock is taken.
+ */
+ if (!mutex_trylock(&bp->stats_lock)) {
+ if (event == STATS_EVENT_UPDATE)
+ return;
+
+ DP(BNX2X_MSG_STATS,
+ "Unlikely stats' lock contention [event %d]\n", event);
+ mutex_lock(&bp->stats_lock);
+ }
+
+ bnx2x_stats_stm[state][event].action(bp);
bp->stats_state = bnx2x_stats_stm[state][event].next_state;
- action = bnx2x_stats_stm[state][event].action;
- spin_unlock_bh(&bp->stats_lock);
- action(bp);
+ mutex_unlock(&bp->stats_lock);
if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
@@ -1998,13 +1961,34 @@ void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
}
}
-void bnx2x_stats_safe_exec(struct bnx2x *bp,
- void (func_to_exec)(void *cookie),
- void *cookie){
- if (down_timeout(&bp->stats_sema, HZ/10))
- BNX2X_ERR("Unable to acquire stats lock\n");
+int bnx2x_stats_safe_exec(struct bnx2x *bp,
+ void (func_to_exec)(void *cookie),
+ void *cookie)
+{
+ int cnt = 10, rc = 0;
+
+ /* Wait for statistics to end [while blocking further requests],
+ * then run supplied function 'safely'.
+ */
+ mutex_lock(&bp->stats_lock);
+
bnx2x_stats_comp(bp);
+ while (bp->stats_pending && cnt--)
+ if (bnx2x_storm_stats_update(bp))
+ usleep_range(1000, 2000);
+ if (bp->stats_pending) {
+ BNX2X_ERR("Failed to wait for stats pending to clear [possibly FW is stuck]\n");
+ rc = -EBUSY;
+ goto out;
+ }
+
func_to_exec(cookie);
- __bnx2x_stats_start(bp);
- up(&bp->stats_sema);
+
+out:
+ /* No need to restart statistics - if they're enabled, the timer
+ * will restart the statistics.
+ */
+ mutex_unlock(&bp->stats_lock);
+
+ return rc;
}
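
The locking rework above hinges on one policy: the timer-driven STATS_EVENT_UPDATE must never sleep waiting for the state machine, while every other event may block briefly until the lock is free. A minimal sketch of that trylock-with-fallback pattern, using generic names rather than the driver's actual types:

    #include <linux/errno.h>
    #include <linux/mutex.h>

    /* Illustrative only: event 0 stands in for STATS_EVENT_UPDATE. */
    static int run_guarded_event(struct mutex *lock, int event,
                                 void (*action)(void *), void *arg)
    {
            if (!mutex_trylock(lock)) {
                    if (event == 0)
                            return -EBUSY;  /* periodic tick: skip, retry next tick */
                    mutex_lock(lock);       /* rare contention: wait it out */
            }
            action(arg);                    /* state transition runs under the lock */
            mutex_unlock(lock);
            return 0;
    }
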
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index 2beceaefdeea..965539a9dabe 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -539,9 +539,9 @@ struct bnx2x;
void bnx2x_memset_stats(struct bnx2x *bp);
void bnx2x_stats_init(struct bnx2x *bp);
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
-void bnx2x_stats_safe_exec(struct bnx2x *bp,
- void (func_to_exec)(void *cookie),
- void *cookie);
+int bnx2x_stats_safe_exec(struct bnx2x *bp,
+ void (func_to_exec)(void *cookie),
+ void *cookie);
/**
* bnx2x_save_statistics - save statistics when unloading.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 97842d03675b..c6ff4890d171 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -376,8 +376,6 @@ enum {
enum {
INGQ_EXTRAS = 2, /* firmware event queue and */
/* forwarded interrupts */
- MAX_EGRQ = MAX_ETH_QSETS*2 + MAX_OFLD_QSETS*2
- + MAX_CTRL_QUEUES + MAX_RDMA_QUEUES + MAX_ISCSI_QUEUES,
MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES
+ MAX_RDMA_CIQS + MAX_ISCSI_QUEUES + INGQ_EXTRAS,
};
@@ -616,11 +614,13 @@ struct sge {
unsigned int idma_qid[2]; /* SGE IDMA Hung Ingress Queue ID */
unsigned int egr_start;
+ unsigned int egr_sz;
unsigned int ingr_start;
- void *egr_map[MAX_EGRQ]; /* qid->queue egress queue map */
- struct sge_rspq *ingr_map[MAX_INGQ]; /* qid->queue ingress queue map */
- DECLARE_BITMAP(starving_fl, MAX_EGRQ);
- DECLARE_BITMAP(txq_maperr, MAX_EGRQ);
+ unsigned int ingr_sz;
+ void **egr_map; /* qid->queue egress queue map */
+ struct sge_rspq **ingr_map; /* qid->queue ingress queue map */
+ unsigned long *starving_fl;
+ unsigned long *txq_maperr;
struct timer_list rx_timer; /* refills starving FLs */
struct timer_list tx_timer; /* checks Tx queues */
};
@@ -1136,6 +1136,8 @@ int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
unsigned int qtimer_val(const struct adapter *adap,
const struct sge_rspq *q);
+
+int t4_init_devlog_params(struct adapter *adapter);
int t4_init_sge_params(struct adapter *adapter);
int t4_init_tp_params(struct adapter *adap);
int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 78854ceb0870..dcb047945290 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -670,9 +670,13 @@ static int cctrl_tbl_show(struct seq_file *seq, void *v)
"0.9375" };
int i;
- u16 incr[NMTUS][NCCTRL_WIN];
+ u16 (*incr)[NCCTRL_WIN];
struct adapter *adap = seq->private;
+ incr = kmalloc(sizeof(*incr) * NMTUS, GFP_KERNEL);
+ if (!incr)
+ return -ENOMEM;
+
t4_read_cong_tbl(adap, incr);
for (i = 0; i < NCCTRL_WIN; ++i) {
@@ -685,6 +689,8 @@ static int cctrl_tbl_show(struct seq_file *seq, void *v)
adap->params.a_wnd[i],
dec_fac[adap->params.b_wnd[i]]);
}
+
+ kfree(incr);
return 0;
}
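
The fix above moves an NMTUS x NCCTRL_WIN table off the kernel stack while keeping two-dimensional indexing, by declaring a pointer to an array of NCCTRL_WIN elements. A reduced sketch of the idiom, with placeholder sizes standing in for the driver's constants:

    #include <linux/slab.h>
    #include <linux/types.h>

    #define ROWS 16                 /* stands in for NMTUS */
    #define COLS 32                 /* stands in for NCCTRL_WIN */

    static int demo_2d_alloc(void)
    {
            u16 (*tbl)[COLS];       /* pointer to an array of COLS u16s */

            tbl = kmalloc(sizeof(*tbl) * ROWS, GFP_KERNEL);
            if (!tbl)
                    return -ENOMEM;

            tbl[3][7] = 42;         /* indexes exactly like u16 tbl[ROWS][COLS] */
            kfree(tbl);
            return 0;
    }
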
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index a22cf932ca35..d92995138f7e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -920,7 +920,7 @@ static void quiesce_rx(struct adapter *adap)
{
int i;
- for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
+ for (i = 0; i < adap->sge.ingr_sz; i++) {
struct sge_rspq *q = adap->sge.ingr_map[i];
if (q && q->handler) {
@@ -934,6 +934,21 @@ static void quiesce_rx(struct adapter *adap)
}
}
+/* Disable interrupt and napi handler */
+static void disable_interrupts(struct adapter *adap)
+{
+ if (adap->flags & FULL_INIT_DONE) {
+ t4_intr_disable(adap);
+ if (adap->flags & USING_MSIX) {
+ free_msix_queue_irqs(adap);
+ free_irq(adap->msix_info[0].vec, adap);
+ } else {
+ free_irq(adap->pdev->irq, adap);
+ }
+ quiesce_rx(adap);
+ }
+}
+
/*
* Enable NAPI scheduling and interrupt generation for all Rx queues.
*/
@@ -941,7 +956,7 @@ static void enable_rx(struct adapter *adap)
{
int i;
- for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
+ for (i = 0; i < adap->sge.ingr_sz; i++) {
struct sge_rspq *q = adap->sge.ingr_map[i];
if (!q)
@@ -970,8 +985,8 @@ static int setup_sge_queues(struct adapter *adap)
int err, msi_idx, i, j;
struct sge *s = &adap->sge;
- bitmap_zero(s->starving_fl, MAX_EGRQ);
- bitmap_zero(s->txq_maperr, MAX_EGRQ);
+ bitmap_zero(s->starving_fl, s->egr_sz);
+ bitmap_zero(s->txq_maperr, s->egr_sz);
if (adap->flags & USING_MSIX)
msi_idx = 1; /* vector 0 is for non-queue interrupts */
@@ -983,6 +998,19 @@ static int setup_sge_queues(struct adapter *adap)
msi_idx = -((int)s->intrq.abs_id + 1);
}
+ /* NOTE: If you add/delete any Ingress/Egress Queue allocations in here,
+ * don't forget to update the following which need to be
+	 * synchronized to any changes here.
+ *
+	 * 1. The calculation of MAX_INGQ in cxgb4.h.
+ *
+ * 2. Update enable_msix/name_msix_vecs/request_msix_queue_irqs
+ * to accommodate any new/deleted Ingress Queues
+ * which need MSI-X Vectors.
+ *
+ * 3. Update sge_qinfo_show() to include information on the
+ * new/deleted queues.
+ */
err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
msi_idx, NULL, fwevtq_handler);
if (err) {
@@ -4244,19 +4272,12 @@ static int cxgb_up(struct adapter *adap)
static void cxgb_down(struct adapter *adapter)
{
- t4_intr_disable(adapter);
cancel_work_sync(&adapter->tid_release_task);
cancel_work_sync(&adapter->db_full_task);
cancel_work_sync(&adapter->db_drop_task);
adapter->tid_release_task_busy = false;
adapter->tid_release_head = NULL;
- if (adapter->flags & USING_MSIX) {
- free_msix_queue_irqs(adapter);
- free_irq(adapter->msix_info[0].vec, adapter);
- } else
- free_irq(adapter->pdev->irq, adapter);
- quiesce_rx(adapter);
t4_sge_stop(adapter);
t4_free_sge_resources(adapter);
adapter->flags &= ~FULL_INIT_DONE;
@@ -4733,8 +4754,9 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
if (ret < 0)
return ret;
- ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
- 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
+ ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, adap->sge.egr_sz, 64,
+ MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF,
+ FW_CMD_CAP_PF);
if (ret < 0)
return ret;
@@ -5088,10 +5110,15 @@ static int adap_init0(struct adapter *adap)
enum dev_state state;
u32 params[7], val[7];
struct fw_caps_config_cmd caps_cmd;
- struct fw_devlog_cmd devlog_cmd;
- u32 devlog_meminfo;
int reset = 1;
+ /* Grab Firmware Device Log parameters as early as possible so we have
+ * access to it for debugging, etc.
+ */
+ ret = t4_init_devlog_params(adap);
+ if (ret < 0)
+ return ret;
+
/* Contact FW, advertising Master capability */
ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state);
if (ret < 0) {
@@ -5169,30 +5196,6 @@ static int adap_init0(struct adapter *adap)
if (ret < 0)
goto bye;
- /* Read firmware device log parameters. We really need to find a way
- * to get these parameters initialized with some default values (which
- * are likely to be correct) for the case where we either don't
- * attache to the firmware or it's crashed when we probe the adapter.
- * That way we'll still be able to perform early firmware startup
- * debugging ... If the request to get the Firmware's Device Log
- * parameters fails, we'll live so we don't make that a fatal error.
- */
- memset(&devlog_cmd, 0, sizeof(devlog_cmd));
- devlog_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_DEVLOG_CMD) |
- FW_CMD_REQUEST_F | FW_CMD_READ_F);
- devlog_cmd.retval_len16 = htonl(FW_LEN16(devlog_cmd));
- ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
- &devlog_cmd);
- if (ret == 0) {
- devlog_meminfo =
- ntohl(devlog_cmd.memtype_devlog_memaddr16_devlog);
- adap->params.devlog.memtype =
- FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
- adap->params.devlog.start =
- FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
- adap->params.devlog.size = ntohl(devlog_cmd.memsize_devlog);
- }
-
/*
* Find out what ports are available to us. Note that we need to do
* this before calling adap_init0_no_config() since it needs nports
@@ -5293,6 +5296,51 @@ static int adap_init0(struct adapter *adap)
adap->tids.nftids = val[4] - val[3] + 1;
adap->sge.ingr_start = val[5];
+ /* qids (ingress/egress) returned from firmware can be anywhere
+ * in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END.
+	 * Hence the driver needs to allocate memory for this range to
+ * store the queue info. Get the highest IQFLINT/EQ index returned
+ * in FW_EQ_*_CMD.alloc command.
+ */
+ params[0] = FW_PARAM_PFVF(EQ_END);
+ params[1] = FW_PARAM_PFVF(IQFLINT_END);
+ ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
+ if (ret < 0)
+ goto bye;
+ adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1;
+ adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1;
+
+ adap->sge.egr_map = kcalloc(adap->sge.egr_sz,
+ sizeof(*adap->sge.egr_map), GFP_KERNEL);
+ if (!adap->sge.egr_map) {
+ ret = -ENOMEM;
+ goto bye;
+ }
+
+ adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz,
+ sizeof(*adap->sge.ingr_map), GFP_KERNEL);
+ if (!adap->sge.ingr_map) {
+ ret = -ENOMEM;
+ goto bye;
+ }
+
+	/* Allocate the memory for the various egress queue bitmaps
+	 * i.e. starving_fl and txq_maperr.
+ */
+ adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
+ sizeof(long), GFP_KERNEL);
+ if (!adap->sge.starving_fl) {
+ ret = -ENOMEM;
+ goto bye;
+ }
+
+ adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
+ sizeof(long), GFP_KERNEL);
+ if (!adap->sge.txq_maperr) {
+ ret = -ENOMEM;
+ goto bye;
+ }
+
params[0] = FW_PARAM_PFVF(CLIP_START);
params[1] = FW_PARAM_PFVF(CLIP_END);
ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
@@ -5501,6 +5549,10 @@ static int adap_init0(struct adapter *adap)
* happened to HW/FW, stop issuing commands.
*/
bye:
+ kfree(adap->sge.egr_map);
+ kfree(adap->sge.ingr_map);
+ kfree(adap->sge.starving_fl);
+ kfree(adap->sge.txq_maperr);
if (ret != -ETIMEDOUT && ret != -EIO)
t4_fw_bye(adap, adap->mbox);
return ret;
@@ -5528,6 +5580,7 @@ static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
netif_carrier_off(dev);
}
spin_unlock(&adap->stats_lock);
+ disable_interrupts(adap);
if (adap->flags & FULL_INIT_DONE)
cxgb_down(adap);
rtnl_unlock();
@@ -5912,6 +5965,10 @@ static void free_some_resources(struct adapter *adapter)
t4_free_mem(adapter->l2t);
t4_free_mem(adapter->tids.tid_tab);
+ kfree(adapter->sge.egr_map);
+ kfree(adapter->sge.ingr_map);
+ kfree(adapter->sge.starving_fl);
+ kfree(adapter->sge.txq_maperr);
disable_msi(adapter);
for_each_port(adapter, i)
@@ -6237,6 +6294,8 @@ static void remove_one(struct pci_dev *pdev)
if (is_offload(adapter))
detach_ulds(adapter);
+ disable_interrupts(adapter);
+
for_each_port(adapter, i)
if (adapter->port[i]->reg_state == NETREG_REGISTERED)
unregister_netdev(adapter->port[i]);
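
The allocations added in adap_init0() above all follow one pattern: size the qid->queue maps and their bitmaps from the firmware-reported queue range instead of a compile-time maximum. A hedged sketch of the bitmap half of that pattern, where qcount plays the role of the egr_sz computed from FW_PARAM_PFVF(EQ_END):

    #include <linux/bitops.h>
    #include <linux/slab.h>

    static unsigned long *alloc_queue_bitmap(unsigned int qcount)
    {
            /* BITS_TO_LONGS() rounds up to whole longs; kcalloc zeroes the
             * memory, so the map starts out empty, matching bitmap_zero().
             */
            return kcalloc(BITS_TO_LONGS(qcount), sizeof(long), GFP_KERNEL);
    }
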
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index b4b9f6048fe7..b688b32c21fe 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2171,7 +2171,7 @@ static void sge_rx_timer_cb(unsigned long data)
struct adapter *adap = (struct adapter *)data;
struct sge *s = &adap->sge;
- for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++)
+ for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
for (m = s->starving_fl[i]; m; m &= m - 1) {
struct sge_eth_rxq *rxq;
unsigned int id = __ffs(m) + i * BITS_PER_LONG;
@@ -2259,7 +2259,7 @@ static void sge_tx_timer_cb(unsigned long data)
struct adapter *adap = (struct adapter *)data;
struct sge *s = &adap->sge;
- for (i = 0; i < ARRAY_SIZE(s->txq_maperr); i++)
+ for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
for (m = s->txq_maperr[i]; m; m &= m - 1) {
unsigned long id = __ffs(m) + i * BITS_PER_LONG;
struct sge_ofld_txq *txq = s->egr_map[id];
@@ -2741,7 +2741,8 @@ void t4_free_sge_resources(struct adapter *adap)
free_rspq_fl(adap, &adap->sge.intrq, NULL);
/* clear the reverse egress queue map */
- memset(adap->sge.egr_map, 0, sizeof(adap->sge.egr_map));
+ memset(adap->sge.egr_map, 0,
+ adap->sge.egr_sz * sizeof(*adap->sge.egr_map));
}
void t4_sge_start(struct adapter *adap)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 1abdfa123c6c..ee394dc68303 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -4459,6 +4459,59 @@ int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
}
/**
+ * t4_init_devlog_params - initialize adapter->params.devlog
+ * @adap: the adapter
+ *
+ * Initialize various fields of the adapter's Firmware Device Log
+ * Parameters structure.
+ */
+int t4_init_devlog_params(struct adapter *adap)
+{
+ struct devlog_params *dparams = &adap->params.devlog;
+ u32 pf_dparams;
+ unsigned int devlog_meminfo;
+ struct fw_devlog_cmd devlog_cmd;
+ int ret;
+
+	/* If we're dealing with newer firmware, the Device Log Parameters
+ * are stored in a designated register which allows us to access the
+ * Device Log even if we can't talk to the firmware.
+ */
+ pf_dparams =
+ t4_read_reg(adap, PCIE_FW_REG(PCIE_FW_PF_A, PCIE_FW_PF_DEVLOG));
+ if (pf_dparams) {
+ unsigned int nentries, nentries128;
+
+ dparams->memtype = PCIE_FW_PF_DEVLOG_MEMTYPE_G(pf_dparams);
+ dparams->start = PCIE_FW_PF_DEVLOG_ADDR16_G(pf_dparams) << 4;
+
+ nentries128 = PCIE_FW_PF_DEVLOG_NENTRIES128_G(pf_dparams);
+ nentries = (nentries128 + 1) * 128;
+ dparams->size = nentries * sizeof(struct fw_devlog_e);
+
+ return 0;
+ }
+
+	/* Otherwise, ask the firmware for its Device Log Parameters.
+ */
+ memset(&devlog_cmd, 0, sizeof(devlog_cmd));
+ devlog_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_DEVLOG_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F);
+ devlog_cmd.retval_len16 = htonl(FW_LEN16(devlog_cmd));
+ ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
+ &devlog_cmd);
+ if (ret)
+ return ret;
+
+ devlog_meminfo = ntohl(devlog_cmd.memtype_devlog_memaddr16_devlog);
+ dparams->memtype = FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
+ dparams->start = FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
+ dparams->size = ntohl(devlog_cmd.memsize_devlog);
+
+ return 0;
+}
+
+/**
* t4_init_sge_params - initialize adap->params.sge
* @adapter: the adapter
*
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 231a725f6d5d..326674b19983 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -63,6 +63,8 @@
#define MC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
#define EDC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+#define PCIE_FW_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+
#define SGE_PF_KDOORBELL_A 0x0
#define QID_S 15
@@ -707,6 +709,7 @@
#define PFNUM_V(x) ((x) << PFNUM_S)
#define PCIE_FW_A 0x30b8
+#define PCIE_FW_PF_A 0x30bc
#define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A 0x5908
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 9b353a88cbda..a4a19e0ec7f5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -101,7 +101,7 @@ enum fw_wr_opcodes {
FW_RI_BIND_MW_WR = 0x18,
FW_RI_FR_NSMR_WR = 0x19,
FW_RI_INV_LSTAG_WR = 0x1a,
- FW_LASTC2E_WR = 0x40
+ FW_LASTC2E_WR = 0x70
};
struct fw_wr_hdr {
@@ -993,6 +993,7 @@ enum fw_memtype_cf {
FW_MEMTYPE_CF_EXTMEM = 0x2,
FW_MEMTYPE_CF_FLASH = 0x4,
FW_MEMTYPE_CF_INTERNAL = 0x5,
+ FW_MEMTYPE_CF_EXTMEM1 = 0x6,
};
struct fw_caps_config_cmd {
@@ -1035,6 +1036,7 @@ enum fw_params_mnem {
FW_PARAMS_MNEM_PFVF = 2, /* function params */
FW_PARAMS_MNEM_REG = 3, /* limited register access */
FW_PARAMS_MNEM_DMAQ = 4, /* dma queue params */
+ FW_PARAMS_MNEM_CHNET = 5, /* chnet params */
FW_PARAMS_MNEM_LAST
};
@@ -3102,7 +3104,8 @@ enum fw_devlog_facility {
FW_DEVLOG_FACILITY_FCOE = 0x2E,
FW_DEVLOG_FACILITY_FOISCSI = 0x30,
FW_DEVLOG_FACILITY_FOFCOE = 0x32,
- FW_DEVLOG_FACILITY_MAX = 0x32,
+ FW_DEVLOG_FACILITY_CHNET = 0x34,
+ FW_DEVLOG_FACILITY_MAX = 0x34,
};
/* log message format */
@@ -3139,4 +3142,36 @@ struct fw_devlog_cmd {
(((x) >> FW_DEVLOG_CMD_MEMADDR16_DEVLOG_S) & \
FW_DEVLOG_CMD_MEMADDR16_DEVLOG_M)
+/* P C I E F W P F 7 R E G I S T E R */
+
+/* PF7 stores the Firmware Device Log parameters which allow Host Drivers to
+ * access the "devlog" without needing to contact firmware.  The encoding is
+ * mostly the same as that returned by the DEVLOG command, except for the
+ * size, which is encoded here as (number of entries / 128) - 1 rather than
+ * as the memory size as is done in the DEVLOG command.  Thus, 0 means 128
+ * and 15 means 2048. This of course in turn constrains the allowed values
+ * for the devlog size ...
+ */
+#define PCIE_FW_PF_DEVLOG 7
+
+#define PCIE_FW_PF_DEVLOG_NENTRIES128_S 28
+#define PCIE_FW_PF_DEVLOG_NENTRIES128_M 0xf
+#define PCIE_FW_PF_DEVLOG_NENTRIES128_V(x) \
+ ((x) << PCIE_FW_PF_DEVLOG_NENTRIES128_S)
+#define PCIE_FW_PF_DEVLOG_NENTRIES128_G(x) \
+ (((x) >> PCIE_FW_PF_DEVLOG_NENTRIES128_S) & \
+ PCIE_FW_PF_DEVLOG_NENTRIES128_M)
+
+#define PCIE_FW_PF_DEVLOG_ADDR16_S 4
+#define PCIE_FW_PF_DEVLOG_ADDR16_M 0xffffff
+#define PCIE_FW_PF_DEVLOG_ADDR16_V(x) ((x) << PCIE_FW_PF_DEVLOG_ADDR16_S)
+#define PCIE_FW_PF_DEVLOG_ADDR16_G(x) \
+ (((x) >> PCIE_FW_PF_DEVLOG_ADDR16_S) & PCIE_FW_PF_DEVLOG_ADDR16_M)
+
+#define PCIE_FW_PF_DEVLOG_MEMTYPE_S 0
+#define PCIE_FW_PF_DEVLOG_MEMTYPE_M 0xf
+#define PCIE_FW_PF_DEVLOG_MEMTYPE_V(x) ((x) << PCIE_FW_PF_DEVLOG_MEMTYPE_S)
+#define PCIE_FW_PF_DEVLOG_MEMTYPE_G(x) \
+ (((x) >> PCIE_FW_PF_DEVLOG_MEMTYPE_S) & PCIE_FW_PF_DEVLOG_MEMTYPE_M)
+
#endif /* _T4FW_INTERFACE_H_ */
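
Given the accessors above, decoding a raw PCIE_FW_PF_DEVLOG register value is plain mask-and-shift plus the size reconstruction the comment describes (field value 0 -> 128 entries, 15 -> 2048). A worked example on an arbitrary sample value:

    /* Sample value only; the driver reads the real one with
     * t4_read_reg(adap, PCIE_FW_REG(PCIE_FW_PF_A, PCIE_FW_PF_DEVLOG)).
     */
    u32 val = 0x10000235;
    unsigned int memtype  = PCIE_FW_PF_DEVLOG_MEMTYPE_G(val);          /* 0x5 */
    unsigned int start    = PCIE_FW_PF_DEVLOG_ADDR16_G(val) << 4;      /* 0x230 */
    unsigned int nentries = (PCIE_FW_PF_DEVLOG_NENTRIES128_G(val) + 1) * 128;
                            /* bits 31:28 = 0x1 -> (1 + 1) * 128 = 256 entries */
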
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
index e2bd3f747858..b9d1cbac0eee 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
@@ -36,13 +36,13 @@
#define __T4FW_VERSION_H__
#define T4FW_VERSION_MAJOR 0x01
-#define T4FW_VERSION_MINOR 0x0C
-#define T4FW_VERSION_MICRO 0x19
+#define T4FW_VERSION_MINOR 0x0D
+#define T4FW_VERSION_MICRO 0x20
#define T4FW_VERSION_BUILD 0x00
#define T5FW_VERSION_MAJOR 0x01
-#define T5FW_VERSION_MINOR 0x0C
-#define T5FW_VERSION_MICRO 0x19
+#define T5FW_VERSION_MINOR 0x0D
+#define T5FW_VERSION_MICRO 0x20
#define T5FW_VERSION_BUILD 0x00
#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 0545f0de1c52..e0d711071afb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1004,7 +1004,7 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
? (tq->pidx - 1)
: (tq->size - 1));
__be64 *src = (__be64 *)&tq->desc[index];
- __be64 __iomem *dst = (__be64 *)(tq->bar2_addr +
+ __be64 __iomem *dst = (__be64 __iomem *)(tq->bar2_addr +
SGE_UDB_WCDOORBELL);
unsigned int count = EQ_UNIT / sizeof(__be64);
@@ -1018,7 +1018,11 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
* DMA.
*/
while (count) {
- writeq(*src, dst);
+ /* the (__force u64) is because the compiler
+ * doesn't understand the endian swizzling
+ * going on
+ */
+ writeq((__force u64)*src, dst);
src++;
dst++;
count--;
@@ -1252,8 +1256,8 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
BUG_ON(DIV_ROUND_UP(ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1);
wr = (void *)&txq->q.desc[txq->q.pidx];
wr->equiq_to_len16 = cpu_to_be32(wr_mid);
- wr->r3[0] = cpu_to_be64(0);
- wr->r3[1] = cpu_to_be64(0);
+ wr->r3[0] = cpu_to_be32(0);
+ wr->r3[1] = cpu_to_be32(0);
skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
end = (u64 *)wr + flits;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 1b5506df35b1..280b4a215849 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -210,10 +210,10 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
if (rpl) {
/* request bit in high-order BE word */
- WARN_ON((be32_to_cpu(*(const u32 *)cmd)
+ WARN_ON((be32_to_cpu(*(const __be32 *)cmd)
& FW_CMD_REQUEST_F) == 0);
get_mbox_rpl(adapter, rpl, size, mbox_data);
- WARN_ON((be32_to_cpu(*(u32 *)rpl)
+ WARN_ON((be32_to_cpu(*(__be32 *)rpl)
& FW_CMD_REQUEST_F) != 0);
}
t4_write_reg(adapter, mbox_ctl,
@@ -484,7 +484,7 @@ int t4_bar2_sge_qregs(struct adapter *adapter,
* o The BAR2 Queue ID.
* o The BAR2 Queue ID Offset into the BAR2 page.
*/
- bar2_page_offset = ((qid >> qpp_shift) << page_shift);
+ bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
bar2_qid = qid & qpp_mask;
bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
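
The (u64) cast added above fixes a classic promotion bug: qid >> qpp_shift has a 32-bit type, so the left shift is performed in 32-bit arithmetic and the high bits are discarded before the assignment to the 64-bit offset ever happens. A minimal demonstration:

    u32 qid = 0x00100000;                   /* bit 20 set */
    unsigned int page_shift = 16;
    u64 bad  = qid << page_shift;           /* shift done in 32 bits: bit 36 lost, bad == 0 */
    u64 good = (u64)qid << page_shift;      /* 0x0000001000000000, as intended */
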
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 27de37aa90af..27b9fe99a9bd 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -354,6 +354,7 @@ struct be_vf_cfg {
u16 vlan_tag;
u32 tx_rate;
u32 plink_tracking;
+ u32 privileges;
};
enum vf_state {
@@ -423,6 +424,7 @@ struct be_adapter {
u8 __iomem *csr; /* CSR BAR used only for BE2/3 */
u8 __iomem *db; /* Door Bell */
+ u8 __iomem *pcicfg; /* On SH,BEx only. Shadow of PCI config space */
struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
struct be_dma_mem mbox_mem;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 36916cfa70f9..7f05f309e935 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1902,15 +1902,11 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
{
int num_eqs, i = 0;
- if (lancer_chip(adapter) && num > 8) {
- while (num) {
- num_eqs = min(num, 8);
- __be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs);
- i += num_eqs;
- num -= num_eqs;
- }
- } else {
- __be_cmd_modify_eqd(adapter, set_eqd, num);
+ while (num) {
+ num_eqs = min(num, 8);
+ __be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs);
+ i += num_eqs;
+ num -= num_eqs;
}
return 0;
@@ -1918,7 +1914,7 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
/* Uses synchronous mcc */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
- u32 num)
+ u32 num, u32 domain)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_vlan_config *req;
@@ -1936,6 +1932,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req),
wrb, NULL);
+ req->hdr.domain = domain;
req->interface_id = if_id;
req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index db761e8e42a3..a7634a3f052a 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -2256,7 +2256,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
int be_cmd_get_fw_ver(struct be_adapter *adapter);
int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num);
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
- u32 num);
+ u32 num, u32 domain);
int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status);
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc);
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc);
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 0a816859aca5..e6b790f0d9dc 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1171,7 +1171,7 @@ static int be_vid_config(struct be_adapter *adapter)
for_each_set_bit(i, adapter->vids, VLAN_N_VID)
vids[num++] = cpu_to_le16(i);
- status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
+ status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
if (status) {
dev_err(dev, "Setting HW VLAN filtering failed\n");
/* Set to VLAN promisc mode as setting VLAN filter failed */
@@ -1380,11 +1380,67 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
return 0;
}
+static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
+{
+ struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
+ u16 vids[BE_NUM_VLANS_SUPPORTED];
+ int vf_if_id = vf_cfg->if_handle;
+ int status;
+
+ /* Enable Transparent VLAN Tagging */
+ status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0);
+ if (status)
+ return status;
+
+ /* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
+ vids[0] = 0;
+ status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
+ if (!status)
+ dev_info(&adapter->pdev->dev,
+ "Cleared guest VLANs on VF%d", vf);
+
+ /* After TVT is enabled, disallow VFs to program VLAN filters */
+ if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
+ status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
+ ~BE_PRIV_FILTMGMT, vf + 1);
+ if (!status)
+ vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
+ }
+ return 0;
+}
+
+static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
+{
+ struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
+ struct device *dev = &adapter->pdev->dev;
+ int status;
+
+ /* Reset Transparent VLAN Tagging. */
+ status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
+ vf_cfg->if_handle, 0);
+ if (status)
+ return status;
+
+ /* Allow VFs to program VLAN filtering */
+ if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
+ status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
+ BE_PRIV_FILTMGMT, vf + 1);
+ if (!status) {
+ vf_cfg->privileges |= BE_PRIV_FILTMGMT;
+ dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
+ }
+ }
+
+ dev_info(dev,
+ "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
+ return 0;
+}
+
static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
- int status = 0;
+ int status;
if (!sriov_enabled(adapter))
return -EPERM;
@@ -1394,24 +1450,19 @@ static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
if (vlan || qos) {
vlan |= qos << VLAN_PRIO_SHIFT;
- if (vf_cfg->vlan_tag != vlan)
- status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
- vf_cfg->if_handle, 0);
+ status = be_set_vf_tvt(adapter, vf, vlan);
} else {
- /* Reset Transparent Vlan Tagging. */
- status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
- vf + 1, vf_cfg->if_handle, 0);
+ status = be_clear_vf_tvt(adapter, vf);
}
if (status) {
dev_err(&adapter->pdev->dev,
- "VLAN %d config on VF %d failed : %#x\n", vlan,
- vf, status);
+ "VLAN %d config on VF %d failed : %#x\n", vlan, vf,
+ status);
return be_cmd_status(status);
}
vf_cfg->vlan_tag = vlan;
-
return 0;
}
@@ -2772,14 +2823,12 @@ void be_detect_error(struct be_adapter *adapter)
}
}
} else {
- pci_read_config_dword(adapter->pdev,
- PCICFG_UE_STATUS_LOW, &ue_lo);
- pci_read_config_dword(adapter->pdev,
- PCICFG_UE_STATUS_HIGH, &ue_hi);
- pci_read_config_dword(adapter->pdev,
- PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
- pci_read_config_dword(adapter->pdev,
- PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
+ ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
+ ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
+ ue_lo_mask = ioread32(adapter->pcicfg +
+ PCICFG_UE_STATUS_LOW_MASK);
+ ue_hi_mask = ioread32(adapter->pcicfg +
+ PCICFG_UE_STATUS_HI_MASK);
ue_lo = (ue_lo & ~ue_lo_mask);
ue_hi = (ue_hi & ~ue_hi_mask);
@@ -3339,7 +3388,6 @@ static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
u32 cap_flags, u32 vf)
{
u32 en_flags;
- int status;
en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
@@ -3347,10 +3395,7 @@ static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
en_flags &= cap_flags;
- status = be_cmd_if_create(adapter, cap_flags, en_flags,
- if_handle, vf);
-
- return status;
+ return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf);
}
static int be_vfs_if_create(struct be_adapter *adapter)
@@ -3368,8 +3413,13 @@ static int be_vfs_if_create(struct be_adapter *adapter)
if (!BE3_chip(adapter)) {
status = be_cmd_get_profile_config(adapter, &res,
vf + 1);
- if (!status)
+ if (!status) {
cap_flags = res.if_cap_flags;
+ /* Prevent VFs from enabling VLAN promiscuous
+ * mode
+ */
+ cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
+ }
}
status = be_if_create(adapter, &vf_cfg->if_handle,
@@ -3403,7 +3453,6 @@ static int be_vf_setup(struct be_adapter *adapter)
struct device *dev = &adapter->pdev->dev;
struct be_vf_cfg *vf_cfg;
int status, old_vfs, vf;
- u32 privileges;
old_vfs = pci_num_vf(adapter->pdev);
@@ -3433,15 +3482,18 @@ static int be_vf_setup(struct be_adapter *adapter)
for_all_vfs(adapter, vf_cfg, vf) {
/* Allow VFs to program MAC/VLAN filters */
- status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
- if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
+ status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
+ vf + 1);
+ if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
status = be_cmd_set_fn_privileges(adapter,
- privileges |
+ vf_cfg->privileges |
BE_PRIV_FILTMGMT,
vf + 1);
- if (!status)
+ if (!status) {
+ vf_cfg->privileges |= BE_PRIV_FILTMGMT;
dev_info(dev, "VF%d has FILTMGMT privilege\n",
vf);
+ }
}
/* Allow full available bandwidth */
@@ -4820,24 +4872,37 @@ static int be_roce_map_pci_bars(struct be_adapter *adapter)
static int be_map_pci_bars(struct be_adapter *adapter)
{
+ struct pci_dev *pdev = adapter->pdev;
u8 __iomem *addr;
if (BEx_chip(adapter) && be_physfn(adapter)) {
- adapter->csr = pci_iomap(adapter->pdev, 2, 0);
+ adapter->csr = pci_iomap(pdev, 2, 0);
if (!adapter->csr)
return -ENOMEM;
}
- addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
+ addr = pci_iomap(pdev, db_bar(adapter), 0);
if (!addr)
goto pci_map_err;
adapter->db = addr;
+ if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
+ if (be_physfn(adapter)) {
+ /* PCICFG is the 2nd BAR in BE2 */
+ addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
+ if (!addr)
+ goto pci_map_err;
+ adapter->pcicfg = addr;
+ } else {
+ adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
+ }
+ }
+
be_roce_map_pci_bars(adapter);
return 0;
pci_map_err:
- dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n");
+ dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
be_unmap_pci_bars(adapter);
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 78e1ce09b1ab..f6a3a7abd468 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1954,6 +1954,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
struct fec_enet_private *fep = netdev_priv(ndev);
struct device_node *node;
int err = -ENXIO, i;
+ u32 mii_speed, holdtime;
/*
* The i.MX28 dual fec interfaces are not equal.
@@ -1991,10 +1992,33 @@ static int fec_enet_mii_init(struct platform_device *pdev)
* Reference Manual has an error on this, and gets fixed on i.MX6Q
* document.
*/
- fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000);
+ mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000);
if (fep->quirks & FEC_QUIRK_ENET_MAC)
- fep->phy_speed--;
- fep->phy_speed <<= 1;
+ mii_speed--;
+ if (mii_speed > 63) {
+ dev_err(&pdev->dev,
+ "fec clock (%lu) to fast to get right mii speed\n",
+ clk_get_rate(fep->clk_ipg));
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ /*
+	 * The i.MX28 and i.MX6 types have another field in the MSCR (aka
+ * MII_SPEED) register that defines the MDIO output hold time. Earlier
+ * versions are RAZ there, so just ignore the difference and write the
+ * register always.
+	 * The minimal hold time according to IEEE 802.3 (clause 22) is 10 ns.
+ * HOLDTIME + 1 is the number of clk cycles the fec is holding the
+ * output.
+ * The HOLDTIME bitfield takes values between 0 and 7 (inclusive).
+ * Given that ceil(clkrate / 5000000) <= 64, the calculation for
+ * holdtime cannot result in a value greater than 3.
+ */
+ holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1;
+
+ fep->phy_speed = mii_speed << 1 | holdtime << 8;
+
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
fep->mii_bus = mdiobus_alloc();
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 357e8b576905..56b774d3a13d 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3893,6 +3893,9 @@ static int ucc_geth_probe(struct platform_device* ofdev)
ugeth->phy_interface = phy_interface;
ugeth->max_speed = max_speed;
+ /* Carrier starts down, phylib will bring it up */
+ netif_carrier_off(dev);
+
err = register_netdev(dev);
if (err) {
if (netif_msg_probe(ugeth))
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 96208f17bb53..2db653225a0e 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2658,16 +2658,11 @@ static int mvneta_stop(struct net_device *dev)
static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct mvneta_port *pp = netdev_priv(dev);
- int ret;
if (!pp->phy_dev)
return -ENOTSUPP;
- ret = phy_mii_ioctl(pp->phy_dev, ifr, cmd);
- if (!ret)
- mvneta_adjust_link(dev);
-
- return ret;
+ return phy_mii_ioctl(pp->phy_dev, ifr, cmd);
}
/* Ethtool methods */
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index a681d7c0bb9f..546ca4226916 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -724,7 +724,8 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
* on the host, we deprecate the error message for this
* specific command/input_mod/opcode_mod/fw-status to be debug.
*/
- if (op == MLX4_CMD_SET_PORT && in_modifier == 1 &&
+ if (op == MLX4_CMD_SET_PORT &&
+ (in_modifier == 1 || in_modifier == 2) &&
op_modifier == 0 && context->fw_status == CMD_STAT_BAD_SIZE)
mlx4_dbg(dev, "command 0x%x failed: fw status = 0x%x\n",
op, context->fw_status);
@@ -1993,7 +1994,6 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
goto reset_slave;
slave_state[slave].vhcr_dma = ((u64) param) << 48;
priv->mfunc.master.slave_state[slave].cookie = 0;
- mutex_init(&priv->mfunc.master.gen_eqe_mutex[slave]);
break;
case MLX4_COMM_CMD_VHCR1:
if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0)
@@ -2225,6 +2225,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
for (i = 0; i < dev->num_slaves; ++i) {
s_state = &priv->mfunc.master.slave_state[i];
s_state->last_cmd = MLX4_COMM_CMD_RESET;
+ mutex_init(&priv->mfunc.master.gen_eqe_mutex[i]);
for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j)
s_state->event_eq[j].eqn = -1;
__raw_writel((__force u32) 0,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index ebce5bb24df9..3485acf03014 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2805,13 +2805,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
netif_carrier_off(dev);
mlx4_en_set_default_moderation(priv);
- err = register_netdev(dev);
- if (err) {
- en_err(priv, "Netdev registration failed for port %d\n", port);
- goto out;
- }
- priv->registered = 1;
-
en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
@@ -2853,6 +2846,14 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);
+ err = register_netdev(dev);
+ if (err) {
+ en_err(priv, "Netdev registration failed for port %d\n", port);
+ goto out;
+ }
+
+ priv->registered = 1;
+
return 0;
out:
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 264bc15c1ff2..6e70ffee8e87 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -153,12 +153,10 @@ void mlx4_gen_slave_eqe(struct work_struct *work)
/* All active slaves need to receive the event */
if (slave == ALL_SLAVES) {
- for (i = 0; i < dev->num_slaves; i++) {
- if (i != dev->caps.function &&
- master->slave_state[i].active)
- if (mlx4_GEN_EQE(dev, i, eqe))
- mlx4_warn(dev, "Failed to generate event for slave %d\n",
- i);
+ for (i = 0; i <= dev->persist->num_vfs; i++) {
+ if (mlx4_GEN_EQE(dev, i, eqe))
+ mlx4_warn(dev, "Failed to generate event for slave %d\n",
+ i);
}
} else {
if (mlx4_GEN_EQE(dev, slave, eqe))
@@ -203,13 +201,11 @@ static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
struct mlx4_eqe *eqe)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- struct mlx4_slave_state *s_slave =
- &priv->mfunc.master.slave_state[slave];
- if (!s_slave->active) {
- /*mlx4_warn(dev, "Trying to pass event to inactive slave\n");*/
+ if (slave < 0 || slave > dev->persist->num_vfs ||
+ slave == dev->caps.function ||
+ !priv->mfunc.master.slave_state[slave].active)
return;
- }
slave_event(dev, slave, eqe);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index d97ca88c55b5..6e413ac4e940 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -3095,6 +3095,12 @@ int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
if (!priv->mfunc.master.slave_state)
return -EINVAL;
+ /* check for slave valid, slave not PF, and slave active */
+ if (slave < 0 || slave > dev->persist->num_vfs ||
+ slave == dev->caps.function ||
+ !priv->mfunc.master.slave_state[slave].active)
+ return 0;
+
event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
/* Create the event only if the slave is registered */
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index 9fb6948e14c6..5cecec282aba 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -4468,10 +4468,16 @@ static int rocker_port_master_changed(struct net_device *dev)
struct net_device *master = netdev_master_upper_dev_get(dev);
int err = 0;
+ /* There are currently three cases handled here:
+ * 1. Joining a bridge
+ * 2. Leaving a previously joined bridge
+ * 3. Other, e.g. being added to or removed from a bond or openvswitch,
+ * in which case nothing is done
+ */
if (master && master->rtnl_link_ops &&
!strcmp(master->rtnl_link_ops->kind, "bridge"))
err = rocker_port_bridge_join(rocker_port, master);
- else
+ else if (rocker_port_is_bridged(rocker_port))
err = rocker_port_bridge_leave(rocker_port);
return err;
diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h
index 924ea98bd531..54549a6223dd 100644
--- a/drivers/net/ipvlan/ipvlan.h
+++ b/drivers/net/ipvlan/ipvlan.h
@@ -114,7 +114,9 @@ unsigned int ipvlan_mac_hash(const unsigned char *addr);
rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb);
int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev);
void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr);
-bool ipvlan_addr_busy(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6);
+struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
+ const void *iaddr, bool is_v6);
+bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6);
struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port,
const void *iaddr, bool is_v6);
void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync);
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index 2a175006028b..b7877a194cfe 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -81,19 +81,20 @@ void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr)
hash = (addr->atype == IPVL_IPV6) ?
ipvlan_get_v6_hash(&addr->ip6addr) :
ipvlan_get_v4_hash(&addr->ip4addr);
- hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]);
+ if (hlist_unhashed(&addr->hlnode))
+ hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]);
}
void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync)
{
- hlist_del_rcu(&addr->hlnode);
+ hlist_del_init_rcu(&addr->hlnode);
if (sync)
synchronize_rcu();
}
-bool ipvlan_addr_busy(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6)
+struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
+ const void *iaddr, bool is_v6)
{
- struct ipvl_port *port = ipvlan->port;
struct ipvl_addr *addr;
list_for_each_entry(addr, &ipvlan->addrs, anode) {
@@ -101,12 +102,21 @@ bool ipvlan_addr_busy(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6)
ipv6_addr_equal(&addr->ip6addr, iaddr)) ||
(!is_v6 && addr->atype == IPVL_IPV4 &&
addr->ip4addr.s_addr == ((struct in_addr *)iaddr)->s_addr))
- return true;
+ return addr;
}
+ return NULL;
+}
+
+bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6)
+{
+ struct ipvl_dev *ipvlan;
- if (ipvlan_ht_addr_lookup(port, iaddr, is_v6))
- return true;
+ ASSERT_RTNL();
+ list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
+ if (ipvlan_find_addr(ipvlan, iaddr, is_v6))
+ return true;
+ }
return false;
}
@@ -192,7 +202,8 @@ static void ipvlan_multicast_frame(struct ipvl_port *port, struct sk_buff *skb,
if (skb->protocol == htons(ETH_P_PAUSE))
return;
- list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
if (local && (ipvlan == in_dev))
continue;
@@ -219,6 +230,7 @@ static void ipvlan_multicast_frame(struct ipvl_port *port, struct sk_buff *skb,
mcast_acct:
ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true);
}
+ rcu_read_unlock();
/* Locally generated? ...Forward a copy to the main-device as
* well. On the RX side we'll ignore it (won't give it to any
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 4f4099d5603d..4fa14208d799 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -505,7 +505,7 @@ static void ipvlan_link_delete(struct net_device *dev, struct list_head *head)
if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) {
list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) {
ipvlan_ht_addr_del(addr, !dev->dismantle);
- list_del_rcu(&addr->anode);
+ list_del(&addr->anode);
}
}
list_del_rcu(&ipvlan->pnode);
@@ -607,7 +607,7 @@ static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
{
struct ipvl_addr *addr;
- if (ipvlan_addr_busy(ipvlan, ip6_addr, true)) {
+ if (ipvlan_addr_busy(ipvlan->port, ip6_addr, true)) {
netif_err(ipvlan, ifup, ipvlan->dev,
"Failed to add IPv6=%pI6c addr for %s intf\n",
ip6_addr, ipvlan->dev->name);
@@ -620,9 +620,13 @@ static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
addr->master = ipvlan;
memcpy(&addr->ip6addr, ip6_addr, sizeof(struct in6_addr));
addr->atype = IPVL_IPV6;
- list_add_tail_rcu(&addr->anode, &ipvlan->addrs);
+ list_add_tail(&addr->anode, &ipvlan->addrs);
ipvlan->ipv6cnt++;
- ipvlan_ht_addr_add(ipvlan, addr);
+ /* If the interface is not up, the address will be added to the hash
+ * list by ipvlan_open.
+ */
+ if (netif_running(ipvlan->dev))
+ ipvlan_ht_addr_add(ipvlan, addr);
return 0;
}
@@ -631,12 +635,12 @@ static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
{
struct ipvl_addr *addr;
- addr = ipvlan_ht_addr_lookup(ipvlan->port, ip6_addr, true);
+ addr = ipvlan_find_addr(ipvlan, ip6_addr, true);
if (!addr)
return;
ipvlan_ht_addr_del(addr, true);
- list_del_rcu(&addr->anode);
+ list_del(&addr->anode);
ipvlan->ipv6cnt--;
WARN_ON(ipvlan->ipv6cnt < 0);
kfree_rcu(addr, rcu);
@@ -675,7 +679,7 @@ static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
{
struct ipvl_addr *addr;
- if (ipvlan_addr_busy(ipvlan, ip4_addr, false)) {
+ if (ipvlan_addr_busy(ipvlan->port, ip4_addr, false)) {
netif_err(ipvlan, ifup, ipvlan->dev,
"Failed to add IPv4=%pI4 on %s intf.\n",
ip4_addr, ipvlan->dev->name);
@@ -688,9 +692,13 @@ static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
addr->master = ipvlan;
memcpy(&addr->ip4addr, ip4_addr, sizeof(struct in_addr));
addr->atype = IPVL_IPV4;
- list_add_tail_rcu(&addr->anode, &ipvlan->addrs);
+ list_add_tail(&addr->anode, &ipvlan->addrs);
ipvlan->ipv4cnt++;
- ipvlan_ht_addr_add(ipvlan, addr);
+ /* If the interface is not up, the address will be added to the hash
+ * list by ipvlan_open.
+ */
+ if (netif_running(ipvlan->dev))
+ ipvlan_ht_addr_add(ipvlan, addr);
ipvlan_set_broadcast_mac_filter(ipvlan, true);
return 0;
@@ -700,12 +708,12 @@ static void ipvlan_del_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
{
struct ipvl_addr *addr;
- addr = ipvlan_ht_addr_lookup(ipvlan->port, ip4_addr, false);
+ addr = ipvlan_find_addr(ipvlan, ip4_addr, false);
if (!addr)
return;
ipvlan_ht_addr_del(addr, true);
- list_del_rcu(&addr->anode);
+ list_del(&addr->anode);
ipvlan->ipv4cnt--;
WARN_ON(ipvlan->ipv4cnt < 0);
if (!ipvlan->ipv4cnt)
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index 5c55f11572ba..75d6f26729a3 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -188,6 +188,8 @@ struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes));
skb_put(skb, sizeof(padbytes));
}
+
+ usbnet_set_skb_tx_stats(skb, 1, 0);
return skb;
}
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 9311a08565be..4545e78840b0 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -522,6 +522,7 @@ static const struct driver_info wwan_info = {
#define DELL_VENDOR_ID 0x413C
#define REALTEK_VENDOR_ID 0x0bda
#define SAMSUNG_VENDOR_ID 0x04e8
+#define LENOVO_VENDOR_ID 0x17ef
static const struct usb_device_id products[] = {
/* BLACKLIST !!
@@ -702,6 +703,13 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
+/* Lenovo Thinkpad USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x7205, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* WHITELIST!!!
*
* CDC Ether uses two interfaces, not necessarily consecutive.
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 80a844e0ae03..c3e4da9e79ca 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1172,17 +1172,17 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
/* return skb */
ctx->tx_curr_skb = NULL;
- dev->net->stats.tx_packets += ctx->tx_curr_frame_num;
/* keep private stats: framing overhead and number of NTBs */
ctx->tx_overhead += skb_out->len - ctx->tx_curr_frame_payload;
ctx->tx_ntbs++;
- /* usbnet has already counted all the framing overhead.
+ /* usbnet will count all the framing overhead by default.
* Adjust the stats so that the tx_bytes counter show real
* payload data instead.
*/
- dev->net->stats.tx_bytes -= skb_out->len - ctx->tx_curr_frame_payload;
+ usbnet_set_skb_tx_stats(skb_out, n,
+ ctx->tx_curr_frame_payload - skb_out->len);
return skb_out;
diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
index fe48f4c51373..1762ad3910b2 100644
--- a/drivers/net/usb/cx82310_eth.c
+++ b/drivers/net/usb/cx82310_eth.c
@@ -46,8 +46,7 @@ enum cx82310_status {
};
#define CMD_PACKET_SIZE 64
-/* first command after power on can take around 8 seconds */
-#define CMD_TIMEOUT 15000
+#define CMD_TIMEOUT 100
#define CMD_REPLY_RETRY 5
#define CX82310_MTU 1514
@@ -78,8 +77,9 @@ static int cx82310_cmd(struct usbnet *dev, enum cx82310_cmd cmd, bool reply,
ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, CMD_EP), buf,
CMD_PACKET_SIZE, &actual_len, CMD_TIMEOUT);
if (ret < 0) {
- dev_err(&dev->udev->dev, "send command %#x: error %d\n",
- cmd, ret);
+ if (cmd != CMD_GET_LINK_STATUS)
+ dev_err(&dev->udev->dev, "send command %#x: error %d\n",
+ cmd, ret);
goto end;
}
@@ -90,8 +90,10 @@ static int cx82310_cmd(struct usbnet *dev, enum cx82310_cmd cmd, bool reply,
buf, CMD_PACKET_SIZE, &actual_len,
CMD_TIMEOUT);
if (ret < 0) {
- dev_err(&dev->udev->dev,
- "reply receive error %d\n", ret);
+ if (cmd != CMD_GET_LINK_STATUS)
+ dev_err(&dev->udev->dev,
+ "reply receive error %d\n",
+ ret);
goto end;
}
if (actual_len > 0)
@@ -134,6 +136,8 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
int ret;
char buf[15];
struct usb_device *udev = dev->udev;
+ u8 link[3];
+ int timeout = 50;
/* avoid ADSL modems - continue only if iProduct is "USB NET CARD" */
if (usb_string(udev, udev->descriptor.iProduct, buf, sizeof(buf)) > 0
@@ -160,6 +164,20 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
if (!dev->partial_data)
return -ENOMEM;
+ /* wait for firmware to become ready (indicated by the link being up) */
+ while (--timeout) {
+ ret = cx82310_cmd(dev, CMD_GET_LINK_STATUS, true, NULL, 0,
+ link, sizeof(link));
+ /* the command can time out during boot - it's not an error */
+ if (!ret && link[0] == 1 && link[2] == 1)
+ break;
+ msleep(500);
+	}
+ if (!timeout) {
+ dev_err(&udev->dev, "firmware not ready in time\n");
+ return -ETIMEDOUT;
+ }
+
/* enable ethernet mode (?) */
ret = cx82310_cmd(dev, CMD_ETHERNET_MODE, true, "\x01", 1, NULL, 0);
if (ret) {
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 438fc6bcaef1..9f7c0ab3b349 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -492,6 +492,7 @@ enum rtl8152_flags {
/* Define these values to match your device */
#define VENDOR_ID_REALTEK 0x0bda
#define VENDOR_ID_SAMSUNG 0x04e8
+#define VENDOR_ID_LENOVO 0x17ef
#define MCU_TYPE_PLA 0x0100
#define MCU_TYPE_USB 0x0000
@@ -4037,6 +4038,7 @@ static struct usb_device_id rtl8152_table[] = {
{REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8152)},
{REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)},
{REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
+ {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)},
{}
};
diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
index b94a0fbb8b3b..953de13267df 100644
--- a/drivers/net/usb/sr9800.c
+++ b/drivers/net/usb/sr9800.c
@@ -144,6 +144,7 @@ static struct sk_buff *sr_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
skb_put(skb, sizeof(padbytes));
}
+ usbnet_set_skb_tx_stats(skb, 1, 0);
return skb;
}
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 449835f4331e..777757ae1973 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1188,8 +1188,7 @@ static void tx_complete (struct urb *urb)
struct usbnet *dev = entry->dev;
if (urb->status == 0) {
- if (!(dev->driver_info->flags & FLAG_MULTI_PACKET))
- dev->net->stats.tx_packets++;
+ dev->net->stats.tx_packets += entry->packets;
dev->net->stats.tx_bytes += entry->length;
} else {
dev->net->stats.tx_errors++;
@@ -1347,7 +1346,19 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
} else
urb->transfer_flags |= URB_ZERO_PACKET;
}
- entry->length = urb->transfer_buffer_length = length;
+ urb->transfer_buffer_length = length;
+
+ if (info->flags & FLAG_MULTI_PACKET) {
+ /* Driver has set number of packets and a length delta.
+ * Calculate the complete length and ensure that it's
+ * positive.
+ */
+ entry->length += length;
+ if (WARN_ON_ONCE(entry->length <= 0))
+ entry->length = length;
+ } else {
+ usbnet_set_skb_tx_stats(skb, 1, length);
+ }
spin_lock_irqsave(&dev->txq.lock, flags);
retval = usb_autopm_get_interface_async(dev->intf);
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index cb366adc820b..f50a6bc5d06e 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -219,12 +219,15 @@ void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif)
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_vif *avp = (void *)vif->drv_priv;
struct ath_buf *bf = avp->av_bcbuf;
+ struct ath_beacon_config *cur_conf = &sc->cur_chan->beacon;
ath_dbg(common, CONFIG, "Removing interface at beacon slot: %d\n",
avp->av_bslot);
tasklet_disable(&sc->bcon_tasklet);
+ cur_conf->enable_beacon &= ~BIT(avp->av_bslot);
+
if (bf && bf->bf_mpdu) {
struct sk_buff *skb = bf->bf_mpdu;
dma_unmap_single(sc->dev, bf->bf_buf_addr,
@@ -521,8 +524,7 @@ static bool ath9k_allow_beacon_config(struct ath_softc *sc,
}
if (sc->sc_ah->opmode == NL80211_IFTYPE_AP) {
- if ((vif->type != NL80211_IFTYPE_AP) ||
- (sc->nbcnvifs > 1)) {
+ if (vif->type != NL80211_IFTYPE_AP) {
ath_dbg(common, CONFIG,
"An AP interface is already present !\n");
return false;
@@ -616,12 +618,14 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
* enabling/disabling SWBA.
*/
if (changed & BSS_CHANGED_BEACON_ENABLED) {
- if (!bss_conf->enable_beacon &&
- (sc->nbcnvifs <= 1)) {
- cur_conf->enable_beacon = false;
- } else if (bss_conf->enable_beacon) {
- cur_conf->enable_beacon = true;
- ath9k_cache_beacon_config(sc, ctx, bss_conf);
+ bool enabled = cur_conf->enable_beacon;
+
+ if (!bss_conf->enable_beacon) {
+ cur_conf->enable_beacon &= ~BIT(avp->av_bslot);
+ } else {
+ cur_conf->enable_beacon |= BIT(avp->av_bslot);
+ if (!enabled)
+ ath9k_cache_beacon_config(sc, ctx, bss_conf);
}
}
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index 2b79a568e803..d23737342f4f 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -54,7 +54,7 @@ struct ath_beacon_config {
u16 dtim_period;
u16 bmiss_timeout;
u8 dtim_count;
- bool enable_beacon;
+ u8 enable_beacon;
bool ibss_creator;
u32 nexttbtt;
u32 intval;
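With enable_beacon widened from bool to u8, each bit tracks whether the vif in the matching beacon slot is beaconing; an illustrative sequence (slot numbers are made up):

	u8 enable_beacon = 0;

	enable_beacon |= BIT(0);   /* vif in slot 0 enables beaconing */
	enable_beacon |= BIT(2);   /* vif in slot 2 enables beaconing */
	enable_beacon &= ~BIT(0);  /* slot 0 torn down; slot 2 still set */
	/* beacon config stays active while enable_beacon != 0 */

This is what lets ath9k_beacon_remove_slot() clear a single slot without disabling beacons for the remaining AP interfaces.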
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 60aa8d71e753..8529014e1a5e 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -424,7 +424,7 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
ah->power_mode = ATH9K_PM_UNDEFINED;
ah->htc_reset_init = true;
- ah->tpc_enabled = true;
+ ah->tpc_enabled = false;
ah->ani_function = ATH9K_ANI_ALL;
if (!AR_SREV_9300_20_OR_LATER(ah))
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/brcm80211/brcmfmac/feature.c
index defb7a44e0bc..7748a1ccf14f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/feature.c
@@ -126,7 +126,8 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_MCHAN, "mchan");
if (drvr->bus_if->wowl_supported)
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_WOWL, "wowl");
- brcmf_feat_iovar_int_set(ifp, BRCMF_FEAT_MBSS, "mbss", 0);
+ if (drvr->bus_if->chip != BRCM_CC_43362_CHIP_ID)
+ brcmf_feat_iovar_int_set(ifp, BRCMF_FEAT_MBSS, "mbss", 0);
/* set chip related quirks */
switch (drvr->bus_if->chip) {
diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h
index a6f22c32a279..3811878ab9cd 100644
--- a/drivers/net/wireless/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/iwlwifi/dvm/dev.h
@@ -708,7 +708,6 @@ struct iwl_priv {
unsigned long reload_jiffies;
int reload_count;
bool ucode_loaded;
- bool init_ucode_run; /* Don't run init uCode again */
u8 plcp_delta_threshold;
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index 47e64e8b9517..cceb026e0793 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -1114,16 +1114,17 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
scd_queues &= ~(BIT(IWL_IPAN_CMD_QUEUE_NUM) |
BIT(IWL_DEFAULT_CMD_QUEUE_NUM));
- if (vif)
- scd_queues &= ~BIT(vif->hw_queue[IEEE80211_AC_VO]);
-
- IWL_DEBUG_TX_QUEUES(priv, "Flushing SCD queues: 0x%x\n", scd_queues);
- if (iwlagn_txfifo_flush(priv, scd_queues)) {
- IWL_ERR(priv, "flush request fail\n");
- goto done;
+ if (drop) {
+ IWL_DEBUG_TX_QUEUES(priv, "Flushing SCD queues: 0x%x\n",
+ scd_queues);
+ if (iwlagn_txfifo_flush(priv, scd_queues)) {
+ IWL_ERR(priv, "flush request fail\n");
+ goto done;
+ }
}
+
IWL_DEBUG_TX_QUEUES(priv, "wait transmit/flush all frames\n");
- iwl_trans_wait_tx_queue_empty(priv->trans, 0xffffffff);
+ iwl_trans_wait_tx_queue_empty(priv->trans, scd_queues);
done:
mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c
index 4dbef7e58c2e..5244e43bfafb 100644
--- a/drivers/net/wireless/iwlwifi/dvm/ucode.c
+++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c
@@ -418,9 +418,6 @@ int iwl_run_init_ucode(struct iwl_priv *priv)
if (!priv->fw->img[IWL_UCODE_INIT].sec[0].len)
return 0;
- if (priv->init_ucode_run)
- return 0;
-
iwl_init_notification_wait(&priv->notif_wait, &calib_wait,
calib_complete, ARRAY_SIZE(calib_complete),
iwlagn_wait_calib, priv);
@@ -440,8 +437,6 @@ int iwl_run_init_ucode(struct iwl_priv *priv)
*/
ret = iwl_wait_notification(&priv->notif_wait, &calib_wait,
UCODE_CALIB_TIMEOUT);
- if (!ret)
- priv->init_ucode_run = true;
goto out;
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 996e7f16adf9..c7154ac42c8c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -1257,6 +1257,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
op->name, err);
#endif
}
+ kfree(pieces);
return;
try_again:
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index efa9688a4cf1..078f24cf4af3 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -1278,6 +1278,9 @@ static void rs_mac80211_tx_status(void *mvm_r,
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ if (!iwl_mvm_sta_from_mac80211(sta)->vif)
+ return;
+
if (!ieee80211_is_data(hdr->frame_control) ||
info->flags & IEEE80211_TX_CTL_NO_ACK)
return;
@@ -2511,6 +2514,14 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct iwl_lq_sta *lq_sta = mvm_sta;
+ if (sta && !iwl_mvm_sta_from_mac80211(sta)->vif) {
+ /* if vif isn't initialized, mvm doesn't know about
+ * this station, so don't do anything with it
+ */
+ sta = NULL;
+ mvm_sta = NULL;
+ }
+
/* TODO: handle rate_idx_mask and rate_idx_mcs_mask */
/* Treat uninitialized rate scaling data same as non-existing. */
@@ -2827,6 +2838,9 @@ static void rs_rate_update(void *mvm_r,
(struct iwl_op_mode *)mvm_r;
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ if (!iwl_mvm_sta_from_mac80211(sta)->vif)
+ return;
+
/* Stop any ongoing aggregations as rs starts off assuming no agg */
for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
ieee80211_stop_tx_ba_session(sta, tid);
@@ -3587,9 +3601,15 @@ static ssize_t iwl_dbgfs_ss_force_write(struct iwl_lq_sta *lq_sta, char *buf,
MVM_DEBUGFS_READ_WRITE_FILE_OPS(ss_force, 32);
-static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir)
+static void rs_add_debugfs(void *mvm, void *priv_sta, struct dentry *dir)
{
- struct iwl_lq_sta *lq_sta = mvm_sta;
+ struct iwl_lq_sta *lq_sta = priv_sta;
+ struct iwl_mvm_sta *mvmsta;
+
+ mvmsta = container_of(lq_sta, struct iwl_mvm_sta, lq_sta);
+
+ if (!mvmsta->vif)
+ return;
debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
lq_sta, &rs_sta_dbgfs_scale_table_ops);
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index f8d6f306dd76..4b81c0bf63b0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -197,6 +197,8 @@ iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm,
struct iwl_time_event_notif *notif)
{
if (!le32_to_cpu(notif->status)) {
+ if (te_data->vif->type == NL80211_IFTYPE_STATION)
+ ieee80211_connection_loss(te_data->vif);
IWL_DEBUG_TE(mvm, "CSA time event failed to start\n");
iwl_mvm_te_clear_data(mvm, te_data);
return;
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 07304e1fd64a..96a05406babf 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -949,8 +949,10 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
mvmsta = iwl_mvm_sta_from_mac80211(sta);
tid_data = &mvmsta->tid_data[tid];
- if (WARN_ONCE(tid_data->txq_id != scd_flow, "Q %d, tid %d, flow %d",
- tid_data->txq_id, tid, scd_flow)) {
+ if (tid_data->txq_id != scd_flow) {
+ IWL_ERR(mvm,
+ "invalid BA notification: Q %d, tid %d, flow %d\n",
+ tid_data->txq_id, tid, scd_flow);
rcu_read_unlock();
return 0;
}
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index dbd6bcf52205..686dd301cd53 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -368,10 +368,12 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
/* 3165 Series */
{IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)},
{IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x3165, 0x4210, iwl3165_2ac_cfg)},
{IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)},
{IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x3166, 0x4310, iwl3165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x3166, 0x4210, iwl3165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x3165, 0x8010, iwl3165_2ac_cfg)},
/* 7265 Series */
{IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index a62170ea0481..8c45cf44ce24 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -1124,12 +1124,22 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
/*This is for new trx flow*/
struct rtl_tx_buffer_desc *pbuffer_desc = NULL;
u8 temp_one = 1;
+ u8 *entry;
memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
ring = &rtlpci->tx_ring[BEACON_QUEUE];
pskb = __skb_dequeue(&ring->queue);
- if (pskb)
+ if (rtlpriv->use_new_trx_flow)
+ entry = (u8 *)(&ring->buffer_desc[ring->idx]);
+ else
+ entry = (u8 *)(&ring->desc[ring->idx]);
+ if (pskb) {
+ pci_unmap_single(rtlpci->pdev,
+ rtlpriv->cfg->ops->get_desc(
+ (u8 *)entry, true, HW_DESC_TXBUFF_ADDR),
+ pskb->len, PCI_DMA_TODEVICE);
kfree_skb(pskb);
+ }
/*NB: the beacon data buffer must be 32-bit aligned. */
pskb = ieee80211_beacon_get(hw, mac->vif);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index e9b960f0ff32..720aaf6313d2 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1008,8 +1008,7 @@ err:
static int xennet_change_mtu(struct net_device *dev, int mtu)
{
- int max = xennet_can_sg(dev) ?
- XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER : ETH_DATA_LEN;
+ int max = xennet_can_sg(dev) ? XEN_NETIF_MAX_TX_SIZE : ETH_DATA_LEN;
if (mtu > max)
return -EINVAL;
@@ -1279,8 +1278,6 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
netdev->ethtool_ops = &xennet_ethtool_ops;
SET_NETDEV_DEV(netdev, &dev->dev);
- netif_set_gso_max_size(netdev, XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER);
-
np->netdev = netdev;
netif_carrier_off(netdev);
diff --git a/drivers/of/address.c b/drivers/of/address.c
index ad2906919d45..78a7dcbec7d8 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -450,12 +450,17 @@ static struct of_bus *of_match_bus(struct device_node *np)
return NULL;
}
-static int of_empty_ranges_quirk(void)
+static int of_empty_ranges_quirk(struct device_node *np)
{
if (IS_ENABLED(CONFIG_PPC)) {
- /* To save cycles, we cache the result */
+ /* To save cycles, we cache the result for the global "Mac" setting */
static int quirk_state = -1;
+ /* PA-SEMI sdc DT bug */
+ if (of_device_is_compatible(np, "1682m-sdc"))
+ return true;
+
+ /* Cache the quirk result */
if (quirk_state < 0)
quirk_state =
of_machine_is_compatible("Power Macintosh") ||
@@ -490,7 +495,7 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus,
* This code is only enabled on powerpc. --gcl
*/
ranges = of_get_property(parent, rprop, &rlen);
- if (ranges == NULL && !of_empty_ranges_quirk()) {
+ if (ranges == NULL && !of_empty_ranges_quirk(parent)) {
pr_debug("OF: no ranges; cannot translate\n");
return 1;
}
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index 1f4ea6f2d910..2e9f84fdd9ce 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -342,7 +342,7 @@ static const struct irq_domain_ops msi_domain_ops = {
.map = dw_pcie_msi_map,
};
-int __init dw_pcie_host_init(struct pcie_port *pp)
+int dw_pcie_host_init(struct pcie_port *pp)
{
struct device_node *np = pp->dev->of_node;
struct platform_device *pdev = to_platform_device(pp->dev);
diff --git a/drivers/pci/host/pcie-spear13xx.c b/drivers/pci/host/pcie-spear13xx.c
index 866465fd3dbf..020d78890719 100644
--- a/drivers/pci/host/pcie-spear13xx.c
+++ b/drivers/pci/host/pcie-spear13xx.c
@@ -269,7 +269,7 @@ static struct pcie_host_ops spear13xx_pcie_host_ops = {
.host_init = spear13xx_pcie_host_init,
};
-static int __init spear13xx_add_pcie_port(struct pcie_port *pp,
+static int spear13xx_add_pcie_port(struct pcie_port *pp,
struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -299,7 +299,7 @@ static int __init spear13xx_add_pcie_port(struct pcie_port *pp,
return 0;
}
-static int __init spear13xx_pcie_probe(struct platform_device *pdev)
+static int spear13xx_pcie_probe(struct platform_device *pdev)
{
struct spear13xx_pcie *spear13xx_pcie;
struct pcie_port *pp;
@@ -370,7 +370,7 @@ static const struct of_device_id spear13xx_pcie_of_match[] = {
};
MODULE_DEVICE_TABLE(of, spear13xx_pcie_of_match);
-static struct platform_driver spear13xx_pcie_driver __initdata = {
+static struct platform_driver spear13xx_pcie_driver = {
.probe = spear13xx_pcie_probe,
.driver = {
.name = "spear-pcie",
diff --git a/drivers/pci/hotplug/cpci_hotplug_pci.c b/drivers/pci/hotplug/cpci_hotplug_pci.c
index 7d48ecae6695..788db48dbbad 100644
--- a/drivers/pci/hotplug/cpci_hotplug_pci.c
+++ b/drivers/pci/hotplug/cpci_hotplug_pci.c
@@ -286,11 +286,12 @@ int cpci_configure_slot(struct slot *slot)
}
parent = slot->dev->bus;
- list_for_each_entry(dev, &parent->devices, bus_list)
+ list_for_each_entry(dev, &parent->devices, bus_list) {
if (PCI_SLOT(dev->devfn) != PCI_SLOT(slot->devfn))
continue;
if (pci_is_bridge(dev))
pci_hp_add_bridge(dev);
+ }
pci_assign_unassigned_bridge_resources(parent->self);
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 489063987325..c93fbe76d281 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -248,6 +248,9 @@ int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp)
acpi_handle handle, phandle;
struct pci_bus *pbus;
+ if (acpi_pci_disabled)
+ return -ENODEV;
+
handle = NULL;
for (pbus = dev->bus; pbus; pbus = pbus->parent) {
handle = acpi_pci_get_bridge_handle(pbus);
diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c
index c6849d9e86ce..167fe411ce2e 100644
--- a/drivers/pci/pcie/aer/aerdrv_errprint.c
+++ b/drivers/pci/pcie/aer/aerdrv_errprint.c
@@ -132,16 +132,8 @@ static const char *aer_agent_string[] = {
static void __print_tlp_header(struct pci_dev *dev,
struct aer_header_log_regs *t)
{
- unsigned char *tlp = (unsigned char *)&t;
-
- dev_err(&dev->dev, " TLP Header:"
- " %02x%02x%02x%02x %02x%02x%02x%02x"
- " %02x%02x%02x%02x %02x%02x%02x%02x\n",
- *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp,
- *(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4),
- *(tlp + 11), *(tlp + 10), *(tlp + 9),
- *(tlp + 8), *(tlp + 15), *(tlp + 14),
- *(tlp + 13), *(tlp + 12));
+ dev_err(&dev->dev, " TLP Header: %08x %08x %08x %08x\n",
+ t->dw0, t->dw1, t->dw2, t->dw3);
}
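The replacement also fixes a real bug, not just verbosity: the old code cast &t, the address of the pointer argument itself, so the dump printed stack bytes rather than the logged TLP header. Printing the four dwords directly works because the header log is already stored as 32-bit fields; assumed layout, per include/linux/aer.h:

	/* Assumed struct layout (include/linux/aer.h): the TLP header log
	 * is four 32-bit registers, so no per-byte reshuffling is needed.
	 */
	struct aer_header_log_regs {
		unsigned int dw0;
		unsigned int dw1;
		unsigned int dw2;
		unsigned int dw3;
	};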
static void __aer_print_error(struct pci_dev *dev,
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index 9205f433573c..18198316b6cf 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -1572,6 +1572,10 @@ static int palmas_regulators_probe(struct platform_device *pdev)
if (!pmic)
return -ENOMEM;
+ if (of_device_is_compatible(node, "ti,tps659038-pmic"))
+ palmas_generic_regs_info[PALMAS_REG_REGEN2].ctrl_addr =
+ TPS659038_REGEN2_CTRL;
+
pmic->dev = &pdev->dev;
pmic->palmas = palmas;
palmas->pmic = pmic;
diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c
index e2436d140175..3a6fd3a8a2ec 100644
--- a/drivers/rtc/rtc-mrst.c
+++ b/drivers/rtc/rtc-mrst.c
@@ -413,8 +413,8 @@ static void rtc_mrst_do_remove(struct device *dev)
mrst->dev = NULL;
}
-#ifdef CONFIG_PM
-static int mrst_suspend(struct device *dev, pm_message_t mesg)
+#ifdef CONFIG_PM_SLEEP
+static int mrst_suspend(struct device *dev)
{
struct mrst_rtc *mrst = dev_get_drvdata(dev);
unsigned char tmp;
@@ -453,7 +453,7 @@ static int mrst_suspend(struct device *dev, pm_message_t mesg)
*/
static inline int mrst_poweroff(struct device *dev)
{
- return mrst_suspend(dev, PMSG_HIBERNATE);
+ return mrst_suspend(dev);
}
static int mrst_resume(struct device *dev)
@@ -490,9 +490,11 @@ static int mrst_resume(struct device *dev)
return 0;
}
+static SIMPLE_DEV_PM_OPS(mrst_pm_ops, mrst_suspend, mrst_resume);
+#define MRST_PM_OPS (&mrst_pm_ops)
+
#else
-#define mrst_suspend NULL
-#define mrst_resume NULL
+#define MRST_PM_OPS NULL
static inline int mrst_poweroff(struct device *dev)
{
@@ -529,9 +531,8 @@ static struct platform_driver vrtc_mrst_platform_driver = {
.remove = vrtc_mrst_platform_remove,
.shutdown = vrtc_mrst_platform_shutdown,
.driver = {
- .name = (char *) driver_name,
- .suspend = mrst_suspend,
- .resume = mrst_resume,
+ .name = driver_name,
+ .pm = MRST_PM_OPS,
}
};
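The rtc-mrst hunks follow the stock dev_pm_ops conversion recipe; a generic sketch with placeholder names (foo_*), not rtc-mrst specifics:

	#ifdef CONFIG_PM_SLEEP
	static int foo_suspend(struct device *dev) { /* quiesce hw */ return 0; }
	static int foo_resume(struct device *dev)  { /* restore hw */ return 0; }

	/* expands to a struct dev_pm_ops wiring suspend/resume into the
	 * suspend, freeze and poweroff phases */
	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
	#define FOO_PM_OPS (&foo_pm_ops)
	#else
	#define FOO_PM_OPS NULL
	#endif

	static struct platform_driver foo_driver = {
		.driver = {
			.name = "foo",
			.pm   = FOO_PM_OPS,
		},
	};

The PMSG_HIBERNATE argument disappears because dev_pm_ops callbacks are phase-specific, so mrst_poweroff() can simply call mrst_suspend(dev).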
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index a7cc61837818..923a2b5a2439 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -5734,9 +5734,9 @@ free_port:
hba_free:
if (phba->msix_enabled)
pci_disable_msix(phba->pcidev);
- iscsi_host_remove(phba->shost);
pci_dev_put(phba->pcidev);
iscsi_host_free(phba->shost);
+ pci_set_drvdata(pcidev, NULL);
disable_pci:
pci_disable_device(pcidev);
return ret;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 9219953ee949..d9afc51af7d3 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -6815,7 +6815,8 @@ static struct ata_port_operations ipr_sata_ops = {
};
static struct ata_port_info sata_port_info = {
- .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
+ .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
+ ATA_FLAG_SAS_HOST,
.pio_mask = ATA_PIO4_ONLY,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 932d9cc98d2f..9c706d8c1441 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -547,7 +547,8 @@ static struct ata_port_operations sas_sata_ops = {
};
static struct ata_port_info sata_port_info = {
- .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ,
+ .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ |
+ ATA_FLAG_SAS_HOST,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 54d7a6cbb98a..b1a263137a23 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1311,9 +1311,11 @@ scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
"rejecting I/O to dead device\n");
ret = BLKPREP_KILL;
break;
- case SDEV_QUIESCE:
case SDEV_BLOCK:
case SDEV_CREATED_BLOCK:
+ ret = BLKPREP_DEFER;
+ break;
+ case SDEV_QUIESCE:
/*
* If the device is blocked we defer normal commands.
*/
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index 3ce39d10fafb..4f8c798e0633 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -108,7 +108,8 @@ static void dw_spi_dma_tx_done(void *arg)
{
struct dw_spi *dws = arg;
- if (test_and_clear_bit(TX_BUSY, &dws->dma_chan_busy) & BIT(RX_BUSY))
+ clear_bit(TX_BUSY, &dws->dma_chan_busy);
+ if (test_bit(RX_BUSY, &dws->dma_chan_busy))
return;
dw_spi_xfer_done(dws);
}
@@ -156,7 +157,8 @@ static void dw_spi_dma_rx_done(void *arg)
{
struct dw_spi *dws = arg;
- if (test_and_clear_bit(RX_BUSY, &dws->dma_chan_busy) & BIT(TX_BUSY))
+ clear_bit(RX_BUSY, &dws->dma_chan_busy);
+ if (test_bit(TX_BUSY, &dws->dma_chan_busy))
return;
dw_spi_xfer_done(dws);
}
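The old condition here could never do what it meant: test_and_clear_bit() returns only the previous value of the bit it clears (0 or 1), so masking that with BIT() of the *other* channel's flag says nothing about that channel's state. The fixed sequence, annotated:

	clear_bit(TX_BUSY, &dws->dma_chan_busy);    /* this side finished */
	if (test_bit(RX_BUSY, &dws->dma_chan_busy)) /* peer still running? */
		return;                             /* let its callback finish */
	dw_spi_xfer_done(dws);                      /* both idle: complete */

Each callback clears its own flag and completes the transfer only once the peer's flag is also clear.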
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index ff9cdbdb6672..2b2c359f5a50 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -498,7 +498,7 @@ static int spi_qup_probe(struct platform_device *pdev)
struct resource *res;
struct device *dev;
void __iomem *base;
- u32 max_freq, iomode;
+ u32 max_freq, iomode, num_cs;
int ret, irq, size;
dev = &pdev->dev;
@@ -550,10 +550,11 @@ static int spi_qup_probe(struct platform_device *pdev)
}
/* use num-cs unless it is absent or out of range */
- if (of_property_read_u16(dev->of_node, "num-cs",
- &master->num_chipselect) ||
- (master->num_chipselect > SPI_NUM_CHIPSELECTS))
+ if (of_property_read_u32(dev->of_node, "num-cs", &num_cs) ||
+ num_cs > SPI_NUM_CHIPSELECTS)
master->num_chipselect = SPI_NUM_CHIPSELECTS;
+ else
+ master->num_chipselect = num_cs;
master->bus_num = pdev->id;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
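The underlying type bug: "num-cs" is a standard 32-bit DT cell, while master->num_chipselect is a u16, and of_property_read_u16() expects a /bits/ 16 value, so it would otherwise pick up the wrong half of the cell. Reading into a local u32 and range-checking before the narrowing assignment is the usual idiom:

	u32 num_cs;

	/* read the full 32-bit cell, then validate before narrowing */
	if (of_property_read_u32(dev->of_node, "num-cs", &num_cs) ||
	    num_cs > SPI_NUM_CHIPSELECTS)
		master->num_chipselect = SPI_NUM_CHIPSELECTS; /* fallback */
	else
		master->num_chipselect = num_cs;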
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index c64a3e59fce3..57a195041dc7 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1105,13 +1105,14 @@ void spi_finalize_current_message(struct spi_master *master)
"failed to unprepare message: %d\n", ret);
}
}
+
+ trace_spi_message_done(mesg);
+
master->cur_msg_prepared = false;
mesg->state = NULL;
if (mesg->complete)
mesg->complete(mesg->context);
-
- trace_spi_message_done(mesg);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);
diff --git a/drivers/staging/iio/Kconfig b/drivers/staging/iio/Kconfig
index 24183028bd71..6d5b38d69578 100644
--- a/drivers/staging/iio/Kconfig
+++ b/drivers/staging/iio/Kconfig
@@ -38,6 +38,7 @@ config IIO_SIMPLE_DUMMY_EVENTS
config IIO_SIMPLE_DUMMY_BUFFER
bool "Buffered capture support"
select IIO_BUFFER
+ select IIO_TRIGGER
select IIO_KFIFO_BUF
help
Add buffered data capture to the simple dummy driver.
diff --git a/drivers/staging/iio/magnetometer/hmc5843_core.c b/drivers/staging/iio/magnetometer/hmc5843_core.c
index fd171d8b38fb..90cc18b703cf 100644
--- a/drivers/staging/iio/magnetometer/hmc5843_core.c
+++ b/drivers/staging/iio/magnetometer/hmc5843_core.c
@@ -592,6 +592,7 @@ int hmc5843_common_probe(struct device *dev, struct regmap *regmap,
mutex_init(&data->lock);
indio_dev->dev.parent = dev;
+ indio_dev->name = dev->driver->name;
indio_dev->info = &hmc5843_info;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = data->variant->channels;
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 2accb6e47beb..77d64251af40 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1181,7 +1181,7 @@ iscsit_handle_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
* traditional iSCSI block I/O.
*/
if (iscsit_allocate_iovecs(cmd) < 0) {
- return iscsit_add_reject_cmd(cmd,
+ return iscsit_reject_cmd(cmd,
ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
}
immed_data = cmd->immediate_data;
@@ -3468,6 +3468,7 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
tpg_np_list) {
struct iscsi_np *np = tpg_np->tpg_np;
bool inaddr_any = iscsit_check_inaddr_any(np);
+ char *fmt_str;
if (np->np_network_transport != network_transport)
continue;
@@ -3495,8 +3496,12 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
}
}
- len = sprintf(buf, "TargetAddress="
- "%s:%hu,%hu",
+ if (np->np_sockaddr.ss_family == AF_INET6)
+ fmt_str = "TargetAddress=[%s]:%hu,%hu";
+ else
+ fmt_str = "TargetAddress=%s:%hu,%hu";
+
+ len = sprintf(buf, fmt_str,
inaddr_any ? conn->local_ip : np->np_ip,
np->np_port,
tpg->tpgt);
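Choosing the format string by address family produces RFC 3986-style bracketed literals for IPv6; illustrative output (addresses made up):

	/* AF_INET6: TargetAddress=[2001:db8::1]:3260,1
	 * AF_INET:  TargetAddress=192.0.2.1:3260,1
	 * Without the brackets an initiator cannot tell the colons of the
	 * IPv6 address apart from the ':port' separator.
	 */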
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 79b4ec3ca2db..7faa6aef9a4d 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -781,8 +781,8 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
}
if (flag &&
dev->transport->get_write_cache) {
- pr_err("emulate_fua_write not supported for this device\n");
- return -EINVAL;
+ pr_warn("emulate_fua_write not supported for this device, ignoring\n");
+ return 0;
}
if (dev->export_count) {
pr_err("emulate_fua_write cannot be changed with active"
diff --git a/drivers/thermal/st/st_thermal.c b/drivers/thermal/st/st_thermal.c
index d1ec5804c0bb..76c515dd802b 100644
--- a/drivers/thermal/st/st_thermal.c
+++ b/drivers/thermal/st/st_thermal.c
@@ -25,7 +25,7 @@
* Function to allocate regfields which are common
* between syscfg and memory mapped based sensors
*/
-int st_thermal_alloc_regfields(struct st_thermal_sensor *sensor)
+static int st_thermal_alloc_regfields(struct st_thermal_sensor *sensor)
{
struct device *dev = sensor->dev;
struct regmap *regmap = sensor->regmap;
diff --git a/drivers/thermal/st/st_thermal_memmap.c b/drivers/thermal/st/st_thermal_memmap.c
index 067bfcdb91d6..fc0c9e198710 100644
--- a/drivers/thermal/st/st_thermal_memmap.c
+++ b/drivers/thermal/st/st_thermal_memmap.c
@@ -157,7 +157,7 @@ static const struct st_thermal_sensor_ops st_mmap_sensor_ops = {
};
/* Compatible device data stih416 mpe thermal sensor */
-const struct st_thermal_compat_data st_416mpe_cdata = {
+static const struct st_thermal_compat_data st_416mpe_cdata = {
.reg_fields = st_mmap_thermal_regfields,
.ops = &st_mmap_sensor_ops,
.calibration_val = 14,
@@ -166,7 +166,7 @@ const struct st_thermal_compat_data st_416mpe_cdata = {
};
/* Compatible device data stih407 thermal sensor */
-const struct st_thermal_compat_data st_407_cdata = {
+static const struct st_thermal_compat_data st_407_cdata = {
.reg_fields = st_mmap_thermal_regfields,
.ops = &st_mmap_sensor_ops,
.calibration_val = 16,
@@ -174,19 +174,19 @@ const struct st_thermal_compat_data st_407_cdata = {
.crit_temp = 120,
};
-static struct of_device_id st_mmap_thermal_of_match[] = {
+static const struct of_device_id st_mmap_thermal_of_match[] = {
{ .compatible = "st,stih416-mpe-thermal", .data = &st_416mpe_cdata },
{ .compatible = "st,stih407-thermal", .data = &st_407_cdata },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, st_mmap_thermal_of_match);
-int st_mmap_probe(struct platform_device *pdev)
+static int st_mmap_probe(struct platform_device *pdev)
{
return st_thermal_register(pdev, st_mmap_thermal_of_match);
}
-int st_mmap_remove(struct platform_device *pdev)
+static int st_mmap_remove(struct platform_device *pdev)
{
return st_thermal_unregister(pdev);
}
diff --git a/drivers/thermal/st/st_thermal_syscfg.c b/drivers/thermal/st/st_thermal_syscfg.c
index 26d36a242bb8..3df5b7890703 100644
--- a/drivers/thermal/st/st_thermal_syscfg.c
+++ b/drivers/thermal/st/st_thermal_syscfg.c
@@ -104,7 +104,7 @@ static const struct st_thermal_sensor_ops st_syscfg_sensor_ops = {
};
/* Compatible device data for stih415 sas thermal sensor */
-const struct st_thermal_compat_data st_415sas_cdata = {
+static const struct st_thermal_compat_data st_415sas_cdata = {
.sys_compat = "st,stih415-front-syscfg",
.reg_fields = st_415sas_regfields,
.ops = &st_syscfg_sensor_ops,
@@ -114,7 +114,7 @@ const struct st_thermal_compat_data st_415sas_cdata = {
};
/* Compatible device data for stih415 mpe thermal sensor */
-const struct st_thermal_compat_data st_415mpe_cdata = {
+static const struct st_thermal_compat_data st_415mpe_cdata = {
.sys_compat = "st,stih415-system-syscfg",
.reg_fields = st_415mpe_regfields,
.ops = &st_syscfg_sensor_ops,
@@ -124,7 +124,7 @@ const struct st_thermal_compat_data st_415mpe_cdata = {
};
/* Compatible device data for stih416 sas thermal sensor */
-const struct st_thermal_compat_data st_416sas_cdata = {
+static const struct st_thermal_compat_data st_416sas_cdata = {
.sys_compat = "st,stih416-front-syscfg",
.reg_fields = st_416sas_regfields,
.ops = &st_syscfg_sensor_ops,
@@ -134,7 +134,7 @@ const struct st_thermal_compat_data st_416sas_cdata = {
};
/* Compatible device data for stid127 thermal sensor */
-const struct st_thermal_compat_data st_127_cdata = {
+static const struct st_thermal_compat_data st_127_cdata = {
.sys_compat = "st,stid127-cpu-syscfg",
.reg_fields = st_127_regfields,
.ops = &st_syscfg_sensor_ops,
@@ -143,7 +143,7 @@ const struct st_thermal_compat_data st_127_cdata = {
.crit_temp = 120,
};
-static struct of_device_id st_syscfg_thermal_of_match[] = {
+static const struct of_device_id st_syscfg_thermal_of_match[] = {
{ .compatible = "st,stih415-sas-thermal", .data = &st_415sas_cdata },
{ .compatible = "st,stih415-mpe-thermal", .data = &st_415mpe_cdata },
{ .compatible = "st,stih416-sas-thermal", .data = &st_416sas_cdata },
@@ -152,12 +152,12 @@ static struct of_device_id st_syscfg_thermal_of_match[] = {
};
MODULE_DEVICE_TABLE(of, st_syscfg_thermal_of_match);
-int st_syscfg_probe(struct platform_device *pdev)
+static int st_syscfg_probe(struct platform_device *pdev)
{
return st_thermal_register(pdev, st_syscfg_thermal_of_match);
}
-int st_syscfg_remove(struct platform_device *pdev)
+static int st_syscfg_remove(struct platform_device *pdev)
{
return st_thermal_unregister(pdev);
}
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 174d3bcf8bd7..4108db7e10c1 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -458,8 +458,10 @@ static void update_temperature(struct thermal_zone_device *tz)
ret = thermal_zone_get_temp(tz, &temp);
if (ret) {
- dev_warn(&tz->device, "failed to read out thermal zone %d\n",
- tz->id);
+ if (ret != -EAGAIN)
+ dev_warn(&tz->device,
+ "failed to read out thermal zone (%d)\n",
+ ret);
return;
}
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index b1893f3f88f1..3ad1458bfeb0 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -921,6 +921,9 @@ static void lpuart_setup_watermark(struct lpuart_port *sport)
writeb(val | UARTPFIFO_TXFE | UARTPFIFO_RXFE,
sport->port.membase + UARTPFIFO);
+ /* explicitly clear RDRF */
+ readb(sport->port.membase + UARTSR1);
+
/* flush Tx and Rx FIFO */
writeb(UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH,
sport->port.membase + UARTCFIFO);
@@ -1076,6 +1079,8 @@ static int lpuart_startup(struct uart_port *port)
sport->txfifo_size = 0x1 << (((temp >> UARTPFIFO_TXSIZE_OFF) &
UARTPFIFO_FIFOSIZE_MASK) + 1);
+ sport->port.fifosize = sport->txfifo_size;
+
sport->rxfifo_size = 0x1 << (((temp >> UARTPFIFO_RXSIZE_OFF) &
UARTPFIFO_FIFOSIZE_MASK) + 1);
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index af821a908720..cf08876922f1 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -963,6 +963,7 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
free_irq(ourport->tx_irq, ourport);
tx_enabled(port) = 0;
ourport->tx_claimed = 0;
+ ourport->tx_mode = 0;
}
if (ourport->rx_claimed) {
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index a7865c4b0498..0827d7c96527 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -387,6 +387,10 @@ static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue,
status = PORT_PLC;
port_change_bit = "link state";
break;
+ case USB_PORT_FEAT_C_PORT_CONFIG_ERROR:
+ status = PORT_CEC;
+ port_change_bit = "config error";
+ break;
default:
/* Should never happen */
return;
@@ -588,6 +592,8 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
status |= USB_PORT_STAT_C_LINK_STATE << 16;
if ((raw_port_status & PORT_WRC))
status |= USB_PORT_STAT_C_BH_RESET << 16;
+ if ((raw_port_status & PORT_CEC))
+ status |= USB_PORT_STAT_C_CONFIG_ERROR << 16;
}
if (hcd->speed != HCD_USB3) {
@@ -1005,6 +1011,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
case USB_PORT_FEAT_C_OVER_CURRENT:
case USB_PORT_FEAT_C_ENABLE:
case USB_PORT_FEAT_C_PORT_LINK_STATE:
+ case USB_PORT_FEAT_C_PORT_CONFIG_ERROR:
xhci_clear_port_change_bit(xhci, wValue, wIndex,
port_array[wIndex], temp);
break;
@@ -1069,7 +1076,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
*/
status = bus_state->resuming_ports;
- mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC;
+ mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC | PORT_CEC;
spin_lock_irqsave(&xhci->lock, flags);
/* For each port, did anything change? If so, set that bit in buf. */
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index fd53c9ebd662..2af32e26fafc 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -115,6 +115,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
xhci->quirks |= XHCI_LPM_SUPPORT;
xhci->quirks |= XHCI_INTEL_HOST;
+ xhci->quirks |= XHCI_AVOID_BEI;
}
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) {
@@ -130,7 +131,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
* PPT chipsets.
*/
xhci->quirks |= XHCI_SPURIOUS_REBOOT;
- xhci->quirks |= XHCI_AVOID_BEI;
}
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
diff --git a/drivers/usb/isp1760/isp1760-udc.c b/drivers/usb/isp1760/isp1760-udc.c
index f32c292cc868..3fc4fe770253 100644
--- a/drivers/usb/isp1760/isp1760-udc.c
+++ b/drivers/usb/isp1760/isp1760-udc.c
@@ -1203,7 +1203,7 @@ static int isp1760_udc_start(struct usb_gadget *gadget,
if (udc->driver) {
dev_err(udc->isp->dev, "UDC already has a gadget driver\n");
- spin_unlock(&udc->lock);
+ spin_unlock_irqrestore(&udc->lock, flags);
return -EBUSY;
}
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 3086dec0ef53..8eb68a31cab6 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -604,6 +604,7 @@ static const struct usb_device_id id_table_combined[] = {
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) },
/*
* ELV devices:
*/
@@ -1883,8 +1884,12 @@ static int ftdi_8u2232c_probe(struct usb_serial *serial)
{
struct usb_device *udev = serial->dev;
- if ((udev->manufacturer && !strcmp(udev->manufacturer, "CALAO Systems")) ||
- (udev->product && !strcmp(udev->product, "BeagleBone/XDS100V2")))
+ if (udev->manufacturer && !strcmp(udev->manufacturer, "CALAO Systems"))
+ return ftdi_jtag_probe(serial);
+
+ if (udev->product &&
+ (!strcmp(udev->product, "BeagleBone/XDS100V2") ||
+ !strcmp(udev->product, "SNAP Connect E10")))
return ftdi_jtag_probe(serial);
return 0;
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 56b1b55c4751..4e4f46f3c89c 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -561,6 +561,12 @@
*/
#define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */
+/*
+ * Synapse Wireless product ids (FTDI_VID)
+ * http://www.synapse-wireless.com
+ */
+#define FTDI_SYNAPSE_SS200_PID 0x9090 /* SS200 - SNAP Stick 200 */
+
/********************************/
/** third-party VID/PID combos **/
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index dd97d8b572c3..4f7e072e4e00 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -61,6 +61,7 @@ struct keyspan_pda_private {
/* For Xircom PGSDB9 and older Entrega version of the same device */
#define XIRCOM_VENDOR_ID 0x085a
#define XIRCOM_FAKE_ID 0x8027
+#define XIRCOM_FAKE_ID_2 0x8025 /* "PGMFHUB" serial */
#define ENTREGA_VENDOR_ID 0x1645
#define ENTREGA_FAKE_ID 0x8093
@@ -70,6 +71,7 @@ static const struct usb_device_id id_table_combined[] = {
#endif
#ifdef XIRCOM
{ USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID) },
+ { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID_2) },
{ USB_DEVICE(ENTREGA_VENDOR_ID, ENTREGA_FAKE_ID) },
#endif
{ USB_DEVICE(KEYSPAN_VENDOR_ID, KEYSPAN_PDA_ID) },
@@ -93,6 +95,7 @@ static const struct usb_device_id id_table_fake[] = {
#ifdef XIRCOM
static const struct usb_device_id id_table_fake_xircom[] = {
{ USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID) },
+ { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID_2) },
{ USB_DEVICE(ENTREGA_VENDOR_ID, ENTREGA_FAKE_ID) },
{ }
};
diff --git a/drivers/watchdog/imgpdc_wdt.c b/drivers/watchdog/imgpdc_wdt.c
index c8def68d9e4c..0deaa4f971f5 100644
--- a/drivers/watchdog/imgpdc_wdt.c
+++ b/drivers/watchdog/imgpdc_wdt.c
@@ -42,10 +42,10 @@
#define PDC_WDT_MIN_TIMEOUT 1
#define PDC_WDT_DEF_TIMEOUT 64
-static int heartbeat;
+static int heartbeat = PDC_WDT_DEF_TIMEOUT;
module_param(heartbeat, int, 0);
-MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds. "
- "(default = " __MODULE_STRING(PDC_WDT_DEF_TIMEOUT) ")");
+MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds "
+ "(default=" __MODULE_STRING(PDC_WDT_DEF_TIMEOUT) ")");
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
@@ -191,6 +191,7 @@ static int pdc_wdt_probe(struct platform_device *pdev)
pdc_wdt->wdt_dev.ops = &pdc_wdt_ops;
pdc_wdt->wdt_dev.max_timeout = 1 << PDC_WDT_CONFIG_DELAY_MASK;
pdc_wdt->wdt_dev.parent = &pdev->dev;
+ watchdog_set_drvdata(&pdc_wdt->wdt_dev, pdc_wdt);
ret = watchdog_init_timeout(&pdc_wdt->wdt_dev, heartbeat, &pdev->dev);
if (ret < 0) {
@@ -232,7 +233,6 @@ static int pdc_wdt_probe(struct platform_device *pdev)
watchdog_set_nowayout(&pdc_wdt->wdt_dev, nowayout);
platform_set_drvdata(pdev, pdc_wdt);
- watchdog_set_drvdata(&pdc_wdt->wdt_dev, pdc_wdt);
ret = watchdog_register_device(&pdc_wdt->wdt_dev);
if (ret)
diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c
index a87f6df6e85f..938b987de551 100644
--- a/drivers/watchdog/mtk_wdt.c
+++ b/drivers/watchdog/mtk_wdt.c
@@ -133,7 +133,7 @@ static int mtk_wdt_start(struct watchdog_device *wdt_dev)
u32 reg;
struct mtk_wdt_dev *mtk_wdt = watchdog_get_drvdata(wdt_dev);
void __iomem *wdt_base = mtk_wdt->wdt_base;
- u32 ret;
+ int ret;
ret = mtk_wdt_set_timeout(wdt_dev, wdt_dev->timeout);
if (ret < 0)
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index b812462083fc..94d96809e686 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -55,6 +55,23 @@ config XEN_BALLOON_MEMORY_HOTPLUG
In that case step 3 should be omitted.
+config XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
+ int "Hotplugged memory limit (in GiB) for a PV guest"
+ default 512 if X86_64
+ default 4 if X86_32
+ range 0 64 if X86_32
+ depends on XEN_HAVE_PVMMU
+ depends on XEN_BALLOON_MEMORY_HOTPLUG
+ help
+ Maximum amount of memory (in GiB) that a PV guest can be
+ expanded to when using memory hotplug.
+
+ A PV guest can have more memory than this limit if it is
+ started with a larger maximum.
+
+ This value is used to allocate enough space in internal
+ tables needed for physical memory administration.
+
config XEN_SCRUB_PAGES
bool "Scrub pages before returning them to system"
depends on XEN_BALLOON
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 0b52d92cb2e5..fd933695f232 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -229,6 +229,29 @@ static enum bp_state reserve_additional_memory(long credit)
balloon_hotplug = round_up(balloon_hotplug, PAGES_PER_SECTION);
nid = memory_add_physaddr_to_nid(hotplug_start_paddr);
+#ifdef CONFIG_XEN_HAVE_PVMMU
+ /*
+ * add_memory() will build page tables for the new memory, so
+ * the p2m must contain invalid entries to ensure the correct
+ * non-present PTEs will be written.
+ *
+ * If a failure occurs, the original (identity) p2m entries
+ * are not restored since this region is now known not to
+ * conflict with any devices.
+ */
+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+ unsigned long pfn, i;
+
+ pfn = PFN_DOWN(hotplug_start_paddr);
+ for (i = 0; i < balloon_hotplug; i++) {
+ if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
+ pr_warn("set_phys_to_machine() failed, no memory added\n");
+ return BP_ECANCELED;
+ }
+ }
+ }
+#endif
+
rc = add_memory(nid, hotplug_start_paddr, balloon_hotplug << PAGE_SHIFT);
if (rc) {
diff --git a/fs/affs/file.c b/fs/affs/file.c
index d2468bf95669..a91795e01a7f 100644
--- a/fs/affs/file.c
+++ b/fs/affs/file.c
@@ -699,8 +699,10 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
boff = tmp % bsize;
if (boff) {
bh = affs_bread_ino(inode, bidx, 0);
- if (IS_ERR(bh))
- return PTR_ERR(bh);
+ if (IS_ERR(bh)) {
+ written = PTR_ERR(bh);
+ goto err_first_bh;
+ }
tmp = min(bsize - boff, to - from);
BUG_ON(boff + tmp > bsize || tmp > bsize);
memcpy(AFFS_DATA(bh) + boff, data + from, tmp);
@@ -712,14 +714,16 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
bidx++;
} else if (bidx) {
bh = affs_bread_ino(inode, bidx - 1, 0);
- if (IS_ERR(bh))
- return PTR_ERR(bh);
+ if (IS_ERR(bh)) {
+ written = PTR_ERR(bh);
+ goto err_first_bh;
+ }
}
while (from + bsize <= to) {
prev_bh = bh;
bh = affs_getemptyblk_ino(inode, bidx);
if (IS_ERR(bh))
- goto out;
+ goto err_bh;
memcpy(AFFS_DATA(bh), data + from, bsize);
if (buffer_new(bh)) {
AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
@@ -751,7 +755,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
prev_bh = bh;
bh = affs_bread_ino(inode, bidx, 1);
if (IS_ERR(bh))
- goto out;
+ goto err_bh;
tmp = min(bsize, to - from);
BUG_ON(tmp > bsize);
memcpy(AFFS_DATA(bh), data + from, tmp);
@@ -790,12 +794,13 @@ done:
if (tmp > inode->i_size)
inode->i_size = AFFS_I(inode)->mmu_private = tmp;
+err_first_bh:
unlock_page(page);
page_cache_release(page);
return written;
-out:
+err_bh:
bh = prev_bh;
if (!written)
written = PTR_ERR(bh);
diff --git a/fs/aio.c b/fs/aio.c
index f8e52a1854c1..a793f7023755 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -278,11 +278,11 @@ static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
return 0;
}
-static void aio_ring_remap(struct file *file, struct vm_area_struct *vma)
+static int aio_ring_remap(struct file *file, struct vm_area_struct *vma)
{
struct mm_struct *mm = vma->vm_mm;
struct kioctx_table *table;
- int i;
+ int i, res = -EINVAL;
spin_lock(&mm->ioctx_lock);
rcu_read_lock();
@@ -292,13 +292,17 @@ static void aio_ring_remap(struct file *file, struct vm_area_struct *vma)
ctx = table->table[i];
if (ctx && ctx->aio_ring_file == file) {
- ctx->user_id = ctx->mmap_base = vma->vm_start;
+ if (!atomic_read(&ctx->dead)) {
+ ctx->user_id = ctx->mmap_base = vma->vm_start;
+ res = 0;
+ }
break;
}
}
rcu_read_unlock();
spin_unlock(&mm->ioctx_lock);
+ return res;
}
static const struct file_operations aio_ring_fops = {
@@ -727,6 +731,9 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
err_cleanup:
aio_nr_sub(ctx->max_reqs);
err_ctx:
+ atomic_set(&ctx->dead, 1);
+ if (ctx->mmap_size)
+ vm_munmap(ctx->mmap_base, ctx->mmap_size);
aio_free_ring(ctx);
err:
mutex_unlock(&ctx->ring_lock);
@@ -748,11 +755,12 @@ static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
{
struct kioctx_table *table;
- if (atomic_xchg(&ctx->dead, 1))
+ spin_lock(&mm->ioctx_lock);
+ if (atomic_xchg(&ctx->dead, 1)) {
+ spin_unlock(&mm->ioctx_lock);
return -EINVAL;
+ }
-
- spin_lock(&mm->ioctx_lock);
table = rcu_dereference_raw(mm->ioctx_table);
WARN_ON(ctx != table->table[ctx->id]);
table->table[ctx->id] = NULL;
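The reordering in kill_ioctx() pairs with the aio_ring_remap() change above; a sketch of the invariant the shared lock now provides:

	/* Both paths serialize on mm->ioctx_lock:
	 *
	 *   aio_ring_remap()             kill_ioctx()
	 *   ----------------             ------------
	 *   lock(ioctx_lock)             lock(ioctx_lock)
	 *   if (!dead) remap, res = 0    atomic_xchg(dead, 1)
	 *   unlock                       drop table entry, unlock
	 *
	 * so a concurrent mremap() of the ring either sees a live ctx and
	 * updates mmap_base, or sees dead == 1 and returns -EINVAL; it can
	 * never republish a mapping that teardown is about to destroy.
	 */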
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 4ac7445e6ec7..aa0dc2573374 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -1,6 +1,9 @@
/*
* fs/cifs/cifsencrypt.c
*
+ * Encryption and hashing operations relating to NTLM, NTLMv2. See MS-NLMP
+ * for more detailed information
+ *
* Copyright (C) International Business Machines Corp., 2005,2013
* Author(s): Steve French (sfrench@us.ibm.com)
*
@@ -515,7 +518,8 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
__func__);
return rc;
}
- } else if (ses->serverName) {
+ } else {
+ /* We use ses->serverName if no domain name is available */
len = strlen(ses->serverName);
server = kmalloc(2 + (len * 2), GFP_KERNEL);
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index d3aa999ab785..480cf9c81d50 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1599,6 +1599,8 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
pr_warn("CIFS: username too long\n");
goto cifs_parse_mount_err;
}
+
+ kfree(vol->username);
vol->username = kstrdup(string, GFP_KERNEL);
if (!vol->username)
goto cifs_parse_mount_err;
@@ -1700,6 +1702,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
goto cifs_parse_mount_err;
}
+ kfree(vol->domainname);
vol->domainname = kstrdup(string, GFP_KERNEL);
if (!vol->domainname) {
pr_warn("CIFS: no memory for domainname\n");
@@ -1731,6 +1734,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
}
if (strncasecmp(string, "default", 7) != 0) {
+ kfree(vol->iocharset);
vol->iocharset = kstrdup(string,
GFP_KERNEL);
if (!vol->iocharset) {
@@ -2913,8 +2917,7 @@ ip_rfc1001_connect(struct TCP_Server_Info *server)
* calling name ends in null (byte 16) from old smb
* convention.
*/
- if (server->workstation_RFC1001_name &&
- server->workstation_RFC1001_name[0] != 0)
+ if (server->workstation_RFC1001_name[0] != 0)
rfc1002mangle(ses_init_buf->trailer.
session_req.calling_name,
server->workstation_RFC1001_name,
@@ -3692,6 +3695,12 @@ CIFSTCon(const unsigned int xid, struct cifs_ses *ses,
#endif /* CIFS_WEAK_PW_HASH */
rc = SMBNTencrypt(tcon->password, ses->server->cryptkey,
bcc_ptr, nls_codepage);
+ if (rc) {
+ cifs_dbg(FYI, "%s Can't generate NTLM rsp. Error: %d\n",
+ __func__, rc);
+ cifs_buf_release(smb_buffer);
+ return rc;
+ }
bcc_ptr += CIFS_AUTH_RESP_SIZE;
if (ses->capabilities & CAP_UNICODE) {
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index a94b3e673182..ca30c391a894 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1823,6 +1823,7 @@ refind_writable:
cifsFileInfo_put(inv_file);
spin_lock(&cifs_file_list_lock);
++refind;
+ inv_file = NULL;
goto refind_writable;
}
}
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 2d4f37235ed0..3e126d7bb2ea 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -771,6 +771,8 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
cifs_buf_release(srchinf->ntwrk_buf_start);
}
kfree(srchinf);
+ if (rc)
+ goto cgii_exit;
} else
goto cgii_exit;
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index 689f035915cf..22dfdf17d065 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -322,7 +322,7 @@ smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *hdr)
/* return pointer to beginning of data area, ie offset from SMB start */
if ((*off != 0) && (*len != 0))
- return hdr->ProtocolId + *off;
+ return (char *)(&hdr->ProtocolId[0]) + *off;
else
return NULL;
}
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 96b5d40a2ece..eab05e1aa587 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -684,7 +684,8 @@ smb2_clone_range(const unsigned int xid,
/* No need to change MaxChunks since already set to 1 */
chunk_sizes_updated = true;
- }
+ } else
+ goto cchunk_out;
}
cchunk_out:
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 3417340bf89e..65cd7a84c8bc 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -1218,7 +1218,7 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
struct smb2_ioctl_req *req;
struct smb2_ioctl_rsp *rsp;
struct TCP_Server_Info *server;
- struct cifs_ses *ses = tcon->ses;
+ struct cifs_ses *ses;
struct kvec iov[2];
int resp_buftype;
int num_iovecs;
@@ -1233,6 +1233,11 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
if (plen)
*plen = 0;
+ if (tcon)
+ ses = tcon->ses;
+ else
+ return -EIO;
+
if (ses && (ses->server))
server = ses->server;
else
@@ -1296,14 +1301,12 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
rsp = (struct smb2_ioctl_rsp *)iov[0].iov_base;
if ((rc != 0) && (rc != -EINVAL)) {
- if (tcon)
- cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
+ cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
goto ioctl_exit;
} else if (rc == -EINVAL) {
if ((opcode != FSCTL_SRV_COPYCHUNK_WRITE) &&
(opcode != FSCTL_SRV_COPYCHUNK)) {
- if (tcon)
- cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
+ cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
goto ioctl_exit;
}
}
@@ -1629,7 +1632,7 @@ SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0);
- if ((rc != 0) && tcon)
+ if (rc != 0)
cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE);
free_rsp_buf(resp_buftype, iov[0].iov_base);
@@ -2114,7 +2117,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
struct kvec iov[2];
int rc = 0;
int len;
- int resp_buftype;
+ int resp_buftype = CIFS_NO_BUFFER;
unsigned char *bufptr;
struct TCP_Server_Info *server;
struct cifs_ses *ses = tcon->ses;
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index e907052eeadb..32a8bbd7a9ad 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -53,6 +53,18 @@ struct wb_writeback_work {
struct completion *done; /* set if the caller waits */
};
+/*
+ * If an inode is constantly having its pages dirtied, but then the
+ * updates stop dirtytime_expire_interval seconds in the past, it's
+ * possible for the worst case time between when an inode has its
+ * timestamps updated and when they finally get written out to be two
+ * dirtytime_expire_intervals. We set the default to 12 hours (in
+ * seconds), which means most of the time inodes will have their
+ * timestamps written to disk after 12 hours, but in the worst case a
+ * few inodes might not have their timestamps updated for 24 hours.
+ */
+unsigned int dirtytime_expire_interval = 12 * 60 * 60;
+
/**
* writeback_in_progress - determine whether there is writeback in progress
* @bdi: the device's backing_dev_info structure.
@@ -275,8 +287,8 @@ static int move_expired_inodes(struct list_head *delaying_queue,
if ((flags & EXPIRE_DIRTY_ATIME) == 0)
older_than_this = work->older_than_this;
- else if ((work->reason == WB_REASON_SYNC) == 0) {
- expire_time = jiffies - (HZ * 86400);
+ else if (!work->for_sync) {
+ expire_time = jiffies - (dirtytime_expire_interval * HZ);
older_than_this = &expire_time;
}
while (!list_empty(delaying_queue)) {
@@ -458,6 +470,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
*/
redirty_tail(inode, wb);
} else if (inode->i_state & I_DIRTY_TIME) {
+ inode->dirtied_when = jiffies;
list_move(&inode->i_wb_list, &wb->b_dirty_time);
} else {
/* The inode is clean. Remove from writeback lists. */
@@ -505,12 +518,17 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
spin_lock(&inode->i_lock);
dirty = inode->i_state & I_DIRTY;
- if (((dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) &&
- (inode->i_state & I_DIRTY_TIME)) ||
- (inode->i_state & I_DIRTY_TIME_EXPIRED)) {
- dirty |= I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED;
- trace_writeback_lazytime(inode);
- }
+ if (inode->i_state & I_DIRTY_TIME) {
+ if ((dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) ||
+ unlikely(inode->i_state & I_DIRTY_TIME_EXPIRED) ||
+ unlikely(time_after(jiffies,
+ (inode->dirtied_time_when +
+ dirtytime_expire_interval * HZ)))) {
+ dirty |= I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED;
+ trace_writeback_lazytime(inode);
+ }
+ } else
+ inode->i_state &= ~I_DIRTY_TIME_EXPIRED;
inode->i_state &= ~dirty;
/*
@@ -1131,6 +1149,56 @@ void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
rcu_read_unlock();
}
+/*
+ * Wake up bdi's periodically to make sure dirtytime inodes get
+ * written back periodically. We deliberately do *not* check the
+ * b_dirty_time list in wb_has_dirty_io(), since this would cause the
+ * kernel to be constantly waking up once there are any dirtytime
+ * inodes on the system. So instead we define a separate delayed work
+ * function which gets called much more rarely. (By default, only
+ * once every 12 hours.)
+ *
+ * If there is any other write activity going on in the file system,
+ * this function won't be necessary. But if the only thing that has
+ * happened on the file system is a dirtytime inode caused by an atime
+ * update, we need this infrastructure below to make sure that inode
+ * eventually gets pushed out to disk.
+ */
+static void wakeup_dirtytime_writeback(struct work_struct *w);
+static DECLARE_DELAYED_WORK(dirtytime_work, wakeup_dirtytime_writeback);
+
+static void wakeup_dirtytime_writeback(struct work_struct *w)
+{
+ struct backing_dev_info *bdi;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
+ if (list_empty(&bdi->wb.b_dirty_time))
+ continue;
+ bdi_wakeup_thread(bdi);
+ }
+ rcu_read_unlock();
+ schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
+}
+
+static int __init start_dirtytime_writeback(void)
+{
+ schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
+ return 0;
+}
+__initcall(start_dirtytime_writeback);
+
+int dirtytime_interval_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ int ret;
+
+ ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+ if (ret == 0 && write)
+ mod_delayed_work(system_wq, &dirtytime_work, 0);
+ return ret;
+}
+
static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
@@ -1269,8 +1337,13 @@ void __mark_inode_dirty(struct inode *inode, int flags)
}
inode->dirtied_when = jiffies;
- list_move(&inode->i_wb_list, dirtytime ?
- &bdi->wb.b_dirty_time : &bdi->wb.b_dirty);
+ if (dirtytime)
+ inode->dirtied_time_when = jiffies;
+ if (inode->i_state & (I_DIRTY_INODE | I_DIRTY_PAGES))
+ list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
+ else
+ list_move(&inode->i_wb_list,
+ &bdi->wb.b_dirty_time);
spin_unlock(&bdi->wb.list_lock);
trace_writeback_dirty_inode_enqueue(inode);
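For completeness: dirtytime_interval_handler() is hooked up through a sysctl table entry elsewhere in this series (kernel/sysctl.c, not shown in this hunk); roughly:

	/* sketch of the companion kernel/sysctl.c entry, exposing the knob
	 * as /proc/sys/vm/dirtytime_expire_seconds */
	{
		.procname	= "dirtytime_expire_seconds",
		.data		= &dirtytime_expire_interval,
		.maxlen		= sizeof(dirtytime_expire_interval),
		.mode		= 0644,
		.proc_handler	= dirtytime_interval_handler,
		.extra1		= &zero,
	},

Writing a new value both updates the interval and kicks dirtytime_work immediately via mod_delayed_work(), so a shortened interval takes effect without waiting out the old one.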
diff --git a/fs/hfsplus/brec.c b/fs/hfsplus/brec.c
index 6e560d56094b..754fdf8c6356 100644
--- a/fs/hfsplus/brec.c
+++ b/fs/hfsplus/brec.c
@@ -131,13 +131,16 @@ skip:
hfs_bnode_write(node, entry, data_off + key_len, entry_len);
hfs_bnode_dump(node);
- if (new_node) {
- /* update parent key if we inserted a key
- * at the start of the first node
- */
- if (!rec && new_node != node)
- hfs_brec_update_parent(fd);
+ /*
+ * update parent key if we inserted a key
+ * at the start of the node and it is not the new node
+ */
+ if (!rec && new_node != node) {
+ hfs_bnode_read_key(node, fd->search_key, data_off + size);
+ hfs_brec_update_parent(fd);
+ }
+ if (new_node) {
hfs_bnode_put(fd->bnode);
if (!new_node->parent) {
hfs_btree_inc_height(tree);
@@ -168,9 +171,6 @@ skip:
goto again;
}
- if (!rec)
- hfs_brec_update_parent(fd);
-
return 0;
}
@@ -370,6 +370,8 @@ again:
if (IS_ERR(parent))
return PTR_ERR(parent);
__hfs_brec_find(parent, fd, hfs_find_rec_by_key);
+ if (fd->record < 0)
+ return -ENOENT;
hfs_bnode_dump(parent);
rec = fd->record;
diff --git a/fs/locks.c b/fs/locks.c
index 528fedfda15e..40bc384728c0 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1388,9 +1388,8 @@ any_leases_conflict(struct inode *inode, struct file_lock *breaker)
int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
{
int error = 0;
- struct file_lock *new_fl;
struct file_lock_context *ctx = inode->i_flctx;
- struct file_lock *fl;
+ struct file_lock *new_fl, *fl, *tmp;
unsigned long break_time;
int want_write = (mode & O_ACCMODE) != O_RDONLY;
LIST_HEAD(dispose);
@@ -1420,7 +1419,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
break_time++; /* so that 0 means no break time */
}
- list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
+ list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
if (!leases_conflict(fl, new_fl))
continue;
if (want_write) {
diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c
index cdbc78c72542..03d647bf195d 100644
--- a/fs/nfsd/blocklayout.c
+++ b/fs/nfsd/blocklayout.c
@@ -137,7 +137,7 @@ nfsd4_block_proc_layoutget(struct inode *inode, const struct svc_fh *fhp,
seg->offset = iomap.offset;
seg->length = iomap.length;
- dprintk("GET: %lld:%lld %d\n", bex->foff, bex->len, bex->es);
+ dprintk("GET: 0x%llx:0x%llx %d\n", bex->foff, bex->len, bex->es);
return 0;
out_error:
diff --git a/fs/nfsd/blocklayoutxdr.c b/fs/nfsd/blocklayoutxdr.c
index 9da89fddab33..9aa2796da90d 100644
--- a/fs/nfsd/blocklayoutxdr.c
+++ b/fs/nfsd/blocklayoutxdr.c
@@ -122,19 +122,19 @@ nfsd4_block_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp,
p = xdr_decode_hyper(p, &bex.foff);
if (bex.foff & (block_size - 1)) {
- dprintk("%s: unaligned offset %lld\n",
+ dprintk("%s: unaligned offset 0x%llx\n",
__func__, bex.foff);
goto fail;
}
p = xdr_decode_hyper(p, &bex.len);
if (bex.len & (block_size - 1)) {
- dprintk("%s: unaligned length %lld\n",
+ dprintk("%s: unaligned length 0x%llx\n",
__func__, bex.foff);
goto fail;
}
p = xdr_decode_hyper(p, &bex.soff);
if (bex.soff & (block_size - 1)) {
- dprintk("%s: unaligned disk offset %lld\n",
+ dprintk("%s: unaligned disk offset 0x%llx\n",
__func__, bex.soff);
goto fail;
}
diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c
index 1028a0629543..6904213a4363 100644
--- a/fs/nfsd/nfs4layouts.c
+++ b/fs/nfsd/nfs4layouts.c
@@ -118,7 +118,7 @@ void nfsd4_setup_layout_type(struct svc_export *exp)
{
struct super_block *sb = exp->ex_path.mnt->mnt_sb;
- if (exp->ex_flags & NFSEXP_NOPNFS)
+ if (!(exp->ex_flags & NFSEXP_PNFS))
return;
if (sb->s_export_op->get_uuid &&
@@ -440,15 +440,14 @@ nfsd4_return_file_layout(struct nfs4_layout *lp, struct nfsd4_layout_seg *seg,
list_move_tail(&lp->lo_perstate, reaplist);
return;
}
- end = seg->offset;
+ lo->offset = layout_end(seg);
} else {
/* retain the whole layout segment on a split. */
if (layout_end(seg) < end) {
dprintk("%s: split not supported\n", __func__);
return;
}
-
- lo->offset = layout_end(seg);
+ end = seg->offset;
}
layout_update_len(lo, end);
@@ -513,6 +512,9 @@ nfsd4_return_client_layouts(struct svc_rqst *rqstp,
spin_lock(&clp->cl_lock);
list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt) {
+ if (ls->ls_layout_type != lrp->lr_layout_type)
+ continue;
+
if (lrp->lr_return_type == RETURN_FSID &&
!fh_fsid_match(&ls->ls_stid.sc_file->fi_fhandle,
&cstate->current_fh.fh_handle))
@@ -587,6 +589,8 @@ nfsd4_cb_layout_fail(struct nfs4_layout_stateid *ls)
rpc_ntop((struct sockaddr *)&clp->cl_addr, addr_str, sizeof(addr_str));
+ trace_layout_recall_fail(&ls->ls_stid.sc_stateid);
+
printk(KERN_WARNING
"nfsd: client %s failed to respond to layout recall. "
" Fencing..\n", addr_str);
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index d30bea8d0277..92b9d97aff4f 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1237,8 +1237,8 @@ nfsd4_getdeviceinfo(struct svc_rqst *rqstp,
nfserr = ops->proc_getdeviceinfo(exp->ex_path.mnt->mnt_sb, gdp);
gdp->gd_notify_types &= ops->notify_types;
- exp_put(exp);
out:
+ exp_put(exp);
return nfserr;
}
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index d2f2c37dc2db..8ba1d888f1e6 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -3221,7 +3221,7 @@ alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
} else
nfs4_free_openowner(&oo->oo_owner);
spin_unlock(&clp->cl_lock);
- return oo;
+ return ret;
}
static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) {
@@ -5062,7 +5062,7 @@ alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
} else
nfs4_free_lockowner(&lo->lo_owner);
spin_unlock(&clp->cl_lock);
- return lo;
+ return ret;
}
static void
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index df5e66caf100..5fb7e78169a6 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1562,7 +1562,11 @@ nfsd4_decode_layoutget(struct nfsd4_compoundargs *argp,
p = xdr_decode_hyper(p, &lgp->lg_seg.offset);
p = xdr_decode_hyper(p, &lgp->lg_seg.length);
p = xdr_decode_hyper(p, &lgp->lg_minlength);
- nfsd4_decode_stateid(argp, &lgp->lg_sid);
+
+ status = nfsd4_decode_stateid(argp, &lgp->lg_sid);
+ if (status)
+ return status;
+
READ_BUF(4);
lgp->lg_maxcount = be32_to_cpup(p++);
@@ -1580,7 +1584,11 @@ nfsd4_decode_layoutcommit(struct nfsd4_compoundargs *argp,
p = xdr_decode_hyper(p, &lcp->lc_seg.offset);
p = xdr_decode_hyper(p, &lcp->lc_seg.length);
lcp->lc_reclaim = be32_to_cpup(p++);
- nfsd4_decode_stateid(argp, &lcp->lc_sid);
+
+ status = nfsd4_decode_stateid(argp, &lcp->lc_sid);
+ if (status)
+ return status;
+
READ_BUF(4);
lcp->lc_newoffset = be32_to_cpup(p++);
if (lcp->lc_newoffset) {
@@ -1628,7 +1636,11 @@ nfsd4_decode_layoutreturn(struct nfsd4_compoundargs *argp,
READ_BUF(16);
p = xdr_decode_hyper(p, &lrp->lr_seg.offset);
p = xdr_decode_hyper(p, &lrp->lr_seg.length);
- nfsd4_decode_stateid(argp, &lrp->lr_sid);
+
+ status = nfsd4_decode_stateid(argp, &lrp->lr_sid);
+ if (status)
+ return status;
+
READ_BUF(4);
lrp->lrf_body_len = be32_to_cpup(p++);
if (lrp->lrf_body_len > 0) {
@@ -4123,7 +4135,7 @@ nfsd4_encode_layoutreturn(struct nfsd4_compoundres *resp, __be32 nfserr,
return nfserr_resource;
*p++ = cpu_to_be32(lrp->lrs_present);
if (lrp->lrs_present)
- nfsd4_encode_stateid(xdr, &lrp->lr_sid);
+ return nfsd4_encode_stateid(xdr, &lrp->lr_sid);
return nfs_ok;
}
#endif /* CONFIG_NFSD_PNFS */
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index 83a9694ec485..46ec934f5dee 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -165,13 +165,17 @@ int nfsd_reply_cache_init(void)
{
unsigned int hashsize;
unsigned int i;
+ int status = 0;
max_drc_entries = nfsd_cache_size_limit();
atomic_set(&num_drc_entries, 0);
hashsize = nfsd_hashsize(max_drc_entries);
maskbits = ilog2(hashsize);
- register_shrinker(&nfsd_reply_cache_shrinker);
+ status = register_shrinker(&nfsd_reply_cache_shrinker);
+ if (status)
+ return status;
+
drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep),
0, 0, NULL);
if (!drc_slab)
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 46e0d4e857c7..ba1790e52ff2 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -2394,7 +2394,6 @@ relock:
/*
* for completing the rest of the request.
*/
- *ppos += written;
count -= written;
written_buffered = generic_perform_write(file, from, *ppos);
/*
@@ -2409,7 +2408,6 @@ relock:
goto out_dio;
}
- iocb->ki_pos = *ppos + written_buffered;
/* We need to ensure that the page cache pages are written to
* disk and invalidated to preserve the expected O_DIRECT
* semantics.
@@ -2418,6 +2416,7 @@ relock:
ret = filemap_write_and_wait_range(file->f_mapping, *ppos,
endbyte);
if (ret == 0) {
+ iocb->ki_pos = *ppos + written_buffered;
written += written_buffered;
invalidate_mapping_pages(mapping,
*ppos >> PAGE_CACHE_SHIFT,
@@ -2440,10 +2439,14 @@ out_dio:
/* buffered aio wouldn't have proper lock coverage today */
BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT));
+ if (unlikely(written <= 0))
+ goto no_sync;
+
if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) ||
((file->f_flags & O_DIRECT) && !direct_io)) {
- ret = filemap_fdatawrite_range(file->f_mapping, *ppos,
- *ppos + count - 1);
+ ret = filemap_fdatawrite_range(file->f_mapping,
+ iocb->ki_pos - written,
+ iocb->ki_pos - 1);
if (ret < 0)
written = ret;
@@ -2454,10 +2457,12 @@ out_dio:
}
if (!ret)
- ret = filemap_fdatawait_range(file->f_mapping, *ppos,
- *ppos + count - 1);
+ ret = filemap_fdatawait_range(file->f_mapping,
+ iocb->ki_pos - written,
+ iocb->ki_pos - 1);
}
+no_sync:
/*
* deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io
* function pointer which is called when o_direct io completes so that
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index b3f45a578344..e5966758c093 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -24,17 +24,14 @@
#include <linux/workqueue.h>
struct arch_timer_kvm {
-#ifdef CONFIG_KVM_ARM_TIMER
/* Is the timer enabled */
bool enabled;
/* Virtual offset */
cycle_t cntvoff;
-#endif
};
struct arch_timer_cpu {
-#ifdef CONFIG_KVM_ARM_TIMER
/* Registers: control register, timer value */
u32 cntv_ctl; /* Saved/restored */
cycle_t cntv_cval; /* Saved/restored */
@@ -55,10 +52,8 @@ struct arch_timer_cpu {
/* Timer IRQ */
const struct kvm_irq_level *irq;
-#endif
};
-#ifdef CONFIG_KVM_ARM_TIMER
int kvm_timer_hyp_init(void);
void kvm_timer_enable(struct kvm *kvm);
void kvm_timer_init(struct kvm *kvm);
@@ -72,30 +67,6 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu);
u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid);
int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value);
-#else
-static inline int kvm_timer_hyp_init(void)
-{
- return 0;
-};
-
-static inline void kvm_timer_enable(struct kvm *kvm) {}
-static inline void kvm_timer_init(struct kvm *kvm) {}
-static inline void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
- const struct kvm_irq_level *irq) {}
-static inline void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) {}
-static inline void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu) {}
-static inline void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu) {}
-static inline void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu) {}
-
-static inline int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
-{
- return 0;
-}
-
-static inline u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
-{
- return 0;
-}
-#endif
+bool kvm_timer_should_fire(struct kvm_vcpu *vcpu);
#endif
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 66203b268984..133ea00aa83b 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -24,6 +24,7 @@
#include <linux/irqreturn.h>
#include <linux/spinlock.h>
#include <linux/types.h>
+#include <kvm/iodev.h>
#define VGIC_NR_IRQS_LEGACY 256
#define VGIC_NR_SGIS 16
@@ -140,16 +141,21 @@ struct vgic_params {
};
struct vgic_vm_ops {
- bool (*handle_mmio)(struct kvm_vcpu *, struct kvm_run *,
- struct kvm_exit_mmio *);
bool (*queue_sgi)(struct kvm_vcpu *, int irq);
void (*add_sgi_source)(struct kvm_vcpu *, int irq, int source);
int (*init_model)(struct kvm *);
int (*map_resources)(struct kvm *, const struct vgic_params *);
};
+struct vgic_io_device {
+ gpa_t addr;
+ int len;
+ const struct vgic_io_range *reg_ranges;
+ struct kvm_vcpu *redist_vcpu;
+ struct kvm_io_device dev;
+};
+
struct vgic_dist {
-#ifdef CONFIG_KVM_ARM_VGIC
spinlock_t lock;
bool in_kernel;
bool ready;
@@ -197,6 +203,9 @@ struct vgic_dist {
/* Level-triggered interrupt queued on VCPU interface */
struct vgic_bitmap irq_queued;
+ /* Interrupt was active when unqueued from the VCPU interface */
+ struct vgic_bitmap irq_active;
+
/* Interrupt priority. Not used yet. */
struct vgic_bytemap irq_priority;
@@ -237,8 +246,12 @@ struct vgic_dist {
/* Bitmap indicating which CPU has something pending */
unsigned long *irq_pending_on_cpu;
+ /* Bitmap indicating which CPU has active IRQs */
+ unsigned long *irq_active_on_cpu;
+
struct vgic_vm_ops vm_ops;
-#endif
+ struct vgic_io_device dist_iodev;
+ struct vgic_io_device *redist_iodevs;
};
struct vgic_v2_cpu_if {
@@ -266,13 +279,18 @@ struct vgic_v3_cpu_if {
};
struct vgic_cpu {
-#ifdef CONFIG_KVM_ARM_VGIC
/* per IRQ to LR mapping */
u8 *vgic_irq_lr_map;
- /* Pending interrupts on this VCPU */
+ /* Pending/active/both interrupts on this VCPU */
DECLARE_BITMAP( pending_percpu, VGIC_NR_PRIVATE_IRQS);
+ DECLARE_BITMAP( active_percpu, VGIC_NR_PRIVATE_IRQS);
+ DECLARE_BITMAP( pend_act_percpu, VGIC_NR_PRIVATE_IRQS);
+
+ /* Pending/active/both shared interrupts, dynamically sized */
unsigned long *pending_shared;
+ unsigned long *active_shared;
+ unsigned long *pend_act_shared;
/* Bitmap of used/free list registers */
DECLARE_BITMAP( lr_used, VGIC_V2_MAX_LRS);
@@ -285,7 +303,6 @@ struct vgic_cpu {
struct vgic_v2_cpu_if vgic_v2;
struct vgic_v3_cpu_if vgic_v3;
};
-#endif
};
#define LR_EMPTY 0xff
@@ -295,10 +312,7 @@ struct vgic_cpu {
struct kvm;
struct kvm_vcpu;
-struct kvm_run;
-struct kvm_exit_mmio;
-#ifdef CONFIG_KVM_ARM_VGIC
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write);
int kvm_vgic_hyp_init(void);
int kvm_vgic_map_resources(struct kvm *kvm);
@@ -312,8 +326,7 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
bool level);
void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
-bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
- struct kvm_exit_mmio *mmio);
+int kvm_vgic_vcpu_active_irq(struct kvm_vcpu *vcpu);
#define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel))
#define vgic_initialized(k) (!!((k)->arch.vgic.nr_cpus))
@@ -335,84 +348,4 @@ static inline int vgic_v3_probe(struct device_node *vgic_node,
}
#endif
-#else
-static inline int kvm_vgic_hyp_init(void)
-{
- return 0;
-}
-
-static inline int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr)
-{
- return 0;
-}
-
-static inline int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
-{
- return -ENXIO;
-}
-
-static inline int kvm_vgic_map_resources(struct kvm *kvm)
-{
- return 0;
-}
-
-static inline int kvm_vgic_create(struct kvm *kvm, u32 type)
-{
- return 0;
-}
-
-static inline void kvm_vgic_destroy(struct kvm *kvm)
-{
-}
-
-static inline void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
-{
-}
-
-static inline int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
-{
- return 0;
-}
-
-static inline void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) {}
-static inline void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) {}
-
-static inline int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid,
- unsigned int irq_num, bool level)
-{
- return 0;
-}
-
-static inline int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
-{
- return 0;
-}
-
-static inline bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
- struct kvm_exit_mmio *mmio)
-{
- return false;
-}
-
-static inline int irqchip_in_kernel(struct kvm *kvm)
-{
- return 0;
-}
-
-static inline bool vgic_initialized(struct kvm *kvm)
-{
- return true;
-}
-
-static inline bool vgic_ready(struct kvm *kvm)
-{
- return true;
-}
-
-static inline int kvm_vgic_get_max_vcpus(void)
-{
- return KVM_MAX_VCPUS;
-}
-#endif
-
#endif
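A sketch of how the new vgic_io_device might be populated for the distributor region now that vgic MMIO is dispatched through kvm_io_device; the ops pointer, helper name, and region size are hypothetical:

static void demo_init_dist_iodev(struct vgic_dist *dist, gpa_t base,
				 const struct vgic_io_range *ranges,
				 const struct kvm_io_device_ops *ops)
{
	dist->dist_iodev.addr = base;
	dist->dist_iodev.len = SZ_4K;		/* assumed region size */
	dist->dist_iodev.reg_ranges = ranges;
	dist->dist_iodev.redist_vcpu = NULL;	/* distributor is not per-vcpu */
	kvm_iodevice_init(&dist->dist_iodev.dev, ops);
}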
diff --git a/virt/kvm/iodev.h b/include/kvm/iodev.h
index 12fd3caffd2b..a6d208b916f5 100644
--- a/virt/kvm/iodev.h
+++ b/include/kvm/iodev.h
@@ -9,17 +9,17 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __KVM_IODEV_H__
#define __KVM_IODEV_H__
#include <linux/kvm_types.h>
-#include <asm/errno.h>
+#include <linux/errno.h>
struct kvm_io_device;
+struct kvm_vcpu;
/**
* kvm_io_device_ops are called under kvm slots_lock.
@@ -27,11 +27,13 @@ struct kvm_io_device;
* or non-zero to have it passed to the next device.
**/
struct kvm_io_device_ops {
- int (*read)(struct kvm_io_device *this,
+ int (*read)(struct kvm_vcpu *vcpu,
+ struct kvm_io_device *this,
gpa_t addr,
int len,
void *val);
- int (*write)(struct kvm_io_device *this,
+ int (*write)(struct kvm_vcpu *vcpu,
+ struct kvm_io_device *this,
gpa_t addr,
int len,
const void *val);
@@ -49,16 +51,20 @@ static inline void kvm_iodevice_init(struct kvm_io_device *dev,
dev->ops = ops;
}
-static inline int kvm_iodevice_read(struct kvm_io_device *dev,
- gpa_t addr, int l, void *v)
+static inline int kvm_iodevice_read(struct kvm_vcpu *vcpu,
+ struct kvm_io_device *dev, gpa_t addr,
+ int l, void *v)
{
- return dev->ops->read ? dev->ops->read(dev, addr, l, v) : -EOPNOTSUPP;
+ return dev->ops->read ? dev->ops->read(vcpu, dev, addr, l, v)
+ : -EOPNOTSUPP;
}
-static inline int kvm_iodevice_write(struct kvm_io_device *dev,
- gpa_t addr, int l, const void *v)
+static inline int kvm_iodevice_write(struct kvm_vcpu *vcpu,
+ struct kvm_io_device *dev, gpa_t addr,
+ int l, const void *v)
{
- return dev->ops->write ? dev->ops->write(dev, addr, l, v) : -EOPNOTSUPP;
+ return dev->ops->write ? dev->ops->write(vcpu, dev, addr, l, v)
+ : -EOPNOTSUPP;
}
static inline void kvm_iodevice_destructor(struct kvm_io_device *dev)
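A minimal device written against the new vcpu-aware callback signatures; the names are illustrative only:

static int demo_zero_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			  gpa_t addr, int len, void *val)
{
	memset(val, 0, len);	/* reads as zero; vcpu is now available if needed */
	return 0;
}

static const struct kvm_io_device_ops demo_zero_ops = {
	.read = demo_zero_read,	/* no .write: kvm_iodevice_write() returns -EOPNOTSUPP */
};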
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index c294e3e25e37..a1b25e35ea5f 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -181,7 +181,9 @@ enum rq_flag_bits {
__REQ_ELVPRIV, /* elevator private data attached */
__REQ_FAILED, /* set if the request failed */
__REQ_QUIET, /* don't worry about errors */
- __REQ_PREEMPT, /* set for "ide_preempt" requests */
+ __REQ_PREEMPT, /* set for "ide_preempt" requests and also
+ for requests for which the SCSI "quiesce"
+ state must be ignored. */
__REQ_ALLOCED, /* request came from our alloc pool */
__REQ_COPY_USER, /* contains copies of user pages */
__REQ_FLUSH_SEQ, /* request for flush sequence */
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 1b45e4a0519b..0e41ca0e5927 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -192,29 +192,16 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
#include <uapi/linux/types.h>
-static __always_inline void data_access_exceeds_word_size(void)
-#ifdef __compiletime_warning
-__compiletime_warning("data access exceeds word size and won't be atomic")
-#endif
-;
-
-static __always_inline void data_access_exceeds_word_size(void)
-{
-}
-
static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
{
switch (size) {
case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
-#ifdef CONFIG_64BIT
case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
-#endif
default:
barrier();
__builtin_memcpy((void *)res, (const void *)p, size);
- data_access_exceeds_word_size();
barrier();
}
}
@@ -225,13 +212,10 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
-#ifdef CONFIG_64BIT
case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
-#endif
default:
barrier();
__builtin_memcpy((void *)p, (const void *)res, size);
- data_access_exceeds_word_size();
barrier();
}
}
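With the warning helper gone, the 1/2/4/8-byte cases compile unconditionally (an 8-byte access may still be split by the compiler on 32-bit), and larger sizes silently fall back to memcpy between the barriers. Assumed usage of the resulting READ_ONCE()/WRITE_ONCE() pair:

static u64 demo_snapshot(const u64 *shared)
{
	/* one marked access; atomicity is only guaranteed up to word size */
	return READ_ONCE(*shared);
}

static void demo_publish(u64 *shared, u64 val)
{
	WRITE_ONCE(*shared, val);
}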
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 306178d7309f..9c5e89254796 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -77,7 +77,6 @@ struct cpuidle_device {
unsigned int cpu;
int last_residency;
- int state_count;
struct cpuidle_state_usage states_usage[CPUIDLE_STATE_MAX];
struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX];
struct cpuidle_driver_kobj *kobj_driver;
diff --git a/include/linux/dmapool.h b/include/linux/dmapool.h
index 022e34fcbd1b..52456aa566a0 100644
--- a/include/linux/dmapool.h
+++ b/include/linux/dmapool.h
@@ -14,6 +14,8 @@
#include <asm/io.h>
#include <asm/scatterlist.h>
+struct device;
+
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
size_t size, size_t align, size_t allocation);
diff --git a/include/linux/efi.h b/include/linux/efi.h
index cf7e431cbc73..af5be0368dec 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -942,6 +942,7 @@ extern int __init efi_setup_pcdp_console(char *);
#define EFI_64BIT 5 /* Is the firmware 64-bit? */
#define EFI_PARAVIRT 6 /* Access is via a paravirt interface */
#define EFI_ARCH_1 7 /* First arch-specific bit */
+#define EFI_DBG 8 /* Print additional debug info at runtime */
#ifdef CONFIG_EFI
/*
diff --git a/include/linux/fs.h b/include/linux/fs.h
index b4d71b5e1ff2..52cc4492cb3a 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -604,6 +604,7 @@ struct inode {
struct mutex i_mutex;
unsigned long dirtied_when; /* jiffies of first dirtying */
+ unsigned long dirtied_time_when;
struct hlist_node i_hash;
struct list_head i_wb_list; /* backing dev IO list */
@@ -1548,7 +1549,7 @@ struct file_operations {
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
int (*mmap) (struct file *, struct vm_area_struct *);
- void (*mremap)(struct file *, struct vm_area_struct *);
+ int (*mremap)(struct file *, struct vm_area_struct *);
int (*open) (struct inode *, struct file *);
int (*flush) (struct file *, fl_owner_t id);
int (*release) (struct inode *, struct file *);
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 781974afff9f..ffbc034c8810 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -126,8 +126,23 @@
#define GICR_PROPBASER_WaWb (5U << 7)
#define GICR_PROPBASER_RaWaWt (6U << 7)
#define GICR_PROPBASER_RaWaWb (7U << 7)
+#define GICR_PROPBASER_CACHEABILITY_MASK (7U << 7)
#define GICR_PROPBASER_IDBITS_MASK (0x1f)
+#define GICR_PENDBASER_NonShareable (0U << 10)
+#define GICR_PENDBASER_InnerShareable (1U << 10)
+#define GICR_PENDBASER_OuterShareable (2U << 10)
+#define GICR_PENDBASER_SHAREABILITY_MASK (3UL << 10)
+#define GICR_PENDBASER_nCnB (0U << 7)
+#define GICR_PENDBASER_nC (1U << 7)
+#define GICR_PENDBASER_RaWt (2U << 7)
+#define GICR_PENDBASER_RaWb (3U << 7)
+#define GICR_PENDBASER_WaWt (4U << 7)
+#define GICR_PENDBASER_WaWb (5U << 7)
+#define GICR_PENDBASER_RaWaWt (6U << 7)
+#define GICR_PENDBASER_RaWaWb (7U << 7)
+#define GICR_PENDBASER_CACHEABILITY_MASK (7U << 7)
+
/*
* Re-Distributor registers, offsets from SGI_base
*/
@@ -182,6 +197,7 @@
#define GITS_CBASER_WaWb (5UL << 59)
#define GITS_CBASER_RaWaWt (6UL << 59)
#define GITS_CBASER_RaWaWb (7UL << 59)
+#define GITS_CBASER_CACHEABILITY_MASK (7UL << 59)
#define GITS_CBASER_NonShareable (0UL << 10)
#define GITS_CBASER_InnerShareable (1UL << 10)
#define GITS_CBASER_OuterShareable (2UL << 10)
@@ -198,6 +214,7 @@
#define GITS_BASER_WaWb (5UL << 59)
#define GITS_BASER_RaWaWt (6UL << 59)
#define GITS_BASER_RaWaWb (7UL << 59)
+#define GITS_BASER_CACHEABILITY_MASK (7UL << 59)
#define GITS_BASER_TYPE_SHIFT (56)
#define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7)
#define GITS_BASER_ENTRY_SIZE_SHIFT (48)
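The new *_CACHEABILITY_MASK constants let code rewrite an attribute field without open-coding the shift; a short sketch under assumed usage:

static u64 demo_force_noncacheable(u64 pendbaser)
{
	pendbaser &= ~GICR_PENDBASER_CACHEABILITY_MASK;	/* strip cache attributes */
	return pendbaser | GICR_PENDBASER_nC;		/* mark non-cacheable */
}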
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 98f923b6a0ea..f4de473f226b 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -45,6 +45,12 @@
* same as using STATIC_KEY_INIT_FALSE.
*/
+#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
+# define HAVE_JUMP_LABEL
+#endif
+
+#ifndef __ASSEMBLY__
+
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/bug.h>
@@ -55,7 +61,7 @@ extern bool static_key_initialized;
"%s used before call to jump_label_init", \
__func__)
-#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
+#ifdef HAVE_JUMP_LABEL
struct static_key {
atomic_t enabled;
@@ -66,13 +72,18 @@ struct static_key {
#endif
};
-# include <asm/jump_label.h>
-# define HAVE_JUMP_LABEL
#else
struct static_key {
atomic_t enabled;
};
-#endif /* CC_HAVE_ASM_GOTO && CONFIG_JUMP_LABEL */
+#endif /* HAVE_JUMP_LABEL */
+#endif /* __ASSEMBLY__ */
+
+#ifdef HAVE_JUMP_LABEL
+#include <asm/jump_label.h>
+#endif
+
+#ifndef __ASSEMBLY__
enum jump_label_type {
JUMP_LABEL_DISABLE = 0,
@@ -203,3 +214,5 @@ static inline bool static_key_enabled(struct static_key *key)
}
#endif /* _LINUX_JUMP_LABEL_H */
+
+#endif /* __ASSEMBLY__ */
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index d12b2104d19b..82af5d0b996e 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -165,12 +165,12 @@ enum kvm_bus {
KVM_NR_BUSES
};
-int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
+int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
int len, const void *val);
-int kvm_io_bus_write_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
- int len, const void *val, long cookie);
-int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len,
- void *val);
+int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
+ gpa_t addr, int len, const void *val, long cookie);
+int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
+ int len, void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
int len, struct kvm_io_device *dev);
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
@@ -658,7 +658,6 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
void *kvm_kvzalloc(unsigned long size);
-void kvm_kvfree(const void *addr);
#ifndef __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
@@ -700,6 +699,20 @@ static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
#endif
}
+#ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED
+/*
+ * Returns true if the virtual interrupt controller is initialized and
+ * ready to accept virtual IRQs. On some architectures the virtual interrupt
+ * controller is dynamically instantiated and this is not always true.
+ */
+bool kvm_arch_intc_initialized(struct kvm *kvm);
+#else
+static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
+{
+ return true;
+}
+#endif
+
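An architecture that opts in defines the macro in its asm/kvm_host.h and supplies the function; the body below is an assumption (an ARM-style vgic check, for instance):

/* in the architecture's asm/kvm_host.h (assumed): */
#define __KVM_HAVE_ARCH_INTC_INITIALIZED

/* in architecture code (assumed body): */
bool kvm_arch_intc_initialized(struct kvm *kvm)
{
	return vgic_initialized(kvm);
}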
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);
@@ -969,11 +982,16 @@ static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
#endif /* CONFIG_HAVE_KVM_EVENTFD */
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
-static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
+static inline bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu)
{
return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
}
+static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
+{
+ return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0;
+}
+
bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);
#else
diff --git a/include/linux/lcm.h b/include/linux/lcm.h
index 7bf01d779b45..1ce79a7f1daa 100644
--- a/include/linux/lcm.h
+++ b/include/linux/lcm.h
@@ -4,5 +4,6 @@
#include <linux/compiler.h>
unsigned long lcm(unsigned long a, unsigned long b) __attribute_const__;
+unsigned long lcm_not_zero(unsigned long a, unsigned long b) __attribute_const__;
#endif /* _LCM_H */
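Only the prototype is added here; a plausible reading of the name is "like lcm(), but fall back to a non-zero argument instead of returning 0". A sketch under that assumption:

unsigned long lcm_not_zero(unsigned long a, unsigned long b)
{
	unsigned long l = lcm(a, b);

	return l ? l : (b ? b : a);
}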
diff --git a/include/linux/libata.h b/include/linux/libata.h
index fc03efa64ffe..6b08cc106c21 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -232,6 +232,7 @@ enum {
* led */
ATA_FLAG_NO_DIPM = (1 << 23), /* host not happy with DIPM */
ATA_FLAG_LOWTAG = (1 << 24), /* host wants lowest available tag */
+ ATA_FLAG_SAS_HOST = (1 << 25), /* SAS host */
/* bits 24:31 of ap->flags are reserved for LLD specific flags */
diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h
index fb0390a1a498..ee7b1ce7a6f8 100644
--- a/include/linux/mfd/palmas.h
+++ b/include/linux/mfd/palmas.h
@@ -2999,6 +2999,9 @@ enum usb_irq_events {
#define PALMAS_GPADC_TRIM15 0x0E
#define PALMAS_GPADC_TRIM16 0x0F
+/* The TPS659038 regen2_ctrl offset is different from Palmas */
+#define TPS659038_REGEN2_CTRL 0x12
+
/* TPS65917 Interrupt registers */
/* Registers for function INTERRUPT */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index f279d9c158cd..2782df47101e 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -474,16 +474,15 @@ struct zone {
unsigned long wait_table_bits;
ZONE_PADDING(_pad1_)
-
- /* Write-intensive fields used from the page allocator */
- spinlock_t lock;
-
/* free areas of different sizes */
struct free_area free_area[MAX_ORDER];
/* zone flags, see below */
unsigned long flags;
+ /* Write-intensive fields used from the page allocator */
+ spinlock_t lock;
+
ZONE_PADDING(_pad2_)
/* Write-intensive fields used by page reclaim */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index dcf6ec27739b..278738873703 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2185,6 +2185,12 @@ void netdev_freemem(struct net_device *dev);
void synchronize_net(void);
int init_dummy_netdev(struct net_device *dev);
+DECLARE_PER_CPU(int, xmit_recursion);
+static inline int dev_recursion_level(void)
+{
+ return this_cpu_read(xmit_recursion);
+}
+
struct net_device *dev_get_by_index(struct net *net, int ifindex);
struct net_device *__dev_get_by_index(struct net *net, int ifindex);
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index d4ad5b5a02bb..045f709cb89b 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -316,7 +316,7 @@ struct regulator_desc {
* @driver_data: private regulator data
* @of_node: OpenFirmware node to parse for device tree bindings (may be
* NULL).
- * @regmap: regmap to use for core regmap helpers if dev_get_regulator() is
+ * @regmap: regmap to use for core regmap helpers if dev_get_regmap() is
* insufficient.
* @ena_gpio_initialized: GPIO controlling regulator enable was properly
* initialized, meaning that >= 0 is a valid gpio
diff --git a/include/linux/sched.h b/include/linux/sched.h
index fdca05c5f812..3f3308824fa4 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -176,6 +176,14 @@ extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
extern void calc_global_load(unsigned long ticks);
extern void update_cpu_load_nohz(void);
+/* Notifier for when a task gets migrated to a new CPU */
+struct task_migration_notifier {
+ struct task_struct *task;
+ int from_cpu;
+ int to_cpu;
+};
+extern void register_task_migration_notifier(struct notifier_block *n);
+
extern unsigned long get_parent_ip(unsigned long addr);
extern void dump_cpu_task(int cpu);
@@ -1638,11 +1646,11 @@ struct task_struct {
/*
* numa_faults_locality tracks if faults recorded during the last
- * scan window were remote/local. The task scan period is adapted
- * based on the locality of the faults with different weights
- * depending on whether they were shared or private faults
+ * scan window were remote/local or failed to migrate. The task scan
+ * period is adapted based on the locality of the faults with different
+ * weights depending on whether they were shared or private faults
*/
- unsigned long numa_faults_locality[2];
+ unsigned long numa_faults_locality[3];
unsigned long numa_pages_migrated;
#endif /* CONFIG_NUMA_BALANCING */
@@ -1732,6 +1740,7 @@ struct task_struct {
#define TNF_NO_GROUP 0x02
#define TNF_SHARED 0x04
#define TNF_FAULT_LOCAL 0x08
+#define TNF_MIGRATE_FAIL 0x10
#ifdef CONFIG_NUMA_BALANCING
extern void task_numa_fault(int last_node, int node, int pages, int flags);
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index f5df8f687b4d..5f68d0a391ce 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -108,7 +108,7 @@ static inline unsigned __read_seqcount_begin(const seqcount_t *s)
unsigned ret;
repeat:
- ret = ACCESS_ONCE(s->sequence);
+ ret = READ_ONCE(s->sequence);
if (unlikely(ret & 1)) {
cpu_relax();
goto repeat;
@@ -127,7 +127,7 @@ repeat:
*/
static inline unsigned raw_read_seqcount(const seqcount_t *s)
{
- unsigned ret = ACCESS_ONCE(s->sequence);
+ unsigned ret = READ_ONCE(s->sequence);
smp_rmb();
return ret;
}
@@ -179,7 +179,7 @@ static inline unsigned read_seqcount_begin(const seqcount_t *s)
*/
static inline unsigned raw_seqcount_begin(const seqcount_t *s)
{
- unsigned ret = ACCESS_ONCE(s->sequence);
+ unsigned ret = READ_ONCE(s->sequence);
smp_rmb();
return ret & ~1;
}
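These helpers back the usual seqcount read loop; READ_ONCE() keeps the compiler from caching or re-reading the sequence word. Illustrative usage (names invented):

static u64 demo_read_consistent(const seqcount_t *sc, const u64 *shared)
{
	unsigned int seq;
	u64 val;

	do {
		seq = read_seqcount_begin(sc);	/* waits out an in-flight writer */
		val = *shared;
	} while (read_seqcount_retry(sc, seq));

	return val;
}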
diff --git a/include/linux/sunrpc/debug.h b/include/linux/sunrpc/debug.h
index c57d8ea0716c..59a7889e15db 100644
--- a/include/linux/sunrpc/debug.h
+++ b/include/linux/sunrpc/debug.h
@@ -60,17 +60,17 @@ struct rpc_xprt;
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
void rpc_register_sysctl(void);
void rpc_unregister_sysctl(void);
-int sunrpc_debugfs_init(void);
+void sunrpc_debugfs_init(void);
void sunrpc_debugfs_exit(void);
-int rpc_clnt_debugfs_register(struct rpc_clnt *);
+void rpc_clnt_debugfs_register(struct rpc_clnt *);
void rpc_clnt_debugfs_unregister(struct rpc_clnt *);
-int rpc_xprt_debugfs_register(struct rpc_xprt *);
+void rpc_xprt_debugfs_register(struct rpc_xprt *);
void rpc_xprt_debugfs_unregister(struct rpc_xprt *);
#else
-static inline int
+static inline void
sunrpc_debugfs_init(void)
{
- return 0;
+ return;
}
static inline void
@@ -79,10 +79,10 @@ sunrpc_debugfs_exit(void)
return;
}
-static inline int
+static inline void
rpc_clnt_debugfs_register(struct rpc_clnt *clnt)
{
- return 0;
+ return;
}
static inline void
@@ -91,10 +91,10 @@ rpc_clnt_debugfs_unregister(struct rpc_clnt *clnt)
return;
}
-static inline int
+static inline void
rpc_xprt_debugfs_register(struct rpc_xprt *xprt)
{
- return 0;
+ return;
}
static inline void
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index d9a4905e01d0..6e0ce8c7b8cb 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -227,9 +227,23 @@ struct skb_data { /* skb->cb is one of these */
struct urb *urb;
struct usbnet *dev;
enum skb_state state;
- size_t length;
+ long length;
+ unsigned long packets;
};
+/* Drivers that set FLAG_MULTI_PACKET must call this in their
+ * tx_fixup method before returning an skb.
+ */
+static inline void
+usbnet_set_skb_tx_stats(struct sk_buff *skb,
+ unsigned long packets, long bytes_delta)
+{
+ struct skb_data *entry = (struct skb_data *) skb->cb;
+
+ entry->packets = packets;
+ entry->length = bytes_delta;
+}
+
extern int usbnet_open(struct net_device *net);
extern int usbnet_stop(struct net_device *net);
extern netdev_tx_t usbnet_start_xmit(struct sk_buff *skb,
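A sketch of the calling convention the comment above describes, using a hypothetical 8-byte framing header:

static struct sk_buff *demo_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
				     gfp_t flags)
{
	long hdr = 8;			/* hypothetical per-packet header */

	if (skb_cow_head(skb, hdr)) {
		dev_kfree_skb_any(skb);
		return NULL;
	}
	__skb_push(skb, hdr);
	/* ... write the hypothetical framing header here ... */

	/* one logical packet in this URB; length grew by hdr bytes */
	usbnet_set_skb_tx_stats(skb, 1, hdr);
	return skb;
}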
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 00048339c23e..b2dd371ec0ca 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -130,6 +130,7 @@ extern int vm_dirty_ratio;
extern unsigned long vm_dirty_bytes;
extern unsigned int dirty_writeback_interval;
extern unsigned int dirty_expire_interval;
+extern unsigned int dirtytime_expire_interval;
extern int vm_highmem_is_dirtyable;
extern int block_dump;
extern int laptop_mode;
@@ -146,6 +147,8 @@ extern int dirty_ratio_handler(struct ctl_table *table, int write,
extern int dirty_bytes_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos);
+int dirtytime_interval_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos);
struct ctl_table;
int dirty_writeback_centisecs_handler(struct ctl_table *, int,
diff --git a/include/media/atmel-isi.h b/include/media/atmel-isi.h
index c2e570336269..6008b0985b7b 100644
--- a/include/media/atmel-isi.h
+++ b/include/media/atmel-isi.h
@@ -59,6 +59,10 @@
#define ISI_CFG1_FRATE_DIV_MASK (7 << 8)
#define ISI_CFG1_DISCR (1 << 11)
#define ISI_CFG1_FULL_MODE (1 << 12)
+/* Definitions for THMASK (ISI_V2) */
+#define ISI_CFG1_THMASK_BEATS_4 (0 << 13)
+#define ISI_CFG1_THMASK_BEATS_8 (1 << 13)
+#define ISI_CFG1_THMASK_BEATS_16 (2 << 13)
/* Bitfields in CFG2 */
#define ISI_CFG2_GRAYSCALE (1 << 13)
diff --git a/include/net/ip.h b/include/net/ip.h
index 025c61c0dffb..6cc1eafb153a 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -453,22 +453,6 @@ static __inline__ void inet_reset_saddr(struct sock *sk)
#endif
-static inline int sk_mc_loop(struct sock *sk)
-{
- if (!sk)
- return 1;
- switch (sk->sk_family) {
- case AF_INET:
- return inet_sk(sk)->mc_loop;
-#if IS_ENABLED(CONFIG_IPV6)
- case AF_INET6:
- return inet6_sk(sk)->mc_loop;
-#endif
- }
- WARN_ON(1);
- return 1;
-}
-
bool ip_call_ra_chain(struct sk_buff *skb);
/*
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 1d09b46c1e48..eda131d179d9 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -174,7 +174,8 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
{
- struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
+ struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
+ inet6_sk(skb->sk) : NULL;
return (np && np->pmtudisc >= IPV6_PMTUDISC_PROBE) ?
skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
diff --git a/include/net/netfilter/nf_log.h b/include/net/netfilter/nf_log.h
index 534e1f2ac4fc..57639fca223a 100644
--- a/include/net/netfilter/nf_log.h
+++ b/include/net/netfilter/nf_log.h
@@ -79,6 +79,16 @@ void nf_log_packet(struct net *net,
const struct nf_loginfo *li,
const char *fmt, ...);
+__printf(8, 9)
+void nf_log_trace(struct net *net,
+ u_int8_t pf,
+ unsigned int hooknum,
+ const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ const struct nf_loginfo *li,
+ const char *fmt, ...);
+
struct nf_log_buf;
struct nf_log_buf *nf_log_buf_open(void);
diff --git a/include/net/sock.h b/include/net/sock.h
index ab186b1d31ff..e4079c28e6b8 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1762,6 +1762,8 @@ struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
+bool sk_mc_loop(struct sock *sk);
+
static inline bool sk_can_gso(const struct sock *sk)
{
return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
diff --git a/include/trace/events/regmap.h b/include/trace/events/regmap.h
index 23d561512f64..22317d2b52ab 100644
--- a/include/trace/events/regmap.h
+++ b/include/trace/events/regmap.h
@@ -7,27 +7,26 @@
#include <linux/ktime.h>
#include <linux/tracepoint.h>
-struct device;
-struct regmap;
+#include "../../../drivers/base/regmap/internal.h"
/*
* Log register events
*/
DECLARE_EVENT_CLASS(regmap_reg,
- TP_PROTO(struct device *dev, unsigned int reg,
+ TP_PROTO(struct regmap *map, unsigned int reg,
unsigned int val),
- TP_ARGS(dev, reg, val),
+ TP_ARGS(map, reg, val),
TP_STRUCT__entry(
- __string( name, dev_name(dev) )
- __field( unsigned int, reg )
- __field( unsigned int, val )
+ __string( name, regmap_name(map) )
+ __field( unsigned int, reg )
+ __field( unsigned int, val )
),
TP_fast_assign(
- __assign_str(name, dev_name(dev));
+ __assign_str(name, regmap_name(map));
__entry->reg = reg;
__entry->val = val;
),
@@ -39,45 +38,45 @@ DECLARE_EVENT_CLASS(regmap_reg,
DEFINE_EVENT(regmap_reg, regmap_reg_write,
- TP_PROTO(struct device *dev, unsigned int reg,
+ TP_PROTO(struct regmap *map, unsigned int reg,
unsigned int val),
- TP_ARGS(dev, reg, val)
+ TP_ARGS(map, reg, val)
);
DEFINE_EVENT(regmap_reg, regmap_reg_read,
- TP_PROTO(struct device *dev, unsigned int reg,
+ TP_PROTO(struct regmap *map, unsigned int reg,
unsigned int val),
- TP_ARGS(dev, reg, val)
+ TP_ARGS(map, reg, val)
);
DEFINE_EVENT(regmap_reg, regmap_reg_read_cache,
- TP_PROTO(struct device *dev, unsigned int reg,
+ TP_PROTO(struct regmap *map, unsigned int reg,
unsigned int val),
- TP_ARGS(dev, reg, val)
+ TP_ARGS(map, reg, val)
);
DECLARE_EVENT_CLASS(regmap_block,
- TP_PROTO(struct device *dev, unsigned int reg, int count),
+ TP_PROTO(struct regmap *map, unsigned int reg, int count),
- TP_ARGS(dev, reg, count),
+ TP_ARGS(map, reg, count),
TP_STRUCT__entry(
- __string( name, dev_name(dev) )
- __field( unsigned int, reg )
- __field( int, count )
+ __string( name, regmap_name(map) )
+ __field( unsigned int, reg )
+ __field( int, count )
),
TP_fast_assign(
- __assign_str(name, dev_name(dev));
+ __assign_str(name, regmap_name(map));
__entry->reg = reg;
__entry->count = count;
),
@@ -89,48 +88,48 @@ DECLARE_EVENT_CLASS(regmap_block,
DEFINE_EVENT(regmap_block, regmap_hw_read_start,
- TP_PROTO(struct device *dev, unsigned int reg, int count),
+ TP_PROTO(struct regmap *map, unsigned int reg, int count),
- TP_ARGS(dev, reg, count)
+ TP_ARGS(map, reg, count)
);
DEFINE_EVENT(regmap_block, regmap_hw_read_done,
- TP_PROTO(struct device *dev, unsigned int reg, int count),
+ TP_PROTO(struct regmap *map, unsigned int reg, int count),
- TP_ARGS(dev, reg, count)
+ TP_ARGS(map, reg, count)
);
DEFINE_EVENT(regmap_block, regmap_hw_write_start,
- TP_PROTO(struct device *dev, unsigned int reg, int count),
+ TP_PROTO(struct regmap *map, unsigned int reg, int count),
- TP_ARGS(dev, reg, count)
+ TP_ARGS(map, reg, count)
);
DEFINE_EVENT(regmap_block, regmap_hw_write_done,
- TP_PROTO(struct device *dev, unsigned int reg, int count),
+ TP_PROTO(struct regmap *map, unsigned int reg, int count),
- TP_ARGS(dev, reg, count)
+ TP_ARGS(map, reg, count)
);
TRACE_EVENT(regcache_sync,
- TP_PROTO(struct device *dev, const char *type,
+ TP_PROTO(struct regmap *map, const char *type,
const char *status),
- TP_ARGS(dev, type, status),
+ TP_ARGS(map, type, status),
TP_STRUCT__entry(
- __string( name, dev_name(dev) )
- __string( status, status )
- __string( type, type )
- __field( int, type )
+ __string( name, regmap_name(map) )
+ __string( status, status )
+ __string( type, type )
+ __field( int, type )
),
TP_fast_assign(
- __assign_str(name, dev_name(dev));
+ __assign_str(name, regmap_name(map));
__assign_str(status, status);
__assign_str(type, type);
),
@@ -141,17 +140,17 @@ TRACE_EVENT(regcache_sync,
DECLARE_EVENT_CLASS(regmap_bool,
- TP_PROTO(struct device *dev, bool flag),
+ TP_PROTO(struct regmap *map, bool flag),
- TP_ARGS(dev, flag),
+ TP_ARGS(map, flag),
TP_STRUCT__entry(
- __string( name, dev_name(dev) )
- __field( int, flag )
+ __string( name, regmap_name(map) )
+ __field( int, flag )
),
TP_fast_assign(
- __assign_str(name, dev_name(dev));
+ __assign_str(name, regmap_name(map));
__entry->flag = flag;
),
@@ -161,32 +160,32 @@ DECLARE_EVENT_CLASS(regmap_bool,
DEFINE_EVENT(regmap_bool, regmap_cache_only,
- TP_PROTO(struct device *dev, bool flag),
+ TP_PROTO(struct regmap *map, bool flag),
- TP_ARGS(dev, flag)
+ TP_ARGS(map, flag)
);
DEFINE_EVENT(regmap_bool, regmap_cache_bypass,
- TP_PROTO(struct device *dev, bool flag),
+ TP_PROTO(struct regmap *map, bool flag),
- TP_ARGS(dev, flag)
+ TP_ARGS(map, flag)
);
DECLARE_EVENT_CLASS(regmap_async,
- TP_PROTO(struct device *dev),
+ TP_PROTO(struct regmap *map),
- TP_ARGS(dev),
+ TP_ARGS(map),
TP_STRUCT__entry(
- __string( name, dev_name(dev) )
+ __string( name, regmap_name(map) )
),
TP_fast_assign(
- __assign_str(name, dev_name(dev));
+ __assign_str(name, regmap_name(map));
),
TP_printk("%s", __get_str(name))
@@ -194,50 +193,50 @@ DECLARE_EVENT_CLASS(regmap_async,
DEFINE_EVENT(regmap_block, regmap_async_write_start,
- TP_PROTO(struct device *dev, unsigned int reg, int count),
+ TP_PROTO(struct regmap *map, unsigned int reg, int count),
- TP_ARGS(dev, reg, count)
+ TP_ARGS(map, reg, count)
);
DEFINE_EVENT(regmap_async, regmap_async_io_complete,
- TP_PROTO(struct device *dev),
+ TP_PROTO(struct regmap *map),
- TP_ARGS(dev)
+ TP_ARGS(map)
);
DEFINE_EVENT(regmap_async, regmap_async_complete_start,
- TP_PROTO(struct device *dev),
+ TP_PROTO(struct regmap *map),
- TP_ARGS(dev)
+ TP_ARGS(map)
);
DEFINE_EVENT(regmap_async, regmap_async_complete_done,
- TP_PROTO(struct device *dev),
+ TP_PROTO(struct regmap *map),
- TP_ARGS(dev)
+ TP_ARGS(map)
);
TRACE_EVENT(regcache_drop_region,
- TP_PROTO(struct device *dev, unsigned int from,
+ TP_PROTO(struct regmap *map, unsigned int from,
unsigned int to),
- TP_ARGS(dev, from, to),
+ TP_ARGS(map, from, to),
TP_STRUCT__entry(
- __string( name, dev_name(dev) )
- __field( unsigned int, from )
- __field( unsigned int, to )
+ __string( name, regmap_name(map) )
+ __field( unsigned int, from )
+ __field( unsigned int, to )
),
TP_fast_assign(
- __assign_str(name, dev_name(dev));
+ __assign_str(name, regmap_name(map));
__entry->from = from;
__entry->to = to;
),
diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
index b0a813079852..2f62ab2d7bf9 100644
--- a/include/uapi/linux/input.h
+++ b/include/uapi/linux/input.h
@@ -973,7 +973,8 @@ struct input_keymap_entry {
*/
#define MT_TOOL_FINGER 0
#define MT_TOOL_PEN 1
-#define MT_TOOL_MAX 1
+#define MT_TOOL_PALM 2
+#define MT_TOOL_MAX 2
/*
* Values describing the status of a force-feedback effect
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 805570650062..f574d7be7631 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -147,6 +147,16 @@ struct kvm_pit_config {
#define KVM_PIT_SPEAKER_DUMMY 1
+struct kvm_s390_skeys {
+ __u64 start_gfn;
+ __u64 count;
+ __u64 skeydata_addr;
+ __u32 flags;
+ __u32 reserved[9];
+};
+#define KVM_S390_GET_SKEYS_NONE 1
+#define KVM_S390_SKEYS_MAX 1048576
+
#define KVM_EXIT_UNKNOWN 0
#define KVM_EXIT_EXCEPTION 1
#define KVM_EXIT_IO 2
@@ -172,6 +182,7 @@ struct kvm_pit_config {
#define KVM_EXIT_S390_TSCH 22
#define KVM_EXIT_EPR 23
#define KVM_EXIT_SYSTEM_EVENT 24
+#define KVM_EXIT_S390_STSI 25
/* For KVM_EXIT_INTERNAL_ERROR */
/* Emulate instruction failed. */
@@ -309,6 +320,15 @@ struct kvm_run {
__u32 type;
__u64 flags;
} system_event;
+ /* KVM_EXIT_S390_STSI */
+ struct {
+ __u64 addr;
+ __u8 ar;
+ __u8 reserved;
+ __u8 fc;
+ __u8 sel1;
+ __u16 sel2;
+ } s390_stsi;
/* Fix the size of the union. */
char padding[256];
};
@@ -324,7 +344,7 @@ struct kvm_run {
__u64 kvm_dirty_regs;
union {
struct kvm_sync_regs regs;
- char padding[1024];
+ char padding[2048];
} s;
};
@@ -365,6 +385,24 @@ struct kvm_translation {
__u8 pad[5];
};
+/* for KVM_S390_MEM_OP */
+struct kvm_s390_mem_op {
+ /* in */
+ __u64 gaddr; /* the guest address */
+ __u64 flags; /* flags */
+ __u32 size; /* number of bytes */
+ __u32 op; /* type of operation */
+ __u64 buf; /* buffer in userspace */
+ __u8 ar; /* the access register number */
+ __u8 reserved[31]; /* should be set to 0 */
+};
+/* types for kvm_s390_mem_op->op */
+#define KVM_S390_MEMOP_LOGICAL_READ 0
+#define KVM_S390_MEMOP_LOGICAL_WRITE 1
+/* flags for kvm_s390_mem_op->flags */
+#define KVM_S390_MEMOP_F_CHECK_ONLY (1ULL << 0)
+#define KVM_S390_MEMOP_F_INJECT_EXCEPTION (1ULL << 1)
+
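Userspace-side sketch of driving this structure through the KVM_S390_MEM_OP vcpu ioctl defined later in this patch; vcpu_fd, guest_addr, and buffer are placeholders:

struct kvm_s390_mem_op op = {
	.gaddr = guest_addr,			/* placeholder */
	.size  = 4096,
	.op    = KVM_S390_MEMOP_LOGICAL_READ,
	.buf   = (__u64)(uintptr_t)buffer,	/* placeholder */
	.ar    = 0,
	.flags = KVM_S390_MEMOP_F_CHECK_ONLY,	/* probe access, copy nothing */
};

if (ioctl(vcpu_fd, KVM_S390_MEM_OP, &op) < 0)
	perror("KVM_S390_MEM_OP");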
/* for KVM_INTERRUPT */
struct kvm_interrupt {
/* in */
@@ -520,6 +558,13 @@ struct kvm_s390_irq {
} u;
};
+struct kvm_s390_irq_state {
+ __u64 buf;
+ __u32 flags;
+ __u32 len;
+ __u32 reserved[4];
+};
+
/* for KVM_SET_GUEST_DEBUG */
#define KVM_GUESTDBG_ENABLE 0x00000001
@@ -760,6 +805,14 @@ struct kvm_ppc_smmu_info {
#define KVM_CAP_PPC_ENABLE_HCALL 104
#define KVM_CAP_CHECK_EXTENSION_VM 105
#define KVM_CAP_S390_USER_SIGP 106
+#define KVM_CAP_S390_VECTOR_REGISTERS 107
+#define KVM_CAP_S390_MEM_OP 108
+#define KVM_CAP_S390_USER_STSI 109
+#define KVM_CAP_S390_SKEYS 110
+#define KVM_CAP_MIPS_FPU 111
+#define KVM_CAP_MIPS_MSA 112
+#define KVM_CAP_S390_INJECT_IRQ 113
+#define KVM_CAP_S390_IRQ_STATE 114
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1135,6 +1188,16 @@ struct kvm_s390_ucas_mapping {
#define KVM_ARM_VCPU_INIT _IOW(KVMIO, 0xae, struct kvm_vcpu_init)
#define KVM_ARM_PREFERRED_TARGET _IOR(KVMIO, 0xaf, struct kvm_vcpu_init)
#define KVM_GET_REG_LIST _IOWR(KVMIO, 0xb0, struct kvm_reg_list)
+/* Available with KVM_CAP_S390_MEM_OP */
+#define KVM_S390_MEM_OP _IOW(KVMIO, 0xb1, struct kvm_s390_mem_op)
+/* Available with KVM_CAP_S390_SKEYS */
+#define KVM_S390_GET_SKEYS _IOW(KVMIO, 0xb2, struct kvm_s390_skeys)
+#define KVM_S390_SET_SKEYS _IOW(KVMIO, 0xb3, struct kvm_s390_skeys)
+/* Available with KVM_CAP_S390_INJECT_IRQ */
+#define KVM_S390_IRQ _IOW(KVMIO, 0xb4, struct kvm_s390_irq)
+/* Available with KVM_CAP_S390_IRQ_STATE */
+#define KVM_S390_SET_IRQ_STATE _IOW(KVMIO, 0xb5, struct kvm_s390_irq_state)
+#define KVM_S390_GET_IRQ_STATE _IOW(KVMIO, 0xb6, struct kvm_s390_irq_state)
#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
#define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1)
diff --git a/include/uapi/linux/nfsd/export.h b/include/uapi/linux/nfsd/export.h
index 4742f2cb42f2..d3bd6ffec041 100644
--- a/include/uapi/linux/nfsd/export.h
+++ b/include/uapi/linux/nfsd/export.h
@@ -47,7 +47,7 @@
* exported filesystem.
*/
#define NFSEXP_V4ROOT 0x10000
-#define NFSEXP_NOPNFS 0x20000
+#define NFSEXP_PNFS 0x20000
/* All flags that we claim to support. (Note we don't support NOACL.) */
#define NFSEXP_ALLFLAGS 0x3FE7F
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 453ef61311d4..2fabc0627165 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4574,6 +4574,13 @@ static void perf_pending_event(struct irq_work *entry)
{
struct perf_event *event = container_of(entry,
struct perf_event, pending);
+ int rctx;
+
+ rctx = perf_swevent_get_recursion_context();
+ /*
+ * If we 'fail' here, that's OK: it means recursion is already disabled
+ * and we won't recurse 'further'.
+ */
if (event->pending_disable) {
event->pending_disable = 0;
@@ -4584,6 +4591,9 @@ static void perf_pending_event(struct irq_work *entry)
event->pending_wakeup = 0;
perf_event_wakeup(event);
}
+
+ if (rctx >= 0)
+ perf_swevent_put_recursion_context(rctx);
}
/*
diff --git a/kernel/futex.c b/kernel/futex.c
index 2a5e3830e953..2579e407ff67 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -900,7 +900,7 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key,
if (!p)
return -ESRCH;
- if (!p->mm) {
+ if (unlikely(p->flags & PF_KTHREAD)) {
put_task_struct(p);
return -EPERM;
}
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 88d0d4420ad2..ba77ab5f64dd 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -633,7 +633,7 @@ static int count_matching_names(struct lock_class *new_class)
if (!new_class->name)
return 0;
- list_for_each_entry(class, &all_lock_classes, lock_entry) {
+ list_for_each_entry_rcu(class, &all_lock_classes, lock_entry) {
if (new_class->key - new_class->subclass == class->key)
return class->name_version;
if (class->name && !strcmp(class->name, new_class->name))
@@ -700,10 +700,12 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
hash_head = classhashentry(key);
/*
- * We can walk the hash lockfree, because the hash only
- * grows, and we are careful when adding entries to the end:
+ * We do an RCU walk of the hash, see lockdep_free_key_range().
*/
- list_for_each_entry(class, hash_head, hash_entry) {
+ if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+ return NULL;
+
+ list_for_each_entry_rcu(class, hash_head, hash_entry) {
if (class->key == key) {
/*
* Huh! same key, different name? Did someone trample
@@ -728,7 +730,8 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
struct lockdep_subclass_key *key;
struct list_head *hash_head;
struct lock_class *class;
- unsigned long flags;
+
+ DEBUG_LOCKS_WARN_ON(!irqs_disabled());
class = look_up_lock_class(lock, subclass);
if (likely(class))
@@ -750,28 +753,26 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
key = lock->key->subkeys + subclass;
hash_head = classhashentry(key);
- raw_local_irq_save(flags);
if (!graph_lock()) {
- raw_local_irq_restore(flags);
return NULL;
}
/*
* We have to do the hash-walk again, to avoid races
* with another CPU:
*/
- list_for_each_entry(class, hash_head, hash_entry)
+ list_for_each_entry_rcu(class, hash_head, hash_entry) {
if (class->key == key)
goto out_unlock_set;
+ }
+
/*
* Allocate a new key from the static array, and add it to
* the hash:
*/
if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
if (!debug_locks_off_graph_unlock()) {
- raw_local_irq_restore(flags);
return NULL;
}
- raw_local_irq_restore(flags);
print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
dump_stack();
@@ -798,7 +799,6 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
if (verbose(class)) {
graph_unlock();
- raw_local_irq_restore(flags);
printk("\nnew class %p: %s", class->key, class->name);
if (class->name_version > 1)
@@ -806,15 +806,12 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
printk("\n");
dump_stack();
- raw_local_irq_save(flags);
if (!graph_lock()) {
- raw_local_irq_restore(flags);
return NULL;
}
}
out_unlock_set:
graph_unlock();
- raw_local_irq_restore(flags);
out_set_class_cache:
if (!subclass || force)
@@ -870,11 +867,9 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
entry->distance = distance;
entry->trace = *trace;
/*
- * Since we never remove from the dependency list, the list can
- * be walked lockless by other CPUs, it's only allocation
- * that must be protected by the spinlock. But this also means
- * we must make new entries visible only once writes to the
- * entry become visible - hence the RCU op:
+ * Both allocation and removal are done under the graph lock; but
+ * iteration is under RCU-sched; see look_up_lock_class() and
+ * lockdep_free_key_range().
*/
list_add_tail_rcu(&entry->entry, head);
@@ -1025,7 +1020,9 @@ static int __bfs(struct lock_list *source_entry,
else
head = &lock->class->locks_before;
- list_for_each_entry(entry, head, entry) {
+ DEBUG_LOCKS_WARN_ON(!irqs_disabled());
+
+ list_for_each_entry_rcu(entry, head, entry) {
if (!lock_accessed(entry)) {
unsigned int cq_depth;
mark_lock_accessed(entry, lock);
@@ -2022,7 +2019,7 @@ static inline int lookup_chain_cache(struct task_struct *curr,
* We can walk it lock-free, because entries only get added
* to the hash:
*/
- list_for_each_entry(chain, hash_head, entry) {
+ list_for_each_entry_rcu(chain, hash_head, entry) {
if (chain->chain_key == chain_key) {
cache_hit:
debug_atomic_inc(chain_lookup_hits);
@@ -2996,8 +2993,18 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
if (unlikely(!debug_locks))
return;
- if (subclass)
+ if (subclass) {
+ unsigned long flags;
+
+ if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion))
+ return;
+
+ raw_local_irq_save(flags);
+ current->lockdep_recursion = 1;
register_lock_class(lock, subclass, 1);
+ current->lockdep_recursion = 0;
+ raw_local_irq_restore(flags);
+ }
}
EXPORT_SYMBOL_GPL(lockdep_init_map);
@@ -3887,9 +3894,17 @@ static inline int within(const void *addr, void *start, unsigned long size)
return addr >= start && addr < start + size;
}
+/*
+ * Used in module.c to remove lock classes from memory that is going to be
+ * freed and possibly re-used by other modules.
+ *
+ * We will have had one sync_sched() before getting here, so we're guaranteed
+ * nobody will look up these exact classes -- they're properly dead but still
+ * allocated.
+ */
void lockdep_free_key_range(void *start, unsigned long size)
{
- struct lock_class *class, *next;
+ struct lock_class *class;
struct list_head *head;
unsigned long flags;
int i;
@@ -3905,7 +3920,7 @@ void lockdep_free_key_range(void *start, unsigned long size)
head = classhash_table + i;
if (list_empty(head))
continue;
- list_for_each_entry_safe(class, next, head, hash_entry) {
+ list_for_each_entry_rcu(class, head, hash_entry) {
if (within(class->key, start, size))
zap_class(class);
else if (within(class->name, start, size))
@@ -3916,11 +3931,25 @@ void lockdep_free_key_range(void *start, unsigned long size)
if (locked)
graph_unlock();
raw_local_irq_restore(flags);
+
+ /*
+ * Wait for any possible iterators from look_up_lock_class() to pass
+ * before continuing to free the memory they refer to.
+ *
+ * sync_sched() is sufficient because the read side runs with IRQs disabled.
+ */
+ synchronize_sched();
+
+ /*
+ * XXX at this point we could return the resources to the pool;
+ * instead we leak them. We would need to change to bitmap allocators
+ * instead of the linear allocators we have now.
+ */
}
void lockdep_reset_lock(struct lockdep_map *lock)
{
- struct lock_class *class, *next;
+ struct lock_class *class;
struct list_head *head;
unsigned long flags;
int i, j;
@@ -3948,7 +3977,7 @@ void lockdep_reset_lock(struct lockdep_map *lock)
head = classhash_table + i;
if (list_empty(head))
continue;
- list_for_each_entry_safe(class, next, head, hash_entry) {
+ list_for_each_entry_rcu(class, head, hash_entry) {
int match = 0;
for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
index d1fe2ba5bac9..75e114bdf3f2 100644
--- a/kernel/locking/mcs_spinlock.h
+++ b/kernel/locking/mcs_spinlock.h
@@ -78,7 +78,7 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
*/
return;
}
- ACCESS_ONCE(prev->next) = node;
+ WRITE_ONCE(prev->next, node);
/* Wait until the lock holder passes the lock down. */
arch_mcs_spin_lock_contended(&node->locked);
@@ -91,7 +91,7 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
static inline
void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
{
- struct mcs_spinlock *next = ACCESS_ONCE(node->next);
+ struct mcs_spinlock *next = READ_ONCE(node->next);
if (likely(!next)) {
/*
@@ -100,7 +100,7 @@ void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
if (likely(cmpxchg(lock, node, NULL) == node))
return;
/* Wait until the next pointer is set */
- while (!(next = ACCESS_ONCE(node->next)))
+ while (!(next = READ_ONCE(node->next)))
cpu_relax_lowlatency();
}
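
The ACCESS_ONCE() -> READ_ONCE()/WRITE_ONCE() conversions in this file and below make the direction of each access explicit and also work on non-scalar types. The MCS-style handoff reduced to a hedged sketch, with a hypothetical node type:

	struct node {
		struct node *next;
		int locked;
	};

	static void handoff(struct node *me)
	{
		struct node *next;

		/* spin until our successor has linked itself behind us */
		while (!(next = READ_ONCE(me->next)))
			cpu_relax();

		WRITE_ONCE(next->locked, 1);	/* one plain store passes the lock */
	}
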
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 94674e5919cb..4cccea6b8934 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -25,7 +25,7 @@
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
-#include "mcs_spinlock.h"
+#include <linux/osq_lock.h>
/*
* In the DEBUG case we are using the "NULL fastpath" for mutexes,
@@ -217,44 +217,35 @@ ww_mutex_set_context_slowpath(struct ww_mutex *lock,
}
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
-static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
-{
- if (lock->owner != owner)
- return false;
-
- /*
- * Ensure we emit the owner->on_cpu, dereference _after_ checking
- * lock->owner still matches owner, if that fails, owner might
- * point to free()d memory, if it still matches, the rcu_read_lock()
- * ensures the memory stays valid.
- */
- barrier();
-
- return owner->on_cpu;
-}
-
/*
* Look out! "owner" is an entirely speculative pointer
* access and not reliable.
*/
static noinline
-int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
+bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
+ bool ret = true;
+
rcu_read_lock();
- while (owner_running(lock, owner)) {
- if (need_resched())
+ while (lock->owner == owner) {
+ /*
+ * Ensure we emit the owner->on_cpu dereference _after_
+ * checking lock->owner still matches owner. If that fails,
+ * owner might point to freed memory. If it still matches,
+ * the rcu_read_lock() ensures the memory stays valid.
+ */
+ barrier();
+
+ if (!owner->on_cpu || need_resched()) {
+ ret = false;
break;
+ }
cpu_relax_lowlatency();
}
rcu_read_unlock();
- /*
- * We break out the loop above on need_resched() and when the
- * owner changed, which is a sign for heavy contention. Return
- * success only when lock->owner is NULL.
- */
- return lock->owner == NULL;
+ return ret;
}
/*
@@ -269,7 +260,7 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
return 0;
rcu_read_lock();
- owner = ACCESS_ONCE(lock->owner);
+ owner = READ_ONCE(lock->owner);
if (owner)
retval = owner->on_cpu;
rcu_read_unlock();
@@ -343,7 +334,7 @@ static bool mutex_optimistic_spin(struct mutex *lock,
* As such, when deadlock detection needs to be
* performed the optimistic spinning cannot be done.
*/
- if (ACCESS_ONCE(ww->ctx))
+ if (READ_ONCE(ww->ctx))
break;
}
@@ -351,7 +342,7 @@ static bool mutex_optimistic_spin(struct mutex *lock,
* If there's an owner, wait for it to either
* release the lock or go to sleep.
*/
- owner = ACCESS_ONCE(lock->owner);
+ owner = READ_ONCE(lock->owner);
if (owner && !mutex_spin_on_owner(lock, owner))
break;
@@ -490,7 +481,7 @@ static inline int __sched
__ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
{
struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
- struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);
+ struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
if (!hold_ctx)
return 0;
diff --git a/kernel/locking/osq_lock.c b/kernel/locking/osq_lock.c
index c112d00341b0..dc85ee23a26f 100644
--- a/kernel/locking/osq_lock.c
+++ b/kernel/locking/osq_lock.c
@@ -98,7 +98,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
prev = decode_cpu(old);
node->prev = prev;
- ACCESS_ONCE(prev->next) = node;
+ WRITE_ONCE(prev->next, node);
/*
* Normally @prev is untouchable after the above store; because at that
@@ -109,7 +109,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
* cmpxchg in an attempt to undo our queueing.
*/
- while (!ACCESS_ONCE(node->locked)) {
+ while (!READ_ONCE(node->locked)) {
/*
* If we need to reschedule bail... so we can block.
*/
@@ -148,7 +148,7 @@ unqueue:
* Or we race against a concurrent unqueue()'s step-B, in which
* case its step-C will write us a new @node->prev pointer.
*/
- prev = ACCESS_ONCE(node->prev);
+ prev = READ_ONCE(node->prev);
}
/*
@@ -170,8 +170,8 @@ unqueue:
* it will wait in Step-A.
*/
- ACCESS_ONCE(next->prev) = prev;
- ACCESS_ONCE(prev->next) = next;
+ WRITE_ONCE(next->prev, prev);
+ WRITE_ONCE(prev->next, next);
return false;
}
@@ -193,11 +193,11 @@ void osq_unlock(struct optimistic_spin_queue *lock)
node = this_cpu_ptr(&osq_node);
next = xchg(&node->next, NULL);
if (next) {
- ACCESS_ONCE(next->locked) = 1;
+ WRITE_ONCE(next->locked, 1);
return;
}
next = osq_wait_next(lock, node, NULL);
if (next)
- ACCESS_ONCE(next->locked) = 1;
+ WRITE_ONCE(next->locked, 1);
}
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 6357265a31ad..b73279367087 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -349,7 +349,7 @@ static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
*
* @task: the task owning the mutex (owner) for which a chain walk is
* probably needed
- * @deadlock_detect: do we have to carry out deadlock detection?
+ * @chwalk: do we have to carry out deadlock detection?
* @orig_lock: the mutex (can be NULL if we are walking the chain to recheck
* things for a task that has just got its priority adjusted, and
* is waiting on a mutex)
diff --git a/kernel/locking/rwsem-spinlock.c b/kernel/locking/rwsem-spinlock.c
index 2555ae15ec14..3a5048572065 100644
--- a/kernel/locking/rwsem-spinlock.c
+++ b/kernel/locking/rwsem-spinlock.c
@@ -85,6 +85,13 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
list_del(&waiter->list);
tsk = waiter->task;
+ /*
+ * Make sure we do not wake up the next reader before
+ * setting the nil condition to grant the next reader;
+ * otherwise we could miss the wakeup on the other
+ * side and end up sleeping again. See the pairing
+ * in rwsem_down_read_failed().
+ */
smp_mb();
waiter->task = NULL;
wake_up_process(tsk);
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index 2f7cc4076f50..3417d0172a5d 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -14,8 +14,9 @@
#include <linux/init.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
+#include <linux/osq_lock.h>
-#include "mcs_spinlock.h"
+#include "rwsem.h"
/*
* Guide to the rw_semaphore's count field for common values.
@@ -186,6 +187,13 @@ __rwsem_do_wake(struct rw_semaphore *sem, enum rwsem_wake_type wake_type)
waiter = list_entry(next, struct rwsem_waiter, list);
next = waiter->list.next;
tsk = waiter->task;
+ /*
+ * Make sure we do not wake up the next reader before
+ * setting the nil condition to grant the next reader;
+ * otherwise we could miss the wakeup on the other
+ * side and end up sleeping again. See the pairing
+ * in rwsem_down_read_failed().
+ */
smp_mb();
waiter->task = NULL;
wake_up_process(tsk);
@@ -258,6 +266,7 @@ static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
RWSEM_ACTIVE_WRITE_BIAS) == RWSEM_WAITING_BIAS) {
if (!list_is_singular(&sem->wait_list))
rwsem_atomic_update(RWSEM_WAITING_BIAS, sem);
+ rwsem_set_owner(sem);
return true;
}
@@ -270,15 +279,17 @@ static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
*/
static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
{
- long old, count = ACCESS_ONCE(sem->count);
+ long old, count = READ_ONCE(sem->count);
while (true) {
if (!(count == 0 || count == RWSEM_WAITING_BIAS))
return false;
old = cmpxchg(&sem->count, count, count + RWSEM_ACTIVE_WRITE_BIAS);
- if (old == count)
+ if (old == count) {
+ rwsem_set_owner(sem);
return true;
+ }
count = old;
}
@@ -287,60 +298,67 @@ static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
{
struct task_struct *owner;
- bool on_cpu = false;
+ bool ret = true;
if (need_resched())
return false;
rcu_read_lock();
- owner = ACCESS_ONCE(sem->owner);
- if (owner)
- on_cpu = owner->on_cpu;
- rcu_read_unlock();
-
- /*
- * If sem->owner is not set, yet we have just recently entered the
- * slowpath, then there is a possibility reader(s) may have the lock.
- * To be safe, avoid spinning in these situations.
- */
- return on_cpu;
-}
-
-static inline bool owner_running(struct rw_semaphore *sem,
- struct task_struct *owner)
-{
- if (sem->owner != owner)
- return false;
-
- /*
- * Ensure we emit the owner->on_cpu, dereference _after_ checking
- * sem->owner still matches owner, if that fails, owner might
- * point to free()d memory, if it still matches, the rcu_read_lock()
- * ensures the memory stays valid.
- */
- barrier();
+ owner = READ_ONCE(sem->owner);
+ if (!owner) {
+ long count = READ_ONCE(sem->count);
+ /*
+ * If sem->owner is not set, yet we have just recently entered the
+ * slowpath with the lock being active, then there is a possibility
+ * reader(s) may have the lock. To be safe, avoid spinning in these
+ * situations.
+ */
+ if (count & RWSEM_ACTIVE_MASK)
+ ret = false;
+ goto done;
+ }
- return owner->on_cpu;
+ ret = owner->on_cpu;
+done:
+ rcu_read_unlock();
+ return ret;
}
static noinline
bool rwsem_spin_on_owner(struct rw_semaphore *sem, struct task_struct *owner)
{
+ long count;
+
rcu_read_lock();
- while (owner_running(sem, owner)) {
- if (need_resched())
- break;
+ while (sem->owner == owner) {
+ /*
+ * Ensure we emit the owner->on_cpu dereference _after_
+ * checking sem->owner still matches owner. If that fails,
+ * owner might point to freed memory. If it still matches,
+ * the rcu_read_lock() ensures the memory stays valid.
+ */
+ barrier();
+
+ /* abort spinning on need_resched() or when the owner is not running */
+ if (!owner->on_cpu || need_resched()) {
+ rcu_read_unlock();
+ return false;
+ }
cpu_relax_lowlatency();
}
rcu_read_unlock();
+ if (READ_ONCE(sem->owner))
+ return true; /* new owner, continue spinning */
+
/*
- * We break out the loop above on need_resched() or when the
- * owner changed, which is a sign for heavy contention. Return
- * success only when sem->owner is NULL.
+ * When the owner is not set, the lock could be free or
+ * held by readers. Check the counter to verify the
+ * state.
*/
- return sem->owner == NULL;
+ count = READ_ONCE(sem->count);
+ return (count == 0 || count == RWSEM_WAITING_BIAS);
}
static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
@@ -358,7 +376,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
goto done;
while (true) {
- owner = ACCESS_ONCE(sem->owner);
+ owner = READ_ONCE(sem->owner);
if (owner && !rwsem_spin_on_owner(sem, owner))
break;
@@ -432,7 +450,7 @@ struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
/* we're now waiting on the lock, but no longer actively locking */
if (waiting) {
- count = ACCESS_ONCE(sem->count);
+ count = READ_ONCE(sem->count);
/*
* If there were already threads queued before us and there are
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index e2d3bc7f03b4..205be0ce34de 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -9,29 +9,9 @@
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/rwsem.h>
-
#include <linux/atomic.h>
-#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
-static inline void rwsem_set_owner(struct rw_semaphore *sem)
-{
- sem->owner = current;
-}
-
-static inline void rwsem_clear_owner(struct rw_semaphore *sem)
-{
- sem->owner = NULL;
-}
-
-#else
-static inline void rwsem_set_owner(struct rw_semaphore *sem)
-{
-}
-
-static inline void rwsem_clear_owner(struct rw_semaphore *sem)
-{
-}
-#endif
+#include "rwsem.h"
/*
* lock for reading
diff --git a/kernel/locking/rwsem.h b/kernel/locking/rwsem.h
new file mode 100644
index 000000000000..870ed9a5b426
--- /dev/null
+++ b/kernel/locking/rwsem.h
@@ -0,0 +1,20 @@
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
+static inline void rwsem_set_owner(struct rw_semaphore *sem)
+{
+ sem->owner = current;
+}
+
+static inline void rwsem_clear_owner(struct rw_semaphore *sem)
+{
+ sem->owner = NULL;
+}
+
+#else
+static inline void rwsem_set_owner(struct rw_semaphore *sem)
+{
+}
+
+static inline void rwsem_clear_owner(struct rw_semaphore *sem)
+{
+}
+#endif
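
Sharing these helpers from a header keeps callers #ifdef-free: without CONFIG_RWSEM_SPIN_ON_OWNER the empty inlines compile away. An abridged sketch of how kernel/locking/rwsem.c pairs them with the write-side entry points (details elided):

	void down_write(struct rw_semaphore *sem)
	{
		/* ... acquire sem->count for writing ... */
		rwsem_set_owner(sem);	/* no-op unless RWSEM_SPIN_ON_OWNER */
	}

	void up_write(struct rw_semaphore *sem)
	{
		rwsem_clear_owner(sem);
		/* ... release sem->count ... */
	}
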
diff --git a/kernel/module.c b/kernel/module.c
index b3d634ed06c9..ec53f594e9c9 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1865,7 +1865,7 @@ static void free_module(struct module *mod)
kfree(mod->args);
percpu_modfree(mod);
- /* Free lock-classes: */
+ /* Free lock-classes; relies on the preceding sync_rcu(). */
lockdep_free_key_range(mod->module_core, mod->core_size);
/* Finally, free the core (containing the module structure) */
@@ -2479,6 +2479,23 @@ static int elf_header_check(struct load_info *info)
return 0;
}
+#define COPY_CHUNK_SIZE (16*PAGE_SIZE)
+
+static int copy_chunked_from_user(void *dst, const void __user *usrc, unsigned long len)
+{
+ do {
+ unsigned long n = min(len, COPY_CHUNK_SIZE);
+
+ if (copy_from_user(dst, usrc, n) != 0)
+ return -EFAULT;
+ cond_resched();
+ dst += n;
+ usrc += n;
+ len -= n;
+ } while (len);
+ return 0;
+}
+
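
Chunking bounds how long any single copy_from_user() runs and lets cond_resched() yield between chunks, which matters for multi-megabyte module images. A hypothetical caller, mirroring how copy_module_from_user() uses the helper in the next hunk:

	static int fetch_image(void *dst, const void __user *src, unsigned long len)
	{
		/* e.g. a 3 MiB image on 4 KiB pages is copied in
		 * ceil(3 MiB / 64 KiB) = 48 chunks, rescheduling in between */
		if (copy_chunked_from_user(dst, src, len))
			return -EFAULT;
		return 0;
	}
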
/* Sets info->hdr and info->len. */
static int copy_module_from_user(const void __user *umod, unsigned long len,
struct load_info *info)
@@ -2498,7 +2515,7 @@ static int copy_module_from_user(const void __user *umod, unsigned long len,
if (!info->hdr)
return -ENOMEM;
- if (copy_from_user(info->hdr, umod, info->len) != 0) {
+ if (copy_chunked_from_user(info->hdr, umod, info->len) != 0) {
vfree(info->hdr);
return -EFAULT;
}
@@ -3349,9 +3366,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
module_bug_cleanup(mod);
mutex_unlock(&module_mutex);
- /* Free lock-classes: */
- lockdep_free_key_range(mod->module_core, mod->core_size);
-
/* we can't deallocate the module until we clear memory protection */
unset_module_init_ro_nx(mod);
unset_module_core_ro_nx(mod);
@@ -3375,6 +3389,9 @@ static int load_module(struct load_info *info, const char __user *uargs,
synchronize_rcu();
mutex_unlock(&module_mutex);
free_module:
+ /* Free lock-classes; relies on the preceding sync_rcu() */
+ lockdep_free_key_range(mod->module_core, mod->core_size);
+
module_deallocate(mod, info);
free_copy:
free_copy(info);
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index c24d5a23bf93..5235dd4e1e2f 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -955,25 +955,6 @@ static void mark_nosave_pages(struct memory_bitmap *bm)
}
}
-static bool is_nosave_page(unsigned long pfn)
-{
- struct nosave_region *region;
-
- list_for_each_entry(region, &nosave_regions, list) {
- if (pfn >= region->start_pfn && pfn < region->end_pfn) {
- pr_err("PM: %#010llx in e820 nosave region: "
- "[mem %#010llx-%#010llx]\n",
- (unsigned long long) pfn << PAGE_SHIFT,
- (unsigned long long) region->start_pfn << PAGE_SHIFT,
- ((unsigned long long) region->end_pfn << PAGE_SHIFT)
- - 1);
- return true;
- }
- }
-
- return false;
-}
-
/**
* create_basic_memory_bitmaps - create bitmaps needed for marking page
* frames that should not be saved and free page frames. The pointers
@@ -2042,7 +2023,7 @@ static int mark_unsafe_pages(struct memory_bitmap *bm)
do {
pfn = memory_bm_next_pfn(bm);
if (likely(pfn != BM_END_OF_MAP)) {
- if (likely(pfn_valid(pfn)) && !is_nosave_page(pfn))
+ if (likely(pfn_valid(pfn)))
swsusp_set_page_free(pfn_to_page(pfn));
else
return -EFAULT;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 8027cfd699d0..261af7bfcb67 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1013,6 +1013,13 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
rq_clock_skip_update(rq, true);
}
+static ATOMIC_NOTIFIER_HEAD(task_migration_notifier);
+
+void register_task_migration_notifier(struct notifier_block *n)
+{
+ atomic_notifier_chain_register(&task_migration_notifier, n);
+}
+
#ifdef CONFIG_SMP
void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
{
@@ -1043,10 +1050,18 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
trace_sched_migrate_task(p, new_cpu);
if (task_cpu(p) != new_cpu) {
+ struct task_migration_notifier tmn;
+
if (p->sched_class->migrate_task_rq)
p->sched_class->migrate_task_rq(p, new_cpu);
p->se.nr_migrations++;
perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0);
+
+ tmn.task = p;
+ tmn.from_cpu = task_cpu(p);
+ tmn.to_cpu = new_cpu;
+
+ atomic_notifier_call_chain(&task_migration_notifier, 0, &tmn);
}
__set_task_cpu(p, new_cpu);
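
The new notifier fires on every cross-CPU migration with the task and both CPUs in the payload; the in-tree consumer added alongside this lives on the KVM side. A hedged sketch of a hypothetical logging consumer:

	#include <linux/notifier.h>
	#include <linux/sched.h>

	static int on_migrate(struct notifier_block *nb, unsigned long action,
			      void *data)
	{
		struct task_migration_notifier *tmn = data;

		pr_debug("task %d: cpu %d -> cpu %d\n",
			 tmn->task->pid, tmn->from_cpu, tmn->to_cpu);
		return NOTIFY_OK;
	}

	static struct notifier_block migrate_nb = {
		.notifier_call = on_migrate,
	};

	static int __init migrate_watch_init(void)
	{
		register_task_migration_notifier(&migrate_nb);
		return 0;
	}
	late_initcall(migrate_watch_init);
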
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 46855d06666a..ffeaa4105e48 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1622,9 +1622,11 @@ static void update_task_scan_period(struct task_struct *p,
/*
* If there were no recorded hinting faults then either the task is
* completely idle or all activity is in areas that are not of interest
- * to automatic numa balancing. Scan slower
+ * to automatic numa balancing. Related to that, if there were failed
+ * migrations then it implies we are migrating too quickly or the local
+ * node is overloaded. In either case, scan slower
*/
- if (local + shared == 0) {
+ if (local + shared == 0 || p->numa_faults_locality[2]) {
p->numa_scan_period = min(p->numa_scan_period_max,
p->numa_scan_period << 1);
@@ -2095,6 +2097,8 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
if (migrated)
p->numa_pages_migrated += pages;
+ if (flags & TNF_MIGRATE_FAIL)
+ p->numa_faults_locality[2] += pages;
p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages;
p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages;
@@ -2176,8 +2180,10 @@ void task_numa_work(struct callback_head *work)
vma = mm->mmap;
}
for (; vma; vma = vma->vm_next) {
- if (!vma_migratable(vma) || !vma_policy_mof(vma))
+ if (!vma_migratable(vma) || !vma_policy_mof(vma) ||
+ is_vm_hugetlb_page(vma)) {
continue;
+ }
/*
* Shared library pages mapped by multiple processes are not
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 88ea2d6e0031..ce410bb9f2e1 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1228,6 +1228,14 @@ static struct ctl_table vm_table[] = {
.extra1 = &zero,
},
{
+ .procname = "dirtytime_expire_seconds",
+ .data = &dirtytime_expire_interval,
+ .maxlen = sizeof(dirty_expire_interval),
+ .mode = 0644,
+ .proc_handler = dirtytime_interval_handler,
+ .extra1 = &zero,
+ },
+ {
.procname = "nr_pdflush_threads",
.mode = 0444 /* read-only */,
.proc_handler = pdflush_proc_obsolete,
diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c
index eb682d5c697c..6aac4beedbbe 100644
--- a/kernel/time/tick-broadcast-hrtimer.c
+++ b/kernel/time/tick-broadcast-hrtimer.c
@@ -49,6 +49,7 @@ static void bc_set_mode(enum clock_event_mode mode,
*/
static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
{
+ int bc_moved;
/*
* We try to cancel the timer first. If the callback is in
* flight on some other cpu then we let it handle it. If we
@@ -60,9 +61,15 @@ static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
* restart the timer because we are in the callback, but we
* can set the expiry time and let the callback return
* HRTIMER_RESTART.
+ *
+ * Since we are in the idle loop at this point and because
+ * hrtimer_{start/cancel} functions call into tracing,
+ * calls to these functions must be bound within RCU_NONIDLE.
*/
- if (hrtimer_try_to_cancel(&bctimer) >= 0) {
- hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED);
+ RCU_NONIDLE(bc_moved = (hrtimer_try_to_cancel(&bctimer) >= 0) ?
+ !hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED) :
+ 0);
+ if (bc_moved) {
/* Bind the "device" to the cpu */
bc->bound_on = smp_processor_id();
} else if (bc->bound_on == smp_processor_id()) {
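
RCU_NONIDLE() tells RCU the CPU is momentarily non-idle around the wrapped statement, so RCU users inside it (here, tracepoints reached from the hrtimer calls) are legal from the idle loop. The idiom in a minimal sketch, with a hypothetical helper:

	static void idle_safe_trace(void)
	{
		/* trace_printk() relies on RCU; wrap it when called from idle */
		RCU_NONIDLE(trace_printk("poked from the idle loop\n"));
	}
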
diff --git a/lib/lcm.c b/lib/lcm.c
index e97dbd51e756..03d7fcb420b5 100644
--- a/lib/lcm.c
+++ b/lib/lcm.c
@@ -12,3 +12,14 @@ unsigned long lcm(unsigned long a, unsigned long b)
return 0;
}
EXPORT_SYMBOL_GPL(lcm);
+
+unsigned long lcm_not_zero(unsigned long a, unsigned long b)
+{
+ unsigned long l = lcm(a, b);
+
+ if (l)
+ return l;
+
+ return (b ? : a);
+}
+EXPORT_SYMBOL_GPL(lcm_not_zero);
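
lcm_not_zero() degrades gracefully when one argument is 0, which is useful when 0 encodes "no constraint" (as in block-layer limit stacking). A small hypothetical use:

	/* combining two granularities where 0 means "unconstrained":
	 * lcm_not_zero(512, 4096) == 4096, lcm_not_zero(4096, 0) == 4096 */
	static unsigned long combine_granularity(unsigned long a, unsigned long b)
	{
		return lcm_not_zero(a, b);
	}
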
diff --git a/lib/lockref.c b/lib/lockref.c
index ecb9a665ec19..494994bf17c8 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -18,7 +18,7 @@
#define CMPXCHG_LOOP(CODE, SUCCESS) do { \
struct lockref old; \
BUILD_BUG_ON(sizeof(old) != 8); \
- old.lock_count = ACCESS_ONCE(lockref->lock_count); \
+ old.lock_count = READ_ONCE(lockref->lock_count); \
while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) { \
struct lockref new = old, prev = old; \
CODE \
diff --git a/lib/nlattr.c b/lib/nlattr.c
index 76a1b59523ab..f5907d23272d 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -279,6 +279,8 @@ int nla_memcpy(void *dest, const struct nlattr *src, int count)
int minlen = min_t(int, count, nla_len(src));
memcpy(dest, nla_data(src), minlen);
+ if (count > minlen)
+ memset(dest + minlen, 0, count - minlen);
return minlen;
}
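
Zeroing the tail keeps callers from consuming stale bytes whenever the attribute is shorter than the destination buffer. A hypothetical caller:

	/* fill a fixed 16-byte buffer from a possibly shorter attribute;
	 * bytes [nla_len(src), 16) are now guaranteed to be zero */
	static void get_name(char out[16], const struct nlattr *src)
	{
		nla_memcpy(out, src, 16);
	}
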
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 626e93db28ba..6817b0350c71 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1260,6 +1260,7 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
int target_nid, last_cpupid = -1;
bool page_locked;
bool migrated = false;
+ bool was_writable;
int flags = 0;
/* A PROT_NONE fault should not end up here */
@@ -1291,17 +1292,8 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
flags |= TNF_FAULT_LOCAL;
}
- /*
- * Avoid grouping on DSO/COW pages in specific and RO pages
- * in general, RO pages shouldn't hurt as much anyway since
- * they can be in shared cache state.
- *
- * FIXME! This checks "pmd_dirty()" as an approximation of
- * "is this a read-only page", since checking "pmd_write()"
- * is even more broken. We haven't actually turned this into
- * a writable page, so pmd_write() will always be false.
- */
- if (!pmd_dirty(pmd))
+ /* See similar comment in do_numa_page for explanation */
+ if (!(vma->vm_flags & VM_WRITE))
flags |= TNF_NO_GROUP;
/*
@@ -1358,12 +1350,17 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
if (migrated) {
flags |= TNF_MIGRATED;
page_nid = target_nid;
- }
+ } else
+ flags |= TNF_MIGRATE_FAIL;
goto out;
clear_pmdnuma:
BUG_ON(!PageLocked(page));
+ was_writable = pmd_write(pmd);
pmd = pmd_modify(pmd, vma->vm_page_prot);
+ pmd = pmd_mkyoung(pmd);
+ if (was_writable)
+ pmd = pmd_mkwrite(pmd);
set_pmd_at(mm, haddr, pmdp, pmd);
update_mmu_cache_pmd(vma, addr, pmdp);
unlock_page(page);
@@ -1487,6 +1484,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
pmd_t entry;
+ bool preserve_write = prot_numa && pmd_write(*pmd);
ret = 1;
/*
@@ -1502,9 +1500,11 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
if (!prot_numa || !pmd_protnone(*pmd)) {
entry = pmdp_get_and_clear_notify(mm, addr, pmd);
entry = pmd_modify(entry, newprot);
+ if (preserve_write)
+ entry = pmd_mkwrite(entry);
ret = HPAGE_PMD_NR;
set_pmd_at(mm, addr, pmd, entry);
- BUG_ON(pmd_write(entry));
+ BUG_ON(!preserve_write && pmd_write(entry));
}
spin_unlock(ptl);
}
diff --git a/mm/memory.c b/mm/memory.c
index 411144f977b1..97839f5c8c30 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3035,6 +3035,7 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
int last_cpupid;
int target_nid;
bool migrated = false;
+ bool was_writable = pte_write(pte);
int flags = 0;
/* A PROT_NONE fault should not end up here */
@@ -3059,6 +3060,8 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
/* Make it present again */
pte = pte_modify(pte, vma->vm_page_prot);
pte = pte_mkyoung(pte);
+ if (was_writable)
+ pte = pte_mkwrite(pte);
set_pte_at(mm, addr, ptep, pte);
update_mmu_cache(vma, addr, ptep);
@@ -3069,16 +3072,14 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
}
/*
- * Avoid grouping on DSO/COW pages in specific and RO pages
- * in general, RO pages shouldn't hurt as much anyway since
- * they can be in shared cache state.
- *
- * FIXME! This checks "pmd_dirty()" as an approximation of
- * "is this a read-only page", since checking "pmd_write()"
- * is even more broken. We haven't actually turned this into
- * a writable page, so pmd_write() will always be false.
+ * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
+ * much anyway since they can be in shared cache state. This misses
+ * the case where a mapping is writable but the process never writes
+ * to it, while pte_write gets cleared during protection updates and
+ * pte_dirty has unpredictable behaviour between PTE scan updates,
+ * background writeback, dirty balancing and application behaviour.
*/
- if (!pte_dirty(pte))
+ if (!(vma->vm_flags & VM_WRITE))
flags |= TNF_NO_GROUP;
/*
@@ -3102,7 +3103,8 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
if (migrated) {
page_nid = target_nid;
flags |= TNF_MIGRATED;
- }
+ } else
+ flags |= TNF_MIGRATE_FAIL;
out:
if (page_nid != -1)
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 9fab10795bea..65842d688b7c 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1092,6 +1092,10 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
return NULL;
arch_refresh_nodedata(nid, pgdat);
+ } else {
+ /* Reset the nr_zones and classzone_idx to 0 before reuse */
+ pgdat->nr_zones = 0;
+ pgdat->classzone_idx = 0;
}
/* we can use NODE_DATA(nid) from here */
@@ -1977,15 +1981,6 @@ void try_offline_node(int nid)
if (is_vmalloc_addr(zone->wait_table))
vfree(zone->wait_table);
}
-
- /*
- * Since there is no way to guarentee the address of pgdat/zone is not
- * on stack of any kernel threads or used by other kernel objects
- * without reference counting or other symchronizing method, do not
- * reset node_data and free pgdat here. Just reset it to 0 and reuse
- * the memory when the node is online again.
- */
- memset(pgdat, 0, sizeof(*pgdat));
}
EXPORT_SYMBOL(try_offline_node);
diff --git a/mm/mmap.c b/mm/mmap.c
index da9990acc08b..9ec50a368634 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -774,10 +774,8 @@ again: remove_next = 1 + (end > next->vm_end);
importer->anon_vma = exporter->anon_vma;
error = anon_vma_clone(importer, exporter);
- if (error) {
- importer->anon_vma = NULL;
+ if (error)
return error;
- }
}
}
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 44727811bf4c..88584838e704 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -75,6 +75,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
oldpte = *pte;
if (pte_present(oldpte)) {
pte_t ptent;
+ bool preserve_write = prot_numa && pte_write(oldpte);
/*
* Avoid trapping faults against the zero or KSM
@@ -94,6 +95,8 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
ptent = ptep_modify_prot_start(mm, addr, pte);
ptent = pte_modify(ptent, newprot);
+ if (preserve_write)
+ ptent = pte_mkwrite(ptent);
/* Avoid taking write faults for known dirty pages */
if (dirty_accountable && pte_dirty(ptent) &&
diff --git a/mm/mremap.c b/mm/mremap.c
index 57dadc025c64..2dc44b1cb1df 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -286,8 +286,14 @@ static unsigned long move_vma(struct vm_area_struct *vma,
old_len = new_len;
old_addr = new_addr;
new_addr = -ENOMEM;
- } else if (vma->vm_file && vma->vm_file->f_op->mremap)
- vma->vm_file->f_op->mremap(vma->vm_file, new_vma);
+ } else if (vma->vm_file && vma->vm_file->f_op->mremap) {
+ err = vma->vm_file->f_op->mremap(vma->vm_file, new_vma);
+ if (err < 0) {
+ move_page_tables(new_vma, new_addr, vma, old_addr,
+ moved_len, true);
+ return err;
+ }
+ }
/* Conceal VM_ACCOUNT so old reservation is not undone */
if (vm_flags & VM_ACCOUNT) {
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 45e187b2d971..644bcb665773 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -857,8 +857,11 @@ static void bdi_update_write_bandwidth(struct backing_dev_info *bdi,
* bw * elapsed + write_bandwidth * (period - elapsed)
* write_bandwidth = ---------------------------------------------------
* period
+ *
+ * @written may have decreased due to account_page_redirty().
+ * Avoid underflowing the @bw calculation.
*/
- bw = written - bdi->written_stamp;
+ bw = written - min(written, bdi->written_stamp);
bw *= HZ;
if (unlikely(elapsed > period)) {
do_div(bw, elapsed);
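
With the min() guard, a redirty-induced decrease in @written produces a bandwidth sample of 0 rather than a huge unsigned underflow. Worked numbers in a hedged sketch:

	static u64 bw_delta(u64 written, u64 stamp)
	{
		/* e.g. written = 100, stamp = 130 after account_page_redirty():
		 * a plain "written - stamp" wraps to ~2^64; the guard yields 0,
		 * so the averaged write_bandwidth merely decays this period */
		return written - min(written, stamp);
	}
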
@@ -922,7 +925,7 @@ static void global_update_bandwidth(unsigned long thresh,
unsigned long now)
{
static DEFINE_SPINLOCK(dirty_lock);
- static unsigned long update_time;
+ static unsigned long update_time = INITIAL_JIFFIES;
/*
* check locklessly first to optimize away locking for the most time
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 72f5ac381ab3..755a42c76eb4 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -103,6 +103,7 @@ void unset_migratetype_isolate(struct page *page, unsigned migratetype)
if (!is_migrate_isolate_page(buddy)) {
__isolate_free_page(page, order);
+ kernel_map_pages(page, (1 << order), 1);
set_page_refcounted(page);
isolated_page = page;
}
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 75c1f2878519..29f2f8b853ae 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -265,8 +265,15 @@ int walk_page_range(unsigned long start, unsigned long end,
vma = vma->vm_next;
err = walk_page_test(start, next, walk);
- if (err > 0)
+ if (err > 0) {
+ /*
+ * Positive return values are purely for
+ * controlling the pagewalk and should never
+ * be passed to the callers.
+ */
+ err = 0;
continue;
+ }
if (err < 0)
break;
}
diff --git a/mm/rmap.c b/mm/rmap.c
index 5e3e09081164..c161a14b6a8f 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -287,6 +287,13 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
return 0;
enomem_failure:
+ /*
+ * dst->anon_vma is dropped here, otherwise its degree can be incorrectly
+ * decremented in unlink_anon_vmas().
+ * We can safely do this because callers of anon_vma_clone() don't care
+ * about dst->anon_vma if anon_vma_clone() failed.
+ */
+ dst->anon_vma = NULL;
unlink_anon_vmas(dst);
return -ENOMEM;
}
diff --git a/mm/slub.c b/mm/slub.c
index 6832c4eab104..82c473780c91 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2449,7 +2449,8 @@ redo:
do {
tid = this_cpu_read(s->cpu_slab->tid);
c = raw_cpu_ptr(s->cpu_slab);
- } while (IS_ENABLED(CONFIG_PREEMPT) && unlikely(tid != c->tid));
+ } while (IS_ENABLED(CONFIG_PREEMPT) &&
+ unlikely(tid != READ_ONCE(c->tid)));
/*
* Irqless object alloc/free algorithm used here depends on sequence
@@ -2718,7 +2719,8 @@ redo:
do {
tid = this_cpu_read(s->cpu_slab->tid);
c = raw_cpu_ptr(s->cpu_slab);
- } while (IS_ENABLED(CONFIG_PREEMPT) && unlikely(tid != c->tid));
+ } while (IS_ENABLED(CONFIG_PREEMPT) &&
+ unlikely(tid != READ_ONCE(c->tid)));
/* Same with comment on barrier() in slab_alloc_node() */
barrier();
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 6b3f54ed65ba..a9f4ae45b7fb 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -484,7 +484,7 @@ static int ceph_tcp_connect(struct ceph_connection *con)
IPPROTO_TCP, &sock);
if (ret)
return ret;
- sock->sk->sk_allocation = GFP_NOFS | __GFP_MEMALLOC;
+ sock->sk->sk_allocation = GFP_NOFS;
#ifdef CONFIG_LOCKDEP
lockdep_set_class(&sock->sk->sk_lock, &socket_class);
@@ -520,8 +520,6 @@ static int ceph_tcp_connect(struct ceph_connection *con)
ret);
}
- sk_set_memalloc(sock->sk);
-
con->sock = sock;
return 0;
}
@@ -2808,11 +2806,8 @@ static void con_work(struct work_struct *work)
{
struct ceph_connection *con = container_of(work, struct ceph_connection,
work.work);
- unsigned long pflags = current->flags;
bool fault;
- current->flags |= PF_MEMALLOC;
-
mutex_lock(&con->mutex);
while (true) {
int ret;
@@ -2866,8 +2861,6 @@ static void con_work(struct work_struct *work)
con_fault_finish(con);
con->ops->put(con);
-
- tsk_restore_flags(current, pflags, PF_MEMALLOC);
}
/*
diff --git a/net/compat.c b/net/compat.c
index 94d3d5e97883..f7bd286a8280 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -49,6 +49,13 @@ ssize_t get_compat_msghdr(struct msghdr *kmsg,
__get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
__get_user(kmsg->msg_flags, &umsg->msg_flags))
return -EFAULT;
+
+ if (!uaddr)
+ kmsg->msg_namelen = 0;
+
+ if (kmsg->msg_namelen < 0)
+ return -EINVAL;
+
if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
kmsg->msg_namelen = sizeof(struct sockaddr_storage);
kmsg->msg_control = compat_ptr(tmp3);
diff --git a/net/core/dev.c b/net/core/dev.c
index 962ee9d71964..45109b70664e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2848,7 +2848,9 @@ static void skb_update_prio(struct sk_buff *skb)
#define skb_update_prio(skb)
#endif
-static DEFINE_PER_CPU(int, xmit_recursion);
+DEFINE_PER_CPU(int, xmit_recursion);
+EXPORT_SYMBOL(xmit_recursion);
+
#define RECURSION_LIMIT 10
/**
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 44706e81b2e0..e4fdc9dfb2c7 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -175,9 +175,9 @@ void fib_rules_unregister(struct fib_rules_ops *ops)
spin_lock(&net->rules_mod_lock);
list_del_rcu(&ops->list);
- fib_rules_cleanup_ops(ops);
spin_unlock(&net->rules_mod_lock);
+ fib_rules_cleanup_ops(ops);
call_rcu(&ops->rcu, fib_rules_put_rcu);
}
EXPORT_SYMBOL_GPL(fib_rules_unregister);
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index cb5290b8c428..70d3450588b2 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -198,8 +198,10 @@ static int __peernet2id(struct net *net, struct net *peer, bool alloc)
*/
int peernet2id(struct net *net, struct net *peer)
{
- int id = __peernet2id(net, peer, true);
+ bool alloc = atomic_read(&peer->count) != 0;
+ int id;
+ id = __peernet2id(net, peer, alloc);
return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED;
}
EXPORT_SYMBOL(peernet2id);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index ee0608bb3bc0..7ebed55b5f7d 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1932,10 +1932,10 @@ static int rtnl_group_changelink(const struct sk_buff *skb,
struct ifinfomsg *ifm,
struct nlattr **tb)
{
- struct net_device *dev;
+ struct net_device *dev, *aux;
int err;
- for_each_netdev(net, dev) {
+ for_each_netdev_safe(net, dev, aux) {
if (dev->group == group) {
err = do_setlink(skb, dev, ifm, tb, NULL, 0);
if (err < 0)
diff --git a/net/core/sock.c b/net/core/sock.c
index 78e89eb7eb70..71e3e5f1eaa0 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -653,6 +653,25 @@ static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
sock_reset_flag(sk, bit);
}
+bool sk_mc_loop(struct sock *sk)
+{
+ if (dev_recursion_level())
+ return false;
+ if (!sk)
+ return true;
+ switch (sk->sk_family) {
+ case AF_INET:
+ return inet_sk(sk)->mc_loop;
+#if IS_ENABLED(CONFIG_IPV6)
+ case AF_INET6:
+ return inet6_sk(sk)->mc_loop;
+#endif
+ }
+ WARN_ON(1);
+ return true;
+}
+EXPORT_SYMBOL(sk_mc_loop);
+
/*
* This is meant for all protocols to use and covers goings on
* at the socket level. Everything here is generic.
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c
index faf7cc3483fe..9d66a0f72f90 100644
--- a/net/decnet/dn_rules.c
+++ b/net/decnet/dn_rules.c
@@ -248,7 +248,9 @@ void __init dn_fib_rules_init(void)
void __exit dn_fib_rules_cleanup(void)
{
+ rtnl_lock();
fib_rules_unregister(dn_fib_rules_ops);
+ rtnl_unlock();
rcu_barrier();
}
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 2173402d87e0..4dea2e0681d1 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -501,12 +501,10 @@ static struct net_device *dev_to_net_device(struct device *dev)
#ifdef CONFIG_OF
static int dsa_of_setup_routing_table(struct dsa_platform_data *pd,
struct dsa_chip_data *cd,
- int chip_index,
+ int chip_index, int port_index,
struct device_node *link)
{
- int ret;
const __be32 *reg;
- int link_port_addr;
int link_sw_addr;
struct device_node *parent_sw;
int len;
@@ -519,6 +517,10 @@ static int dsa_of_setup_routing_table(struct dsa_platform_data *pd,
if (!reg || (len != sizeof(*reg) * 2))
return -EINVAL;
+ /*
+ * Get the destination switch number from the second field of its 'reg'
+ * property, i.e. for "reg = <0x19 1>" sw_addr is '1'.
+ */
link_sw_addr = be32_to_cpup(reg + 1);
if (link_sw_addr >= pd->nr_chips)
@@ -535,20 +537,9 @@ static int dsa_of_setup_routing_table(struct dsa_platform_data *pd,
memset(cd->rtable, -1, pd->nr_chips * sizeof(s8));
}
- reg = of_get_property(link, "reg", NULL);
- if (!reg) {
- ret = -EINVAL;
- goto out;
- }
-
- link_port_addr = be32_to_cpup(reg);
-
- cd->rtable[link_sw_addr] = link_port_addr;
+ cd->rtable[link_sw_addr] = port_index;
return 0;
-out:
- kfree(cd->rtable);
- return ret;
}
static void dsa_of_free_platform_data(struct dsa_platform_data *pd)
@@ -658,7 +649,7 @@ static int dsa_of_probe(struct platform_device *pdev)
if (!strcmp(port_name, "dsa") && link &&
pd->nr_chips > 1) {
ret = dsa_of_setup_routing_table(pd, cd,
- chip_index, link);
+ chip_index, port_index, link);
if (ret)
goto out_free_chip;
}
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 57be71dd6a9e..23b9b3e86f4c 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -1111,11 +1111,10 @@ static void ip_fib_net_exit(struct net *net)
{
unsigned int i;
+ rtnl_lock();
#ifdef CONFIG_IP_MULTIPLE_TABLES
fib4_rules_exit(net);
#endif
-
- rtnl_lock();
for (i = 0; i < FIB_TABLE_HASHSZ; i++) {
struct fib_table *tb;
struct hlist_head *head;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 9d78427652d2..fe54eba6d00d 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -268,7 +268,7 @@ static int __net_init ipmr_rules_init(struct net *net)
return 0;
err2:
- kfree(mrt);
+ ipmr_free_table(mrt);
err1:
fib_rules_unregister(ops);
return err;
@@ -278,11 +278,13 @@ static void __net_exit ipmr_rules_exit(struct net *net)
{
struct mr_table *mrt, *next;
+ rtnl_lock();
list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
list_del(&mrt->list);
ipmr_free_table(mrt);
}
fib_rules_unregister(net->ipv4.mr_rules_ops);
+ rtnl_unlock();
}
#else
#define ipmr_for_each_table(mrt, net) \
@@ -308,7 +310,10 @@ static int __net_init ipmr_rules_init(struct net *net)
static void __net_exit ipmr_rules_exit(struct net *net)
{
+ rtnl_lock();
ipmr_free_table(net->ipv4.mrt);
+ net->ipv4.mrt = NULL;
+ rtnl_unlock();
}
#endif
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 99e810f84671..cf5e82f39d3b 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -272,9 +272,9 @@ static void trace_packet(const struct sk_buff *skb,
&chainname, &comment, &rulenum) != 0)
break;
- nf_log_packet(net, AF_INET, hook, skb, in, out, &trace_loginfo,
- "TRACE: %s:%s:%s:%u ",
- tablename, chainname, comment, rulenum);
+ nf_log_trace(net, AF_INET, hook, skb, in, out, &trace_loginfo,
+ "TRACE: %s:%s:%s:%u ",
+ tablename, chainname, comment, rulenum);
}
#endif
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index fb4cf8b8e121..f501ac048366 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3105,10 +3105,11 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
if (!first_ackt.v64)
first_ackt = last_ackt;
- if (!(sacked & TCPCB_SACKED_ACKED))
+ if (!(sacked & TCPCB_SACKED_ACKED)) {
reord = min(pkts_acked, reord);
- if (!after(scb->end_seq, tp->high_seq))
- flag |= FLAG_ORIG_SACK_ACKED;
+ if (!after(scb->end_seq, tp->high_seq))
+ flag |= FLAG_ORIG_SACK_ACKED;
+ }
}
if (sacked & TCPCB_SACKED_ACKED)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 5a2dfed4783b..f1756ee02207 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1518,7 +1518,7 @@ void tcp_v4_early_demux(struct sk_buff *skb)
skb->sk = sk;
skb->destructor = sock_edemux;
if (sk->sk_state != TCP_TIME_WAIT) {
- struct dst_entry *dst = sk->sk_rx_dst;
+ struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
if (dst)
dst = dst_check(dst, 0);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index a2a796c5536b..1db253e36045 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2773,15 +2773,11 @@ void tcp_send_fin(struct sock *sk)
} else {
/* Socket is locked, keep trying until memory is available. */
for (;;) {
- skb = alloc_skb_fclone(MAX_TCP_HEADER,
- sk->sk_allocation);
+ skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
if (skb)
break;
yield();
}
-
- /* Reserve space for headers and prepare control bits. */
- skb_reserve(skb, MAX_TCP_HEADER);
/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
tcp_init_nondata_skb(skb, tp->write_seq,
TCPHDR_ACK | TCPHDR_FIN);
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index b4d5e1d97c1b..70bc6abc0639 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -104,6 +104,7 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
goto again;
flp6->saddr = saddr;
}
+ err = rt->dst.error;
goto out;
}
again:
@@ -321,7 +322,9 @@ out_fib6_rules_ops:
static void __net_exit fib6_rules_net_exit(struct net *net)
{
+ rtnl_lock();
fib_rules_unregister(net->ipv6.fib6_rules_ops);
+ rtnl_unlock();
}
static struct pernet_operations fib6_rules_net_ops = {
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 7e80b61b51ff..36cf0ab685a0 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -542,7 +542,8 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
struct sk_buff *frag;
struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
- struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
+ struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
+ inet6_sk(skb->sk) : NULL;
struct ipv6hdr *tmp_hdr;
struct frag_hdr *fh;
unsigned int mtu, hlen, left, len;
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 34b682617f50..312e0ff47339 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -252,7 +252,7 @@ static int __net_init ip6mr_rules_init(struct net *net)
return 0;
err2:
- kfree(mrt);
+ ip6mr_free_table(mrt);
err1:
fib_rules_unregister(ops);
return err;
@@ -267,8 +267,8 @@ static void __net_exit ip6mr_rules_exit(struct net *net)
list_del(&mrt->list);
ip6mr_free_table(mrt);
}
- rtnl_unlock();
fib_rules_unregister(net->ipv6.mr6_rules_ops);
+ rtnl_unlock();
}
#else
#define ip6mr_for_each_table(mrt, net) \
@@ -336,7 +336,7 @@ static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
static void ip6mr_free_table(struct mr6_table *mrt)
{
- del_timer(&mrt->ipmr_expire_timer);
+ del_timer_sync(&mrt->ipmr_expire_timer);
mroute_clean_tables(mrt);
kfree(mrt);
}
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 471ed24aabae..14ecdaf06bf7 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1218,7 +1218,14 @@ static void ndisc_router_discovery(struct sk_buff *skb)
if (rt)
rt6_set_expires(rt, jiffies + (HZ * lifetime));
if (ra_msg->icmph.icmp6_hop_limit) {
- in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
+ /* Only set hop_limit on the interface if the advertised value
+ * is higher than the current hop_limit.
+ */
+ if (in6_dev->cnf.hop_limit < ra_msg->icmph.icmp6_hop_limit) {
+ in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
+ } else {
+ ND_PRINTK(2, warn, "RA: Got route advertisement with lower hop_limit than current\n");
+ }
if (rt)
dst_metric_set(&rt->dst, RTAX_HOPLIMIT,
ra_msg->icmph.icmp6_hop_limit);
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index e080fbbbc0e5..bb00c6f2a885 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -298,9 +298,9 @@ static void trace_packet(const struct sk_buff *skb,
&chainname, &comment, &rulenum) != 0)
break;
- nf_log_packet(net, AF_INET6, hook, skb, in, out, &trace_loginfo,
- "TRACE: %s:%s:%s:%u ",
- tablename, chainname, comment, rulenum);
+ nf_log_trace(net, AF_INET6, hook, skb, in, out, &trace_loginfo,
+ "TRACE: %s:%s:%s:%u ",
+ tablename, chainname, comment, rulenum);
}
#endif
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 5d46832c6f72..1f5e62229aaa 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1411,6 +1411,15 @@ static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
TCP_SKB_CB(skb)->sacked = 0;
}
+static void tcp_v6_restore_cb(struct sk_buff *skb)
+{
+ /* We need to move the header back to the beginning if xfrm6_policy_check()
+ * and tcp_v6_fill_cb() are going to be called again.
+ */
+ memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
+ sizeof(struct inet6_skb_parm));
+}
+
static int tcp_v6_rcv(struct sk_buff *skb)
{
const struct tcphdr *th;
@@ -1543,6 +1552,7 @@ do_time_wait:
inet_twsk_deschedule(tw, &tcp_death_row);
inet_twsk_put(tw);
sk = sk2;
+ tcp_v6_restore_cb(skb);
goto process;
}
/* Fall through to ACK */
@@ -1551,6 +1561,7 @@ do_time_wait:
tcp_v6_timewait_ack(sk, skb);
break;
case TCP_TW_RST:
+ tcp_v6_restore_cb(skb);
goto no_tcp_socket;
case TCP_TW_SUCCESS:
;
@@ -1585,7 +1596,7 @@ static void tcp_v6_early_demux(struct sk_buff *skb)
skb->sk = sk;
skb->destructor = sock_edemux;
if (sk->sk_state != TCP_TIME_WAIT) {
- struct dst_entry *dst = sk->sk_rx_dst;
+ struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
if (dst)
dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index ab889bb16b3c..be2c0ba82c85 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -112,11 +112,9 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
fptr->nexthdr = nexthdr;
fptr->reserved = 0;
- if (skb_shinfo(skb)->ip6_frag_id)
- fptr->identification = skb_shinfo(skb)->ip6_frag_id;
- else
- ipv6_select_ident(fptr,
- (struct rt6_info *)skb_dst(skb));
+ if (!skb_shinfo(skb)->ip6_frag_id)
+ ipv6_proxy_select_ident(skb);
+ fptr->identification = skb_shinfo(skb)->ip6_frag_id;
/* Fragment the skb. ipv6 header and the remaining fields of the
* fragment header are updated in ipv6_gso_segment()
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 2e9953b2db84..53d931172088 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1114,10 +1114,8 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
noblock, &err);
else
skb = sock_alloc_send_skb(sk, len, noblock, &err);
- if (!skb) {
- err = -ENOMEM;
+ if (!skb)
goto out;
- }
if (iucv->transport == AF_IUCV_TRANS_HIPER)
skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN);
if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 895348e44c7d..a29a504492af 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1871,6 +1871,7 @@ static int __init l2tp_init(void)
l2tp_wq = alloc_workqueue("l2tp", WQ_UNBOUND, 0);
if (!l2tp_wq) {
pr_err("alloc_workqueue failed\n");
+ unregister_pernet_device(&l2tp_net_ops);
rc = -ENOMEM;
goto out;
}
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index a48bad468880..7702978a4c99 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -49,8 +49,6 @@ static void ieee80211_free_tid_rx(struct rcu_head *h)
container_of(h, struct tid_ampdu_rx, rcu_head);
int i;
- del_timer_sync(&tid_rx->reorder_timer);
-
for (i = 0; i < tid_rx->buf_size; i++)
__skb_queue_purge(&tid_rx->reorder_buf[i]);
kfree(tid_rx->reorder_buf);
@@ -93,6 +91,12 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
del_timer_sync(&tid_rx->session_timer);
+ /* make sure ieee80211_sta_reorder_release() doesn't re-arm the timer */
+ spin_lock_bh(&tid_rx->reorder_lock);
+ tid_rx->removed = true;
+ spin_unlock_bh(&tid_rx->reorder_lock);
+ del_timer_sync(&tid_rx->reorder_timer);
+
call_rcu(&tid_rx->rcu_head, ieee80211_free_tid_rx);
}
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 944bdc04e913..1eb730bf8752 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -873,9 +873,10 @@ static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
set_release_timer:
- mod_timer(&tid_agg_rx->reorder_timer,
- tid_agg_rx->reorder_time[j] + 1 +
- HT_RX_REORDER_BUF_TIMEOUT);
+ if (!tid_agg_rx->removed)
+ mod_timer(&tid_agg_rx->reorder_timer,
+ tid_agg_rx->reorder_time[j] + 1 +
+ HT_RX_REORDER_BUF_TIMEOUT);
} else {
del_timer(&tid_agg_rx->reorder_timer);
}
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 925e68fe64c7..fb0fc1302a58 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -175,6 +175,7 @@ struct tid_ampdu_tx {
* @reorder_lock: serializes access to reorder buffer, see below.
* @auto_seq: used for offloaded BA sessions to automatically pick head_seq_and
* and ssn.
+ * @removed: this session is removed (but might have been found due to RCU)
*
* This structure's lifetime is managed by RCU, assignments to
* the array holding it must hold the aggregation mutex.
@@ -199,6 +200,7 @@ struct tid_ampdu_rx {
u16 timeout;
u8 dialog_token;
bool auto_seq;
+ bool removed;
};
/**
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 0d8448f19dfe..675d12c69e32 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -212,6 +212,30 @@ void nf_log_packet(struct net *net,
}
EXPORT_SYMBOL(nf_log_packet);
+void nf_log_trace(struct net *net,
+ u_int8_t pf,
+ unsigned int hooknum,
+ const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ const struct nf_loginfo *loginfo, const char *fmt, ...)
+{
+ va_list args;
+ char prefix[NF_LOG_PREFIXLEN];
+ const struct nf_logger *logger;
+
+ rcu_read_lock();
+ logger = rcu_dereference(net->nf.nf_loggers[pf]);
+ if (logger) {
+ va_start(args, fmt);
+ vsnprintf(prefix, sizeof(prefix), fmt, args);
+ va_end(args);
+ logger->logfn(net, pf, hooknum, skb, in, out, loginfo, prefix);
+ }
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL(nf_log_trace);
+
#define S_SIZE (1024 - (sizeof(unsigned int) + 1))
struct nf_log_buf {
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 6ab777912237..ac1a9528dbf2 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -1225,7 +1225,10 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
if (nla[NFTA_CHAIN_POLICY]) {
if ((chain != NULL &&
- !(chain->flags & NFT_BASE_CHAIN)) ||
+ !(chain->flags & NFT_BASE_CHAIN)))
+ return -EOPNOTSUPP;
+
+ if (chain == NULL &&
nla[NFTA_CHAIN_HOOK] == NULL)
return -EOPNOTSUPP;
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index 3b90eb2b2c55..2d298dccb6dd 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -94,10 +94,10 @@ static void nft_trace_packet(const struct nft_pktinfo *pkt,
{
struct net *net = dev_net(pkt->in ? pkt->in : pkt->out);
- nf_log_packet(net, pkt->xt.family, pkt->ops->hooknum, pkt->skb, pkt->in,
- pkt->out, &trace_loginfo, "TRACE: %s:%s:%s:%u ",
- chain->table->name, chain->name, comments[type],
- rulenum);
+ nf_log_trace(net, pkt->xt.family, pkt->ops->hooknum, pkt->skb, pkt->in,
+ pkt->out, &trace_loginfo, "TRACE: %s:%s:%s:%u ",
+ chain->table->name, chain->name, comments[type],
+ rulenum);
}
unsigned int
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
index a5599fc51a6f..54330fb5efaf 100644
--- a/net/netfilter/nfnetlink_cthelper.c
+++ b/net/netfilter/nfnetlink_cthelper.c
@@ -77,6 +77,9 @@ nfnl_cthelper_parse_tuple(struct nf_conntrack_tuple *tuple,
if (!tb[NFCTH_TUPLE_L3PROTONUM] || !tb[NFCTH_TUPLE_L4PROTONUM])
return -EINVAL;
+ /* Not all fields are initialized so first zero the tuple */
+ memset(tuple, 0, sizeof(struct nf_conntrack_tuple));
+
tuple->src.l3num = ntohs(nla_get_be16(tb[NFCTH_TUPLE_L3PROTONUM]));
tuple->dst.protonum = nla_get_u8(tb[NFCTH_TUPLE_L4PROTONUM]);
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index 213584cf04b3..65f3e2b6be44 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -133,6 +133,9 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par,
entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0;
break;
case AF_INET6:
+ if (proto)
+ entry->e6.ipv6.flags |= IP6T_F_PROTO;
+
entry->e6.ipv6.proto = proto;
entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0;
break;
@@ -344,6 +347,9 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx,
entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0;
break;
case AF_INET6:
+ if (proto)
+ entry->e6.ipv6.flags |= IP6T_F_PROTO;
+
entry->e6.ipv6.proto = proto;
entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0;
break;
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
index c82df0a48fcd..37c15e674884 100644
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -153,6 +153,8 @@ static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set,
iter->err = err;
goto out;
}
+
+ continue;
}
if (iter->count < iter->skip)
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index ef8a926752a9..50e1e5aaf4ce 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -513,8 +513,8 @@ static int tproxy_tg6_check(const struct xt_tgchk_param *par)
{
const struct ip6t_ip6 *i = par->entryinfo;
- if ((i->proto == IPPROTO_TCP || i->proto == IPPROTO_UDP)
- && !(i->flags & IP6T_INV_PROTO))
+ if ((i->proto == IPPROTO_TCP || i->proto == IPPROTO_UDP) &&
+ !(i->invflags & IP6T_INV_PROTO))
return 0;
pr_info("Can be used only in combination with "
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index ec2954ffc690..067a3fff1d2c 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -274,10 +274,8 @@ void ovs_vport_del(struct vport *vport)
ASSERT_OVSL();
hlist_del_rcu(&vport->hash_node);
-
- vport->ops->destroy(vport);
-
module_put(vport->ops->owner);
+ vport->ops->destroy(vport);
}
/**
diff --git a/net/socket.c b/net/socket.c
index bbedbfcb42c2..245330ca0015 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1702,6 +1702,8 @@ SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
if (len > INT_MAX)
len = INT_MAX;
+ if (unlikely(!access_ok(VERIFY_READ, buff, len)))
+ return -EFAULT;
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (!sock)
goto out;
@@ -1760,6 +1762,8 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
if (size > INT_MAX)
size = INT_MAX;
+ if (unlikely(!access_ok(VERIFY_WRITE, ubuf, size)))
+ return -EFAULT;
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (!sock)
goto out;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 612aa73bbc60..e6ce1517367f 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -303,9 +303,7 @@ static int rpc_client_register(struct rpc_clnt *clnt,
struct super_block *pipefs_sb;
int err;
- err = rpc_clnt_debugfs_register(clnt);
- if (err)
- return err;
+ rpc_clnt_debugfs_register(clnt);
pipefs_sb = rpc_get_sb_net(net);
if (pipefs_sb) {
diff --git a/net/sunrpc/debugfs.c b/net/sunrpc/debugfs.c
index e811f390f9f6..82962f7e6e88 100644
--- a/net/sunrpc/debugfs.c
+++ b/net/sunrpc/debugfs.c
@@ -129,48 +129,52 @@ static const struct file_operations tasks_fops = {
.release = tasks_release,
};
-int
+void
rpc_clnt_debugfs_register(struct rpc_clnt *clnt)
{
- int len, err;
+ int len;
char name[24]; /* enough for "../../rpc_xprt/" + 8 hex digits + NULL */
+ struct rpc_xprt *xprt;
/* Already registered? */
- if (clnt->cl_debugfs)
- return 0;
+ if (clnt->cl_debugfs || !rpc_clnt_dir)
+ return;
len = snprintf(name, sizeof(name), "%x", clnt->cl_clid);
if (len >= sizeof(name))
- return -EINVAL;
+ return;
/* make the per-client dir */
clnt->cl_debugfs = debugfs_create_dir(name, rpc_clnt_dir);
if (!clnt->cl_debugfs)
- return -ENOMEM;
+ return;
/* make tasks file */
- err = -ENOMEM;
if (!debugfs_create_file("tasks", S_IFREG | S_IRUSR, clnt->cl_debugfs,
clnt, &tasks_fops))
goto out_err;
- err = -EINVAL;
rcu_read_lock();
+ xprt = rcu_dereference(clnt->cl_xprt);
+ /* no "debugfs" dentry? Don't bother with the symlink. */
+ if (!xprt->debugfs) {
+ rcu_read_unlock();
+ return;
+ }
len = snprintf(name, sizeof(name), "../../rpc_xprt/%s",
- rcu_dereference(clnt->cl_xprt)->debugfs->d_name.name);
+ xprt->debugfs->d_name.name);
rcu_read_unlock();
+
if (len >= sizeof(name))
goto out_err;
- err = -ENOMEM;
if (!debugfs_create_symlink("xprt", clnt->cl_debugfs, name))
goto out_err;
- return 0;
+ return;
out_err:
debugfs_remove_recursive(clnt->cl_debugfs);
clnt->cl_debugfs = NULL;
- return err;
}
void
@@ -226,33 +230,33 @@ static const struct file_operations xprt_info_fops = {
.release = xprt_info_release,
};
-int
+void
rpc_xprt_debugfs_register(struct rpc_xprt *xprt)
{
int len, id;
static atomic_t cur_id;
char name[9]; /* 8 hex digits + NULL term */
+ if (!rpc_xprt_dir)
+ return;
+
id = (unsigned int)atomic_inc_return(&cur_id);
len = snprintf(name, sizeof(name), "%x", id);
if (len >= sizeof(name))
- return -EINVAL;
+ return;
/* make the per-client dir */
xprt->debugfs = debugfs_create_dir(name, rpc_xprt_dir);
if (!xprt->debugfs)
- return -ENOMEM;
+ return;
/* make tasks file */
if (!debugfs_create_file("info", S_IFREG | S_IRUSR, xprt->debugfs,
xprt, &xprt_info_fops)) {
debugfs_remove_recursive(xprt->debugfs);
xprt->debugfs = NULL;
- return -ENOMEM;
}
-
- return 0;
}
void
@@ -266,14 +270,17 @@ void __exit
sunrpc_debugfs_exit(void)
{
debugfs_remove_recursive(topdir);
+ topdir = NULL;
+ rpc_clnt_dir = NULL;
+ rpc_xprt_dir = NULL;
}
-int __init
+void __init
sunrpc_debugfs_init(void)
{
topdir = debugfs_create_dir("sunrpc", NULL);
if (!topdir)
- goto out;
+ return;
rpc_clnt_dir = debugfs_create_dir("rpc_clnt", topdir);
if (!rpc_clnt_dir)
@@ -283,10 +290,9 @@ sunrpc_debugfs_init(void)
if (!rpc_xprt_dir)
goto out_remove;
- return 0;
+ return;
out_remove:
debugfs_remove_recursive(topdir);
topdir = NULL;
-out:
- return -ENOMEM;
+ rpc_clnt_dir = NULL;
}
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
index e37fbed87956..ee5d3d253102 100644
--- a/net/sunrpc/sunrpc_syms.c
+++ b/net/sunrpc/sunrpc_syms.c
@@ -98,10 +98,7 @@ init_sunrpc(void)
if (err)
goto out4;
- err = sunrpc_debugfs_init();
- if (err)
- goto out5;
-
+ sunrpc_debugfs_init();
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
rpc_register_sysctl();
#endif
@@ -109,8 +106,6 @@ init_sunrpc(void)
init_socket_xprt(); /* clnt sock transport */
return 0;
-out5:
- unregister_rpc_pipefs();
out4:
unregister_pernet_subsys(&sunrpc_net_ops);
out3:
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index e3015aede0d9..9949722d99ce 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -1331,7 +1331,6 @@ static void xprt_init(struct rpc_xprt *xprt, struct net *net)
*/
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
- int err;
struct rpc_xprt *xprt;
struct xprt_class *t;
@@ -1372,11 +1371,7 @@ found:
return ERR_PTR(-ENOMEM);
}
- err = rpc_xprt_debugfs_register(xprt);
- if (err) {
- xprt_destroy(xprt);
- return ERR_PTR(err);
- }
+ rpc_xprt_debugfs_register(xprt);
dprintk("RPC: created transport %p with %u slots\n", xprt,
xprt->max_reqs);
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 935205e6bcfe..be1c9fa60b09 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -152,11 +152,11 @@ out_netlink:
static void __exit tipc_exit(void)
{
tipc_bearer_cleanup();
+ unregister_pernet_subsys(&tipc_net_ops);
tipc_netlink_stop();
tipc_netlink_compat_stop();
tipc_socket_stop();
tipc_unregister_sysctl();
- unregister_pernet_subsys(&tipc_net_ops);
pr_info("Deactivated\n");
}
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 1684bcc78b34..5fde34326dcf 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -152,7 +152,7 @@ static ssize_t sel_write_enforce(struct file *file, const char __user *buf,
goto out;
/* No partial writes. */
- length = EINVAL;
+ length = -EINVAL;
if (*ppos != 0)
goto out;
diff --git a/sound/firewire/bebob/bebob_maudio.c b/sound/firewire/bebob/bebob_maudio.c
index a422aaa3bb0c..9ee25a63f684 100644
--- a/sound/firewire/bebob/bebob_maudio.c
+++ b/sound/firewire/bebob/bebob_maudio.c
@@ -96,10 +96,10 @@ int snd_bebob_maudio_load_firmware(struct fw_unit *unit)
struct fw_device *device = fw_parent_device(unit);
int err, rcode;
u64 date;
- __be32 cues[3] = {
- MAUDIO_BOOTLOADER_CUE1,
- MAUDIO_BOOTLOADER_CUE2,
- MAUDIO_BOOTLOADER_CUE3
+ __le32 cues[3] = {
+ cpu_to_le32(MAUDIO_BOOTLOADER_CUE1),
+ cpu_to_le32(MAUDIO_BOOTLOADER_CUE2),
+ cpu_to_le32(MAUDIO_BOOTLOADER_CUE3)
};
/* check date of software used to build */
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 4ca3d5d02436..a8a1e14272a1 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -1989,7 +1989,7 @@ static const struct pci_device_id azx_ids[] = {
.driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
/* Sunrise Point */
{ PCI_DEVICE(0x8086, 0xa170),
- .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
/* Sunrise Point-LP */
{ PCI_DEVICE(0x8086, 0x9d70),
.driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 526398a4a442..f9d12c0a7e5a 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -396,7 +396,7 @@ static void alc_auto_setup_eapd(struct hda_codec *codec, bool on)
{
/* We currently only handle front, HP */
static hda_nid_t pins[] = {
- 0x0f, 0x10, 0x14, 0x15, 0
+ 0x0f, 0x10, 0x14, 0x15, 0x17, 0
};
hda_nid_t *p;
for (p = pins; *p; p++)
@@ -2912,6 +2912,8 @@ static void alc283_init(struct hda_codec *codec)
if (!hp_pin)
return;
+
+ msleep(30);
hp_pin_sense = snd_hda_jack_detect(codec, hp_pin);
/* Index 0x43 Direct Drive HP AMP LPM Control 1 */
@@ -3607,6 +3609,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
switch (codec->vendor_id) {
case 0x10ec0255:
+ case 0x10ec0256:
alc_process_coef_fw(codec, coef0255);
break;
case 0x10ec0233:
@@ -3662,6 +3665,7 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
switch (codec->vendor_id) {
case 0x10ec0255:
+ case 0x10ec0256:
alc_write_coef_idx(codec, 0x45, 0xc489);
snd_hda_set_pin_ctl_cache(codec, hp_pin, 0);
alc_process_coef_fw(codec, coef0255);
@@ -3731,6 +3735,7 @@ static void alc_headset_mode_default(struct hda_codec *codec)
switch (codec->vendor_id) {
case 0x10ec0255:
+ case 0x10ec0256:
alc_process_coef_fw(codec, coef0255);
break;
case 0x10ec0233:
@@ -3785,6 +3790,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
switch (codec->vendor_id) {
case 0x10ec0255:
+ case 0x10ec0256:
alc_process_coef_fw(codec, coef0255);
break;
case 0x10ec0233:
@@ -3839,6 +3845,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
switch (codec->vendor_id) {
case 0x10ec0255:
+ case 0x10ec0256:
alc_process_coef_fw(codec, coef0255);
break;
case 0x10ec0233:
@@ -3884,6 +3891,7 @@ static void alc_determine_headset_type(struct hda_codec *codec)
switch (codec->vendor_id) {
case 0x10ec0255:
+ case 0x10ec0256:
alc_process_coef_fw(codec, coef0255);
msleep(300);
val = alc_read_coef_idx(codec, 0x46);
@@ -4364,6 +4372,7 @@ enum {
ALC269_FIXUP_QUANTA_MUTE,
ALC269_FIXUP_LIFEBOOK,
ALC269_FIXUP_LIFEBOOK_EXTMIC,
+ ALC269_FIXUP_LIFEBOOK_HP_PIN,
ALC269_FIXUP_AMIC,
ALC269_FIXUP_DMIC,
ALC269VB_FIXUP_AMIC,
@@ -4517,6 +4526,13 @@ static const struct hda_fixup alc269_fixups[] = {
{ }
},
},
+ [ALC269_FIXUP_LIFEBOOK_HP_PIN] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x21, 0x0221102f }, /* HP out */
+ { }
+ },
+ },
[ALC269_FIXUP_AMIC] = {
.type = HDA_FIXUP_PINS,
.v.pins = (const struct hda_pintbl[]) {
@@ -5010,6 +5026,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
SND_PCI_QUIRK(0x104d, 0x9099, "Sony VAIO S13", ALC275_FIXUP_SONY_DISABLE_AAMIX),
SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK),
+ SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN),
SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_BXBT2807_MIC),
@@ -5036,6 +5053,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
@@ -5216,6 +5234,16 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x17, 0x40000000},
{0x1d, 0x40700001},
{0x21, 0x02211050}),
+ SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x12, 0x90a60140},
+ {0x13, 0x40000000},
+ {0x14, 0x90170110},
+ {0x19, 0x411111f0},
+ {0x1a, 0x411111f0},
+ {0x1b, 0x411111f0},
+ {0x1d, 0x40700001},
+ {0x1e, 0x411111f0},
+ {0x21, 0x02211020}),
SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
{0x12, 0x90a60130},
{0x13, 0x40000000},
diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c
index 9974f201a08f..474cae82a874 100644
--- a/sound/soc/codecs/pcm512x.c
+++ b/sound/soc/codecs/pcm512x.c
@@ -1156,25 +1156,6 @@ static int pcm512x_hw_params(struct snd_pcm_substream *substream,
ret, pcm512x->pll_out);
return ret;
}
-
- gpio = PCM512x_G1OE << (4 - 1);
- ret = regmap_update_bits(pcm512x->regmap, PCM512x_GPIO_EN,
- gpio, gpio);
- if (ret != 0) {
- dev_err(codec->dev, "Failed to enable gpio %d: %d\n",
- 4, ret);
- return ret;
- }
-
- gpio = PCM512x_GPIO_OUTPUT_1 + 4 - 1;
- ret = regmap_update_bits(pcm512x->regmap, gpio,
- PCM512x_GxSL, PCM512x_GxSL_PLLLK);
- if (ret != 0) {
- dev_err(codec->dev,
- "Failed to output pll lock on %d: %d\n",
- ret, 4);
- return ret;
- }
}
ret = regmap_update_bits(pcm512x->regmap, PCM512x_SYNCHRONIZE,
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index dc9df007d3e3..337c317ead6f 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -192,6 +192,7 @@ static const struct rc_config {
{ USB_ID(0x041e, 0x3040), 2, 2, 6, 6, 2, 0x6e91 }, /* Live! 24-bit */
{ USB_ID(0x041e, 0x3042), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 */
{ USB_ID(0x041e, 0x30df), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 Pro */
+ { USB_ID(0x041e, 0x3237), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 Pro */
{ USB_ID(0x041e, 0x3048), 2, 2, 6, 6, 2, 0x6e91 }, /* Toshiba SB0500 */
};
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 753a47de8459..9a28365126f9 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1113,8 +1113,13 @@ void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
{
- /* MS Lifecam HD-5000 doesn't support reading the sample rate. */
- return chip->usb_id == USB_ID(0x045E, 0x076D);
+ /* devices which do not support reading the sample rate. */
+ switch (chip->usb_id) {
+ case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */
+ case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
+ return true;
+ }
+ return false;
}
/* Marantz/Denon USB DACs need a vendor cmd to switch
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 4e511221a0c1..d643d5242537 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -22,6 +22,14 @@ TARGETS += vm
TARGETS_HOTPLUG = cpu-hotplug
TARGETS_HOTPLUG += memory-hotplug
+# Clear LDFLAGS and MAKEFLAGS if called from main
+# Makefile to avoid test build failures when test
+# Makefile doesn't have explicit build rules.
+ifeq (1,$(MAKELEVEL))
+undefine LDFLAGS
+override MAKEFLAGS =
+endif
+
all:
for TARGET in $(TARGETS); do \
make -C $$TARGET; \
@@ -47,7 +55,40 @@ clean_hotplug:
make -C $$TARGET clean; \
done;
+INSTALL_PATH ?= install
+INSTALL_PATH := $(abspath $(INSTALL_PATH))
+ALL_SCRIPT := $(INSTALL_PATH)/run_kselftest.sh
+
+install:
+ifdef INSTALL_PATH
+ @# Ask all targets to install their files
+ mkdir -p $(INSTALL_PATH)
+ for TARGET in $(TARGETS); do \
+ mkdir -p $(INSTALL_PATH)/$$TARGET ; \
+ make -C $$TARGET INSTALL_PATH=$(INSTALL_PATH)/$$TARGET install; \
+ done;
+
+ @# Ask all targets to emit their test scripts
+ echo "#!/bin/bash" > $(ALL_SCRIPT)
+ echo "cd \$$(dirname \$$0)" >> $(ALL_SCRIPT)
+ echo "ROOT=\$$PWD" >> $(ALL_SCRIPT)
+
+ for TARGET in $(TARGETS); do \
+ echo "echo ; echo Running tests in $$TARGET" >> $(ALL_SCRIPT); \
+ echo "echo ========================================" >> $(ALL_SCRIPT); \
+ echo "cd $$TARGET" >> $(ALL_SCRIPT); \
+ make -s --no-print-directory -C $$TARGET emit_tests >> $(ALL_SCRIPT); \
+ echo "cd \$$ROOT" >> $(ALL_SCRIPT); \
+ done;
+
+ chmod u+x $(ALL_SCRIPT)
+else
+ $(error Error: set INSTALL_PATH to use install)
+endif
+
clean:
for TARGET in $(TARGETS); do \
make -C $$TARGET clean; \
done;
+
+.PHONY: install
diff --git a/tools/testing/selftests/breakpoints/Makefile b/tools/testing/selftests/breakpoints/Makefile
index e18b42b254af..182235640209 100644
--- a/tools/testing/selftests/breakpoints/Makefile
+++ b/tools/testing/selftests/breakpoints/Makefile
@@ -16,8 +16,9 @@ else
echo "Not an x86 target, can't build breakpoints selftests"
endif
-run_tests:
- @./breakpoint_test || echo "breakpoints selftests: [FAIL]"
+TEST_PROGS := breakpoint_test
+
+include ../lib.mk
clean:
rm -fr breakpoint_test
diff --git a/tools/testing/selftests/cpu-hotplug/Makefile b/tools/testing/selftests/cpu-hotplug/Makefile
index e9c28d8dc84b..fe1f99101c5d 100644
--- a/tools/testing/selftests/cpu-hotplug/Makefile
+++ b/tools/testing/selftests/cpu-hotplug/Makefile
@@ -1,9 +1,10 @@
all:
-run_tests:
- @/bin/bash ./on-off-test.sh || echo "cpu-hotplug selftests: [FAIL]"
+TEST_PROGS := cpu-on-off-test.sh
+
+include ../lib.mk
run_full_test:
- @/bin/bash ./on-off-test.sh -a || echo "cpu-hotplug selftests: [FAIL]"
+ @/bin/bash ./cpu-on-off-test.sh -a || echo "cpu-hotplug selftests: [FAIL]"
clean:
diff --git a/tools/testing/selftests/cpu-hotplug/on-off-test.sh b/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh
index 98b1d6565f2c..98b1d6565f2c 100644..100755
--- a/tools/testing/selftests/cpu-hotplug/on-off-test.sh
+++ b/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh
diff --git a/tools/testing/selftests/efivarfs/Makefile b/tools/testing/selftests/efivarfs/Makefile
index 29e8c6bc81b0..736c3ddfc787 100644
--- a/tools/testing/selftests/efivarfs/Makefile
+++ b/tools/testing/selftests/efivarfs/Makefile
@@ -1,12 +1,13 @@
-CC = $(CROSS_COMPILE)gcc
CFLAGS = -Wall
test_objs = open-unlink create-read
all: $(test_objs)
-run_tests: all
- @/bin/bash ./efivarfs.sh || echo "efivarfs selftests: [FAIL]"
+TEST_PROGS := efivarfs.sh
+TEST_FILES := $(test_objs)
+
+include ../lib.mk
clean:
rm -f $(test_objs)
diff --git a/tools/testing/selftests/efivarfs/efivarfs.sh b/tools/testing/selftests/efivarfs/efivarfs.sh
index 77edcdcc016b..77edcdcc016b 100644..100755
--- a/tools/testing/selftests/efivarfs/efivarfs.sh
+++ b/tools/testing/selftests/efivarfs/efivarfs.sh
diff --git a/tools/testing/selftests/exec/Makefile b/tools/testing/selftests/exec/Makefile
index 66dfc2ce1788..4edb7d0da29b 100644
--- a/tools/testing/selftests/exec/Makefile
+++ b/tools/testing/selftests/exec/Makefile
@@ -1,4 +1,3 @@
-CC = $(CROSS_COMPILE)gcc
CFLAGS = -Wall
BINARIES = execveat
DEPS = execveat.symlink execveat.denatured script subdir
@@ -18,8 +17,12 @@ execveat.denatured: execveat
%: %.c
$(CC) $(CFLAGS) -o $@ $^
-run_tests: all
- ./execveat
+TEST_PROGS := execveat
+TEST_FILES := $(DEPS)
+
+include ../lib.mk
+
+override EMIT_TESTS := echo "mkdir -p subdir; (./execveat && echo \"selftests: execveat [PASS]\") || echo \"selftests: execveat [FAIL]\""
clean:
rm -rf $(BINARIES) $(DEPS) subdir.moved execveat.moved xxxxx*
diff --git a/tools/testing/selftests/firmware/Makefile b/tools/testing/selftests/firmware/Makefile
index e23cce0bbc3a..9bf82234855b 100644
--- a/tools/testing/selftests/firmware/Makefile
+++ b/tools/testing/selftests/firmware/Makefile
@@ -3,25 +3,9 @@
# No binaries, but make sure arg-less "make" doesn't trigger "run_tests"
all:
-fw_filesystem:
- @if /bin/sh ./fw_filesystem.sh ; then \
- echo "fw_filesystem: ok"; \
- else \
- echo "fw_filesystem: [FAIL]"; \
- exit 1; \
- fi
+TEST_PROGS := fw_filesystem.sh fw_userhelper.sh
-fw_userhelper:
- @if /bin/sh ./fw_userhelper.sh ; then \
- echo "fw_userhelper: ok"; \
- else \
- echo "fw_userhelper: [FAIL]"; \
- exit 1; \
- fi
-
-run_tests: all fw_filesystem fw_userhelper
+include ../lib.mk
# Nothing to clean up.
clean:
-
-.PHONY: all clean run_tests fw_filesystem fw_userhelper
diff --git a/tools/testing/selftests/firmware/fw_filesystem.sh b/tools/testing/selftests/firmware/fw_filesystem.sh
index 3fc6c10c2479..3fc6c10c2479 100644..100755
--- a/tools/testing/selftests/firmware/fw_filesystem.sh
+++ b/tools/testing/selftests/firmware/fw_filesystem.sh
diff --git a/tools/testing/selftests/firmware/fw_userhelper.sh b/tools/testing/selftests/firmware/fw_userhelper.sh
index 6efbade12139..6efbade12139 100644..100755
--- a/tools/testing/selftests/firmware/fw_userhelper.sh
+++ b/tools/testing/selftests/firmware/fw_userhelper.sh
diff --git a/tools/testing/selftests/ftrace/Makefile b/tools/testing/selftests/ftrace/Makefile
index 76cc9f156267..346720639d1d 100644
--- a/tools/testing/selftests/ftrace/Makefile
+++ b/tools/testing/selftests/ftrace/Makefile
@@ -1,7 +1,8 @@
all:
-run_tests:
- @/bin/sh ./ftracetest || echo "ftrace selftests: [FAIL]"
+TEST_PROGS := ftracetest
+
+include ../lib.mk
clean:
rm -rf logs/*
diff --git a/tools/testing/selftests/ftrace/test.d/00basic/basic4.tc b/tools/testing/selftests/ftrace/test.d/00basic/basic4.tc
index fd9c49a13612..aa51f6c17359 100644
--- a/tools/testing/selftests/ftrace/test.d/00basic/basic4.tc
+++ b/tools/testing/selftests/ftrace/test.d/00basic/basic4.tc
@@ -2,4 +2,4 @@
# description: Basic event tracing check
test -f available_events -a -f set_event -a -d events
# check scheduler events are available
-grep -q sched available_events && exit 0 || exit -1
\ No newline at end of file
+grep -q sched available_events && exit 0 || exit $FAIL
diff --git a/tools/testing/selftests/ftrace/test.d/event/event-enable.tc b/tools/testing/selftests/ftrace/test.d/event/event-enable.tc
index 668616d9bb03..87eb9d6dd4ca 100644
--- a/tools/testing/selftests/ftrace/test.d/event/event-enable.tc
+++ b/tools/testing/selftests/ftrace/test.d/event/event-enable.tc
@@ -9,7 +9,11 @@ do_reset() {
fail() { #msg
do_reset
echo $1
- exit -1
+ exit $FAIL
+}
+
+yield() {
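+	# give the kernel time to record events: try a quick ping, then fall
+	# back to whichever sleep variant this system provides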
+ ping localhost -c 1 || sleep .001 || usleep 1 || sleep 1
}
if [ ! -f set_event -o ! -d events/sched ]; then
@@ -21,7 +25,8 @@ reset_tracer
do_reset
echo 'sched:sched_switch' > set_event
-usleep 1
+
+yield
count=`cat trace | grep sched_switch | wc -l`
if [ $count -eq 0 ]; then
@@ -31,7 +36,8 @@ fi
do_reset
echo 1 > events/sched/sched_switch/enable
-usleep 1
+
+yield
count=`cat trace | grep sched_switch | wc -l`
if [ $count -eq 0 ]; then
@@ -41,7 +47,8 @@ fi
do_reset
echo 0 > events/sched/sched_switch/enable
-usleep 1
+
+yield
count=`cat trace | grep sched_switch | wc -l`
if [ $count -ne 0 ]; then
diff --git a/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc b/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc
index 655c415b6e7f..ced27ef0638f 100644
--- a/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc
+++ b/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc
@@ -9,7 +9,11 @@ do_reset() {
fail() { #msg
do_reset
echo $1
- exit -1
+ exit $FAIL
+}
+
+yield() {
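+	# burn a little wall-clock time so sched events can fire; use ping or
+	# the first sleep variant that works here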
+ ping localhost -c 1 || sleep .001 || usleep 1 || sleep 1
}
if [ ! -f set_event -o ! -d events/sched ]; then
@@ -21,7 +25,8 @@ reset_tracer
do_reset
echo 'sched:*' > set_event
-usleep 1
+
+yield
count=`cat trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
if [ $count -lt 3 ]; then
@@ -31,7 +36,8 @@ fi
do_reset
echo 1 > events/sched/enable
-usleep 1
+
+yield
count=`cat trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
if [ $count -lt 3 ]; then
@@ -41,7 +47,8 @@ fi
do_reset
echo 0 > events/sched/enable
-usleep 1
+
+yield
count=`cat trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
if [ $count -ne 0 ]; then
diff --git a/tools/testing/selftests/ftrace/test.d/event/toplevel-enable.tc b/tools/testing/selftests/ftrace/test.d/event/toplevel-enable.tc
index 480845774007..0bb5df3c00d4 100644
--- a/tools/testing/selftests/ftrace/test.d/event/toplevel-enable.tc
+++ b/tools/testing/selftests/ftrace/test.d/event/toplevel-enable.tc
@@ -9,7 +9,11 @@ do_reset() {
fail() { #msg
do_reset
echo $1
- exit -1
+ exit $FAIL
+}
+
+yield() {
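+	# same helper as the other event tests: generate activity, falling back
+	# through the available sleep variants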
+ ping localhost -c 1 || sleep .001 || usleep 1 || sleep 1
}
if [ ! -f available_events -o ! -f set_event -o ! -d events ]; then
@@ -21,6 +25,9 @@ reset_tracer
do_reset
echo '*:*' > set_event
+
+yield
+
count=`cat trace | grep -v ^# | wc -l`
if [ $count -eq 0 ]; then
fail "none of events are recorded"
@@ -29,6 +36,9 @@ fi
do_reset
echo 1 > events/enable
+
+yield
+
count=`cat trace | grep -v ^# | wc -l`
if [ $count -eq 0 ]; then
fail "none of events are recorded"
@@ -37,6 +47,9 @@ fi
do_reset
echo 0 > events/enable
+
+yield
+
count=`cat trace | grep -v ^# | wc -l`
if [ $count -ne 0 ]; then
fail "any of events should not be recorded"
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/fgraph-filter-stack.tc b/tools/testing/selftests/ftrace/test.d/ftrace/fgraph-filter-stack.tc
index c15e018e0220..15c2dba06ea2 100644
--- a/tools/testing/selftests/ftrace/test.d/ftrace/fgraph-filter-stack.tc
+++ b/tools/testing/selftests/ftrace/test.d/ftrace/fgraph-filter-stack.tc
@@ -16,7 +16,9 @@ fi
do_reset() {
reset_tracer
- echo 0 > /proc/sys/kernel/stack_tracer_enabled
+ if [ -e /proc/sys/kernel/stack_tracer_enabled ]; then
+ echo 0 > /proc/sys/kernel/stack_tracer_enabled
+ fi
enable_tracing
clear_trace
echo > set_ftrace_filter
@@ -25,7 +27,7 @@ do_reset() {
fail() { # msg
do_reset
echo $1
- exit -1
+ exit $FAIL
}
disable_tracing
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/fgraph-filter.tc b/tools/testing/selftests/ftrace/test.d/ftrace/fgraph-filter.tc
index 6af5f6360b18..0ab2189613ef 100644
--- a/tools/testing/selftests/ftrace/test.d/ftrace/fgraph-filter.tc
+++ b/tools/testing/selftests/ftrace/test.d/ftrace/fgraph-filter.tc
@@ -17,7 +17,7 @@ do_reset() {
fail() { # msg
do_reset
echo $1
- exit -1
+ exit $FAIL
}
disable_tracing
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func_profiler.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func_profiler.tc
index 2e719cb1fc4d..7808336d6f50 100644
--- a/tools/testing/selftests/ftrace/test.d/ftrace/func_profiler.tc
+++ b/tools/testing/selftests/ftrace/test.d/ftrace/func_profiler.tc
@@ -31,7 +31,7 @@ fail() { # mesg
reset_tracer
echo > set_ftrace_filter
echo $1
- exit -1
+ exit $FAIL
}
echo "Testing function tracer with profiler:"
diff --git a/tools/testing/selftests/gen_kselftest_tar.sh b/tools/testing/selftests/gen_kselftest_tar.sh
new file mode 100755
index 000000000000..17d5bd0c0936
--- /dev/null
+++ b/tools/testing/selftests/gen_kselftest_tar.sh
@@ -0,0 +1,55 @@
+#!/bin/bash
+#
+# gen_kselftest_tar
+# Generate kselftest tarball
+# Author: Shuah Khan <shuahkh@osg.samsung.com>
+# Copyright (C) 2015 Samsung Electronics Co., Ltd.
+
+# This software may be freely redistributed under the terms of the GNU
+# General Public License (GPLv2).
+
+# main
+main()
+{
+ if [ "$#" -eq 0 ]; then
+ echo "$0: Generating default compression gzip"
+ copts="cvzf"
+ ext=".tar.gz"
+ else
+ case "$1" in
+ tar)
+ copts="cvf"
+ ext=".tar"
+ ;;
+ targz)
+ copts="cvzf"
+ ext=".tar.gz"
+ ;;
+ tarbz2)
+ copts="cvjf"
+ ext=".tar.bz2"
+ ;;
+ tarxz)
+ copts="cvJf"
+ ext=".tar.xz"
+ ;;
+ *)
+ echo "Unknown tarball format $1"
+ exit 1
+ ;;
+ esac
+ fi
+
+ install_dir=./kselftest
+
+# Run kselftest_install.sh to build the tests and stage them in the
+# install directory
+./kselftest_install.sh
+tar $copts kselftest${ext} $install_dir
+echo "Kselftest archive kselftest${ext} created!"
+
+# clean up install directory
+rm -rf kselftest
+}
+
+main "$@"
diff --git a/tools/testing/selftests/ipc/Makefile b/tools/testing/selftests/ipc/Makefile
index 74bbefdeaf4c..25d2e702c68a 100644
--- a/tools/testing/selftests/ipc/Makefile
+++ b/tools/testing/selftests/ipc/Makefile
@@ -12,14 +12,11 @@ endif
CFLAGS += -I../../../../usr/include/
all:
-ifeq ($(ARCH),x86)
- gcc $(CFLAGS) msgque.c -o msgque_test
-else
- echo "Not an x86 target, can't build msgque selftest"
-endif
+ $(CC) $(CFLAGS) msgque.c -o msgque_test
+
+TEST_PROGS := msgque_test
-run_tests: all
- ./msgque_test
+include ../lib.mk
clean:
rm -fr ./msgque_test
diff --git a/tools/testing/selftests/kcmp/Makefile b/tools/testing/selftests/kcmp/Makefile
index ff0eefdc6ceb..2ae7450a9a89 100644
--- a/tools/testing/selftests/kcmp/Makefile
+++ b/tools/testing/selftests/kcmp/Makefile
@@ -1,10 +1,10 @@
-CC := $(CROSS_COMPILE)$(CC)
CFLAGS += -I../../../../usr/include/
all: kcmp_test
-run_tests: all
- @./kcmp_test || echo "kcmp_test: [FAIL]"
+TEST_PROGS := kcmp_test
+
+include ../lib.mk
clean:
$(RM) kcmp_test kcmp-test-file
diff --git a/tools/testing/selftests/kselftest_install.sh b/tools/testing/selftests/kselftest_install.sh
new file mode 100755
index 000000000000..1555fbdb08da
--- /dev/null
+++ b/tools/testing/selftests/kselftest_install.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+#
+# Kselftest Install
+# Install kselftest tests
+# Author: Shuah Khan <shuahkh@osg.samsung.com>
+# Copyright (C) 2015 Samsung Electronics Co., Ltd.
+
+# This software may be freely redistributed under the terms of the GNU
+# General Public License (GPLv2).
+
+install_loc=`pwd`
+
+main()
+{
+ if [ $(basename $install_loc) != "selftests" ]; then
+ echo "$0: Please run it in selftests directory ..."
+ exit 1;
+ fi
+ if [ "$#" -eq 0 ]; then
+ echo "$0: Installing in default location - $install_loc ..."
+ elif [ ! -d "$1" ]; then
+ echo "$0: $1 doesn't exist!!"
+ exit 1;
+ else
+ install_loc=$1
+ echo "$0: Installing in specified location - $install_loc ..."
+ fi
+
+ install_dir=$install_loc/kselftest
+
+# Create install directory
+ mkdir -p $install_dir
+# Build tests
+ INSTALL_PATH=$install_dir make install
+}
+
+main "$@"
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
new file mode 100644
index 000000000000..2194155ae62a
--- /dev/null
+++ b/tools/testing/selftests/lib.mk
@@ -0,0 +1,35 @@
+# This mimics the top-level Makefile. We do it explicitly here so that this
+# Makefile can operate with or without the kbuild infrastructure.
+CC := $(CROSS_COMPILE)gcc
+
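+# Run every program listed in TEST_PROGS and print a selftests PASS/FAIL
+# line for each.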
+define RUN_TESTS
+ @for TEST in $(TEST_PROGS); do \
+ (./$$TEST && echo "selftests: $$TEST [PASS]") || echo "selftests: $$TEST [FAIL]"; \
+ done;
+endef
+
+run_tests: all
+ $(RUN_TESTS)
+
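+# Copy the test programs, extended programs and support files into
+# INSTALL_PATH.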
+define INSTALL_RULE
+ mkdir -p $(INSTALL_PATH)
+ install -t $(INSTALL_PATH) $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES)
+endef
+
+install: all
+ifdef INSTALL_PATH
+ $(INSTALL_RULE)
+else
+ $(error Error: set INSTALL_PATH to use install)
+endif
+
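+# Print (rather than run) the per-test commands; the top-level install
+# target collects this output into run_kselftest.sh.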
+define EMIT_TESTS
+ @for TEST in $(TEST_PROGS); do \
+ echo "(./$$TEST && echo \"selftests: $$TEST [PASS]\") || echo \"selftests: $$TEST [FAIL]\""; \
+ done;
+endef
+
+emit_tests:
+ $(EMIT_TESTS)
+
+.PHONY: run_tests all clean install emit_tests
diff --git a/tools/testing/selftests/memfd/Makefile b/tools/testing/selftests/memfd/Makefile
index b80cd10d53ba..3e7eb7972511 100644
--- a/tools/testing/selftests/memfd/Makefile
+++ b/tools/testing/selftests/memfd/Makefile
@@ -1,17 +1,19 @@
+CC = $(CROSS_COMPILE)gcc
CFLAGS += -D_FILE_OFFSET_BITS=64
CFLAGS += -I../../../../include/uapi/
CFLAGS += -I../../../../include/
+CFLAGS += -I../../../../usr/include/
all:
- gcc $(CFLAGS) memfd_test.c -o memfd_test
+ $(CC) $(CFLAGS) memfd_test.c -o memfd_test
-run_tests: all
- gcc $(CFLAGS) memfd_test.c -o memfd_test
- @./memfd_test || echo "memfd_test: [FAIL]"
+TEST_PROGS := memfd_test
+
+include ../lib.mk
build_fuse:
- gcc $(CFLAGS) fuse_mnt.c `pkg-config fuse --cflags --libs` -o fuse_mnt
- gcc $(CFLAGS) fuse_test.c -o fuse_test
+ $(CC) $(CFLAGS) fuse_mnt.c `pkg-config fuse --cflags --libs` -o fuse_mnt
+ $(CC) $(CFLAGS) fuse_test.c -o fuse_test
run_fuse: build_fuse
@./run_fuse_test.sh || echo "fuse_test: [FAIL]"
diff --git a/tools/testing/selftests/memory-hotplug/Makefile b/tools/testing/selftests/memory-hotplug/Makefile
index d46b8d489cd2..afb2624c7048 100644
--- a/tools/testing/selftests/memory-hotplug/Makefile
+++ b/tools/testing/selftests/memory-hotplug/Makefile
@@ -1,9 +1,12 @@
all:
-run_tests:
- @/bin/bash ./on-off-test.sh -r 2 || echo "memory-hotplug selftests: [FAIL]"
+include ../lib.mk
+
+TEST_PROGS := mem-on-off-test.sh
+override RUN_TESTS := ./mem-on-off-test.sh -r 2 || echo "selftests: memory-hotplug [FAIL]"
+override EMIT_TESTS := echo "$(RUN_TESTS)"
run_full_test:
- @/bin/bash ./on-off-test.sh || echo "memory-hotplug selftests: [FAIL]"
+ @/bin/bash ./mem-on-off-test.sh || echo "memory-hotplug selftests: [FAIL]"
clean:
diff --git a/tools/testing/selftests/memory-hotplug/on-off-test.sh b/tools/testing/selftests/memory-hotplug/mem-on-off-test.sh
index 6cddde0b96f8..6cddde0b96f8 100644..100755
--- a/tools/testing/selftests/memory-hotplug/on-off-test.sh
+++ b/tools/testing/selftests/memory-hotplug/mem-on-off-test.sh
diff --git a/tools/testing/selftests/mount/.gitignore b/tools/testing/selftests/mount/.gitignore
new file mode 100644
index 000000000000..856ad4107eb3
--- /dev/null
+++ b/tools/testing/selftests/mount/.gitignore
@@ -0,0 +1 @@
+unprivileged-remount-test
diff --git a/tools/testing/selftests/mount/Makefile b/tools/testing/selftests/mount/Makefile
index 337d853c2b72..95580a97326e 100644
--- a/tools/testing/selftests/mount/Makefile
+++ b/tools/testing/selftests/mount/Makefile
@@ -1,17 +1,16 @@
# Makefile for mount selftests.
-
+CFLAGS = -Wall \
+ -O2
all: unprivileged-remount-test
unprivileged-remount-test: unprivileged-remount-test.c
- gcc -Wall -O2 unprivileged-remount-test.c -o unprivileged-remount-test
+ $(CC) $(CFLAGS) unprivileged-remount-test.c -o unprivileged-remount-test
-# Allow specific tests to be selected.
-test_unprivileged_remount: unprivileged-remount-test
- @if [ -f /proc/self/uid_map ] ; then ./unprivileged-remount-test ; fi
+include ../lib.mk
-run_tests: all test_unprivileged_remount
+TEST_PROGS := unprivileged-remount-test
+override RUN_TESTS := if [ -f /proc/self/uid_map ] ; then ./unprivileged-remount-test ; fi
+override EMIT_TESTS := echo "$(RUN_TESTS)"
clean:
rm -f unprivileged-remount-test
-
-.PHONY: all test_unprivileged_remount
diff --git a/tools/testing/selftests/mqueue/Makefile b/tools/testing/selftests/mqueue/Makefile
index 8056e2e68fa4..0e3b41eb85cd 100644
--- a/tools/testing/selftests/mqueue/Makefile
+++ b/tools/testing/selftests/mqueue/Makefile
@@ -1,10 +1,22 @@
+CFLAGS = -O2
+
all:
- gcc -O2 mq_open_tests.c -o mq_open_tests -lrt
- gcc -O2 -o mq_perf_tests mq_perf_tests.c -lrt -lpthread -lpopt
+ $(CC) $(CFLAGS) mq_open_tests.c -o mq_open_tests -lrt
+ $(CC) $(CFLAGS) -o mq_perf_tests mq_perf_tests.c -lrt -lpthread -lpopt
+
+include ../lib.mk
+
+override define RUN_TESTS
+ @./mq_open_tests /test1 || echo "selftests: mq_open_tests [FAIL]"
+ @./mq_perf_tests || echo "selftests: mq_perf_tests [FAIL]"
+endef
+
+TEST_PROGS := mq_open_tests mq_perf_tests
-run_tests:
- @./mq_open_tests /test1 || echo "mq_open_tests: [FAIL]"
- @./mq_perf_tests || echo "mq_perf_tests: [FAIL]"
+override define EMIT_TESTS
+ echo "./mq_open_tests /test1 || echo \"selftests: mq_open_tests [FAIL]\""
+ echo "./mq_perf_tests || echo \"selftests: mq_perf_tests [FAIL]\""
+endef
clean:
rm -f mq_open_tests mq_perf_tests
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 62f22cc9941c..fac4782c51d8 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -1,6 +1,5 @@
# Makefile for net selftests
-CC = $(CROSS_COMPILE)gcc
CFLAGS = -Wall -O2 -g
CFLAGS += -I../../../../usr/include/
@@ -11,9 +10,10 @@ all: $(NET_PROGS)
%: %.c
$(CC) $(CFLAGS) -o $@ $^
-run_tests: all
- @/bin/sh ./run_netsocktests || echo "sockettests: [FAIL]"
- @/bin/sh ./run_afpackettests || echo "afpackettests: [FAIL]"
- ./test_bpf.sh
+TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh
+TEST_FILES := $(NET_PROGS)
+
+include ../lib.mk
+
clean:
$(RM) $(NET_PROGS)
diff --git a/tools/testing/selftests/net/run_afpackettests b/tools/testing/selftests/net/run_afpackettests
index 5246e782d6e8..5246e782d6e8 100644..100755
--- a/tools/testing/selftests/net/run_afpackettests
+++ b/tools/testing/selftests/net/run_afpackettests
diff --git a/tools/testing/selftests/net/run_netsocktests b/tools/testing/selftests/net/run_netsocktests
index c09a682df56a..c09a682df56a 100644..100755
--- a/tools/testing/selftests/net/run_netsocktests
+++ b/tools/testing/selftests/net/run_netsocktests
diff --git a/tools/testing/selftests/powerpc/Makefile b/tools/testing/selftests/powerpc/Makefile
index 1d5e7ad2c460..2958fe9a74e9 100644
--- a/tools/testing/selftests/powerpc/Makefile
+++ b/tools/testing/selftests/powerpc/Makefile
@@ -8,10 +8,9 @@ ifeq ($(ARCH),powerpc)
GIT_VERSION = $(shell git describe --always --long --dirty || echo "unknown")
-CC := $(CROSS_COMPILE)$(CC)
CFLAGS := -Wall -O2 -flto -Wall -Werror -DGIT_VERSION='"$(GIT_VERSION)"' -I$(CURDIR) $(CFLAGS)
-export CC CFLAGS
+export CFLAGS
TARGETS = pmu copyloops mm tm primitives stringloops
@@ -22,10 +21,25 @@ all: $(TARGETS)
$(TARGETS):
$(MAKE) -k -C $@ all
-run_tests: all
+include ../lib.mk
+
+override define RUN_TESTS
@for TARGET in $(TARGETS); do \
$(MAKE) -C $$TARGET run_tests; \
done;
+endef
+
+override define INSTALL_RULE
+ @for TARGET in $(TARGETS); do \
+ $(MAKE) -C $$TARGET install; \
+ done;
+endef
+
+override define EMIT_TESTS
+ @for TARGET in $(TARGETS); do \
+ $(MAKE) -s -C $$TARGET emit_tests; \
+ done;
+endef
clean:
@for TARGET in $(TARGETS); do \
@@ -36,4 +50,4 @@ clean:
tags:
find . -name '*.c' -o -name '*.h' | xargs ctags
-.PHONY: all run_tests clean tags $(TARGETS)
+.PHONY: tags $(TARGETS)
diff --git a/tools/testing/selftests/powerpc/copyloops/Makefile b/tools/testing/selftests/powerpc/copyloops/Makefile
index 6f2d3be227f9..c05023514ce8 100644
--- a/tools/testing/selftests/powerpc/copyloops/Makefile
+++ b/tools/testing/selftests/powerpc/copyloops/Makefile
@@ -6,24 +6,19 @@ CFLAGS += -D SELFTEST
# Use our CFLAGS for the implicit .S rule
ASFLAGS = $(CFLAGS)
-PROGS := copyuser_64 copyuser_power7 memcpy_64 memcpy_power7
+TEST_PROGS := copyuser_64 copyuser_power7 memcpy_64 memcpy_power7
EXTRA_SOURCES := validate.c ../harness.c
-all: $(PROGS)
+all: $(TEST_PROGS)
copyuser_64: CPPFLAGS += -D COPY_LOOP=test___copy_tofrom_user_base
copyuser_power7: CPPFLAGS += -D COPY_LOOP=test___copy_tofrom_user_power7
memcpy_64: CPPFLAGS += -D COPY_LOOP=test_memcpy
memcpy_power7: CPPFLAGS += -D COPY_LOOP=test_memcpy_power7
-$(PROGS): $(EXTRA_SOURCES)
+$(TEST_PROGS): $(EXTRA_SOURCES)
-run_tests: all
- @-for PROG in $(PROGS); do \
- ./$$PROG; \
- done;
+include ../../lib.mk
clean:
- rm -f $(PROGS) *.o
-
-.PHONY: all run_tests clean
+ rm -f $(TEST_PROGS) *.o
diff --git a/tools/testing/selftests/powerpc/mm/Makefile b/tools/testing/selftests/powerpc/mm/Makefile
index a14c538dd7f8..41cc3ed66818 100644
--- a/tools/testing/selftests/powerpc/mm/Makefile
+++ b/tools/testing/selftests/powerpc/mm/Makefile
@@ -1,21 +1,16 @@
noarg:
$(MAKE) -C ../
-PROGS := hugetlb_vs_thp_test subpage_prot
+TEST_PROGS := hugetlb_vs_thp_test subpage_prot
-all: $(PROGS) tempfile
+all: $(TEST_PROGS) tempfile
-$(PROGS): ../harness.c
+$(TEST_PROGS): ../harness.c
-run_tests: all
- @-for PROG in $(PROGS); do \
- ./$$PROG; \
- done;
+include ../../lib.mk
tempfile:
dd if=/dev/zero of=tempfile bs=64k count=1
clean:
- rm -f $(PROGS) tempfile
-
-.PHONY: all run_tests clean
+ rm -f $(TEST_PROGS) tempfile
diff --git a/tools/testing/selftests/powerpc/pmu/Makefile b/tools/testing/selftests/powerpc/pmu/Makefile
index c9f4263906a5..5a161175bbd4 100644
--- a/tools/testing/selftests/powerpc/pmu/Makefile
+++ b/tools/testing/selftests/powerpc/pmu/Makefile
@@ -1,38 +1,42 @@
noarg:
$(MAKE) -C ../
-PROGS := count_instructions l3_bank_test per_event_excludes
+TEST_PROGS := count_instructions l3_bank_test per_event_excludes
EXTRA_SOURCES := ../harness.c event.c lib.c
-SUB_TARGETS = ebb
+all: $(TEST_PROGS) ebb
-all: $(PROGS) $(SUB_TARGETS)
-
-$(PROGS): $(EXTRA_SOURCES)
+$(TEST_PROGS): $(EXTRA_SOURCES)
# loop.S can only be built 64-bit
count_instructions: loop.S count_instructions.c $(EXTRA_SOURCES)
$(CC) $(CFLAGS) -m64 -o $@ $^
-run_tests: all sub_run_tests
- @-for PROG in $(PROGS); do \
- ./$$PROG; \
- done;
+include ../../lib.mk
-clean: sub_clean
- rm -f $(PROGS) loop.o
+DEFAULT_RUN_TESTS := $(RUN_TESTS)
+override define RUN_TESTS
+ $(DEFAULT_RUN_TESTS)
+ $(MAKE) -C ebb run_tests
+endef
-$(SUB_TARGETS):
- $(MAKE) -k -C $@ all
+DEFAULT_EMIT_TESTS := $(EMIT_TESTS)
+override define EMIT_TESTS
+ $(DEFAULT_EMIT_TESTS)
+ $(MAKE) -s -C ebb emit_tests
+endef
-sub_run_tests: all
- @for TARGET in $(SUB_TARGETS); do \
- $(MAKE) -C $$TARGET run_tests; \
- done;
+DEFAULT_INSTALL := $(INSTALL_RULE)
+override define INSTALL_RULE
+ $(DEFAULT_INSTALL)
+ $(MAKE) -C ebb install
+endef
-sub_clean:
- @for TARGET in $(SUB_TARGETS); do \
- $(MAKE) -C $$TARGET clean; \
- done;
+clean:
+ rm -f $(TEST_PROGS) loop.o
+ $(MAKE) -C ebb clean
+
+ebb:
+ $(MAKE) -k -C $@ all
-.PHONY: all run_tests clean sub_run_tests sub_clean $(SUB_TARGETS)
+.PHONY: all run_tests clean ebb
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/Makefile b/tools/testing/selftests/powerpc/pmu/ebb/Makefile
index 3dc4332698cb..5cdc9dbf2b27 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/Makefile
+++ b/tools/testing/selftests/powerpc/pmu/ebb/Makefile
@@ -4,7 +4,7 @@ noarg:
# The EBB handler is 64-bit code and everything links against it
CFLAGS += -m64
-PROGS := reg_access_test event_attributes_test cycles_test \
+TEST_PROGS := reg_access_test event_attributes_test cycles_test \
cycles_with_freeze_test pmc56_overflow_test \
ebb_vs_cpu_event_test cpu_event_vs_ebb_test \
cpu_event_pinned_vs_ebb_test task_event_vs_ebb_test \
@@ -16,18 +16,15 @@ PROGS := reg_access_test event_attributes_test cycles_test \
lost_exception_test no_handler_test \
cycles_with_mmcr2_test
-all: $(PROGS)
+all: $(TEST_PROGS)
-$(PROGS): ../../harness.c ../event.c ../lib.c ebb.c ebb_handler.S trace.c busy_loop.S
+$(TEST_PROGS): ../../harness.c ../event.c ../lib.c ebb.c ebb_handler.S trace.c busy_loop.S
instruction_count_test: ../loop.S
lost_exception_test: ../lib.c
-run_tests: all
- @-for PROG in $(PROGS); do \
- ./$$PROG; \
- done;
+include ../../../lib.mk
clean:
- rm -f $(PROGS)
+ rm -f $(TEST_PROGS)
diff --git a/tools/testing/selftests/powerpc/primitives/Makefile b/tools/testing/selftests/powerpc/primitives/Makefile
index ea737ca01732..b68c6221d3d1 100644
--- a/tools/testing/selftests/powerpc/primitives/Makefile
+++ b/tools/testing/selftests/powerpc/primitives/Makefile
@@ -1,17 +1,12 @@
CFLAGS += -I$(CURDIR)
-PROGS := load_unaligned_zeropad
+TEST_PROGS := load_unaligned_zeropad
-all: $(PROGS)
+all: $(TEST_PROGS)
-$(PROGS): ../harness.c
+$(TEST_PROGS): ../harness.c
-run_tests: all
- @-for PROG in $(PROGS); do \
- ./$$PROG; \
- done;
+include ../../lib.mk
clean:
- rm -f $(PROGS) *.o
-
-.PHONY: all run_tests clean
+ rm -f $(TEST_PROGS) *.o
diff --git a/tools/testing/selftests/powerpc/stringloops/Makefile b/tools/testing/selftests/powerpc/stringloops/Makefile
index 506d77346477..2a728f4d2873 100644
--- a/tools/testing/selftests/powerpc/stringloops/Makefile
+++ b/tools/testing/selftests/powerpc/stringloops/Makefile
@@ -2,19 +2,14 @@
CFLAGS += -m64
CFLAGS += -I$(CURDIR)
-PROGS := memcmp
+TEST_PROGS := memcmp
EXTRA_SOURCES := memcmp_64.S ../harness.c
-all: $(PROGS)
+all: $(TEST_PROGS)
-$(PROGS): $(EXTRA_SOURCES)
+$(TEST_PROGS): $(EXTRA_SOURCES)
-run_tests: all
- @-for PROG in $(PROGS); do \
- ./$$PROG; \
- done;
+include ../../lib.mk
clean:
- rm -f $(PROGS) *.o
-
-.PHONY: all run_tests clean
+ rm -f $(TEST_PROGS) *.o
diff --git a/tools/testing/selftests/powerpc/tm/Makefile b/tools/testing/selftests/powerpc/tm/Makefile
index 2cede239a074..34f2ec634b40 100644
--- a/tools/testing/selftests/powerpc/tm/Makefile
+++ b/tools/testing/selftests/powerpc/tm/Makefile
@@ -1,15 +1,10 @@
-PROGS := tm-resched-dscr
+TEST_PROGS := tm-resched-dscr
-all: $(PROGS)
+all: $(TEST_PROGS)
-$(PROGS): ../harness.c
+$(TEST_PROGS): ../harness.c
-run_tests: all
- @-for PROG in $(PROGS); do \
- ./$$PROG; \
- done;
+include ../../lib.mk
clean:
- rm -f $(PROGS) *.o
-
-.PHONY: all run_tests clean
+ rm -f $(TEST_PROGS) *.o
diff --git a/tools/testing/selftests/ptrace/Makefile b/tools/testing/selftests/ptrace/Makefile
index 47ae2d385ce8..453927fea90c 100644
--- a/tools/testing/selftests/ptrace/Makefile
+++ b/tools/testing/selftests/ptrace/Makefile
@@ -6,5 +6,6 @@ all: peeksiginfo
clean:
rm -f peeksiginfo
-run_tests: all
- @./peeksiginfo || echo "peeksiginfo selftests: [FAIL]"
+TEST_PROGS := peeksiginfo
+
+include ../lib.mk
diff --git a/tools/testing/selftests/size/Makefile b/tools/testing/selftests/size/Makefile
index 04dc25e4fa92..bbd0b5398b61 100644
--- a/tools/testing/selftests/size/Makefile
+++ b/tools/testing/selftests/size/Makefile
@@ -1,12 +1,11 @@
-CC = $(CROSS_COMPILE)gcc
-
all: get_size
get_size: get_size.c
$(CC) -static -ffreestanding -nostartfiles -s $< -o $@
-run_tests: all
- ./get_size
+TEST_PROGS := get_size
+
+include ../lib.mk
clean:
$(RM) get_size
diff --git a/tools/testing/selftests/sysctl/Makefile b/tools/testing/selftests/sysctl/Makefile
index 0a92adaf0865..b3c33e071f10 100644
--- a/tools/testing/selftests/sysctl/Makefile
+++ b/tools/testing/selftests/sysctl/Makefile
@@ -4,16 +4,10 @@
# No binaries, but make sure arg-less "make" doesn't trigger "run_tests".
all:
-# Allow specific tests to be selected.
-test_num:
- @/bin/sh ./run_numerictests
+TEST_PROGS := run_numerictests run_stringtests
+TEST_FILES := common_tests
-test_string:
- @/bin/sh ./run_stringtests
-
-run_tests: all test_num test_string
+include ../lib.mk
# Nothing to clean up.
clean:
-
-.PHONY: all run_tests clean test_num test_string
diff --git a/tools/testing/selftests/sysctl/run_numerictests b/tools/testing/selftests/sysctl/run_numerictests
index 8510f93f2d14..8510f93f2d14 100644..100755
--- a/tools/testing/selftests/sysctl/run_numerictests
+++ b/tools/testing/selftests/sysctl/run_numerictests
diff --git a/tools/testing/selftests/sysctl/run_stringtests b/tools/testing/selftests/sysctl/run_stringtests
index 90a9293d520c..90a9293d520c 100644..100755
--- a/tools/testing/selftests/sysctl/run_stringtests
+++ b/tools/testing/selftests/sysctl/run_stringtests
diff --git a/tools/testing/selftests/timers/Makefile b/tools/testing/selftests/timers/Makefile
index eb2859f4ad21..89a3f44bf355 100644
--- a/tools/testing/selftests/timers/Makefile
+++ b/tools/testing/selftests/timers/Makefile
@@ -1,8 +1,36 @@
-all:
- gcc posix_timers.c -o posix_timers -lrt
+CC = $(CROSS_COMPILE)gcc
+BUILD_FLAGS = -DKTEST
+CFLAGS += -O3 -Wl,-no-as-needed -Wall $(BUILD_FLAGS)
+LDFLAGS += -lrt -lpthread
-run_tests: all
- ./posix_timers
+# these are all "safe" tests that don't modify
+# system time or require escalated privileges
+TEST_PROGS = posix_timers nanosleep nsleep-lat set-timer-lat mqueue-lat \
+ inconsistency-check raw_skew threadtest rtctest
+
+TEST_PROGS_EXTENDED = alarmtimer-suspend valid-adjtimex change_skew \
+ skew_consistency clocksource-switch leap-a-day \
+ leapcrash set-tai set-2038
+
+bins = $(TEST_PROGS) $(TEST_PROGS_EXTENDED)
+
+all: ${bins}
+
+include ../lib.mk
+
+# these tests require escalated privileges
+# and may modify the system time or trigger
+# other behavior like suspend
+run_destructive_tests: run_tests
+ ./alarmtimer-suspend
+ ./valid-adjtimex
+ ./change_skew
+ ./skew_consistency
+ ./clocksource-switch
+ ./leap-a-day -s -i 10
+ ./leapcrash
+ ./set-tai
+ ./set-2038
clean:
- rm -f ./posix_timers
+ rm -f ${bins}
diff --git a/tools/testing/selftests/timers/alarmtimer-suspend.c b/tools/testing/selftests/timers/alarmtimer-suspend.c
new file mode 100644
index 000000000000..aaffbde1d5ee
--- /dev/null
+++ b/tools/testing/selftests/timers/alarmtimer-suspend.c
@@ -0,0 +1,185 @@
+/* alarmtimer suspend test
+ * John Stultz (john.stultz@linaro.org)
+ * (C) Copyright Linaro 2013
+ * Licensed under the GPLv2
+ *
+ * This test makes sure the alarmtimer & RTC wakeup code is
+ * functioning.
+ *
+ * To build:
+ * $ gcc alarmtimer-suspend.c -o alarmtimer-suspend -lrt
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+#include <stdio.h>
+#include <unistd.h>
+#include <time.h>
+#include <string.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <pthread.h>
+#ifdef KTEST
+#include "../kselftest.h"
+#else
+static inline int ksft_exit_pass(void)
+{
+ exit(0);
+}
+static inline int ksft_exit_fail(void)
+{
+ exit(1);
+}
+#endif
+
+#define CLOCK_REALTIME 0
+#define CLOCK_MONOTONIC 1
+#define CLOCK_PROCESS_CPUTIME_ID 2
+#define CLOCK_THREAD_CPUTIME_ID 3
+#define CLOCK_MONOTONIC_RAW 4
+#define CLOCK_REALTIME_COARSE 5
+#define CLOCK_MONOTONIC_COARSE 6
+#define CLOCK_BOOTTIME 7
+#define CLOCK_REALTIME_ALARM 8
+#define CLOCK_BOOTTIME_ALARM 9
+#define CLOCK_HWSPECIFIC 10
+#define CLOCK_TAI 11
+#define NR_CLOCKIDS 12
+
+
+#define NSEC_PER_SEC 1000000000ULL
+#define UNREASONABLE_LAT (NSEC_PER_SEC * 4) /* hopefully we resume in 4secs */
+
+#define SUSPEND_SECS 15
+int alarmcount;
+int alarm_clock_id;
+struct timespec start_time;
+
+
+char *clockstring(int clockid)
+{
+ switch (clockid) {
+ case CLOCK_REALTIME:
+ return "CLOCK_REALTIME";
+ case CLOCK_MONOTONIC:
+ return "CLOCK_MONOTONIC";
+ case CLOCK_PROCESS_CPUTIME_ID:
+ return "CLOCK_PROCESS_CPUTIME_ID";
+ case CLOCK_THREAD_CPUTIME_ID:
+ return "CLOCK_THREAD_CPUTIME_ID";
+ case CLOCK_MONOTONIC_RAW:
+ return "CLOCK_MONOTONIC_RAW";
+ case CLOCK_REALTIME_COARSE:
+ return "CLOCK_REALTIME_COARSE";
+ case CLOCK_MONOTONIC_COARSE:
+ return "CLOCK_MONOTONIC_COARSE";
+ case CLOCK_BOOTTIME:
+ return "CLOCK_BOOTTIME";
+ case CLOCK_REALTIME_ALARM:
+ return "CLOCK_REALTIME_ALARM";
+ case CLOCK_BOOTTIME_ALARM:
+ return "CLOCK_BOOTTIME_ALARM";
+ case CLOCK_TAI:
+ return "CLOCK_TAI";
+ };
+ return "UNKNOWN_CLOCKID";
+}
+
+
+long long timespec_sub(struct timespec a, struct timespec b)
+{
+ long long ret = NSEC_PER_SEC * b.tv_sec + b.tv_nsec;
+
+ ret -= NSEC_PER_SEC * a.tv_sec + a.tv_nsec;
+ return ret;
+}
+
+int final_ret = 0;
+
+void sigalarm(int signo)
+{
+ long long delta_ns;
+ struct timespec ts;
+
+ clock_gettime(alarm_clock_id, &ts);
+ alarmcount++;
+
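+	/* latency = actual elapsed time minus the alarmcount * SUSPEND_SECS we expected */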
+ delta_ns = timespec_sub(start_time, ts);
+ delta_ns -= NSEC_PER_SEC * SUSPEND_SECS * alarmcount;
+
+ printf("ALARM(%i): %ld:%ld latency: %lld ns ", alarmcount, ts.tv_sec,
+ ts.tv_nsec, delta_ns);
+
+ if (delta_ns > UNREASONABLE_LAT) {
+ printf("[FAIL]\n");
+ final_ret = -1;
+ } else
+ printf("[OK]\n");
+
+}
+
+int main(void)
+{
+ timer_t tm1;
+ struct itimerspec its1, its2;
+ struct sigevent se;
+ struct sigaction act;
+ int signum = SIGRTMAX;
+
+ /* Set up signal handler: */
+ sigfillset(&act.sa_mask);
+ act.sa_flags = 0;
+ act.sa_handler = sigalarm;
+ sigaction(signum, &act, NULL);
+
+ /* Set up timer: */
+ memset(&se, 0, sizeof(se));
+ se.sigev_notify = SIGEV_SIGNAL;
+ se.sigev_signo = signum;
+ se.sigev_value.sival_int = 0;
+
+ for (alarm_clock_id = CLOCK_REALTIME_ALARM;
+ alarm_clock_id <= CLOCK_BOOTTIME_ALARM;
+ alarm_clock_id++) {
+
+ alarmcount = 0;
+ timer_create(alarm_clock_id, &se, &tm1);
+
+ clock_gettime(alarm_clock_id, &start_time);
+ printf("Start time (%s): %ld:%ld\n", clockstring(alarm_clock_id),
+ start_time.tv_sec, start_time.tv_nsec);
+ printf("Setting alarm for every %i seconds\n", SUSPEND_SECS);
+ its1.it_value = start_time;
+ its1.it_value.tv_sec += SUSPEND_SECS;
+ its1.it_interval.tv_sec = SUSPEND_SECS;
+ its1.it_interval.tv_nsec = 0;
+
+ timer_settime(tm1, TIMER_ABSTIME, &its1, &its2);
+
+ while (alarmcount < 5)
+ sleep(1); /* First 5 alarms, do nothing */
+
+ printf("Starting suspend loops\n");
+ while (alarmcount < 10) {
+ int ret;
+
+ sleep(1);
+ ret = system("echo mem > /sys/power/state");
+ if (ret)
+ break;
+ }
+ timer_delete(tm1);
+ }
+ if (final_ret)
+ return ksft_exit_fail();
+ return ksft_exit_pass();
+}
diff --git a/tools/testing/selftests/timers/change_skew.c b/tools/testing/selftests/timers/change_skew.c
new file mode 100644
index 000000000000..cb1968977c04
--- /dev/null
+++ b/tools/testing/selftests/timers/change_skew.c
@@ -0,0 +1,107 @@
+/* ADJ_FREQ Skew change test
+ * by: john stultz (johnstul@us.ibm.com)
+ * (C) Copyright IBM 2012
+ * Licensed under the GPLv2
+ *
+ * NOTE: This is a meta-test which cranks the ADJ_FREQ knob and
+ * then uses other tests to detect problems. Thus this test requires
+ * that the raw_skew, inconsistency-check and nanosleep tests be
+ * present in the same directory it is run from.
+ *
+ * To build:
+ * $ gcc change_skew.c -o change_skew -lrt
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/time.h>
+#include <sys/timex.h>
+#include <time.h>
+#ifdef KTEST
+#include "../kselftest.h"
+#else
+static inline int ksft_exit_pass(void)
+{
+ exit(0);
+}
+static inline int ksft_exit_fail(void)
+{
+ exit(1);
+}
+#endif
+
+#define NSEC_PER_SEC 1000000000LL
+
+
+int change_skew_test(int ppm)
+{
+ struct timex tx;
+ int ret;
+
+ tx.modes = ADJ_FREQUENCY;
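+	/* adjtimex frequency offset is scaled ppm with a 16-bit fractional part */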
+ tx.freq = ppm << 16;
+
+ ret = adjtimex(&tx);
+ if (ret < 0) {
+ printf("Error adjusting freq\n");
+ return ret;
+ }
+
+ ret = system("./raw_skew");
+ ret |= system("./inconsistency-check");
+ ret |= system("./nanosleep");
+
+ return ret;
+}
+
+
+int main(int argc, char **argv)
+{
+ struct timex tx;
+ int i, ret;
+
+ int ppm[5] = {0, 250, 500, -250, -500};
+
+ /* Kill ntpd */
+ ret = system("killall -9 ntpd");
+
+ /* Make sure there's no offset adjustment going on */
+ tx.modes = ADJ_OFFSET;
+ tx.offset = 0;
+ ret = adjtimex(&tx);
+
+ if (ret < 0) {
+ printf("Maybe you're not running as root?\n");
+ return -1;
+ }
+
+ for (i = 0; i < 5; i++) {
+ printf("Using %i ppm adjustment\n", ppm[i]);
+ ret = change_skew_test(ppm[i]);
+ if (ret)
+ break;
+ }
+
+ /* Set things back */
+ tx.modes = ADJ_FREQUENCY;
+ tx.offset = 0;
+ adjtimex(&tx);
+
+ if (ret) {
+ printf("[FAIL]");
+ return ksft_exit_fail();
+ }
+ printf("[OK]");
+ return ksft_exit_pass();
+}
diff --git a/tools/testing/selftests/timers/clocksource-switch.c b/tools/testing/selftests/timers/clocksource-switch.c
new file mode 100644
index 000000000000..627ec7425f78
--- /dev/null
+++ b/tools/testing/selftests/timers/clocksource-switch.c
@@ -0,0 +1,179 @@
+/* Clocksource change test
+ * by: john stultz (johnstul@us.ibm.com)
+ * (C) Copyright IBM 2012
+ * Licensed under the GPLv2
+ *
+ * NOTE: This is a meta-test which quickly changes the clocksource and
+ * then uses other tests to detect problems. Thus this test requires
+ * that the inconsistency-check and nanosleep tests be present in the
+ * same directory it is run from.
+ *
+ * To build:
+ * $ gcc clocksource-switch.c -o clocksource-switch -lrt
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <sys/time.h>
+#include <sys/timex.h>
+#include <time.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <string.h>
+#include <sys/wait.h>
+#ifdef KTEST
+#include "../kselftest.h"
+#else
+static inline int ksft_exit_pass(void)
+{
+ exit(0);
+}
+static inline int ksft_exit_fail(void)
+{
+ exit(1);
+}
+#endif
+
+
+int get_clocksources(char list[][30])
+{
+ int fd, i;
+ size_t size;
+ char buf[512];
+ char *head, *tmp;
+
+ fd = open("/sys/devices/system/clocksource/clocksource0/available_clocksource", O_RDONLY);
+
+ size = read(fd, buf, 512);
+
+ close(fd);
+
+ for (i = 0; i < 10; i++)
+ list[i][0] = '\0';
+
+ head = buf;
+ i = 0;
+ while (head - buf < size) {
+ /* Find the next space */
+ for (tmp = head; *tmp != ' '; tmp++) {
+ if (*tmp == '\n')
+ break;
+ if (*tmp == '\0')
+ break;
+ }
+ *tmp = '\0';
+ strcpy(list[i], head);
+ head = tmp + 1;
+ i++;
+ }
+
+ return i-1;
+}
+
+int get_cur_clocksource(char *buf, size_t size)
+{
+ int fd;
+
+ fd = open("/sys/devices/system/clocksource/clocksource0/current_clocksource", O_RDONLY);
+
+ size = read(fd, buf, size);
+
+ return 0;
+}
+
+int change_clocksource(char *clocksource)
+{
+ int fd;
+ ssize_t size;
+
+ fd = open("/sys/devices/system/clocksource/clocksource0/current_clocksource", O_WRONLY);
+
+ if (fd < 0)
+ return -1;
+
+ size = write(fd, clocksource, strlen(clocksource));
+
+ if (size < 0)
+ return -1;
+
+ close(fd);
+ return 0;
+}
+
+
+int run_tests(int secs)
+{
+ int ret;
+ char buf[255];
+
+ sprintf(buf, "./inconsistency-check -t %i", secs);
+ ret = system(buf);
+ if (ret)
+ return ret;
+ ret = system("./nanosleep");
+ return ret;
+}
+
+
+char clocksource_list[10][30];
+
+int main(int argc, char **argv)
+{
+ char orig_clk[512];
+ int count, i, status;
+ pid_t pid;
+
+ get_cur_clocksource(orig_clk, 512);
+
+ count = get_clocksources(clocksource_list);
+
+ if (change_clocksource(clocksource_list[0])) {
+ printf("Error: You probably need to run this as root\n");
+ return -1;
+ }
+
+ /* Check everything is sane before we start switching asynchronously */
+ for (i = 0; i < count; i++) {
+ printf("Validating clocksource %s\n", clocksource_list[i]);
+ if (change_clocksource(clocksource_list[i])) {
+ status = -1;
+ goto out;
+ }
+ if (run_tests(5)) {
+ status = -1;
+ goto out;
+ }
+ }
+
+
+ printf("Running Asyncrhonous Switching Tests...\n");
+ pid = fork();
+ if (!pid)
+ return run_tests(60);
+
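+	/* parent: keep cycling through clocksources until the child's 60s run exits */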
+ while (pid != waitpid(pid, &status, WNOHANG))
+ for (i = 0; i < count; i++)
+ if (change_clocksource(clocksource_list[i])) {
+ status = -1;
+ goto out;
+ }
+out:
+ change_clocksource(orig_clk);
+
+ if (status)
+ return ksft_exit_fail();
+ return ksft_exit_pass();
+}
diff --git a/tools/testing/selftests/timers/inconsistency-check.c b/tools/testing/selftests/timers/inconsistency-check.c
new file mode 100644
index 000000000000..caf1bc9257c4
--- /dev/null
+++ b/tools/testing/selftests/timers/inconsistency-check.c
@@ -0,0 +1,204 @@
+/* Time inconsistency check test
+ * by: john stultz (johnstul@us.ibm.com)
+ * (C) Copyright IBM 2003, 2004, 2005, 2012
+ * (C) Copyright Linaro Limited 2015
+ * Licensed under the GPLv2
+ *
+ * To build:
+ * $ gcc inconsistency-check.c -o inconsistency-check -lrt
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <time.h>
+#include <sys/time.h>
+#include <sys/timex.h>
+#include <string.h>
+#include <signal.h>
+#ifdef KTEST
+#include "../kselftest.h"
+#else
+static inline int ksft_exit_pass(void)
+{
+ exit(0);
+}
+static inline int ksft_exit_fail(void)
+{
+ exit(1);
+}
+#endif
+
+#define CALLS_PER_LOOP 64
+#define NSEC_PER_SEC 1000000000ULL
+
+#define CLOCK_REALTIME 0
+#define CLOCK_MONOTONIC 1
+#define CLOCK_PROCESS_CPUTIME_ID 2
+#define CLOCK_THREAD_CPUTIME_ID 3
+#define CLOCK_MONOTONIC_RAW 4
+#define CLOCK_REALTIME_COARSE 5
+#define CLOCK_MONOTONIC_COARSE 6
+#define CLOCK_BOOTTIME 7
+#define CLOCK_REALTIME_ALARM 8
+#define CLOCK_BOOTTIME_ALARM 9
+#define CLOCK_HWSPECIFIC 10
+#define CLOCK_TAI 11
+#define NR_CLOCKIDS 12
+
+char *clockstring(int clockid)
+{
+ switch (clockid) {
+ case CLOCK_REALTIME:
+ return "CLOCK_REALTIME";
+ case CLOCK_MONOTONIC:
+ return "CLOCK_MONOTONIC";
+ case CLOCK_PROCESS_CPUTIME_ID:
+ return "CLOCK_PROCESS_CPUTIME_ID";
+ case CLOCK_THREAD_CPUTIME_ID:
+ return "CLOCK_THREAD_CPUTIME_ID";
+ case CLOCK_MONOTONIC_RAW:
+ return "CLOCK_MONOTONIC_RAW";
+ case CLOCK_REALTIME_COARSE:
+ return "CLOCK_REALTIME_COARSE";
+ case CLOCK_MONOTONIC_COARSE:
+ return "CLOCK_MONOTONIC_COARSE";
+ case CLOCK_BOOTTIME:
+ return "CLOCK_BOOTTIME";
+ case CLOCK_REALTIME_ALARM:
+ return "CLOCK_REALTIME_ALARM";
+ case CLOCK_BOOTTIME_ALARM:
+ return "CLOCK_BOOTTIME_ALARM";
+ case CLOCK_TAI:
+ return "CLOCK_TAI";
+	}
+ return "UNKNOWN_CLOCKID";
+}
+
+/* returns 1 if a <= b, 0 otherwise */
+static inline int in_order(struct timespec a, struct timespec b)
+{
+ /* use unsigned to avoid false positives on 2038 rollover */
+ if ((unsigned long)a.tv_sec < (unsigned long)b.tv_sec)
+ return 1;
+ if ((unsigned long)a.tv_sec > (unsigned long)b.tv_sec)
+ return 0;
+ if (a.tv_nsec > b.tv_nsec)
+ return 0;
+ return 1;
+}
+
+
+
+int consistency_test(int clock_type, unsigned long seconds)
+{
+ struct timespec list[CALLS_PER_LOOP];
+ int i, inconsistent;
+ long now, then;
+ time_t t;
+ char *start_str;
+
+ clock_gettime(clock_type, &list[0]);
+ now = then = list[0].tv_sec;
+
+ /* timestamp start of test */
+ t = time(0);
+ start_str = ctime(&t);
+
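+	/*
+	 * Sample in tight batches and only then validate: keeping the
+	 * checks out of the read loop shrinks the gap between
+	 * consecutive clock_gettime() calls, so genuine inversions are
+	 * easier to catch.
+	 */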
+ while (seconds == -1 || now - then < seconds) {
+		inconsistent = -1;
+
+ /* Fill list */
+ for (i = 0; i < CALLS_PER_LOOP; i++)
+ clock_gettime(clock_type, &list[i]);
+
+ /* Check for inconsistencies */
+ for (i = 0; i < CALLS_PER_LOOP - 1; i++)
+ if (!in_order(list[i], list[i+1]))
+ inconsistent = i;
+
+ /* display inconsistency */
+		if (inconsistent >= 0) {
+ unsigned long long delta;
+
+			printf("%s\n", start_str);
+ for (i = 0; i < CALLS_PER_LOOP; i++) {
+ if (i == inconsistent)
+ printf("--------------------\n");
+ printf("%lu:%lu\n", list[i].tv_sec,
+ list[i].tv_nsec);
+ if (i == inconsistent + 1)
+ printf("--------------------\n");
+ }
+ delta = list[inconsistent].tv_sec * NSEC_PER_SEC;
+ delta += list[inconsistent].tv_nsec;
+ delta -= list[inconsistent+1].tv_sec * NSEC_PER_SEC;
+ delta -= list[inconsistent+1].tv_nsec;
+ printf("Delta: %llu ns\n", delta);
+ fflush(0);
+ /* timestamp inconsistency*/
+ t = time(0);
+ printf("%s\n", ctime(&t));
+ printf("[FAILED]\n");
+ return -1;
+ }
+ now = list[0].tv_sec;
+ }
+ printf("[OK]\n");
+ return 0;
+}
+
+
+int main(int argc, char *argv[])
+{
+ int clockid, opt;
+ int userclock = CLOCK_REALTIME;
+ int maxclocks = NR_CLOCKIDS;
+ int runtime = 10;
+ struct timespec ts;
+
+ /* Process arguments */
+ while ((opt = getopt(argc, argv, "t:c:")) != -1) {
+ switch (opt) {
+ case 't':
+ runtime = atoi(optarg);
+ break;
+ case 'c':
+ userclock = atoi(optarg);
+ maxclocks = userclock + 1;
+ break;
+ default:
+ printf("Usage: %s [-t <secs>] [-c <clockid>]\n", argv[0]);
+ printf(" -t: Number of seconds to run\n");
+ printf(" -c: clockid to use (default, all clockids)\n");
+ exit(-1);
+ }
+ }
+
+ setbuf(stdout, NULL);
+
+ for (clockid = userclock; clockid < maxclocks; clockid++) {
+
+ if (clockid == CLOCK_HWSPECIFIC)
+ continue;
+
+ if (!clock_gettime(clockid, &ts)) {
+ printf("Consistent %-30s ", clockstring(clockid));
+ if (consistency_test(clockid, runtime))
+ return ksft_exit_fail();
+ }
+ }
+ return ksft_exit_pass();
+}
diff --git a/tools/testing/selftests/timers/leap-a-day.c b/tools/testing/selftests/timers/leap-a-day.c
new file mode 100644
index 000000000000..b8272e6c4b3b
--- /dev/null
+++ b/tools/testing/selftests/timers/leap-a-day.c
@@ -0,0 +1,319 @@
+/* Leap second stress test
+ * by: John Stultz (john.stultz@linaro.org)
+ * (C) Copyright IBM 2012
+ * (C) Copyright 2013, 2015 Linaro Limited
+ * Licensed under the GPLv2
+ *
+ * This test signals the kernel to insert a leap second
+ * every day at midnight GMT. This allows for stressing the
+ * kernel's leap-second behavior, as well as how well applications
+ * handle the leap-second discontinuity.
+ *
+ * Usage: leap-a-day [-s] [-i <num>]
+ *
+ * Options:
+ * -s: Each iteration, set the date to 10 seconds before midnight GMT.
+ * This speeds up the number of leapsecond transitions tested,
+ * but because it calls settimeofday frequently, advancing the
+ * time by 24 hours every ~16 seconds, it may cause application
+ * disruption.
+ *
+ * -i: Number of iterations to run (default: infinite)
+ *
+ * Other notes: Disabling NTP prior to running this is advised, as the two
+ * may conflict in their commands to the kernel.
+ *
+ * To build:
+ * $ gcc leap-a-day.c -o leap-a-day -lrt
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <time.h>
+#include <sys/time.h>
+#include <sys/timex.h>
+#include <string.h>
+#include <signal.h>
+#include <unistd.h>
+#ifdef KTEST
+#include "../kselftest.h"
+#else
+static inline int ksft_exit_pass(void)
+{
+ exit(0);
+}
+static inline int ksft_exit_fail(void)
+{
+ exit(1);
+}
+#endif
+
+#define NSEC_PER_SEC 1000000000ULL
+#define CLOCK_TAI 11
+
+/* returns 1 if a <= b, 0 otherwise */
+static inline int in_order(struct timespec a, struct timespec b)
+{
+ if (a.tv_sec < b.tv_sec)
+ return 1;
+ if (a.tv_sec > b.tv_sec)
+ return 0;
+ if (a.tv_nsec > b.tv_nsec)
+ return 0;
+ return 1;
+}
+
+struct timespec timespec_add(struct timespec ts, unsigned long long ns)
+{
+ ts.tv_nsec += ns;
+ while (ts.tv_nsec >= NSEC_PER_SEC) {
+ ts.tv_nsec -= NSEC_PER_SEC;
+ ts.tv_sec++;
+ }
+ return ts;
+}
+
+char *time_state_str(int state)
+{
+ switch (state) {
+ case TIME_OK: return "TIME_OK";
+ case TIME_INS: return "TIME_INS";
+ case TIME_DEL: return "TIME_DEL";
+ case TIME_OOP: return "TIME_OOP";
+ case TIME_WAIT: return "TIME_WAIT";
+ case TIME_BAD: return "TIME_BAD";
+ }
+ return "ERROR";
+}
+
+/* clear NTP time_status & time_state */
+int clear_time_state(void)
+{
+ struct timex tx;
+ int ret;
+
+ /*
+	 * We have to call adjtimex twice here, as kernels
+ * prior to 6b1859dba01c7 (included in 3.5 and
+ * -stable), had an issue with the state machine
+ * and wouldn't clear the STA_INS/DEL flag directly.
+ */
+ tx.modes = ADJ_STATUS;
+ tx.status = STA_PLL;
+ ret = adjtimex(&tx);
+
+ /* Clear maxerror, as it can cause UNSYNC to be set */
+ tx.modes = ADJ_MAXERROR;
+ tx.maxerror = 0;
+ ret = adjtimex(&tx);
+
+ /* Clear the status */
+ tx.modes = ADJ_STATUS;
+ tx.status = 0;
+ ret = adjtimex(&tx);
+
+ return ret;
+}
+
+/* Make sure we cleanup on ctrl-c */
+void handler(int unused)
+{
+ clear_time_state();
+ exit(0);
+}
+
+/* Test for known hrtimer failure */
+void test_hrtimer_failure(void)
+{
+ struct timespec now, target;
+
+ clock_gettime(CLOCK_REALTIME, &now);
+ target = timespec_add(now, NSEC_PER_SEC/2);
+ clock_nanosleep(CLOCK_REALTIME, TIMER_ABSTIME, &target, NULL);
+ clock_gettime(CLOCK_REALTIME, &now);
+
+ if (!in_order(target, now))
+ printf("ERROR: hrtimer early expiration failure observed.\n");
+}
+
+int main(int argc, char **argv)
+{
+ int settime = 0;
+ int tai_time = 0;
+ int insert = 1;
+ int iterations = -1;
+ int opt;
+
+ /* Process arguments */
+ while ((opt = getopt(argc, argv, "sti:")) != -1) {
+ switch (opt) {
+ case 's':
+ printf("Setting time to speed up testing\n");
+ settime = 1;
+ break;
+ case 'i':
+ iterations = atoi(optarg);
+ break;
+ case 't':
+ tai_time = 1;
+ break;
+ default:
+ printf("Usage: %s [-s] [-i <iterations>]\n", argv[0]);
+ printf(" -s: Set time to right before leap second each iteration\n");
+ printf(" -i: Number of iterations\n");
+ printf(" -t: Print TAI time\n");
+ exit(-1);
+ }
+ }
+
+ /* Make sure TAI support is present if -t was used */
+ if (tai_time) {
+ struct timespec ts;
+
+ if (clock_gettime(CLOCK_TAI, &ts)) {
+ printf("System doesn't support CLOCK_TAI\n");
+ ksft_exit_fail();
+ }
+ }
+
+ signal(SIGINT, handler);
+	signal(SIGTERM, handler);
+
+ if (iterations < 0)
+ printf("This runs continuously. Press ctrl-c to stop\n");
+ else
+ printf("Running for %i iterations. Press ctrl-c to stop\n", iterations);
+
+ printf("\n");
+ while (1) {
+ int ret;
+ struct timespec ts;
+ struct timex tx;
+ time_t now, next_leap;
+
+ /* Get the current time */
+ clock_gettime(CLOCK_REALTIME, &ts);
+
+ /* Calculate the next possible leap second 23:59:60 GMT */
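+		/* (Rounding tv_sec up to the next multiple of 86400 lands
+		 * exactly on the following 00:00:00 UTC, which is where a
+		 * 23:59:60 leap second would be inserted.) */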
+ next_leap = ts.tv_sec;
+ next_leap += 86400 - (next_leap % 86400);
+
+ if (settime) {
+ struct timeval tv;
+
+ tv.tv_sec = next_leap - 10;
+ tv.tv_usec = 0;
+ settimeofday(&tv, NULL);
+ printf("Setting time to %s", ctime(&tv.tv_sec));
+ }
+
+ /* Reset NTP time state */
+ clear_time_state();
+
+ /* Set the leap second insert flag */
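+		/* STA_INS/STA_DEL arm a one-shot leap-second insert/delete
+		 * at the next UTC midnight; the flag stays set until the
+		 * leap is applied or the status is cleared. */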
+ tx.modes = ADJ_STATUS;
+ if (insert)
+ tx.status = STA_INS;
+ else
+ tx.status = STA_DEL;
+ ret = adjtimex(&tx);
+ if (ret < 0) {
+ printf("Error: Problem setting STA_INS/STA_DEL!: %s\n",
+ time_state_str(ret));
+ return ksft_exit_fail();
+ }
+
+ /* Validate STA_INS was set */
+ tx.modes = 0;
+ ret = adjtimex(&tx);
+ if (tx.status != STA_INS && tx.status != STA_DEL) {
+ printf("Error: STA_INS/STA_DEL not set!: %s\n",
+ time_state_str(ret));
+ return ksft_exit_fail();
+ }
+
+		if (tai_time)
+			printf("Using TAI time, no inconsistencies should be seen!\n");
+
+ printf("Scheduling leap second for %s", ctime(&next_leap));
+
+ /* Wake up 3 seconds before leap */
+ ts.tv_sec = next_leap - 3;
+ ts.tv_nsec = 0;
+
+ while (clock_nanosleep(CLOCK_REALTIME, TIMER_ABSTIME, &ts, NULL))
+ printf("Something woke us up, returning to sleep\n");
+
+ /* Validate STA_INS is still set */
+ tx.modes = 0;
+ ret = adjtimex(&tx);
+ if (tx.status != STA_INS && tx.status != STA_DEL) {
+ printf("Something cleared STA_INS/STA_DEL, setting it again.\n");
+ tx.modes = ADJ_STATUS;
+ if (insert)
+ tx.status = STA_INS;
+ else
+ tx.status = STA_DEL;
+ ret = adjtimex(&tx);
+ }
+
+ /* Check adjtimex output every half second */
+ now = tx.time.tv_sec;
+ while (now < next_leap + 2) {
+ char buf[26];
+ struct timespec tai;
+
+ tx.modes = 0;
+ ret = adjtimex(&tx);
+
+ if (tai_time) {
+ clock_gettime(CLOCK_TAI, &tai);
+ printf("%ld sec, %9ld ns\t%s\n",
+ tai.tv_sec,
+ tai.tv_nsec,
+ time_state_str(ret));
+ } else {
+ ctime_r(&tx.time.tv_sec, buf);
+				buf[strlen(buf)-1] = 0; /* remove trailing \n */
+
+ printf("%s + %6ld us (%i)\t%s\n",
+ buf,
+ tx.time.tv_usec,
+ tx.tai,
+ time_state_str(ret));
+ }
+ now = tx.time.tv_sec;
+ /* Sleep for another half second */
+ ts.tv_sec = 0;
+ ts.tv_nsec = NSEC_PER_SEC / 2;
+ clock_nanosleep(CLOCK_MONOTONIC, 0, &ts, NULL);
+ }
+ /* Switch to using other mode */
+ insert = !insert;
+
+ /* Note if kernel has known hrtimer failure */
+ test_hrtimer_failure();
+
+ printf("Leap complete\n\n");
+
+ if ((iterations != -1) && !(--iterations))
+ break;
+ }
+
+ clear_time_state();
+ return ksft_exit_pass();
+}
diff --git a/tools/testing/selftests/timers/leapcrash.c b/tools/testing/selftests/timers/leapcrash.c
new file mode 100644
index 000000000000..a1071bdbdeb7
--- /dev/null
+++ b/tools/testing/selftests/timers/leapcrash.c
@@ -0,0 +1,120 @@
+/* Demo leapsecond deadlock
+ * by: John Stultz (john.stultz@linaro.org)
+ * (C) Copyright IBM 2012
+ * (C) Copyright 2013, 2015 Linaro Limited
+ * Licensed under the GPL
+ *
+ * This test demonstrates leapsecond deadlock that is possible
+ * on kernels from 2.6.26 to 3.3.
+ *
+ * WARNING: THIS WILL LIKELY HARDHANG SYSTEMS AND MAY LOSE DATA
+ * RUN AT YOUR OWN RISK!
+ * To build:
+ * $ gcc leapcrash.c -o leapcrash -lrt
+ */
+
+
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <time.h>
+#include <sys/time.h>
+#include <sys/timex.h>
+#include <string.h>
+#include <signal.h>
+#ifdef KTEST
+#include "../kselftest.h"
+#else
+static inline int ksft_exit_pass(void)
+{
+ exit(0);
+}
+static inline int ksft_exit_fail(void)
+{
+ exit(1);
+}
+#endif
+
+
+
+/* clear NTP time_status & time_state */
+int clear_time_state(void)
+{
+ struct timex tx;
+ int ret;
+
+ /*
+	 * We have to call adjtimex twice here, as kernels
+ * prior to 6b1859dba01c7 (included in 3.5 and
+ * -stable), had an issue with the state machine
+ * and wouldn't clear the STA_INS/DEL flag directly.
+ */
+ tx.modes = ADJ_STATUS;
+ tx.status = STA_PLL;
+ ret = adjtimex(&tx);
+
+ tx.modes = ADJ_STATUS;
+ tx.status = 0;
+ ret = adjtimex(&tx);
+
+ return ret;
+}
+
+/* Make sure we cleanup on ctrl-c */
+void handler(int unused)
+{
+ clear_time_state();
+ exit(0);
+}
+
+
+int main(void)
+{
+ struct timex tx;
+ struct timespec ts;
+ time_t next_leap;
+ int count = 0;
+
+ setbuf(stdout, NULL);
+
+ signal(SIGINT, handler);
+	signal(SIGTERM, handler);
+ printf("This runs for a few minutes. Press ctrl-c to stop\n");
+
+ clear_time_state();
+
+
+ /* Get the current time */
+ clock_gettime(CLOCK_REALTIME, &ts);
+
+ /* Calculate the next possible leap second 23:59:60 GMT */
+ next_leap = ts.tv_sec;
+ next_leap += 86400 - (next_leap % 86400);
+
+ for (count = 0; count < 20; count++) {
+ struct timeval tv;
+
+
+ /* set the time to 2 seconds before the leap */
+ tv.tv_sec = next_leap - 2;
+ tv.tv_usec = 0;
+ if (settimeofday(&tv, NULL)) {
+ printf("Error: You're likely not running with proper (ie: root) permissions\n");
+ return ksft_exit_fail();
+ }
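+		/* With modes == 0, adjtimex() is a pure read: it refreshes
+		 * tx.time without changing any timekeeping state. */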
+ tx.modes = 0;
+ adjtimex(&tx);
+
+ /* hammer on adjtime w/ STA_INS */
+ while (tx.time.tv_sec < next_leap + 1) {
+ /* Set the leap second insert flag */
+ tx.modes = ADJ_STATUS;
+ tx.status = STA_INS;
+ adjtimex(&tx);
+ }
+ clear_time_state();
+ printf(".");
+ }
+ printf("[OK]\n");
+ return ksft_exit_pass();
+}
diff --git a/tools/testing/selftests/timers/mqueue-lat.c b/tools/testing/selftests/timers/mqueue-lat.c
new file mode 100644
index 000000000000..a2a3924d0b41
--- /dev/null
+++ b/tools/testing/selftests/timers/mqueue-lat.c
@@ -0,0 +1,124 @@
+/* Measure mqueue timeout latency
+ * by: john stultz (john.stultz@linaro.org)
+ * (C) Copyright Linaro 2013
+ *
+ * Inspired with permission from example test by:
+ * Romain Francoise <romain@orebokech.com>
+ * Licensed under the GPLv2
+ *
+ * To build:
+ * $ gcc mqueue-lat.c -o mqueue-lat -lrt
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <time.h>
+#include <sys/time.h>
+#include <sys/timex.h>
+#include <string.h>
+#include <signal.h>
+#include <errno.h>
+#include <mqueue.h>
+#ifdef KTEST
+#include "../kselftest.h"
+#else
+static inline int ksft_exit_pass(void)
+{
+ exit(0);
+}
+static inline int ksft_exit_fail(void)
+{
+ exit(1);
+}
+#endif
+
+#define NSEC_PER_SEC 1000000000ULL
+
+#define TARGET_TIMEOUT 100000000 /* 100ms in nanoseconds */
+#define UNRESONABLE_LATENCY 40000000 /* 40ms in nanosecs */
+
+
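+/* Note: returns b - a in nanoseconds; callers pass (start, end) */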
+long long timespec_sub(struct timespec a, struct timespec b)
+{
+ long long ret = NSEC_PER_SEC * b.tv_sec + b.tv_nsec;
+
+ ret -= NSEC_PER_SEC * a.tv_sec + a.tv_nsec;
+ return ret;
+}
+
+struct timespec timespec_add(struct timespec ts, unsigned long long ns)
+{
+ ts.tv_nsec += ns;
+ while (ts.tv_nsec >= NSEC_PER_SEC) {
+ ts.tv_nsec -= NSEC_PER_SEC;
+ ts.tv_sec++;
+ }
+ return ts;
+}
+
+int mqueue_lat_test(void)
+{
+
+ mqd_t q;
+ struct mq_attr attr;
+ struct timespec start, end, now, target;
+ int i, count, ret;
+
+ q = mq_open("/foo", O_CREAT | O_RDONLY, 0666, NULL);
+ if (q < 0) {
+ perror("mq_open");
+ return -1;
+ }
+ mq_getattr(q, &attr);
+
+
+ count = 100;
+ clock_gettime(CLOCK_MONOTONIC, &start);
+
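+	/* With no writers on the queue, every receive should block for
+	 * the full 100ms timeout, so the average per-iteration cost is a
+	 * direct measure of how late the timeout actually fires. */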
+ for (i = 0; i < count; i++) {
+ char buf[attr.mq_msgsize];
+
+ clock_gettime(CLOCK_REALTIME, &now);
+ target = now;
+ target = timespec_add(now, TARGET_TIMEOUT); /* 100ms */
+
+ ret = mq_timedreceive(q, buf, sizeof(buf), NULL, &target);
+ if (ret < 0 && errno != ETIMEDOUT) {
+ perror("mq_timedreceive");
+ return -1;
+ }
+ }
+ clock_gettime(CLOCK_MONOTONIC, &end);
+
+ mq_close(q);
+
+ if ((timespec_sub(start, end)/count) > TARGET_TIMEOUT + UNRESONABLE_LATENCY)
+ return -1;
+
+ return 0;
+}
+
+int main(int argc, char **argv)
+{
+ int ret;
+
+ printf("Mqueue latency : ");
+
+ ret = mqueue_lat_test();
+ if (ret < 0) {
+ printf("[FAILED]\n");
+ return ksft_exit_fail();
+ }
+ printf("[OK]\n");
+ return ksft_exit_pass();
+}
diff --git a/tools/testing/selftests/timers/nanosleep.c b/tools/testing/selftests/timers/nanosleep.c
new file mode 100644
index 000000000000..8a3c29de7d49
--- /dev/null
+++ b/tools/testing/selftests/timers/nanosleep.c
@@ -0,0 +1,174 @@
+/* Make sure timers don't return early
+ * by: john stultz (johnstul@us.ibm.com)
+ * John Stultz (john.stultz@linaro.org)
+ * (C) Copyright IBM 2012
+ * (C) Copyright Linaro 2013 2015
+ * Licensed under the GPLv2
+ *
+ * To build:
+ * $ gcc nanosleep.c -o nanosleep -lrt
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <time.h>
+#include <sys/time.h>
+#include <sys/timex.h>
+#include <string.h>
+#include <signal.h>
+#ifdef KTEST
+#include "../kselftest.h"
+#else
+static inline int ksft_exit_pass(void)
+{
+ exit(0);
+}
+static inline int ksft_exit_fail(void)
+{
+ exit(1);
+}
+#endif
+
+#define NSEC_PER_SEC 1000000000ULL
+
+#define CLOCK_REALTIME 0
+#define CLOCK_MONOTONIC 1
+#define CLOCK_PROCESS_CPUTIME_ID 2
+#define CLOCK_THREAD_CPUTIME_ID 3
+#define CLOCK_MONOTONIC_RAW 4
+#define CLOCK_REALTIME_COARSE 5
+#define CLOCK_MONOTONIC_COARSE 6
+#define CLOCK_BOOTTIME 7
+#define CLOCK_REALTIME_ALARM 8
+#define CLOCK_BOOTTIME_ALARM 9
+#define CLOCK_HWSPECIFIC 10
+#define CLOCK_TAI 11
+#define NR_CLOCKIDS 12
+
+#define UNSUPPORTED 0xf00f
+
+char *clockstring(int clockid)
+{
+ switch (clockid) {
+ case CLOCK_REALTIME:
+ return "CLOCK_REALTIME";
+ case CLOCK_MONOTONIC:
+ return "CLOCK_MONOTONIC";
+ case CLOCK_PROCESS_CPUTIME_ID:
+ return "CLOCK_PROCESS_CPUTIME_ID";
+ case CLOCK_THREAD_CPUTIME_ID:
+ return "CLOCK_THREAD_CPUTIME_ID";
+ case CLOCK_MONOTONIC_RAW:
+ return "CLOCK_MONOTONIC_RAW";
+ case CLOCK_REALTIME_COARSE:
+ return "CLOCK_REALTIME_COARSE";
+ case CLOCK_MONOTONIC_COARSE:
+ return "CLOCK_MONOTONIC_COARSE";
+ case CLOCK_BOOTTIME:
+ return "CLOCK_BOOTTIME";
+ case CLOCK_REALTIME_ALARM:
+ return "CLOCK_REALTIME_ALARM";
+ case CLOCK_BOOTTIME_ALARM:
+ return "CLOCK_BOOTTIME_ALARM";
+ case CLOCK_TAI:
+ return "CLOCK_TAI";
+	}
+ return "UNKNOWN_CLOCKID";
+}
+
+/* returns 1 if a <= b, 0 otherwise */
+static inline int in_order(struct timespec a, struct timespec b)
+{
+ if (a.tv_sec < b.tv_sec)
+ return 1;
+ if (a.tv_sec > b.tv_sec)
+ return 0;
+ if (a.tv_nsec > b.tv_nsec)
+ return 0;
+ return 1;
+}
+
+struct timespec timespec_add(struct timespec ts, unsigned long long ns)
+{
+ ts.tv_nsec += ns;
+ while (ts.tv_nsec >= NSEC_PER_SEC) {
+ ts.tv_nsec -= NSEC_PER_SEC;
+ ts.tv_sec++;
+ }
+ return ts;
+}
+
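+/*
+ * Sleep on the clock both absolutely and relatively, then verify the
+ * clock has advanced at least to the requested wakeup time; returning
+ * early from nanosleep is the failure this test exists to catch.
+ */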
+int nanosleep_test(int clockid, long long ns)
+{
+ struct timespec now, target, rel;
+
+ /* First check abs time */
+ if (clock_gettime(clockid, &now))
+ return UNSUPPORTED;
+ target = timespec_add(now, ns);
+
+ if (clock_nanosleep(clockid, TIMER_ABSTIME, &target, NULL))
+ return UNSUPPORTED;
+ clock_gettime(clockid, &now);
+
+ if (!in_order(target, now))
+ return -1;
+
+ /* Second check reltime */
+ clock_gettime(clockid, &now);
+ rel.tv_sec = 0;
+ rel.tv_nsec = 0;
+ rel = timespec_add(rel, ns);
+ target = timespec_add(now, ns);
+ clock_nanosleep(clockid, 0, &rel, NULL);
+ clock_gettime(clockid, &now);
+
+ if (!in_order(target, now))
+ return -1;
+ return 0;
+}
+
+int main(int argc, char **argv)
+{
+ long long length;
+ int clockid, ret;
+
+ for (clockid = CLOCK_REALTIME; clockid < NR_CLOCKIDS; clockid++) {
+
+ /* Skip cputime clockids since nanosleep won't increment cputime */
+ if (clockid == CLOCK_PROCESS_CPUTIME_ID ||
+ clockid == CLOCK_THREAD_CPUTIME_ID ||
+ clockid == CLOCK_HWSPECIFIC)
+ continue;
+
+ printf("Nanosleep %-31s ", clockstring(clockid));
+
+ length = 10;
+ while (length <= (NSEC_PER_SEC * 10)) {
+ ret = nanosleep_test(clockid, length);
+ if (ret == UNSUPPORTED) {
+ printf("[UNSUPPORTED]\n");
+ goto next;
+ }
+ if (ret < 0) {
+ printf("[FAILED]\n");
+ return ksft_exit_fail();
+ }
+ length *= 100;
+ }
+ printf("[OK]\n");
+next:
+ ret = 0;
+ }
+ return ksft_exit_pass();
+}
diff --git a/tools/testing/selftests/timers/nsleep-lat.c b/tools/testing/selftests/timers/nsleep-lat.c
new file mode 100644
index 000000000000..2d7898fda0f1
--- /dev/null
+++ b/tools/testing/selftests/timers/nsleep-lat.c
@@ -0,0 +1,190 @@
+/* Measure nanosleep timer latency
+ * by: john stultz (john.stultz@linaro.org)
+ * (C) Copyright Linaro 2013
+ * Licensed under the GPLv2
+ *
+ * To build:
+ * $ gcc nsleep-lat.c -o nsleep-lat -lrt
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <time.h>
+#include <sys/time.h>
+#include <sys/timex.h>
+#include <string.h>
+#include <signal.h>
+#ifdef KTEST
+#include "../kselftest.h"
+#else
+static inline int ksft_exit_pass(void)
+{
+ exit(0);
+}
+static inline int ksft_exit_fail(void)
+{
+ exit(1);
+}
+#endif
+
+#define NSEC_PER_SEC 1000000000ULL
+
+#define UNRESONABLE_LATENCY 40000000 /* 40ms in nanosecs */
+
+
+#define CLOCK_REALTIME 0
+#define CLOCK_MONOTONIC 1
+#define CLOCK_PROCESS_CPUTIME_ID 2
+#define CLOCK_THREAD_CPUTIME_ID 3
+#define CLOCK_MONOTONIC_RAW 4
+#define CLOCK_REALTIME_COARSE 5
+#define CLOCK_MONOTONIC_COARSE 6
+#define CLOCK_BOOTTIME 7
+#define CLOCK_REALTIME_ALARM 8
+#define CLOCK_BOOTTIME_ALARM 9
+#define CLOCK_HWSPECIFIC 10
+#define CLOCK_TAI 11
+#define NR_CLOCKIDS 12
+
+#define UNSUPPORTED 0xf00f
+
+char *clockstring(int clockid)
+{
+ switch (clockid) {
+ case CLOCK_REALTIME:
+ return "CLOCK_REALTIME";
+ case CLOCK_MONOTONIC:
+ return "CLOCK_MONOTONIC";
+ case CLOCK_PROCESS_CPUTIME_ID:
+ return "CLOCK_PROCESS_CPUTIME_ID";
+ case CLOCK_THREAD_CPUTIME_ID:
+ return "CLOCK_THREAD_CPUTIME_ID";
+ case CLOCK_MONOTONIC_RAW:
+ return "CLOCK_MONOTONIC_RAW";
+ case CLOCK_REALTIME_COARSE:
+ return "CLOCK_REALTIME_COARSE";
+ case CLOCK_MONOTONIC_COARSE:
+ return "CLOCK_MONOTONIC_COARSE";
+ case CLOCK_BOOTTIME:
+ return "CLOCK_BOOTTIME";
+ case CLOCK_REALTIME_ALARM:
+ return "CLOCK_REALTIME_ALARM";
+ case CLOCK_BOOTTIME_ALARM:
+ return "CLOCK_BOOTTIME_ALARM";
+ case CLOCK_TAI:
+ return "CLOCK_TAI";
+	}
+ return "UNKNOWN_CLOCKID";
+}
+
+struct timespec timespec_add(struct timespec ts, unsigned long long ns)
+{
+ ts.tv_nsec += ns;
+ while (ts.tv_nsec >= NSEC_PER_SEC) {
+ ts.tv_nsec -= NSEC_PER_SEC;
+ ts.tv_sec++;
+ }
+ return ts;
+}
+
+
+long long timespec_sub(struct timespec a, struct timespec b)
+{
+ long long ret = NSEC_PER_SEC * b.tv_sec + b.tv_nsec;
+
+ ret -= NSEC_PER_SEC * a.tv_sec + a.tv_nsec;
+ return ret;
+}
+
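+/*
+ * Measure how far past the requested expiry a sleeper actually wakes,
+ * for both relative and absolute sleeps; an average beyond the 40ms
+ * budget above is treated as a failure.
+ */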
+int nanosleep_lat_test(int clockid, long long ns)
+{
+ struct timespec start, end, target;
+ long long latency = 0;
+ int i, count;
+
+ target.tv_sec = ns/NSEC_PER_SEC;
+ target.tv_nsec = ns%NSEC_PER_SEC;
+
+ if (clock_gettime(clockid, &start))
+ return UNSUPPORTED;
+ if (clock_nanosleep(clockid, 0, &target, NULL))
+ return UNSUPPORTED;
+
+ count = 10;
+
+ /* First check relative latency */
+ clock_gettime(clockid, &start);
+ for (i = 0; i < count; i++)
+ clock_nanosleep(clockid, 0, &target, NULL);
+ clock_gettime(clockid, &end);
+
+ if (((timespec_sub(start, end)/count)-ns) > UNRESONABLE_LATENCY) {
+ printf("Large rel latency: %lld ns :", (timespec_sub(start, end)/count)-ns);
+ return -1;
+ }
+
+ /* Next check absolute latency */
+ for (i = 0; i < count; i++) {
+ clock_gettime(clockid, &start);
+ target = timespec_add(start, ns);
+ clock_nanosleep(clockid, TIMER_ABSTIME, &target, NULL);
+ clock_gettime(clockid, &end);
+ latency += timespec_sub(target, end);
+ }
+
+ if (latency/count > UNRESONABLE_LATENCY) {
+ printf("Large abs latency: %lld ns :", latency/count);
+ return -1;
+ }
+
+ return 0;
+}
+
+
+
+int main(int argc, char **argv)
+{
+ long long length;
+ int clockid, ret;
+
+ for (clockid = CLOCK_REALTIME; clockid < NR_CLOCKIDS; clockid++) {
+
+ /* Skip cputime clockids since nanosleep won't increment cputime */
+ if (clockid == CLOCK_PROCESS_CPUTIME_ID ||
+ clockid == CLOCK_THREAD_CPUTIME_ID ||
+ clockid == CLOCK_HWSPECIFIC)
+ continue;
+
+ printf("nsleep latency %-26s ", clockstring(clockid));
+
+ length = 10;
+ while (length <= (NSEC_PER_SEC * 10)) {
+ ret = nanosleep_lat_test(clockid, length);
+ if (ret)
+ break;
+ length *= 100;
+
+ }
+
+ if (ret == UNSUPPORTED) {
+ printf("[UNSUPPORTED]\n");
+ continue;
+ }
+ if (ret < 0) {
+ printf("[FAILED]\n");
+ return ksft_exit_fail();
+ }
+ printf("[OK]\n");
+ }
+ return ksft_exit_pass();
+}
diff --git a/tools/testing/selftests/timers/posix_timers.c b/tools/testing/selftests/timers/posix_timers.c
index f87d970a485c..5a246a02dff3 100644
--- a/tools/testing/selftests/timers/posix_timers.c
+++ b/tools/testing/selftests/timers/posix_timers.c
@@ -35,10 +35,11 @@ static void user_loop(void)
static void kernel_loop(void)
{
void *addr = sbrk(0);
+ int err = 0;
- while (!done) {
- brk(addr + 4096);
- brk(addr);
+ while (!done && !err) {
+ err = brk(addr + 4096);
+ err |= brk(addr);
}
}
@@ -190,8 +191,6 @@ static int check_timer_create(int which)
int main(int argc, char **argv)
{
- int err;
-
printf("Testing posix timers. False negative may happen on CPU execution \n");
printf("based timers if other threads run on the CPU...\n");
diff --git a/tools/testing/selftests/timers/raw_skew.c b/tools/testing/selftests/timers/raw_skew.c
new file mode 100644
index 000000000000..30906bfd9c1b
--- /dev/null
+++ b/tools/testing/selftests/timers/raw_skew.c
@@ -0,0 +1,154 @@
+/* CLOCK_MONOTONIC vs CLOCK_MONOTONIC_RAW skew test
+ * by: john stultz (johnstul@us.ibm.com)
+ * John Stultz <john.stultz@linaro.org>
+ * (C) Copyright IBM 2012
+ * (C) Copyright Linaro Limited 2015
+ * Licensed under the GPLv2
+ *
+ * To build:
+ * $ gcc raw_skew.c -o raw_skew -lrt
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <sys/time.h>
+#include <sys/timex.h>
+#include <time.h>
+#ifdef KTEST
+#include "../kselftest.h"
+#else
+static inline int ksft_exit_pass(void)
+{
+ exit(0);
+}
+static inline int ksft_exit_fail(void)
+{
+ exit(1);
+}
+#endif
+
+
+#define CLOCK_MONOTONIC_RAW 4
+#define NSEC_PER_SEC 1000000000LL
+
+#define shift_right(x, s) ({ \
+ __typeof__(x) __x = (x); \
+ __typeof__(s) __s = (s); \
+ __x < 0 ? -(-__x >> __s) : __x >> __s; \
+})
+
+long long llabs(long long val)
+{
+ if (val < 0)
+ val = -val;
+ return val;
+}
+
+unsigned long long ts_to_nsec(struct timespec ts)
+{
+ return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
+}
+
+struct timespec nsec_to_ts(long long ns)
+{
+ struct timespec ts;
+
+ ts.tv_sec = ns/NSEC_PER_SEC;
+ ts.tv_nsec = ns%NSEC_PER_SEC;
+ return ts;
+}
+
+long long diff_timespec(struct timespec start, struct timespec end)
+{
+ long long start_ns, end_ns;
+
+ start_ns = ts_to_nsec(start);
+ end_ns = ts_to_nsec(end);
+ return end_ns - start_ns;
+}
+
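+/*
+ * Bracket a CLOCK_MONOTONIC_RAW read between two CLOCK_MONOTONIC reads
+ * and keep the tightest bracket out of three tries; the bracket's
+ * midpoint is then the best-matched monotonic timestamp for the raw
+ * sample.
+ */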
+void get_monotonic_and_raw(struct timespec *mon, struct timespec *raw)
+{
+ struct timespec start, mid, end;
+ long long diff = 0, tmp;
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ long long newdiff;
+
+ clock_gettime(CLOCK_MONOTONIC, &start);
+ clock_gettime(CLOCK_MONOTONIC_RAW, &mid);
+ clock_gettime(CLOCK_MONOTONIC, &end);
+
+ newdiff = diff_timespec(start, end);
+ if (diff == 0 || newdiff < diff) {
+ diff = newdiff;
+ *raw = mid;
+ tmp = (ts_to_nsec(start) + ts_to_nsec(end))/2;
+ *mon = nsec_to_ts(tmp);
+ }
+ }
+}
+
+int main(int argc, char **argv)
+{
+ struct timespec mon, raw, start, end;
+ long long delta1, delta2, interval, eppm, ppm;
+ struct timex tx1, tx2;
+
+ setbuf(stdout, NULL);
+
+ if (clock_gettime(CLOCK_MONOTONIC_RAW, &raw)) {
+ printf("ERR: NO CLOCK_MONOTONIC_RAW\n");
+ return -1;
+ }
+
+ tx1.modes = 0;
+ adjtimex(&tx1);
+ get_monotonic_and_raw(&mon, &raw);
+ start = mon;
+ delta1 = diff_timespec(mon, raw);
+
+ if (tx1.offset)
+ printf("WARNING: ADJ_OFFSET in progress, this will cause inaccurate results\n");
+
+ printf("Estimating clock drift: ");
+ sleep(120);
+
+ get_monotonic_and_raw(&mon, &raw);
+ end = mon;
+ tx2.modes = 0;
+ adjtimex(&tx2);
+ delta2 = diff_timespec(mon, raw);
+
+ interval = diff_timespec(start, end);
+
+ /* calculate measured ppm between MONOTONIC and MONOTONIC_RAW */
+ eppm = ((delta2-delta1)*NSEC_PER_SEC)/interval;
+ eppm = -eppm;
+ printf("%lld.%i(est)", eppm/1000, abs((int)(eppm%1000)));
+
+ /* Avg the two actual freq samples adjtimex gave us */
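+	/* tx.freq is ppm in 16.16 fixed point; multiplying by 1000 before
+	 * the shift keeps three decimal places of ppm. */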
+	ppm = ((long long)tx1.freq + tx2.freq) * 1000 / 2;
+ ppm = shift_right(ppm, 16);
+ printf(" %lld.%i(act)", ppm/1000, abs((int)(ppm%1000)));
+
+ if (llabs(eppm - ppm) > 1000) {
+ printf(" [FAILED]\n");
+ return ksft_exit_fail();
+ }
+ printf(" [OK]\n");
+ return ksft_exit_pass();
+}
diff --git a/tools/testing/selftests/timers/rtctest.c b/tools/testing/selftests/timers/rtctest.c
new file mode 100644
index 000000000000..d80ae852334d
--- /dev/null
+++ b/tools/testing/selftests/timers/rtctest.c
@@ -0,0 +1,271 @@
+/*
+ * Real Time Clock Driver Test/Example Program
+ *
+ * Compile with:
+ * gcc -s -Wall -Wstrict-prototypes rtctest.c -o rtctest
+ *
+ * Copyright (C) 1996, Paul Gortmaker.
+ *
+ * Released under the GNU General Public License, version 2,
+ * included herein by reference.
+ *
+ */
+
+#include <stdio.h>
+#include <linux/rtc.h>
+#include <sys/ioctl.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <errno.h>
+
+
+/*
+ * This expects the new RTC class driver framework, working with
+ * clocks that will often not be clones of what the PC-AT had.
+ * Use the command line to specify another RTC if you need one.
+ */
+static const char default_rtc[] = "/dev/rtc0";
+
+
+int main(int argc, char **argv)
+{
+ int i, fd, retval, irqcount = 0;
+ unsigned long tmp, data;
+ struct rtc_time rtc_tm;
+ const char *rtc = default_rtc;
+ struct timeval start, end, diff;
+
+ switch (argc) {
+ case 2:
+ rtc = argv[1];
+ /* FALLTHROUGH */
+ case 1:
+ break;
+ default:
+ fprintf(stderr, "usage: rtctest [rtcdev]\n");
+ return 1;
+ }
+
+ fd = open(rtc, O_RDONLY);
+
+ if (fd == -1) {
+ perror(rtc);
+ exit(errno);
+ }
+
+ fprintf(stderr, "\n\t\t\tRTC Driver Test Example.\n\n");
+
+ /* Turn on update interrupts (one per second) */
+ retval = ioctl(fd, RTC_UIE_ON, 0);
+ if (retval == -1) {
+ if (errno == ENOTTY) {
+ fprintf(stderr,
+ "\n...Update IRQs not supported.\n");
+ goto test_READ;
+ }
+ perror("RTC_UIE_ON ioctl");
+ exit(errno);
+ }
+
+ fprintf(stderr, "Counting 5 update (1/sec) interrupts from reading %s:",
+ rtc);
+ fflush(stderr);
+	for (i = 1; i < 6; i++) {
+ /* This read will block */
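+		/* The value read packs the interrupt count in the upper
+		 * bytes and the interrupt-type flags in the low byte (see
+		 * Documentation/rtc.txt). */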
+ retval = read(fd, &data, sizeof(unsigned long));
+ if (retval == -1) {
+ perror("read");
+ exit(errno);
+ }
+		fprintf(stderr, " %d", i);
+ fflush(stderr);
+ irqcount++;
+ }
+
+ fprintf(stderr, "\nAgain, from using select(2) on /dev/rtc:");
+ fflush(stderr);
+	for (i = 1; i < 6; i++) {
+ struct timeval tv = {5, 0}; /* 5 second timeout on select */
+ fd_set readfds;
+
+ FD_ZERO(&readfds);
+ FD_SET(fd, &readfds);
+ /* The select will wait until an RTC interrupt happens. */
+ retval = select(fd+1, &readfds, NULL, NULL, &tv);
+ if (retval == -1) {
+ perror("select");
+ exit(errno);
+ }
+ /* This read won't block unlike the select-less case above. */
+ retval = read(fd, &data, sizeof(unsigned long));
+ if (retval == -1) {
+ perror("read");
+ exit(errno);
+ }
+		fprintf(stderr, " %d", i);
+ fflush(stderr);
+ irqcount++;
+ }
+
+ /* Turn off update interrupts */
+ retval = ioctl(fd, RTC_UIE_OFF, 0);
+ if (retval == -1) {
+ perror("RTC_UIE_OFF ioctl");
+ exit(errno);
+ }
+
+test_READ:
+ /* Read the RTC time/date */
+ retval = ioctl(fd, RTC_RD_TIME, &rtc_tm);
+ if (retval == -1) {
+ perror("RTC_RD_TIME ioctl");
+ exit(errno);
+ }
+
+ fprintf(stderr, "\n\nCurrent RTC date/time is %d-%d-%d, %02d:%02d:%02d.\n",
+ rtc_tm.tm_mday, rtc_tm.tm_mon + 1, rtc_tm.tm_year + 1900,
+ rtc_tm.tm_hour, rtc_tm.tm_min, rtc_tm.tm_sec);
+
+ /* Set the alarm to 5 sec in the future, and check for rollover */
+ rtc_tm.tm_sec += 5;
+ if (rtc_tm.tm_sec >= 60) {
+ rtc_tm.tm_sec %= 60;
+ rtc_tm.tm_min++;
+ }
+ if (rtc_tm.tm_min == 60) {
+ rtc_tm.tm_min = 0;
+ rtc_tm.tm_hour++;
+ }
+ if (rtc_tm.tm_hour == 24)
+ rtc_tm.tm_hour = 0;
+
+ retval = ioctl(fd, RTC_ALM_SET, &rtc_tm);
+ if (retval == -1) {
+ if (errno == ENOTTY) {
+ fprintf(stderr,
+ "\n...Alarm IRQs not supported.\n");
+ goto test_PIE;
+ }
+ perror("RTC_ALM_SET ioctl");
+ exit(errno);
+ }
+
+ /* Read the current alarm settings */
+ retval = ioctl(fd, RTC_ALM_READ, &rtc_tm);
+ if (retval == -1) {
+ perror("RTC_ALM_READ ioctl");
+ exit(errno);
+ }
+
+ fprintf(stderr, "Alarm time now set to %02d:%02d:%02d.\n",
+ rtc_tm.tm_hour, rtc_tm.tm_min, rtc_tm.tm_sec);
+
+ /* Enable alarm interrupts */
+ retval = ioctl(fd, RTC_AIE_ON, 0);
+ if (retval == -1) {
+ perror("RTC_AIE_ON ioctl");
+ exit(errno);
+ }
+
+ fprintf(stderr, "Waiting 5 seconds for alarm...");
+ fflush(stderr);
+ /* This blocks until the alarm ring causes an interrupt */
+ retval = read(fd, &data, sizeof(unsigned long));
+ if (retval == -1) {
+ perror("read");
+ exit(errno);
+ }
+ irqcount++;
+ fprintf(stderr, " okay. Alarm rang.\n");
+
+ /* Disable alarm interrupts */
+ retval = ioctl(fd, RTC_AIE_OFF, 0);
+ if (retval == -1) {
+ perror("RTC_AIE_OFF ioctl");
+ exit(errno);
+ }
+
+test_PIE:
+ /* Read periodic IRQ rate */
+ retval = ioctl(fd, RTC_IRQP_READ, &tmp);
+ if (retval == -1) {
+ /* not all RTCs support periodic IRQs */
+ if (errno == ENOTTY) {
+ fprintf(stderr, "\nNo periodic IRQ support\n");
+ goto done;
+ }
+ perror("RTC_IRQP_READ ioctl");
+ exit(errno);
+ }
+ fprintf(stderr, "\nPeriodic IRQ rate is %ldHz.\n", tmp);
+
+ fprintf(stderr, "Counting 20 interrupts at:");
+ fflush(stderr);
+
+ /* The frequencies 128Hz, 256Hz, ... 8192Hz are only allowed for root. */
+	for (tmp = 2; tmp <= 64; tmp *= 2) {
+
+ retval = ioctl(fd, RTC_IRQP_SET, tmp);
+ if (retval == -1) {
+ /* not all RTCs can change their periodic IRQ rate */
+ if (errno == ENOTTY) {
+ fprintf(stderr,
+ "\n...Periodic IRQ rate is fixed\n");
+ goto done;
+ }
+ perror("RTC_IRQP_SET ioctl");
+ exit(errno);
+ }
+
+ fprintf(stderr, "\n%ldHz:\t", tmp);
+ fflush(stderr);
+
+ /* Enable periodic interrupts */
+ retval = ioctl(fd, RTC_PIE_ON, 0);
+ if (retval == -1) {
+ perror("RTC_PIE_ON ioctl");
+ exit(errno);
+ }
+
+		for (i = 1; i < 21; i++) {
+ gettimeofday(&start, NULL);
+ /* This blocks */
+ retval = read(fd, &data, sizeof(unsigned long));
+ if (retval == -1) {
+ perror("read");
+ exit(errno);
+ }
+ gettimeofday(&end, NULL);
+ timersub(&end, &start, &diff);
+ if (diff.tv_sec > 0 ||
+ diff.tv_usec > ((1000000L / tmp) * 1.10)) {
+ fprintf(stderr, "\nPIE delta error: %ld.%06ld should be close to 0.%06ld\n",
+ diff.tv_sec, diff.tv_usec,
+ (1000000L / tmp));
+ fflush(stdout);
+ exit(-1);
+ }
+
+			fprintf(stderr, " %d", i);
+ fflush(stderr);
+ irqcount++;
+ }
+
+ /* Disable periodic interrupts */
+ retval = ioctl(fd, RTC_PIE_OFF, 0);
+ if (retval == -1) {
+ perror("RTC_PIE_OFF ioctl");
+ exit(errno);
+ }
+ }
+
+done:
+ fprintf(stderr, "\n\n\t\t\t *** Test complete ***\n");
+
+ close(fd);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/timers/set-2038.c b/tools/testing/selftests/timers/set-2038.c
new file mode 100644
index 000000000000..c8a7e14446b1
--- /dev/null
+++ b/tools/testing/selftests/timers/set-2038.c
@@ -0,0 +1,144 @@
+/* Time bounds setting test
+ * by: john stultz (johnstul@us.ibm.com)
+ * (C) Copyright IBM 2012
+ * Licensed under the GPLv2
+ *
+ * NOTE: This is a meta-test which sets the time to edge cases then
+ * uses other tests to detect problems. Thus this test requires that
+ * the inconsistency-check and nanosleep tests be present in the same
+ * directory it is run from.
+ *
+ * To build:
+ * $ gcc set-2038.c -o set-2038 -lrt
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <time.h>
+#include <sys/time.h>
+#ifdef KTEST
+#include "../kselftest.h"
+#else
+static inline int ksft_exit_pass(void)
+{
+ exit(0);
+}
+static inline int ksft_exit_fail(void)
+{
+ exit(1);
+}
+#endif
+
+#define NSEC_PER_SEC 1000000000LL
+
+#define KTIME_MAX ((long long)~((unsigned long long)1 << 63))
+#define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
+
+#define YEAR_1901 (-0x7fffffffL)		/* minimum of 32bit time_t */
+#define YEAR_1970 1
+#define YEAR_2038 0x7fffffffL			/* overflows 32bit time_t */
+#define YEAR_2262 KTIME_SEC_MAX			/* overflows 64bit ktime_t */
+#define YEAR_MAX ((long long)((1ULL<<63)-1))	/* overflows 64bit time_t */
+
+int is32bits(void)
+{
+ return (sizeof(long) == 4);
+}
+
+int settime(long long time)
+{
+ struct timeval now;
+ int ret;
+
+ now.tv_sec = (time_t)time;
+ now.tv_usec = 0;
+
+ ret = settimeofday(&now, NULL);
+
+ printf("Setting time to 0x%lx: %d\n", (long)time, ret);
+ return ret;
+}
+
+int do_tests(void)
+{
+ int ret;
+
+ ret = system("date");
+ ret = system("./inconsistency-check -c 0 -t 20");
+ ret |= system("./nanosleep");
+ ret |= system("./nsleep-lat");
+ return ret;
+
+}
+
+int main(int argc, char *argv[])
+{
+ int ret = 0;
+ int opt, dangerous = 0;
+ time_t start;
+
+ /* Process arguments */
+	while ((opt = getopt(argc, argv, "d")) != -1) {
+		switch (opt) {
+		case 'd':
+			dangerous = 1;
+			break;
+		}
+	}
+
+ start = time(0);
+
+ /* First test that crazy values don't work */
+ if (!settime(YEAR_1901)) {
+ ret = -1;
+ goto out;
+ }
+ if (!settime(YEAR_MAX)) {
+ ret = -1;
+ goto out;
+ }
+ if (!is32bits() && !settime(YEAR_2262)) {
+ ret = -1;
+ goto out;
+ }
+
+ /* Now test behavior near edges */
+ settime(YEAR_1970);
+ ret = do_tests();
+ if (ret)
+ goto out;
+
+ settime(YEAR_2038 - 600);
+ ret = do_tests();
+ if (ret)
+ goto out;
+
+ /* The rest of the tests can blowup on 32bit systems */
+ if (is32bits() && !dangerous)
+ goto out;
+ /* Test rollover behavior 32bit edge */
+ settime(YEAR_2038 - 10);
+ ret = do_tests();
+ if (ret)
+ goto out;
+
+ settime(YEAR_2262 - 600);
+ ret = do_tests();
+
+out:
+ /* restore clock */
+ settime(start);
+ if (ret)
+ return ksft_exit_fail();
+ return ksft_exit_pass();
+}
diff --git a/tools/testing/selftests/timers/set-tai.c b/tools/testing/selftests/timers/set-tai.c
new file mode 100644
index 000000000000..dc88dbc8831f
--- /dev/null
+++ b/tools/testing/selftests/timers/set-tai.c
@@ -0,0 +1,79 @@
+/* Set tai offset
+ * by: John Stultz <john.stultz@linaro.org>
+ * (C) Copyright Linaro 2013
+ * Licensed under the GPLv2
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <time.h>
+#include <sys/time.h>
+#include <sys/timex.h>
+#include <string.h>
+#include <signal.h>
+#include <unistd.h>
+#ifdef KTEST
+#include "../kselftest.h"
+#else
+static inline int ksft_exit_pass(void)
+{
+ exit(0);
+}
+static inline int ksft_exit_fail(void)
+{
+ exit(1);
+}
+#endif
+
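+/* ADJ_TAI programs the kernel's TAI-UTC offset: the new offset goes in
+ * tx.constant on the way in and is reported back via tx.tai. */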
+int set_tai(int offset)
+{
+ struct timex tx;
+
+ memset(&tx, 0, sizeof(tx));
+
+ tx.modes = ADJ_TAI;
+ tx.constant = offset;
+
+ return adjtimex(&tx);
+}
+
+int get_tai(void)
+{
+ struct timex tx;
+
+ memset(&tx, 0, sizeof(tx));
+
+ adjtimex(&tx);
+ return tx.tai;
+}
+
+int main(int argc, char **argv)
+{
+ int i, ret;
+
+ ret = get_tai();
+ printf("tai offset started at %i\n", ret);
+
+ printf("Checking tai offsets can be properly set: ");
+ for (i = 1; i <= 60; i++) {
+ ret = set_tai(i);
+ ret = get_tai();
+ if (ret != i) {
+ printf("[FAILED] expected: %i got %i\n", i, ret);
+ return ksft_exit_fail();
+ }
+ }
+ printf("[OK]\n");
+ return ksft_exit_pass();
+}
diff --git a/tools/testing/selftests/timers/set-timer-lat.c b/tools/testing/selftests/timers/set-timer-lat.c
new file mode 100644
index 000000000000..4fc98c5b0899
--- /dev/null
+++ b/tools/testing/selftests/timers/set-timer-lat.c
@@ -0,0 +1,216 @@
+/* set_timer latency test
+ * John Stultz (john.stultz@linaro.org)
+ * (C) Copyright Linaro 2014
+ * Licensed under the GPLv2
+ *
+ * This test makes sure the set_timer api is correct
+ *
+ * To build:
+ * $ gcc set-timer-lat.c -o set-timer-lat -lrt
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+#include <stdio.h>
+#include <unistd.h>
+#include <time.h>
+#include <string.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <pthread.h>
+#ifdef KTEST
+#include "../kselftest.h"
+#else
+static inline int ksft_exit_pass(void)
+{
+ exit(0);
+}
+static inline int ksft_exit_fail(void)
+{
+ exit(1);
+}
+#endif
+
+#define CLOCK_REALTIME 0
+#define CLOCK_MONOTONIC 1
+#define CLOCK_PROCESS_CPUTIME_ID 2
+#define CLOCK_THREAD_CPUTIME_ID 3
+#define CLOCK_MONOTONIC_RAW 4
+#define CLOCK_REALTIME_COARSE 5
+#define CLOCK_MONOTONIC_COARSE 6
+#define CLOCK_BOOTTIME 7
+#define CLOCK_REALTIME_ALARM 8
+#define CLOCK_BOOTTIME_ALARM 9
+#define CLOCK_HWSPECIFIC 10
+#define CLOCK_TAI 11
+#define NR_CLOCKIDS 12
+
+
+#define NSEC_PER_SEC 1000000000ULL
+#define UNRESONABLE_LATENCY 40000000 /* 40ms in nanosecs */
+
+#define TIMER_SECS 1
+int alarmcount;
+int clock_id;
+struct timespec start_time;
+long long max_latency_ns;
+
+char *clockstring(int clockid)
+{
+ switch (clockid) {
+ case CLOCK_REALTIME:
+ return "CLOCK_REALTIME";
+ case CLOCK_MONOTONIC:
+ return "CLOCK_MONOTONIC";
+ case CLOCK_PROCESS_CPUTIME_ID:
+ return "CLOCK_PROCESS_CPUTIME_ID";
+ case CLOCK_THREAD_CPUTIME_ID:
+ return "CLOCK_THREAD_CPUTIME_ID";
+ case CLOCK_MONOTONIC_RAW:
+ return "CLOCK_MONOTONIC_RAW";
+ case CLOCK_REALTIME_COARSE:
+ return "CLOCK_REALTIME_COARSE";
+ case CLOCK_MONOTONIC_COARSE:
+ return "CLOCK_MONOTONIC_COARSE";
+ case CLOCK_BOOTTIME:
+ return "CLOCK_BOOTTIME";
+ case CLOCK_REALTIME_ALARM:
+ return "CLOCK_REALTIME_ALARM";
+ case CLOCK_BOOTTIME_ALARM:
+ return "CLOCK_BOOTTIME_ALARM";
+ case CLOCK_TAI:
+ return "CLOCK_TAI";
+	}
+ return "UNKNOWN_CLOCKID";
+}
+
+
+long long timespec_sub(struct timespec a, struct timespec b)
+{
+ long long ret = NSEC_PER_SEC * b.tv_sec + b.tv_nsec;
+
+ ret -= NSEC_PER_SEC * a.tv_sec + a.tv_nsec;
+ return ret;
+}
+
+
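+/*
+ * Signal handler: expiry N should land N * TIMER_SECS after the timer
+ * was armed, so subtracting that from the elapsed time gives the
+ * latency of this expiry; a negative delta means the timer fired
+ * early.
+ */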
+void sigalarm(int signo)
+{
+ long long delta_ns;
+ struct timespec ts;
+
+ clock_gettime(clock_id, &ts);
+ alarmcount++;
+
+ delta_ns = timespec_sub(start_time, ts);
+ delta_ns -= NSEC_PER_SEC * TIMER_SECS * alarmcount;
+
+ if (delta_ns < 0)
+ printf("%s timer fired early: FAIL\n", clockstring(clock_id));
+
+ if (delta_ns > max_latency_ns)
+ max_latency_ns = delta_ns;
+}
+
+int do_timer(int clock_id, int flags)
+{
+ struct sigevent se;
+ timer_t tm1;
+ struct itimerspec its1, its2;
+ int err;
+
+ /* Set up timer: */
+ memset(&se, 0, sizeof(se));
+ se.sigev_notify = SIGEV_SIGNAL;
+ se.sigev_signo = SIGRTMAX;
+ se.sigev_value.sival_int = 0;
+
+ max_latency_ns = 0;
+ alarmcount = 0;
+
+ err = timer_create(clock_id, &se, &tm1);
+ if (err) {
+ if ((clock_id == CLOCK_REALTIME_ALARM) ||
+ (clock_id == CLOCK_BOOTTIME_ALARM)) {
+ printf("%-22s %s missing CAP_WAKE_ALARM? : [UNSUPPORTED]\n",
+ clockstring(clock_id),
+ flags ? "ABSTIME":"RELTIME");
+ return 0;
+ }
+ printf("%s - timer_create() failed\n", clockstring(clock_id));
+ return -1;
+ }
+
+ clock_gettime(clock_id, &start_time);
+ if (flags) {
+ its1.it_value = start_time;
+ its1.it_value.tv_sec += TIMER_SECS;
+ } else {
+ its1.it_value.tv_sec = TIMER_SECS;
+ its1.it_value.tv_nsec = 0;
+ }
+ its1.it_interval.tv_sec = TIMER_SECS;
+ its1.it_interval.tv_nsec = 0;
+
+ err = timer_settime(tm1, flags, &its1, &its2);
+ if (err) {
+ printf("%s - timer_settime() failed\n", clockstring(clock_id));
+ return -1;
+ }
+
+ while (alarmcount < 5)
+ sleep(1);
+
+ printf("%-22s %s max latency: %10lld ns : ",
+ clockstring(clock_id),
+ flags ? "ABSTIME":"RELTIME",
+ max_latency_ns);
+
+ timer_delete(tm1);
+ if (max_latency_ns < UNRESONABLE_LATENCY) {
+ printf("[OK]\n");
+ return 0;
+ }
+ printf("[FAILED]\n");
+ return -1;
+}
+
+int main(void)
+{
+ struct sigaction act;
+ int signum = SIGRTMAX;
+ int ret = 0;
+
+ /* Set up signal handler: */
+ sigfillset(&act.sa_mask);
+ act.sa_flags = 0;
+ act.sa_handler = sigalarm;
+ sigaction(signum, &act, NULL);
+
+ printf("Setting timers for every %i seconds\n", TIMER_SECS);
+ for (clock_id = 0; clock_id < NR_CLOCKIDS; clock_id++) {
+
+ if ((clock_id == CLOCK_PROCESS_CPUTIME_ID) ||
+ (clock_id == CLOCK_THREAD_CPUTIME_ID) ||
+ (clock_id == CLOCK_MONOTONIC_RAW) ||
+ (clock_id == CLOCK_REALTIME_COARSE) ||
+ (clock_id == CLOCK_MONOTONIC_COARSE) ||
+ (clock_id == CLOCK_HWSPECIFIC))
+ continue;
+
+ ret |= do_timer(clock_id, TIMER_ABSTIME);
+ ret |= do_timer(clock_id, 0);
+ }
+ if (ret)
+ return ksft_exit_fail();
+ return ksft_exit_pass();
+}
diff --git a/tools/testing/selftests/timers/skew_consistency.c b/tools/testing/selftests/timers/skew_consistency.c
new file mode 100644
index 000000000000..5562f84ee07c
--- /dev/null
+++ b/tools/testing/selftests/timers/skew_consistency.c
@@ -0,0 +1,89 @@
+/* ADJ_FREQ Skew consistency test
+ * by: john stultz (johnstul@us.ibm.com)
+ * (C) Copyright IBM 2012
+ * Licensed under the GPLv2
+ *
+ * NOTE: This is a meta-test which cranks the ADJ_FREQ knob back
+ * and forth and watches for consistency problems. Thus this test requires
+ * that the inconsistency-check tests be present in the same directory it
+ * is run from.
+ *
+ * To build:
+ * $ gcc skew_consistency.c -o skew_consistency -lrt
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/time.h>
+#include <sys/timex.h>
+#include <time.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/wait.h>
+#ifdef KTEST
+#include "../kselftest.h"
+#else
+static inline int ksft_exit_pass(void)
+{
+ exit(0);
+}
+static inline int ksft_exit_fail(void)
+{
+ exit(1);
+}
+#endif
+
+#define NSEC_PER_SEC 1000000000LL
+
+int main(int argc, char **argv)
+{
+ struct timex tx;
+ int ret, ppm;
+ pid_t pid;
+
+
+	printf("Running Asynchronous Frequency Changing Tests...\n");
+
+ pid = fork();
+ if (!pid)
+ return system("./inconsistency-check -c 1 -t 600");
+
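+	/*
+	 * While the child validates CLOCK_MONOTONIC, flip the frequency
+	 * adjustment between +500ppm and -500ppm twice a second; the
+	 * clock must stay consistent through every slew.
+	 */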
+ ppm = 500;
+ ret = 0;
+
+ while (pid != waitpid(pid, &ret, WNOHANG)) {
+ ppm = -ppm;
+ tx.modes = ADJ_FREQUENCY;
+ tx.freq = ppm << 16;
+ adjtimex(&tx);
+ usleep(500000);
+ }
+
+ /* Set things back */
+ tx.modes = ADJ_FREQUENCY;
+	tx.freq = 0;
+ adjtimex(&tx);
+
+
+ if (ret) {
+ printf("[FAILED]\n");
+ return ksft_exit_fail();
+ }
+ printf("[OK]\n");
+ return ksft_exit_pass();
+}
diff --git a/tools/testing/selftests/timers/threadtest.c b/tools/testing/selftests/timers/threadtest.c
new file mode 100644
index 000000000000..e632e116f05e
--- /dev/null
+++ b/tools/testing/selftests/timers/threadtest.c
@@ -0,0 +1,204 @@
+/* threadtest.c
+ * by: john stultz (johnstul@us.ibm.com)
+ * (C) Copyright IBM 2004, 2005, 2006, 2012
+ * Licensed under the GPLv2
+ *
+ * To build:
+ * $ gcc threadtest.c -o threadtest -lrt
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <sys/time.h>
+#include <pthread.h>
+#ifdef KTEST
+#include "../kselftest.h"
+#else
+static inline int ksft_exit_pass(void)
+{
+ exit(0);
+}
+static inline int ksft_exit_fail(void)
+{
+ exit(1);
+}
+#endif
+
+
+/* serializes shared list access */
+pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
+/* serializes console output */
+pthread_mutex_t print_lock = PTHREAD_MUTEX_INITIALIZER;
+
+
+#define MAX_THREADS 128
+#define LISTSIZE 128
+
+int done = 0;
+
+struct timespec global_list[LISTSIZE];
+int listcount = 0;
+
+
+void checklist(struct timespec *list, int size)
+{
+ int i, j;
+ struct timespec *a, *b;
+
+ /* scan the list */
+ for (i = 0; i < size-1; i++) {
+ a = &list[i];
+ b = &list[i+1];
+
+ /* look for any time inconsistencies */
+		if ((b->tv_sec < a->tv_sec) ||
+		    ((b->tv_sec == a->tv_sec) &&
+		     (b->tv_nsec < a->tv_nsec))) {
+
+ /* flag other threads */
+ done = 1;
+
+ /*serialize printing to avoid junky output*/
+ pthread_mutex_lock(&print_lock);
+
+ /* dump the list */
+ printf("\n");
+ for (j = 0; j < size; j++) {
+ if (j == i)
+ printf("---------------\n");
+ printf("%lu:%lu\n", list[j].tv_sec, list[j].tv_nsec);
+ if (j == i+1)
+ printf("---------------\n");
+ }
+ printf("[FAILED]\n");
+
+ pthread_mutex_unlock(&print_lock);
+ }
+ }
+}
+
+/* Shared threads all fill one global list,
+ * each appending a timestamp while holding the lock.
+ * This stresses clock synchronization across cpus.
+ */
+void *shared_thread(void *arg)
+{
+ while (!done) {
+ /* protect the list */
+ pthread_mutex_lock(&list_lock);
+
+ /* see if we're ready to check the list */
+ if (listcount >= LISTSIZE) {
+ checklist(global_list, LISTSIZE);
+ listcount = 0;
+ }
+ clock_gettime(CLOCK_MONOTONIC, &global_list[listcount++]);
+
+ pthread_mutex_unlock(&list_lock);
+ }
+ return NULL;
+}
+
+
+/* Each independent thread fills in its own
+ * list. This stresses clock_gettime() lock contention.
+ */
+void *independent_thread(void *arg)
+{
+ struct timespec my_list[LISTSIZE];
+ int count;
+
+ while (!done) {
+ /* fill the list */
+ for (count = 0; count < LISTSIZE; count++)
+ clock_gettime(CLOCK_MONOTONIC, &my_list[count]);
+ checklist(my_list, LISTSIZE);
+ }
+ return NULL;
+}
+
+#define DEFAULT_THREAD_COUNT 8
+#define DEFAULT_RUNTIME 30
+
+int main(int argc, char **argv)
+{
+ int thread_count, i;
+ time_t start, now, runtime;
+ char buf[255];
+ pthread_t pth[MAX_THREADS];
+ int opt;
+ void *tret;
+ int ret = 0;
+ void *(*thread)(void *) = shared_thread;
+
+ thread_count = DEFAULT_THREAD_COUNT;
+ runtime = DEFAULT_RUNTIME;
+
+ /* Process arguments */
+ while ((opt = getopt(argc, argv, "t:n:i")) != -1) {
+ switch (opt) {
+ case 't':
+ runtime = atoi(optarg);
+ break;
+ case 'n':
+ thread_count = atoi(optarg);
+ break;
+ case 'i':
+ thread = independent_thread;
+ printf("using independent threads\n");
+ break;
+ default:
+ printf("Usage: %s [-t <secs>] [-n <numthreads>] [-i]\n", argv[0]);
+ printf(" -t: time to run\n");
+ printf(" -n: number of threads\n");
+ printf(" -i: use independent threads\n");
+ return -1;
+ }
+ }
+
+ if (thread_count > MAX_THREADS)
+ thread_count = MAX_THREADS;
+
+
+ setbuf(stdout, NULL);
+
+ start = time(0);
+ strftime(buf, 255, "%a, %d %b %Y %T %z", localtime(&start));
+ printf("%s\n", buf);
+ printf("Testing consistency with %i threads for %ld seconds: ", thread_count, runtime);
+
+ /* spawn */
+ for (i = 0; i < thread_count; i++)
+ pthread_create(&pth[i], 0, thread, 0);
+
+ while (time(&now) < start + runtime) {
+ sleep(1);
+ if (done) {
+ ret = 1;
+ strftime(buf, 255, "%a, %d %b %Y %T %z", localtime(&now));
+ printf("%s\n", buf);
+ goto out;
+ }
+ }
+ printf("[OK]\n");
+ done = 1;
+
+out:
+ /* wait */
+ for (i = 0; i < thread_count; i++)
+ pthread_join(pth[i], &tret);
+
+ /* die */
+ if (ret)
+ ksft_exit_fail();
+ return ksft_exit_pass();
+}
diff --git a/tools/testing/selftests/timers/valid-adjtimex.c b/tools/testing/selftests/timers/valid-adjtimex.c
new file mode 100644
index 000000000000..e86d937cc22c
--- /dev/null
+++ b/tools/testing/selftests/timers/valid-adjtimex.c
@@ -0,0 +1,202 @@
+/* valid adjtimex test
+ * by: John Stultz <john.stultz@linaro.org>
+ * (C) Copyright Linaro 2015
+ * Licensed under the GPLv2
+ *
+ * This test validates adjtimex interface with valid
+ * and invalid test data.
+ *
+ * Usage: valid-adjtimex
+ *
+ * To build:
+ * $ gcc valid-adjtimex.c -o valid-adjtimex -lrt
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <time.h>
+#include <sys/time.h>
+#include <sys/timex.h>
+#include <string.h>
+#include <signal.h>
+#include <unistd.h>
+#ifdef KTEST
+#include "../kselftest.h"
+#else
+static inline int ksft_exit_pass(void)
+{
+ exit(0);
+}
+static inline int ksft_exit_fail(void)
+{
+ exit(1);
+}
+#endif
+
+#define NSEC_PER_SEC 1000000000L
+
+/* clear NTP time_status & time_state */
+int clear_time_state(void)
+{
+ struct timex tx;
+ int ret;
+
+ tx.modes = ADJ_STATUS;
+ tx.status = 0;
+ ret = adjtimex(&tx);
+ return ret;
+}
+
+#define NUM_FREQ_VALID 32
+#define NUM_FREQ_OUTOFRANGE 4
+#define NUM_FREQ_INVALID 2
+
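+/* adjtimex frequency values are ppm in 16.16 fixed point. The kernel
+ * clamps adjustments to roughly +/-500ppm: in-range values must stick,
+ * out-of-range values must be accepted but clamped, and absurd ones
+ * (on 64bit) must be rejected outright. */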
+long valid_freq[NUM_FREQ_VALID] = {
+ -499<<16,
+ -450<<16,
+ -400<<16,
+ -350<<16,
+ -300<<16,
+ -250<<16,
+ -200<<16,
+ -150<<16,
+ -100<<16,
+ -75<<16,
+ -50<<16,
+ -25<<16,
+ -10<<16,
+ -5<<16,
+ -1<<16,
+ -1000,
+ 1<<16,
+ 5<<16,
+ 10<<16,
+ 25<<16,
+ 50<<16,
+ 75<<16,
+ 100<<16,
+ 150<<16,
+ 200<<16,
+ 250<<16,
+ 300<<16,
+ 350<<16,
+ 400<<16,
+ 450<<16,
+ 499<<16,
+};
+
+long outofrange_freq[NUM_FREQ_OUTOFRANGE] = {
+ -1000<<16,
+ -550<<16,
+ 550<<16,
+ 1000<<16,
+};
+
+#define LONG_MAX (~0UL>>1)
+#define LONG_MIN (-LONG_MAX - 1)
+
+long invalid_freq[NUM_FREQ_INVALID] = {
+ LONG_MAX,
+ LONG_MIN,
+};
+
+int validate_freq(void)
+{
+ struct timex tx;
+ int ret, pass = 0;
+ int i;
+
+ clear_time_state();
+
+ memset(&tx, 0, sizeof(struct timex));
+ /* Test that valid frequency values are accepted */
+
+ printf("Testing ADJ_FREQ... ");
+ for (i = 0; i < NUM_FREQ_VALID; i++) {
+ tx.modes = ADJ_FREQUENCY;
+ tx.freq = valid_freq[i];
+
+ ret = adjtimex(&tx);
+ if (ret < 0) {
+ printf("[FAIL]\n");
+ printf("Error: adjtimex(ADJ_FREQ, %ld - %ld ppm\n",
+ valid_freq[i], valid_freq[i]>>16);
+ pass = -1;
+ goto out;
+ }
+ tx.modes = 0;
+ ret = adjtimex(&tx);
+ if (tx.freq != valid_freq[i]) {
+ printf("Warning: freq value %ld not what we set it (%ld)!\n",
+ tx.freq, valid_freq[i]);
+ }
+ }
+ for (i = 0; i < NUM_FREQ_OUTOFRANGE; i++) {
+ tx.modes = ADJ_FREQUENCY;
+ tx.freq = outofrange_freq[i];
+
+ ret = adjtimex(&tx);
+ if (ret < 0) {
+ printf("[FAIL]\n");
+ printf("Error: adjtimex(ADJ_FREQ, %ld - %ld ppm\n",
+ outofrange_freq[i], outofrange_freq[i]>>16);
+ pass = -1;
+ goto out;
+ }
+ tx.modes = 0;
+ ret = adjtimex(&tx);
+ if (tx.freq == outofrange_freq[i]) {
+ printf("[FAIL]\n");
+ printf("ERROR: out of range value %ld actually set!\n",
+ tx.freq);
+ pass = -1;
+ goto out;
+ }
+ }
+
+ if (sizeof(long) == 8) { /* this case only applies to 64-bit systems */
+ for (i = 0; i < NUM_FREQ_INVALID; i++) {
+ tx.modes = ADJ_FREQUENCY;
+ tx.freq = invalid_freq[i];
+ ret = adjtimex(&tx);
+ if (ret >= 0) {
+ printf("[FAIL]\n");
+ printf("Error: No failure on invalid ADJ_FREQUENCY %ld\n",
+ invalid_freq[i]);
+ pass = -1;
+ goto out;
+ }
+ }
+ }
+
+ printf("[OK]\n");
+out:
+ /* reset freq to zero */
+ tx.modes = ADJ_FREQUENCY;
+ tx.freq = 0;
+ ret = adjtimex(&tx);
+
+ return pass;
+}
+
+int main(int argc, char **argv)
+{
+ if (validate_freq())
+ return ksft_exit_fail();
+
+ return ksft_exit_pass();
+}
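
The freq values exercised above are scaled ppm: adjtimex() interprets tx.freq as parts per million in 16.16 fixed point, which is why the valid range spans -499<<16 to 499<<16 and why ±550<<16 is already out of range. A minimal sketch of applying a frequency offset, with a hypothetical helper name:

#include <sys/timex.h>

/* Hypothetical helper: apply a frequency offset given in whole ppm. */
static int set_freq_ppm(long ppm)
{
	struct timex tx = { 0 };

	tx.modes = ADJ_FREQUENCY;
	tx.freq = ppm << 16;	/* 16.16 fixed point, as in valid_freq[] */
	return adjtimex(&tx);	/* clock state on success, -1 on error */
}
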
diff --git a/tools/testing/selftests/user/Makefile b/tools/testing/selftests/user/Makefile
index 12c9d15bab07..d401b63c5b1a 100644
--- a/tools/testing/selftests/user/Makefile
+++ b/tools/testing/selftests/user/Makefile
@@ -3,5 +3,6 @@
# No binaries, but make sure arg-less "make" doesn't trigger "run_tests"
all:
-run_tests: all
- ./test_user_copy.sh
+TEST_PROGS := test_user_copy.sh
+
+include ../lib.mk
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
index 077828c889f1..a5ce9534eb15 100644
--- a/tools/testing/selftests/vm/Makefile
+++ b/tools/testing/selftests/vm/Makefile
@@ -1,6 +1,5 @@
# Makefile for vm selftests
-CC = $(CROSS_COMPILE)gcc
CFLAGS = -Wall
BINARIES = hugepage-mmap hugepage-shm map_hugetlb thuge-gen hugetlbfstest
BINARIES += transhuge-stress
@@ -9,8 +8,10 @@ all: $(BINARIES)
%: %.c
$(CC) $(CFLAGS) -o $@ $^ -lrt
-run_tests: all
- @/bin/sh ./run_vmtests || (echo "vmtests: [FAIL]"; exit 1)
+TEST_PROGS := run_vmtests
+TEST_FILES := $(BINARIES)
+
+include ../lib.mk
clean:
$(RM) $(BINARIES)
diff --git a/tools/testing/selftests/vm/run_vmtests b/tools/testing/selftests/vm/run_vmtests
index c87b6812300d..c87b6812300d 100644..100755
--- a/tools/testing/selftests/vm/run_vmtests
+++ b/tools/testing/selftests/vm/run_vmtests
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 6e54f3542126..98c95f2fcba4 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -85,13 +85,22 @@ static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
+/*
+ * Work function for handling the backup timer that we schedule when a vcpu is
+ * no longer running, but had a timer programmed to fire in the future.
+ */
static void kvm_timer_inject_irq_work(struct work_struct *work)
{
struct kvm_vcpu *vcpu;
vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
vcpu->arch.timer_cpu.armed = false;
- kvm_timer_inject_irq(vcpu);
+
+ /*
+ * If the vcpu is blocked we want to wake it up so that it will see
+ * the timer has expired when entering the guest.
+ */
+ kvm_vcpu_kick(vcpu);
}
static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
@@ -102,6 +111,21 @@ static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
return HRTIMER_NORESTART;
}
+bool kvm_timer_should_fire(struct kvm_vcpu *vcpu)
+{
+ struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+ cycle_t cval, now;
+
+ if ((timer->cntv_ctl & ARCH_TIMER_CTRL_IT_MASK) ||
+ !(timer->cntv_ctl & ARCH_TIMER_CTRL_ENABLE))
+ return false;
+
+ cval = timer->cntv_cval;
+ now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
+
+ return cval <= now;
+}
+
/**
* kvm_timer_flush_hwstate - prepare to move the virt timer to the cpu
* @vcpu: The vcpu pointer
@@ -119,6 +143,13 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
* populate the CPU timer again.
*/
timer_disarm(timer);
+
+ /*
+ * If the timer expired while we were not scheduled, now is the time
+ * to inject it.
+ */
+ if (kvm_timer_should_fire(vcpu))
+ kvm_timer_inject_irq(vcpu);
}
/**
@@ -134,16 +165,9 @@ void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
cycle_t cval, now;
u64 ns;
- if ((timer->cntv_ctl & ARCH_TIMER_CTRL_IT_MASK) ||
- !(timer->cntv_ctl & ARCH_TIMER_CTRL_ENABLE))
- return;
-
- cval = timer->cntv_cval;
- now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
-
BUG_ON(timer_is_armed(timer));
- if (cval <= now) {
+ if (kvm_timer_should_fire(vcpu)) {
/*
* Timer has already expired while we were not
* looking. Inject the interrupt and carry on.
@@ -152,6 +176,9 @@ void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
return;
}
+ cval = timer->cntv_cval;
+ now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
+
ns = cyclecounter_cyc2ns(timecounter->cc, cval - now, timecounter->mask,
&timecounter->frac);
timer_arm(timer, ns);
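
The arch_timer.c change above centralises the expiry test in kvm_timer_should_fire(): the guest's virtual count is the physical counter minus cntvoff, and the timer should fire once that count reaches CVAL while the timer is enabled and its interrupt unmasked. Restated as a standalone predicate (illustrative, using the architectural bit positions for ENABLE and IMASK):

/* Illustrative restatement of the condition checked above. */
static bool vtimer_expired(u32 cntv_ctl, u64 cntv_cval,
			   u64 physcnt, u64 cntvoff)
{
	bool masked  = cntv_ctl & (1U << 1);	/* CNTV_CTL.IMASK */
	bool enabled = cntv_ctl & (1U << 0);	/* CNTV_CTL.ENABLE */

	return enabled && !masked && cntv_cval <= physcnt - cntvoff;
}
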
diff --git a/virt/kvm/arm/vgic-v2-emul.c b/virt/kvm/arm/vgic-v2-emul.c
index 19c6210f02cf..13907970d11c 100644
--- a/virt/kvm/arm/vgic-v2-emul.c
+++ b/virt/kvm/arm/vgic-v2-emul.c
@@ -107,6 +107,22 @@ static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
vcpu->vcpu_id);
}
+static bool handle_mmio_set_active_reg(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ return vgic_handle_set_active_reg(vcpu->kvm, mmio, offset,
+ vcpu->vcpu_id);
+}
+
+static bool handle_mmio_clear_active_reg(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ return vgic_handle_clear_active_reg(vcpu->kvm, mmio, offset,
+ vcpu->vcpu_id);
+}
+
static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
struct kvm_exit_mmio *mmio,
phys_addr_t offset)
@@ -303,7 +319,7 @@ static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu,
return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false);
}
-static const struct kvm_mmio_range vgic_dist_ranges[] = {
+static const struct vgic_io_range vgic_dist_ranges[] = {
{
.base = GIC_DIST_CTRL,
.len = 12,
@@ -344,13 +360,13 @@ static const struct kvm_mmio_range vgic_dist_ranges[] = {
.base = GIC_DIST_ACTIVE_SET,
.len = VGIC_MAX_IRQS / 8,
.bits_per_irq = 1,
- .handle_mmio = handle_mmio_raz_wi,
+ .handle_mmio = handle_mmio_set_active_reg,
},
{
.base = GIC_DIST_ACTIVE_CLEAR,
.len = VGIC_MAX_IRQS / 8,
.bits_per_irq = 1,
- .handle_mmio = handle_mmio_raz_wi,
+ .handle_mmio = handle_mmio_clear_active_reg,
},
{
.base = GIC_DIST_PRI,
@@ -388,24 +404,6 @@ static const struct kvm_mmio_range vgic_dist_ranges[] = {
{}
};
-static bool vgic_v2_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
- struct kvm_exit_mmio *mmio)
-{
- unsigned long base = vcpu->kvm->arch.vgic.vgic_dist_base;
-
- if (!is_in_range(mmio->phys_addr, mmio->len, base,
- KVM_VGIC_V2_DIST_SIZE))
- return false;
-
- /* GICv2 does not support accesses wider than 32 bits */
- if (mmio->len > 4) {
- kvm_inject_dabt(vcpu, mmio->phys_addr);
- return true;
- }
-
- return vgic_handle_mmio_range(vcpu, run, mmio, vgic_dist_ranges, base);
-}
-
static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
{
struct kvm *kvm = vcpu->kvm;
@@ -490,6 +488,7 @@ static bool vgic_v2_queue_sgi(struct kvm_vcpu *vcpu, int irq)
static int vgic_v2_map_resources(struct kvm *kvm,
const struct vgic_params *params)
{
+ struct vgic_dist *dist = &kvm->arch.vgic;
int ret = 0;
if (!irqchip_in_kernel(kvm))
@@ -500,13 +499,17 @@ static int vgic_v2_map_resources(struct kvm *kvm,
if (vgic_ready(kvm))
goto out;
- if (IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_dist_base) ||
- IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_cpu_base)) {
+ if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
+ IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) {
kvm_err("Need to set vgic cpu and dist addresses first\n");
ret = -ENXIO;
goto out;
}
+ vgic_register_kvm_io_dev(kvm, dist->vgic_dist_base,
+ KVM_VGIC_V2_DIST_SIZE,
+ vgic_dist_ranges, -1, &dist->dist_iodev);
+
/*
* Initialize the vgic if this hasn't already been done on demand by
* accessing the vgic state from userspace.
@@ -514,18 +517,23 @@ static int vgic_v2_map_resources(struct kvm *kvm,
ret = vgic_init(kvm);
if (ret) {
kvm_err("Unable to allocate maps\n");
- goto out;
+ goto out_unregister;
}
- ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base,
+ ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
params->vcpu_base, KVM_VGIC_V2_CPU_SIZE,
true);
if (ret) {
kvm_err("Unable to remap VGIC CPU to VCPU\n");
- goto out;
+ goto out_unregister;
}
- kvm->arch.vgic.ready = true;
+ dist->ready = true;
+ goto out;
+
+out_unregister:
+ kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dist->dist_iodev.dev);
+
out:
if (ret)
kvm_vgic_destroy(kvm);
@@ -554,7 +562,6 @@ void vgic_v2_init_emulation(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
- dist->vm_ops.handle_mmio = vgic_v2_handle_mmio;
dist->vm_ops.queue_sgi = vgic_v2_queue_sgi;
dist->vm_ops.add_sgi_source = vgic_v2_add_sgi_source;
dist->vm_ops.init_model = vgic_v2_init_model;
@@ -631,7 +638,7 @@ static bool handle_cpu_mmio_ident(struct kvm_vcpu *vcpu,
* CPU Interface Register accesses - these are not accessed by the VM, but by
* user space for saving and restoring VGIC state.
*/
-static const struct kvm_mmio_range vgic_cpu_ranges[] = {
+static const struct vgic_io_range vgic_cpu_ranges[] = {
{
.base = GIC_CPU_CTRL,
.len = 12,
@@ -658,12 +665,13 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
struct kvm_device_attr *attr,
u32 *reg, bool is_write)
{
- const struct kvm_mmio_range *r = NULL, *ranges;
+ const struct vgic_io_range *r = NULL, *ranges;
phys_addr_t offset;
int ret, cpuid, c;
struct kvm_vcpu *vcpu, *tmp_vcpu;
struct vgic_dist *vgic;
struct kvm_exit_mmio mmio;
+ u32 data;
offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
@@ -685,6 +693,7 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
mmio.len = 4;
mmio.is_write = is_write;
+ mmio.data = &data;
if (is_write)
mmio_data_write(&mmio, ~0, *reg);
switch (attr->group) {
@@ -699,7 +708,7 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
default:
BUG();
}
- r = vgic_find_range(ranges, &mmio, offset);
+ r = vgic_find_range(ranges, 4, offset);
if (unlikely(!r || !r->handle_mmio)) {
ret = -ENXIO;
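
In the vgic_io_range tables above, bits_per_irq is what lets the common dispatcher map a byte offset inside a banked register block back to an interrupt number: with N bits per interrupt, the first IRQ described at a given offset is offset * 8 / N. A hypothetical helper mirroring the computation vgic_validate_access() performs:

/* Hypothetical: which IRQ the first bit at 'offset' refers to. */
static int offset_to_first_irq(unsigned long offset, int bits_per_irq)
{
	if (!bits_per_irq)
		return -1;	/* register is not banked per IRQ */
	return offset * 8 / bits_per_irq;
}
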
diff --git a/virt/kvm/arm/vgic-v3-emul.c b/virt/kvm/arm/vgic-v3-emul.c
index b3f154631515..e9c3a7a83833 100644
--- a/virt/kvm/arm/vgic-v3-emul.c
+++ b/virt/kvm/arm/vgic-v3-emul.c
@@ -340,7 +340,7 @@ static bool handle_mmio_idregs(struct kvm_vcpu *vcpu,
return false;
}
-static const struct kvm_mmio_range vgic_v3_dist_ranges[] = {
+static const struct vgic_io_range vgic_v3_dist_ranges[] = {
{
.base = GICD_CTLR,
.len = 0x04,
@@ -502,6 +502,43 @@ static const struct kvm_mmio_range vgic_v3_dist_ranges[] = {
{},
};
+static bool handle_mmio_ctlr_redist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ /* since we don't support LPIs, this register is zero for now */
+ vgic_reg_access(mmio, NULL, offset,
+ ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+ return false;
+}
+
+static bool handle_mmio_typer_redist(struct kvm_vcpu *vcpu,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset)
+{
+ u32 reg;
+ u64 mpidr;
+ struct kvm_vcpu *redist_vcpu = mmio->private;
+ int target_vcpu_id = redist_vcpu->vcpu_id;
+
+ /* the upper 32 bits contain the affinity value */
+ if ((offset & ~3) == 4) {
+ mpidr = kvm_vcpu_get_mpidr_aff(redist_vcpu);
+ reg = compress_mpidr(mpidr);
+
+ vgic_reg_access(mmio, &reg, offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
+ return false;
+ }
+
+ reg = redist_vcpu->vcpu_id << 8;
+ if (target_vcpu_id == atomic_read(&vcpu->kvm->online_vcpus) - 1)
+ reg |= GICR_TYPER_LAST;
+ vgic_reg_access(mmio, &reg, offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
+ return false;
+}
+
static bool handle_mmio_set_enable_reg_redist(struct kvm_vcpu *vcpu,
struct kvm_exit_mmio *mmio,
phys_addr_t offset)
@@ -570,186 +607,107 @@ static bool handle_mmio_cfg_reg_redist(struct kvm_vcpu *vcpu,
return vgic_handle_cfg_reg(reg, mmio, offset);
}
-static const struct kvm_mmio_range vgic_redist_sgi_ranges[] = {
+#define SGI_base(x) ((x) + SZ_64K)
+
+static const struct vgic_io_range vgic_redist_ranges[] = {
+ {
+ .base = GICR_CTLR,
+ .len = 0x04,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_ctlr_redist,
+ },
+ {
+ .base = GICR_TYPER,
+ .len = 0x08,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_typer_redist,
+ },
+ {
+ .base = GICR_IIDR,
+ .len = 0x04,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_iidr,
+ },
+ {
+ .base = GICR_WAKER,
+ .len = 0x04,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_raz_wi,
+ },
{
- .base = GICR_IGROUPR0,
+ .base = GICR_IDREGS,
+ .len = 0x30,
+ .bits_per_irq = 0,
+ .handle_mmio = handle_mmio_idregs,
+ },
+ {
+ .base = SGI_base(GICR_IGROUPR0),
.len = 0x04,
.bits_per_irq = 1,
.handle_mmio = handle_mmio_rao_wi,
},
{
- .base = GICR_ISENABLER0,
+ .base = SGI_base(GICR_ISENABLER0),
.len = 0x04,
.bits_per_irq = 1,
.handle_mmio = handle_mmio_set_enable_reg_redist,
},
{
- .base = GICR_ICENABLER0,
+ .base = SGI_base(GICR_ICENABLER0),
.len = 0x04,
.bits_per_irq = 1,
.handle_mmio = handle_mmio_clear_enable_reg_redist,
},
{
- .base = GICR_ISPENDR0,
+ .base = SGI_base(GICR_ISPENDR0),
.len = 0x04,
.bits_per_irq = 1,
.handle_mmio = handle_mmio_set_pending_reg_redist,
},
{
- .base = GICR_ICPENDR0,
+ .base = SGI_base(GICR_ICPENDR0),
.len = 0x04,
.bits_per_irq = 1,
.handle_mmio = handle_mmio_clear_pending_reg_redist,
},
{
- .base = GICR_ISACTIVER0,
+ .base = SGI_base(GICR_ISACTIVER0),
.len = 0x04,
.bits_per_irq = 1,
.handle_mmio = handle_mmio_raz_wi,
},
{
- .base = GICR_ICACTIVER0,
+ .base = SGI_base(GICR_ICACTIVER0),
.len = 0x04,
.bits_per_irq = 1,
.handle_mmio = handle_mmio_raz_wi,
},
{
- .base = GICR_IPRIORITYR0,
+ .base = SGI_base(GICR_IPRIORITYR0),
.len = 0x20,
.bits_per_irq = 8,
.handle_mmio = handle_mmio_priority_reg_redist,
},
{
- .base = GICR_ICFGR0,
+ .base = SGI_base(GICR_ICFGR0),
.len = 0x08,
.bits_per_irq = 2,
.handle_mmio = handle_mmio_cfg_reg_redist,
},
{
- .base = GICR_IGRPMODR0,
+ .base = SGI_base(GICR_IGRPMODR0),
.len = 0x04,
.bits_per_irq = 1,
.handle_mmio = handle_mmio_raz_wi,
},
{
- .base = GICR_NSACR,
+ .base = SGI_base(GICR_NSACR),
.len = 0x04,
.handle_mmio = handle_mmio_raz_wi,
},
{},
};
-static bool handle_mmio_ctlr_redist(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- /* since we don't support LPIs, this register is zero for now */
- vgic_reg_access(mmio, NULL, offset,
- ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
- return false;
-}
-
-static bool handle_mmio_typer_redist(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- u32 reg;
- u64 mpidr;
- struct kvm_vcpu *redist_vcpu = mmio->private;
- int target_vcpu_id = redist_vcpu->vcpu_id;
-
- /* the upper 32 bits contain the affinity value */
- if ((offset & ~3) == 4) {
- mpidr = kvm_vcpu_get_mpidr_aff(redist_vcpu);
- reg = compress_mpidr(mpidr);
-
- vgic_reg_access(mmio, &reg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
- return false;
- }
-
- reg = redist_vcpu->vcpu_id << 8;
- if (target_vcpu_id == atomic_read(&vcpu->kvm->online_vcpus) - 1)
- reg |= GICR_TYPER_LAST;
- vgic_reg_access(mmio, &reg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
- return false;
-}
-
-static const struct kvm_mmio_range vgic_redist_ranges[] = {
- {
- .base = GICR_CTLR,
- .len = 0x04,
- .bits_per_irq = 0,
- .handle_mmio = handle_mmio_ctlr_redist,
- },
- {
- .base = GICR_TYPER,
- .len = 0x08,
- .bits_per_irq = 0,
- .handle_mmio = handle_mmio_typer_redist,
- },
- {
- .base = GICR_IIDR,
- .len = 0x04,
- .bits_per_irq = 0,
- .handle_mmio = handle_mmio_iidr,
- },
- {
- .base = GICR_WAKER,
- .len = 0x04,
- .bits_per_irq = 0,
- .handle_mmio = handle_mmio_raz_wi,
- },
- {
- .base = GICR_IDREGS,
- .len = 0x30,
- .bits_per_irq = 0,
- .handle_mmio = handle_mmio_idregs,
- },
- {},
-};
-
-/*
- * This function splits accesses between the distributor and the two
- * redistributor parts (private/SPI). As each redistributor is accessible
- * from any CPU, we have to determine the affected VCPU by taking the faulting
- * address into account. We then pass this VCPU to the handler function via
- * the private parameter.
- */
-#define SGI_BASE_OFFSET SZ_64K
-static bool vgic_v3_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
- struct kvm_exit_mmio *mmio)
-{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- unsigned long dbase = dist->vgic_dist_base;
- unsigned long rdbase = dist->vgic_redist_base;
- int nrcpus = atomic_read(&vcpu->kvm->online_vcpus);
- int vcpu_id;
- const struct kvm_mmio_range *mmio_range;
-
- if (is_in_range(mmio->phys_addr, mmio->len, dbase, GIC_V3_DIST_SIZE)) {
- return vgic_handle_mmio_range(vcpu, run, mmio,
- vgic_v3_dist_ranges, dbase);
- }
-
- if (!is_in_range(mmio->phys_addr, mmio->len, rdbase,
- GIC_V3_REDIST_SIZE * nrcpus))
- return false;
-
- vcpu_id = (mmio->phys_addr - rdbase) / GIC_V3_REDIST_SIZE;
- rdbase += (vcpu_id * GIC_V3_REDIST_SIZE);
- mmio->private = kvm_get_vcpu(vcpu->kvm, vcpu_id);
-
- if (mmio->phys_addr >= rdbase + SGI_BASE_OFFSET) {
- rdbase += SGI_BASE_OFFSET;
- mmio_range = vgic_redist_sgi_ranges;
- } else {
- mmio_range = vgic_redist_ranges;
- }
- return vgic_handle_mmio_range(vcpu, run, mmio, mmio_range, rdbase);
-}
-
static bool vgic_v3_queue_sgi(struct kvm_vcpu *vcpu, int irq)
{
if (vgic_queue_irq(vcpu, 0, irq)) {
@@ -766,6 +724,9 @@ static int vgic_v3_map_resources(struct kvm *kvm,
{
int ret = 0;
struct vgic_dist *dist = &kvm->arch.vgic;
+ gpa_t rdbase = dist->vgic_redist_base;
+ struct vgic_io_device *iodevs = NULL;
+ int i;
if (!irqchip_in_kernel(kvm))
return 0;
@@ -791,7 +752,41 @@ static int vgic_v3_map_resources(struct kvm *kvm,
goto out;
}
- kvm->arch.vgic.ready = true;
+ ret = vgic_register_kvm_io_dev(kvm, dist->vgic_dist_base,
+ GIC_V3_DIST_SIZE, vgic_v3_dist_ranges,
+ -1, &dist->dist_iodev);
+ if (ret)
+ goto out;
+
+ iodevs = kcalloc(dist->nr_cpus, sizeof(iodevs[0]), GFP_KERNEL);
+ if (!iodevs) {
+ ret = -ENOMEM;
+ goto out_unregister;
+ }
+
+ for (i = 0; i < dist->nr_cpus; i++) {
+ ret = vgic_register_kvm_io_dev(kvm, rdbase,
+ SZ_128K, vgic_redist_ranges,
+ i, &iodevs[i]);
+ if (ret)
+ goto out_unregister;
+ rdbase += GIC_V3_REDIST_SIZE;
+ }
+
+ dist->redist_iodevs = iodevs;
+ dist->ready = true;
+ goto out;
+
+out_unregister:
+ kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dist->dist_iodev.dev);
+ if (iodevs) {
+ for (i = 0; i < dist->nr_cpus; i++) {
+ if (iodevs[i].dev.ops)
+ kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS,
+ &iodevs[i].dev);
+ }
+ }
+
out:
if (ret)
kvm_vgic_destroy(kvm);
@@ -832,7 +827,6 @@ void vgic_v3_init_emulation(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
- dist->vm_ops.handle_mmio = vgic_v3_handle_mmio;
dist->vm_ops.queue_sgi = vgic_v3_queue_sgi;
dist->vm_ops.add_sgi_source = vgic_v3_add_sgi_source;
dist->vm_ops.init_model = vgic_v3_init_model;
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index c9f60f524588..8d550ff14700 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -31,6 +31,9 @@
#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
+#include <trace/events/kvm.h>
+#include <asm/kvm.h>
+#include <kvm/iodev.h>
/*
* How the whole thing works (courtesy of Christoffer Dall):
@@ -263,6 +266,13 @@ static int vgic_irq_is_queued(struct kvm_vcpu *vcpu, int irq)
return vgic_bitmap_get_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq);
}
+static int vgic_irq_is_active(struct kvm_vcpu *vcpu, int irq)
+{
+ struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
+ return vgic_bitmap_get_irq_val(&dist->irq_active, vcpu->vcpu_id, irq);
+}
+
static void vgic_irq_set_queued(struct kvm_vcpu *vcpu, int irq)
{
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
@@ -277,6 +287,20 @@ static void vgic_irq_clear_queued(struct kvm_vcpu *vcpu, int irq)
vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 0);
}
+static void vgic_irq_set_active(struct kvm_vcpu *vcpu, int irq)
+{
+ struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
+ vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 1);
+}
+
+static void vgic_irq_clear_active(struct kvm_vcpu *vcpu, int irq)
+{
+ struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
+ vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 0);
+}
+
static int vgic_dist_irq_get_level(struct kvm_vcpu *vcpu, int irq)
{
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
@@ -520,6 +544,44 @@ bool vgic_handle_clear_pending_reg(struct kvm *kvm,
return false;
}
+bool vgic_handle_set_active_reg(struct kvm *kvm,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset, int vcpu_id)
+{
+ u32 *reg;
+ struct vgic_dist *dist = &kvm->arch.vgic;
+
+ reg = vgic_bitmap_get_reg(&dist->irq_active, vcpu_id, offset);
+ vgic_reg_access(mmio, reg, offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
+
+ if (mmio->is_write) {
+ vgic_update_state(kvm);
+ return true;
+ }
+
+ return false;
+}
+
+bool vgic_handle_clear_active_reg(struct kvm *kvm,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset, int vcpu_id)
+{
+ u32 *reg;
+ struct vgic_dist *dist = &kvm->arch.vgic;
+
+ reg = vgic_bitmap_get_reg(&dist->irq_active, vcpu_id, offset);
+ vgic_reg_access(mmio, reg, offset,
+ ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
+
+ if (mmio->is_write) {
+ vgic_update_state(kvm);
+ return true;
+ }
+
+ return false;
+}
+
static u32 vgic_cfg_expand(u16 val)
{
u32 res = 0;
@@ -588,16 +650,12 @@ bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
}
/**
- * vgic_unqueue_irqs - move pending IRQs from LRs to the distributor
+ * vgic_unqueue_irqs - move pending/active IRQs from LRs to the distributor
* @vgic_cpu: Pointer to the vgic_cpu struct holding the LRs
*
- * Move any pending IRQs that have already been assigned to LRs back to the
+ * Move any IRQs that have already been assigned to LRs back to the
* emulated distributor state so that the complete emulated state can be read
* from the main emulation structures without investigating the LRs.
- *
- * Note that IRQs in the active state in the LRs get their pending state moved
- * to the distributor but the active state stays in the LRs, because we don't
- * track the active state on the distributor side.
*/
void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
{
@@ -613,12 +671,22 @@ void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
* 01: pending
* 10: active
* 11: pending and active
- *
- * If the LR holds only an active interrupt (not pending) then
- * just leave it alone.
*/
- if ((lr.state & LR_STATE_MASK) == LR_STATE_ACTIVE)
- continue;
+ BUG_ON(!(lr.state & LR_STATE_MASK));
+
+ /* Reestablish SGI source for pending and active IRQs */
+ if (lr.irq < VGIC_NR_SGIS)
+ add_sgi_source(vcpu, lr.irq, lr.source);
+
+ /*
+ * If the LR holds an active (10) or a pending and active (11)
+ * interrupt then move the active state to the
+ * distributor tracking bit.
+ */
+ if (lr.state & LR_STATE_ACTIVE) {
+ vgic_irq_set_active(vcpu, lr.irq);
+ lr.state &= ~LR_STATE_ACTIVE;
+ }
/*
* Reestablish the pending state on the distributor and the
@@ -626,21 +694,19 @@ void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
* is fine, then we are only setting a few bits that were
* already set.
*/
- vgic_dist_irq_set_pending(vcpu, lr.irq);
- if (lr.irq < VGIC_NR_SGIS)
- add_sgi_source(vcpu, lr.irq, lr.source);
- lr.state &= ~LR_STATE_PENDING;
+ if (lr.state & LR_STATE_PENDING) {
+ vgic_dist_irq_set_pending(vcpu, lr.irq);
+ lr.state &= ~LR_STATE_PENDING;
+ }
+
vgic_set_lr(vcpu, i, lr);
/*
- * If there's no state left on the LR (it could still be
- * active), then the LR does not hold any useful info and can
- * be marked as free for other use.
+ * Mark the LR as free for other use.
*/
- if (!(lr.state & LR_STATE_MASK)) {
- vgic_retire_lr(i, lr.irq, vcpu);
- vgic_irq_clear_queued(vcpu, lr.irq);
- }
+ BUG_ON(lr.state & LR_STATE_MASK);
+ vgic_retire_lr(i, lr.irq, vcpu);
+ vgic_irq_clear_queued(vcpu, lr.irq);
/* Finally update the VGIC state. */
vgic_update_state(vcpu->kvm);
@@ -648,24 +714,21 @@ void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
}
const
-struct kvm_mmio_range *vgic_find_range(const struct kvm_mmio_range *ranges,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- const struct kvm_mmio_range *r = ranges;
-
- while (r->len) {
- if (offset >= r->base &&
- (offset + mmio->len) <= (r->base + r->len))
- return r;
- r++;
+struct vgic_io_range *vgic_find_range(const struct vgic_io_range *ranges,
+ int len, gpa_t offset)
+{
+ while (ranges->len) {
+ if (offset >= ranges->base &&
+ (offset + len) <= (ranges->base + ranges->len))
+ return ranges;
+ ranges++;
}
return NULL;
}
static bool vgic_validate_access(const struct vgic_dist *dist,
- const struct kvm_mmio_range *range,
+ const struct vgic_io_range *range,
unsigned long offset)
{
int irq;
@@ -693,9 +756,8 @@ static bool vgic_validate_access(const struct vgic_dist *dist,
static bool call_range_handler(struct kvm_vcpu *vcpu,
struct kvm_exit_mmio *mmio,
unsigned long offset,
- const struct kvm_mmio_range *range)
+ const struct vgic_io_range *range)
{
- u32 *data32 = (void *)mmio->data;
struct kvm_exit_mmio mmio32;
bool ret;
@@ -712,91 +774,142 @@ static bool call_range_handler(struct kvm_vcpu *vcpu,
mmio32.private = mmio->private;
mmio32.phys_addr = mmio->phys_addr + 4;
- if (mmio->is_write)
- *(u32 *)mmio32.data = data32[1];
+ mmio32.data = &((u32 *)mmio->data)[1];
ret = range->handle_mmio(vcpu, &mmio32, offset + 4);
- if (!mmio->is_write)
- data32[1] = *(u32 *)mmio32.data;
mmio32.phys_addr = mmio->phys_addr;
- if (mmio->is_write)
- *(u32 *)mmio32.data = data32[0];
+ mmio32.data = &((u32 *)mmio->data)[0];
ret |= range->handle_mmio(vcpu, &mmio32, offset);
- if (!mmio->is_write)
- data32[0] = *(u32 *)mmio32.data;
return ret;
}
/**
- * vgic_handle_mmio_range - handle an in-kernel MMIO access
+ * vgic_handle_mmio_access - handle an in-kernel MMIO access
+ * This is called by the read/write KVM IO device wrappers below.
* @vcpu: pointer to the vcpu performing the access
- * @run: pointer to the kvm_run structure
- * @mmio: pointer to the data describing the access
- * @ranges: array of MMIO ranges in a given region
- * @mmio_base: base address of that region
+ * @this: pointer to the KVM IO device in charge
+ * @addr: guest physical address of the access
+ * @len: size of the access
+ * @val: pointer to the data region
+ * @is_write: read or write access
*
* returns true if the MMIO access could be performed
*/
-bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run,
- struct kvm_exit_mmio *mmio,
- const struct kvm_mmio_range *ranges,
- unsigned long mmio_base)
+static int vgic_handle_mmio_access(struct kvm_vcpu *vcpu,
+ struct kvm_io_device *this, gpa_t addr,
+ int len, void *val, bool is_write)
{
- const struct kvm_mmio_range *range;
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+ struct vgic_io_device *iodev = container_of(this,
+ struct vgic_io_device, dev);
+ struct kvm_run *run = vcpu->run;
+ const struct vgic_io_range *range;
+ struct kvm_exit_mmio mmio;
bool updated_state;
- unsigned long offset;
+ gpa_t offset;
- offset = mmio->phys_addr - mmio_base;
- range = vgic_find_range(ranges, mmio, offset);
+ offset = addr - iodev->addr;
+ range = vgic_find_range(iodev->reg_ranges, len, offset);
if (unlikely(!range || !range->handle_mmio)) {
- pr_warn("Unhandled access %d %08llx %d\n",
- mmio->is_write, mmio->phys_addr, mmio->len);
- return false;
+ pr_warn("Unhandled access %d %08llx %d\n", is_write, addr, len);
+ return -ENXIO;
}
- spin_lock(&vcpu->kvm->arch.vgic.lock);
+ mmio.phys_addr = addr;
+ mmio.len = len;
+ mmio.is_write = is_write;
+ mmio.data = val;
+ mmio.private = iodev->redist_vcpu;
+
+ spin_lock(&dist->lock);
offset -= range->base;
if (vgic_validate_access(dist, range, offset)) {
- updated_state = call_range_handler(vcpu, mmio, offset, range);
+ updated_state = call_range_handler(vcpu, &mmio, offset, range);
} else {
- if (!mmio->is_write)
- memset(mmio->data, 0, mmio->len);
+ if (!is_write)
+ memset(val, 0, len);
updated_state = false;
}
- spin_unlock(&vcpu->kvm->arch.vgic.lock);
- kvm_prepare_mmio(run, mmio);
+ spin_unlock(&dist->lock);
+ run->mmio.is_write = is_write;
+ run->mmio.len = len;
+ run->mmio.phys_addr = addr;
+ memcpy(run->mmio.data, val, len);
+
kvm_handle_mmio_return(vcpu, run);
if (updated_state)
vgic_kick_vcpus(vcpu->kvm);
- return true;
+ return 0;
+}
+
+static int vgic_handle_mmio_read(struct kvm_vcpu *vcpu,
+ struct kvm_io_device *this,
+ gpa_t addr, int len, void *val)
+{
+ return vgic_handle_mmio_access(vcpu, this, addr, len, val, false);
}
+static int vgic_handle_mmio_write(struct kvm_vcpu *vcpu,
+ struct kvm_io_device *this,
+ gpa_t addr, int len, const void *val)
+{
+ return vgic_handle_mmio_access(vcpu, this, addr, len, (void *)val,
+ true);
+}
+
+struct kvm_io_device_ops vgic_io_ops = {
+ .read = vgic_handle_mmio_read,
+ .write = vgic_handle_mmio_write,
+};
+
/**
- * vgic_handle_mmio - handle an in-kernel MMIO access for the GIC emulation
- * @vcpu: pointer to the vcpu performing the access
- * @run: pointer to the kvm_run structure
- * @mmio: pointer to the data describing the access
+ * vgic_register_kvm_io_dev - register VGIC register frame on the KVM I/O bus
+ * @kvm: The VM structure pointer
+ * @base: The (guest) base address for the register frame
+ * @len: Length of the register frame window
+ * @ranges: Describing the handler functions for each register
+ * @redist_vcpu_id: The VCPU ID to pass on to the handlers on call
+ * @iodev: Points to memory to be passed on to the handler
*
- * returns true if the MMIO access has been performed in kernel space,
- * and false if it needs to be emulated in user space.
- * Calls the actual handling routine for the selected VGIC model.
+ * @iodev stores the parameters of this function so that the handler and the
+ * dispatcher function can use them (the KVM I/O bus framework lacks an
+ * opaque parameter). Initialization is done in this function, but the
+ * reference must stay valid and unique for the whole VGIC lifetime.
+ * If the register frame is not mapped for a specific VCPU, pass -1 to
+ * @redist_vcpu_id.
*/
-bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
- struct kvm_exit_mmio *mmio)
+int vgic_register_kvm_io_dev(struct kvm *kvm, gpa_t base, int len,
+ const struct vgic_io_range *ranges,
+ int redist_vcpu_id,
+ struct vgic_io_device *iodev)
{
- if (!irqchip_in_kernel(vcpu->kvm))
- return false;
+ struct kvm_vcpu *vcpu = NULL;
+ int ret;
- /*
- * This will currently call either vgic_v2_handle_mmio() or
- * vgic_v3_handle_mmio(), which in turn will call
- * vgic_handle_mmio_range() defined above.
- */
- return vcpu->kvm->arch.vgic.vm_ops.handle_mmio(vcpu, run, mmio);
+ if (redist_vcpu_id >= 0)
+ vcpu = kvm_get_vcpu(kvm, redist_vcpu_id);
+
+ iodev->addr = base;
+ iodev->len = len;
+ iodev->reg_ranges = ranges;
+ iodev->redist_vcpu = vcpu;
+
+ kvm_iodevice_init(&iodev->dev, &vgic_io_ops);
+
+ mutex_lock(&kvm->slots_lock);
+
+ ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, base, len,
+ &iodev->dev);
+ mutex_unlock(&kvm->slots_lock);
+
+ /* Mark the iodev as invalid if registration fails. */
+ if (ret)
+ iodev->dev.ops = NULL;
+
+ return ret;
}
static int vgic_nr_shared_irqs(struct vgic_dist *dist)
@@ -804,6 +917,36 @@ static int vgic_nr_shared_irqs(struct vgic_dist *dist)
return dist->nr_irqs - VGIC_NR_PRIVATE_IRQS;
}
+static int compute_active_for_cpu(struct kvm_vcpu *vcpu)
+{
+ struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+ unsigned long *active, *enabled, *act_percpu, *act_shared;
+ unsigned long active_private, active_shared;
+ int nr_shared = vgic_nr_shared_irqs(dist);
+ int vcpu_id;
+
+ vcpu_id = vcpu->vcpu_id;
+ act_percpu = vcpu->arch.vgic_cpu.active_percpu;
+ act_shared = vcpu->arch.vgic_cpu.active_shared;
+
+ active = vgic_bitmap_get_cpu_map(&dist->irq_active, vcpu_id);
+ enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
+ bitmap_and(act_percpu, active, enabled, VGIC_NR_PRIVATE_IRQS);
+
+ active = vgic_bitmap_get_shared_map(&dist->irq_active);
+ enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
+ bitmap_and(act_shared, active, enabled, nr_shared);
+ bitmap_and(act_shared, act_shared,
+ vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]),
+ nr_shared);
+
+ active_private = find_first_bit(act_percpu, VGIC_NR_PRIVATE_IRQS);
+ active_shared = find_first_bit(act_shared, nr_shared);
+
+ return (active_private < VGIC_NR_PRIVATE_IRQS ||
+ active_shared < nr_shared);
+}
+
static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
{
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
@@ -835,7 +978,7 @@ static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
/*
* Update the interrupt state and determine which CPUs have pending
- * interrupts. Must be called with distributor lock held.
+ * or active interrupts. Must be called with distributor lock held.
*/
void vgic_update_state(struct kvm *kvm)
{
@@ -849,10 +992,13 @@ void vgic_update_state(struct kvm *kvm)
}
kvm_for_each_vcpu(c, vcpu, kvm) {
- if (compute_pending_for_cpu(vcpu)) {
- pr_debug("CPU%d has pending interrupts\n", c);
+ if (compute_pending_for_cpu(vcpu))
set_bit(c, dist->irq_pending_on_cpu);
- }
+
+ if (compute_active_for_cpu(vcpu))
+ set_bit(c, dist->irq_active_on_cpu);
+ else
+ clear_bit(c, dist->irq_active_on_cpu);
}
}
@@ -955,6 +1101,26 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
}
}
+static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq,
+ int lr_nr, struct vgic_lr vlr)
+{
+ if (vgic_irq_is_active(vcpu, irq)) {
+ vlr.state |= LR_STATE_ACTIVE;
+ kvm_debug("Set active, clear distributor: 0x%x\n", vlr.state);
+ vgic_irq_clear_active(vcpu, irq);
+ vgic_update_state(vcpu->kvm);
+ } else if (vgic_dist_irq_is_pending(vcpu, irq)) {
+ vlr.state |= LR_STATE_PENDING;
+ kvm_debug("Set pending: 0x%x\n", vlr.state);
+ }
+
+ if (!vgic_irq_is_edge(vcpu, irq))
+ vlr.state |= LR_EOI_INT;
+
+ vgic_set_lr(vcpu, lr_nr, vlr);
+ vgic_sync_lr_elrsr(vcpu, lr_nr, vlr);
+}
+
/*
* Queue an interrupt to a CPU virtual interface. Return true on success,
* or false if it wasn't possible to queue it.
@@ -982,9 +1148,7 @@ bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
if (vlr.source == sgi_source_id) {
kvm_debug("LR%d piggyback for IRQ%d\n", lr, vlr.irq);
BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
- vlr.state |= LR_STATE_PENDING;
- vgic_set_lr(vcpu, lr, vlr);
- vgic_sync_lr_elrsr(vcpu, lr, vlr);
+ vgic_queue_irq_to_lr(vcpu, irq, lr, vlr);
return true;
}
}
@@ -1001,12 +1165,8 @@ bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
vlr.irq = irq;
vlr.source = sgi_source_id;
- vlr.state = LR_STATE_PENDING;
- if (!vgic_irq_is_edge(vcpu, irq))
- vlr.state |= LR_EOI_INT;
-
- vgic_set_lr(vcpu, lr, vlr);
- vgic_sync_lr_elrsr(vcpu, lr, vlr);
+ vlr.state = 0;
+ vgic_queue_irq_to_lr(vcpu, irq, lr, vlr);
return true;
}
@@ -1038,39 +1198,49 @@ static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+ unsigned long *pa_percpu, *pa_shared;
int i, vcpu_id;
int overflow = 0;
+ int nr_shared = vgic_nr_shared_irqs(dist);
vcpu_id = vcpu->vcpu_id;
+ pa_percpu = vcpu->arch.vgic_cpu.pend_act_percpu;
+ pa_shared = vcpu->arch.vgic_cpu.pend_act_shared;
+
+ bitmap_or(pa_percpu, vgic_cpu->pending_percpu, vgic_cpu->active_percpu,
+ VGIC_NR_PRIVATE_IRQS);
+ bitmap_or(pa_shared, vgic_cpu->pending_shared, vgic_cpu->active_shared,
+ nr_shared);
/*
* We may not have any pending interrupt, or the interrupts
* may have been serviced from another vcpu. In all cases,
* move along.
*/
- if (!kvm_vgic_vcpu_pending_irq(vcpu)) {
- pr_debug("CPU%d has no pending interrupt\n", vcpu_id);
+ if (!kvm_vgic_vcpu_pending_irq(vcpu) && !kvm_vgic_vcpu_active_irq(vcpu))
goto epilog;
- }
/* SGIs */
- for_each_set_bit(i, vgic_cpu->pending_percpu, VGIC_NR_SGIS) {
+ for_each_set_bit(i, pa_percpu, VGIC_NR_SGIS) {
if (!queue_sgi(vcpu, i))
overflow = 1;
}
/* PPIs */
- for_each_set_bit_from(i, vgic_cpu->pending_percpu, VGIC_NR_PRIVATE_IRQS) {
+ for_each_set_bit_from(i, pa_percpu, VGIC_NR_PRIVATE_IRQS) {
if (!vgic_queue_hwirq(vcpu, i))
overflow = 1;
}
/* SPIs */
- for_each_set_bit(i, vgic_cpu->pending_shared, vgic_nr_shared_irqs(dist)) {
+ for_each_set_bit(i, pa_shared, nr_shared) {
if (!vgic_queue_hwirq(vcpu, i + VGIC_NR_PRIVATE_IRQS))
overflow = 1;
}
+
epilog:
if (overflow) {
vgic_enable_underflow(vcpu);
@@ -1089,7 +1259,9 @@ epilog:
static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
{
u32 status = vgic_get_interrupt_status(vcpu);
+ struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
bool level_pending = false;
+ struct kvm *kvm = vcpu->kvm;
kvm_debug("STATUS = %08x\n", status);
@@ -1106,6 +1278,7 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq));
+ spin_lock(&dist->lock);
vgic_irq_clear_queued(vcpu, vlr.irq);
WARN_ON(vlr.state & LR_STATE_MASK);
vlr.state = 0;
@@ -1124,6 +1297,17 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
*/
vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq);
+ /*
+ * kvm_notify_acked_irq calls kvm_set_irq()
+ * to reset the IRQ level. Need to release the
+ * lock for kvm_set_irq to grab it.
+ */
+ spin_unlock(&dist->lock);
+
+ kvm_notify_acked_irq(kvm, 0,
+ vlr.irq - VGIC_NR_PRIVATE_IRQS);
+ spin_lock(&dist->lock);
+
/* Any additional pending interrupt? */
if (vgic_dist_irq_get_level(vcpu, vlr.irq)) {
vgic_cpu_irq_set(vcpu, vlr.irq);
@@ -1133,6 +1317,8 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
vgic_cpu_irq_clear(vcpu, vlr.irq);
}
+ spin_unlock(&dist->lock);
+
/*
* Despite being EOIed, the LR may not have
* been marked as empty.
@@ -1155,10 +1341,7 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
return level_pending;
}
-/*
- * Sync back the VGIC state after a guest run. The distributor lock is
- * needed so we don't get preempted in the middle of the state processing.
- */
+/* Sync back the VGIC state after a guest run */
static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
@@ -1205,14 +1388,10 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-
if (!irqchip_in_kernel(vcpu->kvm))
return;
- spin_lock(&dist->lock);
__kvm_vgic_sync_hwstate(vcpu);
- spin_unlock(&dist->lock);
}
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
@@ -1225,6 +1404,17 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
return test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
}
+int kvm_vgic_vcpu_active_irq(struct kvm_vcpu *vcpu)
+{
+ struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
+ if (!irqchip_in_kernel(vcpu->kvm))
+ return 0;
+
+ return test_bit(vcpu->vcpu_id, dist->irq_active_on_cpu);
+}
+
void vgic_kick_vcpus(struct kvm *kvm)
{
struct kvm_vcpu *vcpu;
@@ -1397,8 +1587,12 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
kfree(vgic_cpu->pending_shared);
+ kfree(vgic_cpu->active_shared);
+ kfree(vgic_cpu->pend_act_shared);
kfree(vgic_cpu->vgic_irq_lr_map);
vgic_cpu->pending_shared = NULL;
+ vgic_cpu->active_shared = NULL;
+ vgic_cpu->pend_act_shared = NULL;
vgic_cpu->vgic_irq_lr_map = NULL;
}
@@ -1408,9 +1602,14 @@ static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8;
vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
+ vgic_cpu->active_shared = kzalloc(sz, GFP_KERNEL);
+ vgic_cpu->pend_act_shared = kzalloc(sz, GFP_KERNEL);
vgic_cpu->vgic_irq_lr_map = kmalloc(nr_irqs, GFP_KERNEL);
- if (!vgic_cpu->pending_shared || !vgic_cpu->vgic_irq_lr_map) {
+ if (!vgic_cpu->pending_shared
+ || !vgic_cpu->active_shared
+ || !vgic_cpu->pend_act_shared
+ || !vgic_cpu->vgic_irq_lr_map) {
kvm_vgic_vcpu_destroy(vcpu);
return -ENOMEM;
}
@@ -1463,10 +1662,12 @@ void kvm_vgic_destroy(struct kvm *kvm)
kfree(dist->irq_spi_mpidr);
kfree(dist->irq_spi_target);
kfree(dist->irq_pending_on_cpu);
+ kfree(dist->irq_active_on_cpu);
dist->irq_sgi_sources = NULL;
dist->irq_spi_cpu = NULL;
dist->irq_spi_target = NULL;
dist->irq_pending_on_cpu = NULL;
+ dist->irq_active_on_cpu = NULL;
dist->nr_cpus = 0;
}
@@ -1502,6 +1703,7 @@ int vgic_init(struct kvm *kvm)
ret |= vgic_init_bitmap(&dist->irq_pending, nr_cpus, nr_irqs);
ret |= vgic_init_bitmap(&dist->irq_soft_pend, nr_cpus, nr_irqs);
ret |= vgic_init_bitmap(&dist->irq_queued, nr_cpus, nr_irqs);
+ ret |= vgic_init_bitmap(&dist->irq_active, nr_cpus, nr_irqs);
ret |= vgic_init_bitmap(&dist->irq_cfg, nr_cpus, nr_irqs);
ret |= vgic_init_bytemap(&dist->irq_priority, nr_cpus, nr_irqs);
@@ -1514,10 +1716,13 @@ int vgic_init(struct kvm *kvm)
GFP_KERNEL);
dist->irq_pending_on_cpu = kzalloc(BITS_TO_LONGS(nr_cpus) * sizeof(long),
GFP_KERNEL);
+ dist->irq_active_on_cpu = kzalloc(BITS_TO_LONGS(nr_cpus) * sizeof(long),
+ GFP_KERNEL);
if (!dist->irq_sgi_sources ||
!dist->irq_spi_cpu ||
!dist->irq_spi_target ||
- !dist->irq_pending_on_cpu) {
+ !dist->irq_pending_on_cpu ||
+ !dist->irq_active_on_cpu) {
ret = -ENOMEM;
goto out;
}
@@ -1845,12 +2050,9 @@ int vgic_get_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
return r;
}
-int vgic_has_attr_regs(const struct kvm_mmio_range *ranges, phys_addr_t offset)
+int vgic_has_attr_regs(const struct vgic_io_range *ranges, phys_addr_t offset)
{
- struct kvm_exit_mmio dev_attr_mmio;
-
- dev_attr_mmio.len = 4;
- if (vgic_find_range(ranges, &dev_attr_mmio, offset))
+ if (vgic_find_range(ranges, 4, offset))
return 0;
else
return -ENXIO;
@@ -1883,8 +2085,10 @@ static struct notifier_block vgic_cpu_nb = {
};
static const struct of_device_id vgic_ids[] = {
- { .compatible = "arm,cortex-a15-gic", .data = vgic_v2_probe, },
- { .compatible = "arm,gic-v3", .data = vgic_v3_probe, },
+ { .compatible = "arm,cortex-a15-gic", .data = vgic_v2_probe, },
+ { .compatible = "arm,cortex-a7-gic", .data = vgic_v2_probe, },
+ { .compatible = "arm,gic-400", .data = vgic_v2_probe, },
+ { .compatible = "arm,gic-v3", .data = vgic_v3_probe, },
{},
};
@@ -1932,3 +2136,38 @@ out_free_irq:
free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus());
return ret;
}
+
+int kvm_irq_map_gsi(struct kvm *kvm,
+ struct kvm_kernel_irq_routing_entry *entries,
+ int gsi)
+{
+ return gsi;
+}
+
+int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
+{
+ return pin;
+}
+
+int kvm_set_irq(struct kvm *kvm, int irq_source_id,
+ u32 irq, int level, bool line_status)
+{
+ unsigned int spi = irq + VGIC_NR_PRIVATE_IRQS;
+
+ trace_kvm_set_irq(irq, level, irq_source_id);
+
+ BUG_ON(!vgic_initialized(kvm));
+
+ if (spi > kvm->arch.vgic.nr_irqs)
+ return -EINVAL;
+ return kvm_vgic_inject_irq(kvm, 0, spi, level);
+}
+
+/* MSI not implemented yet */
+int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
+ struct kvm *kvm, int irq_source_id,
+ int level, bool line_status)
+{
+ return 0;
+}
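
The LR bookkeeping reworked above (vgic_queue_irq_to_lr(), vgic_unqueue_irqs()) revolves around the two-bit list register state documented in the unqueue path. For reference, the four encodings spelled out — these enum names are illustrative, not the kernel's LR_STATE_* masks:

/* Illustrative: the 2-bit LR state encoding handled above. */
enum lr_state_bits {
	LR_S_INACTIVE       = 0x0,	/* 00: free for reuse */
	LR_S_PENDING        = 0x1,	/* 01: pending */
	LR_S_ACTIVE         = 0x2,	/* 10: active */
	LR_S_PENDING_ACTIVE = 0x3,	/* 11: pending and active */
};
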
diff --git a/virt/kvm/arm/vgic.h b/virt/kvm/arm/vgic.h
index 1e83bdf5f499..0df74cbb6200 100644
--- a/virt/kvm/arm/vgic.h
+++ b/virt/kvm/arm/vgic.h
@@ -20,6 +20,8 @@
#ifndef __KVM_VGIC_H__
#define __KVM_VGIC_H__
+#include <kvm/iodev.h>
+
#define VGIC_ADDR_UNDEF (-1)
#define IS_VGIC_ADDR_UNDEF(_x) ((_x) == VGIC_ADDR_UNDEF)
@@ -57,6 +59,14 @@ void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq);
void vgic_unqueue_irqs(struct kvm_vcpu *vcpu);
+struct kvm_exit_mmio {
+ phys_addr_t phys_addr;
+ void *data;
+ u32 len;
+ bool is_write;
+ void *private;
+};
+
void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
phys_addr_t offset, int mode);
bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
@@ -74,7 +84,7 @@ void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value)
*((u32 *)mmio->data) = cpu_to_le32(value) & mask;
}
-struct kvm_mmio_range {
+struct vgic_io_range {
phys_addr_t base;
unsigned long len;
int bits_per_irq;
@@ -82,6 +92,11 @@ struct kvm_mmio_range {
phys_addr_t offset);
};
+int vgic_register_kvm_io_dev(struct kvm *kvm, gpa_t base, int len,
+ const struct vgic_io_range *ranges,
+ int redist_id,
+ struct vgic_io_device *iodev);
+
static inline bool is_in_range(phys_addr_t addr, unsigned long len,
phys_addr_t baseaddr, unsigned long size)
{
@@ -89,14 +104,8 @@ static inline bool is_in_range(phys_addr_t addr, unsigned long len,
}
const
-struct kvm_mmio_range *vgic_find_range(const struct kvm_mmio_range *ranges,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset);
-
-bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run,
- struct kvm_exit_mmio *mmio,
- const struct kvm_mmio_range *ranges,
- unsigned long mmio_base);
+struct vgic_io_range *vgic_find_range(const struct vgic_io_range *ranges,
+ int len, gpa_t offset);
bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
phys_addr_t offset, int vcpu_id, int access);
@@ -107,12 +116,20 @@ bool vgic_handle_set_pending_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
bool vgic_handle_clear_pending_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
phys_addr_t offset, int vcpu_id);
+bool vgic_handle_set_active_reg(struct kvm *kvm,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset, int vcpu_id);
+
+bool vgic_handle_clear_active_reg(struct kvm *kvm,
+ struct kvm_exit_mmio *mmio,
+ phys_addr_t offset, int vcpu_id);
+
bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
phys_addr_t offset);
void vgic_kick_vcpus(struct kvm *kvm);
-int vgic_has_attr_regs(const struct kvm_mmio_range *ranges, phys_addr_t offset);
+int vgic_has_attr_regs(const struct vgic_io_range *ranges, phys_addr_t offset);
int vgic_set_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr);
int vgic_get_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr);
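
Putting the new header pieces together, a caller wires a register frame onto the KVM MMIO bus roughly as follows; this is a sketch based on the vgic_v2_map_resources() call above, assuming dist and a ranges table are in scope:

/* Sketch: registering a distributor frame via the new helper. */
static int register_dist_frame(struct kvm *kvm, struct vgic_dist *dist,
			       const struct vgic_io_range *ranges)
{
	/* -1: frame is not bound to a particular redistributor VCPU */
	return vgic_register_kvm_io_dev(kvm, dist->vgic_dist_base,
					KVM_VGIC_V2_DIST_SIZE, ranges,
					-1, &dist->dist_iodev);
}
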
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index 00d86427af0f..571c1ce37d15 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -8,7 +8,7 @@
*
*/
-#include "iodev.h"
+#include <kvm/iodev.h>
#include <linux/kvm_host.h>
#include <linux/slab.h>
@@ -60,8 +60,9 @@ static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
return 1;
}
-static int coalesced_mmio_write(struct kvm_io_device *this,
- gpa_t addr, int len, const void *val)
+static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
+ struct kvm_io_device *this, gpa_t addr,
+ int len, const void *val)
{
struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 148b2392c762..9ff4193dfa49 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -36,7 +36,7 @@
#include <linux/seqlock.h>
#include <trace/events/kvm.h>
-#include "iodev.h"
+#include <kvm/iodev.h>
#ifdef CONFIG_HAVE_KVM_IRQFD
/*
@@ -311,6 +311,9 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
unsigned int events;
int idx;
+ if (!kvm_arch_intc_initialized(kvm))
+ return -EAGAIN;
+
irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
if (!irqfd)
return -ENOMEM;
@@ -712,8 +715,8 @@ ioeventfd_in_range(struct _ioeventfd *p, gpa_t addr, int len, const void *val)
/* MMIO/PIO writes trigger an event if the addr/val match */
static int
-ioeventfd_write(struct kvm_io_device *this, gpa_t addr, int len,
- const void *val)
+ioeventfd_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, gpa_t addr,
+ int len, const void *val)
{
struct _ioeventfd *p = to_ioeventfd(this);
diff --git a/virt/kvm/irqchip.c b/virt/kvm/irqchip.c
index 7f256f31df10..1d56a901e791 100644
--- a/virt/kvm/irqchip.c
+++ b/virt/kvm/irqchip.c
@@ -105,7 +105,7 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
i = kvm_irq_map_gsi(kvm, irq_set, irq);
srcu_read_unlock(&kvm->irq_srcu, idx);
- while(i--) {
+ while (i--) {
int r;
r = irq_set[i].set(&irq_set[i], kvm, irq_source_id, level,
line_status);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index a2214d9609bd..d3fc9399062a 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -16,7 +16,7 @@
*
*/
-#include "iodev.h"
+#include <kvm/iodev.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
@@ -66,13 +66,13 @@
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
-unsigned int halt_poll_ns = 0;
+static unsigned int halt_poll_ns;
module_param(halt_poll_ns, uint, S_IRUGO | S_IWUSR);
/*
* Ordering of locks:
*
- * kvm->lock --> kvm->slots_lock --> kvm->irq_lock
+ * kvm->lock --> kvm->slots_lock --> kvm->irq_lock
*/
DEFINE_SPINLOCK(kvm_lock);
@@ -80,7 +80,7 @@ static DEFINE_RAW_SPINLOCK(kvm_count_lock);
LIST_HEAD(vm_list);
static cpumask_var_t cpus_hardware_enabled;
-static int kvm_usage_count = 0;
+static int kvm_usage_count;
static atomic_t hardware_enable_failed;
struct kmem_cache *kvm_vcpu_cache;
@@ -471,7 +471,7 @@ static struct kvm *kvm_create_vm(unsigned long type)
BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
r = -ENOMEM;
- kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
+ kvm->memslots = kvm_kvzalloc(sizeof(struct kvm_memslots));
if (!kvm->memslots)
goto out_err_no_srcu;
@@ -522,7 +522,7 @@ out_err_no_srcu:
out_err_no_disable:
for (i = 0; i < KVM_NR_BUSES; i++)
kfree(kvm->buses[i]);
- kfree(kvm->memslots);
+ kvfree(kvm->memslots);
kvm_arch_free_vm(kvm);
return ERR_PTR(r);
}
@@ -539,20 +539,12 @@ void *kvm_kvzalloc(unsigned long size)
return kzalloc(size, GFP_KERNEL);
}
-void kvm_kvfree(const void *addr)
-{
- if (is_vmalloc_addr(addr))
- vfree(addr);
- else
- kfree(addr);
-}
-
static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
{
if (!memslot->dirty_bitmap)
return;
- kvm_kvfree(memslot->dirty_bitmap);
+ kvfree(memslot->dirty_bitmap);
memslot->dirty_bitmap = NULL;
}
@@ -578,7 +570,7 @@ static void kvm_free_physmem(struct kvm *kvm)
kvm_for_each_memslot(memslot, slots)
kvm_free_physmem_slot(kvm, memslot, NULL);
- kfree(kvm->memslots);
+ kvfree(kvm->memslots);
}
static void kvm_destroy_devices(struct kvm *kvm)
@@ -871,10 +863,10 @@ int __kvm_set_memory_region(struct kvm *kvm,
goto out_free;
}
- slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
- GFP_KERNEL);
+ slots = kvm_kvzalloc(sizeof(struct kvm_memslots));
if (!slots)
goto out_free;
+ memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) {
slot = id_to_memslot(slots, mem->slot);
@@ -888,8 +880,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
* or moved, memslot will be created.
*
* validation of sp->gfn happens in:
- * - gfn_to_hva (kvm_read_guest, gfn_to_pfn)
- * - kvm_is_visible_gfn (mmu_check_roots)
+ * - gfn_to_hva (kvm_read_guest, gfn_to_pfn)
+ * - kvm_is_visible_gfn (mmu_check_roots)
*/
kvm_arch_flush_shadow_memslot(kvm, slot);
@@ -917,7 +909,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
kvm_arch_commit_memory_region(kvm, mem, &old, change);
kvm_free_physmem_slot(kvm, &old, &new);
- kfree(old_memslots);
+ kvfree(old_memslots);
/*
* IOMMU mapping: New slots need to be mapped. Old slots need to be
@@ -936,7 +928,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
return 0;
out_slots:
- kfree(slots);
+ kvfree(slots);
out_free:
kvm_free_physmem_slot(kvm, &new, &old);
out:
@@ -1061,9 +1053,11 @@ int kvm_get_dirty_log_protect(struct kvm *kvm,
mask = xchg(&dirty_bitmap[i], 0);
dirty_bitmap_buffer[i] = mask;
- offset = i * BITS_PER_LONG;
- kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, offset,
- mask);
+ if (mask) {
+ offset = i * BITS_PER_LONG;
+ kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
+ offset, mask);
+ }
}
spin_unlock(&kvm->mmu_lock);
@@ -1193,16 +1187,6 @@ unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
return gfn_to_hva_memslot_prot(slot, gfn, writable);
}
-static int kvm_read_hva(void *data, void __user *hva, int len)
-{
- return __copy_from_user(data, hva, len);
-}
-
-static int kvm_read_hva_atomic(void *data, void __user *hva, int len)
-{
- return __copy_from_user_inatomic(data, hva, len);
-}
-
static int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, int write, struct page **page)
{
@@ -1481,7 +1465,6 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
return kvm_pfn_to_page(pfn);
}
-
EXPORT_SYMBOL_GPL(gfn_to_page);
void kvm_release_page_clean(struct page *page)
@@ -1517,6 +1500,7 @@ void kvm_set_pfn_dirty(pfn_t pfn)
{
if (!kvm_is_reserved_pfn(pfn)) {
struct page *page = pfn_to_page(pfn);
+
if (!PageReserved(page))
SetPageDirty(page);
}
@@ -1554,7 +1538,7 @@ int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
addr = gfn_to_hva_prot(kvm, gfn, NULL);
if (kvm_is_error_hva(addr))
return -EFAULT;
- r = kvm_read_hva(data, (void __user *)addr + offset, len);
+ r = __copy_from_user(data, (void __user *)addr + offset, len);
if (r)
return -EFAULT;
return 0;
@@ -1593,7 +1577,7 @@ int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
if (kvm_is_error_hva(addr))
return -EFAULT;
pagefault_disable();
- r = kvm_read_hva_atomic(data, (void __user *)addr + offset, len);
+ r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
pagefault_enable();
if (r)
return -EFAULT;
@@ -1653,8 +1637,8 @@ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
ghc->generation = slots->generation;
ghc->len = len;
ghc->memslot = gfn_to_memslot(kvm, start_gfn);
- ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail);
- if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) {
+ ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, NULL);
+ if (!kvm_is_error_hva(ghc->hva) && nr_pages_needed <= 1) {
ghc->hva += offset;
} else {
/*
@@ -1742,7 +1726,7 @@ int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
int offset = offset_in_page(gpa);
int ret;
- while ((seg = next_segment(len, offset)) != 0) {
+ while ((seg = next_segment(len, offset)) != 0) {
ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
if (ret < 0)
return ret;
@@ -1800,6 +1784,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
start = cur = ktime_get();
if (halt_poll_ns) {
ktime_t stop = ktime_add_ns(ktime_get(), halt_poll_ns);
+
do {
/*
* This sets KVM_REQ_UNHALT if an interrupt
@@ -2118,7 +2103,7 @@ static long kvm_vcpu_ioctl(struct file *filp,
* Special cases: vcpu ioctls that are asynchronous to vcpu execution,
* so vcpu_load() would break it.
*/
- if (ioctl == KVM_S390_INTERRUPT || ioctl == KVM_INTERRUPT)
+ if (ioctl == KVM_S390_INTERRUPT || ioctl == KVM_S390_IRQ || ioctl == KVM_INTERRUPT)
return kvm_arch_vcpu_ioctl(filp, ioctl, arg);
#endif
@@ -2135,6 +2120,7 @@ static long kvm_vcpu_ioctl(struct file *filp,
/* The thread running this VCPU changed. */
struct pid *oldpid = vcpu->pid;
struct pid *newpid = get_task_pid(current, PIDTYPE_PID);
+
rcu_assign_pointer(vcpu->pid, newpid);
if (oldpid)
synchronize_rcu();
@@ -2205,7 +2191,7 @@ out_free1:
if (r)
goto out;
r = -EFAULT;
- if (copy_to_user(argp, &mp_state, sizeof mp_state))
+ if (copy_to_user(argp, &mp_state, sizeof(mp_state)))
goto out;
r = 0;
break;
@@ -2214,7 +2200,7 @@ out_free1:
struct kvm_mp_state mp_state;
r = -EFAULT;
- if (copy_from_user(&mp_state, argp, sizeof mp_state))
+ if (copy_from_user(&mp_state, argp, sizeof(mp_state)))
goto out;
r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
break;
@@ -2223,13 +2209,13 @@ out_free1:
struct kvm_translation tr;
r = -EFAULT;
- if (copy_from_user(&tr, argp, sizeof tr))
+ if (copy_from_user(&tr, argp, sizeof(tr)))
goto out;
r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
if (r)
goto out;
r = -EFAULT;
- if (copy_to_user(argp, &tr, sizeof tr))
+ if (copy_to_user(argp, &tr, sizeof(tr)))
goto out;
r = 0;
break;
@@ -2238,7 +2224,7 @@ out_free1:
struct kvm_guest_debug dbg;
r = -EFAULT;
- if (copy_from_user(&dbg, argp, sizeof dbg))
+ if (copy_from_user(&dbg, argp, sizeof(dbg)))
goto out;
r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
break;
@@ -2252,14 +2238,14 @@ out_free1:
if (argp) {
r = -EFAULT;
if (copy_from_user(&kvm_sigmask, argp,
- sizeof kvm_sigmask))
+ sizeof(kvm_sigmask)))
goto out;
r = -EINVAL;
- if (kvm_sigmask.len != sizeof sigset)
+ if (kvm_sigmask.len != sizeof(sigset))
goto out;
r = -EFAULT;
if (copy_from_user(&sigset, sigmask_arg->sigset,
- sizeof sigset))
+ sizeof(sigset)))
goto out;
p = &sigset;
}
@@ -2321,14 +2307,14 @@ static long kvm_vcpu_compat_ioctl(struct file *filp,
if (argp) {
r = -EFAULT;
if (copy_from_user(&kvm_sigmask, argp,
- sizeof kvm_sigmask))
+ sizeof(kvm_sigmask)))
goto out;
r = -EINVAL;
- if (kvm_sigmask.len != sizeof csigset)
+ if (kvm_sigmask.len != sizeof(csigset))
goto out;
r = -EFAULT;
if (copy_from_user(&csigset, sigmask_arg->sigset,
- sizeof csigset))
+ sizeof(csigset)))
goto out;
sigset_from_compat(&sigset, &csigset);
r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
@@ -2525,7 +2511,7 @@ static long kvm_vm_ioctl(struct file *filp,
r = -EFAULT;
if (copy_from_user(&kvm_userspace_mem, argp,
- sizeof kvm_userspace_mem))
+ sizeof(kvm_userspace_mem)))
goto out;
r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem);
@@ -2535,7 +2521,7 @@ static long kvm_vm_ioctl(struct file *filp,
struct kvm_dirty_log log;
r = -EFAULT;
- if (copy_from_user(&log, argp, sizeof log))
+ if (copy_from_user(&log, argp, sizeof(log)))
goto out;
r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
break;
@@ -2543,16 +2529,18 @@ static long kvm_vm_ioctl(struct file *filp,
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
case KVM_REGISTER_COALESCED_MMIO: {
struct kvm_coalesced_mmio_zone zone;
+
r = -EFAULT;
- if (copy_from_user(&zone, argp, sizeof zone))
+ if (copy_from_user(&zone, argp, sizeof(zone)))
goto out;
r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
break;
}
case KVM_UNREGISTER_COALESCED_MMIO: {
struct kvm_coalesced_mmio_zone zone;
+
r = -EFAULT;
- if (copy_from_user(&zone, argp, sizeof zone))
+ if (copy_from_user(&zone, argp, sizeof(zone)))
goto out;
r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
break;
@@ -2562,7 +2550,7 @@ static long kvm_vm_ioctl(struct file *filp,
struct kvm_irqfd data;
r = -EFAULT;
- if (copy_from_user(&data, argp, sizeof data))
+ if (copy_from_user(&data, argp, sizeof(data)))
goto out;
r = kvm_irqfd(kvm, &data);
break;
@@ -2571,7 +2559,7 @@ static long kvm_vm_ioctl(struct file *filp,
struct kvm_ioeventfd data;
r = -EFAULT;
- if (copy_from_user(&data, argp, sizeof data))
+ if (copy_from_user(&data, argp, sizeof(data)))
goto out;
r = kvm_ioeventfd(kvm, &data);
break;
@@ -2592,7 +2580,7 @@ static long kvm_vm_ioctl(struct file *filp,
struct kvm_msi msi;
r = -EFAULT;
- if (copy_from_user(&msi, argp, sizeof msi))
+ if (copy_from_user(&msi, argp, sizeof(msi)))
goto out;
r = kvm_send_userspace_msi(kvm, &msi);
break;
@@ -2604,7 +2592,7 @@ static long kvm_vm_ioctl(struct file *filp,
struct kvm_irq_level irq_event;
r = -EFAULT;
- if (copy_from_user(&irq_event, argp, sizeof irq_event))
+ if (copy_from_user(&irq_event, argp, sizeof(irq_event)))
goto out;
r = kvm_vm_ioctl_irq_line(kvm, &irq_event,
@@ -2614,7 +2602,7 @@ static long kvm_vm_ioctl(struct file *filp,
r = -EFAULT;
if (ioctl == KVM_IRQ_LINE_STATUS) {
- if (copy_to_user(argp, &irq_event, sizeof irq_event))
+ if (copy_to_user(argp, &irq_event, sizeof(irq_event)))
goto out;
}
@@ -2647,7 +2635,7 @@ static long kvm_vm_ioctl(struct file *filp,
goto out_free_irq_routing;
r = kvm_set_irq_routing(kvm, entries, routing.nr,
routing.flags);
- out_free_irq_routing:
+out_free_irq_routing:
vfree(entries);
break;
}
@@ -2822,8 +2810,7 @@ static void hardware_enable_nolock(void *junk)
if (r) {
cpumask_clear_cpu(cpu, cpus_hardware_enabled);
atomic_inc(&hardware_enable_failed);
- printk(KERN_INFO "kvm: enabling virtualization on "
- "CPU%d failed\n", cpu);
+ pr_info("kvm: enabling virtualization on CPU%d failed\n", cpu);
}
}
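
[Editor's note: two cleanups in one hunk here: the user-visible string is no longer split across source lines (which defeats grepping the log message), and raw printk(KERN_INFO ...) becomes pr_info(). Roughly how the pr_* helpers expand in include/linux/printk.h, simplified:]

	/* simplified; a file may #define pr_fmt to prepend e.g. "kvm: " */
	#define pr_fmt(fmt) fmt
	#define pr_info(fmt, ...) \
		printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
	#define pr_err(fmt, ...) \
		printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
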
@@ -2899,12 +2886,12 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
val &= ~CPU_TASKS_FROZEN;
switch (val) {
case CPU_DYING:
- printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
+ pr_info("kvm: disabling virtualization on CPU%d\n",
cpu);
hardware_disable();
break;
case CPU_STARTING:
- printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
+ pr_info("kvm: enabling virtualization on CPU%d\n",
cpu);
hardware_enable();
break;
@@ -2921,7 +2908,7 @@ static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
*
 * And Intel TXT requires VMX to be off on all CPUs when the system shuts down.
*/
- printk(KERN_INFO "kvm: exiting hardware virtualization\n");
+ pr_info("kvm: exiting hardware virtualization\n");
kvm_rebooting = true;
on_each_cpu(hardware_disable_nolock, NULL, 1);
return NOTIFY_OK;
@@ -2945,7 +2932,7 @@ static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
}
static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
- const struct kvm_io_range *r2)
+ const struct kvm_io_range *r2)
{
if (r1->addr < r2->addr)
return -1;
@@ -2998,7 +2985,7 @@ static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
return off;
}
-static int __kvm_io_bus_write(struct kvm_io_bus *bus,
+static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
struct kvm_io_range *range, const void *val)
{
int idx;
@@ -3009,7 +2996,7 @@ static int __kvm_io_bus_write(struct kvm_io_bus *bus,
while (idx < bus->dev_count &&
kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
- if (!kvm_iodevice_write(bus->range[idx].dev, range->addr,
+ if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr,
range->len, val))
return idx;
idx++;
@@ -3019,7 +3006,7 @@ static int __kvm_io_bus_write(struct kvm_io_bus *bus,
}
/* kvm_io_bus_write - called under kvm->slots_lock */
-int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
+int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
int len, const void *val)
{
struct kvm_io_bus *bus;
@@ -3031,14 +3018,14 @@ int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
.len = len,
};
- bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
- r = __kvm_io_bus_write(bus, &range, val);
+ bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
+ r = __kvm_io_bus_write(vcpu, bus, &range, val);
return r < 0 ? r : 0;
}
/* kvm_io_bus_write_cookie - called under kvm->slots_lock */
-int kvm_io_bus_write_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
- int len, const void *val, long cookie)
+int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
+ gpa_t addr, int len, const void *val, long cookie)
{
struct kvm_io_bus *bus;
struct kvm_io_range range;
@@ -3048,12 +3035,12 @@ int kvm_io_bus_write_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
.len = len,
};
- bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
+ bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
/* First try the device referenced by cookie. */
if ((cookie >= 0) && (cookie < bus->dev_count) &&
(kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0))
- if (!kvm_iodevice_write(bus->range[cookie].dev, addr, len,
+ if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len,
val))
return cookie;
@@ -3061,11 +3048,11 @@ int kvm_io_bus_write_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
* cookie contained garbage; fall back to search and return the
* correct cookie value.
*/
- return __kvm_io_bus_write(bus, &range, val);
+ return __kvm_io_bus_write(vcpu, bus, &range, val);
}
-static int __kvm_io_bus_read(struct kvm_io_bus *bus, struct kvm_io_range *range,
- void *val)
+static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
+ struct kvm_io_range *range, void *val)
{
int idx;
@@ -3075,7 +3062,7 @@ static int __kvm_io_bus_read(struct kvm_io_bus *bus, struct kvm_io_range *range,
while (idx < bus->dev_count &&
kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
- if (!kvm_iodevice_read(bus->range[idx].dev, range->addr,
+ if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr,
range->len, val))
return idx;
idx++;
@@ -3086,7 +3073,7 @@ static int __kvm_io_bus_read(struct kvm_io_bus *bus, struct kvm_io_range *range,
EXPORT_SYMBOL_GPL(kvm_io_bus_write);
/* kvm_io_bus_read - called under kvm->slots_lock */
-int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
+int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
int len, void *val)
{
struct kvm_io_bus *bus;
@@ -3098,8 +3085,8 @@ int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
.len = len,
};
- bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
- r = __kvm_io_bus_read(bus, &range, val);
+ bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
+ r = __kvm_io_bus_read(vcpu, bus, &range, val);
return r < 0 ? r : 0;
}
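
[Editor's note: the io-bus hunks above replace struct kvm * with struct kvm_vcpu * throughout the MMIO/PIO dispatch chain, so a registered device can tell which vcpu performed an access; the VM itself remains reachable as vcpu->kvm. At a hypothetical call site the conversion looks like:]

	/* before: the bus lookup was keyed off the VM only */
	ret = kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, gpa, len, data);

	/* after: the initiating vcpu is threaded down to kvm_iodevice_write() */
	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, gpa, len, data);
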
@@ -3269,6 +3256,7 @@ struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
+
if (vcpu->preempted)
vcpu->preempted = false;
@@ -3350,7 +3338,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
r = misc_register(&kvm_dev);
if (r) {
- printk(KERN_ERR "kvm: misc device register failed\n");
+ pr_err("kvm: misc device register failed\n");
goto out_unreg;
}
@@ -3361,7 +3349,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
r = kvm_init_debug();
if (r) {
- printk(KERN_ERR "kvm: create debugfs files failed\n");
+ pr_err("kvm: create debugfs files failed\n");
goto out_undebugfs;
}