Diffstat (limited to 'drivers')
-rw-r--r--  drivers/dma/dmaengine.c | 2
-rw-r--r--  drivers/dma/mv_xor.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_bios.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 55
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 40
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 42
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vce.c | 130
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/sid.h | 4
-rw-r--r--  drivers/hwmon/Kconfig | 2
-rw-r--r--  drivers/hwmon/ntc_thermistor.c | 15
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c | 67
-rw-r--r--  drivers/infiniband/hw/mlx4/mlx4_ib.h | 3
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c | 8
-rw-r--r--  drivers/iommu/Kconfig | 26
-rw-r--r--  drivers/iommu/Makefile | 1
-rw-r--r--  drivers/iommu/amd_iommu.c | 8
-rw-r--r--  drivers/iommu/amd_iommu_v2.c | 184
-rw-r--r--  drivers/iommu/arm-smmu.c | 4
-rw-r--r--  drivers/iommu/exynos-iommu.c | 1052
-rw-r--r--  drivers/iommu/ipmmu-vmsa.c | 1255
-rw-r--r--  drivers/iommu/msm_iommu_dev.c | 38
-rw-r--r--  drivers/iommu/omap-iommu.c | 31
-rw-r--r--  drivers/iommu/omap-iopgtable.h | 3
-rw-r--r--  drivers/iommu/shmobile-ipmmu.c | 20
-rw-r--r--  drivers/net/bonding/bond_alb.c | 54
-rw-r--r--  drivers/net/bonding/bond_main.c | 134
-rw-r--r--  drivers/net/bonding/bond_options.c | 1
-rw-r--r--  drivers/net/bonding/bonding.h | 1
-rw-r--r--  drivers/net/can/c_can/Kconfig | 7
-rw-r--r--  drivers/net/can/c_can/c_can.c | 36
-rw-r--r--  drivers/net/can/sja1000/peak_pci.c | 14
-rw-r--r--  drivers/net/ethernet/Kconfig | 12
-rw-r--r--  drivers/net/ethernet/Makefile | 1
-rw-r--r--  drivers/net/ethernet/altera/Makefile | 1
-rw-r--r--  drivers/net/ethernet/altera/altera_msgdma.c | 110
-rw-r--r--  drivers/net/ethernet/altera/altera_msgdmahw.h | 13
-rw-r--r--  drivers/net/ethernet/altera/altera_sgdma.c | 181
-rw-r--r--  drivers/net/ethernet/altera/altera_sgdmahw.h | 26
-rw-r--r--  drivers/net/ethernet/altera/altera_tse.h | 47
-rw-r--r--  drivers/net/ethernet/altera/altera_tse_ethtool.c | 108
-rw-r--r--  drivers/net/ethernet/altera/altera_tse_main.c | 133
-rw-r--r--  drivers/net/ethernet/altera/altera_utils.c | 20
-rw-r--r--  drivers/net/ethernet/altera/altera_utils.h | 8
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 10
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 2
-rw-r--r--  drivers/net/ethernet/ec_bhf.c | 706
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 6
-rw-r--r--  drivers/net/ethernet/jme.c | 53
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/qp.c | 35
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 54
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 16
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 57
-rw-r--r--  drivers/net/ethernet/sfc/nic.c | 14
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 4
-rw-r--r--  drivers/net/ethernet/sun/cassini.c | 2
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 17
-rw-r--r--  drivers/net/macvlan.c | 18
-rw-r--r--  drivers/net/phy/mdio-gpio.c | 4
-rw-r--r--  drivers/net/phy/phy.c | 16
-rw-r--r--  drivers/net/phy/phy_device.c | 4
-rw-r--r--  drivers/net/usb/cdc_mbim.c | 57
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_main.c | 5
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/coex.c | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h | 8
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mac80211.c | 9
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mvm.h | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/rs.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/scan.c | 55
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/utils.c | 19
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/trans.c | 10
-rw-r--r--  drivers/net/xen-netback/common.h | 2
-rw-r--r--  drivers/net/xen-netback/interface.c | 30
-rw-r--r--  drivers/net/xen-netback/netback.c | 102
-rw-r--r--  drivers/ptp/Kconfig | 3
-rw-r--r--  drivers/scsi/scsi_transport_sas.c | 3
84 files changed, 3960 insertions(+), 1318 deletions(-)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index a886713937fd..d5d30ed863ce 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1009,6 +1009,7 @@ static void dmaengine_unmap(struct kref *kref)
dma_unmap_page(dev, unmap->addr[i], unmap->len,
DMA_BIDIRECTIONAL);
}
+ cnt = unmap->map_cnt;
mempool_free(unmap, __get_unmap_pool(cnt)->pool);
}
@@ -1074,6 +1075,7 @@ dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
memset(unmap, 0, sizeof(*unmap));
kref_init(&unmap->kref);
unmap->dev = dev;
+ unmap->map_cnt = nr;
return unmap;
}
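
Note: the two added lines above work as a pair. dmaengine allocates unmap descriptors from one of several size-class mempools keyed by the number of address slots, so the free path must resolve the same pool the descriptor was allocated from; unmap->map_cnt records that capacity at allocation time, independent of how many to/from/bidi entries were consumed later. A minimal sketch of the size-class idiom (simplified, not the exact dmaengine internals):

    struct unmap_pool {
        int size;           /* max address slots objects in this pool hold */
        mempool_t *pool;
    };
    static struct unmap_pool unmap_pool[] = {
        { .size = 2 }, { .size = 16 }, { .size = 128 }, { .size = 256 },
    };

    /* Alloc and free must both land here with the same nr,
     * otherwise an object is returned to the wrong pool. */
    static struct unmap_pool *get_unmap_pool(int nr)
    {
        int i;

        for (i = 0; i < ARRAY_SIZE(unmap_pool); i++)
            if (nr <= unmap_pool[i].size)
                return &unmap_pool[i];
        BUG();
    }
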
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 766b68ed505c..394cbc5c93e3 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -191,12 +191,10 @@ static void mv_set_mode(struct mv_xor_chan *chan,
static void mv_chan_activate(struct mv_xor_chan *chan)
{
- u32 activation;
-
dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
- activation = readl_relaxed(XOR_ACTIVATION(chan));
- activation |= 0x1;
- writel_relaxed(activation, XOR_ACTIVATION(chan));
+
+ /* writel ensures all descriptors are flushed before activation */
+ writel(BIT(0), XOR_ACTIVATION(chan));
}
static char mv_chan_is_busy(struct mv_xor_chan *chan)
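
Note on the mv_xor change: writel() on ARM implies a write barrier before the MMIO store, while writel_relaxed() does not, so with the relaxed accessor the descriptor writes sitting in normal memory could still be buffered when the engine starts fetching. The patch also drops the read-modify-write and writes BIT(0) directly. Roughly (illustration only, descriptor fields assumed):

    /* Descriptor setup goes to normal memory ... */
    desc->byte_count = len;
    desc->status = BUSY;
    /* ... writel() = wmb() + store: the stores above are observable
     * by the device before the activation register is written. */
    writel(BIT(0), XOR_ACTIVATION(chan));
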
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
index 7762665ad8fd..876de9ac3793 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -1009,7 +1009,7 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id,
}
if (outp == 8)
- return false;
+ return conf;
data = exec_lookup(priv, head, outp, ctrl, dcb, &ver, &hdr, &cnt, &len, &info1);
if (data == 0x0000)
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
index 43fec17ea540..bbf117be572f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
@@ -40,6 +40,7 @@ pwm_info(struct nouveau_therm *therm, int line)
case 0x00: return 2;
case 0x19: return 1;
case 0x1c: return 0;
+ case 0x1e: return 2;
default:
break;
}
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 68528619834a..8149e7cf4303 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1642,6 +1642,7 @@ struct radeon_vce {
unsigned fb_version;
atomic_t handles[RADEON_MAX_VCE_HANDLES];
struct drm_file *filp[RADEON_MAX_VCE_HANDLES];
+ unsigned img_size[RADEON_MAX_VCE_HANDLES];
struct delayed_work idle_work;
};
@@ -1655,7 +1656,7 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
uint32_t handle, struct radeon_fence **fence);
void radeon_vce_free_handles(struct radeon_device *rdev, struct drm_file *filp);
void radeon_vce_note_usage(struct radeon_device *rdev);
-int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi);
+int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi, unsigned size);
int radeon_vce_cs_parse(struct radeon_cs_parser *p);
bool radeon_vce_semaphore_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
@@ -2640,7 +2641,8 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
#define ASIC_IS_DCE8(rdev) ((rdev->family >= CHIP_BONAIRE))
#define ASIC_IS_DCE81(rdev) ((rdev->family == CHIP_KAVERI))
#define ASIC_IS_DCE82(rdev) ((rdev->family == CHIP_BONAIRE))
-#define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI))
+#define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI) || \
+ (rdev->family == CHIP_MULLINS))
#define ASIC_IS_LOMBOK(rdev) ((rdev->ddev->pdev->device == 0x6849) || \
(rdev->ddev->pdev->device == 0x6850) || \
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index b3633d9a5317..9ab30976287d 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -196,6 +196,20 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
}
}
+ if (!found) {
+ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
+ dhandle = ACPI_HANDLE(&pdev->dev);
+ if (!dhandle)
+ continue;
+
+ status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
+ if (!ACPI_FAILURE(status)) {
+ found = true;
+ break;
+ }
+ }
+ }
+
if (!found)
return false;
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 408b6ac53f0b..f00dbbf4d806 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -999,7 +999,7 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
/* avoid high jitter with small fractional dividers */
if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV && (fb_div % 10)) {
- fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 60);
+ fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 50);
if (fb_div < fb_div_min) {
unsigned tmp = DIV_ROUND_UP(fb_div_min, fb_div);
fb_div *= tmp;
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 0cc47f12d995..eaaedba04675 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -577,28 +577,29 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
return r;
}
- r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
- if (r) {
- radeon_vm_fini(rdev, &fpriv->vm);
- kfree(fpriv);
- return r;
- }
+ if (rdev->accel_working) {
+ r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
+ if (r) {
+ radeon_vm_fini(rdev, &fpriv->vm);
+ kfree(fpriv);
+ return r;
+ }
- /* map the ib pool buffer read only into
- * virtual address space */
- bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
- rdev->ring_tmp_bo.bo);
- r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
- RADEON_VM_PAGE_READABLE |
- RADEON_VM_PAGE_SNOOPED);
+ /* map the ib pool buffer read only into
+ * virtual address space */
+ bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
+ rdev->ring_tmp_bo.bo);
+ r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
+ RADEON_VM_PAGE_READABLE |
+ RADEON_VM_PAGE_SNOOPED);
- radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
- if (r) {
- radeon_vm_fini(rdev, &fpriv->vm);
- kfree(fpriv);
- return r;
+ radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
+ if (r) {
+ radeon_vm_fini(rdev, &fpriv->vm);
+ kfree(fpriv);
+ return r;
+ }
}
-
file_priv->driver_priv = fpriv;
}
@@ -626,13 +627,15 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
struct radeon_bo_va *bo_va;
int r;
- r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
- if (!r) {
- bo_va = radeon_vm_bo_find(&fpriv->vm,
- rdev->ring_tmp_bo.bo);
- if (bo_va)
- radeon_vm_bo_rmv(rdev, bo_va);
- radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
+ if (rdev->accel_working) {
+ r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
+ if (!r) {
+ bo_va = radeon_vm_bo_find(&fpriv->vm,
+ rdev->ring_tmp_bo.bo);
+ if (bo_va)
+ radeon_vm_bo_rmv(rdev, bo_va);
+ radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
+ }
}
radeon_vm_fini(rdev, &fpriv->vm);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 19bec0dbfa38..4faa4d6f9bb4 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -458,7 +458,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
* into account. We don't want to disallow buffer moves
* completely.
*/
- if (current_domain != RADEON_GEM_DOMAIN_CPU &&
+ if ((lobj->alt_domain & current_domain) != 0 &&
(domain & current_domain) == 0 && /* will be moved */
bytes_moved > bytes_moved_threshold) {
/* don't move it */
@@ -699,22 +699,30 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
rbo = container_of(bo, struct radeon_bo, tbo);
radeon_bo_check_tiling(rbo, 0, 0);
rdev = rbo->rdev;
- if (bo->mem.mem_type == TTM_PL_VRAM) {
- size = bo->mem.num_pages << PAGE_SHIFT;
- offset = bo->mem.start << PAGE_SHIFT;
- if ((offset + size) > rdev->mc.visible_vram_size) {
- /* hurrah the memory is not visible ! */
- radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
- rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
- r = ttm_bo_validate(bo, &rbo->placement, false, false);
- if (unlikely(r != 0))
- return r;
- offset = bo->mem.start << PAGE_SHIFT;
- /* this should not happen */
- if ((offset + size) > rdev->mc.visible_vram_size)
- return -EINVAL;
- }
+ if (bo->mem.mem_type != TTM_PL_VRAM)
+ return 0;
+
+ size = bo->mem.num_pages << PAGE_SHIFT;
+ offset = bo->mem.start << PAGE_SHIFT;
+ if ((offset + size) <= rdev->mc.visible_vram_size)
+ return 0;
+
+ /* hurrah the memory is not visible ! */
+ radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
+ rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
+ r = ttm_bo_validate(bo, &rbo->placement, false, false);
+ if (unlikely(r == -ENOMEM)) {
+ radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
+ return ttm_bo_validate(bo, &rbo->placement, false, false);
+ } else if (unlikely(r != 0)) {
+ return r;
}
+
+ offset = bo->mem.start << PAGE_SHIFT;
+ /* this should never happen */
+ if ((offset + size) > rdev->mc.visible_vram_size)
+ return -EINVAL;
+
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index f30b8426eee2..53d6e1bb48dc 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -361,6 +361,11 @@ static ssize_t radeon_set_pm_profile(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private;
+ /* Can't set profile when the card is off */
+ if ((rdev->flags & RADEON_IS_PX) &&
+ (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+ return -EINVAL;
+
mutex_lock(&rdev->pm.mutex);
if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
if (strncmp("default", buf, strlen("default")) == 0)
@@ -409,6 +414,13 @@ static ssize_t radeon_set_pm_method(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private;
+ /* Can't set method when the card is off */
+ if ((rdev->flags & RADEON_IS_PX) &&
+ (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
+ count = -EINVAL;
+ goto fail;
+ }
+
/* we don't support the legacy modes with dpm */
if (rdev->pm.pm_method == PM_METHOD_DPM) {
count = -EINVAL;
@@ -446,6 +458,10 @@ static ssize_t radeon_get_dpm_state(struct device *dev,
struct radeon_device *rdev = ddev->dev_private;
enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
+ if ((rdev->flags & RADEON_IS_PX) &&
+ (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+ return snprintf(buf, PAGE_SIZE, "off\n");
+
return snprintf(buf, PAGE_SIZE, "%s\n",
(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
@@ -459,6 +475,11 @@ static ssize_t radeon_set_dpm_state(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private;
+ /* Can't set dpm state when the card is off */
+ if ((rdev->flags & RADEON_IS_PX) &&
+ (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+ return -EINVAL;
+
mutex_lock(&rdev->pm.mutex);
if (strncmp("battery", buf, strlen("battery")) == 0)
rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
@@ -485,6 +506,10 @@ static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev,
struct radeon_device *rdev = ddev->dev_private;
enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
+ if ((rdev->flags & RADEON_IS_PX) &&
+ (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+ return snprintf(buf, PAGE_SIZE, "off\n");
+
return snprintf(buf, PAGE_SIZE, "%s\n",
(level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" :
(level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
@@ -500,6 +525,11 @@ static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev,
enum radeon_dpm_forced_level level;
int ret = 0;
+ /* Can't force performance level when the card is off */
+ if ((rdev->flags & RADEON_IS_PX) &&
+ (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+ return -EINVAL;
+
mutex_lock(&rdev->pm.mutex);
if (strncmp("low", buf, strlen("low")) == 0) {
level = RADEON_DPM_FORCED_LEVEL_LOW;
@@ -538,8 +568,14 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
char *buf)
{
struct radeon_device *rdev = dev_get_drvdata(dev);
+ struct drm_device *ddev = rdev->ddev;
int temp;
+ /* Can't get temperature when the card is off */
+ if ((rdev->flags & RADEON_IS_PX) &&
+ (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+ return -EINVAL;
+
if (rdev->asic->pm.get_temperature)
temp = radeon_get_temperature(rdev);
else
@@ -1614,8 +1650,12 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
+ struct drm_device *ddev = rdev->ddev;
- if (rdev->pm.dpm_enabled) {
+ if ((rdev->flags & RADEON_IS_PX) &&
+ (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
+ seq_printf(m, "PX asic powered off\n");
+ } else if (rdev->pm.dpm_enabled) {
mutex_lock(&rdev->pm.mutex);
if (rdev->asic->dpm.debugfs_print_current_performance_level)
radeon_dpm_debugfs_print_current_performance_level(rdev, m);
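
Note: every handler above gains the same guard. On PowerXpress (PX) laptops the discrete GPU can be powered off by vga_switcheroo; while switch_power_state != DRM_SWITCH_POWER_ON its registers are unreachable, so sysfs writes must fail with -EINVAL and reads report "off" instead of touching hardware. A hypothetical helper capturing the repeated condition (sketch, not part of the patch):

    /* Hypothetical: true when a PX dGPU is currently powered down. */
    static bool radeon_px_asic_off(struct radeon_device *rdev)
    {
        return (rdev->flags & RADEON_IS_PX) &&
               (rdev->ddev->switch_power_state != DRM_SWITCH_POWER_ON);
    }
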
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index f73324c81491..3971d968af6c 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -443,13 +443,16 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
* @p: parser context
* @lo: address of lower dword
* @hi: address of higher dword
+ * @size: size of checker for relocation buffer
*
* Patch relocation inside command stream with real buffer address
*/
-int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi)
+int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
+ unsigned size)
{
struct radeon_cs_chunk *relocs_chunk;
- uint64_t offset;
+ struct radeon_cs_reloc *reloc;
+ uint64_t start, end, offset;
unsigned idx;
relocs_chunk = &p->chunks[p->chunk_relocs_idx];
@@ -462,15 +465,60 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi)
return -EINVAL;
}
- offset += p->relocs_ptr[(idx / 4)]->gpu_offset;
+ reloc = p->relocs_ptr[(idx / 4)];
+ start = reloc->gpu_offset;
+ end = start + radeon_bo_size(reloc->robj);
+ start += offset;
- p->ib.ptr[lo] = offset & 0xFFFFFFFF;
- p->ib.ptr[hi] = offset >> 32;
+ p->ib.ptr[lo] = start & 0xFFFFFFFF;
+ p->ib.ptr[hi] = start >> 32;
+
+ if (end <= start) {
+ DRM_ERROR("invalid reloc offset %llX!\n", offset);
+ return -EINVAL;
+ }
+ if ((end - start) < size) {
+ DRM_ERROR("buffer to small (%d / %d)!\n",
+ (unsigned)(end - start), size);
+ return -EINVAL;
+ }
return 0;
}
/**
+ * radeon_vce_validate_handle - validate stream handle
+ *
+ * @p: parser context
+ * @handle: handle to validate
+ *
+ * Validates the handle and returns the found session index, or -EINVAL
+ * if we don't have another free session index.
+ */
+int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle)
+{
+ unsigned i;
+
+ /* validate the handle */
+ for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
+ if (atomic_read(&p->rdev->vce.handles[i]) == handle)
+ return i;
+ }
+
+ /* handle not found try to alloc a new one */
+ for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
+ if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) {
+ p->rdev->vce.filp[i] = p->filp;
+ p->rdev->vce.img_size[i] = 0;
+ return i;
+ }
+ }
+
+ DRM_ERROR("No more free VCE handles!\n");
+ return -EINVAL;
+}
+
+/**
* radeon_vce_cs_parse - parse and validate the command stream
*
* @p: parser context
@@ -478,8 +526,10 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi)
*/
int radeon_vce_cs_parse(struct radeon_cs_parser *p)
{
- uint32_t handle = 0;
- bool destroy = false;
+ int session_idx = -1;
+ bool destroyed = false;
+ uint32_t tmp, handle = 0;
+ uint32_t *size = &tmp;
int i, r;
while (p->idx < p->chunks[p->chunk_ib_idx].length_dw) {
@@ -491,13 +541,29 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
return -EINVAL;
}
+ if (destroyed) {
+ DRM_ERROR("No other command allowed after destroy!\n");
+ return -EINVAL;
+ }
+
switch (cmd) {
case 0x00000001: // session
handle = radeon_get_ib_value(p, p->idx + 2);
+ session_idx = radeon_vce_validate_handle(p, handle);
+ if (session_idx < 0)
+ return session_idx;
+ size = &p->rdev->vce.img_size[session_idx];
break;
case 0x00000002: // task info
+ break;
+
case 0x01000001: // create
+ *size = radeon_get_ib_value(p, p->idx + 8) *
+ radeon_get_ib_value(p, p->idx + 10) *
+ 8 * 3 / 2;
+ break;
+
case 0x04000001: // config extension
case 0x04000002: // pic control
case 0x04000005: // rate control
@@ -506,23 +572,39 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
break;
case 0x03000001: // encode
- r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9);
+ r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9,
+ *size);
if (r)
return r;
- r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11);
+ r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11,
+ *size / 3);
if (r)
return r;
break;
case 0x02000001: // destroy
- destroy = true;
+ destroyed = true;
break;
case 0x05000001: // context buffer
+ r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
+ *size * 2);
+ if (r)
+ return r;
+ break;
+
case 0x05000004: // video bitstream buffer
+ tmp = radeon_get_ib_value(p, p->idx + 4);
+ r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
+ tmp);
+ if (r)
+ return r;
+ break;
+
case 0x05000005: // feedback buffer
- r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2);
+ r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
+ 4096);
if (r)
return r;
break;
@@ -532,33 +614,21 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
return -EINVAL;
}
+ if (session_idx == -1) {
+ DRM_ERROR("no session command at start of IB\n");
+ return -EINVAL;
+ }
+
p->idx += len / 4;
}
- if (destroy) {
+ if (destroyed) {
/* IB contains a destroy msg, free the handle */
for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i)
atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0);
-
- return 0;
- }
-
- /* create or encode, validate the handle */
- for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
- if (atomic_read(&p->rdev->vce.handles[i]) == handle)
- return 0;
}
- /* handle not found try to alloc a new one */
- for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
- if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) {
- p->rdev->vce.filp[i] = p->filp;
- return 0;
- }
- }
-
- DRM_ERROR("No more free VCE handles!\n");
- return -EINVAL;
+ return 0;
}
/**
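
Note: radeon_vce_cs_parse() now delegates to radeon_vce_validate_handle(), which uses the classic compare-and-swap slot-claim idiom: atomic_cmpxchg(&slot, 0, handle) returns the previous value, so it returns 0 exactly once per free slot, for the caller that wins the race; no lock is needed. The core of the idiom in isolation (sketch):

    /* Claim a free slot for 'handle'; return slot index or -EINVAL. */
    for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
        /* old value was 0 => this caller now owns slot i */
        if (atomic_cmpxchg(&handles[i], 0, handle) == 0)
            return i;
    }
    return -EINVAL;    /* all slots in use */
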
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 2aae6ce49d32..d9ab99f47612 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -595,7 +595,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
ndw = 64;
/* assume the worst case */
- ndw += vm->max_pde_used * 12;
+ ndw += vm->max_pde_used * 16;
/* update too big for an IB */
if (ndw > 0xfffff)
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 683532f84931..7321283602ce 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -107,8 +107,8 @@
#define SPLL_CHG_STATUS (1 << 1)
#define SPLL_CNTL_MODE 0x618
#define SPLL_SW_DIR_CONTROL (1 << 0)
-# define SPLL_REFCLK_SEL(x) ((x) << 8)
-# define SPLL_REFCLK_SEL_MASK 0xFF00
+# define SPLL_REFCLK_SEL(x) ((x) << 26)
+# define SPLL_REFCLK_SEL_MASK (3 << 26)
#define CG_SPLL_SPREAD_SPECTRUM 0x620
#define SSEN (1 << 0)
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index bc196f49ec53..4af0da96c2e2 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1053,7 +1053,7 @@ config SENSORS_PC87427
config SENSORS_NTC_THERMISTOR
tristate "NTC thermistor support"
- depends on (!OF && !IIO) || (OF && IIO)
+ depends on !OF || IIO=n || IIO
help
This driver supports NTC thermistors sensor reading and its
interpretation. The driver can also monitor the temperature and
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index 8a17f01e8672..e76feb86a1d4 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -44,6 +44,7 @@ struct ntc_compensation {
unsigned int ohm;
};
+/* Order matters, ntc_match references the entries by index */
static const struct platform_device_id ntc_thermistor_id[] = {
{ "ncp15wb473", TYPE_NCPXXWB473 },
{ "ncp18wb473", TYPE_NCPXXWB473 },
@@ -141,7 +142,7 @@ struct ntc_data {
char name[PLATFORM_NAME_SIZE];
};
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && IS_ENABLED(CONFIG_IIO)
static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
{
struct iio_channel *channel = pdata->chan;
@@ -163,15 +164,15 @@ static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
static const struct of_device_id ntc_match[] = {
{ .compatible = "ntc,ncp15wb473",
- .data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
+ .data = &ntc_thermistor_id[0] },
{ .compatible = "ntc,ncp18wb473",
- .data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
+ .data = &ntc_thermistor_id[1] },
{ .compatible = "ntc,ncp21wb473",
- .data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
+ .data = &ntc_thermistor_id[2] },
{ .compatible = "ntc,ncp03wb473",
- .data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
+ .data = &ntc_thermistor_id[3] },
{ .compatible = "ntc,ncp15wl333",
- .data = &ntc_thermistor_id[TYPE_NCPXXWL333] },
+ .data = &ntc_thermistor_id[4] },
{ },
};
MODULE_DEVICE_TABLE(of, ntc_match);
@@ -223,6 +224,8 @@ ntc_thermistor_parse_dt(struct platform_device *pdev)
return NULL;
}
+#define ntc_match NULL
+
static void ntc_iio_channel_release(struct ntc_thermistor_platform_data *pdata)
{ }
#endif
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 1b6dbe156a37..199c7896f081 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -48,6 +48,7 @@
#include <linux/mlx4/driver.h>
#include <linux/mlx4/cmd.h>
+#include <linux/mlx4/qp.h>
#include "mlx4_ib.h"
#include "user.h"
@@ -1614,6 +1615,53 @@ static int mlx4_ib_inet6_event(struct notifier_block *this, unsigned long event,
}
#endif
+#define MLX4_IB_INVALID_MAC ((u64)-1)
+static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
+ struct net_device *dev,
+ int port)
+{
+ u64 new_smac = 0;
+ u64 release_mac = MLX4_IB_INVALID_MAC;
+ struct mlx4_ib_qp *qp;
+
+ read_lock(&dev_base_lock);
+ new_smac = mlx4_mac_to_u64(dev->dev_addr);
+ read_unlock(&dev_base_lock);
+
+ mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
+ qp = ibdev->qp1_proxy[port - 1];
+ if (qp) {
+ int new_smac_index;
+ u64 old_smac = qp->pri.smac;
+ struct mlx4_update_qp_params update_params;
+
+ if (new_smac == old_smac)
+ goto unlock;
+
+ new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac);
+
+ if (new_smac_index < 0)
+ goto unlock;
+
+ update_params.smac_index = new_smac_index;
+ if (mlx4_update_qp(ibdev->dev, &qp->mqp, MLX4_UPDATE_QP_SMAC,
+ &update_params)) {
+ release_mac = new_smac;
+ goto unlock;
+ }
+
+ qp->pri.smac = new_smac;
+ qp->pri.smac_index = new_smac_index;
+
+ release_mac = old_smac;
+ }
+
+unlock:
+ mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
+ if (release_mac != MLX4_IB_INVALID_MAC)
+ mlx4_unregister_mac(ibdev->dev, port, release_mac);
+}
+
static void mlx4_ib_get_dev_addr(struct net_device *dev,
struct mlx4_ib_dev *ibdev, u8 port)
{
@@ -1689,9 +1737,13 @@ static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
return 0;
}
-static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
+static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
+ struct net_device *dev,
+ unsigned long event)
+
{
struct mlx4_ib_iboe *iboe;
+ int update_qps_port = -1;
int port;
iboe = &ibdev->iboe;
@@ -1719,6 +1771,11 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
}
curr_master = iboe->masters[port - 1];
+ if (dev == iboe->netdevs[port - 1] &&
+ (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
+ event == NETDEV_UP || event == NETDEV_CHANGE))
+ update_qps_port = port;
+
if (curr_netdev) {
port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ?
IB_PORT_ACTIVE : IB_PORT_DOWN;
@@ -1752,6 +1809,9 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
}
spin_unlock(&iboe->lock);
+
+ if (update_qps_port > 0)
+ mlx4_ib_update_qps(ibdev, dev, update_qps_port);
}
static int mlx4_ib_netdev_event(struct notifier_block *this,
@@ -1764,7 +1824,7 @@ static int mlx4_ib_netdev_event(struct notifier_block *this,
return NOTIFY_DONE;
ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
- mlx4_ib_scan_netdevs(ibdev);
+ mlx4_ib_scan_netdevs(ibdev, dev, event);
return NOTIFY_DONE;
}
@@ -2043,6 +2103,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
goto err_map;
for (i = 0; i < ibdev->num_ports; ++i) {
+ mutex_init(&ibdev->qp1_proxy_lock[i]);
if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
IB_LINK_LAYER_ETHERNET) {
err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]);
@@ -2126,7 +2187,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
for (i = 1 ; i <= ibdev->num_ports ; ++i)
reset_gid_table(ibdev, i);
rtnl_lock();
- mlx4_ib_scan_netdevs(ibdev);
+ mlx4_ib_scan_netdevs(ibdev, NULL, 0);
rtnl_unlock();
mlx4_ib_init_gid_table(ibdev);
}
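
Note: mlx4_ib_update_qps() above is a make-before-break swap: the new MAC is registered first, the QP is updated to the new SMAC index, and only then is exactly one MAC reference released; which one depends on whether the update succeeded. The shape of the pattern (sketch):

    new = register_mac(new_mac);               /* acquire replacement  */
    if (update_qp(qp, new) == 0)
        release = old;                         /* switched: drop old   */
    else
        release = new;                         /* failed: drop new     */
    unregister_mac(release);                   /* exactly one released */
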
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index f589522fddfd..66b0b7dbd9f4 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -522,6 +522,9 @@ struct mlx4_ib_dev {
int steer_qpn_count;
int steer_qpn_base;
int steering_support;
+ struct mlx4_ib_qp *qp1_proxy[MLX4_MAX_PORTS];
+ /* lock when destroying qp1_proxy and getting netdev events */
+ struct mutex qp1_proxy_lock[MLX4_MAX_PORTS];
};
struct ib_event_work {
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 41308af4163c..dc57482ae7af 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1132,6 +1132,12 @@ int mlx4_ib_destroy_qp(struct ib_qp *qp)
if (is_qp0(dev, mqp))
mlx4_CLOSE_PORT(dev->dev, mqp->port);
+ if (dev->qp1_proxy[mqp->port - 1] == mqp) {
+ mutex_lock(&dev->qp1_proxy_lock[mqp->port - 1]);
+ dev->qp1_proxy[mqp->port - 1] = NULL;
+ mutex_unlock(&dev->qp1_proxy_lock[mqp->port - 1]);
+ }
+
pd = get_pd(mqp);
destroy_qp_common(dev, mqp, !!pd->ibpd.uobject);
@@ -1646,6 +1652,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context);
if (err)
return -EINVAL;
+ if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
+ dev->qp1_proxy[qp->port - 1] = qp;
}
}
}
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index df56e4c74a7e..d260605e6d5f 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -178,13 +178,13 @@ config TEGRA_IOMMU_SMMU
config EXYNOS_IOMMU
bool "Exynos IOMMU Support"
- depends on ARCH_EXYNOS && EXYNOS_DEV_SYSMMU
+ depends on ARCH_EXYNOS
select IOMMU_API
help
- Support for the IOMMU(System MMU) of Samsung Exynos application
- processor family. This enables H/W multimedia accellerators to see
- non-linear physical memory chunks as a linear memory in their
- address spaces
+ Support for the IOMMU (System MMU) of Samsung Exynos application
+ processor family. This enables H/W multimedia accelerators to see
+ non-linear physical memory chunks as linear memory in their
+ address space.
If unsure, say N here.
@@ -193,9 +193,9 @@ config EXYNOS_IOMMU_DEBUG
depends on EXYNOS_IOMMU
help
Select this to see the detailed log message that shows what
- happens in the IOMMU driver
+ happens in the IOMMU driver.
- Say N unless you need kernel log message for IOMMU debugging
+ Say N unless you need kernel log messages for IOMMU debugging.
config SHMOBILE_IPMMU
bool
@@ -272,6 +272,18 @@ config SHMOBILE_IOMMU_L1SIZE
default 256 if SHMOBILE_IOMMU_ADDRSIZE_64MB
default 128 if SHMOBILE_IOMMU_ADDRSIZE_32MB
+config IPMMU_VMSA
+ bool "Renesas VMSA-compatible IPMMU"
+ depends on ARM_LPAE
+ depends on ARCH_SHMOBILE || COMPILE_TEST
+ select IOMMU_API
+ select ARM_DMA_USE_IOMMU
+ help
+ Support for the Renesas VMSA-compatible IPMMU found in the
+ R-Mobile APE6 and R-Car H2/M2 SoCs.
+
+ If unsure, say N.
+
config SPAPR_TCE_IOMMU
bool "sPAPR TCE IOMMU Support"
depends on PPC_POWERNV || PPC_PSERIES
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 5d58bf16e9e3..8893bad048e0 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
obj-$(CONFIG_ARM_SMMU) += arm-smmu.o
obj-$(CONFIG_DMAR_TABLE) += dmar.o
obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o
+obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o
obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o
obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
obj-$(CONFIG_OMAP_IOMMU) += omap-iommu2.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 57068e8035b5..4aec6a29e316 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3499,8 +3499,6 @@ int __init amd_iommu_init_passthrough(void)
{
struct iommu_dev_data *dev_data;
struct pci_dev *dev = NULL;
- struct amd_iommu *iommu;
- u16 devid;
int ret;
ret = alloc_passthrough_domain();
@@ -3514,12 +3512,6 @@ int __init amd_iommu_init_passthrough(void)
dev_data = get_dev_data(&dev->dev);
dev_data->passthrough = true;
- devid = get_device_id(&dev->dev);
-
- iommu = amd_iommu_rlookup_table[devid];
- if (!iommu)
- continue;
-
attach_device(&dev->dev, pt_domain);
}
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 203b2e6a91cf..d4daa05efe60 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -45,6 +45,8 @@ struct pri_queue {
struct pasid_state {
struct list_head list; /* For global state-list */
atomic_t count; /* Reference count */
+ atomic_t mmu_notifier_count; /* Counting nested mmu_notifier
+ calls */
struct task_struct *task; /* Task bound to this PASID */
struct mm_struct *mm; /* mm_struct for the faults */
struct mmu_notifier mn; /* mmu_notifier handle */
@@ -56,6 +58,8 @@ struct pasid_state {
};
struct device_state {
+ struct list_head list;
+ u16 devid;
atomic_t count;
struct pci_dev *pdev;
struct pasid_state **states;
@@ -81,13 +85,9 @@ struct fault {
u16 flags;
};
-static struct device_state **state_table;
+static LIST_HEAD(state_list);
static spinlock_t state_lock;
-/* List and lock for all pasid_states */
-static LIST_HEAD(pasid_state_list);
-static DEFINE_SPINLOCK(ps_lock);
-
static struct workqueue_struct *iommu_wq;
/*
@@ -99,7 +99,6 @@ static u64 *empty_page_table;
static void free_pasid_states(struct device_state *dev_state);
static void unbind_pasid(struct device_state *dev_state, int pasid);
-static int task_exit(struct notifier_block *nb, unsigned long e, void *data);
static u16 device_id(struct pci_dev *pdev)
{
@@ -111,13 +110,25 @@ static u16 device_id(struct pci_dev *pdev)
return devid;
}
+static struct device_state *__get_device_state(u16 devid)
+{
+ struct device_state *dev_state;
+
+ list_for_each_entry(dev_state, &state_list, list) {
+ if (dev_state->devid == devid)
+ return dev_state;
+ }
+
+ return NULL;
+}
+
static struct device_state *get_device_state(u16 devid)
{
struct device_state *dev_state;
unsigned long flags;
spin_lock_irqsave(&state_lock, flags);
- dev_state = state_table[devid];
+ dev_state = __get_device_state(devid);
if (dev_state != NULL)
atomic_inc(&dev_state->count);
spin_unlock_irqrestore(&state_lock, flags);
@@ -158,29 +169,6 @@ static void put_device_state_wait(struct device_state *dev_state)
free_device_state(dev_state);
}
-static struct notifier_block profile_nb = {
- .notifier_call = task_exit,
-};
-
-static void link_pasid_state(struct pasid_state *pasid_state)
-{
- spin_lock(&ps_lock);
- list_add_tail(&pasid_state->list, &pasid_state_list);
- spin_unlock(&ps_lock);
-}
-
-static void __unlink_pasid_state(struct pasid_state *pasid_state)
-{
- list_del(&pasid_state->list);
-}
-
-static void unlink_pasid_state(struct pasid_state *pasid_state)
-{
- spin_lock(&ps_lock);
- __unlink_pasid_state(pasid_state);
- spin_unlock(&ps_lock);
-}
-
/* Must be called under dev_state->lock */
static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
int pasid, bool alloc)
@@ -337,7 +325,6 @@ static void unbind_pasid(struct device_state *dev_state, int pasid)
if (pasid_state == NULL)
return;
- unlink_pasid_state(pasid_state);
__unbind_pasid(pasid_state);
put_pasid_state_wait(pasid_state); /* Reference taken in this function */
}
@@ -379,7 +366,12 @@ static void free_pasid_states(struct device_state *dev_state)
continue;
put_pasid_state(pasid_state);
- unbind_pasid(dev_state, i);
+
+ /*
+ * This will call the mn_release function and
+ * unbind the PASID
+ */
+ mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);
}
if (dev_state->pasid_levels == 2)
@@ -443,8 +435,11 @@ static void mn_invalidate_range_start(struct mmu_notifier *mn,
pasid_state = mn_to_state(mn);
dev_state = pasid_state->device_state;
- amd_iommu_domain_set_gcr3(dev_state->domain, pasid_state->pasid,
- __pa(empty_page_table));
+ if (atomic_add_return(1, &pasid_state->mmu_notifier_count) == 1) {
+ amd_iommu_domain_set_gcr3(dev_state->domain,
+ pasid_state->pasid,
+ __pa(empty_page_table));
+ }
}
static void mn_invalidate_range_end(struct mmu_notifier *mn,
@@ -457,11 +452,31 @@ static void mn_invalidate_range_end(struct mmu_notifier *mn,
pasid_state = mn_to_state(mn);
dev_state = pasid_state->device_state;
- amd_iommu_domain_set_gcr3(dev_state->domain, pasid_state->pasid,
- __pa(pasid_state->mm->pgd));
+ if (atomic_dec_and_test(&pasid_state->mmu_notifier_count)) {
+ amd_iommu_domain_set_gcr3(dev_state->domain,
+ pasid_state->pasid,
+ __pa(pasid_state->mm->pgd));
+ }
+}
+
+static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
+{
+ struct pasid_state *pasid_state;
+ struct device_state *dev_state;
+
+ might_sleep();
+
+ pasid_state = mn_to_state(mn);
+ dev_state = pasid_state->device_state;
+
+ if (pasid_state->device_state->inv_ctx_cb)
+ dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid);
+
+ unbind_pasid(dev_state, pasid_state->pasid);
}
static struct mmu_notifier_ops iommu_mn = {
+ .release = mn_release,
.clear_flush_young = mn_clear_flush_young,
.change_pte = mn_change_pte,
.invalidate_page = mn_invalidate_page,
@@ -606,53 +621,6 @@ static struct notifier_block ppr_nb = {
.notifier_call = ppr_notifier,
};
-static int task_exit(struct notifier_block *nb, unsigned long e, void *data)
-{
- struct pasid_state *pasid_state;
- struct task_struct *task;
-
- task = data;
-
- /*
- * Using this notifier is a hack - but there is no other choice
- * at the moment. What I really want is a sleeping notifier that
- * is called when an MM goes down. But such a notifier doesn't
- * exist yet. The notifier needs to sleep because it has to make
- * sure that the device does not use the PASID and the address
- * space anymore before it is destroyed. This includes waiting
- * for pending PRI requests to pass the workqueue. The
- * MMU-Notifiers would be a good fit, but they use RCU and so
- * they are not allowed to sleep. Lets see how we can solve this
- * in a more intelligent way in the future.
- */
-again:
- spin_lock(&ps_lock);
- list_for_each_entry(pasid_state, &pasid_state_list, list) {
- struct device_state *dev_state;
- int pasid;
-
- if (pasid_state->task != task)
- continue;
-
- /* Drop Lock and unbind */
- spin_unlock(&ps_lock);
-
- dev_state = pasid_state->device_state;
- pasid = pasid_state->pasid;
-
- if (pasid_state->device_state->inv_ctx_cb)
- dev_state->inv_ctx_cb(dev_state->pdev, pasid);
-
- unbind_pasid(dev_state, pasid);
-
- /* Task may be in the list multiple times */
- goto again;
- }
- spin_unlock(&ps_lock);
-
- return NOTIFY_OK;
-}
-
int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
struct task_struct *task)
{
@@ -682,6 +650,7 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
goto out;
atomic_set(&pasid_state->count, 1);
+ atomic_set(&pasid_state->mmu_notifier_count, 0);
init_waitqueue_head(&pasid_state->wq);
spin_lock_init(&pasid_state->lock);
@@ -705,8 +674,6 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
if (ret)
goto out_clear_state;
- link_pasid_state(pasid_state);
-
return 0;
out_clear_state:
@@ -727,6 +694,7 @@ EXPORT_SYMBOL(amd_iommu_bind_pasid);
void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid)
{
+ struct pasid_state *pasid_state;
struct device_state *dev_state;
u16 devid;
@@ -743,7 +711,17 @@ void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid)
if (pasid < 0 || pasid >= dev_state->max_pasids)
goto out;
- unbind_pasid(dev_state, pasid);
+ pasid_state = get_pasid_state(dev_state, pasid);
+ if (pasid_state == NULL)
+ goto out;
+ /*
+ * Drop reference taken here. We are safe because we still hold
+ * the reference taken in the amd_iommu_bind_pasid function.
+ */
+ put_pasid_state(pasid_state);
+
+ /* This will call the mn_release function and unbind the PASID */
+ mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);
out:
put_device_state(dev_state);
@@ -773,7 +751,8 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
spin_lock_init(&dev_state->lock);
init_waitqueue_head(&dev_state->wq);
- dev_state->pdev = pdev;
+ dev_state->pdev = pdev;
+ dev_state->devid = devid;
tmp = pasids;
for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
@@ -803,13 +782,13 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
spin_lock_irqsave(&state_lock, flags);
- if (state_table[devid] != NULL) {
+ if (__get_device_state(devid) != NULL) {
spin_unlock_irqrestore(&state_lock, flags);
ret = -EBUSY;
goto out_free_domain;
}
- state_table[devid] = dev_state;
+ list_add_tail(&dev_state->list, &state_list);
spin_unlock_irqrestore(&state_lock, flags);
@@ -841,13 +820,13 @@ void amd_iommu_free_device(struct pci_dev *pdev)
spin_lock_irqsave(&state_lock, flags);
- dev_state = state_table[devid];
+ dev_state = __get_device_state(devid);
if (dev_state == NULL) {
spin_unlock_irqrestore(&state_lock, flags);
return;
}
- state_table[devid] = NULL;
+ list_del(&dev_state->list);
spin_unlock_irqrestore(&state_lock, flags);
@@ -874,7 +853,7 @@ int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
spin_lock_irqsave(&state_lock, flags);
ret = -EINVAL;
- dev_state = state_table[devid];
+ dev_state = __get_device_state(devid);
if (dev_state == NULL)
goto out_unlock;
@@ -905,7 +884,7 @@ int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
spin_lock_irqsave(&state_lock, flags);
ret = -EINVAL;
- dev_state = state_table[devid];
+ dev_state = __get_device_state(devid);
if (dev_state == NULL)
goto out_unlock;
@@ -922,7 +901,6 @@ EXPORT_SYMBOL(amd_iommu_set_invalidate_ctx_cb);
static int __init amd_iommu_v2_init(void)
{
- size_t state_table_size;
int ret;
pr_info("AMD IOMMUv2 driver by Joerg Roedel <joerg.roedel@amd.com>\n");
@@ -938,16 +916,10 @@ static int __init amd_iommu_v2_init(void)
spin_lock_init(&state_lock);
- state_table_size = MAX_DEVICES * sizeof(struct device_state *);
- state_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
- get_order(state_table_size));
- if (state_table == NULL)
- return -ENOMEM;
-
ret = -ENOMEM;
iommu_wq = create_workqueue("amd_iommu_v2");
if (iommu_wq == NULL)
- goto out_free;
+ goto out;
ret = -ENOMEM;
empty_page_table = (u64 *)get_zeroed_page(GFP_KERNEL);
@@ -955,29 +927,24 @@ static int __init amd_iommu_v2_init(void)
goto out_destroy_wq;
amd_iommu_register_ppr_notifier(&ppr_nb);
- profile_event_register(PROFILE_TASK_EXIT, &profile_nb);
return 0;
out_destroy_wq:
destroy_workqueue(iommu_wq);
-out_free:
- free_pages((unsigned long)state_table, get_order(state_table_size));
-
+out:
return ret;
}
static void __exit amd_iommu_v2_exit(void)
{
struct device_state *dev_state;
- size_t state_table_size;
int i;
if (!amd_iommu_v2_supported())
return;
- profile_event_unregister(PROFILE_TASK_EXIT, &profile_nb);
amd_iommu_unregister_ppr_notifier(&ppr_nb);
flush_workqueue(iommu_wq);
@@ -1000,9 +967,6 @@ static void __exit amd_iommu_v2_exit(void)
destroy_workqueue(iommu_wq);
- state_table_size = MAX_DEVICES * sizeof(struct device_state *);
- free_pages((unsigned long)state_table, get_order(state_table_size));
-
free_page((unsigned long)empty_page_table);
}
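
Note: two independent cleanups land in amd_iommu_v2.c. First, device_state moves from a MAX_DEVICES pointer table to a list searched by devid under state_lock, trading O(1) lookup for not preallocating pages for every possible device ID. Second, PASID teardown moves from the PROFILE_TASK_EXIT notifier hack to the mmu_notifier .release callback, which is allowed to sleep and fires when the mm goes away. The new mmu_notifier_count exists because invalidate_range_start/end pairs can nest; only the outermost pair may swap the GCR3 table. The counting idiom in isolation (sketch, set_gcr3 stands in for amd_iommu_domain_set_gcr3):

    if (atomic_add_return(1, &nest) == 1)   /* outermost start     */
        set_gcr3(__pa(empty_page_table));   /* block translations  */
    /* ... nested start/end pairs are no-ops ... */
    if (atomic_dec_and_test(&nest))         /* outermost end       */
        set_gcr3(__pa(mm->pgd));            /* restore real tables */
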
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 647c3c7fd742..1599354e974d 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -1167,7 +1167,7 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
for (i = 0; i < master->num_streamids; ++i) {
u32 idx, s2cr;
idx = master->smrs ? master->smrs[i].idx : master->streamids[i];
- s2cr = (S2CR_TYPE_TRANS << S2CR_TYPE_SHIFT) |
+ s2cr = S2CR_TYPE_TRANS |
(smmu_domain->root_cfg.cbndx << S2CR_CBNDX_SHIFT);
writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
}
@@ -1804,7 +1804,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
* allocation (PTRS_PER_PGD).
*/
#ifdef CONFIG_64BIT
- smmu->s1_output_size = min(39UL, size);
+ smmu->s1_output_size = min((unsigned long)VA_BITS, size);
#else
smmu->s1_output_size = min(32UL, size);
#endif
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 074018979cdf..99054d2c040d 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -29,7 +29,8 @@
#include <asm/cacheflush.h>
#include <asm/pgtable.h>
-#include <mach/sysmmu.h>
+typedef u32 sysmmu_iova_t;
+typedef u32 sysmmu_pte_t;
/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
@@ -44,28 +45,44 @@
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))
-#define lv1ent_fault(sent) (((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
-#define lv1ent_page(sent) ((*(sent) & 3) == 1)
+#define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
+ ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
+#define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK)
+#define lv1ent_page_zero(sent) ((*(sent) & 3) == 1)
+#define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
+ ((*(sent) & 3) == 1))
#define lv1ent_section(sent) ((*(sent) & 3) == 2)
#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)
+static u32 sysmmu_page_offset(sysmmu_iova_t iova, u32 size)
+{
+ return iova & (size - 1);
+}
+
#define section_phys(sent) (*(sent) & SECT_MASK)
-#define section_offs(iova) ((iova) & 0xFFFFF)
+#define section_offs(iova) sysmmu_page_offset((iova), SECT_SIZE)
#define lpage_phys(pent) (*(pent) & LPAGE_MASK)
-#define lpage_offs(iova) ((iova) & 0xFFFF)
+#define lpage_offs(iova) sysmmu_page_offset((iova), LPAGE_SIZE)
#define spage_phys(pent) (*(pent) & SPAGE_MASK)
-#define spage_offs(iova) ((iova) & 0xFFF)
-
-#define lv1ent_offset(iova) ((iova) >> SECT_ORDER)
-#define lv2ent_offset(iova) (((iova) & 0xFF000) >> SPAGE_ORDER)
+#define spage_offs(iova) sysmmu_page_offset((iova), SPAGE_SIZE)
#define NUM_LV1ENTRIES 4096
-#define NUM_LV2ENTRIES 256
+#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)
+
+static u32 lv1ent_offset(sysmmu_iova_t iova)
+{
+ return iova >> SECT_ORDER;
+}
+
+static u32 lv2ent_offset(sysmmu_iova_t iova)
+{
+ return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
+}
-#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(long))
+#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))
#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
@@ -80,6 +97,13 @@
#define CTRL_BLOCK 0x7
#define CTRL_DISABLE 0x0
+#define CFG_LRU 0x1
+#define CFG_QOS(n) ((n & 0xF) << 7)
+#define CFG_MASK 0x0150FFFF /* Selecting bit 0-15, 20, 22 and 24 */
+#define CFG_ACGEN (1 << 24) /* System MMU 3.3 only */
+#define CFG_SYSSEL (1 << 22) /* System MMU 3.2 only */
+#define CFG_FLPDCACHE (1 << 20) /* System MMU 3.2+ only */
+
#define REG_MMU_CTRL 0x000
#define REG_MMU_CFG 0x004
#define REG_MMU_STATUS 0x008
@@ -96,19 +120,32 @@
#define REG_MMU_VERSION 0x034
+#define MMU_MAJ_VER(val) ((val) >> 7)
+#define MMU_MIN_VER(val) ((val) & 0x7F)
+#define MMU_RAW_VER(reg) (((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */
+
+#define MAKE_MMU_VER(maj, min) ((((maj) & 0xF) << 7) | ((min) & 0x7F))
+
#define REG_PB0_SADDR 0x04C
#define REG_PB0_EADDR 0x050
#define REG_PB1_SADDR 0x054
#define REG_PB1_EADDR 0x058
-static unsigned long *section_entry(unsigned long *pgtable, unsigned long iova)
+#define has_sysmmu(dev) (dev->archdata.iommu != NULL)
+
+static struct kmem_cache *lv2table_kmem_cache;
+static sysmmu_pte_t *zero_lv2_table;
+#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))
+
+static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
{
return pgtable + lv1ent_offset(iova);
}
-static unsigned long *page_entry(unsigned long *sent, unsigned long iova)
+static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
{
- return (unsigned long *)__va(lv2table_base(sent)) + lv2ent_offset(iova);
+ return (sysmmu_pte_t *)phys_to_virt(
+ lv2table_base(sent)) + lv2ent_offset(iova);
}
enum exynos_sysmmu_inttype {
@@ -124,16 +161,6 @@ enum exynos_sysmmu_inttype {
SYSMMU_FAULTS_NUM
};
-/*
- * @itype: type of fault.
- * @pgtable_base: the physical address of page table base. This is 0 if @itype
- * is SYSMMU_BUSERROR.
- * @fault_addr: the device (virtual) address that the System MMU tried to
- * translated. This is 0 if @itype is SYSMMU_BUSERROR.
- */
-typedef int (*sysmmu_fault_handler_t)(enum exynos_sysmmu_inttype itype,
- unsigned long pgtable_base, unsigned long fault_addr);
-
static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
REG_PAGE_FAULT_ADDR,
REG_AR_FAULT_ADDR,
@@ -157,27 +184,34 @@ static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
"UNKNOWN FAULT"
};
+/* attached to dev.archdata.iommu of the master device */
+struct exynos_iommu_owner {
+ struct list_head client; /* entry of exynos_iommu_domain.clients */
+ struct device *dev;
+ struct device *sysmmu;
+ struct iommu_domain *domain;
+ void *vmm_data; /* IO virtual memory manager's data */
+ spinlock_t lock; /* Lock to preserve consistency of System MMU */
+};
+
struct exynos_iommu_domain {
struct list_head clients; /* list of sysmmu_drvdata.node */
- unsigned long *pgtable; /* lv1 page table, 16KB */
+ sysmmu_pte_t *pgtable; /* lv1 page table, 16KB */
short *lv2entcnt; /* free lv2 entry counter for each section */
spinlock_t lock; /* lock for this structure */
spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
};
struct sysmmu_drvdata {
- struct list_head node; /* entry of exynos_iommu_domain.clients */
struct device *sysmmu; /* System MMU's device descriptor */
- struct device *dev; /* Owner of system MMU */
- char *dbgname;
- int nsfrs;
- void __iomem **sfrbases;
- struct clk *clk[2];
+ struct device *master; /* Owner of system MMU */
+ void __iomem *sfrbase;
+ struct clk *clk;
+ struct clk *clk_master;
int activations;
- rwlock_t lock;
+ spinlock_t lock;
struct iommu_domain *domain;
- sysmmu_fault_handler_t fault_handler;
- unsigned long pgtable;
+ phys_addr_t pgtable;
};
static bool set_sysmmu_active(struct sysmmu_drvdata *data)
@@ -204,6 +238,11 @@ static void sysmmu_unblock(void __iomem *sfrbase)
__raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
}
+static unsigned int __raw_sysmmu_version(struct sysmmu_drvdata *data)
+{
+ return MMU_RAW_VER(__raw_readl(data->sfrbase + REG_MMU_VERSION));
+}
+
static bool sysmmu_block(void __iomem *sfrbase)
{
int i = 120;
@@ -226,434 +265,428 @@ static void __sysmmu_tlb_invalidate(void __iomem *sfrbase)
}
static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase,
- unsigned long iova)
+ sysmmu_iova_t iova, unsigned int num_inv)
{
- __raw_writel((iova & SPAGE_MASK) | 1, sfrbase + REG_MMU_FLUSH_ENTRY);
+ unsigned int i;
+
+ for (i = 0; i < num_inv; i++) {
+ __raw_writel((iova & SPAGE_MASK) | 1,
+ sfrbase + REG_MMU_FLUSH_ENTRY);
+ iova += SPAGE_SIZE;
+ }
}
static void __sysmmu_set_ptbase(void __iomem *sfrbase,
- unsigned long pgd)
+ phys_addr_t pgd)
{
- __raw_writel(0x1, sfrbase + REG_MMU_CFG); /* 16KB LV1, LRU */
__raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR);
__sysmmu_tlb_invalidate(sfrbase);
}
-static void __sysmmu_set_prefbuf(void __iomem *sfrbase, unsigned long base,
- unsigned long size, int idx)
-{
- __raw_writel(base, sfrbase + REG_PB0_SADDR + idx * 8);
- __raw_writel(size - 1 + base, sfrbase + REG_PB0_EADDR + idx * 8);
-}
-
-static void __set_fault_handler(struct sysmmu_drvdata *data,
- sysmmu_fault_handler_t handler)
-{
- unsigned long flags;
-
- write_lock_irqsave(&data->lock, flags);
- data->fault_handler = handler;
- write_unlock_irqrestore(&data->lock, flags);
-}
-
-void exynos_sysmmu_set_fault_handler(struct device *dev,
- sysmmu_fault_handler_t handler)
-{
- struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
-
- __set_fault_handler(data, handler);
-}
-
-static int default_fault_handler(enum exynos_sysmmu_inttype itype,
- unsigned long pgtable_base, unsigned long fault_addr)
+static void show_fault_information(const char *name,
+ enum exynos_sysmmu_inttype itype,
+ phys_addr_t pgtable_base, sysmmu_iova_t fault_addr)
{
- unsigned long *ent;
+ sysmmu_pte_t *ent;
if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
itype = SYSMMU_FAULT_UNKNOWN;
- pr_err("%s occurred at 0x%lx(Page table base: 0x%lx)\n",
- sysmmu_fault_name[itype], fault_addr, pgtable_base);
+ pr_err("%s occurred at %#x by %s(Page table base: %pa)\n",
+ sysmmu_fault_name[itype], fault_addr, name, &pgtable_base);
- ent = section_entry(__va(pgtable_base), fault_addr);
- pr_err("\tLv1 entry: 0x%lx\n", *ent);
+ ent = section_entry(phys_to_virt(pgtable_base), fault_addr);
+ pr_err("\tLv1 entry: %#x\n", *ent);
if (lv1ent_page(ent)) {
ent = page_entry(ent, fault_addr);
- pr_err("\t Lv2 entry: 0x%lx\n", *ent);
+ pr_err("\t Lv2 entry: %#x\n", *ent);
}
-
- pr_err("Generating Kernel OOPS... because it is unrecoverable.\n");
-
- BUG();
-
- return 0;
}
static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
/* SYSMMU is in blocked state when an interrupt occurred. */
struct sysmmu_drvdata *data = dev_id;
- struct resource *irqres;
- struct platform_device *pdev;
enum exynos_sysmmu_inttype itype;
- unsigned long addr = -1;
-
- int i, ret = -ENOSYS;
-
- read_lock(&data->lock);
+ sysmmu_iova_t addr = -1;
+ int ret = -ENOSYS;
WARN_ON(!is_sysmmu_active(data));
- pdev = to_platform_device(data->sysmmu);
- for (i = 0; i < (pdev->num_resources / 2); i++) {
- irqres = platform_get_resource(pdev, IORESOURCE_IRQ, i);
- if (irqres && ((int)irqres->start == irq))
- break;
- }
+ spin_lock(&data->lock);
- if (i == pdev->num_resources) {
+ if (!IS_ERR(data->clk_master))
+ clk_enable(data->clk_master);
+
+ itype = (enum exynos_sysmmu_inttype)
+ __ffs(__raw_readl(data->sfrbase + REG_INT_STATUS));
+ if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
itype = SYSMMU_FAULT_UNKNOWN;
+ else
+ addr = __raw_readl(data->sfrbase + fault_reg_offset[itype]);
+
+ if (itype == SYSMMU_FAULT_UNKNOWN) {
+ pr_err("%s: Fault is not occurred by System MMU '%s'!\n",
+ __func__, dev_name(data->sysmmu));
+ pr_err("%s: Please check if IRQ is correctly configured.\n",
+ __func__);
+ BUG();
} else {
- itype = (enum exynos_sysmmu_inttype)
- __ffs(__raw_readl(data->sfrbases[i] + REG_INT_STATUS));
- if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
- itype = SYSMMU_FAULT_UNKNOWN;
- else
- addr = __raw_readl(
- data->sfrbases[i] + fault_reg_offset[itype]);
+ unsigned int base =
+ __raw_readl(data->sfrbase + REG_PT_BASE_ADDR);
+ show_fault_information(dev_name(data->sysmmu),
+ itype, base, addr);
+ if (data->domain)
+ ret = report_iommu_fault(data->domain,
+ data->master, addr, itype);
}
- if (data->domain)
- ret = report_iommu_fault(data->domain, data->dev,
- addr, itype);
+ /* fault is not recovered by fault handler */
+ BUG_ON(ret != 0);
- if ((ret == -ENOSYS) && data->fault_handler) {
- unsigned long base = data->pgtable;
- if (itype != SYSMMU_FAULT_UNKNOWN)
- base = __raw_readl(
- data->sfrbases[i] + REG_PT_BASE_ADDR);
- ret = data->fault_handler(itype, base, addr);
- }
+ __raw_writel(1 << itype, data->sfrbase + REG_INT_CLEAR);
- if (!ret && (itype != SYSMMU_FAULT_UNKNOWN))
- __raw_writel(1 << itype, data->sfrbases[i] + REG_INT_CLEAR);
- else
- dev_dbg(data->sysmmu, "(%s) %s is not handled.\n",
- data->dbgname, sysmmu_fault_name[itype]);
+ sysmmu_unblock(data->sfrbase);
- if (itype != SYSMMU_FAULT_UNKNOWN)
- sysmmu_unblock(data->sfrbases[i]);
+ if (!IS_ERR(data->clk_master))
+ clk_disable(data->clk_master);
- read_unlock(&data->lock);
+ spin_unlock(&data->lock);
return IRQ_HANDLED;
}
-static bool __exynos_sysmmu_disable(struct sysmmu_drvdata *data)
+static void __sysmmu_disable_nocount(struct sysmmu_drvdata *data)
{
+ if (!IS_ERR(data->clk_master))
+ clk_enable(data->clk_master);
+
+ __raw_writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
+ __raw_writel(0, data->sfrbase + REG_MMU_CFG);
+
+ clk_disable(data->clk);
+ if (!IS_ERR(data->clk_master))
+ clk_disable(data->clk_master);
+}
+
+static bool __sysmmu_disable(struct sysmmu_drvdata *data)
+{
+ bool disabled;
unsigned long flags;
- bool disabled = false;
- int i;
- write_lock_irqsave(&data->lock, flags);
+ spin_lock_irqsave(&data->lock, flags);
- if (!set_sysmmu_inactive(data))
- goto finish;
+ disabled = set_sysmmu_inactive(data);
- for (i = 0; i < data->nsfrs; i++)
- __raw_writel(CTRL_DISABLE, data->sfrbases[i] + REG_MMU_CTRL);
+ if (disabled) {
+ data->pgtable = 0;
+ data->domain = NULL;
- if (data->clk[1])
- clk_disable(data->clk[1]);
- if (data->clk[0])
- clk_disable(data->clk[0]);
+ __sysmmu_disable_nocount(data);
- disabled = true;
- data->pgtable = 0;
- data->domain = NULL;
-finish:
- write_unlock_irqrestore(&data->lock, flags);
+ dev_dbg(data->sysmmu, "Disabled\n");
+ } else {
+ dev_dbg(data->sysmmu, "%d times left to disable\n",
+ data->activations);
+ }
- if (disabled)
- dev_dbg(data->sysmmu, "(%s) Disabled\n", data->dbgname);
- else
- dev_dbg(data->sysmmu, "(%s) %d times left to be disabled\n",
- data->dbgname, data->activations);
+ spin_unlock_irqrestore(&data->lock, flags);
return disabled;
}
-/* __exynos_sysmmu_enable: Enables System MMU
- *
- * returns -error if an error occurred and System MMU is not enabled,
- * 0 if the System MMU has been just enabled and 1 if System MMU was already
- * enabled before.
- */
-static int __exynos_sysmmu_enable(struct sysmmu_drvdata *data,
- unsigned long pgtable, struct iommu_domain *domain)
+static void __sysmmu_init_config(struct sysmmu_drvdata *data)
{
- int i, ret = 0;
- unsigned long flags;
+ unsigned int cfg = CFG_LRU | CFG_QOS(15);
+ unsigned int ver;
+
+ ver = __raw_sysmmu_version(data);
+ if (MMU_MAJ_VER(ver) == 3) {
+ if (MMU_MIN_VER(ver) >= 2) {
+ cfg |= CFG_FLPDCACHE;
+ if (MMU_MIN_VER(ver) == 3) {
+ cfg |= CFG_ACGEN;
+ cfg &= ~CFG_LRU;
+ } else {
+ cfg |= CFG_SYSSEL;
+ }
+ }
+ }
- write_lock_irqsave(&data->lock, flags);
+ __raw_writel(cfg, data->sfrbase + REG_MMU_CFG);
+}
- if (!set_sysmmu_active(data)) {
- if (WARN_ON(pgtable != data->pgtable)) {
- ret = -EBUSY;
- set_sysmmu_inactive(data);
- } else {
- ret = 1;
- }
+static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data)
+{
+ if (!IS_ERR(data->clk_master))
+ clk_enable(data->clk_master);
+ clk_enable(data->clk);
- dev_dbg(data->sysmmu, "(%s) Already enabled\n", data->dbgname);
- goto finish;
- }
+ __raw_writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
- if (data->clk[0])
- clk_enable(data->clk[0]);
- if (data->clk[1])
- clk_enable(data->clk[1]);
+ __sysmmu_init_config(data);
- data->pgtable = pgtable;
+ __sysmmu_set_ptbase(data->sfrbase, data->pgtable);
- for (i = 0; i < data->nsfrs; i++) {
- __sysmmu_set_ptbase(data->sfrbases[i], pgtable);
+ __raw_writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
- if ((readl(data->sfrbases[i] + REG_MMU_VERSION) >> 28) == 3) {
- /* System MMU version is 3.x */
- __raw_writel((1 << 12) | (2 << 28),
- data->sfrbases[i] + REG_MMU_CFG);
- __sysmmu_set_prefbuf(data->sfrbases[i], 0, -1, 0);
- __sysmmu_set_prefbuf(data->sfrbases[i], 0, -1, 1);
- }
+ if (!IS_ERR(data->clk_master))
+ clk_disable(data->clk_master);
+}
- __raw_writel(CTRL_ENABLE, data->sfrbases[i] + REG_MMU_CTRL);
+static int __sysmmu_enable(struct sysmmu_drvdata *data,
+ phys_addr_t pgtable, struct iommu_domain *domain)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&data->lock, flags);
+ if (set_sysmmu_active(data)) {
+ data->pgtable = pgtable;
+ data->domain = domain;
+
+ __sysmmu_enable_nocount(data);
+
+ dev_dbg(data->sysmmu, "Enabled\n");
+ } else {
+ ret = (pgtable == data->pgtable) ? 1 : -EBUSY;
+
+ dev_dbg(data->sysmmu, "already enabled\n");
}
- data->domain = domain;
+ if (WARN_ON(ret < 0))
+ set_sysmmu_inactive(data); /* decrement count */
- dev_dbg(data->sysmmu, "(%s) Enabled\n", data->dbgname);
-finish:
- write_unlock_irqrestore(&data->lock, flags);
+ spin_unlock_irqrestore(&data->lock, flags);
return ret;
}
-int exynos_sysmmu_enable(struct device *dev, unsigned long pgtable)
+/* __exynos_sysmmu_enable: Enables System MMU
+ *
+ * returns -error if an error occurred and System MMU is not enabled,
+ * 0 if the System MMU has been just enabled and 1 if System MMU was already
+ * enabled before.
+ */
+static int __exynos_sysmmu_enable(struct device *dev, phys_addr_t pgtable,
+ struct iommu_domain *domain)
{
- struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
- int ret;
+ int ret = 0;
+ unsigned long flags;
+ struct exynos_iommu_owner *owner = dev->archdata.iommu;
+ struct sysmmu_drvdata *data;
- BUG_ON(!memblock_is_memory(pgtable));
+ BUG_ON(!has_sysmmu(dev));
- ret = pm_runtime_get_sync(data->sysmmu);
- if (ret < 0) {
- dev_dbg(data->sysmmu, "(%s) Failed to enable\n", data->dbgname);
- return ret;
- }
+ spin_lock_irqsave(&owner->lock, flags);
- ret = __exynos_sysmmu_enable(data, pgtable, NULL);
- if (WARN_ON(ret < 0)) {
- pm_runtime_put(data->sysmmu);
- dev_err(data->sysmmu,
- "(%s) Already enabled with page table %#lx\n",
- data->dbgname, data->pgtable);
- } else {
- data->dev = dev;
- }
+ data = dev_get_drvdata(owner->sysmmu);
+
+ ret = __sysmmu_enable(data, pgtable, domain);
+ if (ret >= 0)
+ data->master = dev;
+
+ spin_unlock_irqrestore(&owner->lock, flags);
return ret;
}
+int exynos_sysmmu_enable(struct device *dev, phys_addr_t pgtable)
+{
+ BUG_ON(!memblock_is_memory(pgtable));
+
+ return __exynos_sysmmu_enable(dev, pgtable, NULL);
+}
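
The return convention documented above for __exynos_sysmmu_enable() surfaces here unchanged. A minimal caller sketch (illustrative only, not part of this patch; 'dev' and 'pgtable_phys' are hypothetical):

	/*
	 * ret == 0: the System MMU was just enabled with this page table,
	 * ret == 1: it was already enabled with the same page table,
	 * ret <  0: enabling failed (e.g. -EBUSY on a pgtable mismatch).
	 */
	ret = exynos_sysmmu_enable(dev, pgtable_phys);
	if (ret < 0)
		dev_err(dev, "Failed to enable System MMU: %d\n", ret);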
+
static bool exynos_sysmmu_disable(struct device *dev)
{
- struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
- bool disabled;
+ unsigned long flags;
+ bool disabled = true;
+ struct exynos_iommu_owner *owner = dev->archdata.iommu;
+ struct sysmmu_drvdata *data;
+
+ BUG_ON(!has_sysmmu(dev));
+
+ spin_lock_irqsave(&owner->lock, flags);
- disabled = __exynos_sysmmu_disable(data);
- pm_runtime_put(data->sysmmu);
+ data = dev_get_drvdata(owner->sysmmu);
+
+ disabled = __sysmmu_disable(data);
+ if (disabled)
+ data->master = NULL;
+
+ spin_unlock_irqrestore(&owner->lock, flags);
return disabled;
}
-static void sysmmu_tlb_invalidate_entry(struct device *dev, unsigned long iova)
+static void __sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
+ sysmmu_iova_t iova)
+{
+ if (__raw_sysmmu_version(data) == MAKE_MMU_VER(3, 3))
+ __raw_writel(iova | 0x1, data->sfrbase + REG_MMU_FLUSH_ENTRY);
+}
+
+static void sysmmu_tlb_invalidate_flpdcache(struct device *dev,
+ sysmmu_iova_t iova)
+{
+ unsigned long flags;
+ struct exynos_iommu_owner *owner = dev->archdata.iommu;
+ struct sysmmu_drvdata *data = dev_get_drvdata(owner->sysmmu);
+
+ if (!IS_ERR(data->clk_master))
+ clk_enable(data->clk_master);
+
+ spin_lock_irqsave(&data->lock, flags);
+ if (is_sysmmu_active(data))
+ __sysmmu_tlb_invalidate_flpdcache(data, iova);
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ if (!IS_ERR(data->clk_master))
+ clk_disable(data->clk_master);
+}
+
+static void sysmmu_tlb_invalidate_entry(struct device *dev, sysmmu_iova_t iova,
+ size_t size)
{
+ struct exynos_iommu_owner *owner = dev->archdata.iommu;
unsigned long flags;
- struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+ struct sysmmu_drvdata *data;
- read_lock_irqsave(&data->lock, flags);
+ data = dev_get_drvdata(owner->sysmmu);
+ spin_lock_irqsave(&data->lock, flags);
if (is_sysmmu_active(data)) {
- int i;
- for (i = 0; i < data->nsfrs; i++) {
- if (sysmmu_block(data->sfrbases[i])) {
- __sysmmu_tlb_invalidate_entry(
- data->sfrbases[i], iova);
- sysmmu_unblock(data->sfrbases[i]);
- }
+ unsigned int num_inv = 1;
+
+ if (!IS_ERR(data->clk_master))
+ clk_enable(data->clk_master);
+
+ /*
+ * L2TLB invalidations required:
+ * 4KB page: 1 invalidation
+ * 64KB page: 16 invalidations
+ * 1MB page: 64 invalidations
+ * because it is an 8-way set-associative TLB
+ * with 64 sets.
+ * A 1MB page can be cached in any of the 64 sets.
+ * A 64KB page can be in one of 16 consecutive sets.
+ */
+ if (MMU_MAJ_VER(__raw_sysmmu_version(data)) == 2)
+ num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);
+
+ if (sysmmu_block(data->sfrbase)) {
+ __sysmmu_tlb_invalidate_entry(
+ data->sfrbase, iova, num_inv);
+ sysmmu_unblock(data->sfrbase);
}
+ if (!IS_ERR(data->clk_master))
+ clk_disable(data->clk_master);
} else {
- dev_dbg(data->sysmmu,
- "(%s) Disabled. Skipping invalidating TLB.\n",
- data->dbgname);
+ dev_dbg(dev, "disabled. Skipping TLB invalidation @ %#x\n",
+ iova);
}
-
- read_unlock_irqrestore(&data->lock, flags);
+ spin_unlock_irqrestore(&data->lock, flags);
}
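
For concreteness, the invalidation counts implied by the TLB-geometry comment above work out as follows (a worked example, not added driver code):

	/*
	 * num_inv = min(size / PAGE_SIZE, 64), with 4KiB pages:
	 *   size = SZ_4K  ->   1 invalidation
	 *   size = SZ_64K ->  16 invalidations
	 *   size = SZ_1M  -> 256, capped at the 64 TLB sets -> 64
	 */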
void exynos_sysmmu_tlb_invalidate(struct device *dev)
{
+ struct exynos_iommu_owner *owner = dev->archdata.iommu;
unsigned long flags;
- struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+ struct sysmmu_drvdata *data;
- read_lock_irqsave(&data->lock, flags);
+ data = dev_get_drvdata(owner->sysmmu);
+ spin_lock_irqsave(&data->lock, flags);
if (is_sysmmu_active(data)) {
- int i;
- for (i = 0; i < data->nsfrs; i++) {
- if (sysmmu_block(data->sfrbases[i])) {
- __sysmmu_tlb_invalidate(data->sfrbases[i]);
- sysmmu_unblock(data->sfrbases[i]);
- }
+ if (!IS_ERR(data->clk_master))
+ clk_enable(data->clk_master);
+ if (sysmmu_block(data->sfrbase)) {
+ __sysmmu_tlb_invalidate(data->sfrbase);
+ sysmmu_unblock(data->sfrbase);
}
+ if (!IS_ERR(data->clk_master))
+ clk_disable(data->clk_master);
} else {
- dev_dbg(data->sysmmu,
- "(%s) Disabled. Skipping invalidating TLB.\n",
- data->dbgname);
+ dev_dbg(dev, "disabled. Skipping TLB invalidation\n");
}
-
- read_unlock_irqrestore(&data->lock, flags);
+ spin_unlock_irqrestore(&data->lock, flags);
}
-static int exynos_sysmmu_probe(struct platform_device *pdev)
+static int __init exynos_sysmmu_probe(struct platform_device *pdev)
{
- int i, ret;
- struct device *dev;
+ int irq, ret;
+ struct device *dev = &pdev->dev;
struct sysmmu_drvdata *data;
+ struct resource *res;
- dev = &pdev->dev;
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data) {
- dev_dbg(dev, "Not enough memory\n");
- ret = -ENOMEM;
- goto err_alloc;
- }
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ data->sfrbase = devm_ioremap_resource(dev, res);
+ if (IS_ERR(data->sfrbase))
+ return PTR_ERR(data->sfrbase);
- ret = dev_set_drvdata(dev, data);
- if (ret) {
- dev_dbg(dev, "Unabled to initialize driver data\n");
- goto err_init;
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(dev, "Unable to find IRQ resource\n");
+ return irq ? irq : -EINVAL; /* 0 is not a valid IRQ */
}
- data->nsfrs = pdev->num_resources / 2;
- data->sfrbases = kmalloc(sizeof(*data->sfrbases) * data->nsfrs,
- GFP_KERNEL);
- if (data->sfrbases == NULL) {
- dev_dbg(dev, "Not enough memory\n");
- ret = -ENOMEM;
- goto err_init;
- }
-
- for (i = 0; i < data->nsfrs; i++) {
- struct resource *res;
- res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- if (!res) {
- dev_dbg(dev, "Unable to find IOMEM region\n");
- ret = -ENOENT;
- goto err_res;
- }
-
- data->sfrbases[i] = ioremap(res->start, resource_size(res));
- if (!data->sfrbases[i]) {
- dev_dbg(dev, "Unable to map IOMEM @ PA:%#x\n",
- res->start);
- ret = -ENOENT;
- goto err_res;
- }
+ ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
+ dev_name(dev), data);
+ if (ret) {
+ dev_err(dev, "Unabled to register handler of irq %d\n", irq);
+ return ret;
}
- for (i = 0; i < data->nsfrs; i++) {
- ret = platform_get_irq(pdev, i);
- if (ret <= 0) {
- dev_dbg(dev, "Unable to find IRQ resource\n");
- goto err_irq;
- }
-
- ret = request_irq(ret, exynos_sysmmu_irq, 0,
- dev_name(dev), data);
+ data->clk = devm_clk_get(dev, "sysmmu");
+ if (IS_ERR(data->clk)) {
+ dev_err(dev, "Failed to get clock!\n");
+ return PTR_ERR(data->clk);
+ } else {
+ ret = clk_prepare(data->clk);
if (ret) {
- dev_dbg(dev, "Unabled to register interrupt handler\n");
- goto err_irq;
+ dev_err(dev, "Failed to prepare clk\n");
+ return ret;
}
}
- if (dev_get_platdata(dev)) {
- char *deli, *beg;
- struct sysmmu_platform_data *platdata = dev_get_platdata(dev);
-
- beg = platdata->clockname;
-
- for (deli = beg; (*deli != '\0') && (*deli != ','); deli++)
- /* NOTHING */;
-
- if (*deli == '\0')
- deli = NULL;
- else
- *deli = '\0';
-
- data->clk[0] = clk_get(dev, beg);
- if (IS_ERR(data->clk[0])) {
- data->clk[0] = NULL;
- dev_dbg(dev, "No clock descriptor registered\n");
- }
-
- if (data->clk[0] && deli) {
- *deli = ',';
- data->clk[1] = clk_get(dev, deli + 1);
- if (IS_ERR(data->clk[1]))
- data->clk[1] = NULL;
+ data->clk_master = devm_clk_get(dev, "master");
+ if (!IS_ERR(data->clk_master)) {
+ ret = clk_prepare(data->clk_master);
+ if (ret) {
+ clk_unprepare(data->clk);
+ dev_err(dev, "Failed to prepare master's clk\n");
+ return ret;
}
-
- data->dbgname = platdata->dbgname;
}
data->sysmmu = dev;
- rwlock_init(&data->lock);
- INIT_LIST_HEAD(&data->node);
+ spin_lock_init(&data->lock);
- __set_fault_handler(data, &default_fault_handler);
+ platform_set_drvdata(pdev, data);
- if (dev->parent)
- pm_runtime_enable(dev);
+ pm_runtime_enable(dev);
- dev_dbg(dev, "(%s) Initialized\n", data->dbgname);
return 0;
-err_irq:
- while (i-- > 0) {
- int irq;
-
- irq = platform_get_irq(pdev, i);
- free_irq(irq, data);
- }
-err_res:
- while (data->nsfrs-- > 0)
- iounmap(data->sfrbases[data->nsfrs]);
- kfree(data->sfrbases);
-err_init:
- kfree(data);
-err_alloc:
- dev_err(dev, "Failed to initialize\n");
- return ret;
}
-static struct platform_driver exynos_sysmmu_driver = {
- .probe = exynos_sysmmu_probe,
- .driver = {
+static const struct of_device_id sysmmu_of_match[] __initconst = {
+ { .compatible = "samsung,exynos-sysmmu", },
+ { },
+};
+
+static struct platform_driver exynos_sysmmu_driver __refdata = {
+ .probe = exynos_sysmmu_probe,
+ .driver = {
.owner = THIS_MODULE,
.name = "exynos-sysmmu",
+ .of_match_table = sysmmu_of_match,
}
};
@@ -667,21 +700,32 @@ static inline void pgtable_flush(void *vastart, void *vaend)
static int exynos_iommu_domain_init(struct iommu_domain *domain)
{
struct exynos_iommu_domain *priv;
+ int i;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- priv->pgtable = (unsigned long *)__get_free_pages(
- GFP_KERNEL | __GFP_ZERO, 2);
+ priv->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
if (!priv->pgtable)
goto err_pgtable;
- priv->lv2entcnt = (short *)__get_free_pages(
- GFP_KERNEL | __GFP_ZERO, 1);
+ priv->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
if (!priv->lv2entcnt)
goto err_counter;
+ /* w/a of System MMU v3.3 to prevent caching of fault 1MiB mappings */
+ for (i = 0; i < NUM_LV1ENTRIES; i += 8) {
+ priv->pgtable[i + 0] = ZERO_LV2LINK;
+ priv->pgtable[i + 1] = ZERO_LV2LINK;
+ priv->pgtable[i + 2] = ZERO_LV2LINK;
+ priv->pgtable[i + 3] = ZERO_LV2LINK;
+ priv->pgtable[i + 4] = ZERO_LV2LINK;
+ priv->pgtable[i + 5] = ZERO_LV2LINK;
+ priv->pgtable[i + 6] = ZERO_LV2LINK;
+ priv->pgtable[i + 7] = ZERO_LV2LINK;
+ }
+
pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES);
spin_lock_init(&priv->lock);
@@ -705,7 +749,7 @@ err_pgtable:
static void exynos_iommu_domain_destroy(struct iommu_domain *domain)
{
struct exynos_iommu_domain *priv = domain->priv;
- struct sysmmu_drvdata *data;
+ struct exynos_iommu_owner *owner;
unsigned long flags;
int i;
@@ -713,16 +757,20 @@ static void exynos_iommu_domain_destroy(struct iommu_domain *domain)
spin_lock_irqsave(&priv->lock, flags);
- list_for_each_entry(data, &priv->clients, node) {
- while (!exynos_sysmmu_disable(data->dev))
+ list_for_each_entry(owner, &priv->clients, client) {
+ while (!exynos_sysmmu_disable(owner->dev))
; /* until System MMU is actually disabled */
}
+ while (!list_empty(&priv->clients))
+ list_del_init(priv->clients.next);
+
spin_unlock_irqrestore(&priv->lock, flags);
for (i = 0; i < NUM_LV1ENTRIES; i++)
if (lv1ent_page(priv->pgtable + i))
- kfree(__va(lv2table_base(priv->pgtable + i)));
+ kmem_cache_free(lv2table_kmem_cache,
+ phys_to_virt(lv2table_base(priv->pgtable + i)));
free_pages((unsigned long)priv->pgtable, 2);
free_pages((unsigned long)priv->lv2entcnt, 1);
@@ -733,114 +781,134 @@ static void exynos_iommu_domain_destroy(struct iommu_domain *domain)
static int exynos_iommu_attach_device(struct iommu_domain *domain,
struct device *dev)
{
- struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+ struct exynos_iommu_owner *owner = dev->archdata.iommu;
struct exynos_iommu_domain *priv = domain->priv;
+ phys_addr_t pagetable = virt_to_phys(priv->pgtable);
unsigned long flags;
int ret;
- ret = pm_runtime_get_sync(data->sysmmu);
- if (ret < 0)
- return ret;
-
- ret = 0;
-
spin_lock_irqsave(&priv->lock, flags);
- ret = __exynos_sysmmu_enable(data, __pa(priv->pgtable), domain);
-
+ ret = __exynos_sysmmu_enable(dev, pagetable, domain);
if (ret == 0) {
- /* 'data->node' must not be appeared in priv->clients */
- BUG_ON(!list_empty(&data->node));
- data->dev = dev;
- list_add_tail(&data->node, &priv->clients);
+ list_add_tail(&owner->client, &priv->clients);
+ owner->domain = domain;
}
spin_unlock_irqrestore(&priv->lock, flags);
if (ret < 0) {
- dev_err(dev, "%s: Failed to attach IOMMU with pgtable %#lx\n",
- __func__, __pa(priv->pgtable));
- pm_runtime_put(data->sysmmu);
- } else if (ret > 0) {
- dev_dbg(dev, "%s: IOMMU with pgtable 0x%lx already attached\n",
- __func__, __pa(priv->pgtable));
- } else {
- dev_dbg(dev, "%s: Attached new IOMMU with pgtable 0x%lx\n",
- __func__, __pa(priv->pgtable));
+ dev_err(dev, "%s: Failed to attach IOMMU with pgtable %pa\n",
+ __func__, &pagetable);
+ return ret;
}
+ dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa %s\n",
+ __func__, &pagetable, (ret == 0) ? "" : ", again");
+
return ret;
}
static void exynos_iommu_detach_device(struct iommu_domain *domain,
struct device *dev)
{
- struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+ struct exynos_iommu_owner *owner;
struct exynos_iommu_domain *priv = domain->priv;
- struct list_head *pos;
+ phys_addr_t pagetable = virt_to_phys(priv->pgtable);
unsigned long flags;
- bool found = false;
spin_lock_irqsave(&priv->lock, flags);
- list_for_each(pos, &priv->clients) {
- if (list_entry(pos, struct sysmmu_drvdata, node) == data) {
- found = true;
+ list_for_each_entry(owner, &priv->clients, client) {
+ if (owner == dev->archdata.iommu) {
+ if (exynos_sysmmu_disable(dev)) {
+ list_del_init(&owner->client);
+ owner->domain = NULL;
+ }
break;
}
}
- if (!found)
- goto finish;
-
- if (__exynos_sysmmu_disable(data)) {
- dev_dbg(dev, "%s: Detached IOMMU with pgtable %#lx\n",
- __func__, __pa(priv->pgtable));
- list_del_init(&data->node);
-
- } else {
- dev_dbg(dev, "%s: Detaching IOMMU with pgtable %#lx delayed",
- __func__, __pa(priv->pgtable));
- }
-
-finish:
spin_unlock_irqrestore(&priv->lock, flags);
- if (found)
- pm_runtime_put(data->sysmmu);
+ if (owner == dev->archdata.iommu)
+ dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n",
+ __func__, &pagetable);
+ else
+ dev_err(dev, "%s: No IOMMU is attached\n", __func__);
}
-static unsigned long *alloc_lv2entry(unsigned long *sent, unsigned long iova,
- short *pgcounter)
+static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *priv,
+ sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
{
+ if (lv1ent_section(sent)) {
+ WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova);
+ return ERR_PTR(-EADDRINUSE);
+ }
+
if (lv1ent_fault(sent)) {
- unsigned long *pent;
+ sysmmu_pte_t *pent;
+ bool need_flush_flpd_cache = lv1ent_zero(sent);
- pent = kzalloc(LV2TABLE_SIZE, GFP_ATOMIC);
- BUG_ON((unsigned long)pent & (LV2TABLE_SIZE - 1));
+ pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
+ BUG_ON((unsigned int)pent & (LV2TABLE_SIZE - 1));
if (!pent)
- return NULL;
+ return ERR_PTR(-ENOMEM);
- *sent = mk_lv1ent_page(__pa(pent));
+ *sent = mk_lv1ent_page(virt_to_phys(pent));
*pgcounter = NUM_LV2ENTRIES;
pgtable_flush(pent, pent + NUM_LV2ENTRIES);
pgtable_flush(sent, sent + 1);
+
+ /*
+ * If the prefetched SLPD is a fault SLPD in zero_l2_table, the
+ * FLPD cache may cache the address of zero_l2_table. This
+ * function replaces zero_l2_table with a new L2 page table to
+ * write valid mappings.
+ * Accessing the valid area may still cause a page fault since
+ * the FLPD cache may hold zero_l2_table for the valid area
+ * instead of the new L2 page table that contains the mapping
+ * information of the valid area.
+ * Thus any replacement of zero_l2_table with another valid L2
+ * page table must involve FLPD cache invalidation on System
+ * MMU v3.3.
+ * FLPD cache invalidation is performed with TLB invalidation
+ * by VPN without blocking. It is safe to invalidate the TLB
+ * without blocking because the target address of the TLB
+ * invalidation is not currently mapped.
+ */
+ if (need_flush_flpd_cache) {
+ struct exynos_iommu_owner *owner;
+
+ spin_lock(&priv->lock);
+ list_for_each_entry(owner, &priv->clients, client)
+ sysmmu_tlb_invalidate_flpdcache(
+ owner->dev, iova);
+ spin_unlock(&priv->lock);
+ }
}
return page_entry(sent, iova);
}
-static int lv1set_section(unsigned long *sent, phys_addr_t paddr, short *pgcnt)
+static int lv1set_section(struct exynos_iommu_domain *priv,
+ sysmmu_pte_t *sent, sysmmu_iova_t iova,
+ phys_addr_t paddr, short *pgcnt)
{
- if (lv1ent_section(sent))
+ if (lv1ent_section(sent)) {
+ WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
+ iova);
return -EADDRINUSE;
+ }
if (lv1ent_page(sent)) {
- if (*pgcnt != NUM_LV2ENTRIES)
+ if (*pgcnt != NUM_LV2ENTRIES) {
+ WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
+ iova);
return -EADDRINUSE;
+ }
- kfree(page_entry(sent, 0));
-
+ kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
*pgcnt = 0;
}
@@ -848,14 +916,26 @@ static int lv1set_section(unsigned long *sent, phys_addr_t paddr, short *pgcnt)
pgtable_flush(sent, sent + 1);
+ spin_lock(&priv->lock);
+ if (lv1ent_page_zero(sent)) {
+ struct exynos_iommu_owner *owner;
+ /*
+ * Flushing FLPD cache in System MMU v3.3 that may cache a FLPD
+ * entry by speculative prefetch of SLPD which has no mapping.
+ */
+ list_for_each_entry(owner, &priv->clients, client)
+ sysmmu_tlb_invalidate_flpdcache(owner->dev, iova);
+ }
+ spin_unlock(&priv->lock);
+
return 0;
}
-static int lv2set_page(unsigned long *pent, phys_addr_t paddr, size_t size,
+static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
short *pgcnt)
{
if (size == SPAGE_SIZE) {
- if (!lv2ent_fault(pent))
+ if (WARN_ON(!lv2ent_fault(pent)))
return -EADDRINUSE;
*pent = mk_lv2ent_spage(paddr);
@@ -863,9 +943,11 @@ static int lv2set_page(unsigned long *pent, phys_addr_t paddr, size_t size,
*pgcnt -= 1;
} else { /* size == LPAGE_SIZE */
int i;
+
for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
- if (!lv2ent_fault(pent)) {
- memset(pent, 0, sizeof(*pent) * i);
+ if (WARN_ON(!lv2ent_fault(pent))) {
+ if (i > 0)
+ memset(pent - i, 0, sizeof(*pent) * i);
return -EADDRINUSE;
}
@@ -878,11 +960,38 @@ static int lv2set_page(unsigned long *pent, phys_addr_t paddr, size_t size,
return 0;
}
-static int exynos_iommu_map(struct iommu_domain *domain, unsigned long iova,
+/*
+ * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
+ *
+ * System MMU v3.x has advanced logic that improves address translation
+ * performance by caching more page table entries from a page table walk.
+ * However, this logic has a bug: fault page table entries are also cached,
+ * and the System MMU reports a page fault if a cached fault entry is hit
+ * even after that entry has been updated to a valid one.
+ * To prevent caching of fault page table entries which may later be
+ * updated to valid entries, the virtual memory manager must apply the
+ * following workarounds.
+ *
+ * Any two consecutive I/O virtual address regions must have a hole of at
+ * most 128KiB to prevent misbehavior of System MMU 3.x. (w/a of h/w bug)
+ *
+ * Precisely, the start address of any I/O virtual region must be aligned
+ * to the following sizes for System MMU v3.1 and v3.2:
+ * System MMU v3.1: 128KiB
+ * System MMU v3.2: 256KiB
+ *
+ * Because System MMU v3.3 caches page table entries more aggressively, it
+ * needs further workarounds:
+ * - Any two consecutive I/O virtual regions must have a hole of size
+ * larger than or equal to 128KiB.
+ * - The start address of an I/O virtual region must be aligned to 128KiB.
+ */
+static int exynos_iommu_map(struct iommu_domain *domain, unsigned long l_iova,
phys_addr_t paddr, size_t size, int prot)
{
struct exynos_iommu_domain *priv = domain->priv;
- unsigned long *entry;
+ sysmmu_pte_t *entry;
+ sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
unsigned long flags;
int ret = -ENOMEM;
@@ -893,38 +1002,52 @@ static int exynos_iommu_map(struct iommu_domain *domain, unsigned long iova,
entry = section_entry(priv->pgtable, iova);
if (size == SECT_SIZE) {
- ret = lv1set_section(entry, paddr,
+ ret = lv1set_section(priv, entry, iova, paddr,
&priv->lv2entcnt[lv1ent_offset(iova)]);
} else {
- unsigned long *pent;
+ sysmmu_pte_t *pent;
- pent = alloc_lv2entry(entry, iova,
+ pent = alloc_lv2entry(priv, entry, iova,
&priv->lv2entcnt[lv1ent_offset(iova)]);
- if (!pent)
- ret = -ENOMEM;
+ if (IS_ERR(pent))
+ ret = PTR_ERR(pent);
else
ret = lv2set_page(pent, paddr, size,
&priv->lv2entcnt[lv1ent_offset(iova)]);
}
- if (ret) {
- pr_debug("%s: Failed to map iova 0x%lx/0x%x bytes\n",
- __func__, iova, size);
- }
+ if (ret)
+ pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
+ __func__, ret, size, iova);
spin_unlock_irqrestore(&priv->pgtablelock, flags);
return ret;
}
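
The alignment workarounds in the *CAUTION* comment above are the caller's responsibility, not enforced by exynos_iommu_map(). A hypothetical IOVA-allocator helper honouring the System MMU v3.3 rules (names are illustrative, not part of this patch):

#include <linux/kernel.h>	/* ALIGN() */
#include <linux/sizes.h>	/* SZ_128K */

/* Leave a hole of at least 128KiB after the previous I/O virtual region,
 * then align the new region's start address to 128KiB. */
static unsigned long exynos_next_iova_region(unsigned long prev_region_end)
{
	return ALIGN(prev_region_end + SZ_128K, SZ_128K);
}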
+static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *priv,
+ sysmmu_iova_t iova, size_t size)
+{
+ struct exynos_iommu_owner *owner;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ list_for_each_entry(owner, &priv->clients, client)
+ sysmmu_tlb_invalidate_entry(owner->dev, iova, size);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
static size_t exynos_iommu_unmap(struct iommu_domain *domain,
- unsigned long iova, size_t size)
+ unsigned long l_iova, size_t size)
{
struct exynos_iommu_domain *priv = domain->priv;
- struct sysmmu_drvdata *data;
+ sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
+ sysmmu_pte_t *ent;
+ size_t err_pgsize;
unsigned long flags;
- unsigned long *ent;
BUG_ON(priv->pgtable == NULL);
@@ -933,9 +1056,12 @@ static size_t exynos_iommu_unmap(struct iommu_domain *domain,
ent = section_entry(priv->pgtable, iova);
if (lv1ent_section(ent)) {
- BUG_ON(size < SECT_SIZE);
+ if (WARN_ON(size < SECT_SIZE)) {
+ err_pgsize = SECT_SIZE;
+ goto err;
+ }
- *ent = 0;
+ *ent = ZERO_LV2LINK; /* w/a for h/w bug in System MMU v3.3 */
pgtable_flush(ent, ent + 1);
size = SECT_SIZE;
goto done;
@@ -959,34 +1085,42 @@ static size_t exynos_iommu_unmap(struct iommu_domain *domain,
if (lv2ent_small(ent)) {
*ent = 0;
size = SPAGE_SIZE;
+ pgtable_flush(ent, ent + 1);
priv->lv2entcnt[lv1ent_offset(iova)] += 1;
goto done;
}
/* lv1ent_large(ent) == true here */
- BUG_ON(size < LPAGE_SIZE);
+ if (WARN_ON(size < LPAGE_SIZE)) {
+ err_pgsize = LPAGE_SIZE;
+ goto err;
+ }
memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
+ pgtable_flush(ent, ent + SPAGES_PER_LPAGE);
size = LPAGE_SIZE;
priv->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
spin_unlock_irqrestore(&priv->pgtablelock, flags);
- spin_lock_irqsave(&priv->lock, flags);
- list_for_each_entry(data, &priv->clients, node)
- sysmmu_tlb_invalidate_entry(data->dev, iova);
- spin_unlock_irqrestore(&priv->lock, flags);
-
+ exynos_iommu_tlb_invalidate_entry(priv, iova, size);
return size;
+err:
+ spin_unlock_irqrestore(&priv->pgtablelock, flags);
+
+ pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
+ __func__, size, iova, err_pgsize);
+
+ return 0;
}
static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
struct exynos_iommu_domain *priv = domain->priv;
- unsigned long *entry;
+ sysmmu_pte_t *entry;
unsigned long flags;
phys_addr_t phys = 0;
@@ -1010,14 +1144,42 @@ static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
return phys;
}
+static int exynos_iommu_add_device(struct device *dev)
+{
+ struct iommu_group *group;
+ int ret;
+
+ group = iommu_group_get(dev);
+
+ if (!group) {
+ group = iommu_group_alloc();
+ if (IS_ERR(group)) {
+ dev_err(dev, "Failed to allocate IOMMU group\n");
+ return PTR_ERR(group);
+ }
+ }
+
+ ret = iommu_group_add_device(group, dev);
+ iommu_group_put(group);
+
+ return ret;
+}
+
+static void exynos_iommu_remove_device(struct device *dev)
+{
+ iommu_group_remove_device(dev);
+}
+
static struct iommu_ops exynos_iommu_ops = {
- .domain_init = &exynos_iommu_domain_init,
- .domain_destroy = &exynos_iommu_domain_destroy,
- .attach_dev = &exynos_iommu_attach_device,
- .detach_dev = &exynos_iommu_detach_device,
- .map = &exynos_iommu_map,
- .unmap = &exynos_iommu_unmap,
- .iova_to_phys = &exynos_iommu_iova_to_phys,
+ .domain_init = exynos_iommu_domain_init,
+ .domain_destroy = exynos_iommu_domain_destroy,
+ .attach_dev = exynos_iommu_attach_device,
+ .detach_dev = exynos_iommu_detach_device,
+ .map = exynos_iommu_map,
+ .unmap = exynos_iommu_unmap,
+ .iova_to_phys = exynos_iommu_iova_to_phys,
+ .add_device = exynos_iommu_add_device,
+ .remove_device = exynos_iommu_remove_device,
.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
};
@@ -1025,11 +1187,41 @@ static int __init exynos_iommu_init(void)
{
int ret;
+ lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
+ LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
+ if (!lv2table_kmem_cache) {
+ pr_err("%s: Failed to create kmem cache\n", __func__);
+ return -ENOMEM;
+ }
+
ret = platform_driver_register(&exynos_sysmmu_driver);
+ if (ret) {
+ pr_err("%s: Failed to register driver\n", __func__);
+ goto err_reg_driver;
+ }
- if (ret == 0)
- bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
+ zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
+ if (zero_lv2_table == NULL) {
+ pr_err("%s: Failed to allocate zero level2 page table\n",
+ __func__);
+ ret = -ENOMEM;
+ goto err_zero_lv2;
+ }
+ ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
+ if (ret) {
+ pr_err("%s: Failed to register exynos-iommu driver.\n",
+ __func__);
+ goto err_set_iommu;
+ }
+
+ return 0;
+err_set_iommu:
+ kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
+err_zero_lv2:
+ platform_driver_unregister(&exynos_sysmmu_driver);
+err_reg_driver:
+ kmem_cache_destroy(lv2table_kmem_cache);
return ret;
}
subsys_initcall(exynos_iommu_init);
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
new file mode 100644
index 000000000000..53cde086e83b
--- /dev/null
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -0,0 +1,1255 @@
+/*
+ * IPMMU VMSA
+ *
+ * Copyright (C) 2014 Renesas Electronics Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/module.h>
+#include <linux/platform_data/ipmmu-vmsa.h>
+#include <linux/platform_device.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+
+#include <asm/dma-iommu.h>
+#include <asm/pgalloc.h>
+
+struct ipmmu_vmsa_device {
+ struct device *dev;
+ void __iomem *base;
+ struct list_head list;
+
+ const struct ipmmu_vmsa_platform_data *pdata;
+ unsigned int num_utlbs;
+
+ struct dma_iommu_mapping *mapping;
+};
+
+struct ipmmu_vmsa_domain {
+ struct ipmmu_vmsa_device *mmu;
+ struct iommu_domain *io_domain;
+
+ unsigned int context_id;
+ spinlock_t lock; /* Protects mappings */
+ pgd_t *pgd;
+};
+
+struct ipmmu_vmsa_archdata {
+ struct ipmmu_vmsa_device *mmu;
+ unsigned int utlb;
+};
+
+static DEFINE_SPINLOCK(ipmmu_devices_lock);
+static LIST_HEAD(ipmmu_devices);
+
+#define TLB_LOOP_TIMEOUT 100 /* 100us */
+
+/* -----------------------------------------------------------------------------
+ * Registers Definition
+ */
+
+#define IM_CTX_SIZE 0x40
+
+#define IMCTR 0x0000
+#define IMCTR_TRE (1 << 17)
+#define IMCTR_AFE (1 << 16)
+#define IMCTR_RTSEL_MASK (3 << 4)
+#define IMCTR_RTSEL_SHIFT 4
+#define IMCTR_TREN (1 << 3)
+#define IMCTR_INTEN (1 << 2)
+#define IMCTR_FLUSH (1 << 1)
+#define IMCTR_MMUEN (1 << 0)
+
+#define IMCAAR 0x0004
+
+#define IMTTBCR 0x0008
+#define IMTTBCR_EAE (1 << 31)
+#define IMTTBCR_PMB (1 << 30)
+#define IMTTBCR_SH1_NON_SHAREABLE (0 << 28)
+#define IMTTBCR_SH1_OUTER_SHAREABLE (2 << 28)
+#define IMTTBCR_SH1_INNER_SHAREABLE (3 << 28)
+#define IMTTBCR_SH1_MASK (3 << 28)
+#define IMTTBCR_ORGN1_NC (0 << 26)
+#define IMTTBCR_ORGN1_WB_WA (1 << 26)
+#define IMTTBCR_ORGN1_WT (2 << 26)
+#define IMTTBCR_ORGN1_WB (3 << 26)
+#define IMTTBCR_ORGN1_MASK (3 << 26)
+#define IMTTBCR_IRGN1_NC (0 << 24)
+#define IMTTBCR_IRGN1_WB_WA (1 << 24)
+#define IMTTBCR_IRGN1_WT (2 << 24)
+#define IMTTBCR_IRGN1_WB (3 << 24)
+#define IMTTBCR_IRGN1_MASK (3 << 24)
+#define IMTTBCR_TSZ1_MASK (7 << 16)
+#define IMTTBCR_TSZ1_SHIFT 16
+#define IMTTBCR_SH0_NON_SHAREABLE (0 << 12)
+#define IMTTBCR_SH0_OUTER_SHAREABLE (2 << 12)
+#define IMTTBCR_SH0_INNER_SHAREABLE (3 << 12)
+#define IMTTBCR_SH0_MASK (3 << 12)
+#define IMTTBCR_ORGN0_NC (0 << 10)
+#define IMTTBCR_ORGN0_WB_WA (1 << 10)
+#define IMTTBCR_ORGN0_WT (2 << 10)
+#define IMTTBCR_ORGN0_WB (3 << 10)
+#define IMTTBCR_ORGN0_MASK (3 << 10)
+#define IMTTBCR_IRGN0_NC (0 << 8)
+#define IMTTBCR_IRGN0_WB_WA (1 << 8)
+#define IMTTBCR_IRGN0_WT (2 << 8)
+#define IMTTBCR_IRGN0_WB (3 << 8)
+#define IMTTBCR_IRGN0_MASK (3 << 8)
+#define IMTTBCR_SL0_LVL_2 (0 << 4)
+#define IMTTBCR_SL0_LVL_1 (1 << 4)
+#define IMTTBCR_TSZ0_MASK (7 << 0)
+#define IMTTBCR_TSZ0_SHIFT 0
+
+#define IMBUSCR 0x000c
+#define IMBUSCR_DVM (1 << 2)
+#define IMBUSCR_BUSSEL_SYS (0 << 0)
+#define IMBUSCR_BUSSEL_CCI (1 << 0)
+#define IMBUSCR_BUSSEL_IMCAAR (2 << 0)
+#define IMBUSCR_BUSSEL_CCI_IMCAAR (3 << 0)
+#define IMBUSCR_BUSSEL_MASK (3 << 0)
+
+#define IMTTLBR0 0x0010
+#define IMTTUBR0 0x0014
+#define IMTTLBR1 0x0018
+#define IMTTUBR1 0x001c
+
+#define IMSTR 0x0020
+#define IMSTR_ERRLVL_MASK (3 << 12)
+#define IMSTR_ERRLVL_SHIFT 12
+#define IMSTR_ERRCODE_TLB_FORMAT (1 << 8)
+#define IMSTR_ERRCODE_ACCESS_PERM (4 << 8)
+#define IMSTR_ERRCODE_SECURE_ACCESS (5 << 8)
+#define IMSTR_ERRCODE_MASK (7 << 8)
+#define IMSTR_MHIT (1 << 4)
+#define IMSTR_ABORT (1 << 2)
+#define IMSTR_PF (1 << 1)
+#define IMSTR_TF (1 << 0)
+
+#define IMMAIR0 0x0028
+#define IMMAIR1 0x002c
+#define IMMAIR_ATTR_MASK 0xff
+#define IMMAIR_ATTR_DEVICE 0x04
+#define IMMAIR_ATTR_NC 0x44
+#define IMMAIR_ATTR_WBRWA 0xff
+#define IMMAIR_ATTR_SHIFT(n) ((n) << 3)
+#define IMMAIR_ATTR_IDX_NC 0
+#define IMMAIR_ATTR_IDX_WBRWA 1
+#define IMMAIR_ATTR_IDX_DEV 2
+
+#define IMEAR 0x0030
+
+#define IMPCTR 0x0200
+#define IMPSTR 0x0208
+#define IMPEAR 0x020c
+#define IMPMBA(n) (0x0280 + ((n) * 4))
+#define IMPMBD(n) (0x02c0 + ((n) * 4))
+
+#define IMUCTR(n) (0x0300 + ((n) * 16))
+#define IMUCTR_FIXADDEN (1 << 31)
+#define IMUCTR_FIXADD_MASK (0xff << 16)
+#define IMUCTR_FIXADD_SHIFT 16
+#define IMUCTR_TTSEL_MMU(n) ((n) << 4)
+#define IMUCTR_TTSEL_PMB (8 << 4)
+#define IMUCTR_TTSEL_MASK (15 << 4)
+#define IMUCTR_FLUSH (1 << 1)
+#define IMUCTR_MMUEN (1 << 0)
+
+#define IMUASID(n) (0x0308 + ((n) * 16))
+#define IMUASID_ASID8_MASK (0xff << 8)
+#define IMUASID_ASID8_SHIFT 8
+#define IMUASID_ASID0_MASK (0xff << 0)
+#define IMUASID_ASID0_SHIFT 0
+
+/* -----------------------------------------------------------------------------
+ * Page Table Bits
+ */
+
+/*
+ * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory access,
+ * Long-descriptor format" that the NStable bit being set in a table descriptor
+ * will result in the NStable and NS bits of all child entries being ignored and
+ * considered as being set. The IPMMU seems not to comply with this, as it
+ * generates a secure access page fault if any of the NStable and NS bits isn't
+ * set when running in non-secure mode.
+ */
+#ifndef PMD_NSTABLE
+#define PMD_NSTABLE (_AT(pmdval_t, 1) << 63)
+#endif
+
+#define ARM_VMSA_PTE_XN (((pteval_t)3) << 53)
+#define ARM_VMSA_PTE_CONT (((pteval_t)1) << 52)
+#define ARM_VMSA_PTE_AF (((pteval_t)1) << 10)
+#define ARM_VMSA_PTE_SH_NS (((pteval_t)0) << 8)
+#define ARM_VMSA_PTE_SH_OS (((pteval_t)2) << 8)
+#define ARM_VMSA_PTE_SH_IS (((pteval_t)3) << 8)
+#define ARM_VMSA_PTE_SH_MASK (((pteval_t)3) << 8)
+#define ARM_VMSA_PTE_NS (((pteval_t)1) << 5)
+#define ARM_VMSA_PTE_PAGE (((pteval_t)3) << 0)
+
+/* Stage-1 PTE */
+#define ARM_VMSA_PTE_nG (((pteval_t)1) << 11)
+#define ARM_VMSA_PTE_AP_UNPRIV (((pteval_t)1) << 6)
+#define ARM_VMSA_PTE_AP_RDONLY (((pteval_t)2) << 6)
+#define ARM_VMSA_PTE_AP_MASK (((pteval_t)3) << 6)
+#define ARM_VMSA_PTE_ATTRINDX_MASK (((pteval_t)3) << 2)
+#define ARM_VMSA_PTE_ATTRINDX_SHIFT 2
+
+#define ARM_VMSA_PTE_ATTRS_MASK \
+ (ARM_VMSA_PTE_XN | ARM_VMSA_PTE_CONT | ARM_VMSA_PTE_nG | \
+ ARM_VMSA_PTE_AF | ARM_VMSA_PTE_SH_MASK | ARM_VMSA_PTE_AP_MASK | \
+ ARM_VMSA_PTE_NS | ARM_VMSA_PTE_ATTRINDX_MASK)
+
+#define ARM_VMSA_PTE_CONT_ENTRIES 16
+#define ARM_VMSA_PTE_CONT_SIZE (PAGE_SIZE * ARM_VMSA_PTE_CONT_ENTRIES)
+
+#define IPMMU_PTRS_PER_PTE 512
+#define IPMMU_PTRS_PER_PMD 512
+#define IPMMU_PTRS_PER_PGD 4
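
As a sanity check on the geometry above (a worked derivation, not new definitions), each table level multiplies out to the full 32-bit VA space allocated to TTBR0:

	/*
	 *   512 PTEs x 4KiB pages = 2MiB covered per PTE table
	 *   512 PMDs x 2MiB       = 1GiB covered per PMD table
	 *     4 PGDs x 1GiB       = 4GiB, the whole 32-bit VA space
	 */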
+
+/* -----------------------------------------------------------------------------
+ * Read/Write Access
+ */
+
+static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset)
+{
+ return ioread32(mmu->base + offset);
+}
+
+static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
+ u32 data)
+{
+ iowrite32(data, mmu->base + offset);
+}
+
+static u32 ipmmu_ctx_read(struct ipmmu_vmsa_domain *domain, unsigned int reg)
+{
+ return ipmmu_read(domain->mmu, domain->context_id * IM_CTX_SIZE + reg);
+}
+
+static void ipmmu_ctx_write(struct ipmmu_vmsa_domain *domain, unsigned int reg,
+ u32 data)
+{
+ ipmmu_write(domain->mmu, domain->context_id * IM_CTX_SIZE + reg, data);
+}
+
+/* -----------------------------------------------------------------------------
+ * TLB and microTLB Management
+ */
+
+/* Wait for any pending TLB invalidations to complete */
+static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain)
+{
+ unsigned int count = 0;
+
+ while (ipmmu_ctx_read(domain, IMCTR) & IMCTR_FLUSH) {
+ cpu_relax();
+ if (++count == TLB_LOOP_TIMEOUT) {
+ dev_err_ratelimited(domain->mmu->dev,
+ "TLB sync timed out -- MMU may be deadlocked\n");
+ return;
+ }
+ udelay(1);
+ }
+}
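
A worked bound for the loop above (an observation, not added code): at most TLB_LOOP_TIMEOUT iterations of udelay(1), i.e. 100 x 1us ~= 100us, which matches the "100us" note at the definition of TLB_LOOP_TIMEOUT.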
+
+static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain)
+{
+ u32 reg;
+
+ reg = ipmmu_ctx_read(domain, IMCTR);
+ reg |= IMCTR_FLUSH;
+ ipmmu_ctx_write(domain, IMCTR, reg);
+
+ ipmmu_tlb_sync(domain);
+}
+
+/*
+ * Enable MMU translation for the microTLB.
+ */
+static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
+ unsigned int utlb)
+{
+ struct ipmmu_vmsa_device *mmu = domain->mmu;
+
+ /*
+ * TODO: Reference-count the microTLB as several bus masters can be
+ * connected to the same microTLB.
+ */
+
+ /* TODO: What should we set the ASID to ? */
+ ipmmu_write(mmu, IMUASID(utlb), 0);
+ /* TODO: Do we need to flush the microTLB ? */
+ ipmmu_write(mmu, IMUCTR(utlb),
+ IMUCTR_TTSEL_MMU(domain->context_id) | IMUCTR_FLUSH |
+ IMUCTR_MMUEN);
+}
+
+/*
+ * Disable MMU translation for the microTLB.
+ */
+static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
+ unsigned int utlb)
+{
+ struct ipmmu_vmsa_device *mmu = domain->mmu;
+
+ ipmmu_write(mmu, IMUCTR(utlb), 0);
+}
+
+static void ipmmu_flush_pgtable(struct ipmmu_vmsa_device *mmu, void *addr,
+ size_t size)
+{
+ unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
+
+ /*
+ * TODO: Add support for coherent walk through CCI with DVM and remove
+ * cache handling.
+ */
+ dma_map_page(mmu->dev, virt_to_page(addr), offset, size, DMA_TO_DEVICE);
+}
+
+/* -----------------------------------------------------------------------------
+ * Domain/Context Management
+ */
+
+static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
+{
+ phys_addr_t ttbr;
+ u32 reg;
+
+ /*
+ * TODO: When adding support for multiple contexts, find an unused
+ * context.
+ */
+ domain->context_id = 0;
+
+ /* TTBR0 */
+ ipmmu_flush_pgtable(domain->mmu, domain->pgd,
+ IPMMU_PTRS_PER_PGD * sizeof(*domain->pgd));
+ ttbr = __pa(domain->pgd);
+ ipmmu_ctx_write(domain, IMTTLBR0, ttbr);
+ ipmmu_ctx_write(domain, IMTTUBR0, ttbr >> 32);
+
+ /*
+ * TTBCR
+ * We use long descriptors with inner-shareable WBWA tables and allocate
+ * the whole 32-bit VA space to TTBR0.
+ */
+ ipmmu_ctx_write(domain, IMTTBCR, IMTTBCR_EAE |
+ IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
+ IMTTBCR_IRGN0_WB_WA | IMTTBCR_SL0_LVL_1);
+
+ /*
+ * MAIR0
+ * We need three attributes only, non-cacheable, write-back read/write
+ * allocate and device memory.
+ */
+ reg = (IMMAIR_ATTR_NC << IMMAIR_ATTR_SHIFT(IMMAIR_ATTR_IDX_NC))
+ | (IMMAIR_ATTR_WBRWA << IMMAIR_ATTR_SHIFT(IMMAIR_ATTR_IDX_WBRWA))
+ | (IMMAIR_ATTR_DEVICE << IMMAIR_ATTR_SHIFT(IMMAIR_ATTR_IDX_DEV));
+ ipmmu_ctx_write(domain, IMMAIR0, reg);
+
+ /* IMBUSCR */
+ ipmmu_ctx_write(domain, IMBUSCR,
+ ipmmu_ctx_read(domain, IMBUSCR) &
+ ~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK));
+
+ /*
+ * IMSTR
+ * Clear all interrupt flags.
+ */
+ ipmmu_ctx_write(domain, IMSTR, ipmmu_ctx_read(domain, IMSTR));
+
+ /*
+ * IMCTR
+ * Enable the MMU and interrupt generation. The long-descriptor
+ * translation table format doesn't use TEX remapping. Don't enable AF
+ * software management as we have no use for it. Flush the TLB as
+ * required when modifying the context registers.
+ */
+ ipmmu_ctx_write(domain, IMCTR, IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);
+
+ return 0;
+}
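
With the attribute encodings defined earlier, the MAIR0 value programmed above works out as follows (a worked example):

	/*
	 * IMMAIR_ATTR_SHIFT(n) = n << 3, i.e. one byte per attribute index:
	 *   index 0 (NC)    : 0x44 <<  0 = 0x00000044
	 *   index 1 (WBRWA) : 0xff <<  8 = 0x0000ff00
	 *   index 2 (DEVICE): 0x04 << 16 = 0x00040000
	 * so IMMAIR0 is programmed with 0x0004ff44.
	 */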
+
+static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
+{
+ /*
+ * Disable the context. Flush the TLB as required when modifying the
+ * context registers.
+ *
+ * TODO: Is TLB flush really needed ?
+ */
+ ipmmu_ctx_write(domain, IMCTR, IMCTR_FLUSH);
+ ipmmu_tlb_sync(domain);
+}
+
+/* -----------------------------------------------------------------------------
+ * Fault Handling
+ */
+
+static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
+{
+ const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF;
+ struct ipmmu_vmsa_device *mmu = domain->mmu;
+ u32 status;
+ u32 iova;
+
+ status = ipmmu_ctx_read(domain, IMSTR);
+ if (!(status & err_mask))
+ return IRQ_NONE;
+
+ iova = ipmmu_ctx_read(domain, IMEAR);
+
+ /*
+ * Clear the error status flags. Unlike traditional interrupt flag
+ * registers that must be cleared by writing 1, this status register
+ * seems to require 0. The error address register must be read before
+ * clearing the status, otherwise its value will be 0.
+ */
+ ipmmu_ctx_write(domain, IMSTR, 0);
+
+ /* Log fatal errors. */
+ if (status & IMSTR_MHIT)
+ dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%08x\n",
+ iova);
+ if (status & IMSTR_ABORT)
+ dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%08x\n",
+ iova);
+
+ if (!(status & (IMSTR_PF | IMSTR_TF)))
+ return IRQ_NONE;
+
+ /*
+ * Try to handle page faults and translation faults.
+ *
+ * TODO: We need to look up the faulty device based on the I/O VA. Use
+ * the IOMMU device for now.
+ */
+ if (!report_iommu_fault(domain->io_domain, mmu->dev, iova, 0))
+ return IRQ_HANDLED;
+
+ dev_err_ratelimited(mmu->dev,
+ "Unhandled fault: status 0x%08x iova 0x%08x\n",
+ status, iova);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ipmmu_irq(int irq, void *dev)
+{
+ struct ipmmu_vmsa_device *mmu = dev;
+ struct iommu_domain *io_domain;
+ struct ipmmu_vmsa_domain *domain;
+
+ if (!mmu->mapping)
+ return IRQ_NONE;
+
+ io_domain = mmu->mapping->domain;
+ domain = io_domain->priv;
+
+ return ipmmu_domain_irq(domain);
+}
+
+/* -----------------------------------------------------------------------------
+ * Page Table Management
+ */
+
+#define pud_pgtable(pud) pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK))
+
+static void ipmmu_free_ptes(pmd_t *pmd)
+{
+ pgtable_t table = pmd_pgtable(*pmd);
+ __free_page(table);
+}
+
+static void ipmmu_free_pmds(pud_t *pud)
+{
+ pmd_t *pmd = pmd_offset(pud, 0);
+ pgtable_t table;
+ unsigned int i;
+
+ for (i = 0; i < IPMMU_PTRS_PER_PMD; ++i) {
+ if (!pmd_table(*pmd))
+ continue;
+
+ ipmmu_free_ptes(pmd);
+ pmd++;
+ }
+
+ table = pud_pgtable(*pud);
+ __free_page(table);
+}
+
+static void ipmmu_free_pgtables(struct ipmmu_vmsa_domain *domain)
+{
+ pgd_t *pgd, *pgd_base = domain->pgd;
+ unsigned int i;
+
+ /*
+ * Recursively free the page tables for this domain. We don't care about
+ * speculative TLB filling, because the TLB will be nuked next time this
+ * context bank is re-allocated and no devices currently map to these
+ * tables.
+ */
+ pgd = pgd_base;
+ for (i = 0; i < IPMMU_PTRS_PER_PGD; ++i) {
+ if (pgd_none(*pgd))
+ continue;
+ ipmmu_free_pmds((pud_t *)pgd);
+ pgd++;
+ }
+
+ kfree(pgd_base);
+}
+
+/*
+ * We can't use the (pgd|pud|pmd|pte)_populate or the set_(pgd|pud|pmd|pte)
+ * functions as they would flush the CPU TLB.
+ */
+
+static pte_t *ipmmu_alloc_pte(struct ipmmu_vmsa_device *mmu, pmd_t *pmd,
+ unsigned long iova)
+{
+ pte_t *pte;
+
+ if (!pmd_none(*pmd))
+ return pte_offset_kernel(pmd, iova);
+
+ pte = (pte_t *)get_zeroed_page(GFP_ATOMIC);
+ if (!pte)
+ return NULL;
+
+ ipmmu_flush_pgtable(mmu, pte, PAGE_SIZE);
+ *pmd = __pmd(__pa(pte) | PMD_NSTABLE | PMD_TYPE_TABLE);
+ ipmmu_flush_pgtable(mmu, pmd, sizeof(*pmd));
+
+ return pte + pte_index(iova);
+}
+
+static pmd_t *ipmmu_alloc_pmd(struct ipmmu_vmsa_device *mmu, pgd_t *pgd,
+ unsigned long iova)
+{
+ pud_t *pud = (pud_t *)pgd;
+ pmd_t *pmd;
+
+ if (!pud_none(*pud))
+ return pmd_offset(pud, iova);
+
+ pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC);
+ if (!pmd)
+ return NULL;
+
+ ipmmu_flush_pgtable(mmu, pmd, PAGE_SIZE);
+ *pud = __pud(__pa(pmd) | PMD_NSTABLE | PMD_TYPE_TABLE);
+ ipmmu_flush_pgtable(mmu, pud, sizeof(*pud));
+
+ return pmd + pmd_index(iova);
+}
+
+static u64 ipmmu_page_prot(unsigned int prot, u64 type)
+{
+ u64 pgprot = ARM_VMSA_PTE_XN | ARM_VMSA_PTE_nG | ARM_VMSA_PTE_AF
+ | ARM_VMSA_PTE_SH_IS | ARM_VMSA_PTE_AP_UNPRIV
+ | ARM_VMSA_PTE_NS | type;
+
+ if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
+ pgprot |= ARM_VMSA_PTE_AP_RDONLY;
+
+ if (prot & IOMMU_CACHE)
+ pgprot |= IMMAIR_ATTR_IDX_WBRWA << ARM_VMSA_PTE_ATTRINDX_SHIFT;
+
+ if (prot & IOMMU_EXEC)
+ pgprot &= ~ARM_VMSA_PTE_XN;
+ else if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
+ /* If no access create a faulting entry to avoid TLB fills. */
+ pgprot &= ~ARM_VMSA_PTE_PAGE;
+
+ return pgprot;
+}
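
For example (illustrative, not part of the patch), a readable, writable, cacheable page resolves as follows:

	/*
	 * ipmmu_page_prot(IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE,
	 *                 ARM_VMSA_PTE_PAGE)
	 * skips AP_RDONLY (IOMMU_WRITE is set), keeps XN (no IOMMU_EXEC)
	 * and selects ATTRINDX = IMMAIR_ATTR_IDX_WBRWA, yielding
	 * XN | nG | AF | SH_IS | AP_UNPRIV | NS | ATTRINDX(1) | PAGE.
	 */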
+
+static int ipmmu_alloc_init_pte(struct ipmmu_vmsa_device *mmu, pmd_t *pmd,
+ unsigned long iova, unsigned long pfn,
+ size_t size, int prot)
+{
+ pteval_t pteval = ipmmu_page_prot(prot, ARM_VMSA_PTE_PAGE);
+ unsigned int num_ptes = 1;
+ pte_t *pte, *start;
+ unsigned int i;
+
+ pte = ipmmu_alloc_pte(mmu, pmd, iova);
+ if (!pte)
+ return -ENOMEM;
+
+ start = pte;
+
+ /*
+ * Install the page table entries. We can be called both for a single
+ * page or for a block of 16 physically contiguous pages. In the latter
+ * case set the PTE contiguous hint.
+ */
+ if (size == SZ_64K) {
+ pteval |= ARM_VMSA_PTE_CONT;
+ num_ptes = ARM_VMSA_PTE_CONT_ENTRIES;
+ }
+
+ for (i = num_ptes; i; --i)
+ *pte++ = pfn_pte(pfn++, __pgprot(pteval));
+
+ ipmmu_flush_pgtable(mmu, start, sizeof(*pte) * num_ptes);
+
+ return 0;
+}
+
+static int ipmmu_alloc_init_pmd(struct ipmmu_vmsa_device *mmu, pmd_t *pmd,
+ unsigned long iova, unsigned long pfn,
+ int prot)
+{
+ pmdval_t pmdval = ipmmu_page_prot(prot, PMD_TYPE_SECT);
+
+ *pmd = pfn_pmd(pfn, __pgprot(pmdval));
+ ipmmu_flush_pgtable(mmu, pmd, sizeof(*pmd));
+
+ return 0;
+}
+
+static int ipmmu_create_mapping(struct ipmmu_vmsa_domain *domain,
+ unsigned long iova, phys_addr_t paddr,
+ size_t size, int prot)
+{
+ struct ipmmu_vmsa_device *mmu = domain->mmu;
+ pgd_t *pgd = domain->pgd;
+ unsigned long flags;
+ unsigned long pfn;
+ pmd_t *pmd;
+ int ret;
+
+ if (!pgd)
+ return -EINVAL;
+
+ if (size & ~PAGE_MASK)
+ return -EINVAL;
+
+ if (paddr & ~((1ULL << 40) - 1))
+ return -ERANGE;
+
+ pfn = __phys_to_pfn(paddr);
+ pgd += pgd_index(iova);
+
+ /* Update the page tables. */
+ spin_lock_irqsave(&domain->lock, flags);
+
+ pmd = ipmmu_alloc_pmd(mmu, pgd, iova);
+ if (!pmd) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ switch (size) {
+ case SZ_2M:
+ ret = ipmmu_alloc_init_pmd(mmu, pmd, iova, pfn, prot);
+ break;
+ case SZ_64K:
+ case SZ_4K:
+ ret = ipmmu_alloc_init_pte(mmu, pmd, iova, pfn, size, prot);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+done:
+ spin_unlock_irqrestore(&domain->lock, flags);
+
+ if (!ret)
+ ipmmu_tlb_invalidate(domain);
+
+ return ret;
+}
+
+static void ipmmu_clear_pud(struct ipmmu_vmsa_device *mmu, pud_t *pud)
+{
+ /* Free the page table. */
+ pgtable_t table = pud_pgtable(*pud);
+ __free_page(table);
+
+ /* Clear the PUD. */
+ *pud = __pud(0);
+ ipmmu_flush_pgtable(mmu, pud, sizeof(*pud));
+}
+
+static void ipmmu_clear_pmd(struct ipmmu_vmsa_device *mmu, pud_t *pud,
+ pmd_t *pmd)
+{
+ unsigned int i;
+
+ /* Free the page table. */
+ if (pmd_table(*pmd)) {
+ pgtable_t table = pmd_pgtable(*pmd);
+ __free_page(table);
+ }
+
+ /* Clear the PMD. */
+ *pmd = __pmd(0);
+ ipmmu_flush_pgtable(mmu, pmd, sizeof(*pmd));
+
+ /* Check whether the PUD is still needed. */
+ pmd = pmd_offset(pud, 0);
+ for (i = 0; i < IPMMU_PTRS_PER_PMD; ++i) {
+ if (!pmd_none(pmd[i]))
+ return;
+ }
+
+ /* Clear the parent PUD. */
+ ipmmu_clear_pud(mmu, pud);
+}
+
+static void ipmmu_clear_pte(struct ipmmu_vmsa_device *mmu, pud_t *pud,
+ pmd_t *pmd, pte_t *pte, unsigned int num_ptes)
+{
+ unsigned int i;
+
+ /* Clear the PTE. */
+ for (i = num_ptes; i; --i)
+ pte[i-1] = __pte(0);
+
+ ipmmu_flush_pgtable(mmu, pte, sizeof(*pte) * num_ptes);
+
+ /* Check whether the PMD is still needed. */
+ pte = pte_offset_kernel(pmd, 0);
+ for (i = 0; i < IPMMU_PTRS_PER_PTE; ++i) {
+ if (!pte_none(pte[i]))
+ return;
+ }
+
+ /* Clear the parent PMD. */
+ ipmmu_clear_pmd(mmu, pud, pmd);
+}
+
+static int ipmmu_split_pmd(struct ipmmu_vmsa_device *mmu, pmd_t *pmd)
+{
+ pte_t *pte, *start;
+ pteval_t pteval;
+ unsigned long pfn;
+ unsigned int i;
+
+ pte = (pte_t *)get_zeroed_page(GFP_ATOMIC);
+ if (!pte)
+ return -ENOMEM;
+
+ /* Copy the PMD attributes. */
+ pteval = (pmd_val(*pmd) & ARM_VMSA_PTE_ATTRS_MASK)
+ | ARM_VMSA_PTE_CONT | ARM_VMSA_PTE_PAGE;
+
+ pfn = pmd_pfn(*pmd);
+ start = pte;
+
+ for (i = IPMMU_PTRS_PER_PTE; i; --i)
+ *pte++ = pfn_pte(pfn++, __pgprot(pteval));
+
+ ipmmu_flush_pgtable(mmu, start, PAGE_SIZE);
+ *pmd = __pmd(__pa(start) | PMD_NSTABLE | PMD_TYPE_TABLE);
+ ipmmu_flush_pgtable(mmu, pmd, sizeof(*pmd));
+
+ return 0;
+}
+
+static void ipmmu_split_pte(struct ipmmu_vmsa_device *mmu, pte_t *pte)
+{
+ unsigned int i;
+
+ for (i = ARM_VMSA_PTE_CONT_ENTRIES; i; --i)
+ pte[i-1] = __pte(pte_val(*pte) & ~ARM_VMSA_PTE_CONT);
+
+ ipmmu_flush_pgtable(mmu, pte, sizeof(*pte) * ARM_VMSA_PTE_CONT_ENTRIES);
+}
+
+static int ipmmu_clear_mapping(struct ipmmu_vmsa_domain *domain,
+ unsigned long iova, size_t size)
+{
+ struct ipmmu_vmsa_device *mmu = domain->mmu;
+ unsigned long flags;
+ pgd_t *pgd = domain->pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+ int ret = 0;
+
+ if (!pgd)
+ return -EINVAL;
+
+ if (size & ~PAGE_MASK)
+ return -EINVAL;
+
+ pgd += pgd_index(iova);
+ pud = (pud_t *)pgd;
+
+ spin_lock_irqsave(&domain->lock, flags);
+
+ /* If there's no PUD or PMD we're done. */
+ if (pud_none(*pud))
+ goto done;
+
+ pmd = pmd_offset(pud, iova);
+ if (pmd_none(*pmd))
+ goto done;
+
+ /*
+ * When freeing a 2MB block just clear the PMD. In the unlikely case the
+ * block is mapped as individual pages this will free the corresponding
+ * PTE page table.
+ */
+ if (size == SZ_2M) {
+ ipmmu_clear_pmd(mmu, pud, pmd);
+ goto done;
+ }
+
+ /*
+ * If the PMD has been mapped as a section remap it as pages to allow
+ * freeing individual pages.
+ */
+ if (pmd_sect(*pmd))
+ ipmmu_split_pmd(mmu, pmd);
+
+ pte = pte_offset_kernel(pmd, iova);
+
+ /*
+ * When freeing a 64kB block just clear the PTE entries. We don't have
+ * to care about the contiguous hint of the surrounding entries.
+ */
+ if (size == SZ_64K) {
+ ipmmu_clear_pte(mmu, pud, pmd, pte, ARM_VMSA_PTE_CONT_ENTRIES);
+ goto done;
+ }
+
+ /*
+ * If the PTE has been mapped with the contiguous hint set remap it and
+ * its surrounding PTEs to allow unmapping a single page.
+ */
+ if (pte_val(*pte) & ARM_VMSA_PTE_CONT)
+ ipmmu_split_pte(mmu, pte);
+
+ /* Clear the PTE. */
+ ipmmu_clear_pte(mmu, pud, pmd, pte, 1);
+
+done:
+ spin_unlock_irqrestore(&domain->lock, flags);
+
+ if (!ret)
+ ipmmu_tlb_invalidate(domain);
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * IOMMU Operations
+ */
+
+static int ipmmu_domain_init(struct iommu_domain *io_domain)
+{
+ struct ipmmu_vmsa_domain *domain;
+
+ domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+ if (!domain)
+ return -ENOMEM;
+
+ spin_lock_init(&domain->lock);
+
+ domain->pgd = kzalloc(IPMMU_PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
+ if (!domain->pgd) {
+ kfree(domain);
+ return -ENOMEM;
+ }
+
+ io_domain->priv = domain;
+ domain->io_domain = io_domain;
+
+ return 0;
+}
+
+static void ipmmu_domain_destroy(struct iommu_domain *io_domain)
+{
+ struct ipmmu_vmsa_domain *domain = io_domain->priv;
+
+ /*
+ * Free the domain resources. We assume that all devices have already
+ * been detached.
+ */
+ ipmmu_domain_destroy_context(domain);
+ ipmmu_free_pgtables(domain);
+ kfree(domain);
+}
+
+static int ipmmu_attach_device(struct iommu_domain *io_domain,
+ struct device *dev)
+{
+ struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu;
+ struct ipmmu_vmsa_device *mmu = archdata->mmu;
+ struct ipmmu_vmsa_domain *domain = io_domain->priv;
+ unsigned long flags;
+ int ret = 0;
+
+ if (!mmu) {
+ dev_err(dev, "Cannot attach to IPMMU\n");
+ return -ENXIO;
+ }
+
+ spin_lock_irqsave(&domain->lock, flags);
+
+ if (!domain->mmu) {
+ /* The domain hasn't been used yet, initialize it. */
+ domain->mmu = mmu;
+ ret = ipmmu_domain_init_context(domain);
+ } else if (domain->mmu != mmu) {
+ /*
+ * Something is wrong, we can't attach two devices using
+ * different IOMMUs to the same domain.
+ */
+ dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n",
+ dev_name(mmu->dev), dev_name(domain->mmu->dev));
+ ret = -EINVAL;
+ }
+
+ spin_unlock_irqrestore(&domain->lock, flags);
+
+ if (ret < 0)
+ return ret;
+
+ ipmmu_utlb_enable(domain, archdata->utlb);
+
+ return 0;
+}
+
+static void ipmmu_detach_device(struct iommu_domain *io_domain,
+ struct device *dev)
+{
+ struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu;
+ struct ipmmu_vmsa_domain *domain = io_domain->priv;
+
+ ipmmu_utlb_disable(domain, archdata->utlb);
+
+ /*
+ * TODO: Optimize by disabling the context when no device is attached.
+ */
+}
+
+static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot)
+{
+ struct ipmmu_vmsa_domain *domain = io_domain->priv;
+
+ if (!domain)
+ return -ENODEV;
+
+ return ipmmu_create_mapping(domain, iova, paddr, size, prot);
+}
+
+static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
+ size_t size)
+{
+ struct ipmmu_vmsa_domain *domain = io_domain->priv;
+ int ret;
+
+ ret = ipmmu_clear_mapping(domain, iova, size);
+ return ret ? 0 : size;
+}
+
+static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
+ dma_addr_t iova)
+{
+ struct ipmmu_vmsa_domain *domain = io_domain->priv;
+ pgd_t pgd;
+ pud_t pud;
+ pmd_t pmd;
+ pte_t pte;
+
+ /* TODO: Is locking needed ? */
+
+ if (!domain->pgd)
+ return 0;
+
+ pgd = *(domain->pgd + pgd_index(iova));
+ if (pgd_none(pgd))
+ return 0;
+
+ pud = *pud_offset(&pgd, iova);
+ if (pud_none(pud))
+ return 0;
+
+ pmd = *pmd_offset(&pud, iova);
+ if (pmd_none(pmd))
+ return 0;
+
+ if (pmd_sect(pmd))
+ return __pfn_to_phys(pmd_pfn(pmd)) | (iova & ~PMD_MASK);
+
+ pte = *(pmd_page_vaddr(pmd) + pte_index(iova));
+ if (pte_none(pte))
+ return 0;
+
+ return __pfn_to_phys(pte_pfn(pte)) | (iova & ~PAGE_MASK);
+}
+
+static int ipmmu_find_utlb(struct ipmmu_vmsa_device *mmu, struct device *dev)
+{
+ const struct ipmmu_vmsa_master *master = mmu->pdata->masters;
+ const char *devname = dev_name(dev);
+ unsigned int i;
+
+ for (i = 0; i < mmu->pdata->num_masters; ++i, ++master) {
+ if (strcmp(master->name, devname) == 0)
+ return master->utlb;
+ }
+
+ return -1;
+}
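
Masters are matched purely by dev_name() against the platform data, so a board file is expected to provide entries like the following sketch (hypothetical device name and uTLB index, for illustration only):

#include <linux/kernel.h>	/* ARRAY_SIZE() */
#include <linux/platform_data/ipmmu-vmsa.h>

static const struct ipmmu_vmsa_master ipmmu_masters[] = {
	/* .name must equal dev_name() of the bus-master device; .utlb
	 * selects the microTLB that master is wired to */
	{ .name = "e6ef0000.video", .utlb = 0 },
};

static const struct ipmmu_vmsa_platform_data ipmmu_platform_data = {
	.masters	= ipmmu_masters,
	.num_masters	= ARRAY_SIZE(ipmmu_masters),
};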
+
+static int ipmmu_add_device(struct device *dev)
+{
+ struct ipmmu_vmsa_archdata *archdata;
+ struct ipmmu_vmsa_device *mmu;
+ struct iommu_group *group;
+ int utlb = -1;
+ int ret;
+
+ if (dev->archdata.iommu) {
+ dev_warn(dev, "IOMMU driver already assigned to device %s\n",
+ dev_name(dev));
+ return -EINVAL;
+ }
+
+ /* Find the master corresponding to the device. */
+ spin_lock(&ipmmu_devices_lock);
+
+ list_for_each_entry(mmu, &ipmmu_devices, list) {
+ utlb = ipmmu_find_utlb(mmu, dev);
+ if (utlb >= 0) {
+ /*
+ * TODO Take a reference to the MMU to protect
+ * against device removal.
+ */
+ break;
+ }
+ }
+
+ spin_unlock(&ipmmu_devices_lock);
+
+ if (utlb < 0)
+ return -ENODEV;
+
+ if (utlb >= mmu->num_utlbs)
+ return -EINVAL;
+
+ /* Create a device group and add the device to it. */
+ group = iommu_group_alloc();
+ if (IS_ERR(group)) {
+ dev_err(dev, "Failed to allocate IOMMU group\n");
+ return PTR_ERR(group);
+ }
+
+ ret = iommu_group_add_device(group, dev);
+ iommu_group_put(group);
+
+ if (ret < 0) {
+ dev_err(dev, "Failed to add device to IPMMU group\n");
+ return ret;
+ }
+
+ archdata = kzalloc(sizeof(*archdata), GFP_KERNEL);
+ if (!archdata) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ archdata->mmu = mmu;
+ archdata->utlb = utlb;
+ dev->archdata.iommu = archdata;
+
+ /*
+ * Create the ARM mapping, used by the ARM DMA mapping core to allocate
+ * VAs. This will allocate a corresponding IOMMU domain.
+ *
+ * TODO:
+ * - Create one mapping per context (TLB).
+ * - Make the mapping size configurable? We currently use a 2GB mapping
+ * at a 1GB offset to ensure that NULL VAs will fault.
+ */
+ if (!mmu->mapping) {
+ struct dma_iommu_mapping *mapping;
+
+ mapping = arm_iommu_create_mapping(&platform_bus_type,
+ SZ_1G, SZ_2G);
+ if (IS_ERR(mapping)) {
+ dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
+ return PTR_ERR(mapping);
+ }
+
+ mmu->mapping = mapping;
+ }
+
+ /* Attach the ARM VA mapping to the device. */
+ ret = arm_iommu_attach_device(dev, mmu->mapping);
+ if (ret < 0) {
+ dev_err(dev, "Failed to attach device to VA mapping\n");
+ goto error;
+ }
+
+ return 0;
+
+error:
+ kfree(dev->archdata.iommu);
+ dev->archdata.iommu = NULL;
+ iommu_group_remove_device(dev);
+ return ret;
+}
+
+static void ipmmu_remove_device(struct device *dev)
+{
+ arm_iommu_detach_device(dev);
+ iommu_group_remove_device(dev);
+ kfree(dev->archdata.iommu);
+ dev->archdata.iommu = NULL;
+}
+
+static struct iommu_ops ipmmu_ops = {
+ .domain_init = ipmmu_domain_init,
+ .domain_destroy = ipmmu_domain_destroy,
+ .attach_dev = ipmmu_attach_device,
+ .detach_dev = ipmmu_detach_device,
+ .map = ipmmu_map,
+ .unmap = ipmmu_unmap,
+ .iova_to_phys = ipmmu_iova_to_phys,
+ .add_device = ipmmu_add_device,
+ .remove_device = ipmmu_remove_device,
+ .pgsize_bitmap = SZ_2M | SZ_64K | SZ_4K,
+};
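
The pgsize_bitmap advertises which chunk sizes map()/unmap() accept; the IOMMU core splits each request into the largest advertised size that fits the remaining length and the alignment of both addresses. Roughly (a simplified sketch of that selection, not the kernel's exact loop):

	static size_t pick_pgsize(unsigned long bitmap, unsigned long iova,
				  phys_addr_t paddr, size_t size)
	{
		unsigned long addr_merge = iova | paddr;
		int bit;

		for (bit = 31; bit >= 12; bit--) {
			size_t pgsize = 1UL << bit;

			if (!(bitmap & pgsize))		/* size not advertised */
				continue;
			if (pgsize > size)		/* doesn't fit */
				continue;
			if (addr_merge & (pgsize - 1))	/* misaligned */
				continue;
			return pgsize;
		}
		return 0;
	}

With SZ_2M | SZ_64K | SZ_4K this walks a large region in 2 MiB steps and falls back to 64 KiB and 4 KiB chunks at the edges.
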
+
+/* -----------------------------------------------------------------------------
+ * Probe/remove and init
+ */
+
+static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu)
+{
+ unsigned int i;
+
+ /* Disable all contexts. */
+ for (i = 0; i < 4; ++i)
+ ipmmu_write(mmu, i * IM_CTX_SIZE + IMCTR, 0);
+}
+
+static int ipmmu_probe(struct platform_device *pdev)
+{
+ struct ipmmu_vmsa_device *mmu;
+ struct resource *res;
+ int irq;
+ int ret;
+
+ if (!pdev->dev.platform_data) {
+ dev_err(&pdev->dev, "missing platform data\n");
+ return -EINVAL;
+ }
+
+ mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL);
+ if (!mmu) {
+ dev_err(&pdev->dev, "cannot allocate device data\n");
+ return -ENOMEM;
+ }
+
+ mmu->dev = &pdev->dev;
+ mmu->pdata = pdev->dev.platform_data;
+ mmu->num_utlbs = 32;
+
+ /* Map I/O memory and request IRQ. */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mmu->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mmu->base))
+ return PTR_ERR(mmu->base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "no IRQ found\n");
+ return irq;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0,
+ dev_name(&pdev->dev), mmu);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to request IRQ %d\n", irq);
+ return ret;
+ }
+
+ ipmmu_device_reset(mmu);
+
+ /*
+ * We can't create the ARM mapping here as it requires the bus to have
+ * an IOMMU, which only happens when bus_set_iommu() is called in
+ * ipmmu_init() after the probe function returns.
+ */
+
+ spin_lock(&ipmmu_devices_lock);
+ list_add(&mmu->list, &ipmmu_devices);
+ spin_unlock(&ipmmu_devices_lock);
+
+ platform_set_drvdata(pdev, mmu);
+
+ return 0;
+}
+
+static int ipmmu_remove(struct platform_device *pdev)
+{
+ struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev);
+
+ spin_lock(&ipmmu_devices_lock);
+ list_del(&mmu->list);
+ spin_unlock(&ipmmu_devices_lock);
+
+ arm_iommu_release_mapping(mmu->mapping);
+
+ ipmmu_device_reset(mmu);
+
+ return 0;
+}
+
+static struct platform_driver ipmmu_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "ipmmu-vmsa",
+ },
+ .probe = ipmmu_probe,
+ .remove = ipmmu_remove,
+};
+
+static int __init ipmmu_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&ipmmu_driver);
+ if (ret < 0)
+ return ret;
+
+ if (!iommu_present(&platform_bus_type))
+ bus_set_iommu(&platform_bus_type, &ipmmu_ops);
+
+ return 0;
+}
+
+static void __exit ipmmu_exit(void)
+{
+ platform_driver_unregister(&ipmmu_driver);
+}
+
+subsys_initcall(ipmmu_init);
+module_exit(ipmmu_exit);
+
+MODULE_DESCRIPTION("IOMMU API for Renesas VMSA-compatible IPMMU");
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/msm_iommu_dev.c b/drivers/iommu/msm_iommu_dev.c
index 08ba4972da9d..61def7cb5263 100644
--- a/drivers/iommu/msm_iommu_dev.c
+++ b/drivers/iommu/msm_iommu_dev.c
@@ -127,13 +127,12 @@ static void msm_iommu_reset(void __iomem *base, int ncb)
static int msm_iommu_probe(struct platform_device *pdev)
{
- struct resource *r, *r2;
+ struct resource *r;
struct clk *iommu_clk;
struct clk *iommu_pclk;
struct msm_iommu_drvdata *drvdata;
struct msm_iommu_dev *iommu_dev = pdev->dev.platform_data;
void __iomem *regs_base;
- resource_size_t len;
int ret, irq, par;
if (pdev->id == -1) {
@@ -178,35 +177,16 @@ static int msm_iommu_probe(struct platform_device *pdev)
iommu_clk = NULL;
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "physbase");
-
- if (!r) {
- ret = -ENODEV;
- goto fail_clk;
- }
-
- len = resource_size(r);
-
- r2 = request_mem_region(r->start, len, r->name);
- if (!r2) {
- pr_err("Could not request memory region: start=%p, len=%d\n",
- (void *) r->start, len);
- ret = -EBUSY;
+ regs_base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(regs_base)) {
+ ret = PTR_ERR(regs_base);
goto fail_clk;
}
- regs_base = ioremap(r2->start, len);
-
- if (!regs_base) {
- pr_err("Could not ioremap: start=%p, len=%d\n",
- (void *) r2->start, len);
- ret = -EBUSY;
- goto fail_mem;
- }
-
irq = platform_get_irq_byname(pdev, "secure_irq");
if (irq < 0) {
ret = -ENODEV;
- goto fail_io;
+ goto fail_clk;
}
msm_iommu_reset(regs_base, iommu_dev->ncb);
@@ -222,14 +202,14 @@ static int msm_iommu_probe(struct platform_device *pdev)
if (!par) {
pr_err("%s: Invalid PAR value detected\n", iommu_dev->name);
ret = -ENODEV;
- goto fail_io;
+ goto fail_clk;
}
ret = request_irq(irq, msm_iommu_fault_handler, 0,
"msm_iommu_secure_irpt_handler", drvdata);
if (ret) {
pr_err("Request IRQ %d failed with ret=%d\n", irq, ret);
- goto fail_io;
+ goto fail_clk;
}
@@ -250,10 +230,6 @@ static int msm_iommu_probe(struct platform_device *pdev)
clk_disable(iommu_pclk);
return 0;
-fail_io:
- iounmap(regs_base);
-fail_mem:
- release_mem_region(r->start, len);
fail_clk:
if (iommu_clk) {
clk_disable(iommu_clk);
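
The conversion above folds the request_mem_region()/ioremap() pair, both error labels, and the matching teardown into a single devm_ioremap_resource() call, which also validates a NULL resource. The resulting probe shape, sketched with an illustrative foo_probe():

	static int foo_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *base;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(base))	/* covers res == NULL too */
			return PTR_ERR(base);

		/* no iounmap()/release_mem_region() on any error path:
		 * the devm core unwinds the mapping on probe failure
		 * or device removal.
		 */
		return 0;
	}
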
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 7fcbfc498fa9..895af06a667f 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -34,6 +34,9 @@
#include "omap-iopgtable.h"
#include "omap-iommu.h"
+#define to_iommu(dev) \
+ ((struct omap_iommu *)platform_get_drvdata(to_platform_device(dev)))
+
#define for_each_iotlb_cr(obj, n, __i, cr) \
for (__i = 0; \
(__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true); \
@@ -391,6 +394,7 @@ static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
__func__, start, da, bytes);
iotlb_load_cr(obj, &cr);
iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
+ break;
}
}
pm_runtime_put_sync(obj->dev);
@@ -1037,19 +1041,18 @@ static void iopte_cachep_ctor(void *iopte)
clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
}
-static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa,
- u32 flags)
+static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
{
memset(e, 0, sizeof(*e));
e->da = da;
e->pa = pa;
- e->valid = 1;
+ e->valid = MMU_CAM_V;
/* FIXME: add OMAP1 support */
- e->pgsz = flags & MMU_CAM_PGSZ_MASK;
- e->endian = flags & MMU_RAM_ENDIAN_MASK;
- e->elsz = flags & MMU_RAM_ELSZ_MASK;
- e->mixed = flags & MMU_RAM_MIXED_MASK;
+ e->pgsz = pgsz;
+ e->endian = MMU_RAM_ENDIAN_LITTLE;
+ e->elsz = MMU_RAM_ELSZ_8;
+ e->mixed = 0;
return iopgsz_to_bytes(e->pgsz);
}
@@ -1062,9 +1065,8 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
struct device *dev = oiommu->dev;
struct iotlb_entry e;
int omap_pgsz;
- u32 ret, flags;
+ u32 ret;
- /* we only support mapping a single iommu page for now */
omap_pgsz = bytes_to_iopgsz(bytes);
if (omap_pgsz < 0) {
dev_err(dev, "invalid size to map: %d\n", bytes);
@@ -1073,9 +1075,7 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
dev_dbg(dev, "mapping da 0x%lx to pa 0x%x size 0x%x\n", da, pa, bytes);
- flags = omap_pgsz | prot;
-
- iotlb_init_entry(&e, da, pa, flags);
+ iotlb_init_entry(&e, da, pa, omap_pgsz);
ret = omap_iopgtable_store_entry(oiommu, &e);
if (ret)
@@ -1248,12 +1248,6 @@ static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
return ret;
}
-static int omap_iommu_domain_has_cap(struct iommu_domain *domain,
- unsigned long cap)
-{
- return 0;
-}
-
static int omap_iommu_add_device(struct device *dev)
{
struct omap_iommu_arch_data *arch_data;
@@ -1305,7 +1299,6 @@ static struct iommu_ops omap_iommu_ops = {
.map = omap_iommu_map,
.unmap = omap_iommu_unmap,
.iova_to_phys = omap_iommu_iova_to_phys,
- .domain_has_cap = omap_iommu_domain_has_cap,
.add_device = omap_iommu_add_device,
.remove_device = omap_iommu_remove_device,
.pgsize_bitmap = OMAP_IOMMU_PGSIZES,
diff --git a/drivers/iommu/omap-iopgtable.h b/drivers/iommu/omap-iopgtable.h
index b6f9a51746ca..f891683e3f05 100644
--- a/drivers/iommu/omap-iopgtable.h
+++ b/drivers/iommu/omap-iopgtable.h
@@ -93,6 +93,3 @@ static inline phys_addr_t omap_iommu_translate(u32 d, u32 va, u32 mask)
/* to find an entry in the second-level page table. */
#define iopte_index(da) (((da) >> IOPTE_SHIFT) & (PTRS_PER_IOPTE - 1))
#define iopte_offset(iopgd, da) (iopgd_page_vaddr(iopgd) + iopte_index(da))
-
-#define to_iommu(dev) \
- (platform_get_drvdata(to_platform_device(dev)))
diff --git a/drivers/iommu/shmobile-ipmmu.c b/drivers/iommu/shmobile-ipmmu.c
index e3bc2e19b6dd..bd97adecb1fd 100644
--- a/drivers/iommu/shmobile-ipmmu.c
+++ b/drivers/iommu/shmobile-ipmmu.c
@@ -94,11 +94,6 @@ static int ipmmu_probe(struct platform_device *pdev)
struct resource *res;
struct shmobile_ipmmu_platform_data *pdata = pdev->dev.platform_data;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "cannot get platform resources\n");
- return -ENOENT;
- }
ipmmu = devm_kzalloc(&pdev->dev, sizeof(*ipmmu), GFP_KERNEL);
if (!ipmmu) {
dev_err(&pdev->dev, "cannot allocate device data\n");
@@ -106,19 +101,18 @@ static int ipmmu_probe(struct platform_device *pdev)
}
spin_lock_init(&ipmmu->flush_lock);
ipmmu->dev = &pdev->dev;
- ipmmu->ipmmu_base = devm_ioremap_nocache(&pdev->dev, res->start,
- resource_size(res));
- if (!ipmmu->ipmmu_base) {
- dev_err(&pdev->dev, "ioremap_nocache failed\n");
- return -ENOMEM;
- }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ipmmu->ipmmu_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ipmmu->ipmmu_base))
+ return PTR_ERR(ipmmu->ipmmu_base);
+
ipmmu->dev_names = pdata->dev_names;
ipmmu->num_dev_names = pdata->num_dev_names;
platform_set_drvdata(pdev, ipmmu);
ipmmu_reg_write(ipmmu, IMCTR1, 0x0); /* disable TLB */
ipmmu_reg_write(ipmmu, IMCTR2, 0x0); /* disable PMB */
- ipmmu_iommu_init(ipmmu);
- return 0;
+ return ipmmu_iommu_init(ipmmu);
}
static struct platform_driver ipmmu_driver = {
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 9f69e818b000..93580a47cc54 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -82,7 +82,8 @@ static inline struct arp_pkt *arp_pkt(const struct sk_buff *skb)
}
/* Forward declaration */
-static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]);
+static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
+ bool strict_match);
static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp);
static void rlb_src_unlink(struct bonding *bond, u32 index);
static void rlb_src_link(struct bonding *bond, u32 ip_src_hash,
@@ -459,7 +460,7 @@ static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[])
bond->alb_info.rlb_promisc_timeout_counter = 0;
- alb_send_learning_packets(bond->curr_active_slave, addr);
+ alb_send_learning_packets(bond->curr_active_slave, addr, true);
}
/* slave being removed should not be active at this point
@@ -995,7 +996,7 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
/*********************** tlb/rlb shared functions *********************/
static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
- u16 vid)
+ __be16 vlan_proto, u16 vid)
{
struct learning_pkt pkt;
struct sk_buff *skb;
@@ -1021,7 +1022,7 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
skb->dev = slave->dev;
if (vid) {
- skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vid);
+ skb = vlan_put_tag(skb, vlan_proto, vid);
if (!skb) {
pr_err("%s: Error: failed to insert VLAN tag\n",
slave->bond->dev->name);
@@ -1032,22 +1033,32 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
dev_queue_xmit(skb);
}
-
-static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
+static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
+ bool strict_match)
{
struct bonding *bond = bond_get_bond_by_slave(slave);
struct net_device *upper;
struct list_head *iter;
/* send untagged */
- alb_send_lp_vid(slave, mac_addr, 0);
+ alb_send_lp_vid(slave, mac_addr, 0, 0);
/* loop through vlans and send one packet for each */
rcu_read_lock();
netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
- if (upper->priv_flags & IFF_802_1Q_VLAN)
- alb_send_lp_vid(slave, mac_addr,
- vlan_dev_vlan_id(upper));
+ if (is_vlan_dev(upper) && vlan_get_encap_level(upper) == 0) {
+ if (strict_match &&
+ ether_addr_equal_64bits(mac_addr,
+ upper->dev_addr)) {
+ alb_send_lp_vid(slave, mac_addr,
+ vlan_dev_vlan_proto(upper),
+ vlan_dev_vlan_id(upper));
+ } else if (!strict_match) {
+ alb_send_lp_vid(slave, upper->dev_addr,
+ vlan_dev_vlan_proto(upper),
+ vlan_dev_vlan_id(upper));
+ }
+ }
}
rcu_read_unlock();
}
@@ -1107,7 +1118,7 @@ static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
/* fasten the change in the switch */
if (SLAVE_IS_OK(slave1)) {
- alb_send_learning_packets(slave1, slave1->dev->dev_addr);
+ alb_send_learning_packets(slave1, slave1->dev->dev_addr, false);
if (bond->alb_info.rlb_enabled) {
/* inform the clients that the mac address
* has changed
@@ -1119,7 +1130,7 @@ static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
}
if (SLAVE_IS_OK(slave2)) {
- alb_send_learning_packets(slave2, slave2->dev->dev_addr);
+ alb_send_learning_packets(slave2, slave2->dev->dev_addr, false);
if (bond->alb_info.rlb_enabled) {
/* inform the clients that the mac address
* has changed
@@ -1490,6 +1501,8 @@ void bond_alb_monitor(struct work_struct *work)
/* send learning packets */
if (bond_info->lp_counter >= BOND_ALB_LP_TICKS(bond)) {
+ bool strict_match;
+
/* change of curr_active_slave involves swapping of mac addresses.
* in order to avoid this swapping from happening while
* sending the learning packets, the curr_slave_lock must be held for
@@ -1497,8 +1510,15 @@ void bond_alb_monitor(struct work_struct *work)
*/
read_lock(&bond->curr_slave_lock);
- bond_for_each_slave_rcu(bond, slave, iter)
- alb_send_learning_packets(slave, slave->dev->dev_addr);
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ /* If updating curr_active_slave, use all mac addresses
+ * currently in use (!strict_match). Otherwise, only use
+ * the mac of the slave device.
+ */
+ strict_match = (slave != bond->curr_active_slave);
+ alb_send_learning_packets(slave, slave->dev->dev_addr,
+ strict_match);
+ }
read_unlock(&bond->curr_slave_lock);
@@ -1721,7 +1741,8 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
} else {
/* set the new_slave to the bond mac address */
alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr);
- alb_send_learning_packets(new_slave, bond->dev->dev_addr);
+ alb_send_learning_packets(new_slave, bond->dev->dev_addr,
+ false);
}
write_lock_bh(&bond->curr_slave_lock);
@@ -1764,7 +1785,8 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr);
read_lock(&bond->lock);
- alb_send_learning_packets(bond->curr_active_slave, bond_dev->dev_addr);
+ alb_send_learning_packets(bond->curr_active_slave,
+ bond_dev->dev_addr, false);
if (bond->alb_info.rlb_enabled) {
/* inform clients mac address has changed */
rlb_req_update_slave_clients(bond, bond->curr_active_slave);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 69aff72c8957..d3a67896d435 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2126,10 +2126,10 @@ static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
*/
static void bond_arp_send(struct net_device *slave_dev, int arp_op,
__be32 dest_ip, __be32 src_ip,
- struct bond_vlan_tag *inner,
- struct bond_vlan_tag *outer)
+ struct bond_vlan_tag *tags)
{
struct sk_buff *skb;
+ int i;
pr_debug("arp %d on slave %s: dst %pI4 src %pI4\n",
arp_op, slave_dev->name, &dest_ip, &src_ip);
@@ -2141,21 +2141,26 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
net_err_ratelimited("ARP packet allocation failed\n");
return;
}
- if (outer->vlan_id) {
- if (inner->vlan_id) {
- pr_debug("inner tag: proto %X vid %X\n",
- ntohs(inner->vlan_proto), inner->vlan_id);
- skb = __vlan_put_tag(skb, inner->vlan_proto,
- inner->vlan_id);
- if (!skb) {
- net_err_ratelimited("failed to insert inner VLAN tag\n");
- return;
- }
- }
- pr_debug("outer reg: proto %X vid %X\n",
- ntohs(outer->vlan_proto), outer->vlan_id);
- skb = vlan_put_tag(skb, outer->vlan_proto, outer->vlan_id);
+ /* Go through the inner tags backwards and add them to the packet */
+ for (i = BOND_MAX_VLAN_ENCAP - 1; i > 0; i--) {
+ if (!tags[i].vlan_id)
+ continue;
+
+ pr_debug("inner tag: proto %X vid %X\n",
+ ntohs(tags[i].vlan_proto), tags[i].vlan_id);
+ skb = __vlan_put_tag(skb, tags[i].vlan_proto,
+ tags[i].vlan_id);
+ if (!skb) {
+ net_err_ratelimited("failed to insert inner VLAN tag\n");
+ return;
+ }
+ }
+ /* Set the outer tag */
+ if (tags[0].vlan_id) {
+ pr_debug("outer tag: proto %X vid %X\n",
+ ntohs(tags[0].vlan_proto), tags[0].vlan_id);
+ skb = vlan_put_tag(skb, tags[0].vlan_proto, tags[0].vlan_id);
if (!skb) {
net_err_ratelimited("failed to insert outer VLAN tag\n");
return;
@@ -2164,22 +2169,52 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
arp_xmit(skb);
}
+/* Validate the device path between the @start_dev and the @end_dev.
+ * The path is valid if the @end_dev is reachable through device
+ * stacking.
+ * When the path is validated, collect any vlan information in the
+ * path.
+ */
+static bool bond_verify_device_path(struct net_device *start_dev,
+ struct net_device *end_dev,
+ struct bond_vlan_tag *tags)
+{
+ struct net_device *upper;
+ struct list_head *iter;
+ int idx;
+
+ if (start_dev == end_dev)
+ return true;
+
+ netdev_for_each_upper_dev_rcu(start_dev, upper, iter) {
+ if (bond_verify_device_path(upper, end_dev, tags)) {
+ if (is_vlan_dev(upper)) {
+ idx = vlan_get_encap_level(upper);
+ if (idx >= BOND_MAX_VLAN_ENCAP)
+ return false;
+
+ tags[idx].vlan_proto =
+ vlan_dev_vlan_proto(upper);
+ tags[idx].vlan_id = vlan_dev_vlan_id(upper);
+ }
+ return true;
+ }
+ }
+
+ return false;
+}
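
The tags array is filled on the way back down the recursion, so vlan_get_encap_level() places the outermost VLAN at index 0 and nested (Q-in-Q) tags after it, matching the order bond_arp_send() consumes them in. A toy model of the unwind, with hypothetical single-upper devices:

	struct toy_dev {
		struct toy_dev *upper;	/* single upper link for simplicity */
		int is_vlan;
		int encap_level;	/* 0 = outermost vlan */
		unsigned short vid;
	};

	/* Returns 1 if end is reachable from start, recording vids on unwind. */
	static int toy_verify_path(struct toy_dev *start, struct toy_dev *end,
				   unsigned short *vids)
	{
		struct toy_dev *up = start->upper;

		if (start == end)
			return 1;
		if (up && toy_verify_path(up, end, vids)) {
			if (up->is_vlan)
				vids[up->encap_level] = up->vid;
			return 1;
		}
		return 0;
	}

For bond -> bond.100 -> bond.100.200 this records vids[0] = 100 (outer) and vids[1] = 200 (inner).
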
static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
{
- struct net_device *upper, *vlan_upper;
- struct list_head *iter, *vlan_iter;
struct rtable *rt;
- struct bond_vlan_tag inner, outer;
+ struct bond_vlan_tag tags[BOND_MAX_VLAN_ENCAP];
__be32 *targets = bond->params.arp_targets, addr;
int i;
+ bool ret;
for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) {
pr_debug("basa: target %pI4\n", &targets[i]);
- inner.vlan_proto = 0;
- inner.vlan_id = 0;
- outer.vlan_proto = 0;
- outer.vlan_id = 0;
+ memset(tags, 0, sizeof(tags));
/* Find out through which dev should the packet go */
rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
@@ -2192,7 +2227,8 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
net_warn_ratelimited("%s: no route to arp_ip_target %pI4 and arp_validate is set\n",
bond->dev->name,
&targets[i]);
- bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], 0, &inner, &outer);
+ bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
+ 0, tags);
continue;
}
@@ -2201,52 +2237,12 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
goto found;
rcu_read_lock();
- /* first we search only for vlan devices. for every vlan
- * found we verify its upper dev list, searching for the
- * rt->dst.dev. If found we save the tag of the vlan and
- * proceed to send the packet.
- */
- netdev_for_each_all_upper_dev_rcu(bond->dev, vlan_upper,
- vlan_iter) {
- if (!is_vlan_dev(vlan_upper))
- continue;
-
- if (vlan_upper == rt->dst.dev) {
- outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper);
- outer.vlan_id = vlan_dev_vlan_id(vlan_upper);
- rcu_read_unlock();
- goto found;
- }
- netdev_for_each_all_upper_dev_rcu(vlan_upper, upper,
- iter) {
- if (upper == rt->dst.dev) {
- /* If the upper dev is a vlan dev too,
- * set the vlan tag to inner tag.
- */
- if (is_vlan_dev(upper)) {
- inner.vlan_proto = vlan_dev_vlan_proto(upper);
- inner.vlan_id = vlan_dev_vlan_id(upper);
- }
- outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper);
- outer.vlan_id = vlan_dev_vlan_id(vlan_upper);
- rcu_read_unlock();
- goto found;
- }
- }
- }
-
- /* if the device we're looking for is not on top of any of
- * our upper vlans, then just search for any dev that
- * matches, and in case it's a vlan - save the id
- */
- netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
- if (upper == rt->dst.dev) {
- rcu_read_unlock();
- goto found;
- }
- }
+ ret = bond_verify_device_path(bond->dev, rt->dst.dev, tags);
rcu_read_unlock();
+ if (ret)
+ goto found;
+
/* Not our device - skip */
pr_debug("%s: no path to arp_ip_target %pI4 via rt.dev %s\n",
bond->dev->name, &targets[i],
@@ -2259,7 +2255,7 @@ found:
addr = bond_confirm_addr(rt->dst.dev, targets[i], 0);
ip_rt_put(rt);
bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
- addr, &inner, &outer);
+ addr, tags);
}
}
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 724e30fa20b9..832070298446 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -125,6 +125,7 @@ static const struct bond_opt_value bond_fail_over_mac_tbl[] = {
static const struct bond_opt_value bond_intmax_tbl[] = {
{ "off", 0, BOND_VALFLAG_DEFAULT},
{ "maxval", INT_MAX, BOND_VALFLAG_MAX},
+ { NULL, -1, 0}
};
static const struct bond_opt_value bond_lacp_rate_tbl[] = {
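
The added { NULL, -1, 0} entry is the sentinel the option-table walkers stop on; without it a lookup in bond_intmax_tbl runs off the end of the array. The consuming loop has roughly this shape (a sketch of the assumed termination contract, not the exact bond_options helper):

	static const struct bond_opt_value *
	opt_tbl_find(const struct bond_opt_value *tbl, u64 val)
	{
		for (; tbl->string; tbl++)	/* NULL .string terminates */
			if (tbl->value == val)
				return tbl;
		return NULL;
	}
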
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index b8bdd0acc8f3..00bea320e3b5 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -36,6 +36,7 @@
#define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"
+#define BOND_MAX_VLAN_ENCAP 2
#define BOND_MAX_ARP_TARGETS 16
#define BOND_DEFAULT_MIIMON 100
diff --git a/drivers/net/can/c_can/Kconfig b/drivers/net/can/c_can/Kconfig
index 8ab7103d4f44..61ffc12d8fd8 100644
--- a/drivers/net/can/c_can/Kconfig
+++ b/drivers/net/can/c_can/Kconfig
@@ -14,13 +14,6 @@ config CAN_C_CAN_PLATFORM
SPEAr1310 and SPEAr320 evaluation boards & TI (www.ti.com)
boards like am335x, dm814x, dm813x and dm811x.
-config CAN_C_CAN_STRICT_FRAME_ORDERING
- bool "Force a strict RX CAN frame order (may cause frame loss)"
- ---help---
- The RX split buffer prevents packet reordering but can cause packet
- loss. Only enable this option when you accept to lose CAN frames
- in favour of getting the received CAN frames in the correct order.
-
config CAN_C_CAN_PCI
tristate "Generic PCI Bus based C_CAN/D_CAN driver"
depends on PCI
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index a2ca820b5373..95e04e2002da 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -732,26 +732,12 @@ static u32 c_can_adjust_pending(u32 pend)
static inline void c_can_rx_object_get(struct net_device *dev,
struct c_can_priv *priv, u32 obj)
{
-#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
- if (obj < C_CAN_MSG_RX_LOW_LAST)
- c_can_object_get(dev, IF_RX, obj, IF_COMM_RCV_LOW);
- else
-#endif
c_can_object_get(dev, IF_RX, obj, priv->comm_rcv_high);
}
static inline void c_can_rx_finalize(struct net_device *dev,
struct c_can_priv *priv, u32 obj)
{
-#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
- if (obj < C_CAN_MSG_RX_LOW_LAST)
- priv->rxmasked |= BIT(obj - 1);
- else if (obj == C_CAN_MSG_RX_LOW_LAST) {
- priv->rxmasked = 0;
- /* activate all lower message objects */
- c_can_activate_all_lower_rx_msg_obj(dev, IF_RX);
- }
-#endif
if (priv->type != BOSCH_D_CAN)
c_can_object_get(dev, IF_RX, obj, IF_COMM_CLR_NEWDAT);
}
@@ -799,9 +785,6 @@ static inline u32 c_can_get_pending(struct c_can_priv *priv)
{
u32 pend = priv->read_reg(priv, C_CAN_NEWDAT1_REG);
-#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
- pend &= ~priv->rxmasked;
-#endif
return pend;
}
@@ -814,25 +797,6 @@ static inline u32 c_can_get_pending(struct c_can_priv *priv)
* has arrived. To work-around this issue, we keep two groups of message
* objects whose partitioning is defined by C_CAN_MSG_OBJ_RX_SPLIT.
*
- * If CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING = y
- *
- * To ensure in-order frame reception we use the following
- * approach while re-activating a message object to receive further
- * frames:
- * - if the current message object number is lower than
- * C_CAN_MSG_RX_LOW_LAST, do not clear the NEWDAT bit while clearing
- * the INTPND bit.
- * - if the current message object number is equal to
- * C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of all lower
- * receive message objects.
- * - if the current message object number is greater than
- * C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of
- * only this message object.
- *
- * This can cause packet loss!
- *
- * If CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING = n
- *
* We clear the newdat bit right away.
*
* This can result in packet reordering when the readout is slow.
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index c540e3d12e3d..564933ae218c 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -551,7 +551,7 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct sja1000_priv *priv;
struct peak_pci_chan *chan;
- struct net_device *dev;
+ struct net_device *dev, *prev_dev;
void __iomem *cfg_base, *reg_base;
u16 sub_sys_id, icr;
int i, err, channels;
@@ -688,11 +688,13 @@ failure_remove_channels:
writew(0x0, cfg_base + PITA_ICR + 2);
chan = NULL;
- for (dev = pci_get_drvdata(pdev); dev; dev = chan->prev_dev) {
- unregister_sja1000dev(dev);
- free_sja1000dev(dev);
+ for (dev = pci_get_drvdata(pdev); dev; dev = prev_dev) {
priv = netdev_priv(dev);
chan = priv->priv;
+ prev_dev = chan->prev_dev;
+
+ unregister_sja1000dev(dev);
+ free_sja1000dev(dev);
}
/* free any PCIeC resources too */
@@ -726,10 +728,12 @@ static void peak_pci_remove(struct pci_dev *pdev)
/* Loop over all registered devices */
while (1) {
+ struct net_device *prev_dev = chan->prev_dev;
+
dev_info(&pdev->dev, "removing device %s\n", dev->name);
unregister_sja1000dev(dev);
free_sja1000dev(dev);
- dev = chan->prev_dev;
+ dev = prev_dev;
if (!dev) {
/* do that only for first channel */
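
Both hunks above fix the same use-after-free: chan lives inside the net_device's private area, so chan->prev_dev must be read before the device is unregistered and freed. The generic save-before-free pattern, as a standalone sketch:

	#include <stdlib.h>

	struct node {
		struct node *prev;
		/* payload ... */
	};

	static void free_chain(struct node *n)
	{
		while (n) {
			struct node *prev = n->prev;	/* read before freeing n */

			free(n);
			n = prev;
		}
	}
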
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 39b26fe28d10..d7401017a3f1 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -35,6 +35,18 @@ source "drivers/net/ethernet/calxeda/Kconfig"
source "drivers/net/ethernet/chelsio/Kconfig"
source "drivers/net/ethernet/cirrus/Kconfig"
source "drivers/net/ethernet/cisco/Kconfig"
+
+config CX_ECAT
+ tristate "Beckhoff CX5020 EtherCAT master support"
+ depends on PCI
+ ---help---
+ Driver for the EtherCAT master module located on the CCAT FPGA
+ found on the Beckhoff CX5020, and possibly on other Beckhoff CX
+ series industrial PCs.
+
+ To compile this driver as a module, choose M here. The module
+ will be called ec_bhf.
+
source "drivers/net/ethernet/davicom/Kconfig"
config DNET
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 545d0b3b9cb4..35190e36c456 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/
obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/
obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/
obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/
+obj-$(CONFIG_CX_ECAT) += ec_bhf.o
obj-$(CONFIG_DM9000) += davicom/
obj-$(CONFIG_DNET) += dnet.o
obj-$(CONFIG_NET_VENDOR_DEC) += dec/
diff --git a/drivers/net/ethernet/altera/Makefile b/drivers/net/ethernet/altera/Makefile
index d4a187e45369..3eff2fd3997e 100644
--- a/drivers/net/ethernet/altera/Makefile
+++ b/drivers/net/ethernet/altera/Makefile
@@ -5,3 +5,4 @@
obj-$(CONFIG_ALTERA_TSE) += altera_tse.o
altera_tse-objs := altera_tse_main.o altera_tse_ethtool.o \
altera_msgdma.o altera_sgdma.o altera_utils.o
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/ethernet/altera/altera_msgdma.c b/drivers/net/ethernet/altera/altera_msgdma.c
index 4d1f2fdd5c32..0fb986ba3290 100644
--- a/drivers/net/ethernet/altera/altera_msgdma.c
+++ b/drivers/net/ethernet/altera/altera_msgdma.c
@@ -37,18 +37,16 @@ void msgdma_start_rxdma(struct altera_tse_private *priv)
void msgdma_reset(struct altera_tse_private *priv)
{
int counter;
- struct msgdma_csr *txcsr =
- (struct msgdma_csr *)priv->tx_dma_csr;
- struct msgdma_csr *rxcsr =
- (struct msgdma_csr *)priv->rx_dma_csr;
/* Reset Rx mSGDMA */
- iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status);
- iowrite32(MSGDMA_CSR_CTL_RESET, &rxcsr->control);
+ csrwr32(MSGDMA_CSR_STAT_MASK, priv->rx_dma_csr,
+ msgdma_csroffs(status));
+ csrwr32(MSGDMA_CSR_CTL_RESET, priv->rx_dma_csr,
+ msgdma_csroffs(control));
counter = 0;
while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
- if (tse_bit_is_clear(&rxcsr->status,
+ if (tse_bit_is_clear(priv->rx_dma_csr, msgdma_csroffs(status),
MSGDMA_CSR_STAT_RESETTING))
break;
udelay(1);
@@ -59,15 +57,18 @@ void msgdma_reset(struct altera_tse_private *priv)
"TSE Rx mSGDMA resetting bit never cleared!\n");
/* clear all status bits */
- iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status);
+ csrwr32(MSGDMA_CSR_STAT_MASK, priv->rx_dma_csr, msgdma_csroffs(status));
/* Reset Tx mSGDMA */
- iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status);
- iowrite32(MSGDMA_CSR_CTL_RESET, &txcsr->control);
+ csrwr32(MSGDMA_CSR_STAT_MASK, priv->tx_dma_csr,
+ msgdma_csroffs(status));
+
+ csrwr32(MSGDMA_CSR_CTL_RESET, priv->tx_dma_csr,
+ msgdma_csroffs(control));
counter = 0;
while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
- if (tse_bit_is_clear(&txcsr->status,
+ if (tse_bit_is_clear(priv->tx_dma_csr, msgdma_csroffs(status),
MSGDMA_CSR_STAT_RESETTING))
break;
udelay(1);
@@ -78,58 +79,58 @@ void msgdma_reset(struct altera_tse_private *priv)
"TSE Tx mSGDMA resetting bit never cleared!\n");
/* clear all status bits */
- iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status);
+ csrwr32(MSGDMA_CSR_STAT_MASK, priv->tx_dma_csr, msgdma_csroffs(status));
}
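
The reset above is the usual "kick, then poll with a bounded spin" idiom, performed once per DMA direction. Factored out it would look roughly like this (a sketch built on the csrrd32() accessor added later in this series):

	/* Poll until (reg & mask) clears, or give up after max_us microseconds. */
	static int poll_bit_clear(void __iomem *base, size_t offs, u32 mask,
				  unsigned int max_us)
	{
		unsigned int i;

		for (i = 0; i < max_us; i++) {
			if (!(csrrd32(base, offs) & mask))
				return 0;
			udelay(1);
		}
		return -ETIMEDOUT;
	}
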
void msgdma_disable_rxirq(struct altera_tse_private *priv)
{
- struct msgdma_csr *csr = priv->rx_dma_csr;
- tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+ tse_clear_bit(priv->rx_dma_csr, msgdma_csroffs(control),
+ MSGDMA_CSR_CTL_GLOBAL_INTR);
}
void msgdma_enable_rxirq(struct altera_tse_private *priv)
{
- struct msgdma_csr *csr = priv->rx_dma_csr;
- tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+ tse_set_bit(priv->rx_dma_csr, msgdma_csroffs(control),
+ MSGDMA_CSR_CTL_GLOBAL_INTR);
}
void msgdma_disable_txirq(struct altera_tse_private *priv)
{
- struct msgdma_csr *csr = priv->tx_dma_csr;
- tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+ tse_clear_bit(priv->tx_dma_csr, msgdma_csroffs(control),
+ MSGDMA_CSR_CTL_GLOBAL_INTR);
}
void msgdma_enable_txirq(struct altera_tse_private *priv)
{
- struct msgdma_csr *csr = priv->tx_dma_csr;
- tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+ tse_set_bit(priv->tx_dma_csr, msgdma_csroffs(control),
+ MSGDMA_CSR_CTL_GLOBAL_INTR);
}
void msgdma_clear_rxirq(struct altera_tse_private *priv)
{
- struct msgdma_csr *csr = priv->rx_dma_csr;
- iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status);
+ csrwr32(MSGDMA_CSR_STAT_IRQ, priv->rx_dma_csr, msgdma_csroffs(status));
}
void msgdma_clear_txirq(struct altera_tse_private *priv)
{
- struct msgdma_csr *csr = priv->tx_dma_csr;
- iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status);
+ csrwr32(MSGDMA_CSR_STAT_IRQ, priv->tx_dma_csr, msgdma_csroffs(status));
}
/* return 0 to indicate transmit is pending */
int msgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
{
- struct msgdma_extended_desc *desc = priv->tx_dma_desc;
-
- iowrite32(lower_32_bits(buffer->dma_addr), &desc->read_addr_lo);
- iowrite32(upper_32_bits(buffer->dma_addr), &desc->read_addr_hi);
- iowrite32(0, &desc->write_addr_lo);
- iowrite32(0, &desc->write_addr_hi);
- iowrite32(buffer->len, &desc->len);
- iowrite32(0, &desc->burst_seq_num);
- iowrite32(MSGDMA_DESC_TX_STRIDE, &desc->stride);
- iowrite32(MSGDMA_DESC_CTL_TX_SINGLE, &desc->control);
+ csrwr32(lower_32_bits(buffer->dma_addr), priv->tx_dma_desc,
+ msgdma_descroffs(read_addr_lo));
+ csrwr32(upper_32_bits(buffer->dma_addr), priv->tx_dma_desc,
+ msgdma_descroffs(read_addr_hi));
+ csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(write_addr_lo));
+ csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(write_addr_hi));
+ csrwr32(buffer->len, priv->tx_dma_desc, msgdma_descroffs(len));
+ csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(burst_seq_num));
+ csrwr32(MSGDMA_DESC_TX_STRIDE, priv->tx_dma_desc,
+ msgdma_descroffs(stride));
+ csrwr32(MSGDMA_DESC_CTL_TX_SINGLE, priv->tx_dma_desc,
+ msgdma_descroffs(control));
return 0;
}
@@ -138,17 +139,16 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
u32 ready = 0;
u32 inuse;
u32 status;
- struct msgdma_csr *txcsr =
- (struct msgdma_csr *)priv->tx_dma_csr;
/* Get number of sent descriptors */
- inuse = ioread32(&txcsr->rw_fill_level) & 0xffff;
+ inuse = csrrd32(priv->tx_dma_csr, msgdma_csroffs(rw_fill_level))
+ & 0xffff;
if (inuse) { /* Tx FIFO is not empty */
ready = priv->tx_prod - priv->tx_cons - inuse - 1;
} else {
/* Check for buffered last packet */
- status = ioread32(&txcsr->status);
+ status = csrrd32(priv->tx_dma_csr, msgdma_csroffs(status));
if (status & MSGDMA_CSR_STAT_BUSY)
ready = priv->tx_prod - priv->tx_cons - 1;
else
@@ -162,7 +162,6 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
void msgdma_add_rx_desc(struct altera_tse_private *priv,
struct tse_buffer *rxbuffer)
{
- struct msgdma_extended_desc *desc = priv->rx_dma_desc;
u32 len = priv->rx_dma_buf_sz;
dma_addr_t dma_addr = rxbuffer->dma_addr;
u32 control = (MSGDMA_DESC_CTL_END_ON_EOP
@@ -172,14 +171,16 @@ void msgdma_add_rx_desc(struct altera_tse_private *priv,
| MSGDMA_DESC_CTL_TR_ERR_IRQ
| MSGDMA_DESC_CTL_GO);
- iowrite32(0, &desc->read_addr_lo);
- iowrite32(0, &desc->read_addr_hi);
- iowrite32(lower_32_bits(dma_addr), &desc->write_addr_lo);
- iowrite32(upper_32_bits(dma_addr), &desc->write_addr_hi);
- iowrite32(len, &desc->len);
- iowrite32(0, &desc->burst_seq_num);
- iowrite32(0x00010001, &desc->stride);
- iowrite32(control, &desc->control);
+ csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(read_addr_lo));
+ csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(read_addr_hi));
+ csrwr32(lower_32_bits(dma_addr), priv->rx_dma_desc,
+ msgdma_descroffs(write_addr_lo));
+ csrwr32(upper_32_bits(dma_addr), priv->rx_dma_desc,
+ msgdma_descroffs(write_addr_hi));
+ csrwr32(len, priv->rx_dma_desc, msgdma_descroffs(len));
+ csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(burst_seq_num));
+ csrwr32(0x00010001, priv->rx_dma_desc, msgdma_descroffs(stride));
+ csrwr32(control, priv->rx_dma_desc, msgdma_descroffs(control));
}
/* status is returned on upper 16 bits,
@@ -190,14 +191,13 @@ u32 msgdma_rx_status(struct altera_tse_private *priv)
u32 rxstatus = 0;
u32 pktlength;
u32 pktstatus;
- struct msgdma_csr *rxcsr =
- (struct msgdma_csr *)priv->rx_dma_csr;
- struct msgdma_response *rxresp =
- (struct msgdma_response *)priv->rx_dma_resp;
-
- if (ioread32(&rxcsr->resp_fill_level) & 0xffff) {
- pktlength = ioread32(&rxresp->bytes_transferred);
- pktstatus = ioread32(&rxresp->status);
+
+ if (csrrd32(priv->rx_dma_csr, msgdma_csroffs(resp_fill_level))
+ & 0xffff) {
+ pktlength = csrrd32(priv->rx_dma_resp,
+ msgdma_respoffs(bytes_transferred));
+ pktstatus = csrrd32(priv->rx_dma_resp,
+ msgdma_respoffs(status));
rxstatus = pktstatus;
rxstatus = rxstatus << 16;
rxstatus |= (pktlength & 0xffff);
diff --git a/drivers/net/ethernet/altera/altera_msgdmahw.h b/drivers/net/ethernet/altera/altera_msgdmahw.h
index d7b59ba4019c..e335626e1b6b 100644
--- a/drivers/net/ethernet/altera/altera_msgdmahw.h
+++ b/drivers/net/ethernet/altera/altera_msgdmahw.h
@@ -17,15 +17,6 @@
#ifndef __ALTERA_MSGDMAHW_H__
#define __ALTERA_MSGDMAHW_H__
-/* mSGDMA standard descriptor format
- */
-struct msgdma_desc {
- u32 read_addr; /* data buffer source address */
- u32 write_addr; /* data buffer destination address */
- u32 len; /* the number of bytes to transfer per descriptor */
- u32 control; /* characteristics of the transfer */
-};
-
/* mSGDMA extended descriptor format
*/
struct msgdma_extended_desc {
@@ -159,6 +150,10 @@ struct msgdma_response {
u32 status;
};
+#define msgdma_respoffs(a) (offsetof(struct msgdma_response, a))
+#define msgdma_csroffs(a) (offsetof(struct msgdma_csr, a))
+#define msgdma_descroffs(a) (offsetof(struct msgdma_extended_desc, a))
+
/* mSGDMA response register bit definitions
*/
#define MSGDMA_RESP_EARLY_TERM BIT(8)
diff --git a/drivers/net/ethernet/altera/altera_sgdma.c b/drivers/net/ethernet/altera/altera_sgdma.c
index 9ce8630692b6..99cc56f451cf 100644
--- a/drivers/net/ethernet/altera/altera_sgdma.c
+++ b/drivers/net/ethernet/altera/altera_sgdma.c
@@ -20,8 +20,8 @@
#include "altera_sgdmahw.h"
#include "altera_sgdma.h"
-static void sgdma_setup_descrip(struct sgdma_descrip *desc,
- struct sgdma_descrip *ndesc,
+static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
+ struct sgdma_descrip __iomem *ndesc,
dma_addr_t ndesc_phys,
dma_addr_t raddr,
dma_addr_t waddr,
@@ -31,17 +31,17 @@ static void sgdma_setup_descrip(struct sgdma_descrip *desc,
int wfixed);
static int sgdma_async_write(struct altera_tse_private *priv,
- struct sgdma_descrip *desc);
+ struct sgdma_descrip __iomem *desc);
static int sgdma_async_read(struct altera_tse_private *priv);
static dma_addr_t
sgdma_txphysaddr(struct altera_tse_private *priv,
- struct sgdma_descrip *desc);
+ struct sgdma_descrip __iomem *desc);
static dma_addr_t
sgdma_rxphysaddr(struct altera_tse_private *priv,
- struct sgdma_descrip *desc);
+ struct sgdma_descrip __iomem *desc);
static int sgdma_txbusy(struct altera_tse_private *priv);
@@ -79,7 +79,8 @@ int sgdma_initialize(struct altera_tse_private *priv)
priv->rxdescphys = (dma_addr_t) 0;
priv->txdescphys = (dma_addr_t) 0;
- priv->rxdescphys = dma_map_single(priv->device, priv->rx_dma_desc,
+ priv->rxdescphys = dma_map_single(priv->device,
+ (void __force *)priv->rx_dma_desc,
priv->rxdescmem, DMA_BIDIRECTIONAL);
if (dma_mapping_error(priv->device, priv->rxdescphys)) {
@@ -88,7 +89,8 @@ int sgdma_initialize(struct altera_tse_private *priv)
return -EINVAL;
}
- priv->txdescphys = dma_map_single(priv->device, priv->tx_dma_desc,
+ priv->txdescphys = dma_map_single(priv->device,
+ (void __force *)priv->tx_dma_desc,
priv->txdescmem, DMA_TO_DEVICE);
if (dma_mapping_error(priv->device, priv->txdescphys)) {
@@ -98,8 +100,8 @@ int sgdma_initialize(struct altera_tse_private *priv)
}
/* Initialize descriptor memory to all 0's, sync memory to cache */
- memset(priv->tx_dma_desc, 0, priv->txdescmem);
- memset(priv->rx_dma_desc, 0, priv->rxdescmem);
+ memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
+ memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);
dma_sync_single_for_device(priv->device, priv->txdescphys,
priv->txdescmem, DMA_TO_DEVICE);
@@ -126,22 +128,15 @@ void sgdma_uninitialize(struct altera_tse_private *priv)
*/
void sgdma_reset(struct altera_tse_private *priv)
{
- u32 *ptxdescripmem = (u32 *)priv->tx_dma_desc;
- u32 txdescriplen = priv->txdescmem;
- u32 *prxdescripmem = (u32 *)priv->rx_dma_desc;
- u32 rxdescriplen = priv->rxdescmem;
- struct sgdma_csr *ptxsgdma = (struct sgdma_csr *)priv->tx_dma_csr;
- struct sgdma_csr *prxsgdma = (struct sgdma_csr *)priv->rx_dma_csr;
-
/* Initialize descriptor memory to 0 */
- memset(ptxdescripmem, 0, txdescriplen);
- memset(prxdescripmem, 0, rxdescriplen);
+ memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
+ memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);
- iowrite32(SGDMA_CTRLREG_RESET, &ptxsgdma->control);
- iowrite32(0, &ptxsgdma->control);
+ csrwr32(SGDMA_CTRLREG_RESET, priv->tx_dma_csr, sgdma_csroffs(control));
+ csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));
- iowrite32(SGDMA_CTRLREG_RESET, &prxsgdma->control);
- iowrite32(0, &prxsgdma->control);
+ csrwr32(SGDMA_CTRLREG_RESET, priv->rx_dma_csr, sgdma_csroffs(control));
+ csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
}
/* For SGDMA, interrupts remain enabled after initially enabling,
@@ -167,14 +162,14 @@ void sgdma_disable_txirq(struct altera_tse_private *priv)
void sgdma_clear_rxirq(struct altera_tse_private *priv)
{
- struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
- tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
+ tse_set_bit(priv->rx_dma_csr, sgdma_csroffs(control),
+ SGDMA_CTRLREG_CLRINT);
}
void sgdma_clear_txirq(struct altera_tse_private *priv)
{
- struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
- tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
+ tse_set_bit(priv->tx_dma_csr, sgdma_csroffs(control),
+ SGDMA_CTRLREG_CLRINT);
}
/* transmits buffer through SGDMA. Returns number of buffers
@@ -184,12 +179,11 @@ void sgdma_clear_txirq(struct altera_tse_private *priv)
*/
int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
{
- int pktstx = 0;
- struct sgdma_descrip *descbase =
- (struct sgdma_descrip *)priv->tx_dma_desc;
+ struct sgdma_descrip __iomem *descbase =
+ (struct sgdma_descrip __iomem *)priv->tx_dma_desc;
- struct sgdma_descrip *cdesc = &descbase[0];
- struct sgdma_descrip *ndesc = &descbase[1];
+ struct sgdma_descrip __iomem *cdesc = &descbase[0];
+ struct sgdma_descrip __iomem *ndesc = &descbase[1];
/* wait 'til the tx sgdma is ready for the next transmit request */
if (sgdma_txbusy(priv))
@@ -205,7 +199,7 @@ int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
0, /* read fixed */
SGDMA_CONTROL_WR_FIXED); /* Generate SOP */
- pktstx = sgdma_async_write(priv, cdesc);
+ sgdma_async_write(priv, cdesc);
/* enqueue the request to the pending transmit queue */
queue_tx(priv, buffer);
@@ -219,10 +213,10 @@ int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
u32 sgdma_tx_completions(struct altera_tse_private *priv)
{
u32 ready = 0;
- struct sgdma_descrip *desc = (struct sgdma_descrip *)priv->tx_dma_desc;
if (!sgdma_txbusy(priv) &&
- ((desc->control & SGDMA_CONTROL_HW_OWNED) == 0) &&
+ ((csrrd8(priv->tx_dma_desc, sgdma_descroffs(control))
+ & SGDMA_CONTROL_HW_OWNED) == 0) &&
(dequeue_tx(priv))) {
ready = 1;
}
@@ -246,32 +240,31 @@ void sgdma_add_rx_desc(struct altera_tse_private *priv,
*/
u32 sgdma_rx_status(struct altera_tse_private *priv)
{
- struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
- struct sgdma_descrip *base = (struct sgdma_descrip *)priv->rx_dma_desc;
- struct sgdma_descrip *desc = NULL;
- int pktsrx;
- unsigned int rxstatus = 0;
- unsigned int pktlength = 0;
- unsigned int pktstatus = 0;
+ struct sgdma_descrip __iomem *base =
+ (struct sgdma_descrip __iomem *)priv->rx_dma_desc;
+ struct sgdma_descrip __iomem *desc = NULL;
struct tse_buffer *rxbuffer = NULL;
+ unsigned int rxstatus = 0;
- u32 sts = ioread32(&csr->status);
+ u32 sts = csrrd32(priv->rx_dma_csr, sgdma_csroffs(status));
desc = &base[0];
if (sts & SGDMA_STSREG_EOP) {
+ unsigned int pktlength = 0;
+ unsigned int pktstatus = 0;
dma_sync_single_for_cpu(priv->device,
priv->rxdescphys,
priv->sgdmadesclen,
DMA_FROM_DEVICE);
- pktlength = desc->bytes_xferred;
- pktstatus = desc->status & 0x3f;
- rxstatus = pktstatus;
+ pktlength = csrrd16(desc, sgdma_descroffs(bytes_xferred));
+ pktstatus = csrrd8(desc, sgdma_descroffs(status));
+ rxstatus = pktstatus & ~SGDMA_STATUS_EOP;
rxstatus = rxstatus << 16;
rxstatus |= (pktlength & 0xffff);
if (rxstatus) {
- desc->status = 0;
+ csrwr8(0, desc, sgdma_descroffs(status));
rxbuffer = dequeue_rx(priv);
if (rxbuffer == NULL)
@@ -279,12 +272,12 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)
"sgdma rx and rx queue empty!\n");
/* Clear control */
- iowrite32(0, &csr->control);
+ csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
/* clear status */
- iowrite32(0xf, &csr->status);
+ csrwr32(0xf, priv->rx_dma_csr, sgdma_csroffs(status));
/* kick the rx sgdma after reaping this descriptor */
- pktsrx = sgdma_async_read(priv);
+ sgdma_async_read(priv);
} else {
/* If the SGDMA indicated an end of packet on recv,
@@ -298,10 +291,11 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)
*/
netdev_err(priv->dev,
"SGDMA RX Error Info: %x, %x, %x\n",
- sts, desc->status, rxstatus);
+ sts, csrrd8(desc, sgdma_descroffs(status)),
+ rxstatus);
}
} else if (sts == 0) {
- pktsrx = sgdma_async_read(priv);
+ sgdma_async_read(priv);
}
return rxstatus;
@@ -309,8 +303,8 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)
/* Private functions */
-static void sgdma_setup_descrip(struct sgdma_descrip *desc,
- struct sgdma_descrip *ndesc,
+static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
+ struct sgdma_descrip __iomem *ndesc,
dma_addr_t ndesc_phys,
dma_addr_t raddr,
dma_addr_t waddr,
@@ -320,27 +314,30 @@ static void sgdma_setup_descrip(struct sgdma_descrip *desc,
int wfixed)
{
/* Clear the next descriptor as not owned by hardware */
- u32 ctrl = ndesc->control;
+ u32 ctrl = csrrd8(ndesc, sgdma_descroffs(control));
ctrl &= ~SGDMA_CONTROL_HW_OWNED;
- ndesc->control = ctrl;
+ csrwr8(ctrl, ndesc, sgdma_descroffs(control));
- ctrl = 0;
ctrl = SGDMA_CONTROL_HW_OWNED;
ctrl |= generate_eop;
ctrl |= rfixed;
ctrl |= wfixed;
/* Channel is implicitly zero, initialized to 0 by default */
-
- desc->raddr = raddr;
- desc->waddr = waddr;
- desc->next = lower_32_bits(ndesc_phys);
- desc->control = ctrl;
- desc->status = 0;
- desc->rburst = 0;
- desc->wburst = 0;
- desc->bytes = length;
- desc->bytes_xferred = 0;
+ csrwr32(lower_32_bits(raddr), desc, sgdma_descroffs(raddr));
+ csrwr32(lower_32_bits(waddr), desc, sgdma_descroffs(waddr));
+
+ csrwr32(0, desc, sgdma_descroffs(pad1));
+ csrwr32(0, desc, sgdma_descroffs(pad2));
+ csrwr32(lower_32_bits(ndesc_phys), desc, sgdma_descroffs(next));
+
+ csrwr8(ctrl, desc, sgdma_descroffs(control));
+ csrwr8(0, desc, sgdma_descroffs(status));
+ csrwr8(0, desc, sgdma_descroffs(wburst));
+ csrwr8(0, desc, sgdma_descroffs(rburst));
+ csrwr16(length, desc, sgdma_descroffs(bytes));
+ csrwr16(0, desc, sgdma_descroffs(bytes_xferred));
}
/* If hardware is busy, don't restart async read.
@@ -351,12 +348,11 @@ static void sgdma_setup_descrip(struct sgdma_descrip *desc,
*/
static int sgdma_async_read(struct altera_tse_private *priv)
{
- struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
- struct sgdma_descrip *descbase =
- (struct sgdma_descrip *)priv->rx_dma_desc;
+ struct sgdma_descrip __iomem *descbase =
+ (struct sgdma_descrip __iomem *)priv->rx_dma_desc;
- struct sgdma_descrip *cdesc = &descbase[0];
- struct sgdma_descrip *ndesc = &descbase[1];
+ struct sgdma_descrip __iomem *cdesc = &descbase[0];
+ struct sgdma_descrip __iomem *ndesc = &descbase[1];
struct tse_buffer *rxbuffer = NULL;
@@ -382,11 +378,13 @@ static int sgdma_async_read(struct altera_tse_private *priv)
priv->sgdmadesclen,
DMA_TO_DEVICE);
- iowrite32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
- &csr->next_descrip);
+ csrwr32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
+ priv->rx_dma_csr,
+ sgdma_csroffs(next_descrip));
- iowrite32((priv->rxctrlreg | SGDMA_CTRLREG_START),
- &csr->control);
+ csrwr32((priv->rxctrlreg | SGDMA_CTRLREG_START),
+ priv->rx_dma_csr,
+ sgdma_csroffs(control));
return 1;
}
@@ -395,32 +393,32 @@ static int sgdma_async_read(struct altera_tse_private *priv)
}
static int sgdma_async_write(struct altera_tse_private *priv,
- struct sgdma_descrip *desc)
+ struct sgdma_descrip __iomem *desc)
{
- struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
-
if (sgdma_txbusy(priv))
return 0;
/* clear control and status */
- iowrite32(0, &csr->control);
- iowrite32(0x1f, &csr->status);
+ csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));
+ csrwr32(0x1f, priv->tx_dma_csr, sgdma_csroffs(status));
dma_sync_single_for_device(priv->device, priv->txdescphys,
priv->sgdmadesclen, DMA_TO_DEVICE);
- iowrite32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
- &csr->next_descrip);
+ csrwr32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
+ priv->tx_dma_csr,
+ sgdma_csroffs(next_descrip));
- iowrite32((priv->txctrlreg | SGDMA_CTRLREG_START),
- &csr->control);
+ csrwr32((priv->txctrlreg | SGDMA_CTRLREG_START),
+ priv->tx_dma_csr,
+ sgdma_csroffs(control));
return 1;
}
static dma_addr_t
sgdma_txphysaddr(struct altera_tse_private *priv,
- struct sgdma_descrip *desc)
+ struct sgdma_descrip __iomem *desc)
{
dma_addr_t paddr = priv->txdescmem_busaddr;
uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->tx_dma_desc;
@@ -429,7 +427,7 @@ sgdma_txphysaddr(struct altera_tse_private *priv,
static dma_addr_t
sgdma_rxphysaddr(struct altera_tse_private *priv,
- struct sgdma_descrip *desc)
+ struct sgdma_descrip __iomem *desc)
{
dma_addr_t paddr = priv->rxdescmem_busaddr;
uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->rx_dma_desc;
@@ -518,8 +516,8 @@ queue_rx_peekhead(struct altera_tse_private *priv)
*/
static int sgdma_rxbusy(struct altera_tse_private *priv)
{
- struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
- return ioread32(&csr->status) & SGDMA_STSREG_BUSY;
+ return csrrd32(priv->rx_dma_csr, sgdma_csroffs(status))
+ & SGDMA_STSREG_BUSY;
}
/* waits for the tx sgdma to finish its current operation, returns 0
@@ -528,13 +526,14 @@ static int sgdma_rxbusy(struct altera_tse_private *priv)
static int sgdma_txbusy(struct altera_tse_private *priv)
{
int delay = 0;
- struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
/* if DMA is busy, wait for the current transaction to finish */
- while ((ioread32(&csr->status) & SGDMA_STSREG_BUSY) && (delay++ < 100))
+ while ((csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
+ & SGDMA_STSREG_BUSY) && (delay++ < 100))
udelay(1);
- if (ioread32(&csr->status) & SGDMA_STSREG_BUSY) {
+ if (csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
+ & SGDMA_STSREG_BUSY) {
netdev_err(priv->dev, "timeout waiting for tx dma\n");
return 1;
}
diff --git a/drivers/net/ethernet/altera/altera_sgdmahw.h b/drivers/net/ethernet/altera/altera_sgdmahw.h
index ba3334f35383..85bc33b218d9 100644
--- a/drivers/net/ethernet/altera/altera_sgdmahw.h
+++ b/drivers/net/ethernet/altera/altera_sgdmahw.h
@@ -19,16 +19,16 @@
/* SGDMA descriptor structure */
struct sgdma_descrip {
- unsigned int raddr; /* address of data to be read */
- unsigned int pad1;
- unsigned int waddr;
- unsigned int pad2;
- unsigned int next;
- unsigned int pad3;
- unsigned short bytes;
- unsigned char rburst;
- unsigned char wburst;
- unsigned short bytes_xferred; /* 16 bits, bytes xferred */
+ u32 raddr; /* address of data to be read */
+ u32 pad1;
+ u32 waddr;
+ u32 pad2;
+ u32 next;
+ u32 pad3;
+ u16 bytes;
+ u8 rburst;
+ u8 wburst;
+ u16 bytes_xferred; /* 16 bits, bytes xferred */
/* bit 0: error
* bit 1: length error
@@ -39,7 +39,7 @@ struct sgdma_descrip {
* bit 6: reserved
* bit 7: status eop for recv case
*/
- unsigned char status;
+ u8 status;
/* bit 0: eop
* bit 1: read_fixed
@@ -47,7 +47,7 @@ struct sgdma_descrip {
* bits 3,4,5,6: Channel (always 0)
* bit 7: hardware owned
*/
- unsigned char control;
+ u8 control;
} __packed;
@@ -101,6 +101,8 @@ struct sgdma_csr {
u32 pad3[3];
};
+#define sgdma_csroffs(a) (offsetof(struct sgdma_csr, a))
+#define sgdma_descroffs(a) (offsetof(struct sgdma_descrip, a))
#define SGDMA_STSREG_ERR BIT(0) /* Error */
#define SGDMA_STSREG_EOP BIT(1) /* EOP */
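
With fixed-width types and the existing __packed attribute, the descriptor layout is pinned to what the FPGA expects: six u32 words, the u16/u8/u8/u16 burst group, then the status and control bytes, 32 bytes in total. A compile-time check makes that assumption explicit (a sketch; the kernel itself would typically use BUILD_BUG_ON):

	_Static_assert(sizeof(struct sgdma_descrip) == 32,
		       "sgdma_descrip must match the hardware descriptor layout");
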
diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h
index 465c4aabebbd..2adb24d4523c 100644
--- a/drivers/net/ethernet/altera/altera_tse.h
+++ b/drivers/net/ethernet/altera/altera_tse.h
@@ -357,6 +357,8 @@ struct altera_tse_mac {
u32 reserved5[42];
};
+#define tse_csroffs(a) (offsetof(struct altera_tse_mac, a))
+
/* Transmit and Receive Command Registers Bit Definitions
*/
#define ALTERA_TSE_TX_CMD_STAT_OMIT_CRC BIT(17)
@@ -487,4 +489,49 @@ struct altera_tse_private {
*/
void altera_tse_set_ethtool_ops(struct net_device *);
+static inline
+u32 csrrd32(void __iomem *mac, size_t offs)
+{
+ void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+ return readl(paddr);
+}
+
+static inline
+u16 csrrd16(void __iomem *mac, size_t offs)
+{
+ void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+ return readw(paddr);
+}
+
+static inline
+u8 csrrd8(void __iomem *mac, size_t offs)
+{
+ void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+ return readb(paddr);
+}
+
+static inline
+void csrwr32(u32 val, void __iomem *mac, size_t offs)
+{
+ void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+
+ writel(val, paddr);
+}
+
+static inline
+void csrwr16(u16 val, void __iomem *mac, size_t offs)
+{
+ void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+
+ writew(val, paddr);
+}
+
+static inline
+void csrwr8(u8 val, void __iomem *mac, size_t offs)
+{
+ void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+
+ writeb(val, paddr);
+}
+
#endif /* __ALTERA_TSE_H__ */
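
These helpers deliberately take a base pointer plus a byte offset instead of dereferencing struct fields: readl()/writel() keep the __iomem annotation intact for sparse (hence -D__CHECK_ENDIAN__ in the Makefile above) and handle any byte swapping. Typical use, sketched against a hypothetical two-register block:

	/* Hypothetical CSR block; offsets derived the same way as tse_csroffs(). */
	struct foo_csr {
		u32 status;
		u32 control;
	};
	#define foo_csroffs(a) (offsetof(struct foo_csr, a))

	/* Read-modify-write without ever dereferencing __iomem memory. */
	static void foo_set_ctrl_bits(void __iomem *base, u32 bits)
	{
		u32 v = csrrd32(base, foo_csroffs(control));

		csrwr32(v | bits, base, foo_csroffs(control));
	}
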
diff --git a/drivers/net/ethernet/altera/altera_tse_ethtool.c b/drivers/net/ethernet/altera/altera_tse_ethtool.c
index 76133caffa78..54c25eff7952 100644
--- a/drivers/net/ethernet/altera/altera_tse_ethtool.c
+++ b/drivers/net/ethernet/altera/altera_tse_ethtool.c
@@ -96,54 +96,89 @@ static void tse_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
u64 *buf)
{
struct altera_tse_private *priv = netdev_priv(dev);
- struct altera_tse_mac *mac = priv->mac_dev;
u64 ext;
- buf[0] = ioread32(&mac->frames_transmitted_ok);
- buf[1] = ioread32(&mac->frames_received_ok);
- buf[2] = ioread32(&mac->frames_check_sequence_errors);
- buf[3] = ioread32(&mac->alignment_errors);
+ buf[0] = csrrd32(priv->mac_dev,
+ tse_csroffs(frames_transmitted_ok));
+ buf[1] = csrrd32(priv->mac_dev,
+ tse_csroffs(frames_received_ok));
+ buf[2] = csrrd32(priv->mac_dev,
+ tse_csroffs(frames_check_sequence_errors));
+ buf[3] = csrrd32(priv->mac_dev,
+ tse_csroffs(alignment_errors));
/* Extended aOctetsTransmittedOK counter */
- ext = (u64) ioread32(&mac->msb_octets_transmitted_ok) << 32;
- ext |= ioread32(&mac->octets_transmitted_ok);
+ ext = (u64) csrrd32(priv->mac_dev,
+ tse_csroffs(msb_octets_transmitted_ok)) << 32;
+
+ ext |= csrrd32(priv->mac_dev,
+ tse_csroffs(octets_transmitted_ok));
buf[4] = ext;
/* Extended aOctetsReceivedOK counter */
- ext = (u64) ioread32(&mac->msb_octets_received_ok) << 32;
- ext |= ioread32(&mac->octets_received_ok);
+ ext = (u64) csrrd32(priv->mac_dev,
+ tse_csroffs(msb_octets_received_ok)) << 32;
+
+ ext |= csrrd32(priv->mac_dev,
+ tse_csroffs(octets_received_ok));
buf[5] = ext;
- buf[6] = ioread32(&mac->tx_pause_mac_ctrl_frames);
- buf[7] = ioread32(&mac->rx_pause_mac_ctrl_frames);
- buf[8] = ioread32(&mac->if_in_errors);
- buf[9] = ioread32(&mac->if_out_errors);
- buf[10] = ioread32(&mac->if_in_ucast_pkts);
- buf[11] = ioread32(&mac->if_in_multicast_pkts);
- buf[12] = ioread32(&mac->if_in_broadcast_pkts);
- buf[13] = ioread32(&mac->if_out_discards);
- buf[14] = ioread32(&mac->if_out_ucast_pkts);
- buf[15] = ioread32(&mac->if_out_multicast_pkts);
- buf[16] = ioread32(&mac->if_out_broadcast_pkts);
- buf[17] = ioread32(&mac->ether_stats_drop_events);
+ buf[6] = csrrd32(priv->mac_dev,
+ tse_csroffs(tx_pause_mac_ctrl_frames));
+ buf[7] = csrrd32(priv->mac_dev,
+ tse_csroffs(rx_pause_mac_ctrl_frames));
+ buf[8] = csrrd32(priv->mac_dev,
+ tse_csroffs(if_in_errors));
+ buf[9] = csrrd32(priv->mac_dev,
+ tse_csroffs(if_out_errors));
+ buf[10] = csrrd32(priv->mac_dev,
+ tse_csroffs(if_in_ucast_pkts));
+ buf[11] = csrrd32(priv->mac_dev,
+ tse_csroffs(if_in_multicast_pkts));
+ buf[12] = csrrd32(priv->mac_dev,
+ tse_csroffs(if_in_broadcast_pkts));
+ buf[13] = csrrd32(priv->mac_dev,
+ tse_csroffs(if_out_discards));
+ buf[14] = csrrd32(priv->mac_dev,
+ tse_csroffs(if_out_ucast_pkts));
+ buf[15] = csrrd32(priv->mac_dev,
+ tse_csroffs(if_out_multicast_pkts));
+ buf[16] = csrrd32(priv->mac_dev,
+ tse_csroffs(if_out_broadcast_pkts));
+ buf[17] = csrrd32(priv->mac_dev,
+ tse_csroffs(ether_stats_drop_events));
/* Extended etherStatsOctets counter */
- ext = (u64) ioread32(&mac->msb_ether_stats_octets) << 32;
- ext |= ioread32(&mac->ether_stats_octets);
+ ext = (u64) csrrd32(priv->mac_dev,
+ tse_csroffs(msb_ether_stats_octets)) << 32;
+ ext |= csrrd32(priv->mac_dev,
+ tse_csroffs(ether_stats_octets));
buf[18] = ext;
- buf[19] = ioread32(&mac->ether_stats_pkts);
- buf[20] = ioread32(&mac->ether_stats_undersize_pkts);
- buf[21] = ioread32(&mac->ether_stats_oversize_pkts);
- buf[22] = ioread32(&mac->ether_stats_pkts_64_octets);
- buf[23] = ioread32(&mac->ether_stats_pkts_65to127_octets);
- buf[24] = ioread32(&mac->ether_stats_pkts_128to255_octets);
- buf[25] = ioread32(&mac->ether_stats_pkts_256to511_octets);
- buf[26] = ioread32(&mac->ether_stats_pkts_512to1023_octets);
- buf[27] = ioread32(&mac->ether_stats_pkts_1024to1518_octets);
- buf[28] = ioread32(&mac->ether_stats_pkts_1519tox_octets);
- buf[29] = ioread32(&mac->ether_stats_jabbers);
- buf[30] = ioread32(&mac->ether_stats_fragments);
+ buf[19] = csrrd32(priv->mac_dev,
+ tse_csroffs(ether_stats_pkts));
+ buf[20] = csrrd32(priv->mac_dev,
+ tse_csroffs(ether_stats_undersize_pkts));
+ buf[21] = csrrd32(priv->mac_dev,
+ tse_csroffs(ether_stats_oversize_pkts));
+ buf[22] = csrrd32(priv->mac_dev,
+ tse_csroffs(ether_stats_pkts_64_octets));
+ buf[23] = csrrd32(priv->mac_dev,
+ tse_csroffs(ether_stats_pkts_65to127_octets));
+ buf[24] = csrrd32(priv->mac_dev,
+ tse_csroffs(ether_stats_pkts_128to255_octets));
+ buf[25] = csrrd32(priv->mac_dev,
+ tse_csroffs(ether_stats_pkts_256to511_octets));
+ buf[26] = csrrd32(priv->mac_dev,
+ tse_csroffs(ether_stats_pkts_512to1023_octets));
+ buf[27] = csrrd32(priv->mac_dev,
+ tse_csroffs(ether_stats_pkts_1024to1518_octets));
+ buf[28] = csrrd32(priv->mac_dev,
+ tse_csroffs(ether_stats_pkts_1519tox_octets));
+ buf[29] = csrrd32(priv->mac_dev,
+ tse_csroffs(ether_stats_jabbers));
+ buf[30] = csrrd32(priv->mac_dev,
+ tse_csroffs(ether_stats_fragments));
}
static int tse_sset_count(struct net_device *dev, int sset)
@@ -178,7 +213,6 @@ static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
{
int i;
struct altera_tse_private *priv = netdev_priv(dev);
- u32 *tse_mac_regs = (u32 *)priv->mac_dev;
u32 *buf = regbuf;
/* Set version to a known value, so ethtool knows
@@ -196,7 +230,7 @@ static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
regs->version = 1;
for (i = 0; i < TSE_NUM_REGS; i++)
- buf[i] = ioread32(&tse_mac_regs[i]);
+ buf[i] = csrrd32(priv->mac_dev, i * 4);
}
static int tse_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index e44a4aeb9701..7330681574d2 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -100,29 +100,30 @@ static inline u32 tse_tx_avail(struct altera_tse_private *priv)
*/
static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
- struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv;
- unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0;
- u32 data;
+ struct net_device *ndev = bus->priv;
+ struct altera_tse_private *priv = netdev_priv(ndev);
/* set MDIO address */
- iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr);
+ csrwr32((mii_id & 0x1f), priv->mac_dev,
+ tse_csroffs(mdio_phy0_addr));
/* get the data */
- data = ioread32(&mdio_regs[regnum]) & 0xffff;
- return data;
+ return csrrd32(priv->mac_dev,
+ tse_csroffs(mdio_phy0) + regnum * 4) & 0xffff;
}
static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
u16 value)
{
- struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv;
- unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0;
+ struct net_device *ndev = bus->priv;
+ struct altera_tse_private *priv = netdev_priv(ndev);
/* set MDIO address */
- iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr);
+ csrwr32((mii_id & 0x1f), priv->mac_dev,
+ tse_csroffs(mdio_phy0_addr));
/* write the data */
- iowrite32((u32) value, &mdio_regs[regnum]);
+ csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy0) + regnum * 4);
return 0;
}
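Note: the MDIO accessors above rely on mdio_phy0 being a contiguous bank of 32-bit registers, one per PHY register, so PHY register regnum lives at byte offset tse_csroffs(mdio_phy0) + regnum * 4. Note also that bus->priv now carries the net_device rather than the raw register block, so both callbacks recover the driver state with netdev_priv() before touching the hardware.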
@@ -168,7 +169,7 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
for (i = 0; i < PHY_MAX_ADDR; i++)
mdio->irq[i] = PHY_POLL;
- mdio->priv = priv->mac_dev;
+ mdio->priv = dev;
mdio->parent = priv->device;
ret = of_mdiobus_register(mdio, mdio_node);
@@ -563,7 +564,6 @@ static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int nopaged_len = skb_headlen(skb);
enum netdev_tx ret = NETDEV_TX_OK;
dma_addr_t dma_addr;
- int txcomplete = 0;
spin_lock_bh(&priv->tx_lock);
@@ -599,7 +599,7 @@ static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
dma_sync_single_for_device(priv->device, buffer->dma_addr,
buffer->len, DMA_TO_DEVICE);
- txcomplete = priv->dmaops->tx_buffer(priv, buffer);
+ priv->dmaops->tx_buffer(priv, buffer);
skb_tx_timestamp(skb);
@@ -698,7 +698,6 @@ static struct phy_device *connect_local_phy(struct net_device *dev)
struct altera_tse_private *priv = netdev_priv(dev);
struct phy_device *phydev = NULL;
char phy_id_fmt[MII_BUS_ID_SIZE + 3];
- int ret;
if (priv->phy_addr != POLL_PHY) {
snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
@@ -712,6 +711,7 @@ static struct phy_device *connect_local_phy(struct net_device *dev)
netdev_err(dev, "Could not attach to PHY\n");
} else {
+ int ret;
phydev = phy_find_first(priv->mdio);
if (phydev == NULL) {
netdev_err(dev, "No PHY found\n");
@@ -791,7 +791,6 @@ static int init_phy(struct net_device *dev)
static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
{
- struct altera_tse_mac *mac = priv->mac_dev;
u32 msb;
u32 lsb;
@@ -799,8 +798,8 @@ static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
lsb = ((addr[5] << 8) | addr[4]) & 0xffff;
/* Set primary MAC address */
- iowrite32(msb, &mac->mac_addr_0);
- iowrite32(lsb, &mac->mac_addr_1);
+ csrwr32(msb, priv->mac_dev, tse_csroffs(mac_addr_0));
+ csrwr32(lsb, priv->mac_dev, tse_csroffs(mac_addr_1));
}
/* MAC software reset.
@@ -811,26 +810,26 @@ static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
*/
static int reset_mac(struct altera_tse_private *priv)
{
- void __iomem *cmd_cfg_reg = &priv->mac_dev->command_config;
int counter;
u32 dat;
- dat = ioread32(cmd_cfg_reg);
+ dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
dat &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
dat |= MAC_CMDCFG_SW_RESET | MAC_CMDCFG_CNT_RESET;
- iowrite32(dat, cmd_cfg_reg);
+ csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
counter = 0;
while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
- if (tse_bit_is_clear(cmd_cfg_reg, MAC_CMDCFG_SW_RESET))
+ if (tse_bit_is_clear(priv->mac_dev, tse_csroffs(command_config),
+ MAC_CMDCFG_SW_RESET))
break;
udelay(1);
}
if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
- dat = ioread32(cmd_cfg_reg);
+ dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
dat &= ~MAC_CMDCFG_SW_RESET;
- iowrite32(dat, cmd_cfg_reg);
+ csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
return -1;
}
return 0;
@@ -840,41 +839,57 @@ static int reset_mac(struct altera_tse_private *priv)
*/
static int init_mac(struct altera_tse_private *priv)
{
- struct altera_tse_mac *mac = priv->mac_dev;
unsigned int cmd = 0;
u32 frm_length;
/* Setup Rx FIFO */
- iowrite32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY,
- &mac->rx_section_empty);
- iowrite32(ALTERA_TSE_RX_SECTION_FULL, &mac->rx_section_full);
- iowrite32(ALTERA_TSE_RX_ALMOST_EMPTY, &mac->rx_almost_empty);
- iowrite32(ALTERA_TSE_RX_ALMOST_FULL, &mac->rx_almost_full);
+ csrwr32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY,
+ priv->mac_dev, tse_csroffs(rx_section_empty));
+
+ csrwr32(ALTERA_TSE_RX_SECTION_FULL, priv->mac_dev,
+ tse_csroffs(rx_section_full));
+
+ csrwr32(ALTERA_TSE_RX_ALMOST_EMPTY, priv->mac_dev,
+ tse_csroffs(rx_almost_empty));
+
+ csrwr32(ALTERA_TSE_RX_ALMOST_FULL, priv->mac_dev,
+ tse_csroffs(rx_almost_full));
/* Setup Tx FIFO */
- iowrite32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY,
- &mac->tx_section_empty);
- iowrite32(ALTERA_TSE_TX_SECTION_FULL, &mac->tx_section_full);
- iowrite32(ALTERA_TSE_TX_ALMOST_EMPTY, &mac->tx_almost_empty);
- iowrite32(ALTERA_TSE_TX_ALMOST_FULL, &mac->tx_almost_full);
+ csrwr32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY,
+ priv->mac_dev, tse_csroffs(tx_section_empty));
+
+ csrwr32(ALTERA_TSE_TX_SECTION_FULL, priv->mac_dev,
+ tse_csroffs(tx_section_full));
+
+ csrwr32(ALTERA_TSE_TX_ALMOST_EMPTY, priv->mac_dev,
+ tse_csroffs(tx_almost_empty));
+
+ csrwr32(ALTERA_TSE_TX_ALMOST_FULL, priv->mac_dev,
+ tse_csroffs(tx_almost_full));
/* MAC Address Configuration */
tse_update_mac_addr(priv, priv->dev->dev_addr);
/* MAC Function Configuration */
frm_length = ETH_HLEN + priv->dev->mtu + ETH_FCS_LEN;
- iowrite32(frm_length, &mac->frm_length);
- iowrite32(ALTERA_TSE_TX_IPG_LENGTH, &mac->tx_ipg_length);
+ csrwr32(frm_length, priv->mac_dev, tse_csroffs(frm_length));
+
+ csrwr32(ALTERA_TSE_TX_IPG_LENGTH, priv->mac_dev,
+ tse_csroffs(tx_ipg_length));
/* Disable RX/TX shift 16 for alignment of all received frames on 16-bit
* start address
*/
- tse_set_bit(&mac->rx_cmd_stat, ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);
- tse_clear_bit(&mac->tx_cmd_stat, ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 |
- ALTERA_TSE_TX_CMD_STAT_OMIT_CRC);
+ tse_set_bit(priv->mac_dev, tse_csroffs(rx_cmd_stat),
+ ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);
+
+ tse_clear_bit(priv->mac_dev, tse_csroffs(tx_cmd_stat),
+ ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 |
+ ALTERA_TSE_TX_CMD_STAT_OMIT_CRC);
/* Set the MAC options */
- cmd = ioread32(&mac->command_config);
+ cmd = csrrd32(priv->mac_dev, tse_csroffs(command_config));
cmd &= ~MAC_CMDCFG_PAD_EN; /* No padding Removal on Receive */
cmd &= ~MAC_CMDCFG_CRC_FWD; /* CRC Removal */
cmd |= MAC_CMDCFG_RX_ERR_DISC; /* Automatically discard frames
@@ -889,9 +904,10 @@ static int init_mac(struct altera_tse_private *priv)
cmd &= ~MAC_CMDCFG_ETH_SPEED;
cmd &= ~MAC_CMDCFG_ENA_10;
- iowrite32(cmd, &mac->command_config);
+ csrwr32(cmd, priv->mac_dev, tse_csroffs(command_config));
- iowrite32(ALTERA_TSE_PAUSE_QUANTA, &mac->pause_quanta);
+ csrwr32(ALTERA_TSE_PAUSE_QUANTA, priv->mac_dev,
+ tse_csroffs(pause_quanta));
if (netif_msg_hw(priv))
dev_dbg(priv->device,
@@ -904,15 +920,14 @@ static int init_mac(struct altera_tse_private *priv)
*/
static void tse_set_mac(struct altera_tse_private *priv, bool enable)
{
- struct altera_tse_mac *mac = priv->mac_dev;
- u32 value = ioread32(&mac->command_config);
+ u32 value = csrrd32(priv->mac_dev, tse_csroffs(command_config));
if (enable)
value |= MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA;
else
value &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
- iowrite32(value, &mac->command_config);
+ csrwr32(value, priv->mac_dev, tse_csroffs(command_config));
}
/* Change the MTU
@@ -942,13 +957,12 @@ static int tse_change_mtu(struct net_device *dev, int new_mtu)
static void altera_tse_set_mcfilter(struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
- struct altera_tse_mac *mac = priv->mac_dev;
int i;
struct netdev_hw_addr *ha;
/* clear the hash filter */
for (i = 0; i < 64; i++)
- iowrite32(0, &(mac->hash_table[i]));
+ csrwr32(0, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
netdev_for_each_mc_addr(ha, dev) {
unsigned int hash = 0;
@@ -964,7 +978,7 @@ static void altera_tse_set_mcfilter(struct net_device *dev)
hash = (hash << 1) | xor_bit;
}
- iowrite32(1, &(mac->hash_table[hash]));
+ csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + hash * 4);
}
}
@@ -972,12 +986,11 @@ static void altera_tse_set_mcfilter(struct net_device *dev)
static void altera_tse_set_mcfilterall(struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
- struct altera_tse_mac *mac = priv->mac_dev;
int i;
/* set the hash filter */
for (i = 0; i < 64; i++)
- iowrite32(1, &(mac->hash_table[i]));
+ csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
}
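Note: the hash filter indexes a 64-entry table with a 6-bit value; the computation elided from the hunk above builds one bit per MAC address octet with the shift-and-or step shown. A standalone sketch of that index computation, assuming each hash bit is the parity (XOR of all eight bits) of one octet — tse_hash_index() is a hypothetical helper, not part of the driver:

	static unsigned int tse_hash_index(const u8 *addr)
	{
		unsigned int hash = 0;
		int octet, bit;

		for (octet = 5; octet >= 0; octet--) {
			u8 parity = 0;

			for (bit = 0; bit < 8; bit++)
				parity ^= (addr[octet] >> bit) & 1;

			hash = (hash << 1) | parity;	/* mirrors the shift-and-or above */
		}
		return hash;	/* 0..63: one csrwr32(1, ...) per matching entry */
	}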
/* Set or clear the multicast filter for this adaptor
@@ -985,12 +998,12 @@ static void altera_tse_set_mcfilterall(struct net_device *dev)
static void tse_set_rx_mode_hashfilter(struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
- struct altera_tse_mac *mac = priv->mac_dev;
spin_lock(&priv->mac_cfg_lock);
if (dev->flags & IFF_PROMISC)
- tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);
+ tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
+ MAC_CMDCFG_PROMIS_EN);
if (dev->flags & IFF_ALLMULTI)
altera_tse_set_mcfilterall(dev);
@@ -1005,15 +1018,16 @@ static void tse_set_rx_mode_hashfilter(struct net_device *dev)
static void tse_set_rx_mode(struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
- struct altera_tse_mac *mac = priv->mac_dev;
spin_lock(&priv->mac_cfg_lock);
if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
!netdev_mc_empty(dev) || !netdev_uc_empty(dev))
- tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);
+ tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
+ MAC_CMDCFG_PROMIS_EN);
else
- tse_clear_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);
+ tse_clear_bit(priv->mac_dev, tse_csroffs(command_config),
+ MAC_CMDCFG_PROMIS_EN);
spin_unlock(&priv->mac_cfg_lock);
}
@@ -1362,6 +1376,11 @@ static int altera_tse_probe(struct platform_device *pdev)
of_property_read_bool(pdev->dev.of_node,
"altr,has-hash-multicast-filter");
+ /* Keep the hash filter disabled for now, until the
+ * multicast filter receive issue is debugged
+ */

+ priv->hash_filter = 0;
+
/* get supplemental address settings for this instance */
priv->added_unicast =
of_property_read_bool(pdev->dev.of_node,
@@ -1493,7 +1512,7 @@ static int altera_tse_remove(struct platform_device *pdev)
return 0;
}
-struct altera_dmaops altera_dtype_sgdma = {
+static const struct altera_dmaops altera_dtype_sgdma = {
.altera_dtype = ALTERA_DTYPE_SGDMA,
.dmamask = 32,
.reset_dma = sgdma_reset,
@@ -1512,7 +1531,7 @@ struct altera_dmaops altera_dtype_sgdma = {
.start_rxdma = sgdma_start_rxdma,
};
-struct altera_dmaops altera_dtype_msgdma = {
+static const struct altera_dmaops altera_dtype_msgdma = {
.altera_dtype = ALTERA_DTYPE_MSGDMA,
.dmamask = 64,
.reset_dma = msgdma_reset,
diff --git a/drivers/net/ethernet/altera/altera_utils.c b/drivers/net/ethernet/altera/altera_utils.c
index 70fa13f486b2..d7eeb1713ad2 100644
--- a/drivers/net/ethernet/altera/altera_utils.c
+++ b/drivers/net/ethernet/altera/altera_utils.c
@@ -17,28 +17,28 @@
#include "altera_tse.h"
#include "altera_utils.h"
-void tse_set_bit(void __iomem *ioaddr, u32 bit_mask)
+void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask)
{
- u32 value = ioread32(ioaddr);
+ u32 value = csrrd32(ioaddr, offs);
value |= bit_mask;
- iowrite32(value, ioaddr);
+ csrwr32(value, ioaddr, offs);
}
-void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask)
+void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask)
{
- u32 value = ioread32(ioaddr);
+ u32 value = csrrd32(ioaddr, offs);
value &= ~bit_mask;
- iowrite32(value, ioaddr);
+ csrwr32(value, ioaddr, offs);
}
-int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask)
+int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask)
{
- u32 value = ioread32(ioaddr);
+ u32 value = csrrd32(ioaddr, offs);
return (value & bit_mask) ? 1 : 0;
}
-int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask)
+int tse_bit_is_clear(void __iomem *ioaddr, size_t offs, u32 bit_mask)
{
- u32 value = ioread32(ioaddr);
+ u32 value = csrrd32(ioaddr, offs);
return (value & bit_mask) ? 0 : 1;
}
diff --git a/drivers/net/ethernet/altera/altera_utils.h b/drivers/net/ethernet/altera/altera_utils.h
index ce1db36d3583..baf100ccf587 100644
--- a/drivers/net/ethernet/altera/altera_utils.h
+++ b/drivers/net/ethernet/altera/altera_utils.h
@@ -19,9 +19,9 @@
#ifndef __ALTERA_UTILS_H__
#define __ALTERA_UTILS_H__
-void tse_set_bit(void __iomem *ioaddr, u32 bit_mask);
-void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask);
-int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask);
-int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask);
+void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask);
+void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask);
+int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask);
+int tse_bit_is_clear(void __iomem *ioaddr, size_t offs, u32 bit_mask);
#endif /* __ALTERA_UTILS_H__*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index b260913db236..3b0d43154e67 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -10051,8 +10051,8 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
#define BCM_5710_UNDI_FW_MF_MAJOR (0x07)
#define BCM_5710_UNDI_FW_MF_MINOR (0x08)
#define BCM_5710_UNDI_FW_MF_VERS (0x05)
-#define BNX2X_PREV_UNDI_MF_PORT(p) (0x1a150c + ((p) << 4))
-#define BNX2X_PREV_UNDI_MF_FUNC(f) (0x1a184c + ((f) << 4))
+#define BNX2X_PREV_UNDI_MF_PORT(p) (BAR_TSTRORM_INTMEM + 0x150c + ((p) << 4))
+#define BNX2X_PREV_UNDI_MF_FUNC(f) (BAR_TSTRORM_INTMEM + 0x184c + ((f) << 4))
static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp)
{
u8 major, minor, version;
@@ -10352,6 +10352,7 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
/* Reset should be performed after BRB is emptied */
if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
u32 timer_count = 1000;
+ bool need_write = true;
/* Close the MAC Rx to prevent BRB from filling up */
bnx2x_prev_unload_close_mac(bp, &mac_vals);
@@ -10398,7 +10399,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
* cleaning methods - might be redundant but harmless.
*/
if (bnx2x_prev_unload_undi_fw_supports_mf(bp)) {
- bnx2x_prev_unload_undi_mf(bp);
+ if (need_write) {
+ bnx2x_prev_unload_undi_mf(bp);
+ need_write = false;
+ }
} else if (prev_undi) {
/* If UNDI resides in memory,
* manually increment it
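Note: the new need_write flag matters because this block sits inside the BRB-drain loop bounded by timer_count: the multi-function UNDI addresses only need the write-down once, while the legacy prev_undi path keeps incrementing its counter on every pass as before.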
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 81cc2d9831c2..b8078d50261b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -2695,7 +2695,7 @@ out:
bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
}
- return 0;
+ return rc;
}
int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 0c067e8564dd..784c7155b98a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -747,7 +747,7 @@ int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set)
out:
bnx2x_vfpf_finalize(bp, &req->first_tlv);
- return 0;
+ return rc;
}
/* request pf to config rss table for vf queues*/
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
new file mode 100644
index 000000000000..4884205e56ee
--- /dev/null
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -0,0 +1,706 @@
+/*
+ * drivers/net/ethernet/beckhoff/ec_bhf.c
+ *
+ * Copyright (C) 2014 Darek Marcinkiewicz <reksio@newterm.pl>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* This is a driver for the EtherCAT master module present on the CCAT FPGA.
+ * It can be found on Beckhoff CX50xx industrial PCs.
+ */
+
+#if 0
+#define DEBUG
+#endif
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
+#include <linux/stat.h>
+
+#define TIMER_INTERVAL_NSEC 20000
+
+#define INFO_BLOCK_SIZE 0x10
+#define INFO_BLOCK_TYPE 0x0
+#define INFO_BLOCK_REV 0x2
+#define INFO_BLOCK_BLK_CNT 0x4
+#define INFO_BLOCK_TX_CHAN 0x4
+#define INFO_BLOCK_RX_CHAN 0x5
+#define INFO_BLOCK_OFFSET 0x8
+
+#define EC_MII_OFFSET 0x4
+#define EC_FIFO_OFFSET 0x8
+#define EC_MAC_OFFSET 0xc
+
+#define MAC_FRAME_ERR_CNT 0x0
+#define MAC_RX_ERR_CNT 0x1
+#define MAC_CRC_ERR_CNT 0x2
+#define MAC_LNK_LST_ERR_CNT 0x3
+#define MAC_TX_FRAME_CNT 0x10
+#define MAC_RX_FRAME_CNT 0x14
+#define MAC_TX_FIFO_LVL 0x20
+#define MAC_DROPPED_FRMS 0x28
+#define MAC_CONNECTED_CCAT_FLAG 0x78
+
+#define MII_MAC_ADDR 0x8
+#define MII_MAC_FILT_FLAG 0xe
+#define MII_LINK_STATUS 0xf
+
+#define FIFO_TX_REG 0x0
+#define FIFO_TX_RESET 0x8
+#define FIFO_RX_REG 0x10
+#define FIFO_RX_ADDR_VALID (1u << 31)
+#define FIFO_RX_RESET 0x18
+
+#define DMA_CHAN_OFFSET 0x1000
+#define DMA_CHAN_SIZE 0x8
+
+#define DMA_WINDOW_SIZE_MASK 0xfffffffc
+
+static const struct pci_device_id ids[] = {
+ { PCI_DEVICE(0x15ec, 0x5000), },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, ids);
+
+struct rx_header {
+#define RXHDR_NEXT_ADDR_MASK 0xffffffu
+#define RXHDR_NEXT_VALID (1u << 31)
+ __le32 next;
+#define RXHDR_NEXT_RECV_FLAG 0x1
+ __le32 recv;
+#define RXHDR_LEN_MASK 0xfffu
+ __le16 len;
+ __le16 port;
+ __le32 reserved;
+ u8 timestamp[8];
+} __packed;
+
+#define PKT_PAYLOAD_SIZE 0x7e8
+struct rx_desc {
+ struct rx_header header;
+ u8 data[PKT_PAYLOAD_SIZE];
+} __packed;
+
+struct tx_header {
+ __le16 len;
+#define TX_HDR_PORT_0 0x1
+#define TX_HDR_PORT_1 0x2
+ u8 port;
+ u8 ts_enable;
+#define TX_HDR_SENT 0x1
+ __le32 sent;
+ u8 timestamp[8];
+} __packed;
+
+struct tx_desc {
+ struct tx_header header;
+ u8 data[PKT_PAYLOAD_SIZE];
+} __packed;
+
+#define FIFO_SIZE 64
+
+static long polling_frequency = TIMER_INTERVAL_NSEC;
+
+struct bhf_dma {
+ u8 *buf;
+ size_t len;
+ dma_addr_t buf_phys;
+
+ u8 *alloc;
+ size_t alloc_len;
+ dma_addr_t alloc_phys;
+};
+
+struct ec_bhf_priv {
+ struct net_device *net_dev;
+
+ struct pci_dev *dev;
+
+ void __iomem *io;
+ void __iomem *dma_io;
+
+ struct hrtimer hrtimer;
+
+ int tx_dma_chan;
+ int rx_dma_chan;
+ void __iomem *ec_io;
+ void __iomem *fifo_io;
+ void __iomem *mii_io;
+ void __iomem *mac_io;
+
+ struct bhf_dma rx_buf;
+ struct rx_desc *rx_descs;
+ int rx_dnext;
+ int rx_dcount;
+
+ struct bhf_dma tx_buf;
+ struct tx_desc *tx_descs;
+ int tx_dcount;
+ int tx_dnext;
+
+ u64 stat_rx_bytes;
+ u64 stat_tx_bytes;
+};
+
+#define PRIV_TO_DEV(priv) (&(priv)->dev->dev)
+
+#define ETHERCAT_MASTER_ID 0x14
+
+static void ec_bhf_print_status(struct ec_bhf_priv *priv)
+{
+ struct device *dev = PRIV_TO_DEV(priv);
+
+ dev_dbg(dev, "Frame error counter: %d\n",
+ ioread8(priv->mac_io + MAC_FRAME_ERR_CNT));
+ dev_dbg(dev, "RX error counter: %d\n",
+ ioread8(priv->mac_io + MAC_RX_ERR_CNT));
+ dev_dbg(dev, "CRC error counter: %d\n",
+ ioread8(priv->mac_io + MAC_CRC_ERR_CNT));
+ dev_dbg(dev, "TX frame counter: %d\n",
+ ioread32(priv->mac_io + MAC_TX_FRAME_CNT));
+ dev_dbg(dev, "RX frame counter: %d\n",
+ ioread32(priv->mac_io + MAC_RX_FRAME_CNT));
+ dev_dbg(dev, "TX fifo level: %d\n",
+ ioread8(priv->mac_io + MAC_TX_FIFO_LVL));
+ dev_dbg(dev, "Dropped frames: %d\n",
+ ioread8(priv->mac_io + MAC_DROPPED_FRMS));
+ dev_dbg(dev, "Connected with CCAT slot: %d\n",
+ ioread8(priv->mac_io + MAC_CONNECTED_CCAT_FLAG));
+ dev_dbg(dev, "Link status: %d\n",
+ ioread8(priv->mii_io + MII_LINK_STATUS));
+}
+
+static void ec_bhf_reset(struct ec_bhf_priv *priv)
+{
+ iowrite8(0, priv->mac_io + MAC_FRAME_ERR_CNT);
+ iowrite8(0, priv->mac_io + MAC_RX_ERR_CNT);
+ iowrite8(0, priv->mac_io + MAC_CRC_ERR_CNT);
+ iowrite8(0, priv->mac_io + MAC_LNK_LST_ERR_CNT);
+ iowrite32(0, priv->mac_io + MAC_TX_FRAME_CNT);
+ iowrite32(0, priv->mac_io + MAC_RX_FRAME_CNT);
+ iowrite8(0, priv->mac_io + MAC_DROPPED_FRMS);
+
+ iowrite8(0, priv->fifo_io + FIFO_TX_RESET);
+ iowrite8(0, priv->fifo_io + FIFO_RX_RESET);
+
+ iowrite8(0, priv->mac_io + MAC_TX_FIFO_LVL);
+}
+
+static void ec_bhf_send_packet(struct ec_bhf_priv *priv, struct tx_desc *desc)
+{
+ u32 len = le16_to_cpu(desc->header.len) + sizeof(desc->header);
+ u32 addr = (u8 *)desc - priv->tx_buf.buf;
+
+ iowrite32((ALIGN(len, 8) << 24) | addr, priv->fifo_io + FIFO_TX_REG);
+
+ dev_dbg(PRIV_TO_DEV(priv), "Done sending packet\n");
+}
+
+static int ec_bhf_desc_sent(struct tx_desc *desc)
+{
+ return le32_to_cpu(desc->header.sent) & TX_HDR_SENT;
+}
+
+static void ec_bhf_process_tx(struct ec_bhf_priv *priv)
+{
+ if (unlikely(netif_queue_stopped(priv->net_dev))) {
+ /* Make sure that we perceive changes to tx_dnext. */
+ smp_rmb();
+
+ if (ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext]))
+ netif_wake_queue(priv->net_dev);
+ }
+}
+
+static int ec_bhf_pkt_received(struct rx_desc *desc)
+{
+ return le32_to_cpu(desc->header.recv) & RXHDR_NEXT_RECV_FLAG;
+}
+
+static void ec_bhf_add_rx_desc(struct ec_bhf_priv *priv, struct rx_desc *desc)
+{
+ iowrite32(FIFO_RX_ADDR_VALID | ((u8 *)(desc) - priv->rx_buf.buf),
+ priv->fifo_io + FIFO_RX_REG);
+}
+
+static void ec_bhf_process_rx(struct ec_bhf_priv *priv)
+{
+ struct rx_desc *desc = &priv->rx_descs[priv->rx_dnext];
+ struct device *dev = PRIV_TO_DEV(priv);
+
+ while (ec_bhf_pkt_received(desc)) {
+ int pkt_size = (le16_to_cpu(desc->header.len) &
+ RXHDR_LEN_MASK) - sizeof(struct rx_header) - 4;
+ u8 *data = desc->data;
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb_ip_align(priv->net_dev, pkt_size);
+ dev_dbg(dev, "Received packet, size: %d\n", pkt_size);
+
+ if (skb) {
+ memcpy(skb_put(skb, pkt_size), data, pkt_size);
+ skb->protocol = eth_type_trans(skb, priv->net_dev);
+ dev_dbg(dev, "Protocol type: %x\n", skb->protocol);
+
+ priv->stat_rx_bytes += pkt_size;
+
+ netif_rx(skb);
+ } else {
+ dev_err_ratelimited(dev,
+ "Couldn't allocate a skb_buff for a packet of size %u\n",
+ pkt_size);
+ }
+
+ desc->header.recv = 0;
+
+ ec_bhf_add_rx_desc(priv, desc);
+
+ priv->rx_dnext = (priv->rx_dnext + 1) % priv->rx_dcount;
+ desc = &priv->rx_descs[priv->rx_dnext];
+ }
+
+}
+
+static enum hrtimer_restart ec_bhf_timer_fun(struct hrtimer *timer)
+{
+ struct ec_bhf_priv *priv = container_of(timer, struct ec_bhf_priv,
+ hrtimer);
+ ec_bhf_process_rx(priv);
+ ec_bhf_process_tx(priv);
+
+ if (!netif_running(priv->net_dev))
+ return HRTIMER_NORESTART;
+
+ hrtimer_forward_now(timer, ktime_set(0, polling_frequency));
+ return HRTIMER_RESTART;
+}
+
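Note: the driver is entirely polled — no interrupt is ever requested, and this hrtimer callback drives both directions. It re-arms itself every polling_frequency nanoseconds (TIMER_INTERVAL_NSEC, 20 us, by default; the module parameter declared near the bottom of the file is read-only at runtime) and stops re-arming once the interface is no longer running, which is why ec_bhf_stop() only needs hrtimer_cancel().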
+static int ec_bhf_setup_offsets(struct ec_bhf_priv *priv)
+{
+ struct device *dev = PRIV_TO_DEV(priv);
+ unsigned int block_count, i;
+ void __iomem *ec_info;
+
+ dev_dbg(dev, "Info block:\n");
+ dev_dbg(dev, "Type of function: %x\n", (unsigned)ioread16(priv->io));
+ dev_dbg(dev, "Revision of function: %x\n",
+ (unsigned)ioread16(priv->io + INFO_BLOCK_REV));
+
+ block_count = ioread8(priv->io + INFO_BLOCK_BLK_CNT);
+ dev_dbg(dev, "Number of function blocks: %x\n", block_count);
+
+ for (i = 0; i < block_count; i++) {
+ u16 type = ioread16(priv->io + i * INFO_BLOCK_SIZE +
+ INFO_BLOCK_TYPE);
+ if (type == ETHERCAT_MASTER_ID)
+ break;
+ }
+ if (i == block_count) {
+ dev_err(dev, "EtherCAT master with DMA block not found\n");
+ return -ENODEV;
+ }
+ dev_dbg(dev, "EtherCAT master with DMA block found at pos: %d\n", i);
+
+ ec_info = priv->io + i * INFO_BLOCK_SIZE;
+ dev_dbg(dev, "EtherCAT master revision: %d\n",
+ ioread16(ec_info + INFO_BLOCK_REV));
+
+ priv->tx_dma_chan = ioread8(ec_info + INFO_BLOCK_TX_CHAN);
+ dev_dbg(dev, "EtherCAT master tx dma channel: %d\n",
+ priv->tx_dma_chan);
+
+ priv->rx_dma_chan = ioread8(ec_info + INFO_BLOCK_RX_CHAN);
+ dev_dbg(dev, "EtherCAT master rx dma channel: %d\n",
+ priv->rx_dma_chan);
+
+ priv->ec_io = priv->io + ioread32(ec_info + INFO_BLOCK_OFFSET);
+ priv->mii_io = priv->ec_io + ioread32(priv->ec_io + EC_MII_OFFSET);
+ priv->fifo_io = priv->ec_io + ioread32(priv->ec_io + EC_FIFO_OFFSET);
+ priv->mac_io = priv->ec_io + ioread32(priv->ec_io + EC_MAC_OFFSET);
+
+ dev_dbg(dev,
+ "EtherCAT block addres: %p, fifo address: %p, mii address: %p, mac address: %p\n",
+ priv->ec_io, priv->fifo_io, priv->mii_io, priv->mac_io);
+
+ return 0;
+}
+
+static netdev_tx_t ec_bhf_start_xmit(struct sk_buff *skb,
+ struct net_device *net_dev)
+{
+ struct ec_bhf_priv *priv = netdev_priv(net_dev);
+ struct tx_desc *desc;
+ unsigned len;
+
+ dev_dbg(PRIV_TO_DEV(priv), "Starting xmit\n");
+
+ desc = &priv->tx_descs[priv->tx_dnext];
+
+ skb_copy_and_csum_dev(skb, desc->data);
+ len = skb->len;
+
+ memset(&desc->header, 0, sizeof(desc->header));
+ desc->header.len = cpu_to_le16(len);
+ desc->header.port = TX_HDR_PORT_0;
+
+ ec_bhf_send_packet(priv, desc);
+
+ priv->tx_dnext = (priv->tx_dnext + 1) % priv->tx_dcount;
+
+ if (!ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext])) {
+ /* Make sure that updates to tx_dnext are perceived
+ * by the timer routine.
+ */
+ smp_wmb();
+
+ netif_stop_queue(net_dev);
+
+ dev_dbg(PRIV_TO_DEV(priv), "Stopping netif queue\n");
+ ec_bhf_print_status(priv);
+ }
+
+ priv->stat_tx_bytes += len;
+
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
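Note: the smp_wmb() here pairs with the smp_rmb() in ec_bhf_process_tx(): the xmit path publishes the advanced tx_dnext before stopping the queue, so when the timer observes the stopped queue it is guaranteed to test the sent flag of the descriptor the queue is actually waiting on before calling netif_wake_queue().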
+static int ec_bhf_alloc_dma_mem(struct ec_bhf_priv *priv,
+ struct bhf_dma *buf,
+ int channel,
+ int size)
+{
+ int offset = channel * DMA_CHAN_SIZE + DMA_CHAN_OFFSET;
+ struct device *dev = PRIV_TO_DEV(priv);
+ u32 mask;
+
+ iowrite32(0xffffffff, priv->dma_io + offset);
+
+ mask = ioread32(priv->dma_io + offset);
+ mask &= DMA_WINDOW_SIZE_MASK;
+ dev_dbg(dev, "Read mask %x for channel %d\n", mask, channel);
+
+ /* We want a chunk of memory that is:
+ * - aligned to the window mask we just read back
+ * - at most one full window (~mask + 1 bytes) in size
+ * To guarantee that an aligned window fits inside it,
+ * we allocate a buffer of twice that size.
+ */
+ buf->len = min_t(int, ~mask + 1, size);
+ buf->alloc_len = 2 * buf->len;
+
+ dev_dbg(dev, "Allocating %d bytes for channel %d",
+ (int)buf->alloc_len, channel);
+ buf->alloc = dma_alloc_coherent(dev, buf->alloc_len, &buf->alloc_phys,
+ GFP_KERNEL);
+ if (buf->alloc == NULL) {
+ dev_info(dev, "Failed to allocate buffer\n");
+ return -ENOMEM;
+ }
+
+ buf->buf_phys = (buf->alloc_phys + buf->len) & mask;
+ buf->buf = buf->alloc + (buf->buf_phys - buf->alloc_phys);
+
+ iowrite32(0, priv->dma_io + offset + 4);
+ iowrite32(buf->buf_phys, priv->dma_io + offset);
+ dev_dbg(dev, "Buffer: %x and read from dev: %x",
+ (unsigned)buf->buf_phys, ioread32(priv->dma_io + offset));
+
+ return 0;
+}
+
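Note: the doubling in ec_bhf_alloc_dma_mem() is what makes the alignment work. A worked example, assuming the channel reports mask 0xffffc000 (a 16 KiB window) and dma_alloc_coherent() returns alloc_phys == 0x12345000 — both values hypothetical:

	len       = min(~mask + 1, size)      = 0x4000
	alloc_len = 2 * len                   = 0x8000
	buf_phys  = (alloc_phys + len) & mask = 0x12348000

buf_phys is aligned to the window, and buf_phys + len ends at 0x1234c000, inside the 0x8000-byte allocation, so a buffer of twice the window size always contains one fully aligned window wherever the allocation lands.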
+static void ec_bhf_setup_tx_descs(struct ec_bhf_priv *priv)
+{
+ int i = 0;
+
+ priv->tx_dcount = priv->tx_buf.len / sizeof(struct tx_desc);
+ priv->tx_descs = (struct tx_desc *) priv->tx_buf.buf;
+ priv->tx_dnext = 0;
+
+ for (i = 0; i < priv->tx_dcount; i++)
+ priv->tx_descs[i].header.sent = cpu_to_le32(TX_HDR_SENT);
+}
+
+static void ec_bhf_setup_rx_descs(struct ec_bhf_priv *priv)
+{
+ int i;
+
+ priv->rx_dcount = priv->rx_buf.len / sizeof(struct rx_desc);
+ priv->rx_descs = (struct rx_desc *) priv->rx_buf.buf;
+ priv->rx_dnext = 0;
+
+ for (i = 0; i < priv->rx_dcount; i++) {
+ struct rx_desc *desc = &priv->rx_descs[i];
+ u32 next;
+
+ if (i != priv->rx_dcount - 1)
+ next = (u8 *)(desc + 1) - priv->rx_buf.buf;
+ else
+ next = 0;
+ next |= RXHDR_NEXT_VALID;
+ desc->header.next = cpu_to_le32(next);
+ desc->header.recv = 0;
+ ec_bhf_add_rx_desc(priv, desc);
+ }
+}
+
+static int ec_bhf_open(struct net_device *net_dev)
+{
+ struct ec_bhf_priv *priv = netdev_priv(net_dev);
+ struct device *dev = PRIV_TO_DEV(priv);
+ int err = 0;
+
+ dev_info(dev, "Opening device\n");
+
+ ec_bhf_reset(priv);
+
+ err = ec_bhf_alloc_dma_mem(priv, &priv->rx_buf, priv->rx_dma_chan,
+ FIFO_SIZE * sizeof(struct rx_desc));
+ if (err) {
+ dev_err(dev, "Failed to allocate rx buffer\n");
+ goto out;
+ }
+ ec_bhf_setup_rx_descs(priv);
+
+ dev_info(dev, "RX buffer allocated, address: %x\n",
+ (unsigned)priv->rx_buf.buf_phys);
+
+ err = ec_bhf_alloc_dma_mem(priv, &priv->tx_buf, priv->tx_dma_chan,
+ FIFO_SIZE * sizeof(struct tx_desc));
+ if (err) {
+ dev_err(dev, "Failed to allocate tx buffer\n");
+ goto error_rx_free;
+ }
+ dev_dbg(dev, "TX buffer allocated, addres: %x\n",
+ (unsigned)priv->tx_buf.buf_phys);
+
+ iowrite8(0, priv->mii_io + MII_MAC_FILT_FLAG);
+
+ ec_bhf_setup_tx_descs(priv);
+
+ netif_start_queue(net_dev);
+
+ hrtimer_init(&priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ priv->hrtimer.function = ec_bhf_timer_fun;
+ hrtimer_start(&priv->hrtimer, ktime_set(0, polling_frequency),
+ HRTIMER_MODE_REL);
+
+ dev_info(PRIV_TO_DEV(priv), "Device open\n");
+
+ ec_bhf_print_status(priv);
+
+ return 0;
+
+error_rx_free:
+ dma_free_coherent(dev, priv->rx_buf.alloc_len, priv->rx_buf.alloc,
+ priv->rx_buf.alloc_phys);
+out:
+ return err;
+}
+
+static int ec_bhf_stop(struct net_device *net_dev)
+{
+ struct ec_bhf_priv *priv = netdev_priv(net_dev);
+ struct device *dev = PRIV_TO_DEV(priv);
+
+ hrtimer_cancel(&priv->hrtimer);
+
+ ec_bhf_reset(priv);
+
+ netif_tx_disable(net_dev);
+
+ dma_free_coherent(dev, priv->tx_buf.alloc_len,
+ priv->tx_buf.alloc, priv->tx_buf.alloc_phys);
+ dma_free_coherent(dev, priv->rx_buf.alloc_len,
+ priv->rx_buf.alloc, priv->rx_buf.alloc_phys);
+
+ return 0;
+}
+
+static struct rtnl_link_stats64 *
+ec_bhf_get_stats(struct net_device *net_dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct ec_bhf_priv *priv = netdev_priv(net_dev);
+
+ stats->rx_errors = ioread8(priv->mac_io + MAC_RX_ERR_CNT) +
+ ioread8(priv->mac_io + MAC_CRC_ERR_CNT) +
+ ioread8(priv->mac_io + MAC_FRAME_ERR_CNT);
+ stats->rx_packets = ioread32(priv->mac_io + MAC_RX_FRAME_CNT);
+ stats->tx_packets = ioread32(priv->mac_io + MAC_TX_FRAME_CNT);
+ stats->rx_dropped = ioread8(priv->mac_io + MAC_DROPPED_FRMS);
+
+ stats->tx_bytes = priv->stat_tx_bytes;
+ stats->rx_bytes = priv->stat_rx_bytes;
+
+ return stats;
+}
+
+static const struct net_device_ops ec_bhf_netdev_ops = {
+ .ndo_start_xmit = ec_bhf_start_xmit,
+ .ndo_open = ec_bhf_open,
+ .ndo_stop = ec_bhf_stop,
+ .ndo_get_stats64 = ec_bhf_get_stats,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr
+};
+
+static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ struct net_device *net_dev;
+ struct ec_bhf_priv *priv;
+ void __iomem *dma_io;
+ void __iomem *io;
+ int err = 0;
+
+ err = pci_enable_device(dev);
+ if (err)
+ return err;
+
+ pci_set_master(dev);
+
+ err = pci_set_dma_mask(dev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&dev->dev,
+ "Required dma mask not supported, failed to initialize device\n");
+ err = -EIO;
+ goto err_disable_dev;
+ }
+
+ err = pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&dev->dev,
+ "Required dma mask not supported, failed to initialize device\n");
+ goto err_disable_dev;
+ }
+
+ err = pci_request_regions(dev, "ec_bhf");
+ if (err) {
+ dev_err(&dev->dev, "Failed to request pci memory regions\n");
+ goto err_disable_dev;
+ }
+
+ io = pci_iomap(dev, 0, 0);
+ if (!io) {
+ dev_err(&dev->dev, "Failed to map pci card memory bar 0");
+ err = -EIO;
+ goto err_release_regions;
+ }
+
+ dma_io = pci_iomap(dev, 2, 0);
+ if (!dma_io) {
+ dev_err(&dev->dev, "Failed to map pci card memory bar 2");
+ err = -EIO;
+ goto err_unmap;
+ }
+
+ net_dev = alloc_etherdev(sizeof(struct ec_bhf_priv));
+ if (!net_dev) {
+ err = -ENOMEM;
+ goto err_unmap_dma_io;
+ }
+
+ pci_set_drvdata(dev, net_dev);
+ SET_NETDEV_DEV(net_dev, &dev->dev);
+
+ net_dev->features = 0;
+ net_dev->flags |= IFF_NOARP;
+
+ net_dev->netdev_ops = &ec_bhf_netdev_ops;
+
+ priv = netdev_priv(net_dev);
+ priv->net_dev = net_dev;
+ priv->io = io;
+ priv->dma_io = dma_io;
+ priv->dev = dev;
+
+ err = ec_bhf_setup_offsets(priv);
+ if (err < 0)
+ goto err_free_net_dev;
+
+ memcpy_fromio(net_dev->dev_addr, priv->mii_io + MII_MAC_ADDR, 6);
+
+ dev_dbg(&dev->dev, "CX5020 Ethercat master address: %pM\n",
+ net_dev->dev_addr);
+
+ err = register_netdev(net_dev);
+ if (err < 0)
+ goto err_free_net_dev;
+
+ return 0;
+
+err_free_net_dev:
+ free_netdev(net_dev);
+err_unmap_dma_io:
+ pci_iounmap(dev, dma_io);
+err_unmap:
+ pci_iounmap(dev, io);
+err_release_regions:
+ pci_release_regions(dev);
+err_disable_dev:
+ pci_clear_master(dev);
+ pci_disable_device(dev);
+
+ return err;
+}
+
+static void ec_bhf_remove(struct pci_dev *dev)
+{
+ struct net_device *net_dev = pci_get_drvdata(dev);
+ struct ec_bhf_priv *priv = netdev_priv(net_dev);
+
+ unregister_netdev(net_dev);
+ free_netdev(net_dev);
+
+ pci_iounmap(dev, priv->dma_io);
+ pci_iounmap(dev, priv->io);
+ pci_release_regions(dev);
+ pci_clear_master(dev);
+ pci_disable_device(dev);
+}
+
+static struct pci_driver pci_driver = {
+ .name = "ec_bhf",
+ .id_table = ids,
+ .probe = ec_bhf_probe,
+ .remove = ec_bhf_remove,
+};
+
+static int __init ec_bhf_init(void)
+{
+ return pci_register_driver(&pci_driver);
+}
+
+static void __exit ec_bhf_exit(void)
+{
+ pci_unregister_driver(&pci_driver);
+}
+
+module_init(ec_bhf_init);
+module_exit(ec_bhf_exit);
+
+module_param(polling_frequency, long, S_IRUGO);
+MODULE_PARM_DESC(polling_frequency, "Polling timer frequency in ns");
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Dariusz Marcinkiewicz <reksio@newterm.pl>");
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index a18645407d21..dc19bc5dec77 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4949,6 +4949,12 @@ static void be_eeh_resume(struct pci_dev *pdev)
if (status)
goto err;
+ /* On some BE3 FW versions, after a HW reset,
+ * interrupts will remain disabled for each function.
+ * So, explicitly enable interrupts
+ */
+ be_intr_set(adapter, true);
+
/* tell fw we're ready to fire cmds */
status = be_cmd_fw_init(adapter);
if (status)
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index b0c6050479eb..b78378cea5e3 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1988,7 +1988,7 @@ jme_alloc_txdesc(struct jme_adapter *jme,
return idx;
}
-static void
+static int
jme_fill_tx_map(struct pci_dev *pdev,
struct txdesc *txdesc,
struct jme_buffer_info *txbi,
@@ -2005,6 +2005,9 @@ jme_fill_tx_map(struct pci_dev *pdev,
len,
PCI_DMA_TODEVICE);
+ if (unlikely(pci_dma_mapping_error(pdev, dmaaddr)))
+ return -EINVAL;
+
pci_dma_sync_single_for_device(pdev,
dmaaddr,
len,
@@ -2021,9 +2024,30 @@ jme_fill_tx_map(struct pci_dev *pdev,
txbi->mapping = dmaaddr;
txbi->len = len;
+ return 0;
}
-static void
+static void jme_drop_tx_map(struct jme_adapter *jme, int startidx, int count)
+{
+ struct jme_ring *txring = &(jme->txring[0]);
+ struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
+ int mask = jme->tx_ring_mask;
+ int j;
+
+ for (j = 0 ; j < count ; j++) {
+ ctxbi = txbi + ((startidx + j + 2) & (mask));
+ pci_unmap_page(jme->pdev,
+ ctxbi->mapping,
+ ctxbi->len,
+ PCI_DMA_TODEVICE);
+
+ ctxbi->mapping = 0;
+ ctxbi->len = 0;
+ }
+
+}
+
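Note: jme_drop_tx_map() unwinds only fragment slots (startidx + j + 2), mirroring the slot arithmetic of the mapping loop in jme_map_tx_skb(). When mapping the linear head (slot idx + 1) fails, it is called with the loop counter already at nr_frags, so every previously mapped fragment is released; the head slot itself needs no unmapping because its pci_map_page() never succeeded.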
+static int
jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
{
struct jme_ring *txring = &(jme->txring[0]);
@@ -2034,25 +2058,37 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
int mask = jme->tx_ring_mask;
const struct skb_frag_struct *frag;
u32 len;
+ int ret = 0;
for (i = 0 ; i < nr_frags ; ++i) {
frag = &skb_shinfo(skb)->frags[i];
ctxdesc = txdesc + ((idx + i + 2) & (mask));
ctxbi = txbi + ((idx + i + 2) & (mask));
- jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
+ ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
skb_frag_page(frag),
frag->page_offset, skb_frag_size(frag), hidma);
+ if (ret) {
+ jme_drop_tx_map(jme, idx, i);
+ goto out;
+ }
+
}
len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
ctxdesc = txdesc + ((idx + 1) & (mask));
ctxbi = txbi + ((idx + 1) & (mask));
- jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
+ ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
offset_in_page(skb->data), len, hidma);
+ if (ret)
+ jme_drop_tx_map(jme, idx, i);
+
+out:
+ return ret;
}
+
static int
jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags)
{
@@ -2131,6 +2167,7 @@ jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
struct txdesc *txdesc;
struct jme_buffer_info *txbi;
u8 flags;
+ int ret = 0;
txdesc = (struct txdesc *)txring->desc + idx;
txbi = txring->bufinf + idx;
@@ -2155,7 +2192,10 @@ jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags))
jme_tx_csum(jme, skb, &flags);
jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags);
- jme_map_tx_skb(jme, skb, idx);
+ ret = jme_map_tx_skb(jme, skb, idx);
+ if (ret)
+ return ret;
+
txdesc->desc1.flags = flags;
/*
* Set tx buffer info after telling NIC to send
@@ -2228,7 +2268,8 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_BUSY;
}
- jme_fill_tx_desc(jme, skb, idx);
+ if (jme_fill_tx_desc(jme, skb, idx))
+ return NETDEV_TX_OK;
jwrite32(jme, JME_TXCS, jme->reg_txcs |
TXCS_SELECT_QUEUE0 |
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 78099eab7673..92d3249f63f1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1253,12 +1253,12 @@ static struct mlx4_cmd_info cmd_info[] = {
},
{
.opcode = MLX4_CMD_UPDATE_QP,
- .has_inbox = false,
+ .has_inbox = true,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
- .wrapper = mlx4_CMD_EPERM_wrapper
+ .wrapper = mlx4_UPDATE_QP_wrapper
},
{
.opcode = MLX4_CMD_GET_OP_REQ,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index f9c465101963..212cea440f90 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -1195,6 +1195,12 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd);
+int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+
int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 61d64ebffd56..fbd32af89c7c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -389,6 +389,41 @@ err_icm:
EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
+#define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC
+int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
+ enum mlx4_update_qp_attr attr,
+ struct mlx4_update_qp_params *params)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_update_qp_context *cmd;
+ u64 pri_addr_path_mask = 0;
+ int err = 0;
+
+ if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS))
+ return -EINVAL;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ cmd = (struct mlx4_update_qp_context *)mailbox->buf;
+
+ if (attr & MLX4_UPDATE_QP_SMAC) {
+ pri_addr_path_mask |= 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX;
+ cmd->qp_context.pri_path.grh_mylmc = params->smac_index;
+ }
+
+ cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask);
+
+ err = mlx4_cmd(dev, mailbox->dma, qp->qpn & 0xffffff, 0,
+ MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_update_qp);
+
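Note: a minimal caller sketch for the new export, using the attribute and parameter struct visible above; dev, qp, and new_smac_ix are hypothetical names standing in for the caller's state:

	struct mlx4_update_qp_params params = {
		.smac_index = new_smac_ix,
	};
	int err;

	err = mlx4_update_qp(dev, qp, MLX4_UPDATE_QP_SMAC, &params);
	if (err)
		return err;	/* UPDATE_QP firmware command failed */

When a slave issues UPDATE_QP, the mlx4_UPDATE_QP_wrapper added in resource_tracker.c below validates that only the MAC-index bit of the primary address path mask is set before forwarding the command to firmware.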
void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
{
struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 1c3fdd4a1f7d..8f1254a79832 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -3895,6 +3895,60 @@ static int add_eth_header(struct mlx4_dev *dev, int slave,
}
+#define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)
+int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd_info)
+{
+ int err;
+ u32 qpn = vhcr->in_modifier & 0xffffff;
+ struct res_qp *rqp;
+ u64 mac;
+ unsigned port;
+ u64 pri_addr_path_mask;
+ struct mlx4_update_qp_context *cmd;
+ int smac_index;
+
+ cmd = (struct mlx4_update_qp_context *)inbox->buf;
+
+ pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
+ if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
+ (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
+ return -EPERM;
+
+ /* Just change the smac for the QP */
+ err = get_res(dev, slave, qpn, RES_QP, &rqp);
+ if (err) {
+ mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
+ return err;
+ }
+
+ port = ((rqp->sched_queue >> 6) & 1) + 1;
+ smac_index = cmd->qp_context.pri_path.grh_mylmc;
+ err = mac_find_smac_ix_in_slave(dev, slave, port,
+ smac_index, &mac);
+ if (err) {
+ mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
+ qpn, smac_index);
+ goto err_mac;
+ }
+
+ err = mlx4_cmd(dev, inbox->dma,
+ vhcr->in_modifier, 0,
+ MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (err) {
+ mlx4_err(dev, "Failed to update qpn on qpn 0x%x, command failed\n", qpn);
+ goto err_mac;
+ }
+
+err_mac:
+ put_res(dev, slave, qpn, RES_QP);
+ return err;
+}
+
int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 7b52a88923ef..f785d01c7d12 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1719,22 +1719,6 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
tx_ring->producer;
}
-static inline int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
- struct net_device *netdev)
-{
- int err;
-
- netdev->num_tx_queues = adapter->drv_tx_rings;
- netdev->real_num_tx_queues = adapter->drv_tx_rings;
-
- err = netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
- if (err)
- netdev_err(netdev, "failed to set %d Tx queues\n",
- adapter->drv_tx_rings);
-
- return err;
-}
-
struct qlcnic_nic_template {
int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
int (*config_led) (struct qlcnic_adapter *, u32, u32);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 0bc914859e38..7e55e88a81bf 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -2206,6 +2206,31 @@ static void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
ahw->max_uc_count = count;
}
+static int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
+ u8 tx_queues, u8 rx_queues)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err = 0;
+
+ if (tx_queues) {
+ err = netif_set_real_num_tx_queues(netdev, tx_queues);
+ if (err) {
+ netdev_err(netdev, "failed to set %d Tx queues\n",
+ tx_queues);
+ return err;
+ }
+ }
+
+ if (rx_queues) {
+ err = netif_set_real_num_rx_queues(netdev, rx_queues);
+ if (err)
+ netdev_err(netdev, "failed to set %d Rx queues\n",
+ rx_queues);
+ }
+
+ return err;
+}
+
int
qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
int pci_using_dac)
@@ -2269,7 +2294,8 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->irq = adapter->msix_entries[0].vector;
- err = qlcnic_set_real_num_queues(adapter, netdev);
+ err = qlcnic_set_real_num_queues(adapter, adapter->drv_tx_rings,
+ adapter->drv_sds_rings);
if (err)
return err;
@@ -2943,9 +2969,13 @@ static void qlcnic_dump_tx_rings(struct qlcnic_adapter *adapter)
tx_ring->tx_stats.xmit_called,
tx_ring->tx_stats.xmit_on,
tx_ring->tx_stats.xmit_off);
+
+ if (tx_ring->crb_intr_mask)
+ netdev_info(netdev, "crb_intr_mask=%d\n",
+ readl(tx_ring->crb_intr_mask));
+
netdev_info(netdev,
- "crb_intr_mask=%d, hw_producer=%d, sw_producer=%d sw_consumer=%d, hw_consumer=%d\n",
- readl(tx_ring->crb_intr_mask),
+ "hw_producer=%d, sw_producer=%d sw_consumer=%d, hw_consumer=%d\n",
readl(tx_ring->crb_cmd_producer),
tx_ring->producer, tx_ring->sw_consumer,
le32_to_cpu(*(tx_ring->hw_consumer)));
@@ -3978,12 +4008,21 @@ int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt,
int qlcnic_setup_rings(struct qlcnic_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
+ u8 tx_rings, rx_rings;
int err;
if (test_bit(__QLCNIC_RESETTING, &adapter->state))
return -EBUSY;
+ tx_rings = adapter->drv_tss_rings;
+ rx_rings = adapter->drv_rss_rings;
+
netif_device_detach(netdev);
+
+ err = qlcnic_set_real_num_queues(adapter, tx_rings, rx_rings);
+ if (err)
+ goto done;
+
if (netif_running(netdev))
__qlcnic_down(adapter, netdev);
@@ -4003,7 +4042,17 @@ int qlcnic_setup_rings(struct qlcnic_adapter *adapter)
return err;
}
- netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
+ /* Check if we need to update real_num_{tx|rx}_queues because
+ * qlcnic_setup_intr() may change Tx/Rx rings size
+ */
+ if ((tx_rings != adapter->drv_tx_rings) ||
+ (rx_rings != adapter->drv_sds_rings)) {
+ err = qlcnic_set_real_num_queues(adapter,
+ adapter->drv_tx_rings,
+ adapter->drv_sds_rings);
+ if (err)
+ goto done;
+ }
if (qlcnic_83xx_check(adapter)) {
qlcnic_83xx_initialize_nic(adapter, 1);
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 32d969e857f7..89b83e59e1dc 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -156,13 +156,15 @@ void efx_nic_fini_interrupt(struct efx_nic *efx)
efx->net_dev->rx_cpu_rmap = NULL;
#endif
- /* Disable MSI/MSI-X interrupts */
- efx_for_each_channel(channel, efx)
- free_irq(channel->irq, &efx->msi_context[channel->channel]);
-
- /* Disable legacy interrupt */
- if (efx->legacy_irq)
+ if (EFX_INT_MODE_USE_MSI(efx)) {
+ /* Disable MSI/MSI-X interrupts */
+ efx_for_each_channel(channel, efx)
+ free_irq(channel->irq,
+ &efx->msi_context[channel->channel]);
+ } else {
+ /* Disable legacy interrupt */
free_irq(efx->legacy_irq, efx);
+ }
}
/* Register dump */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index d940034acdd4..0f4841d2e8dc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1704,7 +1704,7 @@ static int stmmac_open(struct net_device *dev)
if (ret) {
pr_err("%s: Cannot attach to PHY (error: %d)\n",
__func__, ret);
- goto phy_error;
+ return ret;
}
}
@@ -1779,8 +1779,6 @@ init_error:
dma_desc_error:
if (priv->phydev)
phy_disconnect(priv->phydev);
-phy_error:
- clk_disable_unprepare(priv->stmmac_clk);
return ret;
}
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index df8d383acf48..b9ac20f42651 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -246,7 +246,7 @@ static inline void cas_lock_tx(struct cas *cp)
int i;
for (i = 0; i < N_TX_RINGS; i++)
- spin_lock(&cp->tx_lock[i]);
+ spin_lock_nested(&cp->tx_lock[i], i);
}
static inline void cas_lock_all(struct cas *cp)
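Note: spin_lock_nested() is needed here because all N_TX_RINGS per-ring locks belong to the same lock class, and acquiring them back to back would otherwise look like recursive locking to lockdep. Using the ring index as the subclass gives each acquisition a distinct annotation without changing the runtime locking behaviour (this relies on N_TX_RINGS staying below lockdep's subclass limit of 8).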
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 36aa109416c4..c331b7ebc812 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1871,18 +1871,13 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
phyid = be32_to_cpup(parp+1);
mdio = of_find_device_by_node(mdio_node);
-
- if (strncmp(mdio->name, "gpio", 4) == 0) {
- /* GPIO bitbang MDIO driver attached */
- struct mii_bus *bus = dev_get_drvdata(&mdio->dev);
-
- snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
- PHY_ID_FMT, bus->id, phyid);
- } else {
- /* davinci MDIO driver attached */
- snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
- PHY_ID_FMT, mdio->name, phyid);
+ of_node_put(mdio_node);
+ if (!mdio) {
+ pr_err("Missing mdio platform device\n");
+ return -EINVAL;
}
+ snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
+ PHY_ID_FMT, mdio->name, phyid);
mac_addr = of_get_mac_address(slave_node);
if (mac_addr)
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index b0e2865a6810..d53e299ae1d9 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -458,8 +458,10 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change)
struct macvlan_dev *vlan = netdev_priv(dev);
struct net_device *lowerdev = vlan->lowerdev;
- if (change & IFF_ALLMULTI)
- dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+ if (dev->flags & IFF_UP) {
+ if (change & IFF_ALLMULTI)
+ dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+ }
}
static void macvlan_set_mac_lists(struct net_device *dev)
@@ -515,6 +517,11 @@ static struct lock_class_key macvlan_netdev_addr_lock_key;
#define MACVLAN_STATE_MASK \
((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
+static int macvlan_get_nest_level(struct net_device *dev)
+{
+ return ((struct macvlan_dev *)netdev_priv(dev))->nest_level;
+}
+
static void macvlan_set_lockdep_class_one(struct net_device *dev,
struct netdev_queue *txq,
void *_unused)
@@ -525,8 +532,9 @@ static void macvlan_set_lockdep_class_one(struct net_device *dev,
static void macvlan_set_lockdep_class(struct net_device *dev)
{
- lockdep_set_class(&dev->addr_list_lock,
- &macvlan_netdev_addr_lock_key);
+ lockdep_set_class_and_subclass(&dev->addr_list_lock,
+ &macvlan_netdev_addr_lock_key,
+ macvlan_get_nest_level(dev));
netdev_for_each_tx_queue(dev, macvlan_set_lockdep_class_one, NULL);
}
@@ -721,6 +729,7 @@ static const struct net_device_ops macvlan_netdev_ops = {
.ndo_fdb_add = macvlan_fdb_add,
.ndo_fdb_del = macvlan_fdb_del,
.ndo_fdb_dump = ndo_dflt_fdb_dump,
+ .ndo_get_lock_subclass = macvlan_get_nest_level,
};
void macvlan_common_setup(struct net_device *dev)
@@ -849,6 +858,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
vlan->dev = dev;
vlan->port = port;
vlan->set_features = MACVLAN_FEATURES;
+ vlan->nest_level = dev_get_nest_level(lowerdev, netif_is_macvlan) + 1;
vlan->mode = MACVLAN_MODE_VEPA;
if (data && data[IFLA_MACVLAN_MODE])
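Note: the macvlan changes are all one fix. Stacked macvlans (a macvlan whose lower device is itself a macvlan) take addr_list_lock at every level when address lists propagate down. Recording nest_level as the lower device's level plus one, keying the lockdep subclass off it at setup time, and exposing it through ndo_get_lock_subclass lets lockdep tell the levels apart instead of reporting a false deadlock.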
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 9c4defdec67b..5f1a2250018f 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -215,6 +215,10 @@ static int mdio_gpio_probe(struct platform_device *pdev)
if (pdev->dev.of_node) {
pdata = mdio_gpio_of_get_data(pdev);
bus_id = of_alias_get_id(pdev->dev.of_node, "mdio-gpio");
+ if (bus_id < 0) {
+ dev_warn(&pdev->dev, "failed to get alias id\n");
+ bus_id = 0;
+ }
} else {
pdata = dev_get_platdata(&pdev->dev);
bus_id = pdev->id;
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index a972056b2249..3bc079a67a3d 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -715,7 +715,7 @@ void phy_state_machine(struct work_struct *work)
struct delayed_work *dwork = to_delayed_work(work);
struct phy_device *phydev =
container_of(dwork, struct phy_device, state_queue);
- int needs_aneg = 0, do_suspend = 0;
+ bool needs_aneg = false, do_suspend = false, do_resume = false;
int err = 0;
mutex_lock(&phydev->lock);
@@ -727,7 +727,7 @@ void phy_state_machine(struct work_struct *work)
case PHY_PENDING:
break;
case PHY_UP:
- needs_aneg = 1;
+ needs_aneg = true;
phydev->link_timeout = PHY_AN_TIMEOUT;
@@ -757,7 +757,7 @@ void phy_state_machine(struct work_struct *work)
phydev->adjust_link(phydev->attached_dev);
} else if (0 == phydev->link_timeout--)
- needs_aneg = 1;
+ needs_aneg = true;
break;
case PHY_NOLINK:
err = phy_read_status(phydev);
@@ -791,7 +791,7 @@ void phy_state_machine(struct work_struct *work)
netif_carrier_on(phydev->attached_dev);
} else {
if (0 == phydev->link_timeout--)
- needs_aneg = 1;
+ needs_aneg = true;
}
phydev->adjust_link(phydev->attached_dev);
@@ -827,7 +827,7 @@ void phy_state_machine(struct work_struct *work)
phydev->link = 0;
netif_carrier_off(phydev->attached_dev);
phydev->adjust_link(phydev->attached_dev);
- do_suspend = 1;
+ do_suspend = true;
}
break;
case PHY_RESUMING:
@@ -876,6 +876,7 @@ void phy_state_machine(struct work_struct *work)
}
phydev->adjust_link(phydev->attached_dev);
}
+ do_resume = true;
break;
}
@@ -883,9 +884,10 @@ void phy_state_machine(struct work_struct *work)
if (needs_aneg)
err = phy_start_aneg(phydev);
-
- if (do_suspend)
+ else if (do_suspend)
phy_suspend(phydev);
+ else if (do_resume)
+ phy_resume(phydev);
if (err < 0)
phy_error(phydev);
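
The phy.c hunks above convert the action flags to bools and chain them with else-if, so at most one of aneg/suspend/resume runs per state-machine pass (resume being newly issued when coming out of PHY_RESUMING). A compact stand-alone model of the dispatch, with stub actions in place of phy_start_aneg()/phy_suspend()/phy_resume():

    #include <stdbool.h>
    #include <stdio.h>

    static int  start_aneg(void) { puts("aneg");    return 0; }
    static void do_suspend(void) { puts("suspend"); }
    static void do_resume(void)  { puts("resume");  }

    static void dispatch(bool needs_aneg, bool suspend, bool resume)
    {
        int err = 0;

        /* Mutually exclusive: the first matching action wins. */
        if (needs_aneg)
            err = start_aneg();
        else if (suspend)
            do_suspend();
        else if (resume)
            do_resume();

        if (err < 0)
            puts("error path");
    }

    int main(void)
    {
        dispatch(false, false, true);   /* e.g. completing PHY_RESUMING */
        return 0;
    }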
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 0ce606624296..4987a1c6dc52 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -614,8 +614,8 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
err = phy_init_hw(phydev);
if (err)
phy_detach(phydev);
-
- phy_resume(phydev);
+ else
+ phy_resume(phydev);
return err;
}
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index c9f3281506af..2e025ddcef21 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -120,6 +120,16 @@ static void cdc_mbim_unbind(struct usbnet *dev, struct usb_interface *intf)
cdc_ncm_unbind(dev, intf);
}
+/* verify that the ethernet protocol is IPv4 or IPv6 */
+static bool is_ip_proto(__be16 proto)
+{
+ switch (proto) {
+ case htons(ETH_P_IP):
+ case htons(ETH_P_IPV6):
+ return true;
+ }
+ return false;
+}
static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
{
@@ -128,6 +138,7 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
struct cdc_ncm_ctx *ctx = info->ctx;
__le32 sign = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN);
u16 tci = 0;
+ bool is_ip;
u8 *c;
if (!ctx)
@@ -137,25 +148,32 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
if (skb->len <= ETH_HLEN)
goto error;
+ /* Some applications using e.g. packet sockets will
+ * bypass the VLAN acceleration and create tagged
+ * ethernet frames directly. We primarily look for
+ * the accelerated out-of-band tag, but fall back to the
+ * in-band tag if required.
+ */
+ skb_reset_mac_header(skb);
+ if (vlan_get_tag(skb, &tci) < 0 && skb->len > VLAN_ETH_HLEN &&
+ __vlan_get_tag(skb, &tci) == 0) {
+ is_ip = is_ip_proto(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
+ skb_pull(skb, VLAN_ETH_HLEN);
+ } else {
+ is_ip = is_ip_proto(eth_hdr(skb)->h_proto);
+ skb_pull(skb, ETH_HLEN);
+ }
+
/* mapping VLANs to MBIM sessions:
* no tag => IPS session <0>
* 1 - 255 => IPS session <vlanid>
* 256 - 511 => DSS session <vlanid - 256>
* 512 - 4095 => unsupported, drop
*/
- vlan_get_tag(skb, &tci);
-
switch (tci & 0x0f00) {
case 0x0000: /* VLAN ID 0 - 255 */
- /* verify that datagram is IPv4 or IPv6 */
- skb_reset_mac_header(skb);
- switch (eth_hdr(skb)->h_proto) {
- case htons(ETH_P_IP):
- case htons(ETH_P_IPV6):
- break;
- default:
+ if (!is_ip)
goto error;
- }
c = (u8 *)&sign;
c[3] = tci;
break;
@@ -169,7 +187,6 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
"unsupported tci=0x%04x\n", tci);
goto error;
}
- skb_pull(skb, ETH_HLEN);
}
spin_lock_bh(&ctx->mtx);
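
The TCI-to-session mapping in the comment above reduces to a switch on the high nibble of the VLAN ID. A stand-alone illustration (the enum and helper are illustrative, not the driver's types):

    #include <stdio.h>

    enum session_kind { SESSION_IPS, SESSION_DSS, SESSION_DROP };

    static enum session_kind map_tci(unsigned int tci)
    {
        switch (tci & 0x0f00) {
        case 0x0000:  /* VLAN ID 0-255   -> IPS session <tci>       */
            return SESSION_IPS;
        case 0x0100:  /* VLAN ID 256-511 -> DSS session <tci - 256> */
            return SESSION_DSS;
        default:      /* VLAN ID 512-4095 -> unsupported, drop      */
            return SESSION_DROP;
        }
    }

    int main(void)
    {
        printf("%d %d %d\n", map_tci(0), map_tci(300), map_tci(1000));
        return 0;   /* prints 0 1 2 */
    }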
@@ -204,17 +221,23 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci)
return;
/* need to send the NA on the VLAN dev, if any */
- if (tci)
+ rcu_read_lock();
+ if (tci) {
netdev = __vlan_find_dev_deep(dev->net, htons(ETH_P_8021Q),
tci);
- else
+ if (!netdev) {
+ rcu_read_unlock();
+ return;
+ }
+ } else {
netdev = dev->net;
- if (!netdev)
- return;
+ }
+ dev_hold(netdev);
+ rcu_read_unlock();
in6_dev = in6_dev_get(netdev);
if (!in6_dev)
- return;
+ goto out;
is_router = !!in6_dev->cnf.forwarding;
in6_dev_put(in6_dev);
@@ -224,6 +247,8 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci)
true /* solicited */,
false /* override */,
true /* inc_opt */);
+out:
+ dev_put(netdev);
}
static bool is_neigh_solicit(u8 *buf, size_t len)
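
The do_neigh_solicit() rework above is the standard resolve-then-hold pattern: look the device up under the RCU read lock, take a reference before dropping the lock, and release it at the single out: label. Modeled stand-alone below, with a mutex and a plain counter standing in for rcu_read_lock() and dev_hold(); the lookup is a stub, not __vlan_find_dev_deep():

    #include <pthread.h>
    #include <stdio.h>

    struct netdev { int refcnt; const char *name; };

    static pthread_mutex_t lookup_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct netdev vlan_dev = { 1, "vlan100" };

    static struct netdev *find_vlan_dev(unsigned int tci)
    {
        return tci ? &vlan_dev : NULL;   /* stub lookup */
    }

    int main(void)
    {
        struct netdev *netdev;

        pthread_mutex_lock(&lookup_lock);     /* rcu_read_lock()   */
        netdev = find_vlan_dev(100);
        if (!netdev) {
            pthread_mutex_unlock(&lookup_lock);
            return 0;
        }
        netdev->refcnt++;                     /* dev_hold()        */
        pthread_mutex_unlock(&lookup_lock);   /* rcu_read_unlock() */

        printf("using %s\n", netdev->name);   /* safe: ref held    */
        netdev->refcnt--;                     /* dev_put()         */
        return 0;
    }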
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index f46cd0250e48..5627917c5ff7 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -95,8 +95,10 @@ static void ath9k_htc_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
if ((vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_MESH_POINT) &&
- bss_conf->enable_beacon)
+ bss_conf->enable_beacon) {
priv->reconfig_beacon = true;
+ priv->rearm_ani = true;
+ }
if (bss_conf->assoc) {
priv->rearm_ani = true;
@@ -257,6 +259,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
ath9k_htc_ps_wakeup(priv);
+ ath9k_htc_stop_ani(priv);
del_timer_sync(&priv->tx.cleanup_timer);
ath9k_htc_tx_drain(priv);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index afb3d15e38ff..be1985296bdc 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -4948,7 +4948,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_if *ifp)
if (!err) {
/* only set 2G bandwidth using bw_cap command */
band_bwcap.band = cpu_to_le32(WLC_BAND_2G);
- band_bwcap.bw_cap = cpu_to_le32(WLC_BW_40MHZ_BIT);
+ band_bwcap.bw_cap = cpu_to_le32(WLC_BW_CAP_40MHZ);
err = brcmf_fil_iovar_data_set(ifp, "bw_cap", &band_bwcap,
sizeof(band_bwcap));
} else {
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c
index fa858d548d13..0489314425cb 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex.c
@@ -611,14 +611,14 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO);
if (IWL_MVM_BT_COEX_CORUNNING) {
- bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_CORUN_LUT_20 |
- BT_VALID_CORUN_LUT_40);
+ bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_CORUN_LUT_20 |
+ BT_VALID_CORUN_LUT_40);
bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING);
}
if (IWL_MVM_BT_COEX_MPLUT) {
bt_cmd->flags |= cpu_to_le32(BT_COEX_MPLUT);
- bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
+ bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
}
if (mvm->cfg->bt_shared_single_ant)
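
The coex.c fix above is a plain accumulate-versus-assign bug: each feature block must OR its bits into valid_bit_msk, otherwise the last block wins and clobbers bits set earlier in the function. In miniature:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t msk = 0x1;   /* bits set by earlier configuration */

        msk = 0x4;            /* bug: assignment wipes bit 0x1 */
        printf("buggy: 0x%x\n", msk);

        msk = 0x1;
        msk |= 0x4;           /* fix: accumulates, msk == 0x5  */
        printf("fixed: 0x%x\n", msk);
        return 0;
    }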
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index 9426905de6b2..d73a89ecd78a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -183,9 +183,9 @@ enum iwl_scan_type {
* this number of packets were received (typically 1)
* @passive2active: is auto switching from passive to active during scan allowed
* @rxchain_sel_flags: RXON_RX_CHAIN_*
- * @max_out_time: in usecs, max out of serving channel time
+ * @max_out_time: in TUs, max out of serving channel time
* @suspend_time: how long to pause scan when returning to service channel:
- * bits 0-19: beacon interal in usecs (suspend before executing)
+ * bits 0-19: beacon interval in TUs (suspend before executing)
* bits 20-23: reserved
* bits 24-31: number of beacons (suspend between channels)
* @rxon_flags: RXON_FLG_*
@@ -383,8 +383,8 @@ enum scan_framework_client {
* @quiet_plcp_th: quiet channel num of packets threshold
* @good_CRC_th: passive to active promotion threshold
* @rx_chain: RXON rx chain.
- * @max_out_time: max uSec to be out of assoceated channel
- * @suspend_time: pause scan this long when returning to service channel
+ * @max_out_time: max TUs to be out of associated channel
+ * @suspend_time: pause scan for this many TUs when returning to service channel
* @flags: RXON flags
* @filter_flags: RXON filter
* @tx_cmd: tx command for active scan; for 2GHz and for 5GHz.
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index f0cebf12c7b8..b41dc84e9431 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -1007,7 +1007,7 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
- ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_SYNC, len, cmd);
+ ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd);
if (ret)
IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
}
@@ -1023,7 +1023,7 @@ static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm)
if (WARN_ON_ONCE(!mvm->mcast_filter_cmd))
return;
- ieee80211_iterate_active_interfaces(
+ ieee80211_iterate_active_interfaces_atomic(
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_mc_iface_iterator, &iter_data);
}
@@ -1807,6 +1807,11 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
+ if (!iwl_mvm_is_idle(mvm)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
switch (mvm->scan_status) {
case IWL_MVM_SCAN_OS:
IWL_DEBUG_SCAN(mvm, "Stopping previous scan for sched_scan\n");
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index d564233a65da..f1ec0986c3c9 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -1003,6 +1003,9 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
return mvmvif->low_latency;
}
+/* Assoc status */
+bool iwl_mvm_is_idle(struct iwl_mvm *mvm);
+
/* Thermal management and CT-kill */
void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index 9f52c5b3f0ec..e1c838899363 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -1010,7 +1010,7 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
return;
}
-#ifdef CPTCFG_MAC80211_DEBUGFS
+#ifdef CONFIG_MAC80211_DEBUGFS
/* Disable last tx check if we are debugging with fixed rate */
if (lq_sta->dbg_fixed_rate) {
IWL_DEBUG_RATE(mvm, "Fixed rate. avoid rate scaling\n");
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index c91dc8498852..c28de54c75d4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -277,51 +277,22 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_scan_condition_iterator,
&global_bound);
- /*
- * Under low latency traffic passive scan is fragmented meaning
- * that dwell on a particular channel will be fragmented. Each fragment
- * dwell time is 20ms and fragments period is 105ms. Skipping to next
- * channel will be delayed by the same period - 105ms. So suspend_time
- * parameter describing both fragments and channels skipping periods is
- * set to 105ms. This value is chosen so that overall passive scan
- * duration will not be too long. Max_out_time in this case is set to
- * 70ms, so for active scanning operating channel will be left for 70ms
- * while for passive still for 20ms (fragment dwell).
- */
- if (global_bound) {
- if (!iwl_mvm_low_latency(mvm)) {
- params->suspend_time = ieee80211_tu_to_usec(100);
- params->max_out_time = ieee80211_tu_to_usec(600);
- } else {
- params->suspend_time = ieee80211_tu_to_usec(105);
- /* P2P doesn't support fragmented passive scan, so
- * configure max_out_time to be at least longest dwell
- * time for passive scan.
- */
- if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
- params->max_out_time = ieee80211_tu_to_usec(70);
- params->passive_fragmented = true;
- } else {
- u32 passive_dwell;
- /*
- * Use band G so that passive channel dwell time
- * will be assigned with maximum value.
- */
- band = IEEE80211_BAND_2GHZ;
- passive_dwell = iwl_mvm_get_passive_dwell(band);
- params->max_out_time =
- ieee80211_tu_to_usec(passive_dwell);
- }
- }
+ if (!global_bound)
+ goto not_bound;
+
+ params->suspend_time = 100;
+ params->max_out_time = 600;
+
+ if (iwl_mvm_low_latency(mvm)) {
+ params->suspend_time = 250;
+ params->max_out_time = 250;
}
+not_bound:
+
for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
- if (params->passive_fragmented)
- params->dwell[band].passive = 20;
- else
- params->dwell[band].passive =
- iwl_mvm_get_passive_dwell(band);
+ params->dwell[band].passive = iwl_mvm_get_passive_dwell(band);
params->dwell[band].active = iwl_mvm_get_active_dwell(band,
n_ssids);
}
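
With the fw-api-scan.h change earlier in this series, max_out_time and suspend_time are expressed in TUs, so the rewritten iwl_mvm_scan_calc_params() assigns raw TU values (100/600, or 250/250 under low latency) instead of converting to usecs. For reference, the scaling that ieee80211_tu_to_usec() applied (1 TU = 1024 usec):

    #include <stdio.h>

    static unsigned int tu_to_usec(unsigned int tu)
    {
        return tu * 1024;   /* 1 TU = 1024 usec */
    }

    int main(void)
    {
        printf("600 TU = %u usec\n", tu_to_usec(600));   /* 614400 */
        return 0;
    }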
@@ -761,7 +732,7 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels;
int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
int head = 0;
- int tail = band_2ghz + band_5ghz;
+ int tail = band_2ghz + band_5ghz - 1;
u32 ssid_bitmap;
int cmd_len;
int ret;
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index d619851745a1..2180902266ae 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -644,3 +644,22 @@ bool iwl_mvm_low_latency(struct iwl_mvm *mvm)
return result;
}
+
+static void iwl_mvm_idle_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
+{
+ bool *idle = _data;
+
+ if (!vif->bss_conf.idle)
+ *idle = false;
+}
+
+bool iwl_mvm_is_idle(struct iwl_mvm *mvm)
+{
+ bool idle = true;
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_idle_iter, &idle);
+
+ return idle;
+}
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index dcfd6d866d09..2365553f1ef7 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -1749,6 +1749,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
* PCI Tx retries from interfering with C3 CPU state */
pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
+ trans->dev = &pdev->dev;
+ trans_pcie->pci_dev = pdev;
+ iwl_disable_interrupts(trans);
+
err = pci_enable_msi(pdev);
if (err) {
dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
@@ -1760,8 +1764,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
}
}
- trans->dev = &pdev->dev;
- trans_pcie->pci_dev = pdev;
trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
@@ -1787,8 +1789,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
goto out_pci_disable_msi;
}
- trans_pcie->inta_mask = CSR_INI_SET_MASK;
-
if (iwl_pcie_alloc_ict(trans))
goto out_free_cmd_pool;
@@ -1800,6 +1800,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
goto out_free_ict;
}
+ trans_pcie->inta_mask = CSR_INI_SET_MASK;
+
return trans;
out_free_ict:
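
The trans.c reorder above follows the usual bring-up rule: mask the device's interrupt sources before MSI is enabled so nothing can fire early, and program the unmask value (inta_mask) only after the ICT and IRQ handler are in place, presumably so a stray interrupt during probe finds nothing enabled. The shape of the rule, as a stub model:

    #include <stdbool.h>
    #include <stdio.h>

    static bool handler_ready;

    static void irq_handler(void)
    {
        if (!handler_ready)
            puts("BUG: irq serviced before setup");   /* what the reorder prevents */
    }

    int main(void)
    {
        puts("irqs masked");    /* iwl_disable_interrupts() first */
        /* ... pci_enable_msi(), alloc ICT, request_irq() ...     */
        handler_ready = true;   /* set inta_mask last             */
        irq_handler();
        return 0;
    }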
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 630a3fcf65bc..0d4a285cbd7e 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -226,7 +226,7 @@ int xenvif_map_frontend_rings(struct xenvif *vif,
grant_ref_t rx_ring_ref);
/* Check for SKBs from frontend and schedule backend processing */
-void xenvif_check_rx_xenvif(struct xenvif *vif);
+void xenvif_napi_schedule_or_enable_events(struct xenvif *vif);
/* Prevent the device from generating any further traffic. */
void xenvif_carrier_off(struct xenvif *vif);
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index ef05c5c49d41..20e9defa1060 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -75,32 +75,8 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
work_done = xenvif_tx_action(vif, budget);
if (work_done < budget) {
- int more_to_do = 0;
- unsigned long flags;
-
- /* It is necessary to disable IRQ before calling
- * RING_HAS_UNCONSUMED_REQUESTS. Otherwise we might
- * lose event from the frontend.
- *
- * Consider:
- * RING_HAS_UNCONSUMED_REQUESTS
- * <frontend generates event to trigger napi_schedule>
- * __napi_complete
- *
- * This handler is still in scheduled state so the
- * event has no effect at all. After __napi_complete
- * this handler is descheduled and cannot get
- * scheduled again. We lose event in this case and the ring
- * will be completely stalled.
- */
-
- local_irq_save(flags);
-
- RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
- if (!more_to_do)
- __napi_complete(napi);
-
- local_irq_restore(flags);
+ napi_complete(napi);
+ xenvif_napi_schedule_or_enable_events(vif);
}
return work_done;
@@ -194,7 +170,7 @@ static void xenvif_up(struct xenvif *vif)
enable_irq(vif->tx_irq);
if (vif->tx_irq != vif->rx_irq)
enable_irq(vif->rx_irq);
- xenvif_check_rx_xenvif(vif);
+ xenvif_napi_schedule_or_enable_events(vif);
}
static void xenvif_down(struct xenvif *vif)
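
The xenvif_poll() simplification above relies on the renamed helper (its body begins in the netback.c hunk below): after napi_complete(), re-check the ring with event delivery armed and reschedule NAPI if a request raced in. That closes the same lost-event window the removed comment described, without the IRQ-disabled __napi_complete() dance. The complete-then-recheck idiom in miniature (stand-in functions, not kernel API):

    #include <stdbool.h>
    #include <stdio.h>

    static bool napi_scheduled = true;   /* we are inside poll() */
    static int  pending;                 /* unconsumed ring work */

    static void napi_complete(void) { napi_scheduled = false; }
    static void napi_schedule(void) { napi_scheduled = true;  }

    static void poll_tail(void)
    {
        napi_complete();
        /* Final re-check: work that arrived while completing gets us
         * rescheduled instead of stalling the ring. */
        if (pending)
            napi_schedule();
    }

    int main(void)
    {
        pending = 1;   /* request raced in during completion */
        poll_tail();
        printf("scheduled=%d\n", napi_scheduled);   /* 1: event not lost */
        return 0;
    }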
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 76665405c5aa..7367208ee8cd 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -104,7 +104,7 @@ static inline unsigned long idx_to_kaddr(struct xenvif *vif,
/* Find the containing VIF's structure from a pointer in pending_tx_info array
*/
-static inline struct xenvif* ubuf_to_vif(struct ubuf_info *ubuf)
+static inline struct xenvif *ubuf_to_vif(const struct ubuf_info *ubuf)
{
u16 pending_idx = ubuf->desc;
struct pending_tx_info *temp =
@@ -323,6 +323,35 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
}
/*
+ * Find the grant ref for a given frag in a chain of struct ubuf_info's
+ * skb: the skb itself
+ * i: the frag's number
+ * ubuf: a pointer to an element in the chain. It should not be NULL
+ *
+ * Returns a pointer to the element in the chain where the page was found. If
+ * not found, returns NULL.
+ * See the definition of callback_struct in common.h for more details about
+ * the chain.
+ */
+static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb,
+ const int i,
+ const struct ubuf_info *ubuf)
+{
+ struct xenvif *foreign_vif = ubuf_to_vif(ubuf);
+
+ do {
+ u16 pending_idx = ubuf->desc;
+
+ if (skb_shinfo(skb)->frags[i].page.p ==
+ foreign_vif->mmap_pages[pending_idx])
+ break;
+ ubuf = (struct ubuf_info *) ubuf->ctx;
+ } while (ubuf);
+
+ return ubuf;
+}
+
+/*
* Prepare an SKB to be transmitted to the frontend.
*
* This function is responsible for allocating grant operations, meta
@@ -346,9 +375,8 @@ static int xenvif_gop_skb(struct sk_buff *skb,
int head = 1;
int old_meta_prod;
int gso_type;
- struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
- grant_ref_t foreign_grefs[MAX_SKB_FRAGS];
- struct xenvif *foreign_vif = NULL;
+ const struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
+ const struct ubuf_info *const head_ubuf = ubuf;
old_meta_prod = npo->meta_prod;
@@ -386,19 +414,6 @@ static int xenvif_gop_skb(struct sk_buff *skb,
npo->copy_off = 0;
npo->copy_gref = req->gref;
- if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
- (ubuf->callback == &xenvif_zerocopy_callback)) {
- int i = 0;
- foreign_vif = ubuf_to_vif(ubuf);
-
- do {
- u16 pending_idx = ubuf->desc;
- foreign_grefs[i++] =
- foreign_vif->pending_tx_info[pending_idx].req.gref;
- ubuf = (struct ubuf_info *) ubuf->ctx;
- } while (ubuf);
- }
-
data = skb->data;
while (data < skb_tail_pointer(skb)) {
unsigned int offset = offset_in_page(data);
@@ -415,13 +430,60 @@ static int xenvif_gop_skb(struct sk_buff *skb,
}
for (i = 0; i < nr_frags; i++) {
+ /* This variable also signals whether foreign_gref has a real
+ * value or not.
+ */
+ struct xenvif *foreign_vif = NULL;
+ grant_ref_t foreign_gref;
+
+ if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
+ (ubuf->callback == &xenvif_zerocopy_callback)) {
+ const struct ubuf_info *const startpoint = ubuf;
+
+ /* Ideally ubuf points to the chain element which
+ * belongs to this frag, or, if frags were removed from
+ * the beginning, to an element shortly before it.
+ */
+ ubuf = xenvif_find_gref(skb, i, ubuf);
+
+ /* Try again from the beginning of the list if we
+ * haven't tried from there yet. This only makes sense
+ * in the unlikely event that the original frags were
+ * reordered; for injected local pages it is an
+ * unnecessary second pass.
+ */
+ if (unlikely(!ubuf) && startpoint != head_ubuf)
+ ubuf = xenvif_find_gref(skb, i, head_ubuf);
+
+ if (likely(ubuf)) {
+ u16 pending_idx = ubuf->desc;
+
+ foreign_vif = ubuf_to_vif(ubuf);
+ foreign_gref = foreign_vif->pending_tx_info[pending_idx].req.gref;
+ /* Just a safety measure. If this was the last
+ * element on the list, the for loop will
+ * iterate again if a local page was added to
+ * the end. Using head_ubuf here prevents the
+ * second search on the chain. (The original
+ * frags may also have changed order, but that
+ * is less likely.) Either way, ubuf shouldn't
+ * be NULL.
+ */
+ ubuf = ubuf->ctx ?
+ (struct ubuf_info *) ubuf->ctx :
+ head_ubuf;
+ } else
+ /* This frag was a local page, added to the
+ * array after the skb left netback.
+ */
+ ubuf = head_ubuf;
+ }
xenvif_gop_frag_copy(vif, skb, npo,
skb_frag_page(&skb_shinfo(skb)->frags[i]),
skb_frag_size(&skb_shinfo(skb)->frags[i]),
skb_shinfo(skb)->frags[i].page_offset,
&head,
foreign_vif,
- foreign_grefs[i]);
+ foreign_vif ? foreign_gref : UINT_MAX);
}
return npo->meta_prod - old_meta_prod;
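
The frag loop above walks the ubuf_info chain (linked through ->ctx) from the current position and, on a miss, retries once from the head. A stand-alone model of that search-with-fallback; the struct is a simplified stand-in, and the lookup matches on desc rather than comparing frag pages:

    #include <stdio.h>

    struct ubuf {
        int desc;           /* pending_idx in the real structure */
        struct ubuf *ctx;   /* next element, NULL terminates     */
    };

    static const struct ubuf *find_from(const struct ubuf *u, int want)
    {
        while (u && u->desc != want)
            u = u->ctx;
        return u;
    }

    int main(void)
    {
        struct ubuf c = { 3, 0 }, b = { 2, &c }, a = { 1, &b };
        const struct ubuf *head = &a, *pos = &b;

        const struct ubuf *hit = find_from(pos, 1);   /* miss: 1 precedes pos */
        if (!hit && pos != head)
            hit = find_from(head, 1);                 /* retry from the head  */

        printf("found desc=%d\n", hit ? hit->desc : -1);   /* 1 */
        return 0;
    }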
@@ -654,7 +716,7 @@ done:
notify_remote_via_irq(vif->rx_irq);
}
-void xenvif_check_rx_xenvif(struct xenvif *vif)
+void xenvif_napi_schedule_or_enable_events(struct xenvif *vif)
{
int more_to_do;
@@ -688,7 +750,7 @@ static void tx_credit_callback(unsigned long data)
{
struct xenvif *vif = (struct xenvif *)data;
tx_add_credit(vif);
- xenvif_check_rx_xenvif(vif);
+ xenvif_napi_schedule_or_enable_events(vif);
}
static void xenvif_tx_err(struct xenvif *vif,
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index 6963bdf54175..6aea373547f6 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -6,6 +6,7 @@ menu "PTP clock support"
config PTP_1588_CLOCK
tristate "PTP clock support"
+ depends on NET
select PPS
select NET_PTP_CLASSIFY
help
@@ -74,7 +75,7 @@ config DP83640_PHY
config PTP_1588_CLOCK_PCH
tristate "Intel PCH EG20T as PTP clock"
depends on X86 || COMPILE_TEST
- depends on HAS_IOMEM
+ depends on HAS_IOMEM && NET
select PTP_1588_CLOCK
help
This driver adds support for using the PCH EG20T as a PTP
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 1b681427dde0..c341f855fadc 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -1621,8 +1621,6 @@ void sas_rphy_free(struct sas_rphy *rphy)
list_del(&rphy->list);
mutex_unlock(&sas_host->lock);
- sas_bsg_remove(shost, rphy);
-
transport_destroy_device(dev);
put_device(dev);
@@ -1681,6 +1679,7 @@ sas_rphy_remove(struct sas_rphy *rphy)
}
sas_rphy_unlink(rphy);
+ sas_bsg_remove(NULL, rphy);
transport_remove_device(dev);
device_del(dev);
}