Diffstat (limited to 'drivers/net/e1000e')
 -rw-r--r--  drivers/net/e1000e/82571.c   |  8
 -rw-r--r--  drivers/net/e1000e/e1000.h   |  2
 -rw-r--r--  drivers/net/e1000e/es2lan.c  |  2
 -rw-r--r--  drivers/net/e1000e/hw.h      |  1
 -rw-r--r--  drivers/net/e1000e/ich8lan.c | 78
 -rw-r--r--  drivers/net/e1000e/lib.c     | 54
 -rw-r--r--  drivers/net/e1000e/netdev.c  | 87
 -rw-r--r--  drivers/net/e1000e/phy.c     | 85
 8 files changed, 161 insertions(+), 156 deletions(-)
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index c1a42cfc80ba..02d67d047d96 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -237,6 +237,8 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
/* Set if manageability features are enabled. */
mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK)
? true : false;
+ /* Adaptive IFS supported */
+ mac->adaptive_ifs = true;
/* check for link */
switch (hw->phy.media_type) {
@@ -1290,7 +1292,6 @@ static s32 e1000_setup_link_82571(struct e1000_hw *hw)
static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw)
{
u32 ctrl;
- u32 led_ctrl;
s32 ret_val;
ctrl = er32(CTRL);
@@ -1305,11 +1306,6 @@ static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw)
break;
case e1000_phy_igp_2:
ret_val = e1000e_copper_link_setup_igp(hw);
- /* Setup activity LED */
- led_ctrl = er32(LEDCTL);
- led_ctrl &= IGP_ACTIVITY_LED_MASK;
- led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
- ew32(LEDCTL, led_ctrl);
break;
default:
return -E1000_ERR_PHY;
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index cebbd9079d53..d236efaf7478 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -421,6 +421,7 @@ struct e1000_info {
/* CRC Stripping defines */
#define FLAG2_CRC_STRIPPING (1 << 0)
#define FLAG2_HAS_PHY_WAKEUP (1 << 1)
+#define FLAG2_IS_DISCARDING (1 << 2)
#define E1000_RX_DESC_PS(R, i) \
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
@@ -582,7 +583,6 @@ extern s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
extern s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data);
extern s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
u16 data);
-extern s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow);
extern s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw);
extern s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
extern s32 e1000_check_polarity_82577(struct e1000_hw *hw);
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index 3028f23da891..e2aa3b788564 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -224,6 +224,8 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
/* Set if manageability features are enabled. */
mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK)
? true : false;
+ /* Adaptive IFS not supported */
+ mac->adaptive_ifs = false;
/* check for link */
switch (hw->phy.media_type) {
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index 2784cf44a6f3..eccf29b75c41 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -818,6 +818,7 @@ struct e1000_mac_info {
u8 forced_speed_duplex;
+ bool adaptive_ifs;
bool arc_subsystem_valid;
bool autoneg;
bool autoneg_failed;
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 9b09246af064..8b6ecd127889 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -138,6 +138,10 @@
#define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */
#define E1000_NVM_K1_ENABLE 0x1 /* NVM Enable K1 bit */
+/* KMRN Mode Control */
+#define HV_KMRN_MODE_CTRL PHY_REG(769, 16)
+#define HV_KMRN_MDIO_SLOW 0x0400
+
/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
/* Offset 04h HSFSTS */
union ich8_hws_flash_status {
@@ -219,6 +223,7 @@ static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
+static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
{
@@ -270,7 +275,21 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
phy->id = e1000_phy_unknown;
- e1000e_get_phy_id(hw);
+ ret_val = e1000e_get_phy_id(hw);
+ if (ret_val)
+ goto out;
+ if ((phy->id == 0) || (phy->id == PHY_REVISION_MASK)) {
+ /*
+ * In case the PHY needs to be in mdio slow mode (e.g. 82577),
+ * set slow mode and try to get the PHY id again.
+ */
+ ret_val = e1000_set_mdio_slow_mode_hv(hw);
+ if (ret_val)
+ goto out;
+ ret_val = e1000e_get_phy_id(hw);
+ if (ret_val)
+ goto out;
+ }
phy->type = e1000e_get_phy_type_from_id(phy->id);
switch (phy->type) {
@@ -292,6 +311,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
break;
}
+out:
return ret_val;
}
@@ -454,6 +474,8 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
mac->rar_entry_count--;
/* Set if manageability features are enabled. */
mac->arc_subsystem_valid = true;
+ /* Adaptive IFS supported */
+ mac->adaptive_ifs = true;
/* LED operations */
switch (mac->type) {
@@ -1074,16 +1096,44 @@ out:
/**
+ * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 data;
+
+ ret_val = e1e_rphy(hw, HV_KMRN_MODE_CTRL, &data);
+ if (ret_val)
+ return ret_val;
+
+ data |= HV_KMRN_MDIO_SLOW;
+
+ ret_val = e1e_wphy(hw, HV_KMRN_MODE_CTRL, data);
+
+ return ret_val;
+}
+
+/**
* e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
* done after every PHY reset.
**/
static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
{
s32 ret_val = 0;
+ u16 phy_data;
if (hw->mac.type != e1000_pchlan)
return ret_val;
+ /* Set MDIO slow mode before any other MDIO access */
+ if (hw->phy.type == e1000_phy_82577) {
+ ret_val = e1000_set_mdio_slow_mode_hv(hw);
+ if (ret_val)
+ goto out;
+ }
+
if (((hw->phy.type == e1000_phy_82577) &&
((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
@@ -1116,16 +1166,32 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
hw->phy.addr = 1;
ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
+ hw->phy.ops.release(hw);
if (ret_val)
goto out;
- hw->phy.ops.release(hw);
/*
* Configure the K1 Si workaround during phy reset assuming there is
* link so that it disables K1 if link is in 1Gbps.
*/
ret_val = e1000_k1_gig_workaround_hv(hw, true);
+ if (ret_val)
+ goto out;
+ /* Workaround for link disconnects on a busy hub in half duplex */
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+ ret_val = hw->phy.ops.read_reg_locked(hw,
+ PHY_REG(BM_PORT_CTRL_PAGE, 17),
+ &phy_data);
+ if (ret_val)
+ goto release;
+ ret_val = hw->phy.ops.write_reg_locked(hw,
+ PHY_REG(BM_PORT_CTRL_PAGE, 17),
+ phy_data & 0x00FF);
+release:
+ hw->phy.ops.release(hw);
out:
return ret_val;
}
@@ -1182,6 +1248,7 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
/* Allow time for h/w to get to a quiescent state after reset */
mdelay(10);
+ /* Perform any necessary post-reset workarounds */
if (hw->mac.type == e1000_pchlan) {
ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
if (ret_val)
@@ -2482,6 +2549,10 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
if (!ret_val)
e1000_release_swflag_ich8lan(hw);
+ /* Perform any necessary post-reset workarounds */
+ if (hw->mac.type == e1000_pchlan)
+ ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
+
if (ctrl & E1000_CTRL_PHY_RST)
ret_val = hw->phy.ops.get_cfg_done(hw);
@@ -2526,9 +2597,6 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
kab |= E1000_KABGTXD_BGSQLBIAS;
ew32(KABGTXD, kab);
- if (hw->mac.type == e1000_pchlan)
- ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
-
out:
return ret_val;
}
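
Between the reworked e1000_init_phy_params_pchlan() and the new e1000_set_mdio_slow_mode_hv() helper, PHY detection on PCH parts now follows a probe-then-retry pattern: read the id, and if it comes back as 0 or all-ones, switch the KMRN interface into slow MDIO mode and read again. A compressed user-space model of that control flow (stubbed register accessors and an example id value, all names hypothetical):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PHY_REVISION_MASK 0xFFFFFFF0u
#define HV_KMRN_MDIO_SLOW 0x0400u

static uint16_t kmrn_mode_ctrl;  /* stand-in for PHY page 769, register 16 */
static int reads;

/* Pretend the id only reads back correctly once slow MDIO mode is enabled. */
static uint32_t read_phy_id(void)
{
	reads++;
	return (kmrn_mode_ctrl & HV_KMRN_MDIO_SLOW) ? 0x01540050u : 0; /* example id */
}

/* Models the read-modify-write done by e1000_set_mdio_slow_mode_hv(). */
static void set_mdio_slow_mode(void)
{
	kmrn_mode_ctrl |= HV_KMRN_MDIO_SLOW;
}

int main(void)
{
	uint32_t id = read_phy_id();

	if (id == 0 || id == PHY_REVISION_MASK) {
		/* e.g. an 82577 without link: retry after switching to slow MDIO */
		set_mdio_slow_mode();
		id = read_phy_id();
	}
	printf("phy id 0x%08" PRIx32 " after %d read(s)\n", id, reads);
	return 0;
}

Because slow mode is also applied unconditionally for 82577 in e1000_hv_phy_workarounds_ich8lan(), the per-access slow/fast toggling that phy.c used to perform (removed further down) is no longer needed.
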
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index a86c17548c1e..2fa9b36a2c5a 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -125,6 +125,7 @@ void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value)
void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
{
u32 i;
+ u8 mac_addr[ETH_ALEN] = {0};
/* Setup the receive address */
e_dbg("Programming MAC Address into RAR[0]\n");
@@ -133,12 +134,8 @@ void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
/* Zero out the other (rar_entry_count - 1) receive addresses */
e_dbg("Clearing RAR[1-%u]\n", rar_count-1);
- for (i = 1; i < rar_count; i++) {
- E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1), 0);
- e1e_flush();
- E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((i << 1) + 1), 0);
- e1e_flush();
- }
+ for (i = 1; i < rar_count; i++)
+ e1000e_rar_set(hw, mac_addr, i);
}
/**
@@ -164,10 +161,19 @@ void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
- rar_high |= E1000_RAH_AV;
+ /* If MAC address zero, no need to set the AV bit */
+ if (rar_low || rar_high)
+ rar_high |= E1000_RAH_AV;
- E1000_WRITE_REG_ARRAY(hw, E1000_RA, (index << 1), rar_low);
- E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((index << 1) + 1), rar_high);
+ /*
+ * Some bridges will combine consecutive 32-bit writes into
+ * a single burst write, which will malfunction on some parts.
+ * The flushes avoid this.
+ */
+ ew32(RAL(index), rar_low);
+ e1e_flush();
+ ew32(RAH(index), rar_high);
+ e1e_flush();
}
/**
@@ -1609,6 +1615,11 @@ void e1000e_reset_adaptive(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
+ if (!mac->adaptive_ifs) {
+ e_dbg("Not in Adaptive IFS mode!\n");
+ goto out;
+ }
+
mac->current_ifs_val = 0;
mac->ifs_min_val = IFS_MIN;
mac->ifs_max_val = IFS_MAX;
@@ -1617,6 +1628,8 @@ void e1000e_reset_adaptive(struct e1000_hw *hw)
mac->in_ifs_mode = false;
ew32(AIT, 0);
+out:
+ return;
}
/**
@@ -1630,6 +1643,11 @@ void e1000e_update_adaptive(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
+ if (!mac->adaptive_ifs) {
+ e_dbg("Not in Adaptive IFS mode!\n");
+ goto out;
+ }
+
if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
if (mac->tx_packet_delta > MIN_NUM_XMITS) {
mac->in_ifs_mode = true;
@@ -1650,6 +1668,8 @@ void e1000e_update_adaptive(struct e1000_hw *hw)
ew32(AIT, 0);
}
}
+out:
+ return;
}
/**
@@ -2287,10 +2307,12 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
s32 ret_val, hdr_csum, csum;
u8 i, len;
+ hw->mac.tx_pkt_filtering = true;
+
/* No manageability, no filtering */
if (!e1000e_check_mng_mode(hw)) {
hw->mac.tx_pkt_filtering = false;
- return 0;
+ goto out;
}
/*
@@ -2298,9 +2320,9 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
* reason, disable filtering.
*/
ret_val = e1000_mng_enable_host_if(hw);
- if (ret_val != 0) {
+ if (ret_val) {
hw->mac.tx_pkt_filtering = false;
- return ret_val;
+ goto out;
}
/* Read in the header. Length and offset are in dwords. */
@@ -2319,17 +2341,17 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
*/
if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) {
hw->mac.tx_pkt_filtering = true;
- return 1;
+ goto out;
}
/* Cookie area is valid, make the final check for filtering. */
if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) {
hw->mac.tx_pkt_filtering = false;
- return 0;
+ goto out;
}
- hw->mac.tx_pkt_filtering = true;
- return 1;
+out:
+ return hw->mac.tx_pkt_filtering;
}
/**
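
Taken together with the hw.h, 82571.c and es2lan.c hunks above, these lib.c changes turn the adaptive-IFS helpers into no-ops on MAC families that do not support the feature. A minimal user-space sketch of that gating pattern, with stub types standing in for the driver's real structures and register accessors (names here are hypothetical):

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for struct e1000_mac_info and the AIT register write. */
struct mac_info {
	bool adaptive_ifs;            /* set by the per-family init_mac_params routines */
	unsigned int current_ifs_val;
	bool in_ifs_mode;
};

static void write_ait(unsigned int val)
{
	printf("AIT <- %u\n", val);
}

/* Models the early-out added to e1000e_reset_adaptive(). */
static void reset_adaptive(struct mac_info *mac)
{
	if (!mac->adaptive_ifs) {
		printf("Not in Adaptive IFS mode!\n");
		return;
	}
	mac->current_ifs_val = 0;
	mac->in_ifs_mode = false;
	write_ait(0);
}

int main(void)
{
	struct mac_info mac_82571  = { .adaptive_ifs = true };  /* 82571/ICH8: supported */
	struct mac_info mac_es2lan = { .adaptive_ifs = false }; /* 80003es2lan: not supported */

	reset_adaptive(&mac_82571);  /* clears the IFS state and zeroes AIT */
	reset_adaptive(&mac_es2lan); /* logs and returns; AIT is left alone */
	return 0;
}

The same check guards e1000e_update_adaptive(), so the AIT register is never touched on parts whose init routine leaves adaptive_ifs false.
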
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 762b697ce731..57f149b75fbe 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -450,13 +450,23 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
length = le16_to_cpu(rx_desc->length);
- /* !EOP means multiple descriptors were used to store a single
- * packet, also make sure the frame isn't just CRC only */
- if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) {
+ /*
+ * !EOP means multiple descriptors were used to store a single
+ * packet, if that's the case we need to toss it. In fact, we
+ * need to toss every packet with the EOP bit clear and the
+ * next frame that _does_ have the EOP bit set, as it is by
+ * definition only a frame fragment
+ */
+ if (unlikely(!(status & E1000_RXD_STAT_EOP)))
+ adapter->flags2 |= FLAG2_IS_DISCARDING;
+
+ if (adapter->flags2 & FLAG2_IS_DISCARDING) {
/* All receives must fit into a single buffer */
e_dbg("Receive packet consumed multiple buffers\n");
/* recycle */
buffer_info->skb = skb;
+ if (status & E1000_RXD_STAT_EOP)
+ adapter->flags2 &= ~FLAG2_IS_DISCARDING;
goto next_desc;
}
@@ -745,10 +755,16 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
PCI_DMA_FROMDEVICE);
buffer_info->dma = 0;
- if (!(staterr & E1000_RXD_STAT_EOP)) {
+ /* see !EOP comment in other rx routine */
+ if (!(staterr & E1000_RXD_STAT_EOP))
+ adapter->flags2 |= FLAG2_IS_DISCARDING;
+
+ if (adapter->flags2 & FLAG2_IS_DISCARDING) {
e_dbg("Packet Split buffers didn't pick up the full "
"packet\n");
dev_kfree_skb_irq(skb);
+ if (staterr & E1000_RXD_STAT_EOP)
+ adapter->flags2 &= ~FLAG2_IS_DISCARDING;
goto next_desc;
}
@@ -1118,6 +1134,7 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
+ adapter->flags2 &= ~FLAG2_IS_DISCARDING;
writel(0, adapter->hw.hw_addr + rx_ring->head);
writel(0, adapter->hw.hw_addr + rx_ring->tail);
@@ -2333,18 +2350,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
rctl &= ~E1000_RCTL_SZ_4096;
rctl |= E1000_RCTL_BSEX;
switch (adapter->rx_buffer_len) {
- case 256:
- rctl |= E1000_RCTL_SZ_256;
- rctl &= ~E1000_RCTL_BSEX;
- break;
- case 512:
- rctl |= E1000_RCTL_SZ_512;
- rctl &= ~E1000_RCTL_BSEX;
- break;
- case 1024:
- rctl |= E1000_RCTL_SZ_1024;
- rctl &= ~E1000_RCTL_BSEX;
- break;
case 2048:
default:
rctl |= E1000_RCTL_SZ_2048;
@@ -3315,24 +3320,24 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
if ((hw->phy.type == e1000_phy_82578) ||
(hw->phy.type == e1000_phy_82577)) {
e1e_rphy(hw, HV_SCC_UPPER, &phy_data);
- e1e_rphy(hw, HV_SCC_LOWER, &phy_data);
- adapter->stats.scc += phy_data;
+ if (!e1e_rphy(hw, HV_SCC_LOWER, &phy_data))
+ adapter->stats.scc += phy_data;
e1e_rphy(hw, HV_ECOL_UPPER, &phy_data);
- e1e_rphy(hw, HV_ECOL_LOWER, &phy_data);
- adapter->stats.ecol += phy_data;
+ if (!e1e_rphy(hw, HV_ECOL_LOWER, &phy_data))
+ adapter->stats.ecol += phy_data;
e1e_rphy(hw, HV_MCC_UPPER, &phy_data);
- e1e_rphy(hw, HV_MCC_LOWER, &phy_data);
- adapter->stats.mcc += phy_data;
+ if (!e1e_rphy(hw, HV_MCC_LOWER, &phy_data))
+ adapter->stats.mcc += phy_data;
e1e_rphy(hw, HV_LATECOL_UPPER, &phy_data);
- e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data);
- adapter->stats.latecol += phy_data;
+ if (!e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data))
+ adapter->stats.latecol += phy_data;
e1e_rphy(hw, HV_DC_UPPER, &phy_data);
- e1e_rphy(hw, HV_DC_LOWER, &phy_data);
- adapter->stats.dc += phy_data;
+ if (!e1e_rphy(hw, HV_DC_LOWER, &phy_data))
+ adapter->stats.dc += phy_data;
} else {
adapter->stats.scc += er32(SCC);
adapter->stats.ecol += er32(ECOL);
@@ -3360,8 +3365,8 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
if ((hw->phy.type == e1000_phy_82578) ||
(hw->phy.type == e1000_phy_82577)) {
e1e_rphy(hw, HV_COLC_UPPER, &phy_data);
- e1e_rphy(hw, HV_COLC_LOWER, &phy_data);
- hw->mac.collision_delta = phy_data;
+ if (!e1e_rphy(hw, HV_COLC_LOWER, &phy_data))
+ hw->mac.collision_delta = phy_data;
} else {
hw->mac.collision_delta = er32(COLC);
}
@@ -3372,8 +3377,8 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
if ((hw->phy.type == e1000_phy_82578) ||
(hw->phy.type == e1000_phy_82577)) {
e1e_rphy(hw, HV_TNCRS_UPPER, &phy_data);
- e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data);
- adapter->stats.tncrs += phy_data;
+ if (!e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data))
+ adapter->stats.tncrs += phy_data;
} else {
if ((hw->mac.type != e1000_82574) &&
(hw->mac.type != e1000_82583))
@@ -3781,7 +3786,7 @@ static int e1000_tso(struct e1000_adapter *adapter,
0, IPPROTO_TCP, 0);
cmd_length = E1000_TXD_CMD_IP;
ipcse = skb_transport_offset(skb) - 1;
- } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
+ } else if (skb_is_gso_v6(skb)) {
ipv6_hdr(skb)->payload_len = 0;
tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
@@ -3962,13 +3967,13 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
dma_error:
dev_err(&pdev->dev, "TX DMA map failed\n");
buffer_info->dma = 0;
- count--;
-
- while (count >= 0) {
+ if (count)
count--;
- i--;
- if (i < 0)
+
+ while (count--) {
+ if (i==0)
i += tx_ring->count;
+ i--;
buffer_info = &tx_ring->buffer_info[i];
e1000_put_txbuf(adapter, buffer_info);;
}
@@ -4317,13 +4322,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
* fragmented skbs
*/
- if (max_frame <= 256)
- adapter->rx_buffer_len = 256;
- else if (max_frame <= 512)
- adapter->rx_buffer_len = 512;
- else if (max_frame <= 1024)
- adapter->rx_buffer_len = 1024;
- else if (max_frame <= 2048)
+ if (max_frame <= 2048)
adapter->rx_buffer_len = 2048;
else
adapter->rx_buffer_len = 4096;
@@ -4674,6 +4673,7 @@ static int e1000_resume(struct pci_dev *pdev)
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
+ pci_save_state(pdev);
e1000e_disable_l1aspm(pdev);
err = pci_enable_device_mem(pdev);
@@ -4825,6 +4825,7 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
} else {
pci_set_master(pdev);
pci_restore_state(pdev);
+ pci_save_state(pdev);
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
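
The FLAG2_IS_DISCARDING handling added to both receive cleanup routines is a small two-state machine: once a descriptor arrives without EOP set, every subsequent buffer is dropped up to and including the descriptor that finally carries EOP, so no fragment of an oversized frame is handed to the stack. A self-contained sketch of that logic (plain C with hypothetical names, not the driver's ring structures):

#include <stdbool.h>
#include <stdio.h>

#define STAT_EOP 0x01 /* stand-in for E1000_RXD_STAT_EOP */

/* Models the FLAG2_IS_DISCARDING handling in e1000_clean_rx_irq(). */
static void clean_rx(const unsigned char *status, int ndesc)
{
	bool discarding = false; /* adapter->flags2 & FLAG2_IS_DISCARDING */

	for (int i = 0; i < ndesc; i++) {
		if (!(status[i] & STAT_EOP))
			discarding = true;

		if (discarding) {
			/* recycle the buffer; clear the flag on the closing fragment */
			printf("desc %d: dropped\n", i);
			if (status[i] & STAT_EOP)
				discarding = false;
			continue;
		}
		printf("desc %d: delivered\n", i);
	}
}

int main(void)
{
	/* one good frame, a frame split across three descriptors, then another good one */
	const unsigned char status[] = { STAT_EOP, 0, 0, STAT_EOP, STAT_EOP };

	clean_rx(status, (int)sizeof(status));
	return 0;
}

Descriptor 3 in this example is the trailing EOP fragment of the split frame and is still dropped, which is the fragment the old code would have passed up as a frame of its own.
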
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 55a2c0acfee7..7f3ceb9dad6a 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -152,32 +152,9 @@ s32 e1000e_get_phy_id(struct e1000_hw *hw)
if (phy->id != 0 && phy->id != PHY_REVISION_MASK)
goto out;
- /*
- * If the PHY ID is still unknown, we may have an 82577
- * without link. We will try again after setting Slow MDIC
- * mode. No harm in trying again in this case since the PHY
- * ID is unknown at this point anyway.
- */
- ret_val = phy->ops.acquire(hw);
- if (ret_val)
- goto out;
- ret_val = e1000_set_mdio_slow_mode_hv(hw, true);
- if (ret_val)
- goto out;
- phy->ops.release(hw);
-
retry_count++;
}
out:
- /* Revert to MDIO fast mode, if applicable */
- if (retry_count) {
- ret_val = phy->ops.acquire(hw);
- if (ret_val)
- return ret_val;
- ret_val = e1000_set_mdio_slow_mode_hv(hw, false);
- phy->ops.release(hw);
- }
-
return ret_val;
}
@@ -2791,38 +2768,6 @@ static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
}
/**
- * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
- * @hw: pointer to the HW structure
- * @slow: true for slow mode, false for normal mode
- *
- * Assumes semaphore already acquired.
- **/
-s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow)
-{
- s32 ret_val = 0;
- u16 data = 0;
-
- /* Set MDIO mode - page 769, register 16: 0x2580==slow, 0x2180==fast */
- hw->phy.addr = 1;
- ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
- (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
- if (ret_val)
- goto out;
-
- ret_val = e1000e_write_phy_reg_mdic(hw, BM_CS_CTRL1,
- (0x2180 | (slow << 10)));
- if (ret_val)
- goto out;
-
- /* dummy read when reverting to fast mode - throw away result */
- if (!slow)
- ret_val = e1000e_read_phy_reg_mdic(hw, BM_CS_CTRL1, &data);
-
-out:
- return ret_val;
-}
-
-/**
* __e1000_read_phy_reg_hv - Read HV PHY register
* @hw: pointer to the HW structure
* @offset: register offset to be read
@@ -2839,7 +2784,6 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
s32 ret_val;
u16 page = BM_PHY_REG_PAGE(offset);
u16 reg = BM_PHY_REG_NUM(offset);
- bool in_slow_mode = false;
if (!locked) {
ret_val = hw->phy.ops.acquire(hw);
@@ -2847,16 +2791,6 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
return ret_val;
}
- /* Workaround failure in MDIO access while cable is disconnected */
- if ((hw->phy.type == e1000_phy_82577) &&
- !(er32(STATUS) & E1000_STATUS_LU)) {
- ret_val = e1000_set_mdio_slow_mode_hv(hw, true);
- if (ret_val)
- goto out;
-
- in_slow_mode = true;
- }
-
/* Page 800 works differently than the rest so it has its own func */
if (page == BM_WUC_PAGE) {
ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset,
@@ -2893,10 +2827,6 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
data);
out:
- /* Revert to MDIO fast mode, if applicable */
- if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
- ret_val |= e1000_set_mdio_slow_mode_hv(hw, false);
-
if (!locked)
hw->phy.ops.release(hw);
@@ -2948,7 +2878,6 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
s32 ret_val;
u16 page = BM_PHY_REG_PAGE(offset);
u16 reg = BM_PHY_REG_NUM(offset);
- bool in_slow_mode = false;
if (!locked) {
ret_val = hw->phy.ops.acquire(hw);
@@ -2956,16 +2885,6 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
return ret_val;
}
- /* Workaround failure in MDIO access while cable is disconnected */
- if ((hw->phy.type == e1000_phy_82577) &&
- !(er32(STATUS) & E1000_STATUS_LU)) {
- ret_val = e1000_set_mdio_slow_mode_hv(hw, true);
- if (ret_val)
- goto out;
-
- in_slow_mode = true;
- }
-
/* Page 800 works differently than the rest so it has its own func */
if (page == BM_WUC_PAGE) {
ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset,
@@ -3019,10 +2938,6 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
data);
out:
- /* Revert to MDIO fast mode, if applicable */
- if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
- ret_val |= e1000_set_mdio_slow_mode_hv(hw, false);
-
if (!locked)
hw->phy.ops.release(hw);