Diffstat (limited to 'drivers/net/ixgbe')
-rw-r--r--  drivers/net/ixgbe/ixgbe.h          |   4
-rw-r--r--  drivers/net/ixgbe/ixgbe_82598.c    |  67
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethtool.c  |  38
-rw-r--r--  drivers/net/ixgbe/ixgbe_fcoe.c     |   2
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c     | 115
-rw-r--r--  drivers/net/ixgbe/ixgbe_type.h     |   8
6 files changed, 154 insertions, 80 deletions
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 1b12c7ba275f..2c4dc8221dcd 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -96,6 +96,8 @@
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000
#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
+#define IXGBE_MAX_RSC_INT_RATE 162760
+
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer */
struct ixgbe_tx_buffer {
@@ -134,6 +136,8 @@ struct ixgbe_ring {
u8 queue_index; /* needed for multiqueue queue management */
+#define IXGBE_RING_RX_PS_ENABLED (u8)(1)
+ u8 flags; /* per ring feature flags */
u16 head;
u16 tail;
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index b9923047ce11..522c03bc1dad 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -50,6 +50,51 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
u8 *eeprom_data);
/**
+ * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
+ * @hw: pointer to the HW structure
+ *
+ * The defaults for 82598 should be in the range of 50us to 50ms;
+ * however, the hardware default for these parts is 500us to 1ms, which is
+ * less than the 10ms recommended by the pci-e spec. To address this we
+ * need to increase the value to either the 10ms-250ms range for capability
+ * version 1 config, or the 16ms-55ms range for version 2.
+ **/
+void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
+{
+ struct ixgbe_adapter *adapter = hw->back;
+ u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
+ u16 pcie_devctl2;
+
+ /* only take action if timeout value is defaulted to 0 */
+ if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
+ goto out;
+
+ /*
+ * if the capabilities version is type 1 we can write the
+ * timeout of 10ms to 250ms through the GCR register
+ */
+ if (!(gcr & IXGBE_GCR_CAP_VER2)) {
+ gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
+ goto out;
+ }
+
+ /*
+ * for version 2 capabilities we need to write the config space
+ * directly in order to set the completion timeout value for
+ * 16ms to 55ms
+ */
+ pci_read_config_word(adapter->pdev,
+ IXGBE_PCI_DEVICE_CONTROL2, &pcie_devctl2);
+ pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
+ pci_write_config_word(adapter->pdev,
+ IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
+out:
+ /* disable completion timeout resend */
+ gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
+ IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
+}
+
+/**
* ixgbe_get_pcie_msix_count_82598 - Gets MSI-X vector count
* @hw: pointer to hardware structure
*
@@ -153,6 +198,26 @@ out:
}
/**
+ * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * Starts the hardware using the generic start_hw function,
+ * then sets the pci-e completion timeout.
+ **/
+s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
+{
+ s32 ret_val = 0;
+
+ ret_val = ixgbe_start_hw_generic(hw);
+
+ /* set the completion timeout for the interface */
+ if (ret_val == 0)
+ ixgbe_set_pcie_completion_timeout(hw);
+
+ return ret_val;
+}
+
+/**
* ixgbe_get_link_capabilities_82598 - Determines link capabilities
* @hw: pointer to hardware structure
* @speed: pointer to link speed
@@ -1085,7 +1150,7 @@ out:
static struct ixgbe_mac_operations mac_ops_82598 = {
.init_hw = &ixgbe_init_hw_generic,
.reset_hw = &ixgbe_reset_hw_82598,
- .start_hw = &ixgbe_start_hw_generic,
+ .start_hw = &ixgbe_start_hw_82598,
.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic,
.get_media_type = &ixgbe_get_media_type_82598,
.get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82598,
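For orientation, a minimal sketch (not part of the patch) of how the swapped .start_hw pointer takes effect: bring-up dispatches through the per-MAC ops table, so only 82598 parts pick up the completion-timeout work. The wrapper name is hypothetical; the ops layout follows the table in the hunk above.

/* hypothetical bring-up wrapper, assuming the ixgbe_hw/ops types above */
static s32 example_82598_bring_up(struct ixgbe_hw *hw)
{
	s32 status;

	status = hw->mac.ops.reset_hw(hw);
	if (status)
		return status;

	/* resolves to ixgbe_start_hw_82598() on 82598, the generic path elsewhere */
	return hw->mac.ops.start_hw(hw);
}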
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 2a978008fd6e..dff8dfac7ed9 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -1948,6 +1948,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_q_vector *q_vector;
int i;
if (ec->tx_max_coalesced_frames_irq)
@@ -1975,18 +1976,31 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
* any other value means disable eitr, which is best
* served by setting the interrupt rate very high
*/
- adapter->eitr_param = IXGBE_MAX_INT_RATE;
+ if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
+ adapter->eitr_param = IXGBE_MAX_RSC_INT_RATE;
+ else
+ adapter->eitr_param = IXGBE_MAX_INT_RATE;
adapter->itr_setting = 0;
}
- for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
- struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
- if (q_vector->txr_count && !q_vector->rxr_count)
- /* tx vector gets half the rate */
- q_vector->eitr = (adapter->eitr_param >> 1);
- else
- /* rx only or mixed */
- q_vector->eitr = adapter->eitr_param;
+ /* MSI/MSIx Interrupt Mode */
+ if (adapter->flags &
+ (IXGBE_FLAG_MSIX_ENABLED | IXGBE_FLAG_MSI_ENABLED)) {
+ int num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ for (i = 0; i < num_vectors; i++) {
+ q_vector = adapter->q_vector[i];
+ if (q_vector->txr_count && !q_vector->rxr_count)
+ /* tx vector gets half the rate */
+ q_vector->eitr = (adapter->eitr_param >> 1);
+ else
+ /* rx only or mixed */
+ q_vector->eitr = adapter->eitr_param;
+ ixgbe_write_eitr(q_vector);
+ }
+ /* Legacy Interrupt Mode */
+ } else {
+ q_vector = adapter->q_vector[0];
+ q_vector->eitr = adapter->eitr_param;
ixgbe_write_eitr(q_vector);
}
@@ -1999,13 +2013,13 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data)
ethtool_op_set_flags(netdev, data);
- if (!(adapter->flags & IXGBE_FLAG2_RSC_CAPABLE))
+ if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
return 0;
/* if state changes we need to update adapter->flags and reset */
if ((!!(data & ETH_FLAG_LRO)) !=
- (!!(adapter->flags & IXGBE_FLAG2_RSC_ENABLED))) {
- adapter->flags ^= IXGBE_FLAG2_RSC_ENABLED;
+ (!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) {
+ adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
if (netif_running(netdev))
ixgbe_reinit_locked(adapter);
else
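A rough sketch of the new coalescing ceiling: hardware RSC can only merge descriptors if some interrupt delay remains, so a request to disable throttling is clamped to IXGBE_MAX_RSC_INT_RATE when RSC is enabled. The helper below merely restates the branch from the hunk above; the function itself is hypothetical.

/* hypothetical helper: effective "no throttling" ceiling for this adapter */
static u32 example_max_int_rate(const struct ixgbe_adapter *adapter)
{
	/* with HW RSC active, keep enough delay for coalescing to occur */
	if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
		return IXGBE_MAX_RSC_INT_RATE;	/* 162760 ints/s */
	return IXGBE_MAX_INT_RATE;
}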
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index fa9f24e23683..28cf104e36cc 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -336,7 +336,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
/* return 0 to bypass going to ULD for DDPed data */
if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_DDP)
rc = 0;
- else
+ else if (ddp->len)
rc = ddp->len;
}
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 200454f30f6a..77b0381a2b5c 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -492,12 +492,12 @@ static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
skb_record_rx_queue(skb, ring->queue_index);
if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
- if (adapter->vlgrp && is_vlan && (tag != 0))
+ if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK))
vlan_gro_receive(napi, adapter->vlgrp, tag, skb);
else
napi_gro_receive(napi, skb);
} else {
- if (adapter->vlgrp && is_vlan && (tag != 0))
+ if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK))
vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
else
netif_rx(skb);
@@ -585,7 +585,7 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
if (!bi->page_dma &&
- (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
+ (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)) {
if (!bi->page) {
bi->page = alloc_page(GFP_ATOMIC);
if (!bi->page) {
@@ -629,7 +629,7 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
}
/* Refresh the desc even if buffer_addrs didn't change because
* each write-back erases this info. */
- if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+ if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
} else {
@@ -726,7 +726,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
break;
(*work_done)++;
- if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+ if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
IXGBE_RXDADV_HDRBUFLEN_SHIFT;
@@ -780,7 +780,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
prefetch(next_rxd);
cleaned_count++;
- if (adapter->flags & IXGBE_FLAG2_RSC_CAPABLE)
+ if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
rsc_count = ixgbe_get_rsc_count(rx_desc);
if (rsc_count) {
@@ -798,7 +798,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
rx_ring->stats.packets++;
rx_ring->stats.bytes += skb->len;
} else {
- if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+ if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
rx_buffer_info->skb = next_buffer->skb;
rx_buffer_info->dma = next_buffer->dma;
next_buffer->skb = skb;
@@ -1898,46 +1898,19 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
-static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index)
+static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *rx_ring)
{
- struct ixgbe_ring *rx_ring;
u32 srrctl;
- int queue0 = 0;
- unsigned long mask;
+ int index;
struct ixgbe_ring_feature *feature = adapter->ring_feature;
- if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
- int dcb_i = feature[RING_F_DCB].indices;
- if (dcb_i == 8)
- queue0 = index >> 4;
- else if (dcb_i == 4)
- queue0 = index >> 5;
- else
- dev_err(&adapter->pdev->dev, "Invalid DCB "
- "configuration\n");
-#ifdef IXGBE_FCOE
- if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
- struct ixgbe_ring_feature *f;
-
- rx_ring = &adapter->rx_ring[queue0];
- f = &adapter->ring_feature[RING_F_FCOE];
- if ((queue0 == 0) && (index > rx_ring->reg_idx))
- queue0 = f->mask + index -
- rx_ring->reg_idx - 1;
- }
-#endif /* IXGBE_FCOE */
- } else {
- queue0 = index;
- }
- } else {
+ index = rx_ring->reg_idx;
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+ unsigned long mask;
mask = (unsigned long) feature[RING_F_RSS].mask;
- queue0 = index & mask;
index = index & mask;
}
-
- rx_ring = &adapter->rx_ring[queue0];
-
srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index));
srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
@@ -1946,7 +1919,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index)
srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
IXGBE_SRRCTL_BSIZEHDR_MASK;
- if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+ if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
#else
@@ -2002,6 +1975,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
{
u64 rdba;
struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_ring *rx_ring;
struct net_device *netdev = adapter->netdev;
int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
int i, j;
@@ -2018,11 +1992,6 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
/* Decide whether to use packet split mode or not */
adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
-#ifdef IXGBE_FCOE
- if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
- adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
-#endif /* IXGBE_FCOE */
-
/* Set the RX buffer length according to the mode */
if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
rx_buf_len = IXGBE_RX_HDR_SIZE;
@@ -2036,7 +2005,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
}
} else {
- if (!(adapter->flags & IXGBE_FLAG2_RSC_ENABLED) &&
+ if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
(netdev->mtu <= ETH_DATA_LEN))
rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
else
@@ -2070,29 +2039,35 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
* the Base and Length of the Rx Descriptor Ring
*/
for (i = 0; i < adapter->num_rx_queues; i++) {
- rdba = adapter->rx_ring[i].dma;
- j = adapter->rx_ring[i].reg_idx;
+ rx_ring = &adapter->rx_ring[i];
+ rdba = rx_ring->dma;
+ j = rx_ring->reg_idx;
IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_BIT_MASK(32)));
IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), rdlen);
IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
- adapter->rx_ring[i].head = IXGBE_RDH(j);
- adapter->rx_ring[i].tail = IXGBE_RDT(j);
- adapter->rx_ring[i].rx_buf_len = rx_buf_len;
+ rx_ring->head = IXGBE_RDH(j);
+ rx_ring->tail = IXGBE_RDT(j);
+ rx_ring->rx_buf_len = rx_buf_len;
+
+ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
+ rx_ring->flags |= IXGBE_RING_RX_PS_ENABLED;
#ifdef IXGBE_FCOE
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
struct ixgbe_ring_feature *f;
f = &adapter->ring_feature[RING_F_FCOE];
- if ((rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
- (i >= f->mask) && (i < f->mask + f->indices))
- adapter->rx_ring[i].rx_buf_len =
- IXGBE_FCOE_JUMBO_FRAME_SIZE;
+ if ((i >= f->mask) && (i < f->mask + f->indices)) {
+ rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
+ if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)
+ rx_ring->rx_buf_len =
+ IXGBE_FCOE_JUMBO_FRAME_SIZE;
+ }
}
#endif /* IXGBE_FCOE */
- ixgbe_configure_srrctl(adapter, j);
+ ixgbe_configure_srrctl(adapter, rx_ring);
}
if (hw->mac.type == ixgbe_mac_82598EB) {
@@ -2165,10 +2140,11 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
}
- if (adapter->flags & IXGBE_FLAG2_RSC_ENABLED) {
+ if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
/* Enable 82599 HW-RSC */
for (i = 0; i < adapter->num_rx_queues; i++) {
- j = adapter->rx_ring[i].reg_idx;
+ rx_ring = &adapter->rx_ring[i];
+ j = rx_ring->reg_idx;
rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j));
rscctrl |= IXGBE_RSCCTL_RSCEN;
/*
@@ -2176,7 +2152,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
* total size of max desc * buf_len is not greater
* than 65535
*/
- if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+ if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
#if (MAX_SKB_FRAGS > 16)
rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
#elif (MAX_SKB_FRAGS > 8)
@@ -3812,8 +3788,8 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
} else if (hw->mac.type == ixgbe_mac_82599EB) {
adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
- adapter->flags |= IXGBE_FLAG2_RSC_CAPABLE;
- adapter->flags |= IXGBE_FLAG2_RSC_ENABLED;
+ adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
+ adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
adapter->ring_feature[RING_F_FDIR].indices =
IXGBE_MAX_FDIR_INDICES;
@@ -5360,12 +5336,19 @@ static int ixgbe_del_sanmac_netdev(struct net_device *dev)
static void ixgbe_netpoll(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ int i;
- disable_irq(adapter->pdev->irq);
adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
- ixgbe_intr(adapter->pdev->irq, netdev);
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+ int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ for (i = 0; i < num_q_vectors; i++) {
+ struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
+ ixgbe_msix_clean_many(0, q_vector);
+ }
+ } else {
+ ixgbe_intr(adapter->pdev->irq, netdev);
+ }
adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
- enable_irq(adapter->pdev->irq);
}
#endif
@@ -5611,7 +5594,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
- if (adapter->flags & IXGBE_FLAG2_RSC_ENABLED)
+ if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
netdev->features |= NETIF_F_LRO;
/* make sure the EEPROM is good */
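Packet split is now a per-ring property, so Rx-path consumers test ring->flags instead of the adapter-wide flag; FCoE rings clear the bit above so a jumbo FC frame lands in a single contiguous buffer. A hypothetical predicate, just to show the access pattern:

/* hypothetical helper, not in the patch */
static inline bool ixgbe_ring_rx_ps_enabled(const struct ixgbe_ring *ring)
{
	return !!(ring->flags & IXGBE_RING_RX_PS_ENABLED);
}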
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index fa87309dc087..be90eb4575f6 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -718,6 +718,12 @@
#define IXGBE_ECC_STATUS_82599 0x110E0
#define IXGBE_BAR_CTRL_82599 0x110F4
+/* PCI Express Control */
+#define IXGBE_GCR_CMPL_TMOUT_MASK 0x0000F000
+#define IXGBE_GCR_CMPL_TMOUT_10ms 0x00001000
+#define IXGBE_GCR_CMPL_TMOUT_RESEND 0x00010000
+#define IXGBE_GCR_CAP_VER2 0x00040000
+
/* Time Sync Registers */
#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */
#define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */
@@ -1521,6 +1527,7 @@
/* PCI Bus Info */
#define IXGBE_PCI_LINK_STATUS 0xB2
+#define IXGBE_PCI_DEVICE_CONTROL2 0xC8
#define IXGBE_PCI_LINK_WIDTH 0x3F0
#define IXGBE_PCI_LINK_WIDTH_1 0x10
#define IXGBE_PCI_LINK_WIDTH_2 0x20
@@ -1531,6 +1538,7 @@
#define IXGBE_PCI_LINK_SPEED_5000 0x2
#define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E
#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80
+#define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005
/* Number of 100 microseconds we wait for PCI Express master disable */
#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800
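For reference, the new IXGBE_PCI_DEVICE_CONTROL2 offset and IXGBE_PCI_DEVICE_CONTROL2_16ms value map onto the PCIe Device Control 2 register, whose Completion Timeout Value field sits in bits 3:0; encoding 0101b selects the 16ms-55ms range. A small decoding sketch (the mask name is an assumption, not a driver define):

#define EXAMPLE_DEVCTL2_CMPL_TMOUT_MASK	0x000f	/* assumed: bits 3:0 */

/* returns the raw range encoding; 0x5 corresponds to 16ms to 55ms */
static u16 example_cmpl_tmout_encoding(u16 devctl2)
{
	return devctl2 & EXAMPLE_DEVCTL2_CMPL_TMOUT_MASK;
}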