Diffstat (limited to 'drivers/net/wireless/ath/wcn36xx/dxe.c')
-rw-r--r--	drivers/net/wireless/ath/wcn36xx/dxe.c	| 200
1 file changed, 120 insertions(+), 80 deletions(-)
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c
index 2c3b899a88fa..06cfe8d311f3 100644
--- a/drivers/net/wireless/ath/wcn36xx/dxe.c
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.c
@@ -78,7 +78,6 @@ static int wcn36xx_dxe_allocate_ctl_block(struct wcn36xx_dxe_ch *ch)
if (!cur_ctl)
goto out_fail;
- spin_lock_init(&cur_ctl->skb_lock);
cur_ctl->ctl_blk_order = i;
if (i == 0) {
ch->head_blk_ctl = cur_ctl;
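[note] The per-buffer skb_lock initialization is dropped here because the later hunks serialize all ring state under the per-channel ch->lock alone. A minimal sketch of the consolidated locking model (helper name hypothetical, for illustration only):

static void wcn36xx_walk_ring_locked(struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *ctl;
	unsigned long flags;

	/* One lock per channel now guards the head/tail pointers and
	 * every ctl->skb in the ring; there is no nested per-buffer
	 * lock left to take, or to get ordering wrong against.
	 */
	spin_lock_irqsave(&ch->lock, flags);
	for (ctl = ch->tail_blk_ctl; ctl != ch->head_blk_ctl; ctl = ctl->next)
		; /* inspect or recycle ctl->skb here */
	spin_unlock_irqrestore(&ch->lock, flags);
}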
@@ -275,12 +274,14 @@ static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
return 0;
}
-static int wcn36xx_dxe_fill_skb(struct device *dev, struct wcn36xx_dxe_ctl *ctl)
+static int wcn36xx_dxe_fill_skb(struct device *dev,
+ struct wcn36xx_dxe_ctl *ctl,
+ gfp_t gfp)
{
struct wcn36xx_dxe_desc *dxe = ctl->desc;
struct sk_buff *skb;
- skb = alloc_skb(WCN36XX_PKT_SIZE, GFP_ATOMIC);
+ skb = alloc_skb(WCN36XX_PKT_SIZE, gfp);
if (skb == NULL)
return -ENOMEM;
@@ -307,7 +308,7 @@ static int wcn36xx_dxe_ch_alloc_skb(struct wcn36xx *wcn,
cur_ctl = wcn_ch->head_blk_ctl;
for (i = 0; i < wcn_ch->desc_num; i++) {
- wcn36xx_dxe_fill_skb(wcn->dev, cur_ctl);
+ wcn36xx_dxe_fill_skb(wcn->dev, cur_ctl, GFP_KERNEL);
cur_ctl = cur_ctl->next;
}
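[note] Threading a gfp_t through wcn36xx_dxe_fill_skb() lets one routine serve both allocation contexts: ring setup runs in process context and may sleep (GFP_KERNEL), while the RX refill below runs in IRQ context and must not (GFP_ATOMIC). A self-contained sketch of the idiom, with hypothetical helper names:

static int rx_buf_fill(struct wcn36xx_dxe_ctl *ctl, gfp_t gfp)
{
	struct sk_buff *skb = alloc_skb(WCN36XX_PKT_SIZE, gfp);

	if (!skb)
		return -ENOMEM;
	ctl->skb = skb;
	return 0;
}

static int ring_setup(struct wcn36xx_dxe_ctl *ctl)
{
	return rx_buf_fill(ctl, GFP_KERNEL);	/* process context: may sleep */
}

static int ring_refill_in_irq(struct wcn36xx_dxe_ctl *ctl)
{
	return rx_buf_fill(ctl, GFP_ATOMIC);	/* IRQ context: must not sleep */
}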
@@ -367,9 +368,11 @@ static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
spin_lock_irqsave(&ch->lock, flags);
ctl = ch->tail_blk_ctl;
do {
- if (ctl->desc->ctrl & WCN36xx_DXE_CTRL_VLD)
+ if (READ_ONCE(ctl->desc->ctrl) & WCN36xx_DXE_CTRL_VLD)
break;
- if (ctl->skb) {
+
+ if (ctl->skb &&
+ READ_ONCE(ctl->desc->ctrl) & WCN36xx_DXE_CTRL_EOP) {
dma_unmap_single(wcn->dev, ctl->desc->src_addr_l,
ctl->skb->len, DMA_TO_DEVICE);
info = IEEE80211_SKB_CB(ctl->skb);
@@ -377,18 +380,16 @@ static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
/* Keep frame until TX status comes */
ieee80211_free_txskb(wcn->hw, ctl->skb);
}
- spin_lock(&ctl->skb_lock);
+
if (wcn->queues_stopped) {
wcn->queues_stopped = false;
ieee80211_wake_queues(wcn->hw);
}
- spin_unlock(&ctl->skb_lock);
ctl->skb = NULL;
}
ctl = ctl->next;
- } while (ctl != ch->head_blk_ctl &&
- !(ctl->desc->ctrl & WCN36xx_DXE_CTRL_VLD));
+ } while (ctl != ch->head_blk_ctl);
ch->tail_blk_ctl = ctl;
spin_unlock_irqrestore(&ch->lock, flags);
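[note] Two things change in reap_tx_dxes(). First, descriptor control words are read with READ_ONCE(): they live in coherent DMA memory that the DXE engine updates behind the CPU's back, so the compiler must not cache or tear the load. Second, only descriptors with the EOP bit set carry an skb to unmap; BD-only descriptors are skipped. The extra VLD test in the old loop condition is redundant once the break at the top of each iteration covers it. A condensed sketch of the resulting loop shape:

	ctl = ch->tail_blk_ctl;
	do {
		u32 ctrl = READ_ONCE(ctl->desc->ctrl);	/* fresh load each pass */

		if (ctrl & WCN36xx_DXE_CTRL_VLD)
			break;		/* device still owns this slot */
		if (ctl->skb && (ctrl & WCN36xx_DXE_CTRL_EOP)) {
			/* last descriptor of a frame: unmap, complete skb */
		}
		ctl = ctl->next;
	} while (ctl != ch->head_blk_ctl);	/* at most one lap of the ring */
	ch->tail_blk_ctl = ctl;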
@@ -431,8 +432,12 @@ static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
WCN36XX_INT_MASK_CHAN_TX_H);
}
- wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready high\n");
- reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch);
+ wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready high, reason %08x\n",
+ int_reason);
+
+ if (int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
+ WCN36XX_CH_STAT_INT_ED_MASK))
+ reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch);
}
if (int_src & WCN36XX_INT_MASK_CHAN_TX_L) {
@@ -466,8 +471,12 @@ static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
WCN36XX_INT_MASK_CHAN_TX_L);
}
- wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready low\n");
- reap_tx_dxes(wcn, &wcn->dxe_tx_l_ch);
+ wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready low, reason %08x\n",
+ int_reason);
+
+ if (int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
+ WCN36XX_CH_STAT_INT_ED_MASK))
+ reap_tx_dxes(wcn, &wcn->dxe_tx_l_ch);
}
return IRQ_HANDLED;
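[note] Both TX completion branches now gate the reap on the channel status: reap_tx_dxes() runs only when a DONE or ED (end-of-descriptor) event is actually reported, not on every interrupt including pure error reports. A condensed sketch of one branch; int_reason is read from the channel status register in the elided context above, and the TX_H register name here is assumed by analogy with the RX ones used below:

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_H,
				  &int_reason);
	/* ... interrupt ack/clear handling elided ... */
	if (int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
			  WCN36XX_CH_STAT_INT_ED_MASK))
		reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch);	/* completion pending */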
@@ -477,9 +486,8 @@ static irqreturn_t wcn36xx_irq_rx_ready(int irq, void *dev)
{
struct wcn36xx *wcn = (struct wcn36xx *)dev;
- disable_irq_nosync(wcn->rx_irq);
wcn36xx_dxe_rx_frame(wcn);
- enable_irq(wcn->rx_irq);
+
return IRQ_HANDLED;
}
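[note] The disable_irq_nosync()/enable_irq() pair around the RX work was redundant: the genirq core never runs two instances of the same line's hard handler concurrently, so wcn36xx_irq_rx_ready() cannot re-enter itself, and cross-CPU protection of the ring now comes from ch->lock inside the RX path. For illustration, a registration sketch (flags and name are assumptions, not copied from this driver):

	ret = request_irq(wcn->rx_irq, wcn36xx_irq_rx_ready,
			  IRQF_TRIGGER_HIGH, "wcn36xx_rx", wcn);
	if (ret)
		return ret;	/* handler is serialized per line by genirq */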
@@ -513,27 +521,53 @@ out_err:
}
static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
- struct wcn36xx_dxe_ch *ch)
+ struct wcn36xx_dxe_ch *ch,
+ u32 ctrl,
+ u32 en_mask,
+ u32 int_mask,
+ u32 status_reg)
{
- struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl;
- struct wcn36xx_dxe_desc *dxe = ctl->desc;
+ struct wcn36xx_dxe_desc *dxe;
+ struct wcn36xx_dxe_ctl *ctl;
dma_addr_t dma_addr;
struct sk_buff *skb;
- int ret = 0, int_mask;
- u32 value;
+ u32 int_reason;
+ int ret;
- if (ch->ch_type == WCN36XX_DXE_CH_RX_L) {
- value = WCN36XX_DXE_CTRL_RX_L;
- int_mask = WCN36XX_DXE_INT_CH1_MASK;
- } else {
- value = WCN36XX_DXE_CTRL_RX_H;
- int_mask = WCN36XX_DXE_INT_CH3_MASK;
+ wcn36xx_dxe_read_register(wcn, status_reg, &int_reason);
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR, int_mask);
+
+ if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK) {
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_0_INT_ERR_CLR,
+ int_mask);
+
+ wcn36xx_err("DXE IRQ reported error on RX channel\n");
}
- while (!(dxe->ctrl & WCN36xx_DXE_CTRL_VLD)) {
+ if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK)
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_0_INT_DONE_CLR,
+ int_mask);
+
+ if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK)
+ wcn36xx_dxe_write_register(wcn,
+ WCN36XX_DXE_0_INT_ED_CLR,
+ int_mask);
+
+ if (!(int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
+ WCN36XX_CH_STAT_INT_ED_MASK)))
+ return 0;
+
+ spin_lock(&ch->lock);
+
+ ctl = ch->head_blk_ctl;
+ dxe = ctl->desc;
+
+ while (!(READ_ONCE(dxe->ctrl) & WCN36xx_DXE_CTRL_VLD)) {
skb = ctl->skb;
dma_addr = dxe->dst_addr_l;
- ret = wcn36xx_dxe_fill_skb(wcn->dev, ctl);
+ ret = wcn36xx_dxe_fill_skb(wcn->dev, ctl, GFP_ATOMIC);
if (0 == ret) {
/* new skb allocation ok. Use the new one and queue
* the old one to network system.
@@ -543,13 +577,16 @@ static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
wcn36xx_rx_skb(wcn, skb);
} /* else keep old skb not submitted and use it for rx DMA */
- dxe->ctrl = value;
+ dxe->ctrl = ctrl;
ctl = ctl->next;
dxe = ctl->desc;
}
- wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR, int_mask);
+ wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR, en_mask);
ch->head_blk_ctl = ctl;
+
+ spin_unlock(&ch->lock);
+
return 0;
}
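[note] wcn36xx_rx_handle_packets() now does its own interrupt bookkeeping: read the channel's status register, clear the matching ERR/DONE/ED bits against the dedicated clear registers, bail out early when no completion event is pending, and only then walk the ring under ch->lock. Inside the loop, the refill-or-reuse rule is the subtle part. A sketch, under the assumption that the elided context unmaps with WCN36XX_PKT_SIZE and DMA_FROM_DEVICE:

	old_skb = ctl->skb;
	if (wcn36xx_dxe_fill_skb(wcn->dev, ctl, GFP_ATOMIC) == 0) {
		/* replacement buffer in place: hand the filled one up */
		dma_unmap_single(wcn->dev, dma_addr, WCN36XX_PKT_SIZE,
				 DMA_FROM_DEVICE);
		wcn36xx_rx_skb(wcn, old_skb);
	}
	/* else: allocation failed; keep the old skb mapped so the ring
	 * never has an empty slot (this frame is dropped, RX keeps going)
	 */
	dxe->ctrl = ctrl;	/* re-arm: hand the slot back to the engine */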
@@ -560,19 +597,20 @@ void wcn36xx_dxe_rx_frame(struct wcn36xx *wcn)
wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);
/* RX_LOW_PRI */
- if (int_src & WCN36XX_DXE_INT_CH1_MASK) {
- wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
- WCN36XX_DXE_INT_CH1_MASK);
- wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_l_ch));
- }
+ if (int_src & WCN36XX_DXE_INT_CH1_MASK)
+ wcn36xx_rx_handle_packets(wcn, &wcn->dxe_rx_l_ch,
+ WCN36XX_DXE_CTRL_RX_L,
+ WCN36XX_DXE_INT_CH1_MASK,
+ WCN36XX_INT_MASK_CHAN_RX_L,
+ WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_L);
/* RX_HIGH_PRI */
- if (int_src & WCN36XX_DXE_INT_CH3_MASK) {
- /* Clean up all the INT within this channel */
- wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
- WCN36XX_DXE_INT_CH3_MASK);
- wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_h_ch));
- }
+ if (int_src & WCN36XX_DXE_INT_CH3_MASK)
+ wcn36xx_rx_handle_packets(wcn, &wcn->dxe_rx_h_ch,
+ WCN36XX_DXE_CTRL_RX_H,
+ WCN36XX_DXE_INT_CH3_MASK,
+ WCN36XX_INT_MASK_CHAN_RX_H,
+ WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_H);
if (!int_src)
wcn36xx_warn("No DXE interrupt pending\n");
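[note] With the bookkeeping moved into wcn36xx_rx_handle_packets(), the dispatcher only decodes the raw interrupt source, and the per-ring constants are spelled out at the call sites. The mapping, summarized:

/*
 * ring  ctrl (re-arm)          en_mask                    int_mask                     status_reg
 * RX_L  WCN36XX_DXE_CTRL_RX_L  WCN36XX_DXE_INT_CH1_MASK   WCN36XX_INT_MASK_CHAN_RX_L   ..._REG_ADDR_RX_L
 * RX_H  WCN36XX_DXE_CTRL_RX_H  WCN36XX_DXE_INT_CH3_MASK   WCN36XX_INT_MASK_CHAN_RX_H   ..._REG_ADDR_RX_H
 */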
@@ -643,8 +681,8 @@ int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
struct sk_buff *skb,
bool is_low)
{
- struct wcn36xx_dxe_ctl *ctl = NULL;
- struct wcn36xx_dxe_desc *desc = NULL;
+ struct wcn36xx_dxe_desc *desc_bd, *desc_skb;
+ struct wcn36xx_dxe_ctl *ctl_bd, *ctl_skb;
struct wcn36xx_dxe_ch *ch = NULL;
unsigned long flags;
int ret;
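[note] The hunk below reworks the submit path around the two linked descriptors each frame needs: a BD descriptor (ctl_bd) and a payload descriptor (ctl_skb). The key rule it enforces, sketched here as a fragment: populate every descriptor field first, then set the VLD control words last, payload before BD, with write barriers in between, so the DXE engine can never chase a valid BD into a half-written payload descriptor.

	/* fragment; assumes desc_bd/desc_skb fields are filled in above */
	wmb();				/* all fields visible before any VLD */
	desc_skb->ctrl = ch->ctrl_skb;	/* payload descriptor becomes valid */
	wmb();				/* ... strictly before the BD's VLD */
	desc_bd->ctrl = ch->ctrl_bd;	/* engine may start fetching now */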
@@ -652,73 +690,75 @@ int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
ch = is_low ? &wcn->dxe_tx_l_ch : &wcn->dxe_tx_h_ch;
spin_lock_irqsave(&ch->lock, flags);
- ctl = ch->head_blk_ctl;
-
- spin_lock(&ctl->next->skb_lock);
+ ctl_bd = ch->head_blk_ctl;
+ ctl_skb = ctl_bd->next;
/*
* If skb is not null that means that we reached the tail of the ring
* hence ring is full. Stop queues to let mac80211 back off until ring
* has an empty slot again.
*/
- if (NULL != ctl->next->skb) {
+ if (NULL != ctl_skb->skb) {
ieee80211_stop_queues(wcn->hw);
wcn->queues_stopped = true;
- spin_unlock(&ctl->next->skb_lock);
spin_unlock_irqrestore(&ch->lock, flags);
return -EBUSY;
}
- spin_unlock(&ctl->next->skb_lock);
- ctl->skb = NULL;
- desc = ctl->desc;
+ if (unlikely(ctl_skb->bd_cpu_addr)) {
+ wcn36xx_err("bd_cpu_addr cannot be NULL for skb DXE\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ desc_bd = ctl_bd->desc;
+ desc_skb = ctl_skb->desc;
+
+ ctl_bd->skb = NULL;
/* write buffer descriptor */
- memcpy(ctl->bd_cpu_addr, bd, sizeof(*bd));
+ memcpy(ctl_bd->bd_cpu_addr, bd, sizeof(*bd));
/* Set source address of the BD we send */
- desc->src_addr_l = ctl->bd_phy_addr;
-
- desc->dst_addr_l = ch->dxe_wq;
- desc->fr_len = sizeof(struct wcn36xx_tx_bd);
- desc->ctrl = ch->ctrl_bd;
+ desc_bd->src_addr_l = ctl_bd->bd_phy_addr;
+ desc_bd->dst_addr_l = ch->dxe_wq;
+ desc_bd->fr_len = sizeof(struct wcn36xx_tx_bd);
wcn36xx_dbg(WCN36XX_DBG_DXE, "DXE TX\n");
wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC1 >>> ",
- (char *)desc, sizeof(*desc));
+ (char *)desc_bd, sizeof(*desc_bd));
wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP,
- "BD >>> ", (char *)ctl->bd_cpu_addr,
+ "BD >>> ", (char *)ctl_bd->bd_cpu_addr,
sizeof(struct wcn36xx_tx_bd));
- /* Set source address of the SKB we send */
- ctl = ctl->next;
- ctl->skb = skb;
- desc = ctl->desc;
- if (ctl->bd_cpu_addr) {
- wcn36xx_err("bd_cpu_addr cannot be NULL for skb DXE\n");
- ret = -EINVAL;
+ desc_skb->src_addr_l = dma_map_single(wcn->dev,
+ skb->data,
+ skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(wcn->dev, desc_skb->src_addr_l)) {
+ dev_err(wcn->dev, "unable to DMA map src_addr_l\n");
+ ret = -ENOMEM;
goto unlock;
}
- desc->src_addr_l = dma_map_single(wcn->dev,
- ctl->skb->data,
- ctl->skb->len,
- DMA_TO_DEVICE);
-
- desc->dst_addr_l = ch->dxe_wq;
- desc->fr_len = ctl->skb->len;
-
- /* set dxe descriptor to VALID */
- desc->ctrl = ch->ctrl_skb;
+ ctl_skb->skb = skb;
+ desc_skb->dst_addr_l = ch->dxe_wq;
+ desc_skb->fr_len = ctl_skb->skb->len;
wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC2 >>> ",
- (char *)desc, sizeof(*desc));
+ (char *)desc_skb, sizeof(*desc_skb));
wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "SKB >>> ",
- (char *)ctl->skb->data, ctl->skb->len);
+ (char *)ctl_skb->skb->data, ctl_skb->skb->len);
/* Move the head of the ring to the next empty descriptor */
- ch->head_blk_ctl = ctl->next;
+ ch->head_blk_ctl = ctl_skb->next;
+
+ /* Commit all previous writes and set descriptors to VALID */
+ wmb();
+ desc_skb->ctrl = ch->ctrl_skb;
+ wmb();
+ desc_bd->ctrl = ch->ctrl_bd;
/*
* When connected and trying to send data frame chip can be in sleep