struct ieee80211_hw *hw = sc->hw;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info);
+ int hdrlen, padsize;
DPRINTF(sc, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
tx_info->flags |= IEEE80211_TX_STAT_ACK;
}
- tx_info->status.rates[0].count = tx_status->retries + 1;
+ tx_info->status.rates[0].count = tx_status->retries;
+ if (tx_info->status.rates[0].flags & IEEE80211_TX_RC_MCS) {
+ /* Change idx from internal table index to MCS index */
+ int idx = tx_info->status.rates[0].idx;
+ struct ath_rate_table *rate_table = sc->cur_rate_table;
+ if (idx >= 0 && idx < rate_table->rate_cnt)
+ tx_info->status.rates[0].idx =
+ rate_table->info[idx].ratecode & 0x7f;
+ }
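The single mask is enough here because Atheros ratecodes carry the MCS number in the low bits and flag HT rates in the high bit, so clearing that flag leaves exactly the index mac80211 expects in rates[0].idx. A minimal sketch of the assumed layout (the 0x80 HT flag is inferred from the mask, not spelled out in this hunk):

	/* assumed ratecode layout: bit 7 = HT flag, bits 6:0 = MCS number */
	u8 ratecode = 0x80 | 3;		/* hypothetical rate-table entry for MCS 3 */
	u8 mcs = ratecode & 0x7f;	/* -> 3, suitable for rates[0].idx */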
+
+ hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+ padsize = hdrlen & 3;
+ if (padsize && hdrlen >= 24) {
+ /*
+ * Remove MAC header padding before giving the frame back to
+ * mac80211.
+ */
+ memmove(skb->data + padsize, skb->data, hdrlen);
+ skb_pull(skb, padsize);
+ }
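The pad removed here was inserted on transmit between the 802.11 header and the payload so that the payload starts on a 4-byte boundary; since 802.11 header lengths are even, hdrlen & 3 is exactly that gap. The hdrlen >= 24 check skips frames too short to carry a full header, which are never padded. A worked example, assuming a QoS data frame:

	int hdrlen = 26;		/* QoS data header */
	int padsize = hdrlen & 3;	/* 26 & 3 == 2 pad bytes after the header */
	/* memmove() slides the header up over the pad, then skb_pull() drops
	 * the two leading bytes, restoring the frame mac80211 handed down */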
ieee80211_tx_status(hw, skb);
}
{
struct ath_buf *bf = NULL;
- spin_lock_bh(&sc->sc_txbuflock);
+ spin_lock_bh(&sc->tx.txbuflock);
- if (unlikely(list_empty(&sc->sc_txbuf))) {
- spin_unlock_bh(&sc->sc_txbuflock);
+ if (unlikely(list_empty(&sc->tx.txbuf))) {
+ spin_unlock_bh(&sc->tx.txbuflock);
return NULL;
}
- bf = list_first_entry(&sc->sc_txbuf, struct ath_buf, list);
+ bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
list_del(&bf->list);
- spin_unlock_bh(&sc->sc_txbuflock);
+ spin_unlock_bh(&sc->tx.txbuflock);
return bf;
}
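This getter (presumably ath_tx_get_buffer; the signature is elided above) pops the head of the sc->tx.txbuf free list under tx.txbuflock and returns NULL once the pool is exhausted, so callers must treat a NULL buffer as backpressure. A sketch of the caller contract, with hypothetical error handling:

	struct ath_buf *bf = ath_tx_get_buffer(sc);
	if (!bf)
		return NULL;	/* hypothetical caller: drop or requeue the frame */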
{
struct sk_buff *skb = bf->bf_mpdu;
struct ath_xmit_status tx_status;
+ unsigned long flags;
/*
 * Set retry information.
 */
/*
* Return this mpdu's list of ath_buf to the free queue
*/
- spin_lock_bh(&sc->sc_txbuflock);
- list_splice_tail_init(bf_q, &sc->sc_txbuf);
- spin_unlock_bh(&sc->sc_txbuflock);
+ spin_lock_irqsave(&sc->tx.txbuflock, flags);
+ list_splice_tail_init(bf_q, &sc->tx.txbuf);
+ spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
}
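Note the asymmetry with the getter above: this return path now takes the lock with the irqsave variant, and the new flags local carries the saved IRQ state. That is the right choice if this completion path can run with interrupts already disabled, where the _bh variants are unsafe; the getter keeps _bh because it only contends with softirq users. The pattern in isolation, on a generic lock:

	unsigned long flags;
	spin_lock_irqsave(&lock, flags);	/* usable from any context */
	/* ... manipulate the free list ... */
	spin_unlock_irqrestore(&lock, flags);	/* restores the entry IRQ state */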
/* Pause a tid */
static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
- struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
+ struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
spin_lock_bh(&txq->axq_lock);
void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
- struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
+ struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
ASSERT(tid->paused > 0);
spin_lock_bh(&txq->axq_lock);
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
- struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
+ struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
struct ath_buf *bf;
struct list_head bf_head;
INIT_LIST_HEAD(&bf_head);
struct ath_buf *tbf;
/* allocate new descriptor */
- spin_lock_bh(&sc->sc_txbuflock);
- ASSERT(!list_empty((&sc->sc_txbuf)));
- tbf = list_first_entry(&sc->sc_txbuf,
+ spin_lock_bh(&sc->tx.txbuflock);
+ ASSERT(!list_empty(&sc->tx.txbuf));
+ tbf = list_first_entry(&sc->tx.txbuf,
struct ath_buf, list);
list_del(&tbf->list);
- spin_unlock_bh(&sc->sc_txbuflock);
+ spin_unlock_bh(&sc->tx.txbuflock);
ATH_TXBUF_RESET(tbf);
if (bf_held) {
list_del(&bf_held->list);
- spin_lock_bh(&sc->sc_txbuflock);
- list_add_tail(&bf_held->list, &sc->sc_txbuf);
- spin_unlock_bh(&sc->sc_txbuflock);
+ spin_lock_bh(&sc->tx.txbuflock);
+ list_add_tail(&bf_held->list, &sc->tx.txbuf);
+ spin_unlock_bh(&sc->tx.txbuflock);
}
if (!bf_isampdu(bf)) {
if (!(sc->sc_flags & SC_OP_INVALID)) {
for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
if (ATH_TXQ_SETUP(sc, i)) {
- ath_tx_stopdma(sc, &sc->sc_txq[i]);
+ ath_tx_stopdma(sc, &sc->tx.txq[i]);
/* The TxDMA may not really be stopped.
 * Double-check the HAL TX pending count. */
npend += ath9k_hw_numtxpending(ah,
- sc->sc_txq[i].axq_qnum);
+ sc->tx.txq[i].axq_qnum);
}
}
}
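The double-check exists because ath_tx_stopdma() only asks the DMA engine to halt; the hardware can still be draining descriptors when the call returns. Summing ath9k_hw_numtxpending() across the queues detects that case, and a non-zero npend presumably triggers the heavier recovery path before the software drain below. Roughly:

	if (npend) {
		/* assumed recovery, not shown in this hunk: the DMA engine
		 * would not stop, so reset the chip before draining queues */
	}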
for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
if (ATH_TXQ_SETUP(sc, i))
- ath_tx_draintxq(sc, &sc->sc_txq[i], retry_tx);
+ ath_tx_draintxq(sc, &sc->tx.txq[i], retry_tx);
}
}
}
spin_unlock_bh(&txq->axq_lock);
- spin_lock_bh(&sc->sc_txbuflock);
- list_add_tail(&bf->list, &sc->sc_txbuf);
- spin_unlock_bh(&sc->sc_txbuflock);
+ spin_lock_bh(&sc->tx.txbuflock);
+ list_add_tail(&bf->list, &sc->tx.txbuf);
+ spin_unlock_bh(&sc->tx.txbuflock);
return r;
}
int error = 0;
do {
- spin_lock_init(&sc->sc_txbuflock);
+ spin_lock_init(&sc->tx.txbuflock);
/* Set up tx descriptors */
- error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
+ error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
"tx", nbufs, 1);
if (error != 0) {
DPRINTF(sc, ATH_DBG_FATAL,
}
/* XXX allocate beacon state together with vap */
- error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf,
+ error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
"beacon", ATH_BCBUF, 1);
if (error != 0) {
DPRINTF(sc, ATH_DBG_FATAL,
int ath_tx_cleanup(struct ath_softc *sc)
{
/* clean up beacon descriptors */
- if (sc->sc_bdma.dd_desc_len != 0)
- ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf);
+ if (sc->beacon.bdma.dd_desc_len != 0)
+ ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
/* clean up tx descriptors */
- if (sc->sc_txdma.dd_desc_len != 0)
- ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
+ if (sc->tx.txdma.dd_desc_len != 0)
+ ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
return 0;
}
return NULL;
}
- if (qnum >= ARRAY_SIZE(sc->sc_txq)) {
+ if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
DPRINTF(sc, ATH_DBG_FATAL,
"qnum %u out of range, max %u!\n",
- qnum, (unsigned int)ARRAY_SIZE(sc->sc_txq));
+ qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq));
ath9k_hw_releasetxqueue(ah, qnum);
return NULL;
}
if (!ATH_TXQ_SETUP(sc, qnum)) {
- struct ath_txq *txq = &sc->sc_txq[qnum];
+ struct ath_txq *txq = &sc->tx.txq[qnum];
txq->axq_qnum = qnum;
txq->axq_link = NULL;
txq->axq_aggr_depth = 0;
txq->axq_totalqueued = 0;
txq->axq_linkbuf = NULL;
- sc->sc_txqsetup |= 1<<qnum;
+ sc->tx.txqsetup |= 1<<qnum;
}
- return &sc->sc_txq[qnum];
+ return &sc->tx.txq[qnum];
}
/* Reclaim resources for a setup queue */
void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
- sc->sc_txqsetup &= ~(1<<txq->axq_qnum);
+ sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}
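Setup and teardown keep sc->tx.txqsetup as a bitmask with one bit per hardware queue: bit qnum is set in ath_txq_setup() above and cleared here, and the ATH_TXQ_SETUP(sc, i) checks used throughout presumably reduce to a test of that bit:

	/* assumed definition, matching how the bit is set and cleared here */
	#define ATH_TXQ_SETUP(sc, i)	((sc)->tx.txqsetup & (1 << (i)))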
{
struct ath_txq *txq;
- if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) {
+ if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
DPRINTF(sc, ATH_DBG_FATAL,
"HAL AC %u out of range, max %zu!\n",
- haltype, ARRAY_SIZE(sc->sc_haltype2q));
+ haltype, ARRAY_SIZE(sc->tx.hwq_map));
return 0;
}
txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
if (txq != NULL) {
- sc->sc_haltype2q[haltype] = txq->axq_qnum;
+ sc->tx.hwq_map[haltype] = txq->axq_qnum;
return 1;
} else
return 0;
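ath_tx_setup() records the queue number the HAL hands back, indexed by access category, so the hot TX path can resolve a queue with a plain array read instead of re-querying the HAL. A sketch of the round-trip, with the AC constant assumed from the ath9k naming of this era:

	int qnum = sc->tx.hwq_map[ATH9K_WME_AC_BE];	/* filled by ath_tx_setup() */
	struct ath_txq *txq = &sc->tx.txq[qnum];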
switch (qtype) {
case ATH9K_TX_QUEUE_DATA:
- if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) {
+ if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
DPRINTF(sc, ATH_DBG_FATAL,
"HAL AC %u out of range, max %zu!\n",
- haltype, ARRAY_SIZE(sc->sc_haltype2q));
+ haltype, ARRAY_SIZE(sc->tx.hwq_map));
return -1;
}
- qnum = sc->sc_haltype2q[haltype];
+ qnum = sc->tx.hwq_map[haltype];
break;
case ATH9K_TX_QUEUE_BEACON:
- qnum = sc->sc_bhalq;
+ qnum = sc->beacon.beaconq;
break;
case ATH9K_TX_QUEUE_CAB:
- qnum = sc->sc_cabq->axq_qnum;
+ qnum = sc->beacon.cabq->axq_qnum;
break;
default:
qnum = -1;
int qnum;
qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
- txq = &sc->sc_txq[qnum];
+ txq = &sc->tx.txq[qnum];
spin_lock_bh(&txq->axq_lock);
int error = 0;
struct ath9k_tx_queue_info qi;
- if (qnum == sc->sc_bhalq) {
+ if (qnum == sc->beacon.beaconq) {
/*
* XXX: for beacon queue, we just save the parameter.
* It will be picked up by ath_beaconq_config when
* it's necessary.
*/
- sc->sc_beacon_qi = *qinfo;
+ sc->beacon.beacon_qi = *qinfo;
return 0;
}
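Deferring the beacon queue parameters keeps this function from touching the hardware at an inconvenient time; the saved ath9k_tx_queue_info is applied when the beacon queue is next configured. A sketch of how the deferred apply might look inside ath_beaconq_config(), assumed rather than shown in this patch:

	struct ath9k_tx_queue_info qi = sc->beacon.beacon_qi;
	ath9k_hw_set_txq_props(ah, sc->beacon.beaconq, &qi);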
- ASSERT(sc->sc_txq[qnum].axq_qnum == qnum);
+ ASSERT(sc->tx.txq[qnum].axq_qnum == qnum);
ath9k_hw_get_txq_props(ah, qnum, &qi);
qi.tqi_aifs = qinfo->tqi_aifs;
int ath_cabq_update(struct ath_softc *sc)
{
struct ath9k_tx_queue_info qi;
- int qnum = sc->sc_cabq->axq_qnum;
+ int qnum = sc->beacon.cabq->axq_qnum;
struct ath_beacon_config conf;
ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
/*
 * Process each active queue.
 */
for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
- ath_tx_processq(sc, &sc->sc_txq[i]);
+ ath_tx_processq(sc, &sc->tx.txq[i]);
}
}
list_del(&bf->list);
spin_unlock_bh(&txq->axq_lock);
- spin_lock_bh(&sc->sc_txbuflock);
- list_add_tail(&bf->list, &sc->sc_txbuf);
- spin_unlock_bh(&sc->sc_txbuflock);
+ spin_lock_bh(&sc->tx.txbuflock);
+ list_add_tail(&bf->list, &sc->tx.txbuf);
+ spin_unlock_bh(&sc->tx.txbuflock);
continue;
}
/* stop the beacon queue; the beacon will be freed when
 * we go to the INIT state */
if (!(sc->sc_flags & SC_OP_INVALID)) {
- (void) ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq);
+ (void) ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
DPRINTF(sc, ATH_DBG_XMIT, "beacon queue %x\n",
- ath9k_hw_gettxbuf(sc->sc_ah, sc->sc_bhalq));
+ ath9k_hw_gettxbuf(sc->sc_ah, sc->beacon.beaconq));
}
ath_drain_txdataq(sc, retry_tx);
u32 ath_txq_depth(struct ath_softc *sc, int qnum)
{
- return sc->sc_txq[qnum].axq_depth;
+ return sc->tx.txq[qnum].axq_depth;
}
u32 ath_txq_aggr_depth(struct ath_softc *sc, int qnum)
{
- return sc->sc_txq[qnum].axq_aggr_depth;
+ return sc->tx.txq[qnum].axq_aggr_depth;
}
bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno)
void ath_tx_aggr_teardown(struct ath_softc *sc, struct ath_node *an, u8 tid)
{
struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
- struct ath_txq *txq = &sc->sc_txq[txtid->ac->qnum];
+ struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];
struct ath_buf *bf;
struct list_head bf_head;
INIT_LIST_HEAD(&bf_head);
struct ath_txq *txq;
for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
if (ATH_TXQ_SETUP(sc, i)) {
- txq = &sc->sc_txq[i];
+ txq = &sc->tx.txq[i];
spin_lock(&txq->axq_lock);
if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
- sc->seq_no += 0x10;
+ sc->tx.seq_no += 0x10;
hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
- hdr->seq_ctrl |= cpu_to_le16(sc->seq_no);
+ hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
}
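The arithmetic follows the 802.11 Sequence Control layout: bits 3:0 hold the fragment number and bits 15:4 the sequence number, so stepping the sequence number means adding 0x10, and IEEE80211_SCTL_FRAG masks the field down to its fragment bits before the new sequence number is OR-ed in. For example:

	/* sequence number 5, fragment 2: seq_ctrl = (5 << 4) | 2 = 0x52 */
	/* next first fragment: seq_no += 0x10 gives sequence number 6 */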
/* Add the padding after the header if this is not already done */
memmove(skb->data, skb->data + padsize, hdrlen);
}
- txctl.txq = sc->sc_cabq;
+ txctl.txq = sc->beacon.cabq;
DPRINTF(sc, ATH_DBG_XMIT, "transmitting CABQ packet, skb: %p\n", skb);