/******************************************************************************
 *
 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <net/mac80211.h>
#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
static const u16 default_tid_to_tx_fifo[] = {
/**
 * iwl_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
int iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
	struct iwl_tfd_frame *bd_tmp = (struct iwl_tfd_frame *)&txq->bd[0];
	struct iwl_tfd_frame *bd = &bd_tmp[txq->q.read_ptr];
	struct pci_dev *dev = priv->pci_dev;

	/* Host command buffers stay mapped in memory, nothing to clean */
	if (txq->q.id == IWL_CMD_QUEUE_NUM)
	/* Sanity check on number of chunks */
	counter = IWL_GET_BITS(*bd, num_tbs);
	if (counter > MAX_NUM_OF_TBS) {
		IWL_ERROR("Too many chunks: %i\n", counter);
		/* @todo issue fatal error, this is quite a serious situation */
	/* Unmap chunks, if any.
	 * TFD info for odd chunks is in a different format than for even chunks. */
	for (i = 0; i < counter; i++) {
				IWL_GET_BITS(bd->pa[index], tb2_addr_lo16) |
				(IWL_GET_BITS(bd->pa[index],
					      tb2_addr_hi20) << 16),
				IWL_GET_BITS(bd->pa[index], tb2_len),

			pci_unmap_single(dev,
					 le32_to_cpu(bd->pa[index].tb1_addr),
					 IWL_GET_BITS(bd->pa[index], tb1_len),
		/* Free SKB, if any, for this chunk */
		if (txq->txb[txq->q.read_ptr].skb[i]) {
			struct sk_buff *skb = txq->txb[txq->q.read_ptr].skb[i];

			txq->txb[txq->q.read_ptr].skb[i] = NULL;

EXPORT_SYMBOL(iwl_hw_txq_free_tfd);
int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *ptr,
				 dma_addr_t addr, u16 len)
	struct iwl_tfd_frame *tfd = ptr;
	u32 num_tbs = IWL_GET_BITS(*tfd, num_tbs);

	/* Each TFD can point to a maximum of 20 Tx buffers */
	if (num_tbs >= MAX_NUM_OF_TBS) {	/* num_tbs is u32, so it can never be < 0 */
		IWL_ERROR("Error: cannot send more than %d chunks\n",
			  MAX_NUM_OF_TBS);

	index = num_tbs / 2;
	is_odd = num_tbs & 0x1;
		tfd->pa[index].tb1_addr = cpu_to_le32(addr);
		IWL_SET_BITS(tfd->pa[index], tb1_addr_hi,
			     iwl_get_dma_hi_address(addr));
		IWL_SET_BITS(tfd->pa[index], tb1_len, len);

		IWL_SET_BITS(tfd->pa[index], tb2_addr_lo16,
			     (u32) (addr & 0xffff));
		IWL_SET_BITS(tfd->pa[index], tb2_addr_hi20, addr >> 16);
		IWL_SET_BITS(tfd->pa[index], tb2_len, len);

	IWL_SET_BITS(*tfd, num_tbs, num_tbs + 1);

EXPORT_SYMBOL(iwl_hw_txq_attach_buf_to_tfd);
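/*
 * Typical usage, as in iwl_tx_skb() below: a first call attaches the buffer
 * holding the Tx command plus the copied MAC header, and an optional second
 * call attaches the remainder of the skb (the frame payload), so one TFD
 * ends up describing the whole frame as up to two chunks.
 */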
/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
	int txq_id = txq->q.id;

	if (txq->need_update == 0)

	/* if we're trying to save power */
	if (test_bit(STATUS_POWER_PMI, &priv->status)) {
		/* wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part. */
		reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO("Requesting wakeup, GP1 = 0x%x\n", reg);
			iwl_set_bit(priv, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

		/* restore this queue's parameters in nic hardware. */
		ret = iwl_grab_nic_access(priv);
		iwl_write_direct32(priv, HBUS_TARG_WRPTR,
				   txq->q.write_ptr | (txq_id << 8));
		iwl_release_nic_access(priv);

	/* else not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx). */
		iwl_write32(priv, HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));

	txq->need_update = 0;

EXPORT_SYMBOL(iwl_txq_update_write_ptr);
/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq)
	struct iwl_queue *q = &txq->q;
	struct pci_dev *dev = priv->pci_dev;

	/* first, empty all BD's */
	for (; q->write_ptr != q->read_ptr;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
		iwl_hw_txq_free_tfd(priv, txq);

	len = sizeof(struct iwl_cmd) * q->n_window;
	if (q->id == IWL_CMD_QUEUE_NUM)
		len += IWL_MAX_SCAN_SIZE;

	/* De-alloc array of command/tx buffers */
	pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);

	/* De-alloc circular buffer of TFDs */
	pci_free_consistent(dev, sizeof(struct iwl_tfd_frame) *
			    txq->q.n_bd, txq->bd, txq->q.dma_addr);

	/* De-alloc array of per-TFD driver data */

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill.  Driver and device exchange status of each
 * queue via "read" and "write" pointers.  The driver keeps a minimum of 2
 * empty entries in each circular buffer, to protect against confusing empty
 * and full queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
 *
 * For Tx queues, there are low mark and high mark limits.  If, after queuing
 * a packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on the 'tx done' IRQ), if free space becomes
 * > high mark, the Tx queue is resumed.
 *
 * See more detailed info in iwl-4965-hw.h.
 ***************************************************/
int iwl_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;
	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}
EXPORT_SYMBOL(iwl_queue_space);
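/*
 * Illustrative sketch (guarded out; not compiled into the driver):
 * iwl_queue_inc_wrap(), used throughout this file and presumably provided
 * by iwl-helpers.h, is assumed to advance an index by one and wrap it at
 * the end of the ring, roughly like this:
 */
#if 0
static int demo_inc_wrap(int index, int n_bd)
{
	return ++index == n_bd ? 0 : index;	/* wrap to 0 at ring end */
}
#endif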
/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
			  int count, int slots_num, u32 id)
	q->n_window = slots_num;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	BUG_ON(!is_power_of_2(count));

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	BUG_ON(!is_power_of_2(slots_num));

	q->low_mark = q->n_window / 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)

	q->write_ptr = q->read_ptr = 0;
/**
 * iwl_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
 */
static int iwl_tx_queue_alloc(struct iwl_priv *priv,
			      struct iwl_tx_queue *txq, u32 id)
	struct pci_dev *dev = priv->pci_dev;

	/* Driver private data, only for Tx (not command) queues,
	 * not shared with device. */
	if (id != IWL_CMD_QUEUE_NUM) {
		txq->txb = kmalloc(sizeof(txq->txb[0]) *
				   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
			IWL_ERROR("kmalloc for auxiliary BD "
				  "structures failed\n");

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->bd = pci_alloc_consistent(dev,
				       sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX,
		IWL_ERROR("pci_alloc_consistent(%zd) failed\n",
			  sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX);
/*
 * Tell nic where to find the circular buffer of Tx Frame Descriptors for
 * a given Tx queue, and enable the DMA channel used for that queue.
 *
 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
 * channels supported in hardware.
 */
static int iwl_hw_tx_queue_init(struct iwl_priv *priv,
				struct iwl_tx_queue *txq)
	int txq_id = txq->q.id;

	spin_lock_irqsave(&priv->lock, flags);
	rc = iwl_grab_nic_access(priv);
	if (rc) {
		spin_unlock_irqrestore(&priv->lock, flags);

	/* Circular buffer (TFD queue in DRAM) physical base address */
	iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
			   txq->q.dma_addr >> 8);

	/* Enable DMA channel, using same id as for TFD queue */
	iwl_write_direct32(
		priv, FH_TCSR_CHNL_TX_CONFIG_REG(txq_id),
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
/**
 * iwl_tx_queue_init - Allocate and initialize one tx/cmd queue
 */
static int iwl_tx_queue_init(struct iwl_priv *priv,
			     struct iwl_tx_queue *txq,
			     int slots_num, u32 txq_id)
	struct pci_dev *dev = priv->pci_dev;

	/*
	 * Alloc buffer array for commands (Tx or other types of commands).
	 * For the command queue (#4), allocate command space + one big
	 * command for scan, since the scan command is very large; the system
	 * will not have two scans at the same time, so only one is needed.
	 * For normal Tx queues (all other queues), no super-size command
	 * space is needed.
	 */
	len = sizeof(struct iwl_cmd) * slots_num;
	if (txq_id == IWL_CMD_QUEUE_NUM)
		len += IWL_MAX_SCAN_SIZE;
	txq->cmd = pci_alloc_consistent(dev, len, &txq->dma_addr_cmd);

	/* Alloc driver data array and TFD circular buffer */
	rc = iwl_tx_queue_alloc(priv, txq, txq_id);
	if (rc) {
		pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);

	txq->need_update = 0;
	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
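	/* e.g. a TFD_QUEUE_SIZE_MAX of 256 passes (256 & 255 == 0), while a
	 * non-power-of-two such as 100 (100 & 99 != 0) would break the
	 * build.  (256 is only an illustrative value here.) */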
	/* Initialize queue's high/low-water marks, and head/tail indexes */
	iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);

	/* Tell device where to find queue */
	iwl_hw_tx_queue_init(priv, txq);
/**
 * iwl_hw_txq_ctx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl_hw_txq_ctx_free(struct iwl_priv *priv)

	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
		iwl_tx_queue_free(priv, &priv->txq[txq_id]);

	/* Keep-warm buffer */

EXPORT_SYMBOL(iwl_hw_txq_ctx_free);
/**
 * iwl_txq_ctx_reset - Reset TX queue context
 * Destroys all DMA structures and initialises them again
 */
int iwl_txq_ctx_reset(struct iwl_priv *priv)
	int txq_id, slots_num;

	/* Free all tx/cmd queues and keep-warm buffer */
	iwl_hw_txq_ctx_free(priv);

	/* Alloc keep-warm buffer */
	ret = iwl_kw_alloc(priv);
	if (ret) {
		IWL_ERROR("Keep Warm allocation failed\n");

	spin_lock_irqsave(&priv->lock, flags);
	ret = iwl_grab_nic_access(priv);
	if (ret) {
		spin_unlock_irqrestore(&priv->lock, flags);

	/* Turn off all Tx DMA fifos */
	priv->cfg->ops->lib->txq_set_sched(priv, 0);

	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Tell nic where to find the keep-warm buffer */
	ret = iwl_kw_init(priv);
	if (ret) {
		IWL_ERROR("kw_init failed\n");

	/* Alloc and init all Tx queues, including the command queue (#4) */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
			    TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
					txq_id);
		if (ret) {
			IWL_ERROR("Tx queue %d init failed\n", txq_id);

	iwl_hw_txq_ctx_free(priv);
/**
 * iwl_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
 */
void iwl_txq_ctx_stop(struct iwl_priv *priv)

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&priv->lock, flags);
	if (iwl_grab_nic_access(priv)) {
		spin_unlock_irqrestore(&priv->lock, flags);

	priv->cfg->ops->lib->txq_set_sched(priv, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		iwl_write_direct32(priv,
				   FH_TCSR_CHNL_TX_CONFIG_REG(txq_id), 0x0);
		iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
				    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE

	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Deallocate memory for all Tx queues */
	iwl_hw_txq_ctx_free(priv);

EXPORT_SYMBOL(iwl_txq_ctx_stop);
/*
 * Build the basic, rate- and crypto-independent part of the REPLY_TX command.
 */
static void iwl_tx_cmd_build_basic(struct iwl_priv *priv,
				   struct iwl_tx_cmd *tx_cmd,
				   struct ieee80211_tx_info *info,
				   struct ieee80211_hdr *hdr,
				   int is_unicast, u8 std_id)
	__le16 fc = hdr->frame_control;
	__le32 tx_flags = tx_cmd->tx_flags;

	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		tx_flags |= TX_CMD_FLG_ACK_MSK;
		if (ieee80211_is_mgmt(fc))
			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
		if (ieee80211_is_probe_resp(fc) &&
		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
			tx_flags |= TX_CMD_FLG_TSF_MSK;
	} else {
		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	if (ieee80211_is_back_req(fc))
		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;

	tx_cmd->sta_id = std_id;
	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	} else {
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags);

	if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
		tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;

	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
	if (ieee80211_is_mgmt(fc)) {
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->timeout.pm_frame_timeout = 0;
	}

	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = tx_flags;
	tx_cmd->next_frame_len = 0;
#define RTS_HCCA_RETRY_LIMIT		3
#define RTS_DEFAULT_RETRY_LIMIT		60
static void iwl_tx_cmd_build_rate(struct iwl_priv *priv,
				  struct iwl_tx_cmd *tx_cmd,
				  struct ieee80211_tx_info *info,
				  __le16 fc, int sta_id,
				  int is_hcca)
	u8 rts_retry_limit = 0;
	u8 data_retry_limit = 0;

	rate_idx = min(ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xffff,
		       IWL_RATE_COUNT - 1);

	rate_plcp = iwl_rates[rate_idx].plcp;

	rts_retry_limit = (is_hcca) ?
	    RTS_HCCA_RETRY_LIMIT : RTS_DEFAULT_RETRY_LIMIT;

	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;
	if (ieee80211_is_probe_resp(fc)) {
		data_retry_limit = 3;
		if (data_retry_limit < rts_retry_limit)
			rts_retry_limit = data_retry_limit;
	} else
		data_retry_limit = IWL_DEFAULT_TX_RETRY;

	if (priv->data_retry_limit != -1)
		data_retry_limit = priv->data_retry_limit;

	if (ieee80211_is_data(fc)) {
		tx_cmd->initial_rate_index = 0;
		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
	} else {
		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
		case cpu_to_le16(IEEE80211_STYPE_AUTH):
		case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
		case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
		case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
			if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) {
				tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
				tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK;
			}
			break;
		default:
			break;
		}

		/* Alternate between antenna A and B for successive frames */
		if (priv->use_ant_b_for_management_frame) {
			priv->use_ant_b_for_management_frame = 0;
			rate_flags |= RATE_MCS_ANT_B_MSK;
		} else {
			priv->use_ant_b_for_management_frame = 1;
			rate_flags |= RATE_MCS_ANT_A_MSK;
		}
	}

	tx_cmd->rts_retry_limit = rts_retry_limit;
	tx_cmd->data_retry_limit = data_retry_limit;
	tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
static void iwl_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
				      struct ieee80211_tx_info *info,
				      struct iwl_tx_cmd *tx_cmd,
				      struct sk_buff *skb_frag,
				      int sta_id)
	struct ieee80211_key_conf *keyconf = info->control.hw_key;

	switch (keyconf->alg) {
	case ALG_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
		IWL_DEBUG_TX("tx_cmd with aes hwcrypto\n");
		break;

	case ALG_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		ieee80211_get_tkip_key(keyconf, skb_frag,
				       IEEE80211_TKIP_P2_KEY, tx_cmd->key);
		IWL_DEBUG_TX("tx_cmd with tkip hwcrypto\n");
		break;

	case ALG_WEP:
		tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
			(keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);

		if (keyconf->keylen == WEP_KEY_LEN_128)
			tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;

		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

		IWL_DEBUG_TX("Configuring packet for WEP encryption "
			     "with key %d\n", keyconf->keyidx);
		break;

	default:
		printk(KERN_ERR "Unknown encryption alg %d\n", keyconf->alg);
		break;
	}
static void iwl_update_tx_stats(struct iwl_priv *priv, u16 fc, u16 len)
	/* 0 - mgmt, 1 - ctl, 2 - data: IEEE80211_FCTL_FTYPE is 0x000c, so
	 * shifting the frame-control type field right by 2 maps mgmt (0x00),
	 * ctl (0x04) and data (0x08) onto indexes 0, 1 and 2. */
	int idx = (fc & IEEE80211_FCTL_FTYPE) >> 2;

	priv->tx_stats[idx].cnt++;
	priv->tx_stats[idx].bytes += len;
/*
 * Start the REPLY_TX command process.
 */
int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_tfd_frame *tfd;
	int txq_id = skb_get_queue_mapping(skb);
	struct iwl_tx_queue *txq = NULL;
	struct iwl_queue *q = NULL;
	dma_addr_t phys_addr;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	struct iwl_cmd *out_cmd = NULL;
	struct iwl_tx_cmd *tx_cmd;
	u16 len, idx, len_org;
	u8 id, hdr_len, unicast;
	u8 wait_write_ptr = 0;

	spin_lock_irqsave(&priv->lock, flags);
	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_DROP("Dropping - RF KILL\n");

	if (!priv->vif) {
		IWL_DEBUG_DROP("Dropping - !priv->vif\n");

	if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) ==
	    IWL_INVALID_RATE) {
		IWL_ERROR("ERROR: No TX rate available.\n");

	unicast = !is_multicast_ether_addr(hdr->addr1);

	fc = hdr->frame_control;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (ieee80211_is_auth(fc))
		IWL_DEBUG_TX("Sending AUTH frame\n");
	else if (ieee80211_is_assoc_req(fc))
		IWL_DEBUG_TX("Sending ASSOC frame\n");
	else if (ieee80211_is_reassoc_req(fc))
		IWL_DEBUG_TX("Sending REASSOC frame\n");
	/* drop all data frames if we are not associated */
	if (ieee80211_is_data(fc) &&
	    (!iwl_is_associated(priv) ||
	     ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && !priv->assoc_id) ||
	     !priv->assoc_station_added)) {
		IWL_DEBUG_DROP("Dropping - !iwl_is_associated\n");

	spin_unlock_irqrestore(&priv->lock, flags);

	hdr_len = ieee80211_get_hdrlen(le16_to_cpu(fc));
	/* Find (or create) index into station table for destination station */
	sta_id = iwl_get_sta_id(priv, hdr);
	if (sta_id == IWL_INVALID_STATION) {
		DECLARE_MAC_BUF(mac);

		IWL_DEBUG_DROP("Dropping - INVALID STATION: %s\n",
			       print_mac(mac, hdr->addr1));

	IWL_DEBUG_TX("station Id %d\n", sta_id);

	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & 0xf;
		seq_number = priv->stations[sta_id].tid[tid].seq_number &
			     IEEE80211_SCTL_SEQ;
		hdr->seq_ctrl = cpu_to_le16(seq_number) |
				(hdr->seq_ctrl &
				 __constant_cpu_to_le16(IEEE80211_SCTL_FRAG));

		/* aggregation is on for this <sta,tid> */
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
		priv->stations[sta_id].tid[tid].tfds_in_queue++;
	/* Descriptor for chosen Tx queue */
	txq = &priv->txq[txq_id];
	q = &txq->q;

	spin_lock_irqsave(&priv->lock, flags);

	/* Set up first empty TFD within this queue's circular TFD buffer */
	tfd = &txq->bd[q->write_ptr];
	memset(tfd, 0, sizeof(*tfd));
	control_flags = (u32 *) tfd;
	idx = get_cmd_index(q, q->write_ptr, 0);

	/* Set up driver data for this TFD */
	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
	txq->txb[q->write_ptr].skb[0] = skb;

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_cmd = &txq->cmd[idx];
	tx_cmd = &out_cmd->cmd.tx;
	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
	memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));

	/*
	 * Set up the Tx-command (not MAC!) header.
	 * Store the chosen Tx queue and TFD index within the sequence field;
	 * after Tx, uCode's Tx response will return this value so driver can
	 * locate the frame within the tx queue and do post-tx processing.
	 */
	out_cmd->hdr.cmd = REPLY_TX;
	out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
				INDEX_TO_SEQ(q->write_ptr)));
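	/* Presumably QUEUE_TO_SEQ() places the queue id in bits 8 and up and
	 * INDEX_TO_SEQ() the TFD index in the low 8 bits -- the inverse of
	 * SEQ_TO_QUEUE()/SEQ_TO_INDEX() used in iwl_tx_cmd_complete() below;
	 * e.g. queue 4, write_ptr 5 would yield sequence 0x0405.  (Bit
	 * layout inferred, not confirmed from the macro definitions.) */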
	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdr_len);

	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
	      sizeof(struct iwl_cmd_header) + hdr_len;

	len_org = len;
	len = (len + 3) & ~3;
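	/* Rounding to the next dword boundary: e.g. len == 42 becomes 44,
	 * adding the 2 pad bytes described in the comment above, while an
	 * already-aligned len is unchanged; the padded case is reported to
	 * the device via TX_CMD_FLG_MH_PAD_MSK further below.  (Example
	 * values illustrative only.) */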
	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = txq->dma_addr_cmd + sizeof(struct iwl_cmd) * idx +
		     offsetof(struct iwl_cmd, hdr);

	/* Add buffer containing Tx command and MAC(!) header to TFD's
	 * first entry */
	iwl_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len);
	if (!(info->flags & IEEE80211_TX_CTL_DO_NOT_ENCRYPT))
		iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	len = skb->len - hdr_len;
	if (len) {
		phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
					   len, PCI_DMA_TODEVICE);
		iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, len);
	}

	/* Tell NIC about any 2-byte padding after MAC header */
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* Total # bytes to be transmitted */
	len = (u16)skb->len;
	tx_cmd->len = cpu_to_le16(len);
	/* TODO: need this for burst mode later on */
	iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, unicast, sta_id);

	/* set is_hcca to 0; it probably will never be implemented */
	iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, sta_id, 0);

	iwl_update_tx_stats(priv, le16_to_cpu(fc), len);

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
		       offsetof(struct iwl_tx_cmd, scratch);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_address(scratch_phys);

	if (!ieee80211_has_morefrags(hdr->frame_control)) {
		txq->need_update = 1;
		if (qc)
			priv->stations[sta_id].tid[tid].seq_number = seq_number;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));

	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);

	/* Set up entry for this TFD in Tx byte-count array */
	priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, len);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	ret = iwl_txq_update_write_ptr(priv, txq);
	spin_unlock_irqrestore(&priv->lock, flags);

	if ((iwl_queue_space(q) < q->high_mark)
	    && priv->mac80211_registered) {
		if (wait_write_ptr) {
			spin_lock_irqsave(&priv->lock, flags);
			txq->need_update = 1;
			iwl_txq_update_write_ptr(priv, txq);
			spin_unlock_irqrestore(&priv->lock, flags);
		}

		ieee80211_stop_queue(priv->hw, skb_get_queue_mapping(skb));
	}

	spin_unlock_irqrestore(&priv->lock, flags);

EXPORT_SYMBOL(iwl_tx_skb);
/*************** HOST COMMAND QUEUE FUNCTIONS   *****/
/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @priv: pointer to the device private data
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns a value < 0 to indicate that the operation failed.
 * On success, it returns the index (> 0) of the command in the command queue.
 */
int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
	struct iwl_queue *q = &txq->q;
	struct iwl_tfd_frame *tfd;
	struct iwl_cmd *out_cmd;
	dma_addr_t phys_addr;
	unsigned long flags;

	cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
	fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
	/* If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE, and it is sent as a 'small' command then
	 * we will need to increase the size of the TFD entries */
	BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
	       !(cmd->meta.flags & CMD_SIZE_HUGE));

	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_INFO("Not sending command - RF KILL\n");

	if (iwl_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) {
		IWL_ERROR("No space for Tx\n");
	spin_lock_irqsave(&priv->hcmd_lock, flags);

	tfd = &txq->bd[q->write_ptr];
	memset(tfd, 0, sizeof(*tfd));

	control_flags = (u32 *) tfd;

	idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE);
	out_cmd = &txq->cmd[idx];

	out_cmd->hdr.cmd = cmd->id;
	memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta));
	memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);

	/* At this point, the out_cmd now has all of the incoming cmd
	 * information */

	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
					    INDEX_TO_SEQ(q->write_ptr));
	if (out_cmd->meta.flags & CMD_SIZE_HUGE)
		out_cmd->hdr.sequence |= cpu_to_le16(SEQ_HUGE_FRAME);

	phys_addr = txq->dma_addr_cmd + sizeof(txq->cmd[0]) * idx +
		    offsetof(struct iwl_cmd, hdr);
	iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);

	IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, "
		     "%d bytes at %d[%d]:%d\n",
		     get_cmd_string(out_cmd->hdr.cmd),
		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
		     fix_size, q->write_ptr, idx, IWL_CMD_QUEUE_NUM);

	txq->need_update = 1;

	/* Set up entry in queue's byte count circular buffer */
	priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	ret = iwl_txq_update_write_ptr(priv, txq);

	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
	return ret ? ret : idx;
int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_tx_info *tx_info;

	if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
		IWL_ERROR("Read index for DMA queue txq id (%d), index %d, "
			  "is out of range [0-%d] %d %d.\n", txq_id,
			  index, q->n_bd, q->write_ptr, q->read_ptr);

	for (index = iwl_queue_inc_wrap(index, q->n_bd); q->read_ptr != index;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		tx_info = &txq->txb[txq->q.read_ptr];
		ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb[0]);
		tx_info->skb[0] = NULL;

		if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
			priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);

		iwl_hw_txq_free_tfd(priv, txq);

EXPORT_SYMBOL(iwl_tx_queue_reclaim);
/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed.  As a result, free space opens up.  If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;

	if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
		IWL_ERROR("Read index for DMA queue txq id (%d), index %d, "
			  "is out of range [0-%d] %d %d.\n", txq_id,
			  index, q->n_bd, q->write_ptr, q->read_ptr);

	for (index = iwl_queue_inc_wrap(index, q->n_bd); q->read_ptr != index;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

			IWL_ERROR("HCMD skipped: index (%d) %d %d\n", index,
				  q->write_ptr, q->read_ptr);
			queue_work(priv->workqueue, &priv->restart);
/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 *
 * If an Rx buffer has an async callback associated with it, the callback
 * will be executed.  The attached skb (if present) will only be freed
 * if the callback returns 1.
 */
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int huge = sequence & SEQ_HUGE_FRAME;
	struct iwl_cmd *cmd;

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (txq_id != IWL_CMD_QUEUE_NUM)
		IWL_ERROR("Error: wrong command queue %d command id 0x%X\n",
			  txq_id, pkt->hdr.cmd);
	BUG_ON(txq_id != IWL_CMD_QUEUE_NUM);
	cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
	cmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];

	/* Input error checking is done when commands are added to queue. */
	if (cmd->meta.flags & CMD_WANT_SKB) {
		cmd->meta.source->u.skb = rxb->skb;
		rxb->skb = NULL;
	} else if (cmd->meta.u.callback &&
		   !cmd->meta.u.callback(priv, cmd, rxb->skb))
		rxb->skb = NULL;

	iwl_hcmd_queue_reclaim(priv, txq_id, index);

	if (!(cmd->meta.flags & CMD_ASYNC)) {
		clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
		wake_up_interruptible(&priv->wait_command_queue);

EXPORT_SYMBOL(iwl_tx_cmd_complete);
/*
 * Find first available (lowest unused) Tx Queue, mark it "active".
 * Called only when finding queue for aggregation.
 * Should never return anything < 7, because they should already
 * be in use as EDCA AC (0-3), Command (4), HCCA (5, 6).
 */
static int iwl_txq_ctx_activate_free(struct iwl_priv *priv)
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
		if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
			return txq_id;
	return -1;
int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
	unsigned long flags;
	struct iwl_tid_data *tid_data;
	DECLARE_MAC_BUF(mac);

	if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
		tx_fifo = default_tid_to_tx_fifo[tid];

	IWL_WARNING("%s on ra = %s tid = %d\n",
		    __func__, print_mac(mac, ra), tid);

	sta_id = iwl_find_station(priv, ra);
	if (sta_id == IWL_INVALID_STATION)

	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
		IWL_ERROR("Start AGG when state is not IWL_AGG_OFF!\n");

	txq_id = iwl_txq_ctx_activate_free(priv);

	spin_lock_irqsave(&priv->sta_lock, flags);
	tid_data = &priv->stations[sta_id].tid[tid];
	*ssn = SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo,

	if (tid_data->tfds_in_queue == 0) {
		printk(KERN_ERR "HW queue is empty\n");
		tid_data->agg.state = IWL_AGG_ON;
		ieee80211_start_tx_ba_cb_irqsafe(priv->hw, ra, tid);
	} else {
		IWL_DEBUG_HT("HW queue is NOT empty: %d packets in HW queue\n",
			     tid_data->tfds_in_queue);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}

EXPORT_SYMBOL(iwl_tx_agg_start);
int iwl_tx_agg_stop(struct iwl_priv *priv, const u8 *ra, u16 tid)
	int tx_fifo_id, txq_id, sta_id, ssn = -1;
	struct iwl_tid_data *tid_data;
	int ret, write_ptr, read_ptr;
	unsigned long flags;
	DECLARE_MAC_BUF(mac);

	if (!ra) {
		IWL_ERROR("ra = NULL\n");

	if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
		tx_fifo_id = default_tid_to_tx_fifo[tid];

	sta_id = iwl_find_station(priv, ra);

	if (sta_id == IWL_INVALID_STATION)

	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
		IWL_WARNING("Stopping AGG while state not IWL_AGG_ON\n");

	tid_data = &priv->stations[sta_id].tid[tid];
	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
	txq_id = tid_data->agg.txq_id;
	write_ptr = priv->txq[txq_id].q.write_ptr;
	read_ptr = priv->txq[txq_id].q.read_ptr;

	/* The queue is not empty */
	if (write_ptr != read_ptr) {
		IWL_DEBUG_HT("Stopping a non empty AGG HW QUEUE\n");
		priv->stations[sta_id].tid[tid].agg.state =
			IWL_EMPTYING_HW_QUEUE_DELBA;

	IWL_DEBUG_HT("HW queue is empty\n");
	priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;

	spin_lock_irqsave(&priv->lock, flags);
	ret = priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
	spin_unlock_irqrestore(&priv->lock, flags);

	ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, ra, tid);

EXPORT_SYMBOL(iwl_tx_agg_stop);
int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id)
	struct iwl_queue *q = &priv->txq[txq_id].q;
	u8 *addr = priv->stations[sta_id].sta.sta.addr;
	struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];

	switch (priv->stations[sta_id].tid[tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_DELBA:
		/* We are reclaiming the last packet of the
		 * aggregated HW queue */
		if (txq_id == tid_data->agg.txq_id &&
		    q->read_ptr == q->write_ptr) {
			u16 ssn = SEQ_TO_SN(tid_data->seq_number);
			int tx_fifo = default_tid_to_tx_fifo[tid];
			IWL_DEBUG_HT("HW queue empty: continue DELBA flow\n");
			priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
							     ssn, tx_fifo);
			tid_data->agg.state = IWL_AGG_OFF;
			ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, addr, tid);
		}
		break;
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/* We are reclaiming the last packet of the queue */
		if (tid_data->tfds_in_queue == 0) {
			IWL_DEBUG_HT("HW queue empty: continue ADDBA flow\n");
			tid_data->agg.state = IWL_AGG_ON;
			ieee80211_start_tx_ba_cb_irqsafe(priv->hw, addr, tid);
		}
		break;
	}

EXPORT_SYMBOL(iwl_txq_check_empty);
/**
 * iwl_tx_status_reply_compressed_ba - Update tx status from block-ack
 *
 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
 * ACK vs. not.  This gets sent to mac80211, then to rate scaling algo.
 */
static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
					     struct iwl_ht_agg *agg,
					     struct iwl_compressed_ba_resp *ba_resp)
	u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
	struct ieee80211_tx_info *info;

	if (unlikely(!agg->wait_for_ba)) {
		IWL_ERROR("Received BA when not expected\n");

	/* Mark that the expected block-ack response arrived */
	agg->wait_for_ba = 0;
	IWL_DEBUG_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);

	/* Calculate shift to align block-ack bits with our Tx window bits */
	sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
	if (sh < 0) /* tbd: something is wrong with indices */
		sh += 0x100;

	/* don't use 64-bit values for now */
	bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;

	if (agg->frame_count > (64 - sh)) {
		IWL_DEBUG_TX_REPLY("more frames than bitmap size");

	/* check for success or failure according to the
	 * transmitted bitmap and block-ack bitmap */
	bitmap &= agg->bitmap;
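	/* Worked example (values illustrative only): if 4 frames were sent
	 * (agg->bitmap == 0xf after alignment) and the BA bitmap is 0x5,
	 * the AND leaves 0x5 -- frames 0 and 2 were ACKed, frames 1 and 3
	 * were not; the loop below records exactly that. */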
	/* For each frame attempted in aggregation,
	 * update driver's record of tx frame's status. */
	for (i = 0; i < agg->frame_count; i++) {
		ack = bitmap & (1ULL << i);	/* bitmap is 64 bits wide */
		successes += !!ack;
		IWL_DEBUG_TX_REPLY("%s ON i=%d idx=%d raw=%d\n",
				   ack ? "ACK" : "NACK", i,
				   (agg->start_idx + i) & 0xff,
				   agg->start_idx + i);
	}

	info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]);
	memset(&info->status, 0, sizeof(info->status));
	info->flags = IEEE80211_TX_STAT_ACK;
	info->flags |= IEEE80211_TX_STAT_AMPDU;
	info->status.ampdu_ack_map = successes;
	info->status.ampdu_ack_len = agg->frame_count;
	iwl_hwrate_to_tx_control(priv, agg->rate_n_flags, info);

	IWL_DEBUG_TX_REPLY("Bitmap %llx\n", (unsigned long long)bitmap);
/**
 * iwl_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
 *
 * Handles block-acknowledge notification from device, which reports success
 * of frames sent via aggregation.
 */
void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
	struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
	struct iwl_tx_queue *txq = NULL;
	struct iwl_ht_agg *agg;
	DECLARE_MAC_BUF(mac);

	/* "flow" corresponds to Tx queue */
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

	/* "ssn" is start of block-ack Tx window, corresponds to index
	 * (in Tx queue's circular buffer) of first TFD/frame in window */
	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

	if (scd_flow >= priv->hw_params.max_txq_num) {
		IWL_ERROR("BUG: scd_flow is bigger than number of queues\n");

	txq = &priv->txq[scd_flow];
	agg = &priv->stations[ba_resp->sta_id].tid[ba_resp->tid].agg;

	/* Find index just before block-ack window */
	index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
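	/* e.g. with n_bd == 256 and scd_ssn == 261: 261 & 0xff == 5, and
	 * iwl_queue_dec_wrap() yields 4, the entry just before the start
	 * of the block-ack window.  (Numbers illustrative only.) */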
	/* TODO: Need to get this copy more safely - now good for debug */

	IWL_DEBUG_TX_REPLY("REPLY_COMPRESSED_BA [%d]Received from %s, "
			   print_mac(mac, (u8 *) &ba_resp->sta_addr_lo32),

	IWL_DEBUG_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
			   "%d, scd_ssn = %d\n",
			   ba_resp->tid, ba_resp->seq_ctl,
			   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
			   ba_resp->scd_flow, ba_resp->scd_ssn);
	IWL_DEBUG_TX_REPLY("DAT start_idx = %d, bitmap = 0x%llx\n",
			   agg->start_idx,
			   (unsigned long long)agg->bitmap);

	/* Update driver's record of ACK vs. not for each frame in window */
	iwl_tx_status_reply_compressed_ba(priv, agg, ba_resp);

	/* Release all TFDs before the SSN, i.e. all TFDs in front of
	 * block-ack window (we assume that they've been successfully
	 * transmitted ... if not, it's too late anyway). */
	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
		/* calculate mac80211 ampdu sw queue to wake */
		int ampdu_q = scd_flow - priv->hw_params.first_ampdu_q +
			      priv->hw->queues;
		int freed = iwl_tx_queue_reclaim(priv, scd_flow, index);
		priv->stations[ba_resp->sta_id].
			tid[ba_resp->tid].tfds_in_queue -= freed;
		if (iwl_queue_space(&txq->q) > txq->q.low_mark &&
		    priv->mac80211_registered &&
		    agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)
			ieee80211_wake_queue(priv->hw, ampdu_q);

		iwl_txq_check_empty(priv, ba_resp->sta_id,
				    ba_resp->tid, scd_flow);

EXPORT_SYMBOL(iwl_rx_reply_compressed_ba);
#ifdef CONFIG_IWLWIFI_DEBUG
#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x

const char *iwl_get_tx_fail_reason(u32 status)
	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
		TX_STATUS_ENTRY(SHORT_LIMIT);
		TX_STATUS_ENTRY(LONG_LIMIT);
		TX_STATUS_ENTRY(FIFO_UNDERRUN);
		TX_STATUS_ENTRY(MGMNT_ABORT);
		TX_STATUS_ENTRY(NEXT_FRAG);
		TX_STATUS_ENTRY(LIFE_EXPIRE);
		TX_STATUS_ENTRY(DEST_PS);
		TX_STATUS_ENTRY(ABORTED);
		TX_STATUS_ENTRY(BT_RETRY);
		TX_STATUS_ENTRY(STA_INVALID);
		TX_STATUS_ENTRY(FRAG_DROPPED);
		TX_STATUS_ENTRY(TID_DISABLE);
		TX_STATUS_ENTRY(FRAME_FLUSHED);
		TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
		TX_STATUS_ENTRY(TX_LOCKED);
		TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
	}

	return "UNKNOWN";
EXPORT_SYMBOL(iwl_get_tx_fail_reason);
#endif /* CONFIG_IWLWIFI_DEBUG */