/******************************************************************************
 *
 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <net/mac80211.h>
#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
static const u16 default_tid_to_tx_fifo[] = {
	IWL_TX_FIFO_AC1,
	IWL_TX_FIFO_AC0,
	IWL_TX_FIFO_AC0,
	IWL_TX_FIFO_AC1,
	IWL_TX_FIFO_AC2,
	IWL_TX_FIFO_AC2,
	IWL_TX_FIFO_AC3,
	IWL_TX_FIFO_AC3,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_AC3
};
static inline int iwl_alloc_dma_ptr(struct iwl_priv *priv,
				    struct iwl_dma_ptr *ptr, size_t size)
{
	ptr->addr = pci_alloc_consistent(priv->pci_dev, size, &ptr->dma);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}
static inline void iwl_free_dma_ptr(struct iwl_priv *priv,
				    struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	pci_free_consistent(priv->pci_dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}
static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	dma_addr_t addr = get_unaligned_le32(&tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}
static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}
static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				  dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}
static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}
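
#if 0	/* Illustrative sketch, not compiled: how the helpers above pack a
	 * 36-bit DMA address into a TB when dma_addr_t is 64-bit.  Bits
	 * [31:0] of the address land in tb->lo; bits [35:32] share
	 * tb->hi_n_len with the 12-bit length in bits [15:4].  The example
	 * values are hypothetical. */
static void iwl_tfd_tb_packing_example(struct iwl_tfd *tfd)
{
	dma_addr_t addr = 0x9ABCD1234ULL;	/* 36-bit example address */
	u16 len = 0x123;			/* 12-bit example length */

	iwl_tfd_set_tb(tfd, 0, addr, len);
	/* tb->lo       == cpu_to_le32(0xABCD1234)
	 * tb->hi_n_len == cpu_to_le16((0x123 << 4) | 0x9) == 0x1239 */
	BUG_ON(iwl_tfd_tb_get_addr(tfd, 0) != addr);
	BUG_ON(iwl_tfd_tb_get_len(tfd, 0) != len);
}
#endif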
/**
 * iwl_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @priv - driver private data
 * @txq - tx queue
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
static void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)&txq->tfds[0];
	struct iwl_tfd *tfd;
	struct pci_dev *dev = priv->pci_dev;
	int index = txq->q.read_ptr;
	int i;
	int num_tbs;

	tfd = &tfd_tmp[index];

	/* Sanity check on number of chunks */
	num_tbs = iwl_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERROR("Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* Unmap tx_cmd */
	if (num_tbs)
		pci_unmap_single(dev,
				pci_unmap_addr(&txq->cmd[index]->meta, mapping),
				pci_unmap_len(&txq->cmd[index]->meta, len),
				PCI_DMA_BIDIRECTIONAL);

	/* Unmap chunks, if any. */
	for (i = 1; i < num_tbs; i++) {
		pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i),
				iwl_tfd_tb_get_len(tfd, i), PCI_DMA_TODEVICE);

		if (txq->txb) {
			dev_kfree_skb(txq->txb[txq->q.read_ptr].skb[i - 1]);
			txq->txb[txq->q.read_ptr].skb[i - 1] = NULL;
		}
	}
}
static int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
					struct iwl_tfd *tfd,
					dma_addr_t addr, u16 len)
{
	u32 num_tbs = iwl_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum 20 Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERROR("Error can not send more than %d chunks\n",
			  IWL_NUM_OF_TBS);
		return -EINVAL;
	}

	BUG_ON(addr & ~DMA_BIT_MASK(36));
	if (unlikely(addr & ~IWL_TX_DMA_MASK))
		IWL_ERROR("Unaligned address = %llx\n",
			  (unsigned long long)addr);

	iwl_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}
/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int ret = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return ret;

	/* if we're trying to save power */
	if (test_bit(STATUS_POWER_PMI, &priv->status)) {
		/* wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part. */
		reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO("Requesting wakeup, GP1 = 0x%x\n", reg);
			iwl_set_bit(priv, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			return ret;
		}

		/* restore this queue's parameters in nic hardware. */
		ret = iwl_grab_nic_access(priv);
		if (ret)
			return ret;
		iwl_write_direct32(priv, HBUS_TARG_WRPTR,
				   txq->q.write_ptr | (txq_id << 8));
		iwl_release_nic_access(priv);

	/* else not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx). */
	} else
		iwl_write32(priv, HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));

	txq->need_update = 0;

	return ret;
}
EXPORT_SYMBOL(iwl_txq_update_write_ptr);
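
#if 0	/* Illustrative sketch, not compiled: the HBUS_TARG_WRPTR value
	 * written above packs the new write index into the low byte and
	 * shifts the queue id into the byte above it.  Example values are
	 * hypothetical. */
static u32 iwl_wrptr_reg_value_example(void)
{
	u32 txq_id = 4;		/* e.g. the command queue */
	u32 write_ptr = 37;	/* index of next free TFD */

	return write_ptr | (txq_id << 8);	/* == 0x0425 */
}
#endif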
/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct pci_dev *dev = priv->pci_dev;
	int i, len;

	if (q->n_bd == 0)
		return;

	/* first, empty all BD's */
	for (; q->write_ptr != q->read_ptr;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
		iwl_hw_txq_free_tfd(priv, txq);

	len = sizeof(struct iwl_cmd) * q->n_window;

	/* De-alloc array of command/tx buffers */
	for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		pci_free_consistent(dev, sizeof(struct iwl_tfd) *
				    txq->q.n_bd, txq->tfds, txq->q.dma_addr);

	/* De-alloc array of per-TFD driver data */
	kfree(txq->txb);
	txq->txb = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
/**
 * iwl_cmd_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_cmd_queue_free(struct iwl_priv *priv)
{
	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
	struct iwl_queue *q = &txq->q;
	struct pci_dev *dev = priv->pci_dev;
	int i, len;

	if (q->n_bd == 0)
		return;

	len = sizeof(struct iwl_cmd) * q->n_window;
	len += IWL_MAX_SCAN_SIZE;

	/* De-alloc array of command/tx buffers */
	for (i = 0; i <= TFD_CMD_SLOTS; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		pci_free_consistent(dev, sizeof(struct iwl_tfd) *
				    txq->q.n_bd, txq->tfds, txq->q.dma_addr);

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill.  Driver and device exchange status of each
 * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
 *
 * For Tx queues there are low mark and high mark limits.  If, after queuing
 * a packet for Tx, free space becomes less than the low mark, the Tx queue is
 * stopped.  When reclaiming packets (on a 'tx done' IRQ), if free space
 * becomes greater than the high mark, the Tx queue is resumed.
 *
 * See more detailed info in iwl-4965-hw.h.
 ***************************************************/
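
#if 0	/* Illustrative sketch, not compiled: with a power-of-two queue size
	 * (TFD_QUEUE_SIZE_MAX is 256 in this driver), advancing or retreating
	 * an index reduces to a masked add, so the read/write pointers
	 * described above wrap for free.  This is why iwl_queue_init()
	 * rejects non-power-of-two sizes. */
static void iwl_queue_wrap_example(void)
{
	int n_bd = 256;	/* TFD_QUEUE_SIZE_MAX */

	BUG_ON(iwl_queue_inc_wrap(255, n_bd) != 0);	/* wraps to start */
	BUG_ON(iwl_queue_dec_wrap(0, n_bd) != 255);	/* wraps to end */
}
#endif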
int iwl_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;
	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}
EXPORT_SYMBOL(iwl_queue_space);
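
#if 0	/* Illustrative sketch, not compiled: with a hypothetical 64-slot
	 * window, an empty queue reports 64 - 2 == 62 free slots; the
	 * 2-entry reserve keeps a full queue distinguishable from an empty
	 * one. */
static int iwl_queue_space_example(void)
{
	struct iwl_queue q = {
		.n_bd = 256,			/* TFD_QUEUE_SIZE_MAX */
		.n_window = 64,			/* hypothetical window */
		.read_ptr = 0, .write_ptr = 0,	/* empty queue */
	};

	return iwl_queue_space(&q);		/* == 62 */
}
#endif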
/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
			  int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	BUG_ON(!is_power_of_2(count));

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	BUG_ON(!is_power_of_2(slots_num));

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = q->read_ptr = 0;

	return 0;
}
/**
 * iwl_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
 */
static int iwl_tx_queue_alloc(struct iwl_priv *priv,
			      struct iwl_tx_queue *txq, u32 id)
{
	struct pci_dev *dev = priv->pci_dev;

	/* Driver private data, only for Tx (not command) queues,
	 * not shared with device. */
	if (id != IWL_CMD_QUEUE_NUM) {
		txq->txb = kmalloc(sizeof(txq->txb[0]) *
				   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
		if (!txq->txb) {
			IWL_ERROR("kmalloc for auxiliary BD "
				  "structures failed\n");
			goto error;
		}
	} else
		txq->txb = NULL;

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = pci_alloc_consistent(dev,
			sizeof(txq->tfds[0]) * TFD_QUEUE_SIZE_MAX,
			&txq->q.dma_addr);

	if (!txq->tfds) {
		IWL_ERROR("pci_alloc_consistent(%zd) failed\n",
			  sizeof(txq->tfds[0]) * TFD_QUEUE_SIZE_MAX);
		goto error;
	}
	txq->q.id = id;

	return 0;

 error:
	kfree(txq->txb);
	txq->txb = NULL;

	return -ENOMEM;
}
/*
 * Tell nic where to find circular buffer of Tx Frame Descriptors for
 * given Tx queue, and enable the DMA channel used for that queue.
 *
 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
 * channels supported in hardware.
 */
static int iwl_hw_tx_queue_init(struct iwl_priv *priv,
				struct iwl_tx_queue *txq)
{
	int ret;
	unsigned long flags;
	int txq_id = txq->q.id;

	spin_lock_irqsave(&priv->lock, flags);
	ret = iwl_grab_nic_access(priv);
	if (ret) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return ret;
	}

	/* Circular buffer (TFD queue in DRAM) physical base address */
	iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
			   txq->q.dma_addr >> 8);

	/* Enable DMA channel, using same id as for TFD queue */
	iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(txq_id),
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
/**
 * iwl_tx_queue_init - Allocate and initialize one tx/cmd queue
 */
static int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
			     int slots_num, u32 txq_id)
{
	int i, len;
	int ret;

	/*
	 * Alloc buffer array for commands (Tx or other types of commands).
	 * For the command queue (#4), allocate command space + one big
	 * command for scan, since scan command is very huge; the system will
	 * not have two scans at the same time, so only one is needed.
	 * For normal Tx queues (all other queues), no super-size command
	 * space is needed.
	 */
	len = sizeof(struct iwl_cmd);
	for (i = 0; i <= slots_num; i++) {
		if (i == slots_num) {
			if (txq_id == IWL_CMD_QUEUE_NUM)
				len += IWL_MAX_SCAN_SIZE;
			else
				continue;
		}

		txq->cmd[i] = kmalloc(len, GFP_KERNEL);
		if (!txq->cmd[i])
			goto err;
	}

	/* Alloc driver data array and TFD circular buffer */
	ret = iwl_tx_queue_alloc(priv, txq, txq_id);
	if (ret)
		goto err;

	txq->need_update = 0;

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);

	/* Tell device where to find queue */
	iwl_hw_tx_queue_init(priv, txq);

	return 0;
err:
	for (i = 0; i < slots_num; i++) {
		kfree(txq->cmd[i]);
		txq->cmd[i] = NULL;
	}

	if (txq_id == IWL_CMD_QUEUE_NUM) {
		kfree(txq->cmd[slots_num]);
		txq->cmd[slots_num] = NULL;
	}
	return -ENOMEM;
}
/**
 * iwl_hw_txq_ctx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
{
	int txq_id;

	/* Tx queues */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
		if (txq_id == IWL_CMD_QUEUE_NUM)
			iwl_cmd_queue_free(priv);
		else
			iwl_tx_queue_free(priv, txq_id);

	/* Keep-warm buffer */
	iwl_free_dma_ptr(priv, &priv->kw);

	iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
}
EXPORT_SYMBOL(iwl_hw_txq_ctx_free);
/**
 * iwl_txq_ctx_reset - Reset TX queue context
 * Destroys all DMA structures and initialize them again
 *
 * @param priv
 * @return error code
 */
int iwl_txq_ctx_reset(struct iwl_priv *priv)
{
	int ret = 0;
	int txq_id, slots_num;
	unsigned long flags;

	/* Free all tx/cmd queues and keep-warm buffer */
	iwl_hw_txq_ctx_free(priv);

	ret = iwl_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
				priv->hw_params.scd_bc_tbls_size);
	if (ret) {
		IWL_ERROR("Scheduler BC Table allocation failed\n");
		goto error_bc_tbls;
	}

	/* Alloc keep-warm buffer */
	ret = iwl_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERROR("Keep Warm allocation failed\n");
		goto error_kw;
	}

	spin_lock_irqsave(&priv->lock, flags);
	ret = iwl_grab_nic_access(priv);
	if (unlikely(ret)) {
		spin_unlock_irqrestore(&priv->lock, flags);
		goto error_reset;
	}

	/* Turn off all Tx DMA fifos */
	priv->cfg->ops->lib->txq_set_sched(priv, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);

	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4) */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
					txq_id);
		if (ret) {
			IWL_ERROR("Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return ret;

 error:
	iwl_hw_txq_ctx_free(priv);
 error_reset:
	iwl_free_dma_ptr(priv, &priv->kw);
 error_kw:
	iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
 error_bc_tbls:
	return ret;
}
/**
 * iwl_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
 */
void iwl_txq_ctx_stop(struct iwl_priv *priv)
{
	int txq_id;
	unsigned long flags;

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&priv->lock, flags);
	if (iwl_grab_nic_access(priv)) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return;
	}

	priv->cfg->ops->lib->txq_set_sched(priv, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		iwl_write_direct32(priv,
				   FH_TCSR_CHNL_TX_CONFIG_REG(txq_id), 0x0);
		iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
				    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE
				    (txq_id), 200);
	}
	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Deallocate memory for all Tx queues */
	iwl_hw_txq_ctx_free(priv);
}
EXPORT_SYMBOL(iwl_txq_ctx_stop);
/*
 * handle build REPLY_TX command notification.
 */
static void iwl_tx_cmd_build_basic(struct iwl_priv *priv,
				   struct iwl_tx_cmd *tx_cmd,
				   struct ieee80211_tx_info *info,
				   struct ieee80211_hdr *hdr,
				   int is_unicast, u8 std_id)
{
	__le16 fc = hdr->frame_control;
	__le32 tx_flags = tx_cmd->tx_flags;

	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		tx_flags |= TX_CMD_FLG_ACK_MSK;
		if (ieee80211_is_mgmt(fc))
			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
		if (ieee80211_is_probe_resp(fc) &&
		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
			tx_flags |= TX_CMD_FLG_TSF_MSK;
	} else {
		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	if (ieee80211_is_back_req(fc))
		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;

	tx_cmd->sta_id = std_id;
	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	} else {
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags);

	if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
		tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;

	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
	if (ieee80211_is_mgmt(fc)) {
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->timeout.pm_frame_timeout = 0;
	}

	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = tx_flags;
	tx_cmd->next_frame_len = 0;
}
#define RTS_HCCA_RETRY_LIMIT		3
#define RTS_DFAULT_RETRY_LIMIT		60

static void iwl_tx_cmd_build_rate(struct iwl_priv *priv,
				  struct iwl_tx_cmd *tx_cmd,
				  struct ieee80211_tx_info *info,
				  __le16 fc, int sta_id,
				  int is_hcca)
{
	u32 rate_flags = 0;
	int rate_idx;
	u8 rts_retry_limit = 0;
	u8 data_retry_limit = 0;
	u8 rate_plcp;

	rate_idx = min(ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xffff,
			IWL_RATE_COUNT - 1);

	rate_plcp = iwl_rates[rate_idx].plcp;

	rts_retry_limit = (is_hcca) ?
	    RTS_HCCA_RETRY_LIMIT : RTS_DFAULT_RETRY_LIMIT;

	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;

	if (ieee80211_is_probe_resp(fc)) {
		data_retry_limit = 3;
		if (data_retry_limit < rts_retry_limit)
			rts_retry_limit = data_retry_limit;
	} else
		data_retry_limit = IWL_DEFAULT_TX_RETRY;

	if (priv->data_retry_limit != -1)
		data_retry_limit = priv->data_retry_limit;

	if (ieee80211_is_data(fc)) {
		tx_cmd->initial_rate_index = 0;
		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
	} else {
		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
		case cpu_to_le16(IEEE80211_STYPE_AUTH):
		case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
		case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
		case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
			if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) {
				tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
				tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK;
			}
			break;
		default:
			break;
		}

		priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant);
		rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
	}

	tx_cmd->rts_retry_limit = rts_retry_limit;
	tx_cmd->data_retry_limit = data_retry_limit;
	tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
}
static void iwl_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
				      struct ieee80211_tx_info *info,
				      struct iwl_tx_cmd *tx_cmd,
				      struct sk_buff *skb_frag,
				      int sta_id)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;

	switch (keyconf->alg) {
	case ALG_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
		IWL_DEBUG_TX("tx_cmd with AES hwcrypto\n");
		break;

	case ALG_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		ieee80211_get_tkip_key(keyconf, skb_frag,
			IEEE80211_TKIP_P2_KEY, tx_cmd->key);
		IWL_DEBUG_TX("tx_cmd with tkip hwcrypto\n");
		break;

	case ALG_WEP:
		tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
			(keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);

		if (keyconf->keylen == WEP_KEY_LEN_128)
			tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;

		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

		IWL_DEBUG_TX("Configuring packet for WEP encryption "
			     "with key %d\n", keyconf->keyidx);
		break;

	default:
		printk(KERN_ERR "Unknown encryption alg %d\n", keyconf->alg);
		break;
	}
}
static void iwl_update_tx_stats(struct iwl_priv *priv, u16 fc, u16 len)
{
	/* 0 - mgmt, 1 - ctl, 2 - data */
	int idx = (fc & IEEE80211_FCTL_FTYPE) >> 2;
	priv->tx_stats[idx].cnt++;
	priv->tx_stats[idx].bytes += len;
}
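
#if 0	/* Illustrative sketch, not compiled: IEEE80211_FCTL_FTYPE is 0x000c,
	 * so (fc & IEEE80211_FCTL_FTYPE) >> 2 maps the three frame types
	 * straight onto the tx_stats rows used above. */
static void iwl_tx_stats_index_example(void)
{
	BUG_ON(((IEEE80211_FTYPE_MGMT & IEEE80211_FCTL_FTYPE) >> 2) != 0);
	BUG_ON(((IEEE80211_FTYPE_CTL & IEEE80211_FCTL_FTYPE) >> 2) != 1);
	BUG_ON(((IEEE80211_FTYPE_DATA & IEEE80211_FCTL_FTYPE) >> 2) != 2);
}
#endif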
/*
 * start REPLY_TX command process
 */
int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_tfd *tfd;
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	struct iwl_cmd *out_cmd;
	struct iwl_tx_cmd *tx_cmd;
	int swq_id, txq_id;
	dma_addr_t phys_addr;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, len_org;
	u16 seq_number = 0;
	__le16 fc;
	u8 hdr_len, unicast;
	u8 sta_id;
	u8 wait_write_ptr = 0;
	u8 tid = 0;
	u8 *qc = NULL;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->lock, flags);
	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_DROP("Dropping - RF KILL\n");
		goto drop_unlock;
	}

	if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) ==
	     IWL_INVALID_RATE) {
		IWL_ERROR("ERROR: No TX rate available.\n");
		goto drop_unlock;
	}

	unicast = !is_multicast_ether_addr(hdr->addr1);

	fc = hdr->frame_control;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (ieee80211_is_auth(fc))
		IWL_DEBUG_TX("Sending AUTH frame\n");
	else if (ieee80211_is_assoc_req(fc))
		IWL_DEBUG_TX("Sending ASSOC frame\n");
	else if (ieee80211_is_reassoc_req(fc))
		IWL_DEBUG_TX("Sending REASSOC frame\n");
#endif

	/* drop all data frames if we are not associated */
	if (ieee80211_is_data(fc) &&
	    (priv->iw_mode != NL80211_IFTYPE_MONITOR ||
	    !(info->flags & IEEE80211_TX_CTL_INJECTED)) && /* packet injection */
	    (!iwl_is_associated(priv) ||
	     ((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id) ||
	     !priv->assoc_station_added)) {
		IWL_DEBUG_DROP("Dropping - !iwl_is_associated\n");
		goto drop_unlock;
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	hdr_len = ieee80211_hdrlen(fc);

	/* Find (or create) index into station table for destination station */
	sta_id = iwl_get_sta_id(priv, hdr);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_DEBUG_DROP("Dropping - INVALID STATION: %pM\n",
			       hdr->addr1);
		goto drop;
	}

	IWL_DEBUG_TX("station Id %d\n", sta_id);

	swq_id = skb_get_queue_mapping(skb);
	txq_id = swq_id;
	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
		seq_number = priv->stations[sta_id].tid[tid].seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;
		hdr->seq_ctrl = hdr->seq_ctrl &
				__constant_cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(seq_number);
		seq_number += 0x10;
		/* aggregation is on for this <sta,tid> */
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
		priv->stations[sta_id].tid[tid].tfds_in_queue++;
	}

	txq = &priv->txq[txq_id];
	q = &txq->q;
	txq->swq_id = swq_id;

	spin_lock_irqsave(&priv->lock, flags);

	/* Set up first empty TFD within this queue's circular TFD buffer */
	tfd = &txq->tfds[q->write_ptr];
	memset(tfd, 0, sizeof(*tfd));

	/* Set up driver data for this TFD */
	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
	txq->txb[q->write_ptr].skb[0] = skb;

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_cmd = txq->cmd[q->write_ptr];
	tx_cmd = &out_cmd->cmd.tx;
	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
	memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));

	/*
	 * Set up the Tx-command (not MAC!) header.
	 * Store the chosen Tx queue and TFD index within the sequence field;
	 * after Tx, uCode's Tx response will return this value so driver can
	 * locate the frame within the tx queue and do post-tx processing.
	 */
	out_cmd->hdr.cmd = REPLY_TX;
	out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
				INDEX_TO_SEQ(q->write_ptr)));

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdr_len);

	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;

	len_org = len;
	len = (len + 3) & ~3;

	if (len_org != len)
		len_org = 1;
	else
		len_org = 0;

	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = pci_map_single(priv->pci_dev,
				    out_cmd, sizeof(struct iwl_cmd),
				    PCI_DMA_BIDIRECTIONAL);
	pci_unmap_addr_set(&out_cmd->meta, mapping, txcmd_phys);
	pci_unmap_len_set(&out_cmd->meta, len, sizeof(struct iwl_cmd));
	/* Add buffer containing Tx command and MAC(!) header to TFD's
	 * first entry */
	txcmd_phys += offsetof(struct iwl_cmd, hdr);
	iwl_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len);

	if (info->control.hw_key)
		iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	len = skb->len - hdr_len;
	if (len) {
		phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
					   len, PCI_DMA_TODEVICE);
		iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, len);
	}

	/* Tell NIC about any 2-byte padding after MAC header */
	if (len_org)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* Total # bytes to be transmitted */
	len = (u16)skb->len;
	tx_cmd->len = cpu_to_le16(len);
	/* TODO need this for burst mode later on */
	iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, unicast, sta_id);

	/* set is_hcca to 0; it probably will never be implemented */
	iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, sta_id, 0);

	iwl_update_tx_stats(priv, le16_to_cpu(fc), len);

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
		offsetof(struct iwl_tx_cmd, scratch);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	if (!ieee80211_has_morefrags(hdr->frame_control)) {
		txq->need_update = 1;
		if (qc)
			priv->stations[sta_id].tid[tid].seq_number = seq_number;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));

	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);

	/* Set up entry for this TFD in Tx byte-count array */
	priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, len);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	ret = iwl_txq_update_write_ptr(priv, txq);
	spin_unlock_irqrestore(&priv->lock, flags);

	if (ret)
		return ret;

	if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
		if (wait_write_ptr) {
			spin_lock_irqsave(&priv->lock, flags);
			txq->need_update = 1;
			iwl_txq_update_write_ptr(priv, txq);
			spin_unlock_irqrestore(&priv->lock, flags);
		} else {
			ieee80211_stop_queue(priv->hw, txq->swq_id);
		}
	}

	return 0;

drop_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
drop:
	return -1;
}
EXPORT_SYMBOL(iwl_tx_skb);
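
#if 0	/* Illustrative sketch, not compiled: the dword-alignment step in
	 * iwl_tx_skb() above.  Assuming the fixed cmd+header region is a
	 * multiple of 4 bytes, a 26-byte QoS MAC header leaves the region
	 * 2 bytes short of a dword boundary; the device is told about that
	 * pad via TX_CMD_FLG_MH_PAD_MSK. */
static void iwl_mh_pad_example(void)
{
	u16 hdr_len = 26;	/* QoS data frame, 3 addresses */
	u16 len = sizeof(struct iwl_tx_cmd) +
		  sizeof(struct iwl_cmd_header) + hdr_len;
	u16 aligned = (len + 3) & ~3;

	BUG_ON((aligned - len) != 2);	/* MH_PAD is flagged for this frame */
}
#endif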
/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @priv: pointer to device private data
 * @cmd: pointer to the uCode command structure
 *
 * The function returns a value < 0 to indicate that the operation
 * failed. On success, it returns the index (>= 0) of the command in the
 * command queue.
 */
int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
	struct iwl_queue *q = &txq->q;
	struct iwl_tfd *tfd;
	struct iwl_cmd *out_cmd;
	dma_addr_t phys_addr;
	unsigned long flags;
	int len, ret;
	u32 idx;
	u16 fix_size;

	cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
	fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));

	/* If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE, and is sent as a 'small' command then
	 * we will need to increase the size of the TFD entries */
	BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
	       !(cmd->meta.flags & CMD_SIZE_HUGE));

	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_INFO("Not sending command - RF KILL");
		return -EIO;
	}

	if (iwl_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) {
		IWL_ERROR("No space for Tx\n");
		return -ENOSPC;
	}

	spin_lock_irqsave(&priv->hcmd_lock, flags);

	tfd = &txq->tfds[q->write_ptr];
	memset(tfd, 0, sizeof(*tfd));

	idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE);
	out_cmd = txq->cmd[idx];

	out_cmd->hdr.cmd = cmd->id;
	memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta));
	memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);

	/* At this point, the out_cmd now has all of the incoming cmd
	 * information */

	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
			INDEX_TO_SEQ(q->write_ptr));
	if (out_cmd->meta.flags & CMD_SIZE_HUGE)
		out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
	len = (idx == TFD_CMD_SLOTS) ?
			IWL_MAX_SCAN_SIZE : sizeof(struct iwl_cmd);

	phys_addr = pci_map_single(priv->pci_dev, out_cmd,
				   len, PCI_DMA_TODEVICE);
	pci_unmap_addr_set(&out_cmd->meta, mapping, phys_addr);
	pci_unmap_len_set(&out_cmd->meta, len, len);
	phys_addr += offsetof(struct iwl_cmd, hdr);

	iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);

#ifdef CONFIG_IWLWIFI_DEBUG
	switch (out_cmd->hdr.cmd) {
	case REPLY_TX_LINK_QUALITY_CMD:
	case SENSITIVITY_CMD:
		IWL_DEBUG_HC_DUMP("Sending command %s (#%x), seq: 0x%04X, "
				  "%d bytes at %d[%d]:%d\n",
				  get_cmd_string(out_cmd->hdr.cmd),
				  out_cmd->hdr.cmd,
				  le16_to_cpu(out_cmd->hdr.sequence), fix_size,
				  q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
		break;
	default:
		IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, "
			     "%d bytes at %d[%d]:%d\n",
			     get_cmd_string(out_cmd->hdr.cmd),
			     out_cmd->hdr.cmd,
			     le16_to_cpu(out_cmd->hdr.sequence), fix_size,
			     q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
	}
#endif
	txq->need_update = 1;

	/* Set up entry in queue's byte count circular buffer */
	priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	ret = iwl_txq_update_write_ptr(priv, txq);

	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
	return ret ? ret : idx;
}
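
#if 0	/* Illustrative sketch, not compiled: the sequence field built above
	 * round-trips through the uCode response; SEQ_TO_QUEUE/SEQ_TO_INDEX
	 * recover the queue id and TFD index in iwl_tx_cmd_complete().
	 * The example index is hypothetical. */
static void iwl_hcmd_sequence_example(void)
{
	u16 seq = QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) | INDEX_TO_SEQ(12);

	BUG_ON(SEQ_TO_QUEUE(seq) != IWL_CMD_QUEUE_NUM);
	BUG_ON(SEQ_TO_INDEX(seq) != 12);
}
#endif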
int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_tx_info *tx_info;
	int nfreed = 0;

	if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
		IWL_ERROR("Read index for DMA queue txq id (%d), index %d, "
			  "is out of range [0-%d] %d %d.\n", txq_id,
			  index, q->n_bd, q->write_ptr, q->read_ptr);
		return 0;
	}

	for (index = iwl_queue_inc_wrap(index, q->n_bd);
	     q->read_ptr != index;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		tx_info = &txq->txb[txq->q.read_ptr];
		ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb[0]);
		tx_info->skb[0] = NULL;

		if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
			priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);

		iwl_hw_txq_free_tfd(priv, txq);
		nfreed++;
	}
	return nfreed;
}
EXPORT_SYMBOL(iwl_tx_queue_reclaim);
/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space becomes available. If
 * there is enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
				   int idx, int cmd_idx)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int nfreed = 0;

	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
		IWL_ERROR("Read index for DMA queue txq id (%d), index %d, "
			  "is out of range [0-%d] %d %d.\n", txq_id,
			  idx, q->n_bd, q->write_ptr, q->read_ptr);
		return;
	}

	pci_unmap_single(priv->pci_dev,
		pci_unmap_addr(&txq->cmd[cmd_idx]->meta, mapping),
		pci_unmap_len(&txq->cmd[cmd_idx]->meta, len),
		PCI_DMA_TODEVICE);

	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (nfreed++ > 0) {
			IWL_ERROR("HCMD skipped: index (%d) %d %d\n", idx,
				  q->write_ptr, q->read_ptr);
			queue_work(priv->workqueue, &priv->restart);
		}
	}
}
/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 *
 * If an Rx buffer has an async callback associated with it the callback
 * will be executed.  The attached skb (if present) will only be freed
 * if the callback returns 1
 */
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
	struct iwl_cmd *cmd;

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != IWL_CMD_QUEUE_NUM,
		 "wrong command queue %d, sequence 0x%X readp=%d writep=%d\n",
		 txq_id, sequence,
		 priv->txq[IWL_CMD_QUEUE_NUM].q.read_ptr,
		 priv->txq[IWL_CMD_QUEUE_NUM].q.write_ptr)) {
		iwl_print_hex_dump(priv, IWL_DL_INFO, rxb, 32);
		return;
	}

	cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
	cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];

	/* Input error checking is done when commands are added to queue. */
	if (cmd->meta.flags & CMD_WANT_SKB) {
		cmd->meta.source->u.skb = rxb->skb;
		rxb->skb = NULL;
	} else if (cmd->meta.u.callback &&
		   !cmd->meta.u.callback(priv, cmd, rxb->skb))
		rxb->skb = NULL;

	iwl_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);

	if (!(cmd->meta.flags & CMD_ASYNC)) {
		clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
		wake_up_interruptible(&priv->wait_command_queue);
	}
}
EXPORT_SYMBOL(iwl_tx_cmd_complete);
/*
 * Find first available (lowest unused) Tx Queue, mark it "active".
 * Called only when finding queue for aggregation.
 * Should never return anything < 7, because they should already
 * be in use as EDCA AC (0-3), Command (4), HCCA (5, 6).
 */
static int iwl_txq_ctx_activate_free(struct iwl_priv *priv)
{
	int txq_id;

	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
		if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
			return txq_id;
	return -1;
}
int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
{
	int sta_id;
	int tx_fifo;
	int txq_id;
	int ret;
	unsigned long flags;
	struct iwl_tid_data *tid_data;

	if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
		tx_fifo = default_tid_to_tx_fifo[tid];
	else
		return -EINVAL;

	IWL_WARNING("%s on ra = %pM tid = %d\n",
		    __func__, ra, tid);

	sta_id = iwl_find_station(priv, ra);
	if (sta_id == IWL_INVALID_STATION)
		return -ENXIO;

	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
		IWL_ERROR("Start AGG when state is not IWL_AGG_OFF !\n");
		return -ENXIO;
	}

	txq_id = iwl_txq_ctx_activate_free(priv);
	if (txq_id == -1)
		return -ENXIO;

	spin_lock_irqsave(&priv->sta_lock, flags);
	tid_data = &priv->stations[sta_id].tid[tid];
	*ssn = SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo,
						  sta_id, tid, *ssn);
	if (ret)
		return ret;

	if (tid_data->tfds_in_queue == 0) {
		printk(KERN_ERR "HW queue is empty\n");
		tid_data->agg.state = IWL_AGG_ON;
		ieee80211_start_tx_ba_cb_irqsafe(priv->hw, ra, tid);
	} else {
		IWL_DEBUG_HT("HW queue is NOT empty: %d packets in HW queue\n",
			     tid_data->tfds_in_queue);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}
	return ret;
}
EXPORT_SYMBOL(iwl_tx_agg_start);
int iwl_tx_agg_stop(struct iwl_priv *priv, const u8 *ra, u16 tid)
{
	int tx_fifo_id, txq_id, sta_id, ssn = -1;
	struct iwl_tid_data *tid_data;
	int ret, write_ptr, read_ptr;
	unsigned long flags;

	if (!ra) {
		IWL_ERROR("ra = NULL\n");
		return -EINVAL;
	}

	if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
		tx_fifo_id = default_tid_to_tx_fifo[tid];
	else
		return -EINVAL;

	sta_id = iwl_find_station(priv, ra);

	if (sta_id == IWL_INVALID_STATION)
		return -ENXIO;

	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
		IWL_WARNING("Stopping AGG while state not IWL_AGG_ON\n");

	tid_data = &priv->stations[sta_id].tid[tid];
	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
	txq_id = tid_data->agg.txq_id;
	write_ptr = priv->txq[txq_id].q.write_ptr;
	read_ptr = priv->txq[txq_id].q.read_ptr;

	/* The queue is not empty */
	if (write_ptr != read_ptr) {
		IWL_DEBUG_HT("Stopping a non empty AGG HW QUEUE\n");
		priv->stations[sta_id].tid[tid].agg.state =
				IWL_EMPTYING_HW_QUEUE_DELBA;
		return 0;
	}

	IWL_DEBUG_HT("HW queue is empty\n");
	priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;

	spin_lock_irqsave(&priv->lock, flags);
	ret = priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
						   tx_fifo_id);
	spin_unlock_irqrestore(&priv->lock, flags);

	if (ret)
		return ret;

	ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, ra, tid);

	return 0;
}
EXPORT_SYMBOL(iwl_tx_agg_stop);
int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id)
{
	struct iwl_queue *q = &priv->txq[txq_id].q;
	u8 *addr = priv->stations[sta_id].sta.sta.addr;
	struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];

	switch (priv->stations[sta_id].tid[tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_DELBA:
		/* We are reclaiming the last packet of the
		 * aggregated HW queue */
		if ((txq_id == tid_data->agg.txq_id) &&
		    (q->read_ptr == q->write_ptr)) {
			u16 ssn = SEQ_TO_SN(tid_data->seq_number);
			int tx_fifo = default_tid_to_tx_fifo[tid];
			IWL_DEBUG_HT("HW queue empty: continue DELBA flow\n");
			priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
							     ssn, tx_fifo);
			tid_data->agg.state = IWL_AGG_OFF;
			ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, addr, tid);
		}
		break;
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/* We are reclaiming the last packet of the queue */
		if (tid_data->tfds_in_queue == 0) {
			IWL_DEBUG_HT("HW queue empty: continue ADDBA flow\n");
			tid_data->agg.state = IWL_AGG_ON;
			ieee80211_start_tx_ba_cb_irqsafe(priv->hw, addr, tid);
		}
		break;
	}
	return 0;
}
EXPORT_SYMBOL(iwl_txq_check_empty);
/**
 * iwl_tx_status_reply_compressed_ba - Update tx status from block-ack
 *
 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
 * ACK vs. not.  This gets sent to mac80211, then to rate scaling algo.
 */
static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
				 struct iwl_ht_agg *agg,
				 struct iwl_compressed_ba_resp *ba_resp)
{
	int i, sh, ack;
	u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
	u64 bitmap;
	int successes = 0;
	struct ieee80211_tx_info *info;

	if (unlikely(!agg->wait_for_ba)) {
		IWL_ERROR("Received BA when not expected\n");
		return -EINVAL;
	}

	/* Mark that the expected block-ack response arrived */
	agg->wait_for_ba = 0;
	IWL_DEBUG_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);

	/* Calculate shift to align block-ack bits with our Tx window bits */
	sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
	if (sh < 0) /* tbw something is wrong with indices */
		sh += 0x100;

	/* don't use 64-bit values for now */
	bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;

	if (agg->frame_count > (64 - sh)) {
		IWL_DEBUG_TX_REPLY("more frames than bitmap size");
		return -1;
	}

	/* check for success or failure according to the
	 * transmitted bitmap and block-ack bitmap */
	bitmap &= agg->bitmap;

	/* For each frame attempted in aggregation,
	 * update driver's record of tx frame's status. */
	for (i = 0; i < agg->frame_count; i++) {
		ack = bitmap & (1ULL << i);
		successes += !!ack;
		IWL_DEBUG_TX_REPLY("%s ON i=%d idx=%d raw=%d\n",
			ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff,
			agg->start_idx + i);
	}

	info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]);
	memset(&info->status, 0, sizeof(info->status));
	info->flags = IEEE80211_TX_STAT_ACK;
	info->flags |= IEEE80211_TX_STAT_AMPDU;
	info->status.ampdu_ack_map = successes;
	info->status.ampdu_ack_len = agg->frame_count;
	iwl_hwrate_to_tx_control(priv, agg->rate_n_flags, info);

	IWL_DEBUG_TX_REPLY("Bitmap %llx\n", (unsigned long long)bitmap);

	return 0;
}
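
#if 0	/* Illustrative sketch, not compiled: the shift computed above aligns
	 * the BA bitmap with the driver's Tx window.  If the window starts at
	 * index 10 but the BA covers frames from index 8, then sh == 2 and
	 * bit 0 of (bitmap >> sh) reports the frame at index 10.  All values
	 * are hypothetical. */
static void iwl_ba_bitmap_shift_example(void)
{
	u64 ba_bitmap = 0x3cULL;	/* frames 2..5 of the BA window ACKed */
	int start_idx = 10, ba_start = 8;
	int sh = start_idx - ba_start;	/* == 2 */

	BUG_ON(((ba_bitmap >> sh) & 1) != 1);	/* frame at index 10 ACKed */
}
#endif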
/**
 * iwl_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
 *
 * Handles block-acknowledge notification from device, which reports success
 * of frames sent via aggregation.
 */
void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
	struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
	struct iwl_tx_queue *txq = NULL;
	struct iwl_ht_agg *agg;
	int index;
	int sta_id;
	int tid;

	/* "flow" corresponds to Tx queue */
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

	/* "ssn" is start of block-ack Tx window, corresponds to index
	 * (in Tx queue's circular buffer) of first TFD/frame in window */
	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

	if (scd_flow >= priv->hw_params.max_txq_num) {
		IWL_ERROR("BUG_ON scd_flow is bigger than number of queues\n");
		return;
	}

	txq = &priv->txq[scd_flow];
	sta_id = ba_resp->sta_id;
	tid = ba_resp->tid;
	agg = &priv->stations[sta_id].tid[tid].agg;

	/* Find index just before block-ack window */
	index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);

	/* TODO: Need to get this copy more safely - now good for debug */

	IWL_DEBUG_TX_REPLY("REPLY_COMPRESSED_BA [%d] Received from %pM, "
			   "sta_id = %d\n",
			   agg->wait_for_ba,
			   (u8 *) &ba_resp->sta_addr_lo32,
			   ba_resp->sta_id);
	IWL_DEBUG_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
			   "%d, scd_ssn = %d\n",
			   ba_resp->tid,
			   ba_resp->seq_ctl,
			   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
			   ba_resp->scd_flow,
			   ba_resp->scd_ssn);
	IWL_DEBUG_TX_REPLY("DAT start_idx = %d, bitmap = 0x%llx\n",
			   agg->start_idx,
			   (unsigned long long)agg->bitmap);

	/* Update driver's record of ACK vs. not for each frame in window */
	iwl_tx_status_reply_compressed_ba(priv, agg, ba_resp);

	/* Release all TFDs before the SSN, i.e. all TFDs in front of
	 * block-ack window (we assume that they've been successfully
	 * transmitted ... if not, it's too late anyway). */
	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
		/* calculate mac80211 ampdu sw queue to wake */
		int freed = iwl_tx_queue_reclaim(priv, scd_flow, index);
		priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;

		if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
		    priv->mac80211_registered &&
		    (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
			ieee80211_wake_queue(priv->hw, txq->swq_id);

		iwl_txq_check_empty(priv, sta_id, tid, scd_flow);
	}
}
EXPORT_SYMBOL(iwl_rx_reply_compressed_ba);
#ifdef CONFIG_IWLWIFI_DEBUG
#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x

const char *iwl_get_tx_fail_reason(u32 status)
{
	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
		TX_STATUS_ENTRY(SHORT_LIMIT);
		TX_STATUS_ENTRY(LONG_LIMIT);
		TX_STATUS_ENTRY(FIFO_UNDERRUN);
		TX_STATUS_ENTRY(MGMNT_ABORT);
		TX_STATUS_ENTRY(NEXT_FRAG);
		TX_STATUS_ENTRY(LIFE_EXPIRE);
		TX_STATUS_ENTRY(DEST_PS);
		TX_STATUS_ENTRY(ABORTED);
		TX_STATUS_ENTRY(BT_RETRY);
		TX_STATUS_ENTRY(STA_INVALID);
		TX_STATUS_ENTRY(FRAG_DROPPED);
		TX_STATUS_ENTRY(TID_DISABLE);
		TX_STATUS_ENTRY(FRAME_FLUSHED);
		TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
		TX_STATUS_ENTRY(TX_LOCKED);
		TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
	}

	return "UNKNOWN";
}
EXPORT_SYMBOL(iwl_get_tx_fail_reason);
#endif /* CONFIG_IWLWIFI_DEBUG */