/******************************************************************************
 *
 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/etherdevice.h>
#include <net/mac80211.h>
#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-helpers.h"

static const u16 default_tid_to_tx_fifo[] = {
	IWL_TX_FIFO_AC1,
	IWL_TX_FIFO_AC0,
	IWL_TX_FIFO_AC0,
	IWL_TX_FIFO_AC1,
	IWL_TX_FIFO_AC2,
	IWL_TX_FIFO_AC2,
	IWL_TX_FIFO_AC3,
	IWL_TX_FIFO_AC3,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_AC3
};

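/*
 * Note: the table above follows the usual 802.11e user-priority to EDCA
 * access-category pairing for TIDs 0-7 (mapped onto the four AC0-AC3
 * FIFOs); TIDs 8-15 get no FIFO.  The trailing AC3 entry is assumed here
 * to cover the non-QoS/management case.
 */
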
static inline int iwl_alloc_dma_ptr(struct iwl_priv *priv,
				    struct iwl_dma_ptr *ptr, size_t size)
{
	ptr->addr = pci_alloc_consistent(priv->pci_dev, size, &ptr->dma);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

static inline void iwl_free_dma_ptr(struct iwl_priv *priv,
				    struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	pci_free_consistent(priv->pci_dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	dma_addr_t addr = get_unaligned_le32(&tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}

static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}

static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				  dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}

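/*
 * For reference: each TFD buffer entry packs a 36-bit DMA address and a
 * 12-bit length into 6 bytes.  "lo" carries address bits [31:0]; the low
 * nibble of "hi_n_len" carries address bits [35:32] and its upper 12 bits
 * carry the length.  E.g. addr = 0x8_1234_5678, len = 40 gives
 * lo = 0x12345678 and hi_n_len = (40 << 4) | 0x8.  The "(addr >> 16) >> 16"
 * form (instead of ">> 32") avoids an undefined shift when dma_addr_t is
 * only 32 bits wide.
 */
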
static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}

/**
 * iwl_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @priv - driver private data
 * @txq - tx queue
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
static void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)&txq->tfds[0];
	struct iwl_tfd *tfd;
	struct pci_dev *dev = priv->pci_dev;
	int index = txq->q.read_ptr;
	int i;
	int num_tbs;

	tfd = &tfd_tmp[index];

	/* Sanity check on number of chunks */
	num_tbs = iwl_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERROR("Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* Unmap tx_cmd */
	if (num_tbs)
		pci_unmap_single(dev,
				pci_unmap_addr(&txq->cmd[index]->meta, mapping),
				pci_unmap_len(&txq->cmd[index]->meta, len),
				PCI_DMA_TODEVICE);

	/* Unmap chunks, if any. */
	for (i = 1; i < num_tbs; i++) {
		pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i),
				iwl_tfd_tb_get_len(tfd, i), PCI_DMA_TODEVICE);

		/* free SKB */
		dev_kfree_skb(txq->txb[txq->q.read_ptr].skb[i - 1]);
		txq->txb[txq->q.read_ptr].skb[i - 1] = NULL;
	}
}

static int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
					struct iwl_tfd *tfd,
					dma_addr_t addr, u16 len)
{
	u32 num_tbs = iwl_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum 20 Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERROR("Error can not send more than %d chunks\n",
			  IWL_NUM_OF_TBS);
		return -EINVAL;
	}

	BUG_ON(addr & ~DMA_BIT_MASK(36));
	if (unlikely(addr & ~IWL_TX_DMA_MASK))
		IWL_ERROR("Unaligned address = %llx\n",
			  (unsigned long long)addr);

	iwl_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}

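/*
 * For reference: entry 0 of each data TFD is attached in iwl_tx_skb() and
 * points at the iwl_cmd header + Tx command + MAC header block; entry 1
 * (if present) points at the frame payload.  This is why the unmap loop
 * in iwl_hw_txq_free_tfd() above treats index 0 separately.
 */
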
/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int ret = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return ret;

	/* if we're trying to save power */
	if (test_bit(STATUS_POWER_PMI, &priv->status)) {
		/* wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part. */
		reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO("Requesting wakeup, GP1 = 0x%x\n", reg);
			iwl_set_bit(priv, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			return ret;
		}

		/* restore this queue's parameters in nic hardware. */
		ret = iwl_grab_nic_access(priv);
		if (ret)
			return ret;
		iwl_write_direct32(priv, HBUS_TARG_WRPTR,
				   txq->q.write_ptr | (txq_id << 8));
		iwl_release_nic_access(priv);

	/* else not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx). */
	} else
		iwl_write32(priv, HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));

	txq->need_update = 0;

	return ret;
}
EXPORT_SYMBOL(iwl_txq_update_write_ptr);

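/*
 * For reference: the value written to HBUS_TARG_WRPTR above encodes the
 * TFD index in the low byte and the Tx queue number starting at bit 8,
 * i.e. "write_ptr | (txq_id << 8)", presumably decoded the same way on
 * the device side.
 */
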
/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct pci_dev *dev = priv->pci_dev;
	int i, len;

	if (q->n_bd == 0)
		return;

	/* first, empty all BD's */
	for (; q->write_ptr != q->read_ptr;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
		iwl_hw_txq_free_tfd(priv, txq);

	len = sizeof(struct iwl_cmd) * q->n_window;

	/* De-alloc array of command/tx buffers */
	for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		pci_free_consistent(dev, sizeof(struct iwl_tfd) *
				    txq->q.n_bd, txq->tfds, txq->q.dma_addr);

	/* De-alloc array of per-TFD driver data */
	kfree(txq->txb);
	txq->txb = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

/**
 * iwl_cmd_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_cmd_queue_free(struct iwl_priv *priv)
{
	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
	struct iwl_queue *q = &txq->q;
	struct pci_dev *dev = priv->pci_dev;
	int i, len;

	if (q->n_bd == 0)
		return;

	len = sizeof(struct iwl_cmd) * q->n_window;
	len += IWL_MAX_SCAN_SIZE;

	/* De-alloc array of command/tx buffers */
	for (i = 0; i <= TFD_CMD_SLOTS; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		pci_free_consistent(dev, sizeof(struct iwl_tfd) *
				    txq->q.n_bd, txq->tfds, txq->q.dma_addr);

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill.  Driver and device exchange status of each
 * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
 *
 * For Tx queues, there are low mark and high mark limits.  If, after queuing
 * a packet for Tx, the free space becomes less than the low mark, the Tx queue
 * is stopped.  When reclaiming packets (on 'tx done' IRQ), if the free space
 * becomes greater than the high mark, the Tx queue is resumed.
 *
 * See more detailed info in iwl-4965-hw.h.
 ***************************************************/

int iwl_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;

	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}
EXPORT_SYMBOL(iwl_queue_space);

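/*
 * Worked example: with n_bd = 256, n_window = 64, read_ptr = 10 and
 * write_ptr = 70: s = 10 - 70 = -60; read_ptr < write_ptr, so n_bd is not
 * subtracted; s <= 0, so s += 64 -> 4; minus the 2-entry reserve leaves
 * 2 slots reported free.
 */
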
/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
			  int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	BUG_ON(!is_power_of_2(count));

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	BUG_ON(!is_power_of_2(slots_num));

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = q->read_ptr = 0;

	return 0;
}

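/*
 * Example values: a data queue with slots_num = 256 gets low_mark = 64
 * and high_mark = 32; the command queue (slots_num = TFD_CMD_SLOTS, 32 in
 * this driver) gets low_mark = 8 and high_mark = 4, so the minimum clamps
 * above only matter for very small windows.
 */
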
/**
 * iwl_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
 */
static int iwl_tx_queue_alloc(struct iwl_priv *priv,
			      struct iwl_tx_queue *txq, u32 id)
{
	struct pci_dev *dev = priv->pci_dev;

	/* Driver private data, only for Tx (not command) queues,
	 * not shared with device. */
	if (id != IWL_CMD_QUEUE_NUM) {
		txq->txb = kmalloc(sizeof(txq->txb[0]) *
				   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
		if (!txq->txb) {
			IWL_ERROR("kmalloc for auxiliary BD "
				  "structures failed\n");
			goto error;
		}
	} else
		txq->txb = NULL;

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = pci_alloc_consistent(dev,
			sizeof(txq->tfds[0]) * TFD_QUEUE_SIZE_MAX,
			&txq->q.dma_addr);
	if (!txq->tfds) {
		IWL_ERROR("pci_alloc_consistent(%zd) failed\n",
			  sizeof(txq->tfds[0]) * TFD_QUEUE_SIZE_MAX);
		goto error;
	}
	txq->q.id = id;

	return 0;

 error:
	kfree(txq->txb);
	txq->txb = NULL;

	return -ENOMEM;
}

/*
 * Tell nic where to find circular buffer of Tx Frame Descriptors for
 * a given Tx queue, and enable the DMA channel used for that queue.
 *
 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
 * channels supported in hardware.
 */
static int iwl_hw_tx_queue_init(struct iwl_priv *priv,
				struct iwl_tx_queue *txq)
{
	int ret;
	unsigned long flags;
	int txq_id = txq->q.id;

	spin_lock_irqsave(&priv->lock, flags);
	ret = iwl_grab_nic_access(priv);
	if (ret) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return ret;
	}

	/* Circular buffer (TFD queue in DRAM) physical base address */
	iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
			   txq->q.dma_addr >> 8);

	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}

/**
 * iwl_tx_queue_init - Allocate and initialize one tx/cmd queue
 */
static int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
			     int slots_num, u32 txq_id)
{
	int i, len;
	int ret;

	/*
	 * Alloc buffer array for commands (Tx or other types of commands).
	 * For the command queue (#4), allocate command space + one big
	 * command for scan, since scan command is very huge; the system will
	 * not have two scans at the same time, so only one is needed.
	 * For normal Tx queues (all other queues), no super-size command
	 * space is needed.
	 */
	len = sizeof(struct iwl_cmd);
	for (i = 0; i <= slots_num; i++) {
		if (i == slots_num) {
			if (txq_id == IWL_CMD_QUEUE_NUM)
				len += IWL_MAX_SCAN_SIZE;
			else
				continue;
		}

		txq->cmd[i] = kmalloc(len, GFP_KERNEL);
		if (!txq->cmd[i])
			goto err;
	}

	/* Alloc driver data array and TFD circular buffer */
	ret = iwl_tx_queue_alloc(priv, txq, txq_id);
	if (ret)
		goto err;

	txq->need_update = 0;

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);

	/* Tell device where to find queue */
	iwl_hw_tx_queue_init(priv, txq);

	return 0;
err:
	for (i = 0; i < slots_num; i++) {
		kfree(txq->cmd[i]);
		txq->cmd[i] = NULL;
	}

	if (txq_id == IWL_CMD_QUEUE_NUM) {
		kfree(txq->cmd[slots_num]);
		txq->cmd[slots_num] = NULL;
	}
	return -ENOMEM;
}

/**
 * iwl_hw_txq_ctx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
{
	int txq_id;

	/* Tx queues */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
		if (txq_id == IWL_CMD_QUEUE_NUM)
			iwl_cmd_queue_free(priv);
		else
			iwl_tx_queue_free(priv, txq_id);

	iwl_free_dma_ptr(priv, &priv->kw);

	iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
}
EXPORT_SYMBOL(iwl_hw_txq_ctx_free);

/**
 * iwl_txq_ctx_reset - Reset TX queue context
 * Destroys all DMA structures and initializes them again
 *
 * @param priv
 * @return error code
 */
int iwl_txq_ctx_reset(struct iwl_priv *priv)
{
	int ret = 0;
	int txq_id, slots_num;
	unsigned long flags;

	/* Free all tx/cmd queues and keep-warm buffer */
	iwl_hw_txq_ctx_free(priv);

	ret = iwl_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
				priv->hw_params.scd_bc_tbls_size);
	if (ret) {
		IWL_ERROR("Scheduler BC Table allocation failed\n");
		goto error_bc_tbls;
	}
	/* Alloc keep-warm buffer */
	ret = iwl_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERROR("Keep Warm allocation failed\n");
		goto error_kw;
	}
	spin_lock_irqsave(&priv->lock, flags);
	ret = iwl_grab_nic_access(priv);
	if (unlikely(ret)) {
		spin_unlock_irqrestore(&priv->lock, flags);
		goto error_reset;
	}

	/* Turn off all Tx DMA fifos */
	priv->cfg->ops->lib->txq_set_sched(priv, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);

	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4) */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
					txq_id);
		if (ret) {
			IWL_ERROR("Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return ret;

 error:
	iwl_hw_txq_ctx_free(priv);
 error_reset:
	iwl_free_dma_ptr(priv, &priv->kw);
 error_kw:
	iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
 error_bc_tbls:
	return ret;
}

/**
 * iwl_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
 */
void iwl_txq_ctx_stop(struct iwl_priv *priv)
{
	int txq_id;
	unsigned long flags;

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&priv->lock, flags);
	if (iwl_grab_nic_access(priv)) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return;
	}

	priv->cfg->ops->lib->txq_set_sched(priv, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		iwl_write_direct32(priv,
				   FH_TCSR_CHNL_TX_CONFIG_REG(txq_id), 0x0);
		iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
				    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE
				    (txq_id), 200);
	}
	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Deallocate memory for all Tx queues */
	iwl_hw_txq_ctx_free(priv);
}
EXPORT_SYMBOL(iwl_txq_ctx_stop);

/*
 * Handle building the REPLY_TX command.
 */
static void iwl_tx_cmd_build_basic(struct iwl_priv *priv,
				  struct iwl_tx_cmd *tx_cmd,
				  struct ieee80211_tx_info *info,
				  struct ieee80211_hdr *hdr,
				  int is_unicast, u8 std_id)
{
	__le16 fc = hdr->frame_control;
	__le32 tx_flags = tx_cmd->tx_flags;

	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		tx_flags |= TX_CMD_FLG_ACK_MSK;
		if (ieee80211_is_mgmt(fc))
			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
		if (ieee80211_is_probe_resp(fc) &&
		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
			tx_flags |= TX_CMD_FLG_TSF_MSK;
	} else {
		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	if (ieee80211_is_back_req(fc))
		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;

	tx_cmd->sta_id = std_id;
	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	} else {
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags);

	if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
		tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;

	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
	if (ieee80211_is_mgmt(fc)) {
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->timeout.pm_frame_timeout = 0;
	}

	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = tx_flags;
	tx_cmd->next_frame_len = 0;
}

#define RTS_HCCA_RETRY_LIMIT		3
#define RTS_DFAULT_RETRY_LIMIT		60

static void iwl_tx_cmd_build_rate(struct iwl_priv *priv,
				  struct iwl_tx_cmd *tx_cmd,
				  struct ieee80211_tx_info *info,
				  __le16 fc, int sta_id,
				  int is_hcca)
{
	u32 rate_flags = 0;
	int rate_idx;
	u8 rts_retry_limit = 0;
	u8 data_retry_limit = 0;
	u8 rate_plcp;

	rate_idx = min(ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xffff,
			IWL_RATE_COUNT - 1);

	rate_plcp = iwl_rates[rate_idx].plcp;

	rts_retry_limit = (is_hcca) ?
	    RTS_HCCA_RETRY_LIMIT : RTS_DFAULT_RETRY_LIMIT;

	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;

	if (ieee80211_is_probe_resp(fc)) {
		data_retry_limit = 3;
		if (data_retry_limit < rts_retry_limit)
			rts_retry_limit = data_retry_limit;
	} else
		data_retry_limit = IWL_DEFAULT_TX_RETRY;

	if (priv->data_retry_limit != -1)
		data_retry_limit = priv->data_retry_limit;

	if (ieee80211_is_data(fc)) {
		tx_cmd->initial_rate_index = 0;
		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
	} else {
		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
		case cpu_to_le16(IEEE80211_STYPE_AUTH):
		case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
		case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
		case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
			if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) {
				tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
				tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK;
			}
			break;
		default:
			break;
		}

		priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant);
		rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
	}

	tx_cmd->rts_retry_limit = rts_retry_limit;
	tx_cmd->data_retry_limit = data_retry_limit;
	tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
}

static void iwl_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
				      struct ieee80211_tx_info *info,
				      struct iwl_tx_cmd *tx_cmd,
				      struct sk_buff *skb_frag,
				      int sta_id)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;

	switch (keyconf->alg) {
	case ALG_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
		IWL_DEBUG_TX("tx_cmd with AES hwcrypto\n");
		break;

	case ALG_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		ieee80211_get_tkip_key(keyconf, skb_frag,
			IEEE80211_TKIP_P2_KEY, tx_cmd->key);
		IWL_DEBUG_TX("tx_cmd with tkip hwcrypto\n");
		break;

	case ALG_WEP:
		tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
			(keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);

		if (keyconf->keylen == WEP_KEY_LEN_128)
			tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;

		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

		IWL_DEBUG_TX("Configuring packet for WEP encryption "
			     "with key %d\n", keyconf->keyidx);
		break;

	default:
		printk(KERN_ERR "Unknown encode alg %d\n", keyconf->alg);
		break;
	}
}

static void iwl_update_tx_stats(struct iwl_priv *priv, u16 fc, u16 len)
{
	/* 0 - mgmt, 1 - ctl, 2 - data */
	int idx = (fc & IEEE80211_FCTL_FTYPE) >> 2;
	priv->tx_stats[idx].cnt++;
	priv->tx_stats[idx].bytes += len;
}

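/*
 * For reference: IEEE80211_FCTL_FTYPE occupies bits 2-3 of the frame
 * control field, so "(fc & IEEE80211_FCTL_FTYPE) >> 2" yields 0 for
 * management, 1 for control and 2 for data frames, indexing tx_stats[]
 * directly.
 */
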
/*
 * start REPLY_TX command process
 */
int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_tfd *tfd;
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	struct iwl_cmd *out_cmd;
	struct iwl_tx_cmd *tx_cmd;
	int swq_id, txq_id;
	dma_addr_t phys_addr;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, len_org;
	u16 seq_number = 0;
	__le16 fc;
	u8 hdr_len, unicast;
	u8 sta_id;
	u8 wait_write_ptr = 0;
	u8 tid = 0;
	u8 *qc = NULL;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->lock, flags);
	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_DROP("Dropping - RF KILL\n");
		goto drop_unlock;
	}

	if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) ==
	     IWL_INVALID_RATE) {
		IWL_ERROR("ERROR: No TX rate available.\n");
		goto drop_unlock;
	}

	unicast = !is_multicast_ether_addr(hdr->addr1);

	fc = hdr->frame_control;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (ieee80211_is_auth(fc))
		IWL_DEBUG_TX("Sending AUTH frame\n");
	else if (ieee80211_is_assoc_req(fc))
		IWL_DEBUG_TX("Sending ASSOC frame\n");
	else if (ieee80211_is_reassoc_req(fc))
		IWL_DEBUG_TX("Sending REASSOC frame\n");
#endif

	/* drop all data frames if we are not associated */
	if (ieee80211_is_data(fc) &&
	    (priv->iw_mode != NL80211_IFTYPE_MONITOR ||
	    !(info->flags & IEEE80211_TX_CTL_INJECTED)) && /* packet injection */
	    (!iwl_is_associated(priv) ||
	     ((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id) ||
	     !priv->assoc_station_added)) {
		IWL_DEBUG_DROP("Dropping - !iwl_is_associated\n");
		goto drop_unlock;
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	hdr_len = ieee80211_hdrlen(fc);

	/* Find (or create) index into station table for destination station */
	sta_id = iwl_get_sta_id(priv, hdr);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_DEBUG_DROP("Dropping - INVALID STATION: %pM\n",
			       hdr->addr1);
		goto drop;
	}

	IWL_DEBUG_TX("station Id %d\n", sta_id);

	swq_id = skb_get_queue_mapping(skb);
	txq_id = swq_id;
	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
		seq_number = priv->stations[sta_id].tid[tid].seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;
		hdr->seq_ctrl = hdr->seq_ctrl &
				__constant_cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(seq_number);
		seq_number += 0x10;
		/* aggregation is on for this <sta,tid> */
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
		priv->stations[sta_id].tid[tid].tfds_in_queue++;
	}

	txq = &priv->txq[txq_id];
	q = &txq->q;
	txq->swq_id = swq_id;

	spin_lock_irqsave(&priv->lock, flags);

	/* Set up first empty TFD within this queue's circular TFD buffer */
	tfd = &txq->tfds[q->write_ptr];
	memset(tfd, 0, sizeof(*tfd));

	/* Set up driver data for this TFD */
	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
	txq->txb[q->write_ptr].skb[0] = skb;

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_cmd = txq->cmd[q->write_ptr];
	tx_cmd = &out_cmd->cmd.tx;
	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
	memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));

	/*
	 * Set up the Tx-command (not MAC!) header.
	 * Store the chosen Tx queue and TFD index within the sequence field;
	 * after Tx, uCode's Tx response will return this value so driver can
	 * locate the frame within the tx queue and do post-tx processing.
	 */
	out_cmd->hdr.cmd = REPLY_TX;
	out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
				INDEX_TO_SEQ(q->write_ptr)));
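	/*
	 * Sequence-field layout (driver convention): TFD index in the low
	 * byte, Tx queue number in the next five bits; SEQ_TO_QUEUE() and
	 * SEQ_TO_INDEX() in the Rx path undo this encoding.
	 */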

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdr_len);

	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;

	len_org = len;
	len = (len + 3) & ~3;

	if (len_org != len)
		len_org = 1;
	else
		len_org = 0;

	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = pci_map_single(priv->pci_dev,
				    out_cmd, sizeof(struct iwl_cmd),
				    PCI_DMA_TODEVICE);
	pci_unmap_addr_set(&out_cmd->meta, mapping, txcmd_phys);
	pci_unmap_len_set(&out_cmd->meta, len, sizeof(struct iwl_cmd));
	/* Add buffer containing Tx command and MAC(!) header to TFD's
	 * first entry */
	txcmd_phys += offsetof(struct iwl_cmd, hdr);
	iwl_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len);

	if (info->control.hw_key)
		iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	len = skb->len - hdr_len;
	if (len) {
		phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
					   len, PCI_DMA_TODEVICE);
		iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, len);
	}

	/* Tell NIC about any 2-byte padding after MAC header */
	if (len_org)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* Total # bytes to be transmitted */
	len = (u16)skb->len;
	tx_cmd->len = cpu_to_le16(len);
	/* TODO need this for burst mode later on */
	iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, unicast, sta_id);

	/* set is_hcca to 0; it probably will never be implemented */
	iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, sta_id, 0);

	iwl_update_tx_stats(priv, le16_to_cpu(fc), len);

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
		offsetof(struct iwl_tx_cmd, scratch);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	if (!ieee80211_has_morefrags(hdr->frame_control)) {
		txq->need_update = 1;
		if (qc)
			priv->stations[sta_id].tid[tid].seq_number = seq_number;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));

	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);

	/* Set up entry for this TFD in Tx byte-count array */
	priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, len);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	ret = iwl_txq_update_write_ptr(priv, txq);
	spin_unlock_irqrestore(&priv->lock, flags);

	if (ret)
		return ret;

	if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
		if (wait_write_ptr) {
			spin_lock_irqsave(&priv->lock, flags);
			txq->need_update = 1;
			iwl_txq_update_write_ptr(priv, txq);
			spin_unlock_irqrestore(&priv->lock, flags);
		} else {
			ieee80211_stop_queue(priv->hw, txq->swq_id);
		}
	}

	return 0;

drop_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
drop:
	return -1;
}
EXPORT_SYMBOL(iwl_tx_skb);

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data pointer
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate failure.
 * On success, it returns the index (> 0) of the command in the
 * command queue.
 */
int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
	struct iwl_queue *q = &txq->q;
	struct iwl_tfd *tfd;
	struct iwl_cmd *out_cmd;
	dma_addr_t phys_addr;
	unsigned long flags;
	int len, ret;
	u32 idx;
	u16 fix_size;

	cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
	fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));

	/* If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then
	 * we will need to increase the size of the TFD entries */
	BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
	       !(cmd->meta.flags & CMD_SIZE_HUGE));

	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_INFO("Not sending command - RF KILL");
		return -EIO;
	}

	if (iwl_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) {
		IWL_ERROR("No space for Tx\n");
		return -ENOSPC;
	}

	spin_lock_irqsave(&priv->hcmd_lock, flags);

	tfd = &txq->tfds[q->write_ptr];
	memset(tfd, 0, sizeof(*tfd));

	idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE);
	out_cmd = txq->cmd[idx];

	out_cmd->hdr.cmd = cmd->id;
	memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta));
	memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);

	/* At this point, the out_cmd now has all of the incoming cmd
	 * information */

	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
			INDEX_TO_SEQ(q->write_ptr));
	if (out_cmd->meta.flags & CMD_SIZE_HUGE)
		out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
	len = (idx == TFD_CMD_SLOTS) ?
			IWL_MAX_SCAN_SIZE : sizeof(struct iwl_cmd);

	phys_addr = pci_map_single(priv->pci_dev, out_cmd,
				   len, PCI_DMA_TODEVICE);
	pci_unmap_addr_set(&out_cmd->meta, mapping, phys_addr);
	pci_unmap_len_set(&out_cmd->meta, len, len);
	phys_addr += offsetof(struct iwl_cmd, hdr);

	iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);

#ifdef CONFIG_IWLWIFI_DEBUG
	switch (out_cmd->hdr.cmd) {
	case REPLY_TX_LINK_QUALITY_CMD:
	case SENSITIVITY_CMD:
		IWL_DEBUG_HC_DUMP("Sending command %s (#%x), seq: 0x%04X, "
				"%d bytes at %d[%d]:%d\n",
				get_cmd_string(out_cmd->hdr.cmd),
				out_cmd->hdr.cmd,
				le16_to_cpu(out_cmd->hdr.sequence), fix_size,
				q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
		break;
	default:
		IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, "
				"%d bytes at %d[%d]:%d\n",
				get_cmd_string(out_cmd->hdr.cmd),
				out_cmd->hdr.cmd,
				le16_to_cpu(out_cmd->hdr.sequence), fix_size,
				q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
	}
#endif
	txq->need_update = 1;

	/* Set up entry in queue's byte count circular buffer */
	priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	ret = iwl_txq_update_write_ptr(priv, txq);

	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
	return ret ? ret : idx;
}

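/*
 * A minimal usage sketch (assuming the iwl-hcmd.c helpers, which are not
 * part of this file): a synchronous caller does roughly
 *
 *	idx = iwl_enqueue_hcmd(priv, cmd);
 *	if (idx < 0)
 *		return idx;
 *	wait_event_interruptible_timeout(priv->wait_command_queue, ...);
 *
 * and the response is routed back via the sequence field in
 * iwl_tx_cmd_complete() below.
 */
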
int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_tx_info *tx_info;
	int nfreed = 0;

	if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
		IWL_ERROR("Read index for DMA queue txq id (%d), index %d, "
			  "is out of range [0-%d] %d %d.\n", txq_id,
			  index, q->n_bd, q->write_ptr, q->read_ptr);
		return 0;
	}

	for (index = iwl_queue_inc_wrap(index, q->n_bd);
	     q->read_ptr != index;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		tx_info = &txq->txb[txq->q.read_ptr];
		ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb[0]);
		tx_info->skb[0] = NULL;

		if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
			priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);

		iwl_hw_txq_free_tfd(priv, txq);
		nfreed++;
	}
	return nfreed;
}
EXPORT_SYMBOL(iwl_tx_queue_reclaim);

/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms.  If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
				   int idx, int cmd_idx)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int nfreed = 0;

	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
		IWL_ERROR("Read index for DMA queue txq id (%d), index %d, "
			  "is out of range [0-%d] %d %d.\n", txq_id,
			  idx, q->n_bd, q->write_ptr, q->read_ptr);
		return;
	}

	pci_unmap_single(priv->pci_dev,
		pci_unmap_addr(&txq->cmd[cmd_idx]->meta, mapping),
		pci_unmap_len(&txq->cmd[cmd_idx]->meta, len),
		PCI_DMA_TODEVICE);

	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (nfreed++ > 0) {
			IWL_ERROR("HCMD skipped: index (%d) %d %d\n", idx,
					q->write_ptr, q->read_ptr);
			queue_work(priv->workqueue, &priv->restart);
		}
	}
}

/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 *
 * If an Rx buffer has an async callback associated with it the callback
 * will be executed.  The attached skb (if present) will only be freed
 * if the callback returns 1
 */
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
	struct iwl_cmd *cmd;

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != IWL_CMD_QUEUE_NUM,
		 "wrong command queue %d, sequence 0x%X readp=%d writep=%d\n",
		  txq_id, sequence,
		  priv->txq[IWL_CMD_QUEUE_NUM].q.read_ptr,
		  priv->txq[IWL_CMD_QUEUE_NUM].q.write_ptr)) {
		iwl_print_hex_dump(priv, IWL_DL_INFO, rxb, 32);
		return;
	}

	cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
	cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];

	/* Input error checking is done when commands are added to queue. */
	if (cmd->meta.flags & CMD_WANT_SKB) {
		cmd->meta.source->u.skb = rxb->skb;
		rxb->skb = NULL;
	} else if (cmd->meta.u.callback &&
		   !cmd->meta.u.callback(priv, cmd, rxb->skb))
		rxb->skb = NULL;

	iwl_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);

	if (!(cmd->meta.flags & CMD_ASYNC)) {
		clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
		wake_up_interruptible(&priv->wait_command_queue);
	}
}
EXPORT_SYMBOL(iwl_tx_cmd_complete);

/*
 * Find first available (lowest unused) Tx Queue, mark it "active".
 * Called only when finding queue for aggregation.
 * Should never return anything < 7, because they should already
 * be in use as EDCA AC (0-3), Command (4), HCCA (5, 6).
 */
static int iwl_txq_ctx_activate_free(struct iwl_priv *priv)
{
	int txq_id;

	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
		if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
			return txq_id;
	return -1;
}

int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
{
	int sta_id;
	int tx_fifo;
	int txq_id;
	int ret;
	unsigned long flags;
	struct iwl_tid_data *tid_data;

	if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
		tx_fifo = default_tid_to_tx_fifo[tid];
	else
		return -EINVAL;

	IWL_WARNING("%s on ra = %pM tid = %d\n",
			__func__, ra, tid);

	sta_id = iwl_find_station(priv, ra);
	if (sta_id == IWL_INVALID_STATION)
		return -ENXIO;

	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
		IWL_ERROR("Start AGG when state is not IWL_AGG_OFF !\n");
		return -ENXIO;
	}

	txq_id = iwl_txq_ctx_activate_free(priv);
	if (txq_id == -1)
		return -ENXIO;

	spin_lock_irqsave(&priv->sta_lock, flags);
	tid_data = &priv->stations[sta_id].tid[tid];
	*ssn = SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo,
						  sta_id, tid, *ssn);
	if (ret)
		return ret;

	if (tid_data->tfds_in_queue == 0) {
		printk(KERN_ERR "HW queue is empty\n");
		tid_data->agg.state = IWL_AGG_ON;
		ieee80211_start_tx_ba_cb_irqsafe(priv->hw, ra, tid);
	} else {
		IWL_DEBUG_HT("HW queue is NOT empty: %d packets in HW queue\n",
			     tid_data->tfds_in_queue);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}
	return ret;
}
EXPORT_SYMBOL(iwl_tx_agg_start);

int iwl_tx_agg_stop(struct iwl_priv *priv, const u8 *ra, u16 tid)
{
	int tx_fifo_id, txq_id, sta_id, ssn = -1;
	struct iwl_tid_data *tid_data;
	int ret, write_ptr, read_ptr;
	unsigned long flags;

	if (!ra) {
		IWL_ERROR("ra = NULL\n");
		return -EINVAL;
	}

	if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
		tx_fifo_id = default_tid_to_tx_fifo[tid];
	else
		return -EINVAL;

	sta_id = iwl_find_station(priv, ra);

	if (sta_id == IWL_INVALID_STATION)
		return -ENXIO;

	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
		IWL_WARNING("Stopping AGG while state not IWL_AGG_ON\n");

	tid_data = &priv->stations[sta_id].tid[tid];
	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
	txq_id = tid_data->agg.txq_id;
	write_ptr = priv->txq[txq_id].q.write_ptr;
	read_ptr = priv->txq[txq_id].q.read_ptr;

	/* The queue is not empty */
	if (write_ptr != read_ptr) {
		IWL_DEBUG_HT("Stopping a non empty AGG HW QUEUE\n");
		priv->stations[sta_id].tid[tid].agg.state =
				IWL_EMPTYING_HW_QUEUE_DELBA;
		return 0;
	}

	IWL_DEBUG_HT("HW queue is empty\n");
	priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;

	spin_lock_irqsave(&priv->lock, flags);
	ret = priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
						   tx_fifo_id);
	spin_unlock_irqrestore(&priv->lock, flags);

	if (ret)
		return ret;

	ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, ra, tid);

	return 0;
}
EXPORT_SYMBOL(iwl_tx_agg_stop);

int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id)
{
	struct iwl_queue *q = &priv->txq[txq_id].q;
	u8 *addr = priv->stations[sta_id].sta.sta.addr;
	struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];

	switch (priv->stations[sta_id].tid[tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_DELBA:
		/* We are reclaiming the last packet of the
		 * aggregated HW queue */
		if ((txq_id == tid_data->agg.txq_id) &&
		    (q->read_ptr == q->write_ptr)) {
			u16 ssn = SEQ_TO_SN(tid_data->seq_number);
			int tx_fifo = default_tid_to_tx_fifo[tid];
			IWL_DEBUG_HT("HW queue empty: continue DELBA flow\n");
			priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
							     ssn, tx_fifo);
			tid_data->agg.state = IWL_AGG_OFF;
			ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, addr, tid);
		}
		break;
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/* We are reclaiming the last packet of the queue */
		if (tid_data->tfds_in_queue == 0) {
			IWL_DEBUG_HT("HW queue empty: continue ADDBA flow\n");
			tid_data->agg.state = IWL_AGG_ON;
			ieee80211_start_tx_ba_cb_irqsafe(priv->hw, addr, tid);
		}
		break;
	}
	return 0;
}
EXPORT_SYMBOL(iwl_txq_check_empty);

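/*
 * Aggregation state flow, as implemented above: iwl_tx_agg_start() moves
 * a <sta,tid> straight from IWL_AGG_OFF to IWL_AGG_ON if its HW queue is
 * already empty, otherwise parks it in IWL_EMPTYING_HW_QUEUE_ADDBA until
 * iwl_txq_check_empty() sees tfds_in_queue reach zero.  iwl_tx_agg_stop()
 * is symmetric, draining through IWL_EMPTYING_HW_QUEUE_DELBA before the
 * queue is torn down.
 */
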
/**
 * iwl_tx_status_reply_compressed_ba - Update tx status from block-ack
 *
 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
 * ACK vs. not.  This gets sent to mac80211, then to rate scaling algo.
 */
static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
				 struct iwl_ht_agg *agg,
				 struct iwl_compressed_ba_resp *ba_resp)
{
	int i, sh, ack;
	u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
	u64 bitmap;
	int successes = 0;
	struct ieee80211_tx_info *info;

	if (unlikely(!agg->wait_for_ba)) {
		IWL_ERROR("Received BA when not expected\n");
		return -EINVAL;
	}

	/* Mark that the expected block-ack response arrived */
	agg->wait_for_ba = 0;
	IWL_DEBUG_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);

	/* Calculate shift to align block-ack bits with our Tx window bits */
	sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
	if (sh < 0) /* something is wrong with indices */
		sh += 0x100;

	/* don't use 64-bit values for now */
	bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;

	if (agg->frame_count > (64 - sh)) {
		IWL_DEBUG_TX_REPLY("more frames than bitmap size");
		return -1;
	}

	/* check for success or failure according to the
	 * transmitted bitmap and block-ack bitmap */
	bitmap &= agg->bitmap;

	/* For each frame attempted in aggregation,
	 * update driver's record of tx frame's status. */
	for (i = 0; i < agg->frame_count ; i++) {
		ack = bitmap & (1ULL << i);
		successes += !!ack;
		IWL_DEBUG_TX_REPLY("%s ON i=%d idx=%d raw=%d\n",
			ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff,
			agg->start_idx + i);
	}

	info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]);
	memset(&info->status, 0, sizeof(info->status));
	info->flags = IEEE80211_TX_STAT_ACK;
	info->flags |= IEEE80211_TX_STAT_AMPDU;
	info->status.ampdu_ack_map = successes;
	info->status.ampdu_ack_len = agg->frame_count;
	iwl_hwrate_to_tx_control(priv, agg->rate_n_flags, info);

	IWL_DEBUG_TX_REPLY("Bitmap %llx\n", (unsigned long long)bitmap);

	return 0;
}

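/*
 * Shift example: if the driver's Tx window starts at agg->start_idx = 5
 * but the BA's seq_ctl maps to index 3, then sh = 2 and the device's
 * bitmap is shifted right by 2 so that bit 0 lines up with the first
 * frame the driver attempted; ANDing with agg->bitmap then leaves only
 * the frames that were actually part of this aggregate.
 */
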
/**
 * iwl_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
 *
 * Handles block-acknowledge notification from device, which reports success
 * of frames sent via aggregation.
 */
void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
	struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
	struct iwl_tx_queue *txq = NULL;
	struct iwl_ht_agg *agg;
	int index;
	int sta_id;
	int tid;

	/* "flow" corresponds to Tx queue */
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

	/* "ssn" is start of block-ack Tx window, corresponds to index
	 * (in Tx queue's circular buffer) of first TFD/frame in window */
	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

	if (scd_flow >= priv->hw_params.max_txq_num) {
		IWL_ERROR("BUG_ON scd_flow is bigger than number of queues\n");
		return;
	}

	txq = &priv->txq[scd_flow];
	sta_id = ba_resp->sta_id;
	tid = ba_resp->tid;
	agg = &priv->stations[sta_id].tid[tid].agg;

	/* Find index just before block-ack window */
	index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);

	/* TODO: Need to get this copy more safely - now good for debug */

	IWL_DEBUG_TX_REPLY("REPLY_COMPRESSED_BA [%d] Received from %pM, "
			   "sta_id = %d\n",
			   agg->wait_for_ba,
			   (u8 *) &ba_resp->sta_addr_lo32,
			   ba_resp->sta_id);
	IWL_DEBUG_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
			   "%d, scd_ssn = %d\n",
			   ba_resp->tid,
			   ba_resp->seq_ctl,
			   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
			   ba_resp->scd_flow,
			   ba_resp->scd_ssn);
	IWL_DEBUG_TX_REPLY("DAT start_idx = %d, bitmap = 0x%llx\n",
			   agg->start_idx,
			   (unsigned long long)agg->bitmap);

	/* Update driver's record of ACK vs. not for each frame in window */
	iwl_tx_status_reply_compressed_ba(priv, agg, ba_resp);

	/* Release all TFDs before the SSN, i.e. all TFDs in front of
	 * block-ack window (we assume that they've been successfully
	 * transmitted ... if not, it's too late anyway). */
	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
		/* calculate mac80211 ampdu sw queue to wake */
		int freed = iwl_tx_queue_reclaim(priv, scd_flow, index);
		priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;

		if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
		    priv->mac80211_registered &&
		    (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
			ieee80211_wake_queue(priv->hw, txq->swq_id);

		iwl_txq_check_empty(priv, sta_id, tid, scd_flow);
	}
}
EXPORT_SYMBOL(iwl_rx_reply_compressed_ba);

#ifdef CONFIG_IWLWIFI_DEBUG
#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x

const char *iwl_get_tx_fail_reason(u32 status)
{
	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
		TX_STATUS_ENTRY(SHORT_LIMIT);
		TX_STATUS_ENTRY(LONG_LIMIT);
		TX_STATUS_ENTRY(FIFO_UNDERRUN);
		TX_STATUS_ENTRY(MGMNT_ABORT);
		TX_STATUS_ENTRY(NEXT_FRAG);
		TX_STATUS_ENTRY(LIFE_EXPIRE);
		TX_STATUS_ENTRY(DEST_PS);
		TX_STATUS_ENTRY(ABORTED);
		TX_STATUS_ENTRY(BT_RETRY);
		TX_STATUS_ENTRY(STA_INVALID);
		TX_STATUS_ENTRY(FRAG_DROPPED);
		TX_STATUS_ENTRY(TID_DISABLE);
		TX_STATUS_ENTRY(FRAME_FLUSHED);
		TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
		TX_STATUS_ENTRY(TX_LOCKED);
		TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
	}

	return "UNKNOWN";
}
EXPORT_SYMBOL(iwl_get_tx_fail_reason);
#endif /* CONFIG_IWLWIFI_DEBUG */