/******************************************************************************
 *
 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <net/mac80211.h>
#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
/**
 * iwl_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
int iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
        struct iwl_tfd_frame *bd_tmp = (struct iwl_tfd_frame *)&txq->bd[0];
        struct iwl_tfd_frame *bd = &bd_tmp[txq->q.read_ptr];
        struct pci_dev *dev = priv->pci_dev;
        int i;
        int counter = 0;
        int index, is_odd;

        /* Host command buffers stay mapped in memory, nothing to clean */
        if (txq->q.id == IWL_CMD_QUEUE_NUM)
                return 0;

        /* Sanity check on number of chunks */
        counter = IWL_GET_BITS(*bd, num_tbs);
        if (counter > MAX_NUM_OF_TBS) {
                IWL_ERROR("Too many chunks: %i\n", counter);
                /* @todo issue fatal error, this is quite a serious situation */
                return 0;
        }

        /* Unmap chunks, if any.
         * TFD info for odd chunks is different format than for even chunks. */
        for (i = 0; i < counter; i++) {
                index = i / 2;
                is_odd = i & 0x1;

                if (is_odd)
                        pci_unmap_single(dev,
                                IWL_GET_BITS(bd->pa[index], tb2_addr_lo16) |
                                (IWL_GET_BITS(bd->pa[index],
                                              tb2_addr_hi20) << 16),
                                IWL_GET_BITS(bd->pa[index], tb2_len),
                                PCI_DMA_TODEVICE);
                else if (i > 0)
                        pci_unmap_single(dev,
                                le32_to_cpu(bd->pa[index].tb1_addr),
                                IWL_GET_BITS(bd->pa[index], tb1_len),
                                PCI_DMA_TODEVICE);

                /* Free SKB, if any, for this chunk */
                if (txq->txb[txq->q.read_ptr].skb[i]) {
                        struct sk_buff *skb = txq->txb[txq->q.read_ptr].skb[i];

                        dev_kfree_skb(skb);
                        txq->txb[txq->q.read_ptr].skb[i] = NULL;
                }
        }
        return 0;
}
EXPORT_SYMBOL(iwl_hw_txq_free_tfd);
int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *ptr,
                                 dma_addr_t addr, u16 len)
{
        int index, is_odd;
        struct iwl_tfd_frame *tfd = ptr;
        u32 num_tbs = IWL_GET_BITS(*tfd, num_tbs);

        /* Each TFD can point to a maximum 20 Tx buffers */
        if ((num_tbs >= MAX_NUM_OF_TBS) || (num_tbs < 0)) {
                IWL_ERROR("Error can not send more than %d chunks\n",
                          MAX_NUM_OF_TBS);
                return -EINVAL;
        }

        index = num_tbs / 2;
        is_odd = num_tbs & 0x1;

        if (!is_odd) {
                tfd->pa[index].tb1_addr = cpu_to_le32(addr);
                IWL_SET_BITS(tfd->pa[index], tb1_addr_hi,
                             iwl_get_dma_hi_address(addr));
                IWL_SET_BITS(tfd->pa[index], tb1_len, len);
        } else {
                IWL_SET_BITS(tfd->pa[index], tb2_addr_lo16,
                             (u32) (addr & 0xffff));
                IWL_SET_BITS(tfd->pa[index], tb2_addr_hi20, addr >> 16);
                IWL_SET_BITS(tfd->pa[index], tb2_len, len);
        }

        IWL_SET_BITS(*tfd, num_tbs, num_tbs + 1);

        return 0;
}
EXPORT_SYMBOL(iwl_hw_txq_attach_buf_to_tfd);
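
/*
 * Illustrative sketch (not driver code): the even/odd split above packs two
 * DMA chunks into each pa[] slot of a TFD.  Chunk 2n lands in pa[n]'s tb1
 * fields, chunk 2n+1 in pa[n]'s tb2 fields:
 *
 *	index  = num_tbs / 2;	which pa[] slot
 *	is_odd = num_tbs & 0x1;	tb1 (even) or tb2 (odd) half of that slot
 *
 * So a typical data frame uses pa[0] only: the first attach call (Tx command
 * + MAC header) fills pa[0].tb1_*, the second (skb payload) fills
 * pa[0].tb2_*, and a third chunk would start pa[1].tb1_*.
 */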
/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
        u32 reg = 0;
        int ret = 0;
        int txq_id = txq->q.id;

        if (txq->need_update == 0)
                return ret;

        /* if we're trying to save power */
        if (test_bit(STATUS_POWER_PMI, &priv->status)) {
                /* wake up nic if it's powered down ...
                 * uCode will wake up, and interrupt us again, so next
                 * time we'll skip this part. */
                reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

                if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
                        IWL_DEBUG_INFO("Requesting wakeup, GP1 = 0x%x\n", reg);
                        iwl_set_bit(priv, CSR_GP_CNTRL,
                                    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
                        return ret;
                }

                /* restore this queue's parameters in nic hardware. */
                ret = iwl_grab_nic_access(priv);
                if (ret)
                        return ret;
                iwl_write_direct32(priv, HBUS_TARG_WRPTR,
                                   txq->q.write_ptr | (txq_id << 8));
                iwl_release_nic_access(priv);

        /* else not in power-save mode, uCode will never sleep when we're
         * trying to tx (during RFKILL, we're not trying to tx). */
        } else
                iwl_write32(priv, HBUS_TARG_WRPTR,
                            txq->q.write_ptr | (txq_id << 8));

        txq->need_update = 0;

        return ret;
}
EXPORT_SYMBOL(iwl_txq_update_write_ptr);
/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
        struct iwl_queue *q = &txq->q;
        struct pci_dev *dev = priv->pci_dev;
        int len;

        if (q->n_bd == 0)
                return;

        /* first, empty all BD's */
        for (; q->write_ptr != q->read_ptr;
             q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
                iwl_hw_txq_free_tfd(priv, txq);

        len = sizeof(struct iwl_cmd) * q->n_window;
        if (q->id == IWL_CMD_QUEUE_NUM)
                len += IWL_MAX_SCAN_SIZE;

        /* De-alloc array of command/tx buffers */
        pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);

        /* De-alloc circular buffer of TFDs */
        if (txq->q.n_bd)
                pci_free_consistent(dev, sizeof(struct iwl_tfd_frame) *
                                    txq->q.n_bd, txq->bd, txq->q.dma_addr);

        /* De-alloc array of per-TFD driver data */
        kfree(txq->txb);
        txq->txb = NULL;

        /* 0-fill queue descriptor structure */
        memset(txq, 0, sizeof(*txq));
}
/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill.  Driver and device exchange status of each
 * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
 *
 * For Tx queue, there are low mark and high mark limits.  If, after queuing
 * the packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on 'tx done' IRQ), if free space becomes > high
 * mark, the Tx queue is resumed.
 *
 * See more detailed info in iwl-4965-hw.h.
 ***************************************************/
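
/*
 * A minimal sketch of the wrap helpers this file leans on (the real
 * iwl_queue_inc_wrap/iwl_queue_dec_wrap live in iwl-helpers.h; only the
 * arithmetic is restated here, as an illustration):
 *
 *	inc_wrap(index, n_bd): index + 1 == n_bd ? 0 : index + 1
 *	dec_wrap(index, n_bd): index == 0 ? n_bd - 1 : index - 1
 *
 * i.e. increment/decrement modulo the circular buffer size n_bd, which
 * iwl_queue_init() below requires to be a power of two.
 */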
int iwl_queue_space(const struct iwl_queue *q)
{
        int s = q->read_ptr - q->write_ptr;

        if (q->read_ptr > q->write_ptr)
                s -= q->n_bd;
        if (s <= 0)
                s += q->n_window;
        /* keep some reserve to not confuse empty and full situations */
        s -= 2;
        if (s < 0)
                s = 0;
        return s;
}
EXPORT_SYMBOL(iwl_queue_space);
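
/*
 * Worked example for iwl_queue_space() (numbers are illustrative only):
 * with n_window = 64, read_ptr = 10 and write_ptr = 60, s starts at -50;
 * read_ptr is not above write_ptr, so there is no n_bd correction;
 * s += 64 gives 14, and the 2-entry reserve leaves 12 usable slots.
 */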
/**
 * iwl_hw_txq_ctx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
{
        int txq_id;

        /* Tx queues */
        for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
                iwl_tx_queue_free(priv, &priv->txq[txq_id]);

        /* Keep-warm buffer */
        iwl_kw_free(priv);
}
EXPORT_SYMBOL(iwl_hw_txq_ctx_free);
/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
                          int count, int slots_num, u32 id)
{
        q->n_bd = count;
        q->n_window = slots_num;
        q->id = id;

        /* count must be power-of-two size, otherwise iwl_queue_inc_wrap
         * and iwl_queue_dec_wrap are broken. */
        BUG_ON(!is_power_of_2(count));

        /* slots_num must be power-of-two size, otherwise
         * get_cmd_index is broken. */
        BUG_ON(!is_power_of_2(slots_num));

        q->low_mark = q->n_window / 4;
        if (q->low_mark < 4)
                q->low_mark = 4;
        q->high_mark = q->n_window / 8;
        if (q->high_mark < 2)
                q->high_mark = 2;

        q->write_ptr = q->read_ptr = 0;

        return 0;
}
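
/*
 * Illustrative watermarks from the arithmetic above, assuming the default
 * slot counts of 256 for a Tx queue and 32 for the command queue: a Tx
 * queue gets low_mark = 64 and high_mark = 32; the command queue gets
 * low_mark = 8 and high_mark = 4.  The clamps (low_mark >= 4,
 * high_mark >= 2) only matter for very small windows.
 */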
/**
 * iwl_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
 */
static int iwl_tx_queue_alloc(struct iwl_priv *priv,
                              struct iwl_tx_queue *txq, u32 id)
{
        struct pci_dev *dev = priv->pci_dev;

        /* Driver private data, only for Tx (not command) queues,
         * not shared with device. */
        if (id != IWL_CMD_QUEUE_NUM) {
                txq->txb = kmalloc(sizeof(txq->txb[0]) *
                                   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
                if (!txq->txb) {
                        IWL_ERROR("kmalloc for auxiliary BD "
                                  "structures failed\n");
                        goto error;
                }
        } else
                txq->txb = NULL;

        /* Circular buffer of transmit frame descriptors (TFDs),
         * shared with device */
        txq->bd = pci_alloc_consistent(dev,
                        sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX,
                        &txq->q.dma_addr);
        if (!txq->bd) {
                IWL_ERROR("pci_alloc_consistent(%zd) failed\n",
                          sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX);
                goto error;
        }
        txq->q.id = id;

        return 0;

 error:
        kfree(txq->txb);
        txq->txb = NULL;
        return -ENOMEM;
}
/**
 * iwl_hw_tx_queue_init - Init a queue in hardware
 *
 * Tell nic where to find circular buffer of Tx Frame Descriptors for
 * a given Tx queue, and enable the DMA channel used for that queue.
 *
 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
 * channels supported in hardware.
 */
static int iwl_hw_tx_queue_init(struct iwl_priv *priv,
                                struct iwl_tx_queue *txq)
{
        int rc;
        unsigned long flags;
        int txq_id = txq->q.id;

        spin_lock_irqsave(&priv->lock, flags);
        rc = iwl_grab_nic_access(priv);
        if (rc) {
                spin_unlock_irqrestore(&priv->lock, flags);
                return rc;
        }

        /* Circular buffer (TFD queue in DRAM) physical base address */
        iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
                           txq->q.dma_addr >> 8);

        /* Enable DMA channel, using same id as for TFD queue */
        iwl_write_direct32(
                priv, FH_TCSR_CHNL_TX_CONFIG_REG(txq_id),
                FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
                FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
        iwl_release_nic_access(priv);
        spin_unlock_irqrestore(&priv->lock, flags);

        return 0;
}
/**
 * iwl_tx_queue_init - Allocate and initialize one tx/cmd queue
 */
static int iwl_tx_queue_init(struct iwl_priv *priv,
                             struct iwl_tx_queue *txq,
                             int slots_num, u32 txq_id)
{
        struct pci_dev *dev = priv->pci_dev;
        int len;
        int rc = 0;

        /*
         * Alloc buffer array for commands (Tx or other types of commands).
         * For the command queue (#4), allocate command space + one big
         * command for scan, since scan command is very huge; the system will
         * not have two scans at the same time, so only one is needed.
         * For normal Tx queues (all other queues), no super-size command
         * space is needed.
         */
        len = sizeof(struct iwl_cmd) * slots_num;
        if (txq_id == IWL_CMD_QUEUE_NUM)
                len += IWL_MAX_SCAN_SIZE;
        txq->cmd = pci_alloc_consistent(dev, len, &txq->dma_addr_cmd);
        if (!txq->cmd)
                return -ENOMEM;

        /* Alloc driver data array and TFD circular buffer */
        rc = iwl_tx_queue_alloc(priv, txq, txq_id);
        if (rc) {
                pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);
                return -ENOMEM;
        }
        txq->need_update = 0;

        /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
         * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
        BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

        /* Initialize queue's high/low-water marks, and head/tail indexes */
        iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);

        /* Tell device where to find queue */
        iwl_hw_tx_queue_init(priv, txq);

        return 0;
}
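
/*
 * Layout note (follows from the allocation above): txq->cmd is a flat DMA
 * array of struct iwl_cmd, so slot i's command header sits at
 *
 *	txq->dma_addr_cmd + sizeof(struct iwl_cmd) * i
 *			  + offsetof(struct iwl_cmd, hdr)
 *
 * which is exactly how iwl_tx_skb() and iwl_enqueue_hcmd() below compute
 * the physical address handed to iwl_hw_txq_attach_buf_to_tfd().
 */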
/**
 * iwl_txq_ctx_reset - Reset TX queue context
 * Destroys all DMA structures and initialise them again
 */
int iwl_txq_ctx_reset(struct iwl_priv *priv)
{
        int ret = 0;
        int txq_id, slots_num;

        /* Free all tx/cmd queues and keep-warm buffer */
        iwl_hw_txq_ctx_free(priv);

        /* Alloc keep-warm buffer */
        ret = iwl_kw_alloc(priv);
        if (ret) {
                IWL_ERROR("Keep Warm allocation failed\n");
                goto error_kw;
        }

        /* Turn off all Tx DMA fifos */
        ret = priv->cfg->ops->lib->disable_tx_fifo(priv);
        if (unlikely(ret))
                goto error_reset;

        /* Tell nic where to find the keep-warm buffer */
        ret = iwl_kw_init(priv);
        if (ret) {
                IWL_ERROR("kw_init failed\n");
                goto error_reset;
        }

        /* Alloc and init all (default 16) Tx queues,
         * including the command queue (#4) */
        for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
                slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
                            TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
                ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
                                        txq_id);
                if (ret) {
                        IWL_ERROR("Tx %d queue init failed\n", txq_id);
                        goto error;
                }
        }

        return ret;

 error:
        iwl_hw_txq_ctx_free(priv);
 error_reset:
        iwl_kw_free(priv);
 error_kw:
        return ret;
}
/*
 * handle build REPLY_TX command notification.
 */
static void iwl_tx_cmd_build_basic(struct iwl_priv *priv,
                                   struct iwl_tx_cmd *tx_cmd,
                                   struct ieee80211_tx_info *info,
                                   struct ieee80211_hdr *hdr,
                                   int is_unicast, u8 std_id)
{
        u16 fc = le16_to_cpu(hdr->frame_control);
        __le32 tx_flags = tx_cmd->tx_flags;

        tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
        if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
                tx_flags |= TX_CMD_FLG_ACK_MSK;
                if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
                        tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
                if (ieee80211_is_probe_response(fc) &&
                    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
                        tx_flags |= TX_CMD_FLG_TSF_MSK;
        } else {
                tx_flags &= (~TX_CMD_FLG_ACK_MSK);
                tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
        }

        if (ieee80211_is_back_request(fc))
                tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;

        tx_cmd->sta_id = std_id;
        if (ieee80211_get_morefrag(hdr))
                tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

        if (ieee80211_is_qos_data(fc)) {
                u8 *qc = ieee80211_get_qos_ctrl(hdr, ieee80211_get_hdrlen(fc));
                tx_cmd->tid_tspec = qc[0] & 0xf;
                tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
        } else {
                tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
        }

        if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) {
                tx_flags |= TX_CMD_FLG_RTS_MSK;
                tx_flags &= ~TX_CMD_FLG_CTS_MSK;
        } else if (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) {
                tx_flags &= ~TX_CMD_FLG_RTS_MSK;
                tx_flags |= TX_CMD_FLG_CTS_MSK;
        }

        if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
                tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;

        tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
        if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) {
                if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ ||
                    (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
                        tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
                else
                        tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
        } else {
                tx_cmd->timeout.pm_frame_timeout = 0;
        }

        tx_cmd->driver_txop = 0;
        tx_cmd->tx_flags = tx_flags;
        tx_cmd->next_frame_len = 0;
}
#define RTS_HCCA_RETRY_LIMIT		3
#define RTS_DFAULT_RETRY_LIMIT		60

static void iwl_tx_cmd_build_rate(struct iwl_priv *priv,
                                  struct iwl_tx_cmd *tx_cmd,
                                  struct ieee80211_tx_info *info,
                                  u16 fc, int sta_id,
                                  int is_hcca)
{
        u8 rts_retry_limit = 0;
        u8 data_retry_limit = 0;
        u8 rate_plcp;
        u16 rate_flags = 0;
        int rate_idx;

        rate_idx = min(ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xffff,
                       IWL_RATE_COUNT - 1);

        rate_plcp = iwl_rates[rate_idx].plcp;

        rts_retry_limit = (is_hcca) ?
                          RTS_HCCA_RETRY_LIMIT : RTS_DFAULT_RETRY_LIMIT;

        if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
                rate_flags |= RATE_MCS_CCK_MSK;

        if (ieee80211_is_probe_response(fc)) {
                data_retry_limit = 3;
                if (data_retry_limit < rts_retry_limit)
                        rts_retry_limit = data_retry_limit;
        } else
                data_retry_limit = IWL_DEFAULT_TX_RETRY;

        if (priv->data_retry_limit != -1)
                data_retry_limit = priv->data_retry_limit;

        if (ieee80211_is_data(fc)) {
                tx_cmd->initial_rate_index = 0;
                tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
        } else {
                switch (fc & IEEE80211_FCTL_STYPE) {
                case IEEE80211_STYPE_AUTH:
                case IEEE80211_STYPE_DEAUTH:
                case IEEE80211_STYPE_ASSOC_REQ:
                case IEEE80211_STYPE_REASSOC_REQ:
                        if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) {
                                tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
                                tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK;
                        }
                        break;
                default:
                        break;
                }

                /* Alternate between antenna A and B for successive frames */
                if (priv->use_ant_b_for_management_frame) {
                        priv->use_ant_b_for_management_frame = 0;
                        rate_flags |= RATE_MCS_ANT_B_MSK;
                } else {
                        priv->use_ant_b_for_management_frame = 1;
                        rate_flags |= RATE_MCS_ANT_A_MSK;
                }
        }

        tx_cmd->rts_retry_limit = rts_retry_limit;
        tx_cmd->data_retry_limit = data_retry_limit;
        tx_cmd->rate_n_flags = iwl4965_hw_set_rate_n_flags(rate_plcp,
                                                           rate_flags);
}
static void iwl_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
                                      struct ieee80211_tx_info *info,
                                      struct iwl_tx_cmd *tx_cmd,
                                      struct sk_buff *skb_frag,
                                      int sta_id)
{
        struct ieee80211_key_conf *keyconf = info->control.hw_key;

        switch (keyconf->alg) {
        case ALG_CCMP:
                tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
                memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
                if (info->flags & IEEE80211_TX_CTL_AMPDU)
                        tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
                IWL_DEBUG_TX("tx_cmd with aes hwcrypto\n");
                break;

        case ALG_TKIP:
                tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
                ieee80211_get_tkip_key(keyconf, skb_frag,
                        IEEE80211_TKIP_P2_KEY, tx_cmd->key);
                IWL_DEBUG_TX("tx_cmd with tkip hwcrypto\n");
                break;

        case ALG_WEP:
                tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
                        (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);

                if (keyconf->keylen == WEP_KEY_LEN_128)
                        tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;

                memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

                IWL_DEBUG_TX("Configuring packet for WEP encryption "
                             "with key %d\n", keyconf->keyidx);
                break;

        default:
                printk(KERN_ERR "Unknown encode alg %d\n", keyconf->alg);
                break;
        }
}
static void iwl_update_tx_stats(struct iwl_priv *priv, u16 fc, u16 len)
{
        /* 0 - mgmt, 1 - cnt, 2 - data */
        int idx = (fc & IEEE80211_FCTL_FTYPE) >> 2;
        priv->tx_stats[idx].cnt++;
        priv->tx_stats[idx].bytes += len;
}
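
/*
 * The idx arithmetic above relies on the 802.11 frame-control layout:
 * IEEE80211_FCTL_FTYPE covers bits 2-3, so (fc & IEEE80211_FCTL_FTYPE) >> 2
 * maps management (0x0000) -> 0, control (0x0004) -> 1, data (0x0008) -> 2,
 * matching the "0 - mgmt, 1 - cnt, 2 - data" comment.
 */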
/*
 * start REPLY_TX command process
 */
int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct iwl_tfd_frame *tfd;
        u32 *control_flags;
        int txq_id = skb_get_queue_mapping(skb);
        struct iwl_tx_queue *txq = NULL;
        struct iwl_queue *q = NULL;
        dma_addr_t phys_addr;
        dma_addr_t txcmd_phys;
        dma_addr_t scratch_phys;
        struct iwl_cmd *out_cmd = NULL;
        struct iwl_tx_cmd *tx_cmd;
        u16 len, idx, len_org;
        u16 seq_number = 0;
        u8 id, hdr_len, unicast;
        u8 sta_id;
        u16 fc;
        u8 wait_write_ptr = 0;
        u8 tid = 0;
        u8 *qc = NULL;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&priv->lock, flags);
        if (iwl_is_rfkill(priv)) {
                IWL_DEBUG_DROP("Dropping - RF KILL\n");
                goto drop_unlock;
        }

        if (!priv->vif) {
                IWL_DEBUG_DROP("Dropping - !priv->vif\n");
                goto drop_unlock;
        }

        if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) ==
             IWL_INVALID_RATE) {
                IWL_ERROR("ERROR: No TX rate available.\n");
                goto drop_unlock;
        }

        unicast = !is_multicast_ether_addr(hdr->addr1);
        id = 0;

        fc = le16_to_cpu(hdr->frame_control);

#ifdef CONFIG_IWLWIFI_DEBUG
        if (ieee80211_is_auth(fc))
                IWL_DEBUG_TX("Sending AUTH frame\n");
        else if (ieee80211_is_assoc_request(fc))
                IWL_DEBUG_TX("Sending ASSOC frame\n");
        else if (ieee80211_is_reassoc_request(fc))
                IWL_DEBUG_TX("Sending REASSOC frame\n");
#endif

        /* drop all data frames if we are not associated */
        if (((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) &&
            (!iwl_is_associated(priv) ||
             ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && !priv->assoc_id) ||
             !priv->assoc_station_added)) {
                IWL_DEBUG_DROP("Dropping - !iwl_is_associated\n");
                goto drop_unlock;
        }

        spin_unlock_irqrestore(&priv->lock, flags);

        hdr_len = ieee80211_get_hdrlen(fc);

        /* Find (or create) index into station table for destination station */
        sta_id = iwl_get_sta_id(priv, hdr);
        if (sta_id == IWL_INVALID_STATION) {
                DECLARE_MAC_BUF(mac);

                IWL_DEBUG_DROP("Dropping - INVALID STATION: %s\n",
                               print_mac(mac, hdr->addr1));
                goto drop;
        }

        IWL_DEBUG_TX("station Id %d\n", sta_id);

        if (ieee80211_is_qos_data(fc)) {
                qc = ieee80211_get_qos_ctrl(hdr, hdr_len);
                tid = qc[0] & 0xf;
                seq_number = priv->stations[sta_id].tid[tid].seq_number &
                             IEEE80211_SCTL_SEQ;
                hdr->seq_ctrl = cpu_to_le16(seq_number) |
                                (hdr->seq_ctrl &
                                 __constant_cpu_to_le16(IEEE80211_SCTL_FRAG));
                seq_number += 0x10;
#ifdef CONFIG_IWL4965_HT
                /* aggregation is on for this <sta,tid> */
                if (info->flags & IEEE80211_TX_CTL_AMPDU)
                        txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
                priv->stations[sta_id].tid[tid].tfds_in_queue++;
#endif /* CONFIG_IWL4965_HT */
        }

        /* Descriptor for chosen Tx queue */
        txq = &priv->txq[txq_id];
        q = &txq->q;

        spin_lock_irqsave(&priv->lock, flags);

        /* Set up first empty TFD within this queue's circular TFD buffer */
        tfd = &txq->bd[q->write_ptr];
        memset(tfd, 0, sizeof(*tfd));
        control_flags = (u32 *) tfd;
        idx = get_cmd_index(q, q->write_ptr, 0);

        /* Set up driver data for this TFD */
        memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
        txq->txb[q->write_ptr].skb[0] = skb;

        /* Set up first empty entry in queue's array of Tx/cmd buffers */
        out_cmd = &txq->cmd[idx];
        tx_cmd = &out_cmd->cmd.tx;
        memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
        memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));

        /*
         * Set up the Tx-command (not MAC!) header.
         * Store the chosen Tx queue and TFD index within the sequence field;
         * after Tx, uCode's Tx response will return this value so driver can
         * locate the frame within the tx queue and do post-tx processing.
         */
        out_cmd->hdr.cmd = REPLY_TX;
        out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
                                INDEX_TO_SEQ(q->write_ptr)));
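        /*
         * Example of the resulting sequence field (assuming the
         * QUEUE_TO_SEQ/INDEX_TO_SEQ macros from iwl-4965-hw.h place the
         * queue id in bits 8..12 and the TFD index in bits 0..7):
         * txq_id = 2, write_ptr = 5  ->  sequence = 0x0205.
         */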
        /* Copy MAC header from skb into command buffer */
        memcpy(tx_cmd->hdr, hdr, hdr_len);

        /*
         * Use the first empty entry in this queue's command buffer array
         * to contain the Tx command and MAC header concatenated together
         * (payload data will be in another buffer).
         * Size of this varies, due to varying MAC header length.
         * If end is not dword aligned, we'll have 2 extra bytes at the end
         * of the MAC header (device reads on dword boundaries).
         * We'll tell device about this padding later.
         */
        len = sizeof(struct iwl_tx_cmd) +
              sizeof(struct iwl_cmd_header) + hdr_len;

        len_org = len;
        len = (len + 3) & ~3;

        if (len_org != len)
                len_org = 1;
        else
                len_org = 0;
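
        /*
         * Worked example of the dword alignment above (numbers illustrative):
         * if a 26-byte QoS MAC header brings len to, say, 58, then
         * (58 + 3) & ~3 = 60, len_org != len, and len_org = 1 records the
         * 2-byte pad so TX_CMD_FLG_MH_PAD_MSK can be set further down.
         */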
        /* Physical address of this Tx command's header (not MAC header!),
         * within command buffer array. */
        txcmd_phys = txq->dma_addr_cmd + sizeof(struct iwl_cmd) * idx +
                     offsetof(struct iwl_cmd, hdr);

        /* Add buffer containing Tx command and MAC(!) header to TFD's
         * first entry */
        iwl_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len);

        if (!(info->flags & IEEE80211_TX_CTL_DO_NOT_ENCRYPT))
                iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);

        /* Set up TFD's 2nd entry to point directly to remainder of skb,
         * if any (802.11 null frames have no payload). */
        len = skb->len - hdr_len;
        if (len) {
                phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
                                           len, PCI_DMA_TODEVICE);
                iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, len);
        }

        /* Tell NIC about any 2-byte padding after MAC header */
        if (len_org)
                tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

        /* Total # bytes to be transmitted */
        len = (u16)skb->len;
        tx_cmd->len = cpu_to_le16(len);
        /* TODO need this for burst mode later on */
        iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, unicast, sta_id);

        /* set is_hcca to 0; it probably will never be implemented */
        iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, sta_id, 0);

        iwl_update_tx_stats(priv, fc, len);

        scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
                       offsetof(struct iwl_tx_cmd, scratch);
        tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
        tx_cmd->dram_msb_ptr = iwl_get_dma_hi_address(scratch_phys);

        if (!ieee80211_get_morefrag(hdr)) {
                txq->need_update = 1;
                if (qc)
                        priv->stations[sta_id].tid[tid].seq_number = seq_number;
        } else {
                wait_write_ptr = 1;
                txq->need_update = 0;
        }

        iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
        iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);

        /* Set up entry for this TFD in Tx byte-count array */
        priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, len);

        /* Tell device the write index *just past* this latest filled TFD */
        q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
        ret = iwl_txq_update_write_ptr(priv, txq);
        spin_unlock_irqrestore(&priv->lock, flags);

        if (ret)
                return ret;

        if ((iwl_queue_space(q) < q->high_mark)
            && priv->mac80211_registered) {
                if (wait_write_ptr) {
                        spin_lock_irqsave(&priv->lock, flags);
                        txq->need_update = 1;
                        iwl_txq_update_write_ptr(priv, txq);
                        spin_unlock_irqrestore(&priv->lock, flags);
                }

                ieee80211_stop_queue(priv->hw, skb_get_queue_mapping(skb));
        }

        return 0;

drop_unlock:
        spin_unlock_irqrestore(&priv->lock, flags);
drop:
        return -1;
}
EXPORT_SYMBOL(iwl_tx_skb);
/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data pointer
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate the operation has
 * failed. On success, it returns the index (> 0) of the command in the
 * command queue.
 */
int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
        struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
        struct iwl_queue *q = &txq->q;
        struct iwl_tfd_frame *tfd;
        u32 *control_flags;
        struct iwl_cmd *out_cmd;
        u32 idx;
        u16 fix_size;
        dma_addr_t phys_addr;
        int ret;
        unsigned long flags;

        cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
        fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));

        /* If any of the command structures end up being larger than
         * the TFD_MAX_PAYLOAD_SIZE, and it is sent as a 'small' command then
         * we will need to increase the size of the TFD entries */
        BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
               !(cmd->meta.flags & CMD_SIZE_HUGE));

        if (iwl_is_rfkill(priv)) {
                IWL_DEBUG_INFO("Not sending command - RF KILL");
                return -EIO;
        }

        if (iwl_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) {
                IWL_ERROR("No space for Tx\n");
                return -ENOSPC;
        }

        spin_lock_irqsave(&priv->hcmd_lock, flags);

        tfd = &txq->bd[q->write_ptr];
        memset(tfd, 0, sizeof(*tfd));

        control_flags = (u32 *) tfd;

        idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE);
        out_cmd = &txq->cmd[idx];

        out_cmd->hdr.cmd = cmd->id;
        memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta));
        memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);

        /* At this point, the out_cmd now has all of the incoming cmd
         * information */

        out_cmd->hdr.flags = 0;
        out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
                                            INDEX_TO_SEQ(q->write_ptr));
        if (out_cmd->meta.flags & CMD_SIZE_HUGE)
                out_cmd->hdr.sequence |= cpu_to_le16(SEQ_HUGE_FRAME);

        phys_addr = txq->dma_addr_cmd + sizeof(txq->cmd[0]) * idx +
                    offsetof(struct iwl_cmd, hdr);
        iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);

        IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, "
                     "%d bytes at %d[%d]:%d\n",
                     get_cmd_string(out_cmd->hdr.cmd),
                     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
                     fix_size, q->write_ptr, idx, IWL_CMD_QUEUE_NUM);

        txq->need_update = 1;

        /* Set up entry in queue's byte count circular buffer */
        priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);

        /* Increment and update queue's write index */
        q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
        ret = iwl_txq_update_write_ptr(priv, txq);

        spin_unlock_irqrestore(&priv->hcmd_lock, flags);
        return ret ? ret : idx;
}