/*
 * Copyright (c) 2008, Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/* Implementation of the main "ATH" layer. */

static int ath_outdoor;		/* enable outdoor use */

static u32 ath_chainmask_sel_up_rssi_thres =
	ATH_CHAINMASK_SEL_UP_RSSI_THRES;
static u32 ath_chainmask_sel_down_rssi_thres =
	ATH_CHAINMASK_SEL_DOWN_RSSI_THRES;
static u32 ath_chainmask_sel_period =
	ATH_CHAINMASK_SEL_TIMEOUT;
/* Return the bus cache line size in 4-byte word units */
static void bus_read_cachesize(struct ath_softc *sc, int *csz)
{
	u8 u8tmp;

	pci_read_config_byte(sc->pdev, PCI_CACHE_LINE_SIZE, (u8 *)&u8tmp);
	*csz = (int)u8tmp;

	/*
	 * This check was put in to avoid "unpleasant" consequences if
	 * the bootrom has not fully initialized all PCI devices.
	 * Sometimes the cache line size register is not set.
	 */
	if (*csz == 0)
		*csz = DEFAULT_CACHELINE >> 2;	/* Use the default size */
}
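
/*
 * Worked example (editorial note, not part of the original source):
 * the PCI_CACHE_LINE_SIZE register counts 4-byte words, so a register
 * value of 16 describes a 64-byte cache line and *csz is returned as
 * 16.  ath_init() later converts this back to bytes with
 * "sc->sc_cachelsz = csz << 2".  If the bootrom left the register at
 * zero, DEFAULT_CACHELINE >> 2 words are assumed instead.
 */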
/*
 * Set current operating mode
 *
 * This function initializes and fills the rate table in the ATH object based
 * on the operating mode.
 */
static void ath_setcurmode(struct ath_softc *sc, enum wireless_mode mode)
{
	const struct ath9k_rate_table *rt;
	int i;

	memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
	rt = ath9k_hw_getratetable(sc->sc_ah, mode);

	for (i = 0; i < rt->rateCount; i++)
		sc->sc_rixmap[rt->info[i].rateCode] = (u8) i;

	memzero(sc->sc_hwmap, sizeof(sc->sc_hwmap));
	for (i = 0; i < 256; i++) {
		u8 ix = rt->rateCodeToIndex[i];

		sc->sc_hwmap[i].ieeerate =
			rt->info[ix].dot11Rate & IEEE80211_RATE_VAL;
		sc->sc_hwmap[i].rateKbps = rt->info[ix].rateKbps;

		if (rt->info[ix].shortPreamble ||
		    rt->info[ix].phy == PHY_OFDM) {
			/* XXX: Handle this */
		}

		/* NB: this uses the last entry if the rate isn't found */
		/* XXX beware of overflow */
	}
	sc->sc_curmode = mode;
	/*
	 * All protection frames are transmitted at 2Mb/s for
	 * 11g, otherwise at 1Mb/s.
	 * XXX select protection rate index from rate table.
	 */
	sc->sc_protrix = (mode == ATH9K_MODE_11G ? 1 : 0);
}
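
/*
 * Illustrative sketch (editorial addition, not in the original driver):
 * sc_rixmap built above is the inverse of the rate table, mapping a
 * hardware rate code back to its rate index.  A hypothetical lookup
 * helper would use the 0xff sentinel written by the memset() to detect
 * codes the current table does not contain:
 */
static inline int ath_ratecode_to_rix(struct ath_softc *sc, u8 ratecode)
{
	u8 rix = sc->sc_rixmap[ratecode];

	return (rix == 0xff) ? -1 : rix;	/* -1: unknown rate code */
}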
/*
 * Set up rate table (legacy rates)
 */
static void ath_setup_rates(struct ath_softc *sc, enum ieee80211_band band)
{
	struct ath_hal *ah = sc->sc_ah;
	const struct ath9k_rate_table *rt = NULL;
	struct ieee80211_supported_band *sband;
	struct ieee80211_rate *rate;
	int i, maxrates;

	switch (band) {
	case IEEE80211_BAND_2GHZ:
		rt = ath9k_hw_getratetable(ah, ATH9K_MODE_11G);
		break;
	case IEEE80211_BAND_5GHZ:
		rt = ath9k_hw_getratetable(ah, ATH9K_MODE_11A);
		break;
	default:
		break;
	}

	if (rt == NULL)
		return;

	sband = &sc->sbands[band];
	rate = sc->rates[band];

	if (rt->rateCount > ATH_RATE_MAX)
		maxrates = ATH_RATE_MAX;
	else
		maxrates = rt->rateCount;

	for (i = 0; i < maxrates; i++) {
		rate[i].bitrate = rt->info[i].rateKbps / 100;
		rate[i].hw_value = rt->info[i].rateCode;
		sband->n_bitrates++;
		DPRINTF(sc, ATH_DBG_CONFIG,
			"%s: Rate: %2dMbps, ratecode: %2d\n",
			__func__,
			rate[i].bitrate / 10,
			rate[i].hw_value);
	}
}
/*
 * Set up channel list
 */
static int ath_setup_channels(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	int nchan, i, a = 0, b = 0;
	u8 regclassids[ATH_REGCLASSIDS_MAX];
	u32 nregclass = 0;
	struct ieee80211_supported_band *band_2ghz;
	struct ieee80211_supported_band *band_5ghz;
	struct ieee80211_channel *chan_2ghz;
	struct ieee80211_channel *chan_5ghz;
	struct ath9k_channel *c;

	/* Fill in ah->ah_channels */
	if (!ath9k_regd_init_channels(ah,
				      ATH_CHAN_MAX,
				      (u32 *)&nchan,
				      regclassids,
				      ATH_REGCLASSIDS_MAX,
				      &nregclass,
				      CTRY_DEFAULT,
				      false,
				      1)) {
		u32 rd = ah->ah_currentRD;

		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to collect channel list; "
			"regdomain likely %u country code %u\n",
			__func__, rd, CTRY_DEFAULT);
		return -EINVAL;
	}

	band_2ghz = &sc->sbands[IEEE80211_BAND_2GHZ];
	band_5ghz = &sc->sbands[IEEE80211_BAND_5GHZ];
	chan_2ghz = sc->channels[IEEE80211_BAND_2GHZ];
	chan_5ghz = sc->channels[IEEE80211_BAND_5GHZ];

	for (i = 0; i < nchan; i++) {
		c = &ah->ah_channels[i];
		if (IS_CHAN_2GHZ(c)) {
			chan_2ghz[a].band = IEEE80211_BAND_2GHZ;
			chan_2ghz[a].center_freq = c->channel;
			chan_2ghz[a].max_power = c->maxTxPower;

			if (c->privFlags & CHANNEL_DISALLOW_ADHOC)
				chan_2ghz[a].flags |=
					IEEE80211_CHAN_NO_IBSS;
			if (c->channelFlags & CHANNEL_PASSIVE)
				chan_2ghz[a].flags |=
					IEEE80211_CHAN_PASSIVE_SCAN;

			band_2ghz->n_channels = ++a;

			DPRINTF(sc, ATH_DBG_CONFIG,
				"%s: 2GHz channel: %d, "
				"channelFlags: 0x%x\n",
				__func__,
				c->channel,
				c->channelFlags);
		} else if (IS_CHAN_5GHZ(c)) {
			chan_5ghz[b].band = IEEE80211_BAND_5GHZ;
			chan_5ghz[b].center_freq = c->channel;
			chan_5ghz[b].max_power = c->maxTxPower;

			if (c->privFlags & CHANNEL_DISALLOW_ADHOC)
				chan_5ghz[b].flags |=
					IEEE80211_CHAN_NO_IBSS;
			if (c->channelFlags & CHANNEL_PASSIVE)
				chan_5ghz[b].flags |=
					IEEE80211_CHAN_PASSIVE_SCAN;

			band_5ghz->n_channels = ++b;

			DPRINTF(sc, ATH_DBG_CONFIG,
				"%s: 5GHz channel: %d, "
				"channelFlags: 0x%x\n",
				__func__,
				c->channel,
				c->channelFlags);
		}
	}

	return 0;
}
/*
 * Determine mode from channel flags
 *
 * This routine will provide the enumerated WIRELESS_MODE value based
 * on the settings of the channel flags.  If no valid set of flags
 * exists, the lowest mode (11b) is selected.
 */
static enum wireless_mode ath_chan2mode(struct ath9k_channel *chan)
{
	if (chan->chanmode == CHANNEL_A)
		return ATH9K_MODE_11A;
	else if (chan->chanmode == CHANNEL_G)
		return ATH9K_MODE_11G;
	else if (chan->chanmode == CHANNEL_B)
		return ATH9K_MODE_11B;
	else if (chan->chanmode == CHANNEL_A_HT20)
		return ATH9K_MODE_11NA_HT20;
	else if (chan->chanmode == CHANNEL_G_HT20)
		return ATH9K_MODE_11NG_HT20;
	else if (chan->chanmode == CHANNEL_A_HT40PLUS)
		return ATH9K_MODE_11NA_HT40PLUS;
	else if (chan->chanmode == CHANNEL_A_HT40MINUS)
		return ATH9K_MODE_11NA_HT40MINUS;
	else if (chan->chanmode == CHANNEL_G_HT40PLUS)
		return ATH9K_MODE_11NG_HT40PLUS;
	else if (chan->chanmode == CHANNEL_G_HT40MINUS)
		return ATH9K_MODE_11NG_HT40MINUS;

	WARN_ON(1);	/* should not get here */

	return ATH9K_MODE_11B;
}
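
/*
 * Editorial sketch (not part of the original source): the if/else
 * chain above is equivalent to a small lookup table, shown here only
 * to make the chanmode -> wireless_mode mapping easy to scan.  The
 * pairs mirror the comparisons exactly.
 */
#if 0	/* alternative form, not compiled */
static const struct {
	u32 chanmode;
	enum wireless_mode mode;
} ath_mode_map[] = {
	{ CHANNEL_A,		ATH9K_MODE_11A },
	{ CHANNEL_G,		ATH9K_MODE_11G },
	{ CHANNEL_B,		ATH9K_MODE_11B },
	{ CHANNEL_A_HT20,	ATH9K_MODE_11NA_HT20 },
	{ CHANNEL_G_HT20,	ATH9K_MODE_11NG_HT20 },
	{ CHANNEL_A_HT40PLUS,	ATH9K_MODE_11NA_HT40PLUS },
	{ CHANNEL_A_HT40MINUS,	ATH9K_MODE_11NA_HT40MINUS },
	{ CHANNEL_G_HT40PLUS,	ATH9K_MODE_11NG_HT40PLUS },
	{ CHANNEL_G_HT40MINUS,	ATH9K_MODE_11NG_HT40MINUS },
};
#endif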
/*
 * Stop the device, grabbing the top-level lock to protect
 * against concurrent entry through ath_init (which can happen
 * if another thread does a system call and the thread doing the
 * stop is preempted).
 */
static int ath_stop(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;

	DPRINTF(sc, ATH_DBG_CONFIG, "%s: invalid %ld\n",
		__func__, sc->sc_flags & SC_OP_INVALID);

	/*
	 * Shutdown the hardware and driver:
	 *    stop output from above
	 *    clear transmit machinery
	 *    clear receive machinery
	 *    reclaim beacon resources
	 *
	 * Note that some of this work is not possible if the
	 * hardware is gone (invalid).
	 */
	if (!(sc->sc_flags & SC_OP_INVALID))
		ath9k_hw_set_interrupts(ah, 0);
	ath_draintxq(sc, false);
	if (!(sc->sc_flags & SC_OP_INVALID)) {
		ath_stoprecv(sc);
		ath9k_hw_phy_disable(ah);
	} else
		sc->sc_rxlink = NULL;

	return 0;
}
/*
 * Set the current channel
 *
 * Set/change channels.  If the channel is really being changed, it's done
 * by resetting the chip.  To accomplish this we must first cleanup any
 * pending DMA, then restart things after the reset, as in ath_init.
 */
int ath_set_channel(struct ath_softc *sc, struct ath9k_channel *hchan)
{
	struct ath_hal *ah = sc->sc_ah;
	bool fastcc = true, stopped;

	if (sc->sc_flags & SC_OP_INVALID)	/* the device is invalid or removed */
		return -EIO;

	DPRINTF(sc, ATH_DBG_CONFIG,
		"%s: %u (%u MHz) -> %u (%u MHz), cflags:%x\n",
		__func__,
		ath9k_hw_mhz2ieee(ah, sc->sc_ah->ah_curchan->channel,
				  sc->sc_ah->ah_curchan->channelFlags),
		sc->sc_ah->ah_curchan->channel,
		ath9k_hw_mhz2ieee(ah, hchan->channel, hchan->channelFlags),
		hchan->channel, hchan->channelFlags);

	if (hchan->channel != sc->sc_ah->ah_curchan->channel ||
	    hchan->channelFlags != sc->sc_ah->ah_curchan->channelFlags ||
	    (sc->sc_flags & SC_OP_CHAINMASK_UPDATE) ||
	    (sc->sc_flags & SC_OP_FULL_RESET)) {
		int status;
		/*
		 * This is only performed if the channel settings have
		 * actually changed.
		 *
		 * To switch channels clear any pending DMA operations;
		 * wait long enough for the RX fifo to drain, reset the
		 * hardware at the new frequency, and then re-enable
		 * the relevant bits of the h/w.
		 */
		ath9k_hw_set_interrupts(ah, 0);	/* disable interrupts */
		ath_draintxq(sc, false);	/* clear pending tx frames */
		stopped = ath_stoprecv(sc);	/* turn off frame recv */

		/* XXX: do not flush receive queue here. We don't want
		 * to flush data frames already in queue because of
		 * changing channel. */

		if (!stopped || (sc->sc_flags & SC_OP_FULL_RESET))
			fastcc = false;

		spin_lock_bh(&sc->sc_resetlock);
		if (!ath9k_hw_reset(ah, hchan,
				    sc->sc_ht_info.tx_chan_width,
				    sc->sc_tx_chainmask,
				    sc->sc_rx_chainmask,
				    sc->sc_ht_extprotspacing,
				    fastcc, &status)) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"%s: unable to reset channel %u (%u MHz) "
				"flags 0x%x hal status %u\n", __func__,
				ath9k_hw_mhz2ieee(ah, hchan->channel,
						  hchan->channelFlags),
				hchan->channel, hchan->channelFlags, status);
			spin_unlock_bh(&sc->sc_resetlock);
			return -EIO;
		}
		spin_unlock_bh(&sc->sc_resetlock);

		sc->sc_flags &= ~SC_OP_CHAINMASK_UPDATE;
		sc->sc_flags &= ~SC_OP_FULL_RESET;

		/* Re-enable rx framework */
		if (ath_startrecv(sc) != 0) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"%s: unable to restart recv logic\n",
				__func__);
			return -EIO;
		}

		/*
		 * Change channels and update the h/w rate map
		 * if we're switching; e.g. 11a to 11b/g.
		 */
		ath_setcurmode(sc, ath_chan2mode(hchan));

		ath_update_txpow(sc);	/* update tx power state */

		/*
		 * Re-enable interrupts.
		 */
		ath9k_hw_set_interrupts(ah, sc->sc_imask);
	}
	return 0;
}
/**********************/
/* Chainmask Handling */
/**********************/
static void ath_chainmask_sel_timertimeout(unsigned long data)
{
	struct ath_chainmask_sel *cm = (struct ath_chainmask_sel *)data;
	cm->switch_allowed = 1;
}

/* Start chainmask select timer */
static void ath_chainmask_sel_timerstart(struct ath_chainmask_sel *cm)
{
	cm->switch_allowed = 0;
	/* mod_timer() takes an absolute expiry time in jiffies */
	mod_timer(&cm->timer, jiffies + ath_chainmask_sel_period);
}

/* Stop chainmask select timer */
static void ath_chainmask_sel_timerstop(struct ath_chainmask_sel *cm)
{
	cm->switch_allowed = 0;
	del_timer_sync(&cm->timer);
}
static void ath_chainmask_sel_init(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_chainmask_sel *cm = &an->an_chainmask_sel;

	memzero(cm, sizeof(struct ath_chainmask_sel));

	cm->cur_tx_mask = sc->sc_tx_chainmask;
	cm->cur_rx_mask = sc->sc_rx_chainmask;
	cm->tx_avgrssi = ATH_RSSI_DUMMY_MARKER;
	setup_timer(&cm->timer,
		    ath_chainmask_sel_timertimeout, (unsigned long) cm);
}
int ath_chainmask_sel_logic(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_chainmask_sel *cm = &an->an_chainmask_sel;

	/*
	 * Disable auto-switching under any of the following conditions.
	 * sc_chainmask_auto_sel is used for the internal global
	 * auto-switching enabled/disabled setting.
	 */
	if (sc->sc_ah->ah_caps.tx_chainmask != ATH_CHAINMASK_SEL_3X3) {
		cm->cur_tx_mask = sc->sc_tx_chainmask;
		return cm->cur_tx_mask;
	}

	if (cm->tx_avgrssi == ATH_RSSI_DUMMY_MARKER)
		return cm->cur_tx_mask;

	if (cm->switch_allowed) {
		/* Switch down from tx 3 to tx 2. */
		if (cm->cur_tx_mask == ATH_CHAINMASK_SEL_3X3 &&
		    ATH_RSSI_OUT(cm->tx_avgrssi) >=
		    ath_chainmask_sel_down_rssi_thres) {
			cm->cur_tx_mask = sc->sc_tx_chainmask;

			/* Don't let another switch happen until
			 * this timer expires */
			ath_chainmask_sel_timerstart(cm);
		}
		/* Switch up from tx 2 to 3. */
		else if (cm->cur_tx_mask == sc->sc_tx_chainmask &&
			 ATH_RSSI_OUT(cm->tx_avgrssi) <=
			 ath_chainmask_sel_up_rssi_thres) {
			cm->cur_tx_mask = ATH_CHAINMASK_SEL_3X3;

			/* Don't let another switch happen
			 * until this timer expires */
			ath_chainmask_sel_timerstart(cm);
		}
	}

	return cm->cur_tx_mask;
}
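
/*
 * Worked example (editorial note): the two thresholds above form a
 * hysteresis band.  While the smoothed beacon RSSI sits at or above
 * ath_chainmask_sel_down_rssi_thres the link is strong enough that the
 * node drops from the 3x3 tx chainmask to the lighter 2-chain mask;
 * only when it decays to ath_chainmask_sel_up_rssi_thres or below does
 * it switch back up to 3x3.  The timer armed on every transition keeps
 * the mask from flapping faster than ath_chainmask_sel_period.
 */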
/*
 * Update tx/rx chainmask.  For legacy association,
 * hard code chainmask to 1x1; for 11n association, use
 * the chainmask configuration.
 */
void ath_update_chainmask(struct ath_softc *sc, int is_ht)
{
	sc->sc_flags |= SC_OP_CHAINMASK_UPDATE;
	if (is_ht) {
		sc->sc_tx_chainmask = sc->sc_ah->ah_caps.tx_chainmask;
		sc->sc_rx_chainmask = sc->sc_ah->ah_caps.rx_chainmask;
	} else {
		sc->sc_tx_chainmask = 1;
		sc->sc_rx_chainmask = 1;
	}

	DPRINTF(sc, ATH_DBG_CONFIG, "%s: tx chmask: %d, rx chmask: %d\n",
		__func__, sc->sc_tx_chainmask, sc->sc_rx_chainmask);
}
int ath_vap_attach(struct ath_softc *sc,
		   int if_id,
		   struct ieee80211_vif *if_data,
		   enum ath9k_opmode opmode)
{
	struct ath_vap *avp;

	if (if_id >= ATH_BCBUF || sc->sc_vaps[if_id] != NULL) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: Invalid interface id = %u\n", __func__, if_id);
		return -EINVAL;
	}

	switch (opmode) {
	case ATH9K_M_STA:
	case ATH9K_M_IBSS:
	case ATH9K_M_MONITOR:
		break;
	case ATH9K_M_HOSTAP:
		/* XXX not right, beacon buffer is allocated on RUN trans */
		if (list_empty(&sc->sc_bbuf))
			return -ENOMEM;
		break;
	default:
		return -EINVAL;
	}

	avp = kmalloc(sizeof(struct ath_vap), GFP_KERNEL);
	if (avp == NULL)
		return -ENOMEM;

	memzero(avp, sizeof(struct ath_vap));
	avp->av_if_data = if_data;
	/* Set the VAP opmode */
	avp->av_opmode = opmode;

	if (opmode == ATH9K_M_HOSTAP)
		ath9k_hw_set_tsfadjust(sc->sc_ah, 1);

	sc->sc_vaps[if_id] = avp;
	sc->sc_nvaps++;
	/* Set the device opmode */
	sc->sc_ah->ah_opmode = opmode;

	/* default VAP configuration */
	avp->av_config.av_fixed_rateset = IEEE80211_FIXED_RATE_NONE;
	avp->av_config.av_fixed_retryset = 0x03030303;

	return 0;
}
int ath_vap_detach(struct ath_softc *sc, int if_id)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_vap *avp;

	avp = sc->sc_vaps[if_id];
	if (avp == NULL) {
		DPRINTF(sc, ATH_DBG_FATAL, "%s: invalid interface id %u\n",
			__func__, if_id);
		return -EINVAL;
	}

	/*
	 * Quiesce the hardware while we remove the vap.  In
	 * particular we need to reclaim all references to the
	 * vap state by any frames pending on the tx queues.
	 *
	 * XXX can we do this w/o affecting other vap's?
	 */
	ath9k_hw_set_interrupts(ah, 0);	/* disable interrupts */
	ath_draintxq(sc, false);	/* stop xmit side */
	ath_stoprecv(sc);		/* stop recv side */
	ath_flushrecv(sc);		/* flush recv queue */

	kfree(avp);
	sc->sc_vaps[if_id] = NULL;
	sc->sc_nvaps--;

	return 0;
}
int ath_vap_config(struct ath_softc *sc,
		   int if_id, struct ath_vap_config *if_config)
{
	struct ath_vap *avp;

	if (if_id >= ATH_BCBUF) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: Invalid interface id = %u\n", __func__, if_id);
		return -EINVAL;
	}

	avp = sc->sc_vaps[if_id];
	ASSERT(avp != NULL);

	if (avp)
		memcpy(&avp->av_config, if_config, sizeof(avp->av_config));

	return 0;
}
int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan)
{
	struct ath_hal *ah = sc->sc_ah;
	int status;
	int error = 0;

	DPRINTF(sc, ATH_DBG_CONFIG, "%s: mode %d\n",
		__func__, sc->sc_ah->ah_opmode);

	/*
	 * Stop anything previously setup.  This is safe
	 * whether this is the first time through or not.
	 */
	ath_stop(sc);

	/* Initialize chainmask selection */
	sc->sc_tx_chainmask = ah->ah_caps.tx_chainmask;
	sc->sc_rx_chainmask = ah->ah_caps.rx_chainmask;

	/* Reset SERDES registers */
	ath9k_hw_configpcipowersave(ah, 0);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''.  On return the hardware is known to
	 * be powered up and with interrupts disabled.  This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	spin_lock_bh(&sc->sc_resetlock);
	if (!ath9k_hw_reset(ah, initial_chan,
			    sc->sc_ht_info.tx_chan_width,
			    sc->sc_tx_chainmask, sc->sc_rx_chainmask,
			    sc->sc_ht_extprotspacing, false, &status)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to reset hardware; hal status %u "
			"(freq %u flags 0x%x)\n", __func__, status,
			initial_chan->channel, initial_chan->channelFlags);
		error = -EIO;
		spin_unlock_bh(&sc->sc_resetlock);
		goto done;
	}
	spin_unlock_bh(&sc->sc_resetlock);
	/*
	 * This is needed only to setup initial state
	 * but it's best done after a reset.
	 */
	ath_update_txpow(sc);

	/*
	 * Setup the hardware after reset:
	 * The receive engine is set going.
	 * Frame transmit is handled entirely
	 * in the frame output path; there's nothing to do
	 * here except setup the interrupt mask.
	 */
	if (ath_startrecv(sc) != 0) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to start recv logic\n", __func__);
		error = -EIO;
		goto done;
	}

	/* Setup our intr mask. */
	sc->sc_imask = ATH9K_INT_RX | ATH9K_INT_TX
		| ATH9K_INT_RXEOL | ATH9K_INT_RXORN
		| ATH9K_INT_FATAL | ATH9K_INT_GLOBAL;

	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_GTT)
		sc->sc_imask |= ATH9K_INT_GTT;

	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)
		sc->sc_imask |= ATH9K_INT_CST;

	/*
	 * Enable MIB interrupts when there are hardware phy counters.
	 * Note we only do this (at the moment) for station mode.
	 */
	if (ath9k_hw_phycounters(ah) &&
	    ((sc->sc_ah->ah_opmode == ATH9K_M_STA) ||
	     (sc->sc_ah->ah_opmode == ATH9K_M_IBSS)))
		sc->sc_imask |= ATH9K_INT_MIB;
	/*
	 * Some hardware processes the TIM IE and fires an
	 * interrupt when the TIM bit is set.  For hardware
	 * that does, if not overridden by configuration,
	 * enable the TIM interrupt when operating as station.
	 */
	if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_ENHANCEDPM) &&
	    (sc->sc_ah->ah_opmode == ATH9K_M_STA) &&
	    !sc->sc_config.swBeaconProcess)
		sc->sc_imask |= ATH9K_INT_TIM;
	/*
	 * Don't enable interrupts here as we've not yet built our
	 * vap and node data structures, which will be needed as soon
	 * as we start receiving.
	 */
	ath_setcurmode(sc, ath_chan2mode(initial_chan));

	/* XXX: we must make sure h/w is ready and clear invalid flag
	 * before turning on interrupt. */
	sc->sc_flags &= ~SC_OP_INVALID;
done:
	return error;
}
int ath_reset(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hal *ah = sc->sc_ah;
	int status;
	int error = 0;

	ath9k_hw_set_interrupts(ah, 0);	/* disable interrupts */
	ath_draintxq(sc, retry_tx);	/* stop xmit */
	ath_stoprecv(sc);		/* stop recv */
	ath_flushrecv(sc);		/* flush recv queue */

	/* Reset chip */
	spin_lock_bh(&sc->sc_resetlock);
	if (!ath9k_hw_reset(ah, sc->sc_ah->ah_curchan,
			    sc->sc_ht_info.tx_chan_width,
			    sc->sc_tx_chainmask, sc->sc_rx_chainmask,
			    sc->sc_ht_extprotspacing, false, &status)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to reset hardware; hal status %u\n",
			__func__, status);
		error = -EIO;
	}
	spin_unlock_bh(&sc->sc_resetlock);

	if (ath_startrecv(sc) != 0)	/* restart recv */
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to start recv logic\n", __func__);

	/*
	 * We may be doing a reset in response to a request
	 * that changes the channel so update any state that
	 * might change as a result.
	 */
	ath_setcurmode(sc, ath_chan2mode(sc->sc_ah->ah_curchan));

	ath_update_txpow(sc);

	if (sc->sc_flags & SC_OP_BEACONS)
		ath_beacon_config(sc, ATH_IF_ID_ANY);	/* restart beacons */

	ath9k_hw_set_interrupts(ah, sc->sc_imask);

	/* Restart the txq */
	if (retry_tx) {
		int i;
		for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
			if (ATH_TXQ_SETUP(sc, i)) {
				spin_lock_bh(&sc->sc_txq[i].axq_lock);
				ath_txq_schedule(sc, &sc->sc_txq[i]);
				spin_unlock_bh(&sc->sc_txq[i].axq_lock);
			}
		}
	}

	return error;
}
int ath_suspend(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;

	/* No I/O if device has been surprise removed */
	if (sc->sc_flags & SC_OP_INVALID)
		return -EIO;

	/* Shut off the interrupts before setting the SC_OP_INVALID flag */
	ath9k_hw_set_interrupts(ah, 0);

	/* XXX: we must make sure h/w will not generate any interrupt
	 * before setting the invalid flag. */
	sc->sc_flags |= SC_OP_INVALID;

	/* disable HAL and put h/w to sleep */
	ath9k_hw_disable(sc->sc_ah);

	ath9k_hw_configpcipowersave(sc->sc_ah, 1);

	return 0;
}
/* Interrupt handler.  Most of the actual processing is deferred.
 * It's the caller's responsibility to ensure the chip is awake. */

irqreturn_t ath_isr(int irq, void *dev)
{
	struct ath_softc *sc = dev;
	struct ath_hal *ah = sc->sc_ah;
	enum ath9k_int status;
	bool sched = false;

	do {
		if (sc->sc_flags & SC_OP_INVALID) {
			/*
			 * The hardware is not ready/present, don't
			 * touch anything. Note this can happen early
			 * on if the IRQ is shared.
			 */
			return IRQ_NONE;
		}
		if (!ath9k_hw_intrpend(ah)) {	/* shared irq, not for us */
			return IRQ_NONE;
		}

		/*
		 * Figure out the reason(s) for the interrupt.  Note
		 * that the hal returns a pseudo-ISR that may include
		 * bits we haven't explicitly enabled so we mask the
		 * value to ensure we only process bits we requested.
		 */
		ath9k_hw_getisr(ah, &status);	/* NB: clears ISR too */

		status &= sc->sc_imask;	/* discard unasked-for bits */

		/*
		 * If there are no status bits set, then this interrupt was not
		 * for me (should have been caught above).
		 */
		if (!status)
			return IRQ_NONE;

		sc->sc_intrstatus = status;

		if (status & ATH9K_INT_FATAL) {
			/* need a chip reset */
			sched = true;
		} else if (status & ATH9K_INT_RXORN) {
			/* need a chip reset */
			sched = true;
		} else {
			if (status & ATH9K_INT_SWBA) {
				/* schedule a tasklet for beacon handling */
				tasklet_schedule(&sc->bcon_tasklet);
			}
			if (status & ATH9K_INT_RXEOL) {
				/*
				 * NB: the hardware should re-read the link when
				 *     RXE bit is written, but it doesn't work
				 *     at least on older hardware revs.
				 */
				sched = true;
			}

			if (status & ATH9K_INT_TXURN)
				/* bump tx trigger level */
				ath9k_hw_updatetxtriglevel(ah, true);
			/* XXX: optimize this */
			if (status & ATH9K_INT_RX)
				sched = true;
			if (status & ATH9K_INT_TX)
				sched = true;
			if (status & ATH9K_INT_BMISS)
				sched = true;
			/* carrier sense timeout */
			if (status & ATH9K_INT_CST)
				sched = true;
			if (status & ATH9K_INT_MIB) {
				/*
				 * Disable interrupts until we service the MIB
				 * interrupt; otherwise it will continue to
				 * fire.
				 */
				ath9k_hw_set_interrupts(ah, 0);
				/*
				 * Let the hal handle the event.  We assume
				 * it will clear whatever condition caused
				 * the interrupt.
				 */
				ath9k_hw_procmibevent(ah, &sc->sc_halstats);
				ath9k_hw_set_interrupts(ah, sc->sc_imask);
			}
			if (status & ATH9K_INT_TIM_TIMER) {
				if (!(ah->ah_caps.hw_caps &
				      ATH9K_HW_CAP_AUTOSLEEP)) {
					/* Clear RxAbort bit so that we can
					 * receive frames */
					ath9k_hw_setrxabort(ah, 0);
					sched = true;
				}
			}
		}
	} while (0);

	if (sched) {
		/* turn off every interrupt except SWBA */
		ath9k_hw_set_interrupts(ah, (sc->sc_imask & ATH9K_INT_SWBA));
		tasklet_schedule(&sc->intr_tq);
	}

	return IRQ_HANDLED;
}
/* Deferred interrupt processing */

static void ath9k_tasklet(unsigned long data)
{
	struct ath_softc *sc = (struct ath_softc *)data;
	u32 status = sc->sc_intrstatus;

	if (status & ATH9K_INT_FATAL) {
		/* need a chip reset */
		ath_reset(sc, false);
		return;
	} else {

		if (status &
		    (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN)) {
			/* XXX: fill me in */
			/*
			if (status & ATH9K_INT_RXORN) {
			}
			if (status & ATH9K_INT_RXEOL) {
			}
			*/
			spin_lock_bh(&sc->sc_rxflushlock);
			ath_rx_tasklet(sc, 0);
			spin_unlock_bh(&sc->sc_rxflushlock);
		}
		/* XXX: optimize this */
		if (status & ATH9K_INT_TX)
			ath_tx_tasklet(sc);
		/* XXX: fill me in */
		/*
		if (status & ATH9K_INT_BMISS) {
		}
		if (status & (ATH9K_INT_TIM | ATH9K_INT_DTIMSYNC)) {
			if (status & ATH9K_INT_TIM) {
			}
			if (status & ATH9K_INT_DTIMSYNC) {
			}
		}
		*/
	}

	/* re-enable hardware interrupt */
	ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask);
}
int ath_init(u16 devid, struct ath_softc *sc)
{
	struct ath_hal *ah = NULL;
	int status;
	int error = 0, i;
	int csz = 0;

	/* XXX: hardware will not be ready until ath_open() being called */
	sc->sc_flags |= SC_OP_INVALID;

	sc->sc_debug = DBG_DEFAULT;
	DPRINTF(sc, ATH_DBG_CONFIG, "%s: devid 0x%x\n", __func__, devid);

	/* Initialize tasklet */
	tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
	tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet,
		     (unsigned long)sc);

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	bus_read_cachesize(sc, &csz);
	/* XXX assert csz is non-zero */
	sc->sc_cachelsz = csz << 2;	/* convert to bytes */

	spin_lock_init(&sc->sc_resetlock);

	ah = ath9k_hw_attach(devid, sc, sc->mem, &status);
	if (ah == NULL) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to attach hardware; HAL status %u\n",
			__func__, status);
		error = -ENXIO;
		goto bad;
	}
	sc->sc_ah = ah;

	/* Get the hardware key cache size. */
	sc->sc_keymax = ah->ah_caps.keycache_size;
	if (sc->sc_keymax > ATH_KEYMAX) {
		DPRINTF(sc, ATH_DBG_KEYCACHE,
			"%s: Warning, using only %u entries in %u key cache\n",
			__func__, ATH_KEYMAX, sc->sc_keymax);
		sc->sc_keymax = ATH_KEYMAX;
	}

	/*
	 * Reset the key cache since some parts do not
	 * reset the contents on initial power up.
	 */
	for (i = 0; i < sc->sc_keymax; i++)
		ath9k_hw_keyreset(ah, (u16) i);
	/*
	 * Mark key cache slots associated with global keys
	 * as in use.  If we knew TKIP was not to be used we
	 * could leave the +32, +64, and +32+64 slots free.
	 * XXX only for splitmic.
	 */
	for (i = 0; i < IEEE80211_WEP_NKID; i++) {
		set_bit(i, sc->sc_keymap);
		set_bit(i + 32, sc->sc_keymap);
		set_bit(i + 64, sc->sc_keymap);
		set_bit(i + 32 + 64, sc->sc_keymap);
	}
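	/*
	 * Worked layout (editorial note): with split-mic TKIP each key
	 * slot is paired with the slots at +32, +64 and +32+64, so the
	 * IEEE80211_WEP_NKID (4) global keys reserve four groups:
	 *
	 *	i = 0: slots 0, 32, 64, 96
	 *	i = 1: slots 1, 33, 65, 97
	 *	i = 2: slots 2, 34, 66, 98
	 *	i = 3: slots 3, 35, 67, 99
	 *
	 * which is why the comment above notes the extra slots could be
	 * left free if TKIP were known to be unused.
	 */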
	/*
	 * Collect the channel list using the default country
	 * code and including outdoor channels.  The 802.11 layer
	 * is responsible for filtering this list based on settings
	 * like the phy mode.
	 */
	error = ath_setup_channels(sc);
	if (error)
		goto bad;

	/* default to STA mode */
	sc->sc_ah->ah_opmode = ATH9K_M_MONITOR;

	/* Setup rate tables */
	ath_setup_rates(sc, IEEE80211_BAND_2GHZ);
	ath_setup_rates(sc, IEEE80211_BAND_5GHZ);

	/* NB: setup here so ath_rate_update is happy */
	ath_setcurmode(sc, ATH9K_MODE_11A);

	/*
	 * Allocate hardware transmit queues: one queue for
	 * beacon frames and one data queue for each QoS
	 * priority.  Note that the hal handles resetting
	 * these queues at the needed time.
	 */
	sc->sc_bhalq = ath_beaconq_setup(ah);
	if (sc->sc_bhalq == -1) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup a beacon xmit queue\n", __func__);
		error = -EIO;
		goto bad2;
	}
	sc->sc_cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
	if (sc->sc_cabq == NULL) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup CAB xmit queue\n", __func__);
		error = -EIO;
		goto bad2;
	}

	sc->sc_config.cabqReadytime = ATH_CABQ_READY_TIME;
	ath_cabq_update(sc);

	for (i = 0; i < ARRAY_SIZE(sc->sc_haltype2q); i++)
		sc->sc_haltype2q[i] = -1;

	/* Setup data queues */
	/* NB: ensure BK queue is the lowest priority h/w queue */
	if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup xmit queue for BK traffic\n",
			__func__);
		error = -EIO;
		goto bad2;
	}

	if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup xmit queue for BE traffic\n",
			__func__);
		error = -EIO;
		goto bad2;
	}
	if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup xmit queue for VI traffic\n",
			__func__);
		error = -EIO;
		goto bad2;
	}
	if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup xmit queue for VO traffic\n",
			__func__);
		error = -EIO;
		goto bad2;
	}

	sc->sc_rc = ath_rate_attach(ah);
	if (sc->sc_rc == NULL) {
		error = -EIO;
		goto bad2;
	}

	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
				   ATH9K_CIPHER_TKIP, NULL)) {
		/*
		 * Whether we should enable h/w TKIP MIC.
		 * XXX: if we don't support WME TKIP MIC, then we wouldn't
		 * report WMM capable, so it's always safe to turn on
		 * TKIP MIC in this case.
		 */
		ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC,
				       0, 1, NULL);
	}

	/*
	 * Check whether the separate key cache entries
	 * are required to handle both tx+rx MIC keys.
	 * With split mic keys the number of stations is limited
	 * to 27 otherwise 59.
	 */
	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
				   ATH9K_CIPHER_TKIP, NULL)
	    && ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
				      ATH9K_CIPHER_MIC, NULL)
	    && ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT,
				      0, NULL))
		sc->sc_splitmic = 1;

	/* turn on mcast key search if possible */
	if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
		(void)ath9k_hw_setcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 1,
					     1, NULL);

	sc->sc_config.txpowlimit = ATH_TXPOWER_MAX;
	sc->sc_config.txpowlimit_override = 0;

	/* 11n Capabilities */
	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) {
		sc->sc_flags |= SC_OP_TXAGGR;
		sc->sc_flags |= SC_OP_RXAGGR;
	}

	sc->sc_tx_chainmask = ah->ah_caps.tx_chainmask;
	sc->sc_rx_chainmask = ah->ah_caps.rx_chainmask;

	ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
	sc->sc_defant = ath9k_hw_getdefantenna(ah);

	ath9k_hw_getmac(ah, sc->sc_myaddr);
	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) {
		ath9k_hw_getbssidmask(ah, sc->sc_bssidmask);
		ATH_SET_VAP_BSSID_MASK(sc->sc_bssidmask);
		ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);
	}
	sc->sc_slottime = ATH9K_SLOT_TIME_9;	/* default to short slot time */

	/* initialize beacon slots */
	for (i = 0; i < ARRAY_SIZE(sc->sc_bslot); i++)
		sc->sc_bslot[i] = ATH_IF_ID_ANY;

	/* save MISC configurations */
	sc->sc_config.swBeaconProcess = 1;

#ifdef CONFIG_SLOW_ANT_DIV
	/* range is 40 - 255, we use something in the middle */
	ath_slow_ant_div_init(&sc->sc_antdiv, sc, 0x127);
#endif

	return 0;
bad2:
	/* cleanup tx queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->sc_txq[i]);
bad:
	if (ah)
		ath9k_hw_detach(ah);

	return error;
}
void ath_deinit(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	int i;

	DPRINTF(sc, ATH_DBG_CONFIG, "%s\n", __func__);

	if (!(sc->sc_flags & SC_OP_INVALID))
		ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
	ath_rate_detach(sc->sc_rc);
	/* cleanup tx queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->sc_txq[i]);
	ath9k_hw_detach(ah);
}
/*******************/
/* Node Management */
/*******************/
struct ath_node *ath_node_attach(struct ath_softc *sc, u8 *addr, int if_id)
{
	struct ath_vap *avp;
	struct ath_node *an;
	DECLARE_MAC_BUF(mac);

	avp = sc->sc_vaps[if_id];
	ASSERT(avp != NULL);

	/* mac80211 sta_notify callback is from an IRQ context, so no sleep */
	an = kmalloc(sizeof(struct ath_node), GFP_ATOMIC);
	if (an == NULL)
		return NULL;
	memzero(an, sizeof(*an));

	an->an_sc = sc;
	memcpy(an->an_addr, addr, ETH_ALEN);
	atomic_set(&an->an_refcnt, 1);

	/* set up per-node tx/rx state */
	ath_tx_node_init(sc, an);
	ath_rx_node_init(sc, an);

	ath_chainmask_sel_init(sc, an);
	ath_chainmask_sel_timerstart(&an->an_chainmask_sel);
	list_add(&an->list, &sc->node_list);

	return an;
}
void ath_node_detach(struct ath_softc *sc, struct ath_node *an, bool bh_flag)
{
	unsigned long flags;

	DECLARE_MAC_BUF(mac);

	ath_chainmask_sel_timerstop(&an->an_chainmask_sel);
	an->an_flags |= ATH_NODE_CLEAN;
	ath_tx_node_cleanup(sc, an, bh_flag);
	ath_rx_node_cleanup(sc, an);

	ath_tx_node_free(sc, an);
	ath_rx_node_free(sc, an);

	spin_lock_irqsave(&sc->node_lock, flags);

	list_del(&an->list);

	spin_unlock_irqrestore(&sc->node_lock, flags);

	kfree(an);
}
/* Finds a node and increases the refcnt if found */

struct ath_node *ath_node_get(struct ath_softc *sc, u8 *addr)
{
	struct ath_node *an = NULL, *an_found = NULL;

	if (list_empty(&sc->node_list))	/* FIXME */
		goto out;
	list_for_each_entry(an, &sc->node_list, list) {
		if (!compare_ether_addr(an->an_addr, addr)) {
			atomic_inc(&an->an_refcnt);
			an_found = an;
			break;
		}
	}
out:
	return an_found;
}

/* Decrements the refcnt and if it drops to zero, detach the node */

void ath_node_put(struct ath_softc *sc, struct ath_node *an, bool bh_flag)
{
	if (atomic_dec_and_test(&an->an_refcnt))
		ath_node_detach(sc, an, bh_flag);
}

/* Finds a node, doesn't increment refcnt.  Caller must hold sc->node_lock */
struct ath_node *ath_node_find(struct ath_softc *sc, u8 *addr)
{
	struct ath_node *an = NULL, *an_found = NULL;

	if (list_empty(&sc->node_list))
		return NULL;

	list_for_each_entry(an, &sc->node_list, list)
		if (!compare_ether_addr(an->an_addr, addr)) {
			an_found = an;
			break;
		}

	return an_found;
}
/*
 * Setup driver-specific state for a newly associated node.  This routine
 * really only applies if compression or XR are enabled, there is no code
 * covering any other cases.
 */
void ath_newassoc(struct ath_softc *sc,
		  struct ath_node *an, int isnew, int isuapsd)
{
	int tidno;

	/* if station reassociates, tear down the aggregation state. */
	if (!isnew) {
		for (tidno = 0; tidno < WME_NUM_TID; tidno++) {
			if (sc->sc_flags & SC_OP_TXAGGR)
				ath_tx_aggr_teardown(sc, an, tidno);
			if (sc->sc_flags & SC_OP_RXAGGR)
				ath_rx_aggr_teardown(sc, an, tidno);
		}
	}
}
void ath_key_reset(struct ath_softc *sc, u16 keyix, int freeslot)
{
	ath9k_hw_keyreset(sc->sc_ah, keyix);
	if (freeslot)
		clear_bit(keyix, sc->sc_keymap);
}

int ath_keyset(struct ath_softc *sc,
	       u16 keyix,
	       struct ath9k_keyval *hk,
	       const u8 mac[ETH_ALEN])
{
	bool status;

	status = ath9k_hw_set_keycache_entry(sc->sc_ah,
		keyix, hk, mac, false);

	return status != false;
}
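
/*
 * Usage sketch (editorial note, hypothetical caller): ath_keyset()
 * folds the HAL bool into the driver's "non-zero on success"
 * convention, so a caller treats a zero return as a failed key cache
 * write:
 *
 *	if (!ath_keyset(sc, keyix, &hk, mac))
 *		error = -EIO;
 */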
/***********************/
/* TX Power/Regulatory */
/***********************/

/*
 * Set Transmit power in HAL
 *
 * This routine makes the actual HAL calls to set the new transmit power
 * limit.
 */
void ath_update_txpow(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	u32 txpow;

	if (sc->sc_curtxpow != sc->sc_config.txpowlimit) {
		ath9k_hw_set_txpowerlimit(ah, sc->sc_config.txpowlimit);
		/* read back in case value is clamped */
		ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow);
		sc->sc_curtxpow = txpow;
	}
}
/* Return the current country and domain information */
void ath_get_currentCountry(struct ath_softc *sc,
			    struct ath9k_country_entry *ctry)
{
	ath9k_regd_get_current_country(sc->sc_ah, ctry);

	/* If the HAL is not specific yet (it is band dependent),
	 * use the one we passed in. */
	if (ctry->countryCode == CTRY_DEFAULT) {
		ctry->iso[0] = 0;
		ctry->iso[1] = 0;
	} else if (ctry->iso[0] && ctry->iso[1]) {
		if (!ctry->iso[2]) {
			if (ath_outdoor)
				ctry->iso[2] = 'O';
			else
				ctry->iso[2] = 'I';
		}
	}
}
/**************************/
/* Slow Antenna Diversity */
/**************************/
void ath_slow_ant_div_init(struct ath_antdiv *antdiv,
			   struct ath_softc *sc,
			   int32_t rssitrig)
{
	int trig;

	/* antdivf_rssitrig can range from 40 - 0xff */
	trig = (rssitrig > 0xff) ? 0xff : rssitrig;
	trig = (trig < 40) ? 40 : trig;

	antdiv->antdiv_sc = sc;
	antdiv->antdivf_rssitrig = trig;
}
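
/*
 * Worked example (editorial note): the two ternaries clamp rssitrig
 * into the documented 40 - 0xff range.  The 0x127 (= 295) trigger
 * passed from ath_init() therefore clamps down to 0xff, while a value
 * such as 30 is raised to 40.
 */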
void ath_slow_ant_div_start(struct ath_antdiv *antdiv,
			    u8 num_antcfg,
			    const u8 *bssid)
{
	antdiv->antdiv_num_antcfg =
		num_antcfg < ATH_ANT_DIV_MAX_CFG ?
		num_antcfg : ATH_ANT_DIV_MAX_CFG;
	antdiv->antdiv_state = ATH_ANT_DIV_IDLE;
	antdiv->antdiv_curcfg = 0;
	antdiv->antdiv_bestcfg = 0;
	antdiv->antdiv_laststatetsf = 0;

	memcpy(antdiv->antdiv_bssid, bssid, sizeof(antdiv->antdiv_bssid));

	antdiv->antdiv_start = 1;
}

void ath_slow_ant_div_stop(struct ath_antdiv *antdiv)
{
	antdiv->antdiv_start = 0;
}
static int32_t ath_find_max_val(int32_t *val,
				u8 num_val, u8 *max_index)
{
	u32 MaxVal = *val++;
	u8 cur_index = 0;

	*max_index = 0;
	while (++cur_index < num_val) {
		if (*val > MaxVal) {
			MaxVal = *val;
			*max_index = cur_index;
		}
		val++;
	}

	return MaxVal;
}
void ath_slow_ant_div(struct ath_antdiv *antdiv,
		      struct ieee80211_hdr *hdr,
		      struct ath_rx_status *rx_stats)
{
	struct ath_softc *sc = antdiv->antdiv_sc;
	struct ath_hal *ah = sc->sc_ah;
	u64 curtsf = 0;
	u8 bestcfg, curcfg = antdiv->antdiv_curcfg;
	__le16 fc = hdr->frame_control;

	if (antdiv->antdiv_start && ieee80211_is_beacon(fc)
	    && !compare_ether_addr(hdr->addr3, antdiv->antdiv_bssid)) {
		antdiv->antdiv_lastbrssi[curcfg] = rx_stats->rs_rssi;
		antdiv->antdiv_lastbtsf[curcfg] = ath9k_hw_gettsf64(sc->sc_ah);
		curtsf = antdiv->antdiv_lastbtsf[curcfg];
	} else {
		return;
	}

	switch (antdiv->antdiv_state) {
	case ATH_ANT_DIV_IDLE:
		if ((antdiv->antdiv_lastbrssi[curcfg] <
		     antdiv->antdivf_rssitrig)
		    && ((curtsf - antdiv->antdiv_laststatetsf) >
			ATH_ANT_DIV_MIN_IDLE_US)) {

			curcfg++;
			if (curcfg == antdiv->antdiv_num_antcfg)
				curcfg = 0;

			if (!ath9k_hw_select_antconfig(ah, curcfg)) {
				antdiv->antdiv_bestcfg = antdiv->antdiv_curcfg;
				antdiv->antdiv_curcfg = curcfg;
				antdiv->antdiv_laststatetsf = curtsf;
				antdiv->antdiv_state = ATH_ANT_DIV_SCAN;
			}
		}
		break;

	case ATH_ANT_DIV_SCAN:
		if ((curtsf - antdiv->antdiv_laststatetsf) <
		    ATH_ANT_DIV_MIN_SCAN_US)
			break;

		curcfg++;
		if (curcfg == antdiv->antdiv_num_antcfg)
			curcfg = 0;

		if (curcfg == antdiv->antdiv_bestcfg) {
			ath_find_max_val(antdiv->antdiv_lastbrssi,
					 antdiv->antdiv_num_antcfg, &bestcfg);
			if (!ath9k_hw_select_antconfig(ah, bestcfg)) {
				antdiv->antdiv_bestcfg = bestcfg;
				antdiv->antdiv_curcfg = bestcfg;
				antdiv->antdiv_laststatetsf = curtsf;
				antdiv->antdiv_state = ATH_ANT_DIV_IDLE;
			}
		} else {
			if (!ath9k_hw_select_antconfig(ah, curcfg)) {
				antdiv->antdiv_curcfg = curcfg;
				antdiv->antdiv_laststatetsf = curtsf;
				antdiv->antdiv_state = ATH_ANT_DIV_SCAN;
			}
		}

		break;
	}
}
/***********************/
/* Descriptor Handling */
/***********************/
/*
 * Set up DMA descriptors
 *
 * This function will allocate both the DMA descriptor structure, and the
 * buffers it contains.  These are used to contain the descriptors used
 * by the system.
 */
int ath_descdma_setup(struct ath_softc *sc,
		      struct ath_descdma *dd,
		      struct list_head *head,
		      const char *name,
		      int nbuf,
		      int ndesc)
{
#define	DS2PHYS(_dd, _ds)						\
	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)

	struct ath_desc *ds;
	struct ath_buf *bf;
	int i, bsize, error;

	DPRINTF(sc, ATH_DBG_CONFIG, "%s: %s DMA: %u buffers %u desc/buf\n",
		__func__, name, nbuf, ndesc);

	/* ath_desc must be a multiple of DWORDs */
	if ((sizeof(struct ath_desc) % 4) != 0) {
		DPRINTF(sc, ATH_DBG_FATAL, "%s: ath_desc not DWORD aligned\n",
			__func__);
		ASSERT((sizeof(struct ath_desc) % 4) == 0);
		error = -ENOMEM;
		goto fail;
	}

	dd->dd_name = name;
	dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;

	/*
	 * Need additional DMA memory because we can't use
	 * descriptors that cross the 4K page boundary.  Assume
	 * one skipped descriptor per 4K page.
	 */
	if (!(sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
		u32 ndesc_skipped =
			ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
		u32 dma_len;

		while (ndesc_skipped) {
			dma_len = ndesc_skipped * sizeof(struct ath_desc);
			dd->dd_desc_len += dma_len;

			ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
		}
	}

	/* allocate descriptors */
	dd->dd_desc = pci_alloc_consistent(sc->pdev,
					   dd->dd_desc_len,
					   &dd->dd_desc_paddr);
	if (dd->dd_desc == NULL) {
		error = -ENOMEM;
		goto fail;
	}
	ds = dd->dd_desc;
	DPRINTF(sc, ATH_DBG_CONFIG, "%s: %s DMA map: %p (%u) -> %llx (%u)\n",
		__func__, dd->dd_name, ds, (u32) dd->dd_desc_len,
		ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);

	/* allocate buffers */
	bsize = sizeof(struct ath_buf) * nbuf;
	bf = kmalloc(bsize, GFP_KERNEL);
	if (bf == NULL) {
		error = -ENOMEM;
		goto fail2;
	}
	memzero(bf, bsize);
	dd->dd_bufptr = bf;

	INIT_LIST_HEAD(head);
	for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
		bf->bf_desc = ds;
		bf->bf_daddr = DS2PHYS(dd, ds);

		if (!(sc->sc_ah->ah_caps.hw_caps &
		      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
			/*
			 * Skip descriptor addresses which can cause 4KB
			 * boundary crossing (addr + length) with a 32 dword
			 * descriptor fetch.
			 */
			while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
				ASSERT((caddr_t) bf->bf_desc <
				       ((caddr_t) dd->dd_desc +
					dd->dd_desc_len));

				ds += ndesc;
				bf->bf_desc = ds;
				bf->bf_daddr = DS2PHYS(dd, ds);
			}
		}
		list_add_tail(&bf->list, head);
	}
	return 0;
fail2:
	pci_free_consistent(sc->pdev,
			    dd->dd_desc_len, dd->dd_desc, dd->dd_desc_paddr);
fail:
	memzero(dd, sizeof(*dd));
	return error;
#undef ATH_DESC_4KB_BOUND_CHECK
#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
#undef DS2PHYS
}
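
/*
 * Worked example (editorial note): ATH_DESC_4KB_BOUND_CHECK above
 * flags any descriptor whose offset within a 4 KB page exceeds 0xF7F,
 * i.e. fewer than 0x80 bytes (one 32-DWORD descriptor fetch) remain
 * before the boundary.  For daddr = 0x2F80, (0x2F80 & 0xFFF) = 0xF80 >
 * 0xF7F, so that slot is skipped and the loop advances ds by ndesc
 * entries into the next page.
 */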
/*
 * Cleanup DMA descriptors
 *
 * This function will free the DMA block that was allocated for the descriptor
 * pool.  Since this was allocated as one "chunk", it is freed in the same
 * manner.
 */
void ath_descdma_cleanup(struct ath_softc *sc,
			 struct ath_descdma *dd,
			 struct list_head *head)
{
	/* Free memory associated with descriptors */
	pci_free_consistent(sc->pdev,
			    dd->dd_desc_len, dd->dd_desc, dd->dd_desc_paddr);

	INIT_LIST_HEAD(head);
	kfree(dd->dd_bufptr);
	memzero(dd, sizeof(*dd));
}
int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
{
	int qnum;

	switch (queue) {
	case 0:
		qnum = sc->sc_haltype2q[ATH9K_WME_AC_VO];
		break;
	case 1:
		qnum = sc->sc_haltype2q[ATH9K_WME_AC_VI];
		break;
	case 2:
		qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
		break;
	case 3:
		qnum = sc->sc_haltype2q[ATH9K_WME_AC_BK];
		break;
	default:
		qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
		break;
	}

	return qnum;
}

int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc)
{
	int qnum;

	switch (queue) {
	case ATH9K_WME_AC_VO:
		qnum = 0;
		break;
	case ATH9K_WME_AC_VI:
		qnum = 1;
		break;
	case ATH9K_WME_AC_BE:
		qnum = 2;
		break;
	case ATH9K_WME_AC_BK:
		qnum = 3;
		break;
	default:
		qnum = -1;
		break;
	}

	return qnum;
}
/*
 * Expand time stamp to TSF
 *
 * Extend 15-bit time stamp from rx descriptor to
 * a full 64-bit TSF using the current h/w TSF.
 */
u64 ath_extend_tsf(struct ath_softc *sc, u32 rstamp)
{
	u64 tsf;

	tsf = ath9k_hw_gettsf64(sc->sc_ah);
	if ((tsf & 0x7fff) < rstamp)
		tsf -= 0x8000;

	return (tsf & ~0x7fff) | rstamp;
}
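
/*
 * Worked example (editorial note): suppose the h/w TSF reads
 * 0x12348001 and the rx descriptor carries rstamp = 0x7f00.  The low
 * 15 TSF bits are 0x0001, smaller than rstamp, so the frame was
 * stamped in the previous 15-bit window and the TSF is rolled back by
 * 0x8000 to 0x12340001.  Splicing in the stamp then gives
 *
 *	(0x12340001 & ~0x7fff) | 0x7f00 = 0x12347f00
 *
 * i.e. 0x101 microseconds before the current TSF, as expected.
 */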
/*
 * Set Default Antenna
 *
 * Call into the HAL to set the default antenna to use.  Not really valid for
 * MIMO technology.
 */
void ath_setdefantenna(void *context, u32 antenna)
{
	struct ath_softc *sc = (struct ath_softc *)context;
	struct ath_hal *ah = sc->sc_ah;

	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(ah, antenna);
	sc->sc_defant = antenna;
	sc->sc_rxotherant = 0;
}
/*
 * Set Slot Time
 *
 * This will wake up the chip if required, and set the slot time for the
 * frame (maximum transmit time).  Slot time is assumed to be already set
 * in the ATH object member sc_slottime.
 */
void ath_setslottime(struct ath_softc *sc)
{
	ath9k_hw_setslottime(sc->sc_ah, sc->sc_slottime);
	sc->sc_updateslot = OK;
}