/*
 * Copyright (c) 2008, Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Implementation of the main "ATH" layer. */
static int ath_outdoor;		/* enable outdoor use */

static u32 ath_chainmask_sel_up_rssi_thres =
	ATH_CHAINMASK_SEL_UP_RSSI_THRES;
static u32 ath_chainmask_sel_down_rssi_thres =
	ATH_CHAINMASK_SEL_DOWN_RSSI_THRES;
static u32 ath_chainmask_sel_period =
	ATH_CHAINMASK_SEL_TIMEOUT;
/* return bus cachesize in 4B word units */

static void bus_read_cachesize(struct ath_softc *sc, int *csz)
{
	u8 u8tmp;

	pci_read_config_byte(sc->pdev, PCI_CACHE_LINE_SIZE, (u8 *)&u8tmp);
	*csz = (int)u8tmp;

	/*
	 * This check was put in to avoid "unpleasant" consequences if
	 * the bootrom has not fully initialized all PCI devices.
	 * Sometimes the cache line size register is not set.
	 */
	if (*csz == 0)
		*csz = DEFAULT_CACHELINE >> 2;	/* Use the default size */
}
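
/*
 * Worked example (illustrative values, not from any specific platform):
 * if the PCI_CACHE_LINE_SIZE register reads 0x10, the bus cache line is
 * 16 4-byte words, i.e. 64 bytes. ath_init() converts back to bytes with
 * "sc->sc_cachelsz = csz << 2".
 */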
/*
 * Set current operating mode
 *
 * This function initializes and fills the rate table in the ATH object based
 * on the operating mode.
 */
static void ath_setcurmode(struct ath_softc *sc, enum wireless_mode mode)
{
	const struct ath9k_rate_table *rt;
	int i;

	memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
	rt = ath9k_hw_getratetable(sc->sc_ah, mode);

	for (i = 0; i < rt->rateCount; i++)
		sc->sc_rixmap[rt->info[i].rateCode] = (u8) i;

	memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap));
	for (i = 0; i < 256; i++) {
		u8 ix = rt->rateCodeToIndex[i];

		sc->sc_hwmap[i].ieeerate =
			rt->info[ix].dot11Rate & IEEE80211_RATE_VAL;
		sc->sc_hwmap[i].rateKbps = rt->info[ix].rateKbps;

		if (rt->info[ix].shortPreamble ||
		    rt->info[ix].phy == PHY_OFDM) {
			/* XXX: Handle this */
		}

		/* NB: this uses the last entry if the rate isn't found */
		/* XXX beware of overflow */
	}
	sc->sc_curmode = mode;

	/*
	 * All protection frames are transmitted at 2Mb/s for
	 * 11g, otherwise at 1Mb/s.
	 * XXX select protection rate index from rate table.
	 */
	sc->sc_protrix = (mode == ATH9K_MODE_11G ? 1 : 0);
}
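
/*
 * Sketch of how the two maps built above are used; the rate code value
 * is purely illustrative. sc_rixmap inverts the rate table (hardware
 * rate code -> table index), so if rt->info[3].rateCode were 0x0b, then
 * sc_rixmap[0x0b] == 3 and the tx/rx completion paths can recover the
 * table entry for a descriptor's rate code. sc_hwmap is the companion
 * lookup from rate code to IEEE rate and Kbps.
 */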
/*
 * Set up rate table (legacy rates)
 */
static void ath_setup_rates(struct ath_softc *sc, enum ieee80211_band band)
{
	struct ath_hal *ah = sc->sc_ah;
	const struct ath9k_rate_table *rt = NULL;
	struct ieee80211_supported_band *sband;
	struct ieee80211_rate *rate;
	int i, maxrates;

	switch (band) {
	case IEEE80211_BAND_2GHZ:
		rt = ath9k_hw_getratetable(ah, ATH9K_MODE_11G);
		break;
	case IEEE80211_BAND_5GHZ:
		rt = ath9k_hw_getratetable(ah, ATH9K_MODE_11A);
		break;
	default:
		break;
	}

	if (rt == NULL)
		return;

	sband = &sc->sbands[band];
	rate = sc->rates[band];

	if (rt->rateCount > ATH_RATE_MAX)
		maxrates = ATH_RATE_MAX;
	else
		maxrates = rt->rateCount;

	for (i = 0; i < maxrates; i++) {
		rate[i].bitrate = rt->info[i].rateKbps / 100;
		rate[i].hw_value = rt->info[i].rateCode;

		DPRINTF(sc, ATH_DBG_CONFIG,
			"%s: Rate: %2dMbps, ratecode: %2d\n",
			__func__,
			rate[i].bitrate / 10,
			rate[i].hw_value);
	}
}
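
/*
 * Unit check for the conversion above: rateKbps is in kbit/s and
 * mac80211's bitrate field is in units of 100 kbit/s, hence the /100;
 * e.g. a 54000 kbit/s entry becomes bitrate == 540, and the DPRINTF
 * prints bitrate / 10 == 54 Mbps.
 */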
/*
 * Set up channel list
 */
static int ath_setup_channels(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	int nchan, i, a = 0, b = 0;
	u8 regclassids[ATH_REGCLASSIDS_MAX];
	u32 nregclass = 0;
	struct ieee80211_supported_band *band_2ghz;
	struct ieee80211_supported_band *band_5ghz;
	struct ieee80211_channel *chan_2ghz;
	struct ieee80211_channel *chan_5ghz;
	struct ath9k_channel *c;

	/* Fill in ah->ah_channels */
	if (!ath9k_regd_init_channels(ah,
				      ATH_CHAN_MAX,
				      (u32 *)&nchan,
				      regclassids,
				      ATH_REGCLASSIDS_MAX,
				      &nregclass,
				      CTRY_DEFAULT,
				      false,
				      1)) {
		u32 rd = ah->ah_currentRD;

		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to collect channel list; "
			"regdomain likely %u country code %u\n",
			__func__, rd, CTRY_DEFAULT);
		return -EINVAL;
	}

	band_2ghz = &sc->sbands[IEEE80211_BAND_2GHZ];
	band_5ghz = &sc->sbands[IEEE80211_BAND_5GHZ];
	chan_2ghz = sc->channels[IEEE80211_BAND_2GHZ];
	chan_5ghz = sc->channels[IEEE80211_BAND_5GHZ];

	for (i = 0; i < nchan; i++) {
		c = &ah->ah_channels[i];
		if (IS_CHAN_2GHZ(c)) {
			chan_2ghz[a].band = IEEE80211_BAND_2GHZ;
			chan_2ghz[a].center_freq = c->channel;
			chan_2ghz[a].max_power = c->maxTxPower;

			if (c->privFlags & CHANNEL_DISALLOW_ADHOC)
				chan_2ghz[a].flags |=
					IEEE80211_CHAN_NO_IBSS;
			if (c->channelFlags & CHANNEL_PASSIVE)
				chan_2ghz[a].flags |=
					IEEE80211_CHAN_PASSIVE_SCAN;

			band_2ghz->n_channels = ++a;

			DPRINTF(sc, ATH_DBG_CONFIG,
				"%s: 2GHz channel: %d, "
				"channelFlags: 0x%x\n",
				__func__, c->channel, c->channelFlags);
		} else if (IS_CHAN_5GHZ(c)) {
			chan_5ghz[b].band = IEEE80211_BAND_5GHZ;
			chan_5ghz[b].center_freq = c->channel;
			chan_5ghz[b].max_power = c->maxTxPower;

			if (c->privFlags & CHANNEL_DISALLOW_ADHOC)
				chan_5ghz[b].flags |=
					IEEE80211_CHAN_NO_IBSS;
			if (c->channelFlags & CHANNEL_PASSIVE)
				chan_5ghz[b].flags |=
					IEEE80211_CHAN_PASSIVE_SCAN;

			band_5ghz->n_channels = ++b;

			DPRINTF(sc, ATH_DBG_CONFIG,
				"%s: 5GHz channel: %d, "
				"channelFlags: 0x%x\n",
				__func__, c->channel, c->channelFlags);
		}
	}

	return 0;
}
/*
 * Determine mode from channel flags
 *
 * This routine will provide the enumerated WIRELESS_MODE value based
 * on the settings of the channel flags. If no valid set of flags
 * exists, the lowest mode (11b) is selected.
 */
static enum wireless_mode ath_chan2mode(struct ath9k_channel *chan)
{
	if (chan->chanmode == CHANNEL_A)
		return ATH9K_MODE_11A;
	else if (chan->chanmode == CHANNEL_G)
		return ATH9K_MODE_11G;
	else if (chan->chanmode == CHANNEL_B)
		return ATH9K_MODE_11B;
	else if (chan->chanmode == CHANNEL_A_HT20)
		return ATH9K_MODE_11NA_HT20;
	else if (chan->chanmode == CHANNEL_G_HT20)
		return ATH9K_MODE_11NG_HT20;
	else if (chan->chanmode == CHANNEL_A_HT40PLUS)
		return ATH9K_MODE_11NA_HT40PLUS;
	else if (chan->chanmode == CHANNEL_A_HT40MINUS)
		return ATH9K_MODE_11NA_HT40MINUS;
	else if (chan->chanmode == CHANNEL_G_HT40PLUS)
		return ATH9K_MODE_11NG_HT40PLUS;
	else if (chan->chanmode == CHANNEL_G_HT40MINUS)
		return ATH9K_MODE_11NG_HT40MINUS;

	WARN_ON(1); /* should not get here */

	return ATH9K_MODE_11B;
}
/*
 * Stop the device, grabbing the top-level lock to protect
 * against concurrent entry through ath_init (which can happen
 * if another thread does a system call and the thread doing the
 * stop is preempted).
 */
static int ath_stop(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;

	DPRINTF(sc, ATH_DBG_CONFIG, "%s: invalid %ld\n",
		__func__, sc->sc_flags & SC_OP_INVALID);

	/*
	 * Shutdown the hardware and driver:
	 *    stop output from above
	 *    clear transmit machinery
	 *    clear receive machinery
	 *    reclaim beacon resources
	 *
	 * Note that some of this work is not possible if the
	 * hardware is gone (invalid).
	 */
	ath_draintxq(sc, false);
	if (!(sc->sc_flags & SC_OP_INVALID)) {
		ath_stoprecv(sc);
		ath9k_hw_phy_disable(ah);
	} else
		sc->sc_rxlink = NULL;

	return 0;
}
/*
 * Set the current channel
 *
 * Set/change channels. If the channel is really being changed, it's done
 * by resetting the chip. To accomplish this we must first clean up any
 * pending DMA, then restart things as in ath_init.
 */
int ath_set_channel(struct ath_softc *sc, struct ath9k_channel *hchan)
{
	struct ath_hal *ah = sc->sc_ah;
	bool fastcc = true, stopped;

	if (sc->sc_flags & SC_OP_INVALID) /* the device is invalid or removed */
		return -EIO;

	DPRINTF(sc, ATH_DBG_CONFIG,
		"%s: %u (%u MHz) -> %u (%u MHz), cflags:%x\n",
		__func__,
		ath9k_hw_mhz2ieee(ah, sc->sc_ah->ah_curchan->channel,
				  sc->sc_ah->ah_curchan->channelFlags),
		sc->sc_ah->ah_curchan->channel,
		ath9k_hw_mhz2ieee(ah, hchan->channel, hchan->channelFlags),
		hchan->channel, hchan->channelFlags);

	if (hchan->channel != sc->sc_ah->ah_curchan->channel ||
	    hchan->channelFlags != sc->sc_ah->ah_curchan->channelFlags ||
	    (sc->sc_flags & SC_OP_CHAINMASK_UPDATE) ||
	    (sc->sc_flags & SC_OP_FULL_RESET)) {
		int status;
		/*
		 * This is only performed if the channel settings have
		 * actually changed.
		 *
		 * To switch channels clear any pending DMA operations;
		 * wait long enough for the RX fifo to drain, reset the
		 * hardware at the new frequency, and then re-enable
		 * the relevant bits of the h/w.
		 */
		ath9k_hw_set_interrupts(ah, 0);	/* disable interrupts */
		ath_draintxq(sc, false);	/* clear pending tx frames */
		stopped = ath_stoprecv(sc);	/* turn off frame recv */

		/* XXX: do not flush receive queue here. We don't want
		 * to flush data frames already in queue because of
		 * changing channel. */

		if (!stopped || (sc->sc_flags & SC_OP_FULL_RESET))
			fastcc = false;

		spin_lock_bh(&sc->sc_resetlock);
		if (!ath9k_hw_reset(ah, hchan,
				    sc->sc_ht_info.tx_chan_width,
				    sc->sc_tx_chainmask,
				    sc->sc_rx_chainmask,
				    sc->sc_ht_extprotspacing,
				    fastcc, &status)) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"%s: unable to reset channel %u (%uMhz) "
				"flags 0x%x hal status %u\n", __func__,
				ath9k_hw_mhz2ieee(ah, hchan->channel,
						  hchan->channelFlags),
				hchan->channel, hchan->channelFlags, status);
			spin_unlock_bh(&sc->sc_resetlock);
			return -EIO;
		}
		spin_unlock_bh(&sc->sc_resetlock);

		sc->sc_flags &= ~SC_OP_CHAINMASK_UPDATE;
		sc->sc_flags &= ~SC_OP_FULL_RESET;

		/* Re-enable rx framework */
		if (ath_startrecv(sc) != 0) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"%s: unable to restart recv logic\n",
				__func__);
			return -EIO;
		}

		/*
		 * Change channels and update the h/w rate map
		 * if we're switching; e.g. 11a to 11b/g.
		 */
		ath_setcurmode(sc, ath_chan2mode(hchan));

		ath_update_txpow(sc);	/* update tx power state */

		/*
		 * Re-enable interrupts.
		 */
		ath9k_hw_set_interrupts(ah, sc->sc_imask);
	}

	return 0;
}
/**********************/
/* Chainmask Handling */
/**********************/

static void ath_chainmask_sel_timertimeout(unsigned long data)
{
	struct ath_chainmask_sel *cm = (struct ath_chainmask_sel *)data;
	cm->switch_allowed = 1;
}

/* Start chainmask select timer */
static void ath_chainmask_sel_timerstart(struct ath_chainmask_sel *cm)
{
	cm->switch_allowed = 0;
	/* the period is in ms; mod_timer() wants an absolute jiffies expiry */
	mod_timer(&cm->timer,
		  jiffies + msecs_to_jiffies(ath_chainmask_sel_period));
}

/* Stop chainmask select timer */
static void ath_chainmask_sel_timerstop(struct ath_chainmask_sel *cm)
{
	cm->switch_allowed = 0;
	del_timer_sync(&cm->timer);
}

static void ath_chainmask_sel_init(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_chainmask_sel *cm = &an->an_chainmask_sel;

	memset(cm, 0, sizeof(struct ath_chainmask_sel));

	cm->cur_tx_mask = sc->sc_tx_chainmask;
	cm->cur_rx_mask = sc->sc_rx_chainmask;
	cm->tx_avgrssi = ATH_RSSI_DUMMY_MARKER;
	setup_timer(&cm->timer,
		    ath_chainmask_sel_timertimeout, (unsigned long) cm);
}
int ath_chainmask_sel_logic(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_chainmask_sel *cm = &an->an_chainmask_sel;

	/*
	 * Disable auto-switching in one of the following if conditions.
	 * sc_chainmask_auto_sel is used for internal global auto-switching
	 * enabled/disabled setting
	 */
	if (sc->sc_ah->ah_caps.tx_chainmask != ATH_CHAINMASK_SEL_3X3) {
		cm->cur_tx_mask = sc->sc_tx_chainmask;
		return cm->cur_tx_mask;
	}

	if (cm->tx_avgrssi == ATH_RSSI_DUMMY_MARKER)
		return cm->cur_tx_mask;

	if (cm->switch_allowed) {
		/* Switch down from tx 3 to tx 2. */
		if (cm->cur_tx_mask == ATH_CHAINMASK_SEL_3X3 &&
		    ATH_RSSI_OUT(cm->tx_avgrssi) >=
		    ath_chainmask_sel_down_rssi_thres) {
			cm->cur_tx_mask = sc->sc_tx_chainmask;

			/* Don't let another switch happen until
			 * this timer expires */
			ath_chainmask_sel_timerstart(cm);
		}
		/* Switch up from tx 2 to 3. */
		else if (cm->cur_tx_mask == sc->sc_tx_chainmask &&
			 ATH_RSSI_OUT(cm->tx_avgrssi) <=
			 ath_chainmask_sel_up_rssi_thres) {
			cm->cur_tx_mask = ATH_CHAINMASK_SEL_3X3;

			/* Don't let another switch happen
			 * until this timer expires */
			ath_chainmask_sel_timerstart(cm);
		}
	}

	return cm->cur_tx_mask;
}
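
/*
 * The two thresholds above implement simple hysteresis: with a strong
 * signal (average RSSI at or above the "down" threshold) two transmit
 * chains are enough, while a weak signal (at or below the "up"
 * threshold) switches back to 3x3. The timer started on each switch
 * enforces a minimum dwell time so the selection cannot flap between
 * the two masks on every RSSI sample.
 */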
/*
 * Update tx/rx chainmask. For legacy association,
 * hard code chainmask to 1x1; for 11n association, use
 * the chainmask configuration.
 */
void ath_update_chainmask(struct ath_softc *sc, int is_ht)
{
	sc->sc_flags |= SC_OP_CHAINMASK_UPDATE;
	if (is_ht) {
		sc->sc_tx_chainmask = sc->sc_ah->ah_caps.tx_chainmask;
		sc->sc_rx_chainmask = sc->sc_ah->ah_caps.rx_chainmask;
	} else {
		sc->sc_tx_chainmask = 1;
		sc->sc_rx_chainmask = 1;
	}

	DPRINTF(sc, ATH_DBG_CONFIG, "%s: tx chmask: %d, rx chmask: %d\n",
		__func__, sc->sc_tx_chainmask, sc->sc_rx_chainmask);
}
/*
 * This routine performs the periodic noise floor calibration function
 * that is used to adjust and optimize the chip performance. This
 * takes environmental changes (location, temperature) into account.
 * When the task is complete, it reschedules itself at the appropriate
 * calculated interval.
 */
static void ath_ani_calibrate(unsigned long data)
{
	struct ath_softc *sc;
	struct ath_hal *ah;
	bool longcal = false;
	bool shortcal = false;
	bool aniflag = false;
	unsigned int timestamp = jiffies_to_msecs(jiffies);
	u32 cal_interval;

	sc = (struct ath_softc *)data;
	ah = sc->sc_ah;

	/*
	 * don't calibrate when we're scanning.
	 * we are most likely not on our home channel.
	 */
	if (sc->rx_filter & FIF_BCN_PRBRESP_PROMISC)
		return;

	/* Long calibration runs independently of short calibration. */
	if ((timestamp - sc->sc_ani.sc_longcal_timer) >= ATH_LONG_CALINTERVAL) {
		longcal = true;
		DPRINTF(sc, ATH_DBG_ANI, "%s: longcal @%lu\n",
			__func__, jiffies);
		sc->sc_ani.sc_longcal_timer = timestamp;
	}

	/* Short calibration applies only while sc_caldone is false */
	if (!sc->sc_ani.sc_caldone) {
		if ((timestamp - sc->sc_ani.sc_shortcal_timer) >=
		    ATH_SHORT_CALINTERVAL) {
			shortcal = true;
			DPRINTF(sc, ATH_DBG_ANI, "%s: shortcal @%lu\n",
				__func__, jiffies);
			sc->sc_ani.sc_shortcal_timer = timestamp;
			sc->sc_ani.sc_resetcal_timer = timestamp;
		}
	} else {
		if ((timestamp - sc->sc_ani.sc_resetcal_timer) >=
		    ATH_RESTART_CALINTERVAL) {
			ath9k_hw_reset_calvalid(ah, ah->ah_curchan,
						&sc->sc_ani.sc_caldone);
			if (sc->sc_ani.sc_caldone)
				sc->sc_ani.sc_resetcal_timer = timestamp;
		}
	}

	/* Verify whether we must check ANI */
	if ((timestamp - sc->sc_ani.sc_checkani_timer) >=
	    ATH_ANI_POLLINTERVAL) {
		aniflag = true;
		sc->sc_ani.sc_checkani_timer = timestamp;
	}

	/* Skip all processing if there's nothing to do. */
	if (longcal || shortcal || aniflag) {
		/* Call ANI routine if necessary */
		if (aniflag)
			ath9k_hw_ani_monitor(ah, &sc->sc_halstats,
					     ah->ah_curchan);

		/* Perform calibration if necessary */
		if (longcal || shortcal) {
			bool iscaldone = false;

			if (ath9k_hw_calibrate(ah, ah->ah_curchan,
					       sc->sc_rx_chainmask, longcal,
					       &iscaldone)) {
				if (longcal)
					sc->sc_ani.sc_noise_floor =
						ath9k_hw_getchan_noise(ah,
							       ah->ah_curchan);

				DPRINTF(sc, ATH_DBG_ANI,
					"%s: calibrate chan %u/%x nf: %d\n",
					__func__,
					ah->ah_curchan->channel,
					ah->ah_curchan->channelFlags,
					sc->sc_ani.sc_noise_floor);
			} else {
				DPRINTF(sc, ATH_DBG_ANY,
					"%s: calibrate chan %u/%x failed\n",
					__func__,
					ah->ah_curchan->channel,
					ah->ah_curchan->channelFlags);
			}
			sc->sc_ani.sc_caldone = iscaldone;
		}
	}

	/*
	 * Set timer interval based on previous results.
	 * The interval must be the shortest necessary to satisfy ANI,
	 * short calibration and long calibration.
	 */
	cal_interval = ATH_ANI_POLLINTERVAL;
	if (!sc->sc_ani.sc_caldone)
		cal_interval = min(cal_interval, (u32)ATH_SHORT_CALINTERVAL);

	mod_timer(&sc->sc_ani.timer, jiffies + msecs_to_jiffies(cal_interval));
}
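
/*
 * Rescheduling logic above in one line: the timer period is
 * min(ATH_ANI_POLLINTERVAL, ATH_SHORT_CALINTERVAL) in ms while
 * calibration is still pending (sc_caldone == false), and plain
 * ATH_ANI_POLLINTERVAL once it has completed; mod_timer() re-arms the
 * one-shot timer on every pass, which is what keeps this callback
 * periodic.
 */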
int ath_vap_attach(struct ath_softc *sc,
		   int if_id,
		   struct ieee80211_vif *if_data,
		   enum ath9k_opmode opmode)
{
	struct ath_vap *avp;

	if (if_id >= ATH_BCBUF || sc->sc_vaps[if_id] != NULL) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: Invalid interface id = %u\n", __func__, if_id);
		return -EINVAL;
	}

	switch (opmode) {
	case ATH9K_M_STA:
	case ATH9K_M_IBSS:
	case ATH9K_M_MONITOR:
		break;
	case ATH9K_M_HOSTAP:
		/* XXX not right, beacon buffer is allocated on RUN trans */
		if (list_empty(&sc->sc_bbuf))
			return -ENOMEM;
		break;
	default:
		return -EINVAL;
	}

	/* create ath_vap */
	avp = kmalloc(sizeof(struct ath_vap), GFP_KERNEL);
	if (avp == NULL)
		return -ENOMEM;

	memset(avp, 0, sizeof(struct ath_vap));
	avp->av_if_data = if_data;
	/* Set the VAP opmode */
	avp->av_opmode = opmode;
	avp->av_bslot = -1;

	if (opmode == ATH9K_M_HOSTAP)
		ath9k_hw_set_tsfadjust(sc->sc_ah, 1);

	sc->sc_vaps[if_id] = avp;
	sc->sc_nvaps++;
	/* Set the device opmode */
	sc->sc_ah->ah_opmode = opmode;

	/* default VAP configuration */
	avp->av_config.av_fixed_rateset = IEEE80211_FIXED_RATE_NONE;
	avp->av_config.av_fixed_retryset = 0x03030303;

	return 0;
}
int ath_vap_detach(struct ath_softc *sc, int if_id)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_vap *avp;

	avp = sc->sc_vaps[if_id];
	if (avp == NULL) {
		DPRINTF(sc, ATH_DBG_FATAL, "%s: invalid interface id %u\n",
			__func__, if_id);
		return -EINVAL;
	}

	/*
	 * Quiesce the hardware while we remove the vap. In
	 * particular we need to reclaim all references to the
	 * vap state by any frames pending on the tx queues.
	 *
	 * XXX can we do this w/o affecting other vaps?
	 */
	ath9k_hw_set_interrupts(ah, 0);	/* disable interrupts */
	ath_draintxq(sc, false);	/* stop xmit side */
	ath_stoprecv(sc);	/* stop recv side */
	ath_flushrecv(sc);	/* flush recv queue */

	kfree(avp);
	sc->sc_vaps[if_id] = NULL;
	sc->sc_nvaps--;

	return 0;
}
int ath_vap_config(struct ath_softc *sc,
		   int if_id, struct ath_vap_config *if_config)
{
	struct ath_vap *avp;

	if (if_id >= ATH_BCBUF) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: Invalid interface id = %u\n", __func__, if_id);
		return -EINVAL;
	}

	avp = sc->sc_vaps[if_id];
	ASSERT(avp != NULL);

	if (avp)
		memcpy(&avp->av_config, if_config, sizeof(avp->av_config));

	return 0;
}
int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan)
{
	struct ath_hal *ah = sc->sc_ah;
	int status;
	int error = 0;

	DPRINTF(sc, ATH_DBG_CONFIG, "%s: mode %d\n",
		__func__, sc->sc_ah->ah_opmode);

	/*
	 * Stop anything previously setup. This is safe
	 * whether this is the first time through or not.
	 */
	ath_stop(sc);

	/* Initialize chainmask selection */
	sc->sc_tx_chainmask = ah->ah_caps.tx_chainmask;
	sc->sc_rx_chainmask = ah->ah_caps.rx_chainmask;

	/* Reset SERDES registers */
	ath9k_hw_configpcipowersave(ah, 0);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''. On return the hardware is known to
	 * be powered up and with interrupts disabled. This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	spin_lock_bh(&sc->sc_resetlock);
	if (!ath9k_hw_reset(ah, initial_chan,
			    sc->sc_ht_info.tx_chan_width,
			    sc->sc_tx_chainmask, sc->sc_rx_chainmask,
			    sc->sc_ht_extprotspacing, false, &status)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to reset hardware; hal status %u "
			"(freq %u flags 0x%x)\n", __func__, status,
			initial_chan->channel, initial_chan->channelFlags);
		error = -EIO;
		spin_unlock_bh(&sc->sc_resetlock);
		goto done;
	}
	spin_unlock_bh(&sc->sc_resetlock);

	/*
	 * This is needed only to setup initial state
	 * but it's best done after a reset.
	 */
	ath_update_txpow(sc);

	/*
	 * Setup the hardware after reset:
	 * The receive engine is set going.
	 * Frame transmit is handled entirely
	 * in the frame output path; there's nothing to do
	 * here except setup the interrupt mask.
	 */
	if (ath_startrecv(sc) != 0) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to start recv logic\n", __func__);
		error = -EIO;
		goto done;
	}

	/* Setup our intr mask. */
	sc->sc_imask = ATH9K_INT_RX | ATH9K_INT_TX
		| ATH9K_INT_RXEOL | ATH9K_INT_RXORN
		| ATH9K_INT_FATAL | ATH9K_INT_GLOBAL;

	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_GTT)
		sc->sc_imask |= ATH9K_INT_GTT;

	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)
		sc->sc_imask |= ATH9K_INT_CST;

	/*
	 * Enable MIB interrupts when there are hardware phy counters.
	 * Note we only do this (at the moment) for station mode.
	 */
	if (ath9k_hw_phycounters(ah) &&
	    ((sc->sc_ah->ah_opmode == ATH9K_M_STA) ||
	     (sc->sc_ah->ah_opmode == ATH9K_M_IBSS)))
		sc->sc_imask |= ATH9K_INT_MIB;

	/*
	 * Some hardware processes the TIM IE and fires an
	 * interrupt when the TIM bit is set. For hardware
	 * that does, if not overridden by configuration,
	 * enable the TIM interrupt when operating as station.
	 */
	if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_ENHANCEDPM) &&
	    (sc->sc_ah->ah_opmode == ATH9K_M_STA) &&
	    !sc->sc_config.swBeaconProcess)
		sc->sc_imask |= ATH9K_INT_TIM;

	/*
	 * Don't enable interrupts here as we've not yet built our
	 * vap and node data structures, which will be needed as soon
	 * as we start receiving.
	 */
	ath_setcurmode(sc, ath_chan2mode(initial_chan));

	/* XXX: we must make sure h/w is ready and clear invalid flag
	 * before turning on interrupt. */
	sc->sc_flags &= ~SC_OP_INVALID;
done:
	return error;
}
int ath_reset(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hal *ah = sc->sc_ah;
	int status;
	int error = 0;

	ath9k_hw_set_interrupts(ah, 0);	/* disable interrupts */
	ath_draintxq(sc, retry_tx);	/* stop xmit */
	ath_stoprecv(sc);		/* stop recv */
	ath_flushrecv(sc);		/* flush recv queue */

	/* Reset chip */
	spin_lock_bh(&sc->sc_resetlock);
	if (!ath9k_hw_reset(ah, sc->sc_ah->ah_curchan,
			    sc->sc_ht_info.tx_chan_width,
			    sc->sc_tx_chainmask, sc->sc_rx_chainmask,
			    sc->sc_ht_extprotspacing, false, &status)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to reset hardware; hal status %u\n",
			__func__, status);
		error = -EIO;
	}
	spin_unlock_bh(&sc->sc_resetlock);

	if (ath_startrecv(sc) != 0)	/* restart recv */
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to start recv logic\n", __func__);

	/*
	 * We may be doing a reset in response to a request
	 * that changes the channel so update any state that
	 * might change as a result.
	 */
	ath_setcurmode(sc, ath_chan2mode(sc->sc_ah->ah_curchan));

	ath_update_txpow(sc);

	if (sc->sc_flags & SC_OP_BEACONS)
		ath_beacon_config(sc, ATH_IF_ID_ANY);	/* restart beacons */

	ath9k_hw_set_interrupts(ah, sc->sc_imask);

	/* Restart the txq */
	if (retry_tx) {
		int i;

		for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
			if (ATH_TXQ_SETUP(sc, i)) {
				spin_lock_bh(&sc->sc_txq[i].axq_lock);
				ath_txq_schedule(sc, &sc->sc_txq[i]);
				spin_unlock_bh(&sc->sc_txq[i].axq_lock);
			}
		}
	}

	return error;
}
int ath_suspend(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;

	/* No I/O if device has been surprise removed */
	if (sc->sc_flags & SC_OP_INVALID)
		return -EIO;

	/* Shut off the interrupt before setting sc->sc_invalid to '1' */
	ath9k_hw_set_interrupts(ah, 0);

	/* XXX: we must make sure h/w will not generate any interrupt
	 * before setting the invalid flag. */
	sc->sc_flags |= SC_OP_INVALID;

	/* disable HAL and put h/w to sleep */
	ath9k_hw_disable(sc->sc_ah);

	ath9k_hw_configpcipowersave(sc->sc_ah, 1);

	return 0;
}
/* Interrupt handler. Most of the actual processing is deferred.
 * It's the caller's responsibility to ensure the chip is awake. */

irqreturn_t ath_isr(int irq, void *dev)
{
	struct ath_softc *sc = dev;
	struct ath_hal *ah = sc->sc_ah;
	enum ath9k_int status;
	bool sched = false;

	do {
		if (sc->sc_flags & SC_OP_INVALID) {
			/*
			 * The hardware is not ready/present, don't
			 * touch anything. Note this can happen early
			 * on if the IRQ is shared.
			 */
			return IRQ_NONE;
		}
		if (!ath9k_hw_intrpend(ah)) {	/* shared irq, not for us */
			return IRQ_NONE;
		}

		/*
		 * Figure out the reason(s) for the interrupt. Note
		 * that the hal returns a pseudo-ISR that may include
		 * bits we haven't explicitly enabled so we mask the
		 * value to ensure we only process bits we requested.
		 */
		ath9k_hw_getisr(ah, &status);	/* NB: clears ISR too */

		status &= sc->sc_imask;	/* discard unasked-for bits */

		/*
		 * If there are no status bits set, then this interrupt was not
		 * for me (should have been caught above).
		 */
		if (!status)
			return IRQ_NONE;

		sc->sc_intrstatus = status;

		if (status & ATH9K_INT_FATAL) {
			/* need a chip reset */
			sched = true;
		} else if (status & ATH9K_INT_RXORN) {
			/* need a chip reset */
			sched = true;
		} else {
			if (status & ATH9K_INT_SWBA) {
				/* schedule a tasklet for beacon handling */
				tasklet_schedule(&sc->bcon_tasklet);
			}
			if (status & ATH9K_INT_RXEOL) {
				/*
				 * NB: the hardware should re-read the link when
				 *     RXE bit is written, but it doesn't work
				 *     at least on older hardware revs.
				 */
				sched = true;
			}

			if (status & ATH9K_INT_TXURN)
				/* bump tx trigger level */
				ath9k_hw_updatetxtriglevel(ah, true);
			/* XXX: optimize this */
			if (status & ATH9K_INT_RX)
				sched = true;
			if (status & ATH9K_INT_TX)
				sched = true;
			if (status & ATH9K_INT_BMISS)
				sched = true;
			/* carrier sense timeout */
			if (status & ATH9K_INT_CST)
				sched = true;
			if (status & ATH9K_INT_MIB) {
				/*
				 * Disable interrupts until we service the MIB
				 * interrupt; otherwise it will continue to
				 * fire.
				 */
				ath9k_hw_set_interrupts(ah, 0);
				/*
				 * Let the hal handle the event. We assume
				 * it will clear whatever condition caused
				 * the interrupt.
				 */
				ath9k_hw_procmibevent(ah, &sc->sc_halstats);
				ath9k_hw_set_interrupts(ah, sc->sc_imask);
			}
			if (status & ATH9K_INT_TIM_TIMER) {
				if (!(ah->ah_caps.hw_caps &
				      ATH9K_HW_CAP_AUTOSLEEP)) {
					/* Clear RxAbort bit so that we can
					 * receive frames */
					ath9k_hw_setrxabort(ah, 0);
					sched = true;
				}
			}
		}
	} while (0);

	if (sched) {
		/* turn off every interrupt except SWBA */
		ath9k_hw_set_interrupts(ah, (sc->sc_imask & ATH9K_INT_SWBA));
		tasklet_schedule(&sc->intr_tq);
	}

	return IRQ_HANDLED;
}
/* Deferred interrupt processing */

static void ath9k_tasklet(unsigned long data)
{
	struct ath_softc *sc = (struct ath_softc *)data;
	u32 status = sc->sc_intrstatus;

	if (status & ATH9K_INT_FATAL) {
		/* need a chip reset */
		ath_reset(sc, false);
		return;
	} else {
		if (status &
		    (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN)) {
			/* XXX: fill me in */
			/*
			if (status & ATH9K_INT_RXORN) {
			}
			if (status & ATH9K_INT_RXEOL) {
			}
			*/
			spin_lock_bh(&sc->sc_rxflushlock);
			ath_rx_tasklet(sc, 0);
			spin_unlock_bh(&sc->sc_rxflushlock);
		}
		/* XXX: optimize this */
		if (status & ATH9K_INT_TX)
			ath_tx_tasklet(sc);
		/* XXX: fill me in */
		/*
		if (status & ATH9K_INT_BMISS) {
		}
		if (status & (ATH9K_INT_TIM | ATH9K_INT_DTIMSYNC)) {
			if (status & ATH9K_INT_TIM) {
			}
			if (status & ATH9K_INT_DTIMSYNC) {
			}
		}
		*/
	}

	/* re-enable hardware interrupt */
	ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask);
}
int ath_init(u16 devid, struct ath_softc *sc)
{
	struct ath_hal *ah = NULL;
	int status;
	int error = 0, i;
	int csz = 0;

	/* XXX: hardware will not be ready until ath_open() being called */
	sc->sc_flags |= SC_OP_INVALID;

	sc->sc_debug = DBG_DEFAULT;
	DPRINTF(sc, ATH_DBG_CONFIG, "%s: devid 0x%x\n", __func__, devid);

	/* Initialize tasklet */
	tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
	tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet,
		     (unsigned long)sc);

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	bus_read_cachesize(sc, &csz);
	/* XXX assert csz is non-zero */
	sc->sc_cachelsz = csz << 2;	/* convert to bytes */

	spin_lock_init(&sc->sc_resetlock);

	ah = ath9k_hw_attach(devid, sc, sc->mem, &status);
	if (ah == NULL) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to attach hardware; HAL status %u\n",
			__func__, status);
		error = -ENXIO;
		goto bad;
	}
	sc->sc_ah = ah;

	/* Initializes the noise floor to a reasonable default value.
	 * Later on this will be updated during ANI processing. */
	sc->sc_ani.sc_noise_floor = ATH_DEFAULT_NOISE_FLOOR;

	/* Get the hardware key cache size. */
	sc->sc_keymax = ah->ah_caps.keycache_size;
	if (sc->sc_keymax > ATH_KEYMAX) {
		DPRINTF(sc, ATH_DBG_KEYCACHE,
			"%s: Warning, using only %u entries in %u key cache\n",
			__func__, ATH_KEYMAX, sc->sc_keymax);
		sc->sc_keymax = ATH_KEYMAX;
	}

	/*
	 * Reset the key cache since some parts do not
	 * reset the contents on initial power up.
	 */
	for (i = 0; i < sc->sc_keymax; i++)
		ath9k_hw_keyreset(ah, (u16) i);
	/*
	 * Mark key cache slots associated with global keys
	 * as in use. If we knew TKIP was not to be used we
	 * could leave the +32, +64, and +32+64 slots free.
	 * XXX only for splitmic.
	 */
	for (i = 0; i < IEEE80211_WEP_NKID; i++) {
		set_bit(i, sc->sc_keymap);
		set_bit(i + 32, sc->sc_keymap);
		set_bit(i + 64, sc->sc_keymap);
		set_bit(i + 32 + 64, sc->sc_keymap);
	}
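
	/*
	 * Worked example (assuming IEEE80211_WEP_NKID == 4, as in mac80211):
	 * the loop above marks slots 0-3, 32-35, 64-67 and 96-99 as in use,
	 * i.e. each of the four global key indices reserves its TKIP MIC
	 * companion slots at +32, +64 and +32+64 for the split-mic case.
	 */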
	/*
	 * Collect the channel list using the default country
	 * code and including outdoor channels. The 802.11 layer
	 * is responsible for filtering this list based on settings
	 * like the phy mode.
	 */
	error = ath_setup_channels(sc);
	if (error)
		goto bad;

	/* default to MONITOR mode */
	sc->sc_ah->ah_opmode = ATH9K_M_MONITOR;

	/* Setup rate tables */
	ath_setup_rates(sc, IEEE80211_BAND_2GHZ);
	ath_setup_rates(sc, IEEE80211_BAND_5GHZ);

	/* NB: setup here so ath_rate_update is happy */
	ath_setcurmode(sc, ATH9K_MODE_11A);

	/*
	 * Allocate hardware transmit queues: one queue for
	 * beacon frames and one data queue for each QoS
	 * priority. Note that the hal handles resetting
	 * these queues at the needed time.
	 */
	sc->sc_bhalq = ath_beaconq_setup(ah);
	if (sc->sc_bhalq == -1) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup a beacon xmit queue\n", __func__);
		error = -EIO;
		goto bad2;
	}
	sc->sc_cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
	if (sc->sc_cabq == NULL) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup CAB xmit queue\n", __func__);
		error = -EIO;
		goto bad2;
	}

	sc->sc_config.cabqReadytime = ATH_CABQ_READY_TIME;
	ath_cabq_update(sc);

	for (i = 0; i < ARRAY_SIZE(sc->sc_haltype2q); i++)
		sc->sc_haltype2q[i] = -1;

	/* Setup data queues */
	/* NB: ensure BK queue is the lowest priority h/w queue */
	if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup xmit queue for BK traffic\n",
			__func__);
		error = -EIO;
		goto bad2;
	}

	if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup xmit queue for BE traffic\n",
			__func__);
		error = -EIO;
		goto bad2;
	}
	if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup xmit queue for VI traffic\n",
			__func__);
		error = -EIO;
		goto bad2;
	}
	if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup xmit queue for VO traffic\n",
			__func__);
		error = -EIO;
		goto bad2;
	}

	setup_timer(&sc->sc_ani.timer, ath_ani_calibrate, (unsigned long)sc);

	sc->sc_rc = ath_rate_attach(ah);
	if (sc->sc_rc == NULL) {
		error = -EIO;
		goto bad2;
	}

	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
				   ATH9K_CIPHER_TKIP, NULL)) {
		/*
		 * Whether we should enable h/w TKIP MIC.
		 * XXX: if we don't support WME TKIP MIC, then we wouldn't
		 * report WMM capable, so it's always safe to turn on
		 * TKIP MIC in this case.
		 */
		ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC,
				       0, 1, NULL);
	}

	/*
	 * Check whether the separate key cache entries
	 * are required to handle both tx+rx MIC keys.
	 * With split mic keys the number of stations is limited
	 * to 27 otherwise 59.
	 */
	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
				   ATH9K_CIPHER_TKIP, NULL)
	    && ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
				      ATH9K_CIPHER_MIC, NULL)
	    && ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT,
				      0, NULL))
		sc->sc_splitmic = 1;

	/* turn on mcast key search if possible */
	if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
		(void)ath9k_hw_setcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 1,
					     1, NULL);

	sc->sc_config.txpowlimit = ATH_TXPOWER_MAX;
	sc->sc_config.txpowlimit_override = 0;

	/* 11n Capabilities */
	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) {
		sc->sc_flags |= SC_OP_TXAGGR;
		sc->sc_flags |= SC_OP_RXAGGR;
	}

	sc->sc_tx_chainmask = ah->ah_caps.tx_chainmask;
	sc->sc_rx_chainmask = ah->ah_caps.rx_chainmask;

	ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
	sc->sc_defant = ath9k_hw_getdefantenna(ah);

	ath9k_hw_getmac(ah, sc->sc_myaddr);
	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) {
		ath9k_hw_getbssidmask(ah, sc->sc_bssidmask);
		ATH_SET_VAP_BSSID_MASK(sc->sc_bssidmask);
		ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);
	}
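
	/*
	 * Note on the mask set above: the hardware accepts a frame when
	 * (frame BSSID & bssidmask) == (sc_myaddr & bssidmask), so
	 * ATH_SET_VAP_BSSID_MASK clears the address bits in which the
	 * per-VAP MAC addresses may differ. With a single interface the
	 * mask stays all-ones and matching degenerates to an exact compare.
	 */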
	sc->sc_slottime = ATH9K_SLOT_TIME_9;	/* default to short slot time */

	/* initialize beacon slots */
	for (i = 0; i < ARRAY_SIZE(sc->sc_bslot); i++)
		sc->sc_bslot[i] = ATH_IF_ID_ANY;

	/* save MISC configurations */
	sc->sc_config.swBeaconProcess = 1;

#ifdef CONFIG_SLOW_ANT_DIV
	/* range is 40 - 255, we use something in the middle */
	ath_slow_ant_div_init(&sc->sc_antdiv, sc, 0x127);
#endif

	return 0;
bad2:
	/* cleanup tx queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->sc_txq[i]);
bad:
	if (ah)
		ath9k_hw_detach(ah);

	return error;
}
void ath_deinit(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	int i;

	DPRINTF(sc, ATH_DBG_CONFIG, "%s\n", __func__);

	tasklet_kill(&sc->intr_tq);
	tasklet_kill(&sc->bcon_tasklet);

	if (!(sc->sc_flags & SC_OP_INVALID))
		ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
	ath_rate_detach(sc->sc_rc);
	/* cleanup tx queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->sc_txq[i]);
	ath9k_hw_detach(ah);
}
/*******************/
/* Node Management */
/*******************/

struct ath_node *ath_node_attach(struct ath_softc *sc, u8 *addr, int if_id)
{
	struct ath_vap *avp;
	struct ath_node *an;
	DECLARE_MAC_BUF(mac);

	avp = sc->sc_vaps[if_id];
	ASSERT(avp != NULL);

	/* mac80211 sta_notify callback is from an IRQ context, so no sleep */
	an = kmalloc(sizeof(struct ath_node), GFP_ATOMIC);
	if (an == NULL)
		return NULL;
	memset(an, 0, sizeof(*an));

	memcpy(an->an_addr, addr, ETH_ALEN);
	atomic_set(&an->an_refcnt, 1);

	/* set up per-node tx/rx state */
	ath_tx_node_init(sc, an);
	ath_rx_node_init(sc, an);

	ath_chainmask_sel_init(sc, an);
	ath_chainmask_sel_timerstart(&an->an_chainmask_sel);
	list_add(&an->list, &sc->node_list);

	return an;
}
void ath_node_detach(struct ath_softc *sc, struct ath_node *an, bool bh_flag)
{
	unsigned long flags;

	DECLARE_MAC_BUF(mac);

	ath_chainmask_sel_timerstop(&an->an_chainmask_sel);
	an->an_flags |= ATH_NODE_CLEAN;
	ath_tx_node_cleanup(sc, an, bh_flag);
	ath_rx_node_cleanup(sc, an);

	ath_tx_node_free(sc, an);
	ath_rx_node_free(sc, an);

	spin_lock_irqsave(&sc->node_lock, flags);

	list_del(&an->list);

	spin_unlock_irqrestore(&sc->node_lock, flags);

	kfree(an);
}
/* Finds a node and increases the refcnt if found */

struct ath_node *ath_node_get(struct ath_softc *sc, u8 *addr)
{
	struct ath_node *an = NULL, *an_found = NULL;

	if (list_empty(&sc->node_list)) /* FIXME */
		return NULL;

	list_for_each_entry(an, &sc->node_list, list) {
		if (!compare_ether_addr(an->an_addr, addr)) {
			atomic_inc(&an->an_refcnt);
			an_found = an;
			break;
		}
	}

	return an_found;
}

/* Decrements the refcnt and if it drops to zero, detach the node */

void ath_node_put(struct ath_softc *sc, struct ath_node *an, bool bh_flag)
{
	if (atomic_dec_and_test(&an->an_refcnt))
		ath_node_detach(sc, an, bh_flag);
}

/* Finds a node, doesn't increment refcnt. Caller must hold sc->node_lock */
struct ath_node *ath_node_find(struct ath_softc *sc, u8 *addr)
{
	struct ath_node *an = NULL, *an_found = NULL;

	if (list_empty(&sc->node_list))
		return NULL;

	list_for_each_entry(an, &sc->node_list, list)
		if (!compare_ether_addr(an->an_addr, addr)) {
			an_found = an;
			break;
		}

	return an_found;
}
/*
 * Set up driver-specific state for a newly associated node. This routine
 * really only applies if compression or XR are enabled; there is no code
 * covering any other cases.
 */
void ath_newassoc(struct ath_softc *sc,
		  struct ath_node *an, int isnew, int isuapsd)
{
	int tidno;

	/* if station reassociates, tear down the aggregation state. */
	if (!isnew) {
		for (tidno = 0; tidno < WME_NUM_TID; tidno++) {
			if (sc->sc_flags & SC_OP_TXAGGR)
				ath_tx_aggr_teardown(sc, an, tidno);
			if (sc->sc_flags & SC_OP_RXAGGR)
				ath_rx_aggr_teardown(sc, an, tidno);
		}
	}
}
void ath_key_reset(struct ath_softc *sc, u16 keyix, int freeslot)
{
	ath9k_hw_keyreset(sc->sc_ah, keyix);
	if (freeslot)
		clear_bit(keyix, sc->sc_keymap);
}

int ath_keyset(struct ath_softc *sc,
	       u16 keyix,
	       struct ath9k_keyval *hk,
	       const u8 mac[ETH_ALEN])
{
	bool status;

	status = ath9k_hw_set_keycache_entry(sc->sc_ah,
		keyix, hk, mac, false);

	return status != false;
}
/***********************/
/* TX Power/Regulatory */
/***********************/

/*
 * Set Transmit power in HAL
 *
 * This routine makes the actual HAL calls to set the new transmit power
 * limit.
 */
void ath_update_txpow(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	u32 txpow;

	if (sc->sc_curtxpow != sc->sc_config.txpowlimit) {
		ath9k_hw_set_txpowerlimit(ah, sc->sc_config.txpowlimit);
		/* read back in case value is clamped */
		ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow);
		sc->sc_curtxpow = txpow;
	}
}

/* Return the current country and domain information */
void ath_get_currentCountry(struct ath_softc *sc,
			    struct ath9k_country_entry *ctry)
{
	ath9k_regd_get_current_country(sc->sc_ah, ctry);

	/* If the HAL is not specific yet, since it is band dependent,
	 * use the one we passed in. */
	if (ctry->countryCode == CTRY_DEFAULT) {
		ctry->iso[0] = 0;
		ctry->iso[1] = 0;
	} else if (ctry->iso[0] && ctry->iso[1]) {
		if (!ctry->iso[2]) {
			if (ath_outdoor)
				ctry->iso[2] = 'O';
			else
				ctry->iso[2] = 'I';
		}
	}
}
/**************************/
/* Slow Antenna Diversity */
/**************************/

void ath_slow_ant_div_init(struct ath_antdiv *antdiv,
			   struct ath_softc *sc,
			   int32_t rssitrig)
{
	int trig;

	/* antdivf_rssitrig can range from 40 - 0xff */
	trig = (rssitrig > 0xff) ? 0xff : rssitrig;
	trig = (trig < 40) ? 40 : trig;

	antdiv->antdiv_sc = sc;
	antdiv->antdivf_rssitrig = trig;
}

void ath_slow_ant_div_start(struct ath_antdiv *antdiv,
			    u8 num_antcfg,
			    const u8 *bssid)
{
	antdiv->antdiv_num_antcfg =
		num_antcfg < ATH_ANT_DIV_MAX_CFG ?
		num_antcfg : ATH_ANT_DIV_MAX_CFG;
	antdiv->antdiv_state = ATH_ANT_DIV_IDLE;
	antdiv->antdiv_curcfg = 0;
	antdiv->antdiv_bestcfg = 0;
	antdiv->antdiv_laststatetsf = 0;

	memcpy(antdiv->antdiv_bssid, bssid, sizeof(antdiv->antdiv_bssid));

	antdiv->antdiv_start = 1;
}

void ath_slow_ant_div_stop(struct ath_antdiv *antdiv)
{
	antdiv->antdiv_start = 0;
}
static int32_t ath_find_max_val(int32_t *val,
				u8 num_val, u8 *max_index)
{
	int32_t MaxVal = *val++;
	u8 cur_index = 0;

	while (++cur_index < num_val) {
		if (*val > MaxVal) {
			MaxVal = *val;
			*max_index = cur_index;
		}

		val++;
	}

	return MaxVal;
}
void ath_slow_ant_div(struct ath_antdiv *antdiv,
		      struct ieee80211_hdr *hdr,
		      struct ath_rx_status *rx_stats)
{
	struct ath_softc *sc = antdiv->antdiv_sc;
	struct ath_hal *ah = sc->sc_ah;
	u64 curtsf = 0;
	u8 bestcfg, curcfg = antdiv->antdiv_curcfg;
	__le16 fc = hdr->frame_control;

	if (antdiv->antdiv_start && ieee80211_is_beacon(fc)
	    && !compare_ether_addr(hdr->addr3, antdiv->antdiv_bssid)) {
		antdiv->antdiv_lastbrssi[curcfg] = rx_stats->rs_rssi;
		antdiv->antdiv_lastbtsf[curcfg] = ath9k_hw_gettsf64(sc->sc_ah);
		curtsf = antdiv->antdiv_lastbtsf[curcfg];
	} else {
		return;
	}

	switch (antdiv->antdiv_state) {
	case ATH_ANT_DIV_IDLE:
		if ((antdiv->antdiv_lastbrssi[curcfg] <
		     antdiv->antdivf_rssitrig)
		    && ((curtsf - antdiv->antdiv_laststatetsf) >
			ATH_ANT_DIV_MIN_IDLE_US)) {

			curcfg++;
			if (curcfg == antdiv->antdiv_num_antcfg)
				curcfg = 0;

			if (!ath9k_hw_select_antconfig(ah, curcfg)) {
				antdiv->antdiv_bestcfg = antdiv->antdiv_curcfg;
				antdiv->antdiv_curcfg = curcfg;
				antdiv->antdiv_laststatetsf = curtsf;
				antdiv->antdiv_state = ATH_ANT_DIV_SCAN;
			}
		}
		break;

	case ATH_ANT_DIV_SCAN:
		if ((curtsf - antdiv->antdiv_laststatetsf) <
		    ATH_ANT_DIV_MIN_SCAN_US)
			break;

		curcfg++;
		if (curcfg == antdiv->antdiv_num_antcfg)
			curcfg = 0;

		if (curcfg == antdiv->antdiv_bestcfg) {
			ath_find_max_val(antdiv->antdiv_lastbrssi,
					 antdiv->antdiv_num_antcfg, &bestcfg);
			if (!ath9k_hw_select_antconfig(ah, bestcfg)) {
				antdiv->antdiv_bestcfg = bestcfg;
				antdiv->antdiv_curcfg = bestcfg;
				antdiv->antdiv_laststatetsf = curtsf;
				antdiv->antdiv_state = ATH_ANT_DIV_IDLE;
			}
		} else {
			if (!ath9k_hw_select_antconfig(ah, curcfg)) {
				antdiv->antdiv_curcfg = curcfg;
				antdiv->antdiv_laststatetsf = curtsf;
				antdiv->antdiv_state = ATH_ANT_DIV_SCAN;
			}
		}

		break;
	}
}
/***********************/
/* Descriptor Handling */
/***********************/

/*
 * Set up DMA descriptors
 *
 * This function will allocate both the DMA descriptor structure, and the
 * buffers it contains. These are used to contain the descriptors used
 * by the system.
 */
int ath_descdma_setup(struct ath_softc *sc,
		      struct ath_descdma *dd,
		      struct list_head *head,
		      const char *name,
		      int nbuf,
		      int ndesc)
{
#define	DS2PHYS(_dd, _ds)						\
	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
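
	/*
	 * Worked example for ATH_DESC_4KB_BOUND_CHECK, assuming the 32-dword
	 * (0x80-byte) descriptor fetch mentioned below: a descriptor starting
	 * at offset 0xF80 within a 4KB page would end exactly at the page
	 * boundary, and anything starting later would cross it; the test
	 * ((_daddr) & 0xFFF) > 0xF7F flags exactly those start addresses
	 * (0xF80-0xFFF within a page).
	 */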
	struct ath_desc *ds;
	struct ath_buf *bf;
	int i, bsize, error;

	DPRINTF(sc, ATH_DBG_CONFIG, "%s: %s DMA: %u buffers %u desc/buf\n",
		__func__, name, nbuf, ndesc);

	/* ath_desc must be a multiple of DWORDs */
	if ((sizeof(struct ath_desc) % 4) != 0) {
		DPRINTF(sc, ATH_DBG_FATAL, "%s: ath_desc not DWORD aligned\n",
			__func__);
		ASSERT((sizeof(struct ath_desc) % 4) == 0);
		error = -ENOMEM;
		goto fail;
	}

	dd->dd_name = name;
	dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;

	/*
	 * Need additional DMA memory because we can't use
	 * descriptors that cross the 4K page boundary. Assume
	 * one skipped descriptor per 4K page.
	 */
	if (!(sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
		u32 ndesc_skipped =
			ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
		u32 dma_len;

		while (ndesc_skipped) {
			dma_len = ndesc_skipped * sizeof(struct ath_desc);
			dd->dd_desc_len += dma_len;

			ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
		}
	}
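
	/*
	 * Example of the padding loop above, assuming a 128-byte ath_desc
	 * and an initial dd_desc_len of 16 pages (65536 bytes): one skipped
	 * descriptor per page gives ndesc_skipped = 16, so 16 * 128 = 2048
	 * bytes are added; 2048 / 4096 == 0, so the loop stops after a
	 * single pass with dd_desc_len == 67584.
	 */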
	/* allocate descriptors */
	dd->dd_desc = pci_alloc_consistent(sc->pdev,
					   dd->dd_desc_len,
					   &dd->dd_desc_paddr);
	if (dd->dd_desc == NULL) {
		error = -ENOMEM;
		goto fail;
	}
	ds = dd->dd_desc;
	DPRINTF(sc, ATH_DBG_CONFIG, "%s: %s DMA map: %p (%u) -> %llx (%u)\n",
		__func__, dd->dd_name, ds, (u32) dd->dd_desc_len,
		ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);

	/* allocate buffers */
	bsize = sizeof(struct ath_buf) * nbuf;
	bf = kmalloc(bsize, GFP_KERNEL);
	if (bf == NULL) {
		error = -ENOMEM;
		goto fail2;
	}
	memset(bf, 0, bsize);
	dd->dd_bufptr = bf;

	INIT_LIST_HEAD(head);
	for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
		bf->bf_desc = ds;
		bf->bf_daddr = DS2PHYS(dd, ds);

		if (!(sc->sc_ah->ah_caps.hw_caps &
		      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
			/*
			 * Skip descriptor addresses which can cause 4KB
			 * boundary crossing (addr + length) with a 32 dword
			 * descriptor fetch.
			 */
			while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
				ASSERT((caddr_t) bf->bf_desc <
				       ((caddr_t) dd->dd_desc +
					dd->dd_desc_len));

				ds += ndesc;
				bf->bf_desc = ds;
				bf->bf_daddr = DS2PHYS(dd, ds);
			}
		}
		list_add_tail(&bf->list, head);
	}
	return 0;
fail2:
	pci_free_consistent(sc->pdev,
			    dd->dd_desc_len, dd->dd_desc, dd->dd_desc_paddr);
fail:
	memset(dd, 0, sizeof(*dd));
	return error;
#undef ATH_DESC_4KB_BOUND_CHECK
#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
#undef DS2PHYS
}
/*
 * Cleanup DMA descriptors
 *
 * This function will free the DMA block that was allocated for the descriptor
 * pool. Since this was allocated as one "chunk", it is freed in the same
 * manner.
 */
void ath_descdma_cleanup(struct ath_softc *sc,
			 struct ath_descdma *dd,
			 struct list_head *head)
{
	/* Free memory associated with descriptors */
	pci_free_consistent(sc->pdev,
			    dd->dd_desc_len, dd->dd_desc, dd->dd_desc_paddr);

	INIT_LIST_HEAD(head);
	kfree(dd->dd_bufptr);
	memset(dd, 0, sizeof(*dd));
}
int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
{
	int qnum;

	switch (queue) {
	case 0:
		qnum = sc->sc_haltype2q[ATH9K_WME_AC_VO];
		break;
	case 1:
		qnum = sc->sc_haltype2q[ATH9K_WME_AC_VI];
		break;
	case 2:
		qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
		break;
	case 3:
		qnum = sc->sc_haltype2q[ATH9K_WME_AC_BK];
		break;
	default:
		qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
		break;
	}

	return qnum;
}

int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc)
{
	int qnum;

	switch (queue) {
	case ATH9K_WME_AC_VO:
		qnum = 0;
		break;
	case ATH9K_WME_AC_VI:
		qnum = 1;
		break;
	case ATH9K_WME_AC_BE:
		qnum = 2;
		break;
	case ATH9K_WME_AC_BK:
		qnum = 3;
		break;
	default:
		qnum = -1;
		break;
	}

	return qnum;
}
/*
 * Expand time stamp to TSF
 *
 * Extend 15-bit time stamp from rx descriptor to
 * a full 64-bit TSF using the current h/w TSF.
 */
u64 ath_extend_tsf(struct ath_softc *sc, u32 rstamp)
{
	u64 tsf;

	tsf = ath9k_hw_gettsf64(sc->sc_ah);
	if ((tsf & 0x7fff) < rstamp)
		tsf -= 0x8000;

	return (tsf & ~0x7fff) | rstamp;
}
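
/*
 * Rollover example: suppose the current h/w TSF is 0x1_0000_8012 and the
 * descriptor stamp rstamp is 0x7FF0. Since (tsf & 0x7fff) == 0x0012 is
 * smaller than rstamp, the stamp was taken before the low 15 bits
 * wrapped, so 0x8000 is subtracted first; the result is
 * (0x1_0000_0012 & ~0x7fff) | 0x7FF0 == 0x1_0000_7FF0.
 */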
/*
 * Set Default Antenna
 *
 * Call into the HAL to set the default antenna to use. Not really valid for
 * MIMO technology.
 */
void ath_setdefantenna(void *context, u32 antenna)
{
	struct ath_softc *sc = (struct ath_softc *)context;
	struct ath_hal *ah = sc->sc_ah;

	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(ah, antenna);
	sc->sc_defant = antenna;
	sc->sc_rxotherant = 0;
}
/*
 * This will wake up the chip if required, and set the slot time for the
 * frame (maximum transmit time). Slot time is assumed to be already set
 * in the ATH object member sc_slottime.
 */
void ath_setslottime(struct ath_softc *sc)
{
	ath9k_hw_setslottime(sc->sc_ah, sc->sc_slottime);
	sc->sc_updateslot = OK;
}