/*
 * Copyright (c) 2008, Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/* Implementation of the main "ATH" layer. */

static int ath_outdoor;		/* enable outdoor use */

static const u8 ath_bcast_mac[ETH_ALEN] =
	{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

static u32 ath_chainmask_sel_up_rssi_thres =
	ATH_CHAINMASK_SEL_UP_RSSI_THRES;
static u32 ath_chainmask_sel_down_rssi_thres =
	ATH_CHAINMASK_SEL_DOWN_RSSI_THRES;
static u32 ath_chainmask_sel_period =
	ATH_CHAINMASK_SEL_TIMEOUT;
/* Return the bus cache line size in 4-byte word units */

static void bus_read_cachesize(struct ath_softc *sc, int *csz)
{
	u8 u8tmp;

	pci_read_config_byte(sc->pdev, PCI_CACHE_LINE_SIZE, (u8 *)&u8tmp);
	*csz = (int)u8tmp;

	/*
	 * This check was put in to avoid "unpleasant" consequences if
	 * the bootrom has not fully initialized all PCI devices.
	 * Sometimes the cache line size register is not set.
	 */
	if (*csz == 0)
		*csz = DEFAULT_CACHELINE >> 2;	/* Use the default size */
}
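/*
 * NB: the PCI cache line size register is specified in 4-byte words, so
 * no unit conversion is needed above; a register value of 0x10 means 16
 * words (64 bytes). ath_init() converts back to bytes later with
 * "sc->sc_cachelsz = csz << 2".
 */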
/*
 * Set current operating mode
 *
 * This function initializes and fills the rate table in the ATH object based
 * on the operating mode.  The blink rates are also set up here, although
 * they have been superseded by the ath_led module.
 */
static void ath_setcurmode(struct ath_softc *sc, enum wireless_mode mode)
{
	const struct ath9k_rate_table *rt;
	int i;

	memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
	rt = ath9k_hw_getratetable(sc->sc_ah, mode);

	for (i = 0; i < rt->rateCount; i++)
		sc->sc_rixmap[rt->info[i].rateCode] = (u8) i;

	memzero(sc->sc_hwmap, sizeof(sc->sc_hwmap));
	for (i = 0; i < 256; i++) {
		u8 ix = rt->rateCodeToIndex[i];

		if (ix == 0xff)
			continue;

		sc->sc_hwmap[i].ieeerate =
			rt->info[ix].dot11Rate & IEEE80211_RATE_VAL;
		sc->sc_hwmap[i].rateKbps = rt->info[ix].rateKbps;

		if (rt->info[ix].shortPreamble ||
		    rt->info[ix].phy == PHY_OFDM) {
			/* XXX: Handle this */
		}

		/* NB: this uses the last entry if the rate isn't found */
		/* XXX beware of overflow */
	}
	sc->sc_curmode = mode;

	/*
	 * All protection frames are transmitted at 2 Mb/s for
	 * 11g, otherwise at 1 Mb/s.
	 * XXX select protection rate index from rate table.
	 */
	sc->sc_protrix = (mode == ATH9K_MODE_11G ? 1 : 0);
}
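/*
 * A minimal sketch of the inverse-map idiom used above (illustrative
 * only, using a standalone array in place of the driver's sc_rixmap):
 *
 *	u8 rixmap[256];
 *	int i;
 *
 *	memset(rixmap, 0xff, sizeof(rixmap));	// 0xff = "no such rate"
 *	for (i = 0; i < rt->rateCount; i++)
 *		rixmap[rt->info[i].rateCode] = (u8) i;
 *
 * After the loop, rixmap[code] yields the rate-table index for a given
 * hardware rate code in O(1), or 0xff if the code is unknown.
 */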
/*
 * Set up rate table (legacy rates)
 */
static void ath_setup_rates(struct ath_softc *sc, enum ieee80211_band band)
{
	struct ath_hal *ah = sc->sc_ah;
	const struct ath9k_rate_table *rt = NULL;
	struct ieee80211_supported_band *sband;
	struct ieee80211_rate *rate;
	int i, maxrates;

	switch (band) {
	case IEEE80211_BAND_2GHZ:
		rt = ath9k_hw_getratetable(ah, ATH9K_MODE_11G);
		break;
	case IEEE80211_BAND_5GHZ:
		rt = ath9k_hw_getratetable(ah, ATH9K_MODE_11A);
		break;
	default:
		break;
	}
	if (rt == NULL)
		return;

	sband = &sc->sbands[band];
	rate = sc->rates[band];
	if (rt->rateCount > ATH_RATE_MAX)
		maxrates = ATH_RATE_MAX;
	else
		maxrates = rt->rateCount;

	for (i = 0; i < maxrates; i++) {
		rate[i].bitrate = rt->info[i].rateKbps / 100;
		rate[i].hw_value = rt->info[i].rateCode;
		DPRINTF(sc, ATH_DBG_CONFIG,
			"%s: Rate: %2dMbps, ratecode: %2d\n",
			__func__,
			rate[i].bitrate / 10,
			rate[i].hw_value);
	}
}
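/*
 * NB: mac80211 expects ieee80211_rate.bitrate in units of 100 kb/s,
 * which is what the rateKbps / 100 conversion above produces; the debug
 * print divides by a further 10 to show whole Mb/s (e.g. 54000 kb/s ->
 * 540 -> "54Mbps").
 */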
/*
 * Set up channel list
 */
static int ath_setup_channels(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	int nchan, i, a = 0, b = 0;
	u8 regclassids[ATH_REGCLASSIDS_MAX];
	u32 nregclass = 0;
	struct ieee80211_supported_band *band_2ghz;
	struct ieee80211_supported_band *band_5ghz;
	struct ieee80211_channel *chan_2ghz;
	struct ieee80211_channel *chan_5ghz;
	struct ath9k_channel *c;

	/* Fill in ah->ah_channels */
	if (!ath9k_regd_init_channels(ah,
				      ATH_CHAN_MAX,
				      (u32 *)&nchan,
				      regclassids,
				      ATH_REGCLASSIDS_MAX,
				      &nregclass,
				      CTRY_DEFAULT,
				      false,
				      1)) {
		u32 rd = ah->ah_currentRD;

		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to collect channel list; "
			"regdomain likely %u country code %u\n",
			__func__, rd, CTRY_DEFAULT);
		return -EINVAL;
	}
	band_2ghz = &sc->sbands[IEEE80211_BAND_2GHZ];
	band_5ghz = &sc->sbands[IEEE80211_BAND_5GHZ];
	chan_2ghz = sc->channels[IEEE80211_BAND_2GHZ];
	chan_5ghz = sc->channels[IEEE80211_BAND_5GHZ];

	for (i = 0; i < nchan; i++) {
		c = &ah->ah_channels[i];
		if (IS_CHAN_2GHZ(c)) {
			chan_2ghz[a].band = IEEE80211_BAND_2GHZ;
			chan_2ghz[a].center_freq = c->channel;
			chan_2ghz[a].max_power = c->maxTxPower;

			if (c->privFlags & CHANNEL_DISALLOW_ADHOC)
				chan_2ghz[a].flags |=
					IEEE80211_CHAN_NO_IBSS;
			if (c->channelFlags & CHANNEL_PASSIVE)
				chan_2ghz[a].flags |=
					IEEE80211_CHAN_PASSIVE_SCAN;

			band_2ghz->n_channels = ++a;

			DPRINTF(sc, ATH_DBG_CONFIG,
				"%s: 2GHz channel: %d, "
				"channelFlags: 0x%x\n",
				__func__, c->channel, c->channelFlags);
		} else if (IS_CHAN_5GHZ(c)) {
			chan_5ghz[b].band = IEEE80211_BAND_5GHZ;
			chan_5ghz[b].center_freq = c->channel;
			chan_5ghz[b].max_power = c->maxTxPower;

			if (c->privFlags & CHANNEL_DISALLOW_ADHOC)
				chan_5ghz[b].flags |=
					IEEE80211_CHAN_NO_IBSS;
			if (c->channelFlags & CHANNEL_PASSIVE)
				chan_5ghz[b].flags |=
					IEEE80211_CHAN_PASSIVE_SCAN;

			band_5ghz->n_channels = ++b;

			DPRINTF(sc, ATH_DBG_CONFIG,
				"%s: 5GHz channel: %d, "
				"channelFlags: 0x%x\n",
				__func__, c->channel, c->channelFlags);
		}
	}

	return 0;
}
/*
 * Determine mode from channel flags
 *
 * This routine will provide the enumerated wireless_mode value based
 * on the settings of the channel flags.  If no valid set of flags
 * exists, the lowest mode (11b) is selected.
 */
static enum wireless_mode ath_chan2mode(struct ath9k_channel *chan)
{
	if (chan->chanmode == CHANNEL_A)
		return ATH9K_MODE_11A;
	else if (chan->chanmode == CHANNEL_G)
		return ATH9K_MODE_11G;
	else if (chan->chanmode == CHANNEL_B)
		return ATH9K_MODE_11B;
	else if (chan->chanmode == CHANNEL_A_HT20)
		return ATH9K_MODE_11NA_HT20;
	else if (chan->chanmode == CHANNEL_G_HT20)
		return ATH9K_MODE_11NG_HT20;
	else if (chan->chanmode == CHANNEL_A_HT40PLUS)
		return ATH9K_MODE_11NA_HT40PLUS;
	else if (chan->chanmode == CHANNEL_A_HT40MINUS)
		return ATH9K_MODE_11NA_HT40MINUS;
	else if (chan->chanmode == CHANNEL_G_HT40PLUS)
		return ATH9K_MODE_11NG_HT40PLUS;
	else if (chan->chanmode == CHANNEL_G_HT40MINUS)
		return ATH9K_MODE_11NG_HT40MINUS;

	/* NB: should not get here */
	return ATH9K_MODE_11B;
}
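/*
 * NB: the comparison chain above could be replaced by a lookup table
 * keyed on chanmode, but the CHANNEL_* values are sparse combinations of
 * band/phy flag bits rather than a small dense enum, so the explicit
 * if/else chain is the simpler form here.
 */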
/*
 * Stop the device, grabbing the top-level lock to protect
 * against concurrent entry through ath_init (which can happen
 * if another thread does a system call and the thread doing the
 * stop is preempted).
 */
static int ath_stop(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;

	DPRINTF(sc, ATH_DBG_CONFIG, "%s: invalid %u\n",
		__func__, sc->sc_invalid);

	/*
	 * Shutdown the hardware and driver:
	 *    stop output from above
	 *    reset 802.11 state machine
	 *	(sends station deassoc/deauth frames)
	 *    turn off timers
	 *    disable interrupts
	 *    clear transmit machinery
	 *    clear receive machinery
	 *    turn off the radio
	 *    reclaim beacon resources
	 *
	 * Note that some of this work is not possible if the
	 * hardware is gone (invalid).
	 */
	ath9k_hw_set_interrupts(ah, 0);
	ath_draintxq(sc, false);
	if (!sc->sc_invalid) {
		ath_stoprecv(sc);
		ath9k_hw_phy_disable(ah);
	} else
		sc->sc_rxlink = NULL;
	ath_flushrecv(sc);

	return 0;
}
/*
 * This function is called when starting a channel scan.  It will perform
 * power save wakeup processing, set the filter for the scan, and get the
 * chip ready to send broadcast packets out during the scan.
 */
void ath_scan_start(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	u32 rfilt;
	u32 now = (u32) jiffies_to_msecs(get_timestamp());

	sc->sc_scanning = 1;
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);
	ath9k_hw_write_associd(ah, ath_bcast_mac, 0);

	/* Restore previous power management state. */

	DPRINTF(sc, ATH_DBG_CONFIG, "%d.%03d | %s: RX filter 0x%x aid 0\n",
		now / 1000, now % 1000, __func__, rfilt);
}
/*
 * This routine is called by the upper layer when the scan is completed.  This
 * will set the filters back to normal operating mode, set the BSSID to the
 * correct value, and restore the power save state.
 */
void ath_scan_end(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	u32 rfilt;
	u32 now = (u32) jiffies_to_msecs(get_timestamp());

	sc->sc_scanning = 0;
	/* Request for a full reset due to rx packet filter changes */
	sc->sc_full_reset = 1;
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);
	ath9k_hw_write_associd(ah, sc->sc_curbssid, sc->sc_curaid);

	DPRINTF(sc, ATH_DBG_CONFIG, "%d.%03d | %s: RX filter 0x%x aid 0x%x\n",
		now / 1000, now % 1000, __func__, rfilt, sc->sc_curaid);
}
/*
 * Set the current channel
 *
 * Set/change channels.  If the channel is really being changed, it's done
 * by resetting the chip.  To accomplish this we must first clean up any
 * pending DMA, then restart things, as in ath_init.
 */
int ath_set_channel(struct ath_softc *sc, struct ath9k_channel *hchan)
{
	struct ath_hal *ah = sc->sc_ah;
	bool fastcc = true, stopped;
	enum ath9k_ht_macmode ht_macmode;

	if (sc->sc_invalid)	/* if the device is invalid or removed */
		return -EIO;

	DPRINTF(sc, ATH_DBG_CONFIG,
		"%s: %u (%u MHz) -> %u (%u MHz), cflags:%x\n",
		__func__,
		ath9k_hw_mhz2ieee(ah, sc->sc_curchan.channel,
				  sc->sc_curchan.channelFlags),
		sc->sc_curchan.channel,
		ath9k_hw_mhz2ieee(ah, hchan->channel, hchan->channelFlags),
		hchan->channel, hchan->channelFlags);

	ht_macmode = ath_cwm_macmode(sc);

	if (hchan->channel != sc->sc_curchan.channel ||
	    hchan->channelFlags != sc->sc_curchan.channelFlags ||
	    sc->sc_update_chainmask || sc->sc_full_reset) {
		int status;
		/*
		 * This is only performed if the channel settings have
		 * actually changed.
		 *
		 * To switch channels clear any pending DMA operations;
		 * wait long enough for the RX fifo to drain, reset the
		 * hardware at the new frequency, and then re-enable
		 * the relevant bits of the h/w.
		 */
		ath9k_hw_set_interrupts(ah, 0);	/* disable interrupts */
		ath_draintxq(sc, false);	/* clear pending tx frames */
		stopped = ath_stoprecv(sc);	/* turn off frame recv */

		/* XXX: do not flush receive queue here. We don't want
		 * to flush data frames already in queue because of
		 * changing channel. */

		if (!stopped || sc->sc_full_reset)
			fastcc = false;

		spin_lock_bh(&sc->sc_resetlock);
		if (!ath9k_hw_reset(ah, sc->sc_opmode, hchan,
				    ht_macmode, sc->sc_tx_chainmask,
				    sc->sc_rx_chainmask,
				    sc->sc_ht_extprotspacing,
				    fastcc, &status)) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"%s: unable to reset channel %u (%uMhz) "
				"flags 0x%x hal status %u\n", __func__,
				ath9k_hw_mhz2ieee(ah, hchan->channel,
						  hchan->channelFlags),
				hchan->channel, hchan->channelFlags, status);
			spin_unlock_bh(&sc->sc_resetlock);
			return -EIO;
		}
		spin_unlock_bh(&sc->sc_resetlock);

		sc->sc_curchan = *hchan;
		sc->sc_update_chainmask = 0;
		sc->sc_full_reset = 0;

		/* Re-enable rx framework */
		if (ath_startrecv(sc) != 0) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"%s: unable to restart recv logic\n",
				__func__);
			return -EIO;
		}

		/*
		 * Change channels and update the h/w rate map
		 * if we're switching; e.g. 11a to 11b/g.
		 */
		ath_setcurmode(sc, ath_chan2mode(hchan));

		ath_update_txpow(sc);	/* update tx power state */

		/*
		 * Re-enable interrupts.
		 */
		ath9k_hw_set_interrupts(ah, sc->sc_imask);
	}
	return 0;
}
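/*
 * NB: "fastcc" above asks the HAL for a fast channel change, which skips
 * the lengthier calibration of a cold reset. It is downgraded to a full
 * reset (fastcc = false) whenever the RX engine did not stop cleanly or
 * a full reset was explicitly requested via sc_full_reset.
 */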
/**********************/
/* Chainmask Handling */
/**********************/
static void ath_chainmask_sel_timertimeout(unsigned long data)
{
	struct ath_chainmask_sel *cm = (struct ath_chainmask_sel *)data;
	cm->switch_allowed = 1;
}
/* Start chainmask select timer */
static void ath_chainmask_sel_timerstart(struct ath_chainmask_sel *cm)
{
	cm->switch_allowed = 0;
	/* mod_timer() expects an absolute expiry time, not a period */
	mod_timer(&cm->timer, jiffies + ath_chainmask_sel_period);
}
/* Stop chainmask select timer */
static void ath_chainmask_sel_timerstop(struct ath_chainmask_sel *cm)
{
	cm->switch_allowed = 0;
	del_timer_sync(&cm->timer);
}
static void ath_chainmask_sel_init(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_chainmask_sel *cm = &an->an_chainmask_sel;

	memzero(cm, sizeof(struct ath_chainmask_sel));

	cm->cur_tx_mask = sc->sc_tx_chainmask;
	cm->cur_rx_mask = sc->sc_rx_chainmask;
	cm->tx_avgrssi = ATH_RSSI_DUMMY_MARKER;
	setup_timer(&cm->timer,
		    ath_chainmask_sel_timertimeout, (unsigned long) cm);
}
int ath_chainmask_sel_logic(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_chainmask_sel *cm = &an->an_chainmask_sel;

	/*
	 * Disable auto-switching under any of the following conditions.
	 * sc_chainmask_auto_sel is the internal global auto-switching
	 * enabled/disabled setting.
	 */
	if (sc->sc_ah->ah_caps.tx_chainmask != ATH_CHAINMASK_SEL_3X3) {
		cm->cur_tx_mask = sc->sc_tx_chainmask;
		return cm->cur_tx_mask;
	}

	if (cm->tx_avgrssi == ATH_RSSI_DUMMY_MARKER)
		return cm->cur_tx_mask;

	if (cm->switch_allowed) {
		/* Switch down from tx 3 to tx 2. */
		if (cm->cur_tx_mask == ATH_CHAINMASK_SEL_3X3 &&
		    ATH_RSSI_OUT(cm->tx_avgrssi) >=
		    ath_chainmask_sel_down_rssi_thres) {
			cm->cur_tx_mask = sc->sc_tx_chainmask;

			/* Don't let another switch happen until
			 * this timer expires */
			ath_chainmask_sel_timerstart(cm);
		}
		/* Switch up from tx 2 to 3. */
		else if (cm->cur_tx_mask == sc->sc_tx_chainmask &&
			 ATH_RSSI_OUT(cm->tx_avgrssi) <=
			 ath_chainmask_sel_up_rssi_thres) {
			cm->cur_tx_mask = ATH_CHAINMASK_SEL_3X3;

			/* Don't let another switch happen
			 * until this timer expires */
			ath_chainmask_sel_timerstart(cm);
		}
	}

	return cm->cur_tx_mask;
}
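/*
 * Worked example (threshold values assumed for illustration): with an
 * "up" threshold of 20 and a "down" threshold of 35, a link whose
 * averaged RSSI reaches 35 or more switches down from the 3x3 mask to
 * the configured chainmask (strong link, fewer chains suffice), while
 * one that drops to 20 or less switches back up to 3x3 (weak link, use
 * all chains). The timer started on each switch enforces a hold-off so
 * the mask cannot flap between the two thresholds.
 */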
/*
 * Update tx/rx chainmask. For legacy association,
 * hard code the chainmask to 1x1; for 11n association, use
 * the chainmask configuration.
 */
void ath_update_chainmask(struct ath_softc *sc, int is_ht)
{
	sc->sc_update_chainmask = 1;
	if (is_ht) {
		sc->sc_tx_chainmask = sc->sc_ah->ah_caps.tx_chainmask;
		sc->sc_rx_chainmask = sc->sc_ah->ah_caps.rx_chainmask;
	} else {
		sc->sc_tx_chainmask = 1;
		sc->sc_rx_chainmask = 1;
	}

	DPRINTF(sc, ATH_DBG_CONFIG, "%s: tx chmask: %d, rx chmask: %d\n",
		__func__, sc->sc_tx_chainmask, sc->sc_rx_chainmask);
}
/*
 * This routine brings the VAP out of the down state into a "listen" state
 * where it waits for association requests.  This is used in AP and AdHoc
 * modes.
 */
int ath_vap_listen(struct ath_softc *sc, int if_id)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_vap *avp;
	u32 rfilt = 0;
	DECLARE_MAC_BUF(mac);

	avp = sc->sc_vaps[if_id];
	if (avp == NULL) {
		DPRINTF(sc, ATH_DBG_FATAL, "%s: invalid interface id %u\n",
			__func__, if_id);
		return -EINVAL;
	}

#ifdef CONFIG_SLOW_ANT_DIV
	ath_slow_ant_div_stop(&sc->sc_antdiv);
#endif

	/* update ratectrl about the new state */
	ath_rate_newstate(sc, avp);

	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	if (sc->sc_opmode == ATH9K_M_STA || sc->sc_opmode == ATH9K_M_IBSS) {
		memcpy(sc->sc_curbssid, ath_bcast_mac, ETH_ALEN);
		ath9k_hw_write_associd(ah, sc->sc_curbssid, sc->sc_curaid);
	} else
		sc->sc_curaid = 0;

	DPRINTF(sc, ATH_DBG_CONFIG,
		"%s: RX filter 0x%x bssid %s aid 0x%x\n",
		__func__, rfilt, print_mac(mac,
			sc->sc_curbssid), sc->sc_curaid);

	/*
	 * Disable BMISS interrupt when we're not associated
	 */
	ath9k_hw_set_interrupts(ah,
		sc->sc_imask & ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS));
	sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
	/* need to reconfigure the beacons when it moves to RUN */
	sc->sc_beacons = 0;

	return 0;
}
int ath_vap_attach(struct ath_softc *sc,
		   int if_id,
		   struct ieee80211_vif *if_data,
		   enum ath9k_opmode opmode)
{
	struct ath_vap *avp;

	if (if_id >= ATH_BCBUF || sc->sc_vaps[if_id] != NULL) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: Invalid interface id = %u\n", __func__, if_id);
		return -EINVAL;
	}

	switch (opmode) {
	case ATH9K_M_STA:
	case ATH9K_M_IBSS:
	case ATH9K_M_MONITOR:
		break;
	case ATH9K_M_HOSTAP:
		/* XXX not right, beacon buffer is allocated on RUN trans */
		if (list_empty(&sc->sc_bbuf))
			return -ENOMEM;
		break;
	default:
		return -EINVAL;
	}

	/* create ath_vap */
	avp = kmalloc(sizeof(struct ath_vap), GFP_KERNEL);
	if (avp == NULL)
		return -ENOMEM;

	memzero(avp, sizeof(struct ath_vap));
	avp->av_if_data = if_data;
	/* Set the VAP opmode */
	avp->av_opmode = opmode;
	avp->av_bslot = -1;
	INIT_LIST_HEAD(&avp->av_mcastq.axq_q);
	INIT_LIST_HEAD(&avp->av_mcastq.axq_acq);
	spin_lock_init(&avp->av_mcastq.axq_lock);

	ath9k_hw_set_tsfadjust(sc->sc_ah, 1);

	sc->sc_vaps[if_id] = avp;
	/* Set the device opmode */
	sc->sc_opmode = opmode;

	/* default VAP configuration */
	avp->av_config.av_fixed_rateset = IEEE80211_FIXED_RATE_NONE;
	avp->av_config.av_fixed_retryset = 0x03030303;

	return 0;
}
int ath_vap_detach(struct ath_softc *sc, int if_id)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_vap *avp;

	avp = sc->sc_vaps[if_id];
	if (avp == NULL) {
		DPRINTF(sc, ATH_DBG_FATAL, "%s: invalid interface id %u\n",
			__func__, if_id);
		return -EINVAL;
	}

	/*
	 * Quiesce the hardware while we remove the vap.  In
	 * particular we need to reclaim all references to the
	 * vap state by any frames pending on the tx queues.
	 *
	 * XXX can we do this w/o affecting other vap's?
	 */
	ath9k_hw_set_interrupts(ah, 0);	/* disable interrupts */
	ath_draintxq(sc, false);	/* stop xmit side */
	ath_stoprecv(sc);		/* stop recv side */
	ath_flushrecv(sc);		/* flush recv queue */

	/* Reclaim any pending mcast bufs on the vap. */
	ath_tx_draintxq(sc, &avp->av_mcastq, false);

	kfree(avp);
	sc->sc_vaps[if_id] = NULL;

	return 0;
}
int ath_vap_config(struct ath_softc *sc,
		   int if_id, struct ath_vap_config *if_config)
{
	struct ath_vap *avp;

	if (if_id >= ATH_BCBUF) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: Invalid interface id = %u\n", __func__, if_id);
		return -EINVAL;
	}

	avp = sc->sc_vaps[if_id];
	ASSERT(avp != NULL);

	if (avp)
		memcpy(&avp->av_config, if_config, sizeof(avp->av_config));

	return 0;
}
int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan)
{
	struct ath_hal *ah = sc->sc_ah;
	int status;
	int error = 0;
	enum ath9k_ht_macmode ht_macmode = ath_cwm_macmode(sc);

	DPRINTF(sc, ATH_DBG_CONFIG, "%s: mode %d\n", __func__, sc->sc_opmode);

	/*
	 * Stop anything previously setup.  This is safe
	 * whether this is the first time through or not.
	 */
	ath_stop(sc);

	/* Initialize chainmask selection */
	sc->sc_tx_chainmask = ah->ah_caps.tx_chainmask;
	sc->sc_rx_chainmask = ah->ah_caps.rx_chainmask;

	/* Reset SERDES registers */
	ath9k_hw_configpcipowersave(ah, 0);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''.  On return the hardware is known to
	 * be powered up and with interrupts disabled.  This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	sc->sc_curchan = *initial_chan;

	spin_lock_bh(&sc->sc_resetlock);
	if (!ath9k_hw_reset(ah, sc->sc_opmode, &sc->sc_curchan, ht_macmode,
			    sc->sc_tx_chainmask, sc->sc_rx_chainmask,
			    sc->sc_ht_extprotspacing, false, &status)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to reset hardware; hal status %u "
			"(freq %u flags 0x%x)\n", __func__, status,
			sc->sc_curchan.channel, sc->sc_curchan.channelFlags);
		error = -EIO;
		spin_unlock_bh(&sc->sc_resetlock);
		goto done;
	}
	spin_unlock_bh(&sc->sc_resetlock);
	/*
	 * This is needed only to setup initial state
	 * but it's best done after a reset.
	 */
	ath_update_txpow(sc);

	/*
	 * Setup the hardware after reset:
	 * The receive engine is set going.
	 * Frame transmit is handled entirely
	 * in the frame output path; there's nothing to do
	 * here except setup the interrupt mask.
	 */
	if (ath_startrecv(sc) != 0) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to start recv logic\n", __func__);
		error = -EIO;
		goto done;
	}

	/* Setup our intr mask. */
	sc->sc_imask = ATH9K_INT_RX | ATH9K_INT_TX
		| ATH9K_INT_RXEOL | ATH9K_INT_RXORN
		| ATH9K_INT_FATAL | ATH9K_INT_GLOBAL;

	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_GTT)
		sc->sc_imask |= ATH9K_INT_GTT;

	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)
		sc->sc_imask |= ATH9K_INT_CST;

	/*
	 * Enable MIB interrupts when there are hardware phy counters.
	 * Note we only do this (at the moment) for station mode.
	 */
	if (ath9k_hw_phycounters(ah) &&
	    ((sc->sc_opmode == ATH9K_M_STA) || (sc->sc_opmode == ATH9K_M_IBSS)))
		sc->sc_imask |= ATH9K_INT_MIB;
	/*
	 * Some hardware processes the TIM IE and fires an
	 * interrupt when the TIM bit is set.  For hardware
	 * that does, if not overridden by configuration,
	 * enable the TIM interrupt when operating as station.
	 */
	if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_ENHANCEDPM) &&
	    (sc->sc_opmode == ATH9K_M_STA) &&
	    !sc->sc_config.swBeaconProcess)
		sc->sc_imask |= ATH9K_INT_TIM;
	/*
	 * Don't enable interrupts here as we've not yet built our
	 * vap and node data structures, which will be needed as soon
	 * as we start receiving.
	 */
	ath_setcurmode(sc, ath_chan2mode(initial_chan));

	/* XXX: we must make sure h/w is ready and clear invalid flag
	 * before turning on interrupt. */
	sc->sc_invalid = 0;
done:
	return error;
}
/*
 * Reset the hardware w/o losing operational state.  This is
 * basically a more efficient way of doing ath_stop, ath_init,
 * followed by state transitions to the current 802.11
 * operational state.  Used to recover from errors such as rx
 * overrun and to reset the hardware when rf gain settings must be reset.
 */
static int ath_reset_start(struct ath_softc *sc, u32 flag)
{
	struct ath_hal *ah = sc->sc_ah;

	ath9k_hw_set_interrupts(ah, 0);	/* disable interrupts */
	ath_draintxq(sc, flag & RESET_RETRY_TXQ);	/* stop xmit side */
	ath_stoprecv(sc);	/* stop recv side */
	ath_flushrecv(sc);	/* flush recv queue */

	return 0;
}
static int ath_reset_end(struct ath_softc *sc, u32 flag)
{
	struct ath_hal *ah = sc->sc_ah;

	if (ath_startrecv(sc) != 0)	/* restart recv */
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to start recv logic\n", __func__);

	/*
	 * We may be doing a reset in response to a request
	 * that changes the channel so update any state that
	 * might change as a result.
	 */
	ath_setcurmode(sc, ath_chan2mode(&sc->sc_curchan));

	ath_update_txpow(sc);	/* update tx power state */

	if (sc->sc_beacons)
		ath_beacon_config(sc, ATH_IF_ID_ANY);	/* restart beacons */
	ath9k_hw_set_interrupts(ah, sc->sc_imask);

	/* Restart the txq */
	if (flag & RESET_RETRY_TXQ) {
		int i;
		for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
			if (ATH_TXQ_SETUP(sc, i)) {
				spin_lock_bh(&sc->sc_txq[i].axq_lock);
				ath_txq_schedule(sc, &sc->sc_txq[i]);
				spin_unlock_bh(&sc->sc_txq[i].axq_lock);
			}
		}
	}

	return 0;
}
int ath_reset(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	int status;
	int error = 0;
	enum ath9k_ht_macmode ht_macmode = ath_cwm_macmode(sc);

	/* NB: indicate channel change so we do a full reset */
	spin_lock_bh(&sc->sc_resetlock);
	if (!ath9k_hw_reset(ah, sc->sc_opmode, &sc->sc_curchan,
			    ht_macmode,
			    sc->sc_tx_chainmask, sc->sc_rx_chainmask,
			    sc->sc_ht_extprotspacing, false, &status)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to reset hardware; hal status %u\n",
			__func__, status);
		error = -EIO;
	}
	spin_unlock_bh(&sc->sc_resetlock);

	return error;
}
int ath_suspend(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;

	/* No I/O if device has been surprise removed */
	if (sc->sc_invalid)
		return -EIO;

	/* Shut off the interrupt before setting sc->sc_invalid to '1' */
	ath9k_hw_set_interrupts(ah, 0);

	/* XXX: we must make sure h/w will not generate any interrupt
	 * before setting the invalid flag. */
	sc->sc_invalid = 1;

	/* disable HAL and put h/w to sleep */
	ath9k_hw_disable(sc->sc_ah);

	ath9k_hw_configpcipowersave(sc->sc_ah, 1);

	return 0;
}
/* Interrupt handler.  Most of the actual processing is deferred.
 * It's the caller's responsibility to ensure the chip is awake. */

irqreturn_t ath_isr(int irq, void *dev)
{
	struct ath_softc *sc = dev;
	struct ath_hal *ah = sc->sc_ah;
	enum ath9k_int status;
	bool sched = false;

	do {
		if (sc->sc_invalid) {
			/*
			 * The hardware is not ready/present, don't
			 * touch anything. Note this can happen early
			 * on if the IRQ is shared.
			 */
			return IRQ_NONE;
		}
		if (!ath9k_hw_intrpend(ah)) {	/* shared irq, not for us */
			return IRQ_NONE;
		}

		/*
		 * Figure out the reason(s) for the interrupt.  Note
		 * that the hal returns a pseudo-ISR that may include
		 * bits we haven't explicitly enabled so we mask the
		 * value to ensure we only process bits we requested.
		 */
		ath9k_hw_getisr(ah, &status);	/* NB: clears ISR too */

		status &= sc->sc_imask;	/* discard unasked-for bits */

		/*
		 * If there are no status bits set, then this interrupt was not
		 * for me (should have been caught above).
		 */
		if (!status)
			return IRQ_NONE;

		sc->sc_intrstatus = status;

		if (status & ATH9K_INT_FATAL) {
			/* need a chip reset */
			sched = true;
		} else if (status & ATH9K_INT_RXORN) {
			/* need a chip reset */
			sched = true;
		} else {
			if (status & ATH9K_INT_SWBA) {
				/* schedule a tasklet for beacon handling */
				tasklet_schedule(&sc->bcon_tasklet);
			}
			if (status & ATH9K_INT_RXEOL) {
				/*
				 * NB: the hardware should re-read the link when
				 *     RXE bit is written, but it doesn't work
				 *     at least on older hardware revs.
				 */
				sched = true;
			}

			if (status & ATH9K_INT_TXURN)
				/* bump tx trigger level */
				ath9k_hw_updatetxtriglevel(ah, true);
			/* XXX: optimize this */
			if (status & ATH9K_INT_RX)
				sched = true;
			if (status & ATH9K_INT_TX)
				sched = true;
			if (status & ATH9K_INT_BMISS)
				sched = true;
			/* carrier sense timeout */
			if (status & ATH9K_INT_CST)
				sched = true;
			if (status & ATH9K_INT_MIB) {
				/*
				 * Disable interrupts until we service the MIB
				 * interrupt; otherwise it will continue to
				 * fire.
				 */
				ath9k_hw_set_interrupts(ah, 0);
				/*
				 * Let the hal handle the event.  We assume
				 * it will clear whatever condition caused
				 * the interrupt.
				 */
				ath9k_hw_procmibevent(ah, &sc->sc_halstats);
				ath9k_hw_set_interrupts(ah, sc->sc_imask);
			}
			if (status & ATH9K_INT_TIM_TIMER) {
				if (!(ah->ah_caps.hw_caps &
				      ATH9K_HW_CAP_AUTOSLEEP)) {
					/* Clear RxAbort bit so that we can
					 * receive frames */
					ath9k_hw_setrxabort(ah, 0);
					sched = true;
				}
			}
		}
	} while (0);

	if (sched) {
		/* turn off every interrupt except SWBA */
		ath9k_hw_set_interrupts(ah, (sc->sc_imask & ATH9K_INT_SWBA));
		tasklet_schedule(&sc->intr_tq);
	}

	return IRQ_HANDLED;
}
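/*
 * NB: this is a classic top-half/bottom-half split: the ISR above only
 * latches sc_intrstatus and masks everything except SWBA, while
 * ath9k_tasklet() below does the actual processing and restores the
 * full interrupt mask once it finishes.
 */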
/* Deferred interrupt processing */

static void ath9k_tasklet(unsigned long data)
{
	struct ath_softc *sc = (struct ath_softc *)data;
	u32 status = sc->sc_intrstatus;

	if (status & ATH9K_INT_FATAL) {
		/* need a chip reset */
		ath_internal_reset(sc);
		return;
	} else {
		if (status &
		    (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN)) {
			/* XXX: fill me in */
			/*
			if (status & ATH9K_INT_RXORN) {
			}
			if (status & ATH9K_INT_RXEOL) {
			}
			*/
			spin_lock_bh(&sc->sc_rxflushlock);
			ath_rx_tasklet(sc, 0);
			spin_unlock_bh(&sc->sc_rxflushlock);
		}
		/* XXX: optimize this */
		if (status & ATH9K_INT_TX)
			ath_tx_tasklet(sc);
		/* XXX: fill me in */
		/*
		if (status & ATH9K_INT_BMISS) {
		}
		if (status & (ATH9K_INT_TIM | ATH9K_INT_DTIMSYNC)) {
			if (status & ATH9K_INT_TIM) {
			}
			if (status & ATH9K_INT_DTIMSYNC) {
			}
		}
		*/
	}

	/* re-enable hardware interrupt */
	ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask);
}
int ath_init(u16 devid, struct ath_softc *sc)
{
	struct ath_hal *ah = NULL;
	int status;
	int error = 0, i;
	int csz = 0;
	u32 rd;

	/* XXX: hardware will not be ready until ath_open() being called */
	sc->sc_invalid = 1;

	sc->sc_debug = DBG_DEFAULT;
	DPRINTF(sc, ATH_DBG_CONFIG, "%s: devid 0x%x\n", __func__, devid);

	/* Initialize tasklet */
	tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
	tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet,
		     (unsigned long)sc);

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	bus_read_cachesize(sc, &csz);
	/* XXX assert csz is non-zero */
	sc->sc_cachelsz = csz << 2;	/* convert to bytes */

	spin_lock_init(&sc->sc_resetlock);

	ah = ath9k_hw_attach(devid, sc, sc->mem, &status);
	if (ah == NULL) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to attach hardware; HAL status %u\n",
			__func__, status);
		error = -ENXIO;
		goto bad;
	}
	sc->sc_ah = ah;

	/* Get the chipset-specific aggr limit. */
	sc->sc_rtsaggrlimit = ah->ah_caps.rts_aggr_limit;

	/* Get the hardware key cache size. */
	sc->sc_keymax = ah->ah_caps.keycache_size;
	if (sc->sc_keymax > ATH_KEYMAX) {
		DPRINTF(sc, ATH_DBG_KEYCACHE,
			"%s: Warning, using only %u entries in %u key cache\n",
			__func__, ATH_KEYMAX, sc->sc_keymax);
		sc->sc_keymax = ATH_KEYMAX;
	}

	/*
	 * Reset the key cache since some parts do not
	 * reset the contents on initial power up.
	 */
	for (i = 0; i < sc->sc_keymax; i++)
		ath9k_hw_keyreset(ah, (u16) i);
	/*
	 * Mark key cache slots associated with global keys
	 * as in use.  If we knew TKIP was not to be used we
	 * could leave the +32, +64, and +32+64 slots free.
	 * XXX only for splitmic.
	 */
	for (i = 0; i < IEEE80211_WEP_NKID; i++) {
		set_bit(i, sc->sc_keymap);
		set_bit(i + 32, sc->sc_keymap);
		set_bit(i + 64, sc->sc_keymap);
		set_bit(i + 32 + 64, sc->sc_keymap);
	}
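	/*
	 * Illustrative slot layout (per the comment above): each global
	 * key index i is shadowed by up to three companion MIC slots at
	 * i + 32, i + 64, and i + 32 + 64 when TKIP with split MIC keys
	 * is in use, which is why all four aliases of each index are
	 * reserved even though only IEEE80211_WEP_NKID base slots hold
	 * actual keys.
	 */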
	/*
	 * Collect the channel list using the default country
	 * code and including outdoor channels.  The 802.11 layer
	 * is responsible for filtering this list based on settings
	 * like the phy mode.
	 */
	rd = ah->ah_currentRD;

	error = ath_setup_channels(sc);
	if (error)
		goto bad;

	/* default to STA mode */
	sc->sc_opmode = ATH9K_M_MONITOR;

	/* Setup rate tables */
	ath_setup_rates(sc, IEEE80211_BAND_2GHZ);
	ath_setup_rates(sc, IEEE80211_BAND_5GHZ);

	/* NB: setup here so ath_rate_update is happy */
	ath_setcurmode(sc, ATH9K_MODE_11A);

	/*
	 * Allocate hardware transmit queues: one queue for
	 * beacon frames and one data queue for each QoS
	 * priority.  Note that the hal handles resetting
	 * these queues at the needed time.
	 */
	sc->sc_bhalq = ath_beaconq_setup(ah);
	if (sc->sc_bhalq == -1) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup a beacon xmit queue\n", __func__);
		error = -EIO;
		goto bad2;
	}
	sc->sc_cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
	if (sc->sc_cabq == NULL) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup CAB xmit queue\n", __func__);
		error = -EIO;
		goto bad2;
	}

	sc->sc_config.cabqReadytime = ATH_CABQ_READY_TIME;
	ath_cabq_update(sc);

	for (i = 0; i < ARRAY_SIZE(sc->sc_haltype2q); i++)
		sc->sc_haltype2q[i] = -1;

	/* Setup data queues */
	/* NB: ensure BK queue is the lowest priority h/w queue */
	if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup xmit queue for BK traffic\n",
			__func__);
		error = -EIO;
		goto bad2;
	}
	if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup xmit queue for BE traffic\n",
			__func__);
		error = -EIO;
		goto bad2;
	}
	if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup xmit queue for VI traffic\n",
			__func__);
		error = -EIO;
		goto bad2;
	}
	if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup xmit queue for VO traffic\n",
			__func__);
		error = -EIO;
		goto bad2;
	}

	sc->sc_rc = ath_rate_attach(ah);
	if (sc->sc_rc == NULL) {
		error = -EIO;
		goto bad2;
	}
	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
				   ATH9K_CIPHER_TKIP, NULL)) {
		/*
		 * Whether we should enable h/w TKIP MIC.
		 * XXX: if we don't support WME TKIP MIC, then we wouldn't
		 * report WMM capable, so it's always safe to turn on
		 * TKIP MIC in this case.
		 */
		ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC,
				       0, 1, NULL);
	}

	/*
	 * Check whether the separate key cache entries
	 * are required to handle both tx+rx MIC keys.
	 * With split mic keys the number of stations is limited
	 * to 27 otherwise 59.
	 */
	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
				   ATH9K_CIPHER_TKIP, NULL)
	    && ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
				      ATH9K_CIPHER_MIC, NULL)
	    && ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT,
				      0, NULL))
		sc->sc_splitmic = 1;

	/* turn on mcast key search if possible */
	if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
		(void)ath9k_hw_setcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 1,
					     1, NULL);

	sc->sc_config.txpowlimit = ATH_TXPOWER_MAX;
	sc->sc_config.txpowlimit_override = 0;

	/* 11n Capabilities */
	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) {
		sc->sc_txaggr = 1;
		sc->sc_rxaggr = 1;
	}

	sc->sc_tx_chainmask = ah->ah_caps.tx_chainmask;
	sc->sc_rx_chainmask = ah->ah_caps.rx_chainmask;

	/* Configuration for rx chain detection */
	sc->sc_rxchaindetect_ref = 0;
	sc->sc_rxchaindetect_thresh5GHz = 35;
	sc->sc_rxchaindetect_thresh2GHz = 35;
	sc->sc_rxchaindetect_delta5GHz = 30;
	sc->sc_rxchaindetect_delta2GHz = 30;

	ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
	sc->sc_defant = ath9k_hw_getdefantenna(ah);

	ath9k_hw_getmac(ah, sc->sc_myaddr);
	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) {
		ath9k_hw_getbssidmask(ah, sc->sc_bssidmask);
		ATH_SET_VAP_BSSID_MASK(sc->sc_bssidmask);
		ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);
	}
	sc->sc_slottime = ATH9K_SLOT_TIME_9;	/* default to short slot time */

	/* initialize beacon slots */
	for (i = 0; i < ARRAY_SIZE(sc->sc_bslot); i++)
		sc->sc_bslot[i] = ATH_IF_ID_ANY;

	/* save MISC configurations */
	sc->sc_config.swBeaconProcess = 1;

#ifdef CONFIG_SLOW_ANT_DIV
	/* range is 40 - 255, we use something in the middle */
	ath_slow_ant_div_init(&sc->sc_antdiv, sc, 0x127);
#endif

	return 0;
bad2:
	/* cleanup tx queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->sc_txq[i]);
bad:
	if (ah)
		ath9k_hw_detach(ah);

	return error;
}
void ath_deinit(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	int i;

	DPRINTF(sc, ATH_DBG_CONFIG, "%s\n", __func__);

	ath_stop(sc);
	if (!sc->sc_invalid)
		ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
	ath_rate_detach(sc->sc_rc);
	/* cleanup tx queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->sc_txq[i]);
	ath9k_hw_detach(ah);
}
/*******************/
/* Node Management */
/*******************/
struct ath_node *ath_node_attach(struct ath_softc *sc, u8 *addr, int if_id)
{
	struct ath_vap *avp;
	struct ath_node *an;
	DECLARE_MAC_BUF(mac);

	avp = sc->sc_vaps[if_id];
	ASSERT(avp != NULL);

	/* mac80211 sta_notify callback is from an IRQ context, so no sleep */
	an = kmalloc(sizeof(struct ath_node), GFP_ATOMIC);
	if (an == NULL)
		return NULL;
	memzero(an, sizeof(*an));

	an->an_sc = sc;
	memcpy(an->an_addr, addr, ETH_ALEN);
	atomic_set(&an->an_refcnt, 1);

	/* set up per-node tx/rx state */
	ath_tx_node_init(sc, an);
	ath_rx_node_init(sc, an);

	ath_chainmask_sel_init(sc, an);
	ath_chainmask_sel_timerstart(&an->an_chainmask_sel);
	list_add(&an->list, &sc->node_list);

	return an;
}
void ath_node_detach(struct ath_softc *sc, struct ath_node *an, bool bh_flag)
{
	unsigned long flags;

	DECLARE_MAC_BUF(mac);

	ath_chainmask_sel_timerstop(&an->an_chainmask_sel);
	an->an_flags |= ATH_NODE_CLEAN;
	ath_tx_node_cleanup(sc, an, bh_flag);
	ath_rx_node_cleanup(sc, an);

	ath_tx_node_free(sc, an);
	ath_rx_node_free(sc, an);

	spin_lock_irqsave(&sc->node_lock, flags);

	list_del(&an->list);

	spin_unlock_irqrestore(&sc->node_lock, flags);

	kfree(an);
}
/* Finds a node and increases the refcnt if found */

struct ath_node *ath_node_get(struct ath_softc *sc, u8 *addr)
{
	struct ath_node *an = NULL, *an_found = NULL;

	if (list_empty(&sc->node_list))	/* FIXME */
		goto out;
	list_for_each_entry(an, &sc->node_list, list) {
		if (!compare_ether_addr(an->an_addr, addr)) {
			atomic_inc(&an->an_refcnt);
			an_found = an;
			break;
		}
	}
out:
	return an_found;
}
/* Decrements the refcnt and if it drops to zero, detach the node */

void ath_node_put(struct ath_softc *sc, struct ath_node *an, bool bh_flag)
{
	if (atomic_dec_and_test(&an->an_refcnt))
		ath_node_detach(sc, an, bh_flag);
}
/* Finds a node, doesn't increment refcnt. Caller must hold sc->node_lock */
struct ath_node *ath_node_find(struct ath_softc *sc, u8 *addr)
{
	struct ath_node *an = NULL, *an_found = NULL;

	if (list_empty(&sc->node_list))
		return NULL;

	list_for_each_entry(an, &sc->node_list, list)
		if (!compare_ether_addr(an->an_addr, addr)) {
			an_found = an;
			break;
		}

	return an_found;
}
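/*
 * NB: ath_node_get()/ath_node_put() form the usual reference-count pair:
 * _get looks a node up and takes a reference, _put drops one and frees
 * the node via ath_node_detach() when the count reaches zero (nodes are
 * born with a count of 1 in ath_node_attach()). ath_node_find() is the
 * non-referencing lookup for callers already holding sc->node_lock.
 */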
/*
 * Setup driver-specific state for a newly associated node.  This routine
 * really only applies if compression or XR are enabled; there is no code
 * covering any other cases.
 */
void ath_newassoc(struct ath_softc *sc,
		  struct ath_node *an, int isnew, int isuapsd)
{
	int tidno;

	/* if station reassociates, tear down the aggregation state. */
	if (!isnew) {
		for (tidno = 0; tidno < WME_NUM_TID; tidno++) {
			if (sc->sc_txaggr)
				ath_tx_aggr_teardown(sc, an, tidno);
			if (sc->sc_rxaggr)
				ath_rx_aggr_teardown(sc, an, tidno);
		}
	}
	an->an_flags = 0;
}
void ath_key_reset(struct ath_softc *sc, u16 keyix, int freeslot)
{
	ath9k_hw_keyreset(sc->sc_ah, keyix);
	if (freeslot)
		clear_bit(keyix, sc->sc_keymap);
}

int ath_keyset(struct ath_softc *sc,
	       u16 keyix,
	       struct ath9k_keyval *hk,
	       const u8 mac[ETH_ALEN])
{
	bool status;

	status = ath9k_hw_set_keycache_entry(sc->sc_ah,
		keyix, hk, mac, false);

	return status != false;
}
/***********************/
/* TX Power/Regulatory */
/***********************/
/*
 * Set Transmit power in HAL
 *
 * This routine makes the actual HAL calls to set the new transmit power
 * limit.
 */
void ath_update_txpow(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	u32 txpow;

	if (sc->sc_curtxpow != sc->sc_config.txpowlimit) {
		ath9k_hw_set_txpowerlimit(ah, sc->sc_config.txpowlimit);
		/* read back in case value is clamped */
		ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow);
		sc->sc_curtxpow = txpow;
	}
}
/* Return the current country and domain information */
void ath_get_currentCountry(struct ath_softc *sc,
			    struct ath9k_country_entry *ctry)
{
	ath9k_regd_get_current_country(sc->sc_ah, ctry);

	/* If HAL is not specific yet, since it is band dependent,
	 * use the one we passed in. */
	if (ctry->countryCode == CTRY_DEFAULT) {
		ctry->iso[0] = 0;
		ctry->iso[1] = 0;
	} else if (ctry->iso[0] && ctry->iso[1]) {
		if (!ctry->iso[2]) {
			if (ath_outdoor)
				ctry->iso[2] = 'O';
			else
				ctry->iso[2] = 'I';
		}
	}
}
/**************************/
/* Slow Antenna Diversity */
/**************************/
void ath_slow_ant_div_init(struct ath_antdiv *antdiv,
			   struct ath_softc *sc,
			   int32_t rssitrig)
{
	int trig;

	/* antdivf_rssitrig can range from 40 - 0xff */
	trig = (rssitrig > 0xff) ? 0xff : rssitrig;
	trig = (trig < 40) ? 40 : trig;	/* clamp the already-capped value */

	antdiv->antdiv_sc = sc;
	antdiv->antdivf_rssitrig = trig;
}
void ath_slow_ant_div_start(struct ath_antdiv *antdiv,
			    u8 num_antcfg,
			    const u8 *bssid)
{
	antdiv->antdiv_num_antcfg =
		num_antcfg < ATH_ANT_DIV_MAX_CFG ?
		num_antcfg : ATH_ANT_DIV_MAX_CFG;
	antdiv->antdiv_state = ATH_ANT_DIV_IDLE;
	antdiv->antdiv_curcfg = 0;
	antdiv->antdiv_bestcfg = 0;
	antdiv->antdiv_laststatetsf = 0;

	memcpy(antdiv->antdiv_bssid, bssid, sizeof(antdiv->antdiv_bssid));

	antdiv->antdiv_start = 1;
}
void ath_slow_ant_div_stop(struct ath_antdiv *antdiv)
{
	antdiv->antdiv_start = 0;
}
static int32_t ath_find_max_val(int32_t *val,
				u8 num_val, u8 *max_index)
{
	u32 MaxVal = *val++;
	u32 cur_index = 0;

	*max_index = 0;
	while (++cur_index < num_val) {
		if (*val > MaxVal) {
			MaxVal = *val;
			*max_index = cur_index;
		}
		val++;
	}

	return MaxVal;
}
void ath_slow_ant_div(struct ath_antdiv *antdiv,
		      struct ieee80211_hdr *hdr,
		      struct ath_rx_status *rx_stats)
{
	struct ath_softc *sc = antdiv->antdiv_sc;
	struct ath_hal *ah = sc->sc_ah;
	u64 curtsf = 0;
	u8 bestcfg, curcfg = antdiv->antdiv_curcfg;
	__le16 fc = hdr->frame_control;

	if (antdiv->antdiv_start && ieee80211_is_beacon(fc)
	    && !compare_ether_addr(hdr->addr3, antdiv->antdiv_bssid)) {
		antdiv->antdiv_lastbrssi[curcfg] = rx_stats->rs_rssi;
		antdiv->antdiv_lastbtsf[curcfg] = ath9k_hw_gettsf64(sc->sc_ah);
		curtsf = antdiv->antdiv_lastbtsf[curcfg];
	} else {
		return;
	}

	switch (antdiv->antdiv_state) {
	case ATH_ANT_DIV_IDLE:
		if ((antdiv->antdiv_lastbrssi[curcfg] <
		     antdiv->antdivf_rssitrig)
		    && ((curtsf - antdiv->antdiv_laststatetsf) >
			ATH_ANT_DIV_MIN_IDLE_US)) {

			curcfg++;
			if (curcfg == antdiv->antdiv_num_antcfg)
				curcfg = 0;

			if (!ath9k_hw_select_antconfig(ah, curcfg)) {
				antdiv->antdiv_bestcfg = antdiv->antdiv_curcfg;
				antdiv->antdiv_curcfg = curcfg;
				antdiv->antdiv_laststatetsf = curtsf;
				antdiv->antdiv_state = ATH_ANT_DIV_SCAN;
			}
		}
		break;

	case ATH_ANT_DIV_SCAN:
		if ((curtsf - antdiv->antdiv_laststatetsf) <
		    ATH_ANT_DIV_MIN_SCAN_US)
			break;

		curcfg++;
		if (curcfg == antdiv->antdiv_num_antcfg)
			curcfg = 0;

		if (curcfg == antdiv->antdiv_bestcfg) {
			ath_find_max_val(antdiv->antdiv_lastbrssi,
					 antdiv->antdiv_num_antcfg, &bestcfg);
			if (!ath9k_hw_select_antconfig(ah, bestcfg)) {
				antdiv->antdiv_bestcfg = bestcfg;
				antdiv->antdiv_curcfg = bestcfg;
				antdiv->antdiv_laststatetsf = curtsf;
				antdiv->antdiv_state = ATH_ANT_DIV_IDLE;
			}
		} else {
			if (!ath9k_hw_select_antconfig(ah, curcfg)) {
				antdiv->antdiv_curcfg = curcfg;
				antdiv->antdiv_laststatetsf = curtsf;
				antdiv->antdiv_state = ATH_ANT_DIV_SCAN;
			}
		}
		break;
	}
}
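/*
 * NB: the state machine above samples beacon RSSI per antenna
 * configuration. In IDLE, a weak beacon (RSSI below antdivf_rssitrig)
 * after a minimum dwell starts a SCAN that steps through every
 * configuration, staying at least ATH_ANT_DIV_MIN_SCAN_US on each; once
 * the scan wraps back to the starting configuration, the one with the
 * best recorded beacon RSSI is selected and the machine returns to IDLE.
 */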
/***********************/
/* Descriptor Handling */
/***********************/
/*
 * Set up DMA descriptors
 *
 * This function will allocate both the DMA descriptor structure, and the
 * buffers it contains.  These are used to contain the descriptors used
 * by the system.
 */
int ath_descdma_setup(struct ath_softc *sc,
		      struct ath_descdma *dd,
		      struct list_head *head,
		      const char *name,
		      int nbuf,
		      int ndesc)
{
#define	DS2PHYS(_dd, _ds)						\
	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
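/*
 * Worked example (illustrative): a descriptor fetch is up to 32 dwords
 * (128 bytes), so a descriptor whose bus address has low 12 bits above
 * 0xF7F could run past the next 4 KB boundary at 0x1000.
 * ATH_DESC_4KB_BOUND_CHECK() flags exactly those addresses, and the
 * setup loop below skips ahead one descriptor slot whenever it trips;
 * ATH_DESC_4KB_BOUND_NUM_SKIPPED() sizes the allocation for one such
 * skip per 4 KB page.
 */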
	struct ath_desc *ds;
	struct ath_buf *bf;
	int i, bsize, error;

	DPRINTF(sc, ATH_DBG_CONFIG, "%s: %s DMA: %u buffers %u desc/buf\n",
		__func__, name, nbuf, ndesc);

	/* ath_desc must be a multiple of DWORDs */
	if ((sizeof(struct ath_desc) % 4) != 0) {
		DPRINTF(sc, ATH_DBG_FATAL, "%s: ath_desc not DWORD aligned\n",
			__func__);
		ASSERT((sizeof(struct ath_desc) % 4) == 0);
		error = -ENOMEM;
		goto fail;
	}

	dd->dd_name = name;
	dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;

	/*
	 * Need additional DMA memory because we can't use
	 * descriptors that cross the 4K page boundary. Assume
	 * one skipped descriptor per 4K page.
	 */
	if (!(sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
		u32 ndesc_skipped =
			ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
		u32 dma_len;

		while (ndesc_skipped) {
			dma_len = ndesc_skipped * sizeof(struct ath_desc);
			dd->dd_desc_len += dma_len;

			ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
		}
	}

	/* allocate descriptors */
	dd->dd_desc = pci_alloc_consistent(sc->pdev,
					   dd->dd_desc_len,
					   &dd->dd_desc_paddr);
	if (dd->dd_desc == NULL) {
		error = -ENOMEM;
		goto fail;
	}
	ds = dd->dd_desc;
	DPRINTF(sc, ATH_DBG_CONFIG, "%s: %s DMA map: %p (%u) -> %llx (%u)\n",
		__func__, dd->dd_name, ds, (u32) dd->dd_desc_len,
		ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);

	/* allocate buffers */
	bsize = sizeof(struct ath_buf) * nbuf;
	bf = kmalloc(bsize, GFP_KERNEL);
	if (bf == NULL) {
		error = -ENOMEM;
		goto fail2;
	}
	memzero(bf, bsize);
	dd->dd_bufptr = bf;

	INIT_LIST_HEAD(head);
	for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
		bf->bf_desc = ds;
		bf->bf_daddr = DS2PHYS(dd, ds);

		if (!(sc->sc_ah->ah_caps.hw_caps &
		      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
			/*
			 * Skip descriptor addresses which can cause 4KB
			 * boundary crossing (addr + length) with a 32 dword
			 * descriptor fetch.
			 */
			while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
				ASSERT((caddr_t) bf->bf_desc <
				       ((caddr_t) dd->dd_desc +
					dd->dd_desc_len));

				ds += ndesc;
				bf->bf_desc = ds;
				bf->bf_daddr = DS2PHYS(dd, ds);
			}
		}
		list_add_tail(&bf->list, head);
	}
	return 0;
fail2:
	pci_free_consistent(sc->pdev,
		dd->dd_desc_len, dd->dd_desc, dd->dd_desc_paddr);
fail:
	memzero(dd, sizeof(*dd));
	return error;
#undef ATH_DESC_4KB_BOUND_CHECK
#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
#undef DS2PHYS
}
/*
 * Cleanup DMA descriptors
 *
 * This function will free the DMA block that was allocated for the descriptor
 * pool.  Since this was allocated as one "chunk", it is freed in the same
 * manner.
 */
void ath_descdma_cleanup(struct ath_softc *sc,
			 struct ath_descdma *dd,
			 struct list_head *head)
{
	/* Free memory associated with descriptors */
	pci_free_consistent(sc->pdev,
		dd->dd_desc_len, dd->dd_desc, dd->dd_desc_paddr);

	INIT_LIST_HEAD(head);
	kfree(dd->dd_bufptr);
	memzero(dd, sizeof(*dd));
}
void ath_internal_reset(struct ath_softc *sc)
{
	ath_reset_start(sc, 0);
	ath_reset(sc);
	ath_reset_end(sc, 0);
}
int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
{
	int qnum;

	switch (queue) {
	case 0:
		qnum = sc->sc_haltype2q[ATH9K_WME_AC_VO];
		break;
	case 1:
		qnum = sc->sc_haltype2q[ATH9K_WME_AC_VI];
		break;
	case 2:
		qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
		break;
	case 3:
		qnum = sc->sc_haltype2q[ATH9K_WME_AC_BK];
		break;
	default:
		qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
		break;
	}

	return qnum;
}
int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc)
{
	int qnum;

	switch (queue) {
	case ATH9K_WME_AC_VO:
		qnum = 0;
		break;
	case ATH9K_WME_AC_VI:
		qnum = 1;
		break;
	case ATH9K_WME_AC_BE:
		qnum = 2;
		break;
	case ATH9K_WME_AC_BK:
		qnum = 3;
		break;
	default:
		qnum = -1;
		break;
	}

	return qnum;
}
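/*
 * NB: taken together, the two helpers above translate in both directions
 * between mac80211 queue numbers and WME access categories, e.g. queue 0
 * <-> ATH9K_WME_AC_VO (voice, highest priority) and queue 3 <->
 * ATH9K_WME_AC_BK (background, lowest priority).
 */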
/*
 * Expand time stamp to TSF
 *
 * Extend 15-bit time stamp from rx descriptor to
 * a full 64-bit TSF using the current h/w TSF.
 */
u64 ath_extend_tsf(struct ath_softc *sc, u32 rstamp)
{
	u64 tsf;

	tsf = ath9k_hw_gettsf64(sc->sc_ah);
	if ((tsf & 0x7fff) < rstamp)
		tsf -= 0x8000;
	return (tsf & ~0x7fff) | rstamp;
}
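/*
 * Worked example (illustrative): if the hardware TSF reads 0x18005 and
 * the descriptor reports rstamp 0x7ffe, then (tsf & 0x7fff) = 0x0005 is
 * less than rstamp, meaning the low 15 bits wrapped after the frame was
 * received; backing tsf up by 0x8000 before splicing in rstamp yields
 * 0x17ffe rather than the incorrect future value 0x1fffe.
 */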
/*
 * Set Default Antenna
 *
 * Call into the HAL to set the default antenna to use.  Not really valid for
 * MIMO technology.
 */
void ath_setdefantenna(void *context, u32 antenna)
{
	struct ath_softc *sc = (struct ath_softc *)context;
	struct ath_hal *ah = sc->sc_ah;

	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(ah, antenna);
	sc->sc_defant = antenna;
	sc->sc_rxotherant = 0;
}
/*
 * This will wake up the chip if required, and set the slot time for the
 * frame (maximum transmit time).  Slot time is assumed to be already set
 * in the ATH object member sc_slottime.
 */
void ath_setslottime(struct ath_softc *sc)
{
	ath9k_hw_setslottime(sc->sc_ah, sc->sc_slottime);
	sc->sc_updateslot = OK;
}