/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include "ath9k.h"

struct ath9k_vif_iter_data {
        int count;
        u8 *addr; /* flat array of collected MAC addresses (count * ETH_ALEN) */
};

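/*
 * Iterator callback: append the MAC address of each active interface to the
 * flat address array collected in iter_data.
 */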
static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
        struct ath9k_vif_iter_data *iter_data = data;
        u8 *nbuf;

        nbuf = krealloc(iter_data->addr, (iter_data->count + 1) * ETH_ALEN,
                        GFP_ATOMIC);
        if (nbuf == NULL)
                return;

        memcpy(nbuf + iter_data->count * ETH_ALEN, mac, ETH_ALEN);
        iter_data->addr = nbuf;
        iter_data->count++;
}

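/*
 * Compute the BSSID mask from all MAC addresses that are currently in use
 * (the primary MAC plus every active interface on all virtual wiphys) and
 * program it into the hardware.
 */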
void ath9k_set_bssid_mask(struct ieee80211_hw *hw)
{
        struct ath_wiphy *aphy = hw->priv;
        struct ath_softc *sc = aphy->sc;
        struct ath9k_vif_iter_data iter_data;
        int i, j;
        u8 mask[ETH_ALEN];

        /*
         * Add primary MAC address even if it is not in active use since it
         * will be configured to the hardware as the starting point and the
         * BSSID mask will need to be changed if another address is active.
         */
        iter_data.addr = kmalloc(ETH_ALEN, GFP_ATOMIC);
        if (iter_data.addr) {
                memcpy(iter_data.addr, sc->sc_ah->macaddr, ETH_ALEN);
                iter_data.count = 1;
        } else
                iter_data.count = 0;
        /* Get list of all active MAC addresses */
        spin_lock_bh(&sc->wiphy_lock);
        ieee80211_iterate_active_interfaces_atomic(sc->hw, ath9k_vif_iter,
                                                   &iter_data);
        for (i = 0; i < sc->num_sec_wiphy; i++) {
                if (sc->sec_wiphy[i] == NULL)
                        continue;
                ieee80211_iterate_active_interfaces_atomic(
                        sc->sec_wiphy[i]->hw, ath9k_vif_iter, &iter_data);
        }
        spin_unlock_bh(&sc->wiphy_lock);
        /* Generate an address mask to cover all active addresses */
        memset(mask, 0, ETH_ALEN);
        for (i = 0; i < iter_data.count; i++) {
                u8 *a1 = iter_data.addr + i * ETH_ALEN;
                for (j = i + 1; j < iter_data.count; j++) {
                        u8 *a2 = iter_data.addr + j * ETH_ALEN;
                        /* Set a mask bit wherever any two addresses differ */
                        mask[0] |= a1[0] ^ a2[0];
                        mask[1] |= a1[1] ^ a2[1];
                        mask[2] |= a1[2] ^ a2[2];
                        mask[3] |= a1[3] ^ a2[3];
                        mask[4] |= a1[4] ^ a2[4];
                        mask[5] |= a1[5] ^ a2[5];
                }
        }

        kfree(iter_data.addr);
        /* Invert the mask and configure hardware */
        sc->bssidmask[0] = ~mask[0];
        sc->bssidmask[1] = ~mask[1];
        sc->bssidmask[2] = ~mask[2];
        sc->bssidmask[3] = ~mask[3];
        sc->bssidmask[4] = ~mask[4];
        sc->bssidmask[5] = ~mask[5];

        ath9k_hw_setbssidmask(sc);
}

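/*
 * Register an additional virtual wiphy (a secondary ieee80211_hw sharing this
 * ath_softc). The new wiphy gets a locally administered MAC address derived
 * from the primary MAC and its slot index.
 */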
int ath9k_wiphy_add(struct ath_softc *sc)
{
        int i, error;
        struct ath_wiphy *aphy;
        struct ieee80211_hw *hw;
        u8 addr[ETH_ALEN];

        hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy), &ath9k_ops);
        if (hw == NULL)
                return -ENOMEM;

        spin_lock_bh(&sc->wiphy_lock);
        /* Look for an empty slot in the secondary wiphy array */
        for (i = 0; i < sc->num_sec_wiphy; i++) {
                if (sc->sec_wiphy[i] == NULL)
                        break;
        }

        if (i == sc->num_sec_wiphy) {
                /* No empty slot available; increase array length */
                struct ath_wiphy **n;
                n = krealloc(sc->sec_wiphy,
                             (sc->num_sec_wiphy + 1) *
                             sizeof(struct ath_wiphy *),
                             GFP_ATOMIC);
                if (n == NULL) {
                        spin_unlock_bh(&sc->wiphy_lock);
                        ieee80211_free_hw(hw);
                        return -ENOMEM;
                }
                n[i] = NULL;
                sc->sec_wiphy = n;
                sc->num_sec_wiphy++;
        }

        SET_IEEE80211_DEV(hw, sc->dev);

        aphy = hw->priv;
        aphy->sc = sc;
        aphy->hw = hw;
        sc->sec_wiphy[i] = aphy;
        spin_unlock_bh(&sc->wiphy_lock);
        memcpy(addr, sc->sc_ah->macaddr, ETH_ALEN);
        addr[0] |= 0x02; /* Locally administered address */
        /*
         * XOR virtual wiphy index into the least significant bits to generate
         * a different MAC address for each virtual wiphy.
         */
        addr[5] ^= i & 0xff;
        addr[4] ^= (i & 0xff00) >> 8;
        addr[3] ^= (i & 0xff0000) >> 16;

        SET_IEEE80211_PERM_ADDR(hw, addr);

        ath_set_hw_capab(sc, hw);

        error = ieee80211_register_hw(hw);

        return error;
}

int ath9k_wiphy_del(struct ath_wiphy *aphy)
{
        struct ath_softc *sc = aphy->sc;
        int i;

        spin_lock_bh(&sc->wiphy_lock);
        for (i = 0; i < sc->num_sec_wiphy; i++) {
                if (aphy == sc->sec_wiphy[i]) {
                        sc->sec_wiphy[i] = NULL;
                        spin_unlock_bh(&sc->wiphy_lock);
                        ieee80211_unregister_hw(aphy->hw);
                        ieee80211_free_hw(aphy->hw);
                        return 0;
                }
        }
        spin_unlock_bh(&sc->wiphy_lock);
        return -ENOENT;
}

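/*
 * Send a nullfunc data frame to the AP on behalf of the given vif. With ps
 * set, the PM bit is included so the AP buffers frames for us while this
 * wiphy is paused; the frame is tagged so ath9k_tx_status() can advance the
 * pause/unpause state machine when the TX status arrives.
 */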
static int ath9k_send_nullfunc(struct ath_wiphy *aphy,
                               struct ieee80211_vif *vif, const u8 *bssid,
                               int ps)
{
        struct ath_softc *sc = aphy->sc;
        struct ath_tx_control txctl;
        struct sk_buff *skb;
        struct ieee80211_hdr *hdr;
        __le16 fc;
        struct ieee80211_tx_info *info;

        skb = dev_alloc_skb(24);
        if (skb == NULL)
                return -ENOMEM;
        hdr = (struct ieee80211_hdr *) skb_put(skb, 24);
        memset(hdr, 0, 24);
        fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC |
                         IEEE80211_FCTL_TODS);
        if (ps)
                fc |= cpu_to_le16(IEEE80211_FCTL_PM);
        hdr->frame_control = fc;
        memcpy(hdr->addr1, bssid, ETH_ALEN);
        memcpy(hdr->addr2, aphy->hw->wiphy->perm_addr, ETH_ALEN);
        memcpy(hdr->addr3, bssid, ETH_ALEN);
        info = IEEE80211_SKB_CB(skb);
        memset(info, 0, sizeof(*info));
        info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS;
        info->control.vif = vif;
        info->control.rates[0].idx = 0;
        info->control.rates[0].count = 4;
        info->control.rates[1].idx = -1;

        memset(&txctl, 0, sizeof(struct ath_tx_control));
        txctl.txq = &sc->tx.txq[sc->tx.hwq_map[ATH9K_WME_AC_VO]];
        txctl.frame_type = ps ? ATH9K_INT_PAUSE : ATH9K_INT_UNPAUSE;

        if (ath_tx_start(aphy->hw, skb, &txctl) != 0)
                goto exit;

        return 0;
exit:
        dev_kfree_skb_any(skb);
        return -1;
}

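/*
 * Return true if any wiphy is still waiting for its pause frame to complete
 * (state ATH_WIPHY_PAUSING). Caller must hold wiphy_lock.
 */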
static bool __ath9k_wiphy_pausing(struct ath_softc *sc)
{
        int i;
        if (sc->pri_wiphy->state == ATH_WIPHY_PAUSING)
                return true;
        for (i = 0; i < sc->num_sec_wiphy; i++) {
                if (sc->sec_wiphy[i] &&
                    sc->sec_wiphy[i]->state == ATH_WIPHY_PAUSING)
                        return true;
        }
        return false;
}

static bool ath9k_wiphy_pausing(struct ath_softc *sc)
{
        bool ret;
        spin_lock_bh(&sc->wiphy_lock);
        ret = __ath9k_wiphy_pausing(sc);
        spin_unlock_bh(&sc->wiphy_lock);
        return ret;
}

static bool __ath9k_wiphy_scanning(struct ath_softc *sc)
{
        int i;
        if (sc->pri_wiphy->state == ATH_WIPHY_SCAN)
                return true;
        for (i = 0; i < sc->num_sec_wiphy; i++) {
                if (sc->sec_wiphy[i] &&
                    sc->sec_wiphy[i]->state == ATH_WIPHY_SCAN)
                        return true;
        }
        return false;
}

bool ath9k_wiphy_scanning(struct ath_softc *sc)
{
        bool ret;
        spin_lock_bh(&sc->wiphy_lock);
        ret = __ath9k_wiphy_scanning(sc);
        spin_unlock_bh(&sc->wiphy_lock);
        return ret;
}

static int __ath9k_wiphy_unpause(struct ath_wiphy *aphy);

/* caller must hold wiphy_lock */
static void __ath9k_wiphy_unpause_ch(struct ath_wiphy *aphy)
{
        if (aphy == NULL)
                return;
        if (aphy->chan_idx != aphy->sc->chan_idx)
                return; /* wiphy not on the selected channel */
        __ath9k_wiphy_unpause(aphy);
}

static void ath9k_wiphy_unpause_channel(struct ath_softc *sc)
{
        int i;
        spin_lock_bh(&sc->wiphy_lock);
        __ath9k_wiphy_unpause_ch(sc->pri_wiphy);
        for (i = 0; i < sc->num_sec_wiphy; i++)
                __ath9k_wiphy_unpause_ch(sc->sec_wiphy[i]);
        spin_unlock_bh(&sc->wiphy_lock);
}

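/*
 * Deferred work that performs the actual channel change once all wiphys that
 * needed to pause have reached the PAUSED state.
 */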
void ath9k_wiphy_chan_work(struct work_struct *work)
{
        struct ath_softc *sc = container_of(work, struct ath_softc, chan_work);
        struct ath_wiphy *aphy = sc->next_wiphy;

        if (aphy == NULL)
                return;

        /*
         * All pending interfaces paused; ready to change
         * channels.
         */

        /* Change channels */
        mutex_lock(&sc->mutex);
        /* XXX: remove me eventually */
        ath9k_update_ichannel(sc, aphy->hw,
                              &sc->sc_ah->channels[sc->chan_idx]);
        ath_update_chainmask(sc, sc->chan_is_ht);
        if (ath_set_channel(sc, aphy->hw,
                            &sc->sc_ah->channels[sc->chan_idx]) < 0) {
                printk(KERN_DEBUG "ath9k: Failed to set channel for new "
                       "virtual wiphy\n");
                mutex_unlock(&sc->mutex);
                return;
        }
        mutex_unlock(&sc->mutex);

        ath9k_wiphy_unpause_channel(sc);
}

/*
 * ath9k version of ieee80211_tx_status() for TX frames that are generated
 * internally in the driver.
 */
void ath9k_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
{
        struct ath_wiphy *aphy = hw->priv;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info);

        if (tx_info_priv && tx_info_priv->frame_type == ATH9K_INT_PAUSE &&
            aphy->state == ATH_WIPHY_PAUSING) {
                if (!(info->flags & IEEE80211_TX_STAT_ACK)) {
                        printk(KERN_DEBUG "ath9k: %s: no ACK for pause "
                               "frame\n", wiphy_name(hw->wiphy));
                        /*
                         * The AP did not reply; ignore this to allow us to
                         * continue.
                         */
                }
                aphy->state = ATH_WIPHY_PAUSED;
                if (!ath9k_wiphy_pausing(aphy->sc)) {
                        /*
                         * Drop from tasklet to work to allow mutex for channel
                         * change.
                         */
                        queue_work(aphy->sc->hw->workqueue,
                                   &aphy->sc->chan_work);
                }
        }

        kfree(tx_info_priv);
        tx_info->rate_driver_data[0] = NULL;

        dev_kfree_skb(skb);
}

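/*
 * Mark a wiphy as paused and, if no other wiphy is still pausing, schedule
 * the channel-change work.
 */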
static void ath9k_mark_paused(struct ath_wiphy *aphy)
{
        struct ath_softc *sc = aphy->sc;
        aphy->state = ATH_WIPHY_PAUSED;
        if (!__ath9k_wiphy_pausing(sc))
                queue_work(sc->hw->workqueue, &sc->chan_work);
}

static void ath9k_pause_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
        struct ath_wiphy *aphy = data;
        struct ath_vif *avp = (void *) vif->drv_priv;

        switch (vif->type) {
        case NL80211_IFTYPE_STATION:
                if (!vif->bss_conf.assoc) {
                        ath9k_mark_paused(aphy);
                        break;
                }
                /* TODO: could avoid this if already in PS mode */
                if (ath9k_send_nullfunc(aphy, vif, avp->bssid, 1)) {
                        printk(KERN_DEBUG "%s: failed to send PS nullfunc\n",
                               __func__);
                        ath9k_mark_paused(aphy);
                }
                break;
        case NL80211_IFTYPE_AP:
                /* Beacon transmission is paused by aphy->state change */
                ath9k_mark_paused(aphy);
                break;
        default:
                break;
        }
}

/* caller must hold wiphy_lock */
static int __ath9k_wiphy_pause(struct ath_wiphy *aphy)
{
        ieee80211_stop_queues(aphy->hw);
        aphy->state = ATH_WIPHY_PAUSING;
        /*
         * TODO: handle PAUSING->PAUSED for the case where there are multiple
         * active vifs (now we do it on the first vif getting ready; should be
         * after all the vifs are ready)
         */
        ieee80211_iterate_active_interfaces_atomic(aphy->hw, ath9k_pause_iter,
                                                   aphy);
        return 0;
}

int ath9k_wiphy_pause(struct ath_wiphy *aphy)
{
        int ret;
        spin_lock_bh(&aphy->sc->wiphy_lock);
        ret = __ath9k_wiphy_pause(aphy);
        spin_unlock_bh(&aphy->sc->wiphy_lock);
        return ret;
}

static void ath9k_unpause_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
        struct ath_wiphy *aphy = data;
        struct ath_vif *avp = (void *) vif->drv_priv;

        switch (vif->type) {
        case NL80211_IFTYPE_STATION:
                if (!vif->bss_conf.assoc)
                        break;
                ath9k_send_nullfunc(aphy, vif, avp->bssid, 0);
                break;
        case NL80211_IFTYPE_AP:
                /* Beacon transmission is re-enabled by aphy->state change */
                break;
        default:
                break;
        }
}

/* caller must hold wiphy_lock */
static int __ath9k_wiphy_unpause(struct ath_wiphy *aphy)
{
        ieee80211_iterate_active_interfaces_atomic(aphy->hw,
                                                   ath9k_unpause_iter, aphy);
        aphy->state = ATH_WIPHY_ACTIVE;
        ieee80211_wake_queues(aphy->hw);
        return 0;
}

int ath9k_wiphy_unpause(struct ath_wiphy *aphy)
{
        int ret;
        spin_lock_bh(&aphy->sc->wiphy_lock);
        ret = __ath9k_wiphy_unpause(aphy);
        spin_unlock_bh(&aphy->sc->wiphy_lock);
        return ret;
}

/* caller must hold wiphy_lock */
static void __ath9k_wiphy_mark_all_paused(struct ath_softc *sc)
{
        int i;
        if (sc->pri_wiphy->state != ATH_WIPHY_INACTIVE)
                sc->pri_wiphy->state = ATH_WIPHY_PAUSED;
        for (i = 0; i < sc->num_sec_wiphy; i++) {
                if (sc->sec_wiphy[i] &&
                    sc->sec_wiphy[i]->state != ATH_WIPHY_INACTIVE)
                        sc->sec_wiphy[i]->state = ATH_WIPHY_PAUSED;
        }
}

/* caller must hold wiphy_lock */
static void __ath9k_wiphy_pause_all(struct ath_softc *sc)
{
        int i;
        if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE)
                __ath9k_wiphy_pause(sc->pri_wiphy);
        for (i = 0; i < sc->num_sec_wiphy; i++) {
                if (sc->sec_wiphy[i] &&
                    sc->sec_wiphy[i]->state == ATH_WIPHY_ACTIVE)
                        __ath9k_wiphy_pause(sc->sec_wiphy[i]);
        }
}

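/*
 * Switch the shared radio to the channel used by the given wiphy: pause all
 * active wiphys and either queue the channel-change work immediately or let
 * ath9k_tx_status() queue it once the last wiphy has paused.
 */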
int ath9k_wiphy_select(struct ath_wiphy *aphy)
{
        struct ath_softc *sc = aphy->sc;
        bool now;

        spin_lock_bh(&sc->wiphy_lock);
        if (__ath9k_wiphy_scanning(sc)) {
                /*
                 * For now, we are using mac80211 sw scan and it expects to
                 * have full control over channel changes, so avoid wiphy
                 * scheduling during a scan. This could be optimized if the
                 * scanning control were moved into the driver.
                 */
                spin_unlock_bh(&sc->wiphy_lock);
                return -EBUSY;
        }
        if (__ath9k_wiphy_pausing(sc)) {
                if (sc->wiphy_select_failures == 0)
                        sc->wiphy_select_first_fail = jiffies;
                sc->wiphy_select_failures++;
                if (time_after(jiffies, sc->wiphy_select_first_fail + HZ / 2))
                {
                        printk(KERN_DEBUG "ath9k: Previous wiphy select timed "
                               "out; disable/enable hw to recover\n");
                        __ath9k_wiphy_mark_all_paused(sc);
                        /*
                         * TODO: this workaround to fix hardware is unlikely to
                         * be specific to virtual wiphy changes. It can happen
                         * on normal channel change, too, and as such, this
                         * should really be made more generic. For example,
                         * trigger radio disable/enable on GTT interrupt burst
                         * (say, 10 GTT interrupts received without any TX
                         * frame being completed)
                         */
                        spin_unlock_bh(&sc->wiphy_lock);
                        ath_radio_disable(sc);
                        ath_radio_enable(sc);
                        queue_work(aphy->sc->hw->workqueue,
                                   &aphy->sc->chan_work);
                        return -EBUSY; /* previous select still in progress */
                }
                spin_unlock_bh(&sc->wiphy_lock);
                return -EBUSY; /* previous select still in progress */
        }
        sc->wiphy_select_failures = 0;

        /* Store the new channel */
        sc->chan_idx = aphy->chan_idx;
        sc->chan_is_ht = aphy->chan_is_ht;
        sc->next_wiphy = aphy;

        __ath9k_wiphy_pause_all(sc);
        now = !__ath9k_wiphy_pausing(aphy->sc);
        spin_unlock_bh(&sc->wiphy_lock);

        if (now) {
                /* Ready to request channel change immediately */
                queue_work(aphy->sc->hw->workqueue, &aphy->sc->chan_work);
        }

        /*
         * wiphys will be unpaused in ath9k_tx_status() once the channel has
         * been changed if any wiphy needs time to become paused.
         */

        return 0;
}

bool ath9k_wiphy_started(struct ath_softc *sc)
{
        int i;
        spin_lock_bh(&sc->wiphy_lock);
        if (sc->pri_wiphy->state != ATH_WIPHY_INACTIVE) {
                spin_unlock_bh(&sc->wiphy_lock);
                return true;
        }
        for (i = 0; i < sc->num_sec_wiphy; i++) {
                if (sc->sec_wiphy[i] &&
                    sc->sec_wiphy[i]->state != ATH_WIPHY_INACTIVE) {
                        spin_unlock_bh(&sc->wiphy_lock);
                        return true;
                }
        }
        spin_unlock_bh(&sc->wiphy_lock);
        return false;
}

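/*
 * Force a wiphy into the paused state when the selected wiphy needs the
 * radio, unless it can keep operating (same channel and no scan in progress).
 */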
static void ath9k_wiphy_pause_chan(struct ath_wiphy *aphy,
                                   struct ath_wiphy *selected)
{
        if (selected->state == ATH_WIPHY_SCAN) {
                if (aphy == selected)
                        return;
                /*
                 * Pause all other wiphys for the duration of the scan even if
                 * they are on the current channel now.
                 */
        } else if (aphy->chan_idx == selected->chan_idx)
                return;
        aphy->state = ATH_WIPHY_PAUSED;
        ieee80211_stop_queues(aphy->hw);
}

void ath9k_wiphy_pause_all_forced(struct ath_softc *sc,
                                  struct ath_wiphy *selected)
{
        int i;
        spin_lock_bh(&sc->wiphy_lock);
        if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE)
                ath9k_wiphy_pause_chan(sc->pri_wiphy, selected);
        for (i = 0; i < sc->num_sec_wiphy; i++) {
                if (sc->sec_wiphy[i] &&
                    sc->sec_wiphy[i]->state == ATH_WIPHY_ACTIVE)
                        ath9k_wiphy_pause_chan(sc->sec_wiphy[i], selected);
        }
        spin_unlock_bh(&sc->wiphy_lock);
}