/*
 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
19 /********************************************\
20 Queue Control Unit, DFS Control Unit Functions
21 \********************************************/
/*
 * Get properties for a transmit queue
 *
 * Copies the driver's cached queue configuration (ah->ah_txq[queue])
 * into the caller-supplied queue_info struct.  Pure software state --
 * no hardware registers are touched here.
 *
 * NOTE(review): this dump appears truncated -- the function braces and
 * the return statement of the original are not visible.
 */
31 int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
32 struct ath5k_txq_info *queue_info)
34 ATH5K_TRACE(ah->ah_sc);
/* Whole-struct copy of the cached per-queue state. */
35 memcpy(queue_info, &ah->ah_txq[queue], sizeof(struct ath5k_txq_info));
/*
 * Set properties for a transmit queue
 *
 * Validates the queue index, requires the queue to already be active,
 * then caches the caller's settings in ah->ah_txq[queue].  For WME
 * video/voice data queues and for UAPSD queues the post-frame
 * backoff-disable flag is forced on afterwards.
 *
 * NOTE(review): dump appears truncated -- braces, the early return for
 * inactive queues and the final return are not visible here.
 */
42 int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
43 const struct ath5k_txq_info *queue_info)
45 ATH5K_TRACE(ah->ah_sc);
/* Bounds-check queue against the number of tx queues the hw supports. */
46 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
/* Only queues that have been set up may be (re)configured. */
48 if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
51 memcpy(&ah->ah_txq[queue], queue_info, sizeof(struct ath5k_txq_info));
53 /*XXX: Is this supported on 5210 ?*/
/* High-priority ACs (VI/VO) and UAPSD get post-frame backoff disabled. */
54 if ((queue_info->tqi_type == AR5K_TX_QUEUE_DATA &&
55 ((queue_info->tqi_subtype == AR5K_WME_AC_VI) ||
56 (queue_info->tqi_subtype == AR5K_WME_AC_VO))) ||
57 queue_info->tqi_type == AR5K_TX_QUEUE_UAPSD)
58 ah->ah_txq[queue].tqi_flags |= AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS;
/*
 * Initialize a transmit queue
 *
 * Picks a hardware queue id for the requested queue type, resets the
 * driver's cached ath5k_txq_info for it, optionally applies the
 * caller-supplied settings via ath5k_hw_set_tx_queueprops(), and marks
 * the queue active in ah->ah_txq_status.
 *
 * NOTE(review): this dump appears truncated -- the "switch (queue_type)"
 * heads, break statements, braces and return statements of the original
 * are not visible below.
 */
66 int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
67 struct ath5k_txq_info *queue_info)
72 ATH5K_TRACE(ah->ah_sc);
77 /*5210 only has 2 queues*/
78 if (ah->ah_version == AR5K_AR5210) {
80 case AR5K_TX_QUEUE_DATA:
81 queue = AR5K_TX_QUEUE_ID_NOQCU_DATA;
/* Beacon and CAB share the single non-QCU beacon queue on 5210. */
83 case AR5K_TX_QUEUE_BEACON:
84 case AR5K_TX_QUEUE_CAB:
85 queue = AR5K_TX_QUEUE_ID_NOQCU_BEACON;
/* 5211+: scan for the first inactive queue in the data-queue range. */
92 case AR5K_TX_QUEUE_DATA:
93 for (queue = AR5K_TX_QUEUE_ID_DATA_MIN;
94 ah->ah_txq[queue].tqi_type !=
95 AR5K_TX_QUEUE_INACTIVE; queue++) {
/* All data queues in use -> presumably an error return (not visible). */
97 if (queue > AR5K_TX_QUEUE_ID_DATA_MAX)
/* Special queue types map to fixed hardware queue ids. */
101 case AR5K_TX_QUEUE_UAPSD:
102 queue = AR5K_TX_QUEUE_ID_UAPSD;
104 case AR5K_TX_QUEUE_BEACON:
105 queue = AR5K_TX_QUEUE_ID_BEACON;
107 case AR5K_TX_QUEUE_CAB:
108 queue = AR5K_TX_QUEUE_ID_CAB;
/* XR data is a 5212-only feature; reject it on other MACs. */
110 case AR5K_TX_QUEUE_XR_DATA:
111 if (ah->ah_version != AR5K_AR5212)
113 "XR data queues only supported in"
115 queue = AR5K_TX_QUEUE_ID_XR_DATA;
/*
 * Setup internal queue structure
 */
/* Clear any stale cached state before typing the queue. */
125 memset(&ah->ah_txq[queue], 0, sizeof(struct ath5k_txq_info));
126 ah->ah_txq[queue].tqi_type = queue_type;
128 if (queue_info != NULL) {
129 queue_info->tqi_type = queue_type;
130 ret = ath5k_hw_set_tx_queueprops(ah, queue, queue_info);
/*
 * We use ah_txq_status to hold a temp value for
 * the Secondary interrupt mask registers on 5211+
 * check out ath5k_hw_reset_tx_queue
 */
/* Mark this queue active in the software status bitmap. */
140 AR5K_Q_ENABLE_BITS(ah->ah_txq_status, queue);
/*
 * Get number of pending frames
 * for a specific queue [5211+]
 *
 * Reads the frame-pending count from the queue's QCU status register.
 * If the count reads zero but the queue's TXE bit is still set, the
 * queue has not fully stopped, so the code below reports a non-zero
 * value in that case (the actual return is not visible in this dump).
 *
 * NOTE(review): dump appears truncated -- braces and return statements
 * (inactive-queue case, 5210 case, final result) are missing here.
 */
149 u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
152 ATH5K_TRACE(ah->ah_sc);
153 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
155 /* Return if queue is declared inactive */
156 if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
159 /* XXX: How about AR5K_CFG_TXCNT ? */
/* 5210 has no QCU, so no per-queue pending count is available. */
160 if (ah->ah_version == AR5K_AR5210)
/* Low bits of the QCU status register hold the pending-frame count. */
163 pending = (AR5K_QUEUE_STATUS(queue) & AR5K_QCU_STS_FRMPENDCNT);
/* It's possible to have no frames pending even if TXE
 * is set. To indicate that q has not stopped return
 * a non-zero value in that case (return not visible in this dump). */
168 if (!pending && AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
/*
 * Set a transmit queue inactive
 *
 * Marks the queue inactive in the driver's cache and clears its bit in
 * ah->ah_txq_status so later per-queue operations skip it.  Software
 * state only; no hardware registers are written here.
 */
177 void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
179 ATH5K_TRACE(ah->ah_sc);
/* Out-of-range index: warn -- presumably followed by an early return
 * (the return line is not visible in this dump). */
180 if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num))
183 /* This queue will be skipped in further operations */
184 ah->ah_txq[queue].tqi_type = AR5K_TX_QUEUE_INACTIVE;
186 AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue);
/*
 * Set DFS properties for a transmit queue on DCU
 *
 * Programs the hardware (global timing registers on 5210, per-queue
 * QCU/DCU registers on 5211+) from the cached ath5k_txq_info for
 * @queue: contention window, AIFS, retry limits, CBR / ready-time /
 * burst settings, per-type beacon/CAB/UAPSD tweaks, and finally the
 * secondary interrupt mask (SIMR) registers.
 *
 * NOTE(review): this dump appears truncated -- many braces, else
 * branches, break and return statements of the original function are
 * not visible below.
 */
192 int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
194 u32 cw_min, cw_max, retry_lg, retry_sh;
195 struct ath5k_txq_info *tq = &ah->ah_txq[queue];
197 ATH5K_TRACE(ah->ah_sc);
198 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
/* NOTE(review): redundant -- tq was already initialized to this above. */
200 tq = &ah->ah_txq[queue];
/* Nothing to program for queues that were never set up. */
202 if (tq->tqi_type == AR5K_TX_QUEUE_INACTIVE)
/* 5210 path: no QCU/DCU, program the global timing registers instead. */
205 if (ah->ah_version == AR5K_AR5210) {
206 /* Only handle data queues, others will be ignored */
207 if (tq->tqi_type != AR5K_TX_QUEUE_DATA)
/* Set slot time (turbo mode uses different timings throughout). */
211 ath5k_hw_reg_write(ah, ah->ah_turbo ?
212 AR5K_INIT_SLOT_TIME_TURBO : AR5K_INIT_SLOT_TIME,
214 /* Set ACK_CTS timeout */
215 ath5k_hw_reg_write(ah, ah->ah_turbo ?
216 AR5K_INIT_ACK_CTS_TIMEOUT_TURBO :
217 AR5K_INIT_ACK_CTS_TIMEOUT, AR5K_SLOT_TIME);
218 /* Set Transmit Latency */
219 ath5k_hw_reg_write(ah, ah->ah_turbo ?
220 AR5K_INIT_TRANSMIT_LATENCY_TURBO :
221 AR5K_INIT_TRANSMIT_LATENCY, AR5K_USEC_5210);
/* IFS0: DIFS = SIFS + (base aifs + per-queue aifs) * slot time. */
225 ath5k_hw_reg_write(ah, ((AR5K_INIT_SIFS_TURBO +
226 (ah->ah_aifs + tq->tqi_aifs) *
227 AR5K_INIT_SLOT_TIME_TURBO) <<
228 AR5K_IFS0_DIFS_S) | AR5K_INIT_SIFS_TURBO,
231 ath5k_hw_reg_write(ah, ((AR5K_INIT_SIFS +
232 (ah->ah_aifs + tq->tqi_aifs) *
233 AR5K_INIT_SLOT_TIME) << AR5K_IFS0_DIFS_S) |
234 AR5K_INIT_SIFS, AR5K_IFS0);
/* IFS1: protocol time control. */
238 ath5k_hw_reg_write(ah, ah->ah_turbo ?
239 AR5K_INIT_PROTO_TIME_CNTRL_TURBO :
240 AR5K_INIT_PROTO_TIME_CNTRL, AR5K_IFS1);
241 /* Set AR5K_PHY_SETTLING (read-modify-write of the low 7 bits) */
242 ath5k_hw_reg_write(ah, ah->ah_turbo ?
243 (ath5k_hw_reg_read(ah, AR5K_PHY_SETTLING) & ~0x7F)
245 (ath5k_hw_reg_read(ah, AR5K_PHY_SETTLING) & ~0x7F)
248 /* Set Frame Control Register */
249 ath5k_hw_reg_write(ah, ah->ah_turbo ?
250 (AR5K_PHY_FRAME_CTL_INI | AR5K_PHY_TURBO_MODE |
251 AR5K_PHY_TURBO_SHORT | 0x2020) :
252 (AR5K_PHY_FRAME_CTL_INI | 0x1020),
253 AR5K_PHY_FRAME_CTL_5210);
/*
 * Calculate cwmin/max by channel mode
 */
259 cw_min = ah->ah_cw_min = AR5K_TUNE_CWMIN;
260 cw_max = ah->ah_cw_max = AR5K_TUNE_CWMAX;
261 ah->ah_aifs = AR5K_TUNE_AIFS;
262 /*XR is only supported on 5212*/
263 if (IS_CHAN_XR(ah->ah_current_channel) &&
264 ah->ah_version == AR5K_AR5212) {
265 cw_min = ah->ah_cw_min = AR5K_TUNE_CWMIN_XR;
266 cw_max = ah->ah_cw_max = AR5K_TUNE_CWMAX_XR;
267 ah->ah_aifs = AR5K_TUNE_AIFS_XR;
268 /*B mode is not supported on 5210*/
269 } else if (IS_CHAN_B(ah->ah_current_channel) &&
270 ah->ah_version != AR5K_AR5210) {
271 cw_min = ah->ah_cw_min = AR5K_TUNE_CWMIN_11B;
272 cw_max = ah->ah_cw_max = AR5K_TUNE_CWMAX_11B;
273 ah->ah_aifs = AR5K_TUNE_AIFS_11B;
/* Grow cw_min up to the mode's minimum, keeping the 2^n - 1 form. */
277 while (cw_min < ah->ah_cw_min)
278 cw_min = (cw_min << 1) | 1;
/* Per-queue cw adjust: negative tqi values shrink the window,
 * positive ones grow it (result stays of the form 2^n - 1). */
280 cw_min = tq->tqi_cw_min < 0 ? (cw_min >> (-tq->tqi_cw_min)) :
281 ((cw_min << tq->tqi_cw_min) + (1 << tq->tqi_cw_min) - 1);
282 cw_max = tq->tqi_cw_max < 0 ? (cw_max >> (-tq->tqi_cw_max)) :
283 ((cw_max << tq->tqi_cw_max) + (1 << tq->tqi_cw_max) - 1);
/*
 * Calculate and set retry limits
 */
288 if (ah->ah_software_retry) {
289 /* XXX Need to test this */
290 retry_lg = ah->ah_limit_tx_retries;
/* Cap hardware retries; software retry covers the remainder. */
291 retry_sh = retry_lg = retry_lg > AR5K_DCU_RETRY_LMT_SH_RETRY ?
292 AR5K_DCU_RETRY_LMT_SH_RETRY : retry_lg;
294 retry_lg = AR5K_INIT_LG_RETRY;
295 retry_sh = AR5K_INIT_SH_RETRY;
298 /*No QCU/DCU [5210]*/
299 if (ah->ah_version == AR5K_AR5210) {
300 ath5k_hw_reg_write(ah,
301 (cw_min << AR5K_NODCU_RETRY_LMT_CW_MIN_S)
302 | AR5K_REG_SM(AR5K_INIT_SLG_RETRY,
303 AR5K_NODCU_RETRY_LMT_SLG_RETRY)
304 | AR5K_REG_SM(AR5K_INIT_SSH_RETRY,
305 AR5K_NODCU_RETRY_LMT_SSH_RETRY)
306 | AR5K_REG_SM(retry_lg, AR5K_NODCU_RETRY_LMT_LG_RETRY)
307 | AR5K_REG_SM(retry_sh, AR5K_NODCU_RETRY_LMT_SH_RETRY),
308 AR5K_NODCU_RETRY_LMT);
/* 5211+: per-queue DCU retry limit register. */
311 ath5k_hw_reg_write(ah,
312 AR5K_REG_SM(AR5K_INIT_SLG_RETRY,
313 AR5K_DCU_RETRY_LMT_SLG_RETRY) |
314 AR5K_REG_SM(AR5K_INIT_SSH_RETRY,
315 AR5K_DCU_RETRY_LMT_SSH_RETRY) |
316 AR5K_REG_SM(retry_lg, AR5K_DCU_RETRY_LMT_LG_RETRY) |
317 AR5K_REG_SM(retry_sh, AR5K_DCU_RETRY_LMT_SH_RETRY),
318 AR5K_QUEUE_DFS_RETRY_LIMIT(queue));
320 /*===Rest is also for QCU/DCU only [5211+]===*/
/*
 * Set initial content window (cw_min/cw_max)
 * and arbitrated interframe space (aifs)...
 */
326 ath5k_hw_reg_write(ah,
327 AR5K_REG_SM(cw_min, AR5K_DCU_LCL_IFS_CW_MIN) |
328 AR5K_REG_SM(cw_max, AR5K_DCU_LCL_IFS_CW_MAX) |
329 AR5K_REG_SM(ah->ah_aifs + tq->tqi_aifs,
330 AR5K_DCU_LCL_IFS_AIFS),
331 AR5K_QUEUE_DFS_LOCAL_IFS(queue));
336 /* Enable DCU early termination for this queue */
337 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
338 AR5K_QCU_MISC_DCU_EARLY);
340 /* Enable DCU to wait for next fragment from QCU */
341 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
342 AR5K_DCU_MISC_FRAG_WAIT);
344 /* On Maui and Spirit use the global seqnum on DCU */
345 if (ah->ah_mac_version < AR5K_SREV_AR5211)
346 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
347 AR5K_DCU_MISC_SEQNUM_CTL);
/* Constant-bit-rate scheduling: program interval + overflow limit and
 * enable CBR frame scheduling for this queue. */
349 if (tq->tqi_cbr_period) {
350 ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_cbr_period,
351 AR5K_QCU_CBRCFG_INTVAL) |
352 AR5K_REG_SM(tq->tqi_cbr_overflow_limit,
353 AR5K_QCU_CBRCFG_ORN_THRES),
354 AR5K_QUEUE_CBRCFG(queue));
355 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
356 AR5K_QCU_MISC_FRSHED_CBR);
357 if (tq->tqi_cbr_overflow_limit)
358 AR5K_REG_ENABLE_BITS(ah,
359 AR5K_QUEUE_MISC(queue),
360 AR5K_QCU_MISC_CBR_THRES_ENABLE);
/* NOTE(review): compares a queue *type* (tqi_type) against a queue *id*
 * constant (AR5K_TX_QUEUE_ID_CAB) -- looks like a type/id mix-up;
 * confirm against the enum definitions. */
363 if (tq->tqi_ready_time &&
364 (tq->tqi_type != AR5K_TX_QUEUE_ID_CAB))
365 ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_ready_time,
366 AR5K_QCU_RDYTIMECFG_INTVAL) |
367 AR5K_QCU_RDYTIMECFG_ENABLE,
368 AR5K_QUEUE_RDYTIMECFG(queue));
/* Burst (channel time) limit for this DCU. */
370 if (tq->tqi_burst_time) {
371 ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_burst_time,
372 AR5K_DCU_CHAN_TIME_DUR) |
373 AR5K_DCU_CHAN_TIME_ENABLE,
374 AR5K_QUEUE_DFS_CHANNEL_TIME(queue));
/* NOTE(review): the "if (tq->tqi_flags" head of this condition is not
 * visible in this dump. */
377 & AR5K_TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)
378 AR5K_REG_ENABLE_BITS(ah,
379 AR5K_QUEUE_MISC(queue),
380 AR5K_QCU_MISC_RDY_VEOL_POLICY);
383 if (tq->tqi_flags & AR5K_TXQ_FLAG_BACKOFF_DISABLE)
384 ath5k_hw_reg_write(ah, AR5K_DCU_MISC_POST_FR_BKOFF_DIS,
385 AR5K_QUEUE_DFS_MISC(queue));
387 if (tq->tqi_flags & AR5K_TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE)
388 ath5k_hw_reg_write(ah, AR5K_DCU_MISC_BACKOFF_FRAG,
389 AR5K_QUEUE_DFS_MISC(queue));
/*
 * Set registers by queue type
 */
394 switch (tq->tqi_type) {
395 case AR5K_TX_QUEUE_BEACON:
396 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
397 AR5K_QCU_MISC_FRSHED_DBA_GT |
398 AR5K_QCU_MISC_CBREXP_BCN_DIS |
399 AR5K_QCU_MISC_BCN_ENABLE);
401 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
402 (AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
403 AR5K_DCU_MISC_ARBLOCK_CTL_S) |
404 AR5K_DCU_MISC_POST_FR_BKOFF_DIS |
405 AR5K_DCU_MISC_BCN_ENABLE);
408 case AR5K_TX_QUEUE_CAB:
409 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
410 AR5K_QCU_MISC_FRSHED_DBA_GT |
411 AR5K_QCU_MISC_CBREXP_DIS |
412 AR5K_QCU_MISC_CBREXP_BCN_DIS);
/* CAB ready time: beacon interval minus beacon-response slack;
 * the *1024 scaling is presumably TU -> usec -- confirm. */
414 ath5k_hw_reg_write(ah, ((AR5K_TUNE_BEACON_INTERVAL -
415 (AR5K_TUNE_SW_BEACON_RESP -
416 AR5K_TUNE_DMA_BEACON_RESP) -
417 AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF) * 1024) |
418 AR5K_QCU_RDYTIMECFG_ENABLE,
419 AR5K_QUEUE_RDYTIMECFG(queue));
421 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
422 (AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
423 AR5K_DCU_MISC_ARBLOCK_CTL_S));
426 case AR5K_TX_QUEUE_UAPSD:
427 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
428 AR5K_QCU_MISC_CBREXP_DIS);
/* Plain data queues need no extra per-type setup. */
431 case AR5K_TX_QUEUE_DATA:
436 /* TODO: Handle frame compression */
/*
 * Enable interrupts for this tx queue
 * in the secondary interrupt mask registers
 */
442 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXOKINT_ENABLE)
443 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txok, queue);
445 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXERRINT_ENABLE)
446 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txerr, queue);
448 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXURNINT_ENABLE)
449 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txurn, queue);
451 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXDESCINT_ENABLE)
452 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txdesc, queue);
454 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXEOLINT_ENABLE)
455 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txeol, queue);
457 if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRORNINT_ENABLE)
458 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrorn, queue);
460 if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRURNINT_ENABLE)
461 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrurn, queue);
463 if (tq->tqi_flags & AR5K_TXQ_FLAG_QTRIGINT_ENABLE)
464 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_qtrig, queue);
466 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXNOFRMINT_ENABLE)
467 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_nofrm, queue);
469 /* Update secondary interrupt mask registers */
471 /* Filter out inactive queues */
472 ah->ah_txq_imr_txok &= ah->ah_txq_status;
473 ah->ah_txq_imr_txerr &= ah->ah_txq_status;
474 ah->ah_txq_imr_txurn &= ah->ah_txq_status;
475 ah->ah_txq_imr_txdesc &= ah->ah_txq_status;
476 ah->ah_txq_imr_txeol &= ah->ah_txq_status;
477 ah->ah_txq_imr_cbrorn &= ah->ah_txq_status;
478 ah->ah_txq_imr_cbrurn &= ah->ah_txq_status;
479 ah->ah_txq_imr_qtrig &= ah->ah_txq_status;
480 ah->ah_txq_imr_nofrm &= ah->ah_txq_status;
/* SIMR0: TXOK + TXDESC masks. */
482 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txok,
483 AR5K_SIMR0_QCU_TXOK) |
484 AR5K_REG_SM(ah->ah_txq_imr_txdesc,
485 AR5K_SIMR0_QCU_TXDESC), AR5K_SIMR0);
/* SIMR1: TXERR + TXEOL masks. */
486 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txerr,
487 AR5K_SIMR1_QCU_TXERR) |
488 AR5K_REG_SM(ah->ah_txq_imr_txeol,
489 AR5K_SIMR1_QCU_TXEOL), AR5K_SIMR1);
490 /* Update simr2 but don't overwrite rest simr2 settings */
491 AR5K_REG_DISABLE_BITS(ah, AR5K_SIMR2, AR5K_SIMR2_QCU_TXURN);
492 AR5K_REG_ENABLE_BITS(ah, AR5K_SIMR2,
493 AR5K_REG_SM(ah->ah_txq_imr_txurn,
494 AR5K_SIMR2_QCU_TXURN));
/* SIMR3: CBR overrun/underrun masks. */
495 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_cbrorn,
496 AR5K_SIMR3_QCBRORN) |
497 AR5K_REG_SM(ah->ah_txq_imr_cbrurn,
498 AR5K_SIMR3_QCBRURN), AR5K_SIMR3);
/* SIMR4: queue trigger mask. */
499 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_qtrig,
500 AR5K_SIMR4_QTRIG), AR5K_SIMR4);
501 /* Set TXNOFRM_QCU for the queues with TXNOFRM enabled */
502 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_nofrm,
503 AR5K_TXNOFRM_QCU), AR5K_TXNOFRM);
504 /* No queue has TXNOFRM enabled, disable the interrupt
505 * by setting AR5K_TXNOFRM to zero */
506 if (ah->ah_txq_imr_nofrm == 0)
507 ath5k_hw_reg_write(ah, 0, AR5K_TXNOFRM);
509 /* Set QCU mask for this DCU to save power */
510 AR5K_REG_WRITE_Q(ah, AR5K_QUEUE_QCUMASK(queue), queue);
/*
 * Get slot time from DCU
 *
 * On 5210 the slot time is kept in AR5K_SLOT_TIME and is converted via
 * ath5k_hw_clocktoh() (clock units -> usec, judging by the name --
 * confirm); on 5211+ it is read directly from the global DCU IFS slot
 * register.  Only the low 16 bits are significant in both cases.
 */
519 unsigned int ath5k_hw_get_slot_time(struct ath5k_hw *ah)
521 ATH5K_TRACE(ah->ah_sc);
522 if (ah->ah_version == AR5K_AR5210)
523 return ath5k_hw_clocktoh(ath5k_hw_reg_read(ah,
524 AR5K_SLOT_TIME) & 0xffff, ah->ah_turbo);
/* 5211+: slot time lives in the DCU global IFS slot register. */
526 return ath5k_hw_reg_read(ah, AR5K_DCU_GBL_IFS_SLOT) & 0xffff;
/*
 * Set slot time on DCU
 *
 * Rejects values outside [AR5K_SLOT_TIME_9, AR5K_SLOT_TIME_MAX]
 * (the error return line is not visible in this dump).  On 5210 the
 * value is converted with ath5k_hw_htoclock() and written to
 * AR5K_SLOT_TIME; on 5211+ it is written as-is to the global DCU IFS
 * slot register.
 *
 * NOTE(review): dump appears truncated -- braces, the else branch and
 * the return statements of the original are not visible.
 */
532 int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time)
534 ATH5K_TRACE(ah->ah_sc);
535 if (slot_time < AR5K_SLOT_TIME_9 || slot_time > AR5K_SLOT_TIME_MAX)
538 if (ah->ah_version == AR5K_AR5210)
539 ath5k_hw_reg_write(ah, ath5k_hw_htoclock(slot_time,
540 ah->ah_turbo), AR5K_SLOT_TIME);
542 ath5k_hw_reg_write(ah, slot_time, AR5K_DCU_GBL_IFS_SLOT);