1 /******************************************************************************
3 Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved.
5 802.11 status code portion of this file from ethereal-0.10.6:
6 Copyright 2000, Axis Communications AB
7 Ethereal - Network traffic analyzer
8 By Gerald Combs <gerald@ethereal.com>
9 Copyright 1998 Gerald Combs
11 This program is free software; you can redistribute it and/or modify it
12 under the terms of version 2 of the GNU General Public License as
13 published by the Free Software Foundation.
15 This program is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
20 You should have received a copy of the GNU General Public License along with
21 this program; if not, write to the Free Software Foundation, Inc., 59
22 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 The full GNU General Public License is included in this distribution in the
28 James P. Ketrenos <ipw2100-admin@linux.intel.com>
29 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 ******************************************************************************/
34 #include <linux/version.h>
36 #define IPW2200_VERSION "1.0.0"
37 #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver"
38 #define DRV_COPYRIGHT "Copyright(c) 2003-2004 Intel Corporation"
39 #define DRV_VERSION IPW2200_VERSION
41 MODULE_DESCRIPTION(DRV_DESCRIPTION);
42 MODULE_VERSION(DRV_VERSION);
43 MODULE_AUTHOR(DRV_COPYRIGHT);
44 MODULE_LICENSE("GPL");
47 static int channel = 0;
51 static u32 ipw_debug_level;
52 static int associate = 1;
53 static int auto_create = 1;
54 static int disable = 0;
55 static const char ipw_modes[] = {
59 static void ipw_rx(struct ipw_priv *priv);
60 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
61 struct clx2_tx_queue *txq, int qindex);
62 static int ipw_queue_reset(struct ipw_priv *priv);
64 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
67 static void ipw_tx_queue_free(struct ipw_priv *);
69 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
70 static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
71 static void ipw_rx_queue_replenish(void *);
73 static int ipw_up(struct ipw_priv *);
74 static void ipw_down(struct ipw_priv *);
75 static int ipw_config(struct ipw_priv *);
76 static int init_supported_rates(struct ipw_priv *priv,
77 struct ipw_supported_rates *prates);
79 static u8 band_b_active_channel[MAX_B_CHANNELS] = {
80 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 0
82 static u8 band_a_active_channel[MAX_A_CHANNELS] = {
83 36, 40, 44, 48, 149, 153, 157, 161, 165, 52, 56, 60, 64, 0
86 static int is_valid_channel(int mode_mask, int channel)
93 if (mode_mask & IEEE_A)
94 for (i = 0; i < MAX_A_CHANNELS; i++)
95 if (band_a_active_channel[i] == channel)
98 if (mode_mask & (IEEE_B | IEEE_G))
99 for (i = 0; i < MAX_B_CHANNELS; i++)
100 if (band_b_active_channel[i] == channel)
101 return mode_mask & (IEEE_B | IEEE_G);
106 static char *snprint_line(char *buf, size_t count,
107 const u8 * data, u32 len, u32 ofs)
112 out = snprintf(buf, count, "%08X", ofs);
114 for (l = 0, i = 0; i < 2; i++) {
115 out += snprintf(buf + out, count - out, " ");
116 for (j = 0; j < 8 && l < len; j++, l++)
117 out += snprintf(buf + out, count - out, "%02X ",
120 out += snprintf(buf + out, count - out, " ");
123 out += snprintf(buf + out, count - out, " ");
124 for (l = 0, i = 0; i < 2; i++) {
125 out += snprintf(buf + out, count - out, " ");
126 for (j = 0; j < 8 && l < len; j++, l++) {
127 c = data[(i * 8 + j)];
128 if (!isascii(c) || !isprint(c))
131 out += snprintf(buf + out, count - out, "%c", c);
135 out += snprintf(buf + out, count - out, " ");
141 static void printk_buf(int level, const u8 * data, u32 len)
145 if (!(ipw_debug_level & level))
149 printk(KERN_DEBUG "%s\n",
150 snprint_line(line, sizeof(line), &data[ofs],
151 min(len, 16U), ofs));
153 len -= min(len, 16U);
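/*
 * Each line emitted by printk_buf() shows the buffer offset, up to 16
 * bytes as two groups of hex values, and the same bytes again as ASCII,
 * with anything failing the isascii()/isprint() check in snprint_line()
 * substituted.  Purely a descriptive note; the exact spacing comes from
 * the snprintf() calls above.
 */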
157 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
158 #define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
160 static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
161 #define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
163 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
164 static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
166 IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
167 __LINE__, (u32) (b), (u32) (c));
168 _ipw_write_reg8(a, b, c);
171 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
172 static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
174 IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
175 __LINE__, (u32) (b), (u32) (c));
176 _ipw_write_reg16(a, b, c);
179 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
180 static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
182 IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
183 __LINE__, (u32) (b), (u32) (c));
184 _ipw_write_reg32(a, b, c);
187 #define _ipw_write8(ipw, ofs, val) writeb((val), (ipw)->hw_base + (ofs))
188 #define ipw_write8(ipw, ofs, val) \
189 IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
190 _ipw_write8(ipw, ofs, val)
192 #define _ipw_write16(ipw, ofs, val) writew((val), (ipw)->hw_base + (ofs))
193 #define ipw_write16(ipw, ofs, val) \
194 IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
195 _ipw_write16(ipw, ofs, val)
197 #define _ipw_write32(ipw, ofs, val) writel((val), (ipw)->hw_base + (ofs))
198 #define ipw_write32(ipw, ofs, val) \
199 IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
200 _ipw_write32(ipw, ofs, val)
202 #define _ipw_read8(ipw, ofs) readb((ipw)->hw_base + (ofs))
203 static inline u8 __ipw_read8(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
205 IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", f, l, (u32) (ofs));
206 return _ipw_read8(ipw, ofs);
209 #define ipw_read8(ipw, ofs) __ipw_read8(__FILE__, __LINE__, ipw, ofs)
211 #define _ipw_read16(ipw, ofs) readw((ipw)->hw_base + (ofs))
212 static inline u16 __ipw_read16(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
214 IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", f, l, (u32) (ofs));
215 return _ipw_read16(ipw, ofs);
218 #define ipw_read16(ipw, ofs) __ipw_read16(__FILE__, __LINE__, ipw, ofs)
220 #define _ipw_read32(ipw, ofs) readl((ipw)->hw_base + (ofs))
221 static inline u32 __ipw_read32(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
223 IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", f, l, (u32) (ofs));
224 return _ipw_read32(ipw, ofs);
227 #define ipw_read32(ipw, ofs) __ipw_read32(__FILE__, __LINE__, ipw, ofs)
229 static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
230 #define ipw_read_indirect(a, b, c, d) \
231 IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \
232 _ipw_read_indirect(a, b, c, d)
234 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
236 #define ipw_write_indirect(a, b, c, d) \
237 IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \
238 _ipw_write_indirect(a, b, c, d)
240 /* indirect writes */
241 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
243 IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
244 _ipw_write32(priv, CX2_INDIRECT_ADDR, reg);
245 _ipw_write32(priv, CX2_INDIRECT_DATA, value);
248 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
250 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
251 _ipw_write32(priv, CX2_INDIRECT_ADDR, reg & CX2_INDIRECT_ADDR_MASK);
252 _ipw_write8(priv, CX2_INDIRECT_DATA, value);
253 IPW_DEBUG_IO(" reg = 0x%8lX : value = 0x%8X\n",
254 (unsigned long)(priv->hw_base + CX2_INDIRECT_DATA), value);
257 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
259 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
260 _ipw_write32(priv, CX2_INDIRECT_ADDR, reg & CX2_INDIRECT_ADDR_MASK);
261 _ipw_write16(priv, CX2_INDIRECT_DATA, value);
264 /* indirect reads */
266 static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
269 _ipw_write32(priv, CX2_INDIRECT_ADDR, reg & CX2_INDIRECT_ADDR_MASK);
270 IPW_DEBUG_IO(" reg = 0x%8X : \n", reg);
271 word = _ipw_read32(priv, CX2_INDIRECT_DATA);
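/* The data port returns the full aligned dword; the low two bits of the
 * requested address select which byte lane to hand back below. */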
272 return (word >> ((reg & 0x3) * 8)) & 0xff;
275 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
279 IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
281 _ipw_write32(priv, CX2_INDIRECT_ADDR, reg);
282 value = _ipw_read32(priv, CX2_INDIRECT_DATA);
283 IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x \n", reg, value);
287 /* iterative/auto-increment 32 bit reads and writes */
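/*
 * Transfers are split into up to three phases: any leading bytes before
 * the next 32-bit boundary are moved one at a time through
 * CX2_INDIRECT_DATA, the aligned bulk of the buffer is streamed as 32-bit
 * words through the auto-increment pair CX2_AUTOINC_ADDR/CX2_AUTOINC_DATA,
 * and any trailing bytes are again moved byte by byte.
 */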
288 static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
291 u32 aligned_addr = addr & CX2_INDIRECT_ADDR_MASK;
292 u32 dif_len = addr - aligned_addr;
296 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
298 /* Read the leading unaligned bytes one byte at a time */
299 if (unlikely(dif_len)) {
300 /* Start reading at aligned_addr + dif_len */
301 _ipw_write32(priv, CX2_INDIRECT_ADDR, aligned_addr);
302 for (i = dif_len; i < 4; i++, buf++)
303 *buf = _ipw_read8(priv, CX2_INDIRECT_DATA + i);
308 /* Read DWs through autoinc register */
309 _ipw_write32(priv, CX2_AUTOINC_ADDR, aligned_addr);
310 aligned_len = num & CX2_INDIRECT_ADDR_MASK;
311 for (i = 0; i < aligned_len; i += 4, buf += 4, aligned_addr += 4)
312 *(u32 *) buf = ipw_read32(priv, CX2_AUTOINC_DATA);
314 /* Copy the trailing unaligned bytes */
315 dif_len = num - aligned_len;
316 _ipw_write32(priv, CX2_INDIRECT_ADDR, aligned_addr);
317 for (i = 0; i < dif_len; i++, buf++)
318 *buf = ipw_read8(priv, CX2_INDIRECT_DATA + i);
321 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
324 u32 aligned_addr = addr & CX2_INDIRECT_ADDR_MASK;
325 u32 dif_len = addr - aligned_addr;
329 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
331 /* Write the leading unaligned bytes one byte at a time */
332 if (unlikely(dif_len)) {
333 /* Start writing at aligned_addr + dif_len */
334 _ipw_write32(priv, CX2_INDIRECT_ADDR, aligned_addr);
335 for (i = dif_len; i < 4; i++, buf++)
336 _ipw_write8(priv, CX2_INDIRECT_DATA + i, *buf);
341 /* Write DWs through autoinc register */
342 _ipw_write32(priv, CX2_AUTOINC_ADDR, aligned_addr);
343 aligned_len = num & CX2_INDIRECT_ADDR_MASK;
344 for (i = 0; i < aligned_len; i += 4, buf += 4, aligned_addr += 4)
345 _ipw_write32(priv, CX2_AUTOINC_DATA, *(u32 *) buf);
347 /* Copy the trailing unaligned bytes */
348 dif_len = num - aligned_len;
349 _ipw_write32(priv, CX2_INDIRECT_ADDR, aligned_addr);
350 for (i = 0; i < dif_len; i++, buf++)
351 _ipw_write8(priv, CX2_INDIRECT_DATA + i, *buf);
354 static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
357 memcpy_toio((priv->hw_base + addr), buf, num);
360 static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
362 ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
365 static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
367 ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
370 static inline void ipw_enable_interrupts(struct ipw_priv *priv)
372 if (priv->status & STATUS_INT_ENABLED)
374 priv->status |= STATUS_INT_ENABLED;
375 ipw_write32(priv, CX2_INTA_MASK_R, CX2_INTA_MASK_ALL);
378 static inline void ipw_disable_interrupts(struct ipw_priv *priv)
380 if (!(priv->status & STATUS_INT_ENABLED))
382 priv->status &= ~STATUS_INT_ENABLED;
383 ipw_write32(priv, CX2_INTA_MASK_R, ~CX2_INTA_MASK_ALL);
386 static char *ipw_error_desc(u32 val)
389 case IPW_FW_ERROR_OK:
391 case IPW_FW_ERROR_FAIL:
393 case IPW_FW_ERROR_MEMORY_UNDERFLOW:
394 return "MEMORY_UNDERFLOW";
395 case IPW_FW_ERROR_MEMORY_OVERFLOW:
396 return "MEMORY_OVERFLOW";
397 case IPW_FW_ERROR_BAD_PARAM:
398 return "ERROR_BAD_PARAM";
399 case IPW_FW_ERROR_BAD_CHECKSUM:
400 return "ERROR_BAD_CHECKSUM";
401 case IPW_FW_ERROR_NMI_INTERRUPT:
402 return "ERROR_NMI_INTERRUPT";
403 case IPW_FW_ERROR_BAD_DATABASE:
404 return "ERROR_BAD_DATABASE";
405 case IPW_FW_ERROR_ALLOC_FAIL:
406 return "ERROR_ALLOC_FAIL";
407 case IPW_FW_ERROR_DMA_UNDERRUN:
408 return "ERROR_DMA_UNDERRUN";
409 case IPW_FW_ERROR_DMA_STATUS:
410 return "ERROR_DMA_STATUS";
411 case IPW_FW_ERROR_DINOSTATUS_ERROR:
412 return "ERROR_DINOSTATUS_ERROR";
413 case IPW_FW_ERROR_EEPROMSTATUS_ERROR:
414 return "ERROR_EEPROMSTATUS_ERROR";
415 case IPW_FW_ERROR_SYSASSERT:
416 return "ERROR_SYSASSERT";
417 case IPW_FW_ERROR_FATAL_ERROR:
418 return "ERROR_FATALSTATUS_ERROR";
420 return "UNKNOWNSTATUS_ERROR";
424 static void ipw_dump_nic_error_log(struct ipw_priv *priv)
426 u32 desc, time, blink1, blink2, ilink1, ilink2, idata, i, count, base;
428 base = ipw_read32(priv, IPWSTATUS_ERROR_LOG);
429 count = ipw_read_reg32(priv, base);
431 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
432 IPW_ERROR("Start IPW Error Log Dump:\n");
433 IPW_ERROR("Status: 0x%08X, Config: %08X\n",
434 priv->status, priv->config);
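/* Each record read in the loop below occupies seven consecutive dwords:
 * the error code, a timestamp, two blink values, two instruction link
 * registers and one data word. */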
437 for (i = ERROR_START_OFFSET;
438 i <= count * ERROR_ELEM_SIZE; i += ERROR_ELEM_SIZE) {
439 desc = ipw_read_reg32(priv, base + i);
440 time = ipw_read_reg32(priv, base + i + 1 * sizeof(u32));
441 blink1 = ipw_read_reg32(priv, base + i + 2 * sizeof(u32));
442 blink2 = ipw_read_reg32(priv, base + i + 3 * sizeof(u32));
443 ilink1 = ipw_read_reg32(priv, base + i + 4 * sizeof(u32));
444 ilink2 = ipw_read_reg32(priv, base + i + 5 * sizeof(u32));
445 idata = ipw_read_reg32(priv, base + i + 6 * sizeof(u32));
447 IPW_ERROR("%s %i 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
448 ipw_error_desc(desc), time, blink1, blink2,
449 ilink1, ilink2, idata);
453 static void ipw_dump_nic_event_log(struct ipw_priv *priv)
455 u32 ev, time, data, i, count, base;
457 base = ipw_read32(priv, IPW_EVENT_LOG);
458 count = ipw_read_reg32(priv, base);
460 if (EVENT_START_OFFSET <= count * EVENT_ELEM_SIZE)
461 IPW_ERROR("Start IPW Event Log Dump:\n");
463 for (i = EVENT_START_OFFSET;
464 i <= count * EVENT_ELEM_SIZE; i += EVENT_ELEM_SIZE) {
465 ev = ipw_read_reg32(priv, base + i);
466 time = ipw_read_reg32(priv, base + i + 1 * sizeof(u32));
467 data = ipw_read_reg32(priv, base + i + 2 * sizeof(u32));
469 #ifdef CONFIG_IPW_DEBUG
470 IPW_ERROR("%i\t0x%08x\t%i\n", time, data, ev);
475 static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
477 u32 addr, field_info, field_len, field_count, total_len;
479 IPW_DEBUG_ORD("ordinal = %i\n", ord);
481 if (!priv || !val || !len) {
482 IPW_DEBUG_ORD("Invalid argument\n");
486 /* verify device ordinal tables have been initialized */
487 if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
488 IPW_DEBUG_ORD("Access ordinals before initialization\n");
492 switch (IPW_ORD_TABLE_ID_MASK & ord) {
493 case IPW_ORD_TABLE_0_MASK:
495 * TABLE 0: Direct access to a table of 32 bit values
497 * This is a very simple table with the data directly
498 * read from the table
501 /* remove the table id from the ordinal */
502 ord &= IPW_ORD_TABLE_VALUE_MASK;
505 if (ord > priv->table0_len) {
506 IPW_DEBUG_ORD("ordinal value (%i) greater than "
507 "max (%i)\n", ord, priv->table0_len);
511 /* verify we have enough room to store the value */
512 if (*len < sizeof(u32)) {
513 IPW_DEBUG_ORD("ordinal buffer length too small, "
514 "need %zd\n", sizeof(u32));
518 IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
519 ord, priv->table0_addr + (ord << 2));
523 *((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
526 case IPW_ORD_TABLE_1_MASK:
528 * TABLE 1: Indirect access to a table of 32 bit values
530 * This is a fairly large table of u32 values each
531 * representing starting addr for the data (which is
535 /* remove the table id from the ordinal */
536 ord &= IPW_ORD_TABLE_VALUE_MASK;
539 if (ord > priv->table1_len) {
540 IPW_DEBUG_ORD("ordinal value too long\n");
544 /* verify we have enough room to store the value */
545 if (*len < sizeof(u32)) {
546 IPW_DEBUG_ORD("ordinal buffer length too small, "
547 "need %zd\n", sizeof(u32));
552 ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
556 case IPW_ORD_TABLE_2_MASK:
558 * TABLE 2: Indirect access to a table of variable sized values
560 * This table consists of six values, each containing
561 * - dword containing the starting offset of the data
562 * - dword containing the length in the first 16 bits
563 * and the count in the second 16 bits
566 /* remove the table id from the ordinal */
567 ord &= IPW_ORD_TABLE_VALUE_MASK;
570 if (ord > priv->table2_len) {
571 IPW_DEBUG_ORD("ordinal value too long\n");
575 /* get the address of statistic */
576 addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));
578 /* get the second DW of statistics ;
579 * two 16-bit words - first is length, second is count */
582 priv->table2_addr + (ord << 3) +
585 /* get each entry length */
586 field_len = *((u16 *) & field_info);
588 /* get number of entries */
589 field_count = *(((u16 *) & field_info) + 1);
591 /* abort if not enough memory */
592 total_len = field_len * field_count;
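/*
 * Worked example (illustrative values, assuming a little-endian host as
 * the u16 casts above do): field_info = 0x00040010 gives field_len =
 * 0x0010 (16 bytes per entry) and field_count = 4, so total_len is 64
 * bytes.
 */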
593 if (total_len > *len) {
602 IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
603 "field_info = 0x%08x\n",
604 addr, total_len, field_info);
605 ipw_read_indirect(priv, addr, val, total_len);
609 IPW_DEBUG_ORD("Invalid ordinal!\n");
617 static void ipw_init_ordinals(struct ipw_priv *priv)
619 priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
620 priv->table0_len = ipw_read32(priv, priv->table0_addr);
622 IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
623 priv->table0_addr, priv->table0_len);
625 priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
626 priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);
628 IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
629 priv->table1_addr, priv->table1_len);
631 priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
632 priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
633 priv->table2_len &= 0x0000ffff; /* use first two bytes */
635 IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
636 priv->table2_addr, priv->table2_len);
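/*
 * Illustrative use of the ordinal interface initialized above (a sketch
 * only, mirroring show_ucode_version() further down, not an additional
 * driver entry point):
 *
 *	u32 len = sizeof(u32), val = 0;
 *
 *	if (ipw_get_ordinal(priv, IPW_ORD_STAT_UCODE_VERSION, &val, &len) == 0)
 *		IPW_DEBUG_INFO("ucode version 0x%08x\n", val);
 */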
641 * The following adds a new attribute to the sysfs representation
642 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
643 * used for controlling the debug level.
645 * See the level definitions in ipw for details.
647 static ssize_t show_debug_level(struct device_driver *d, char *buf)
649 return sprintf(buf, "0x%08X\n", ipw_debug_level);
651 static ssize_t store_debug_level(struct device_driver *d,
652 const char *buf, size_t count)
654 char *p = (char *)buf;
657 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
659 if (p[0] == 'x' || p[0] == 'X')
661 val = simple_strtoul(p, &p, 16);
663 val = simple_strtoul(p, &p, 10);
665 printk(KERN_INFO DRV_NAME
666 ": %s is not in hex or decimal form.\n", buf);
668 ipw_debug_level = val;
670 return strnlen(buf, count);
673 static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
674 show_debug_level, store_debug_level);
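/*
 * Illustrative usage from user space (a sketch; the directory name below
 * follows the driver path mentioned in the comment above and the value is
 * just an example bit mask):
 *
 *	cat /sys/bus/pci/drivers/ipw/debug_level
 *	echo 0x00000040 > /sys/bus/pci/drivers/ipw/debug_level
 *
 * Both hex and decimal input are accepted by store_debug_level().
 */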
676 static ssize_t show_status(struct device *d,
677 struct device_attribute *attr, char *buf)
679 struct ipw_priv *p = d->driver_data;
680 return sprintf(buf, "0x%08x\n", (int)p->status);
683 static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
685 static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
688 struct ipw_priv *p = d->driver_data;
689 return sprintf(buf, "0x%08x\n", (int)p->config);
692 static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
694 static ssize_t show_nic_type(struct device *d,
695 struct device_attribute *attr, char *buf)
697 struct ipw_priv *p = d->driver_data;
698 u8 type = p->eeprom[EEPROM_NIC_TYPE];
701 case EEPROM_NIC_TYPE_STANDARD:
702 return sprintf(buf, "STANDARD\n");
703 case EEPROM_NIC_TYPE_DELL:
704 return sprintf(buf, "DELL\n");
705 case EEPROM_NIC_TYPE_FUJITSU:
706 return sprintf(buf, "FUJITSU\n");
707 case EEPROM_NIC_TYPE_IBM:
708 return sprintf(buf, "IBM\n");
709 case EEPROM_NIC_TYPE_HP:
710 return sprintf(buf, "HP\n");
713 return sprintf(buf, "UNKNOWN\n");
716 static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL);
718 static ssize_t dump_error_log(struct device *d,
719 struct device_attribute *attr, const char *buf,
722 char *p = (char *)buf;
725 ipw_dump_nic_error_log((struct ipw_priv *)d->driver_data);
727 return strnlen(buf, count);
730 static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log);
732 static ssize_t dump_event_log(struct device *d,
733 struct device_attribute *attr, const char *buf,
736 char *p = (char *)buf;
739 ipw_dump_nic_event_log((struct ipw_priv *)d->driver_data);
741 return strnlen(buf, count);
744 static DEVICE_ATTR(dump_events, S_IWUSR, NULL, dump_event_log);
746 static ssize_t show_ucode_version(struct device *d,
747 struct device_attribute *attr, char *buf)
749 u32 len = sizeof(u32), tmp = 0;
750 struct ipw_priv *p = d->driver_data;
752 if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
755 return sprintf(buf, "0x%08x\n", tmp);
758 static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL);
760 static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
763 u32 len = sizeof(u32), tmp = 0;
764 struct ipw_priv *p = d->driver_data;
766 if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
769 return sprintf(buf, "0x%08x\n", tmp);
772 static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL);
775 * Add a device attribute to view/control the delay between eeprom
778 static ssize_t show_eeprom_delay(struct device *d,
779 struct device_attribute *attr, char *buf)
781 int n = ((struct ipw_priv *)d->driver_data)->eeprom_delay;
782 return sprintf(buf, "%i\n", n);
784 static ssize_t store_eeprom_delay(struct device *d,
785 struct device_attribute *attr,
786 const char *buf, size_t count)
788 struct ipw_priv *p = d->driver_data;
789 sscanf(buf, "%i", &p->eeprom_delay);
790 return strnlen(buf, count);
793 static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO,
794 show_eeprom_delay, store_eeprom_delay);
796 static ssize_t show_command_event_reg(struct device *d,
797 struct device_attribute *attr, char *buf)
800 struct ipw_priv *p = d->driver_data;
802 reg = ipw_read_reg32(p, CX2_INTERNAL_CMD_EVENT);
803 return sprintf(buf, "0x%08x\n", reg);
805 static ssize_t store_command_event_reg(struct device *d,
806 struct device_attribute *attr,
807 const char *buf, size_t count)
810 struct ipw_priv *p = d->driver_data;
812 sscanf(buf, "%x", &reg);
813 ipw_write_reg32(p, CX2_INTERNAL_CMD_EVENT, reg);
814 return strnlen(buf, count);
817 static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO,
818 show_command_event_reg, store_command_event_reg);
820 static ssize_t show_mem_gpio_reg(struct device *d,
821 struct device_attribute *attr, char *buf)
824 struct ipw_priv *p = d->driver_data;
826 reg = ipw_read_reg32(p, 0x301100);
827 return sprintf(buf, "0x%08x\n", reg);
829 static ssize_t store_mem_gpio_reg(struct device *d,
830 struct device_attribute *attr,
831 const char *buf, size_t count)
834 struct ipw_priv *p = d->driver_data;
836 sscanf(buf, "%x", &reg);
837 ipw_write_reg32(p, 0x301100, reg);
838 return strnlen(buf, count);
841 static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO,
842 show_mem_gpio_reg, store_mem_gpio_reg);
844 static ssize_t show_indirect_dword(struct device *d,
845 struct device_attribute *attr, char *buf)
848 struct ipw_priv *priv = d->driver_data;
849 if (priv->status & STATUS_INDIRECT_DWORD)
850 reg = ipw_read_reg32(priv, priv->indirect_dword);
854 return sprintf(buf, "0x%08x\n", reg);
856 static ssize_t store_indirect_dword(struct device *d,
857 struct device_attribute *attr,
858 const char *buf, size_t count)
860 struct ipw_priv *priv = d->driver_data;
862 sscanf(buf, "%x", &priv->indirect_dword);
863 priv->status |= STATUS_INDIRECT_DWORD;
864 return strnlen(buf, count);
867 static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO,
868 show_indirect_dword, store_indirect_dword);
870 static ssize_t show_indirect_byte(struct device *d,
871 struct device_attribute *attr, char *buf)
874 struct ipw_priv *priv = d->driver_data;
875 if (priv->status & STATUS_INDIRECT_BYTE)
876 reg = ipw_read_reg8(priv, priv->indirect_byte);
880 return sprintf(buf, "0x%02x\n", reg);
882 static ssize_t store_indirect_byte(struct device *d,
883 struct device_attribute *attr,
884 const char *buf, size_t count)
886 struct ipw_priv *priv = d->driver_data;
888 sscanf(buf, "%x", &priv->indirect_byte);
889 priv->status |= STATUS_INDIRECT_BYTE;
890 return strnlen(buf, count);
893 static DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO,
894 show_indirect_byte, store_indirect_byte);
896 static ssize_t show_direct_dword(struct device *d,
897 struct device_attribute *attr, char *buf)
900 struct ipw_priv *priv = d->driver_data;
902 if (priv->status & STATUS_DIRECT_DWORD)
903 reg = ipw_read32(priv, priv->direct_dword);
907 return sprintf(buf, "0x%08x\n", reg);
909 static ssize_t store_direct_dword(struct device *d,
910 struct device_attribute *attr,
911 const char *buf, size_t count)
913 struct ipw_priv *priv = d->driver_data;
915 sscanf(buf, "%x", &priv->direct_dword);
916 priv->status |= STATUS_DIRECT_DWORD;
917 return strnlen(buf, count);
920 static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
921 show_direct_dword, store_direct_dword);
923 static inline int rf_kill_active(struct ipw_priv *priv)
925 if (0 == (ipw_read32(priv, 0x30) & 0x10000))
926 priv->status |= STATUS_RF_KILL_HW;
928 priv->status &= ~STATUS_RF_KILL_HW;
930 return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
933 static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
936 /* 0 - RF kill not enabled
937 1 - SW based RF kill active (sysfs)
938 2 - HW based RF kill active
939 3 - Both HW and SW based RF kill active */
940 struct ipw_priv *priv = d->driver_data;
941 int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
942 (rf_kill_active(priv) ? 0x2 : 0x0);
943 return sprintf(buf, "%i\n", val);
946 static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
948 if ((disable_radio ? 1 : 0) ==
949 (priv->status & STATUS_RF_KILL_SW ? 1 : 0))
952 IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
953 disable_radio ? "OFF" : "ON");
956 priv->status |= STATUS_RF_KILL_SW;
958 if (priv->workqueue) {
959 cancel_delayed_work(&priv->request_scan);
961 wake_up_interruptible(&priv->wait_command_queue);
962 queue_work(priv->workqueue, &priv->down);
964 priv->status &= ~STATUS_RF_KILL_SW;
965 if (rf_kill_active(priv)) {
966 IPW_DEBUG_RF_KILL("Can not turn radio back on - "
967 "disabled by HW switch\n");
968 /* Make sure the RF_KILL check timer is running */
969 cancel_delayed_work(&priv->rf_kill);
970 queue_delayed_work(priv->workqueue, &priv->rf_kill,
973 queue_work(priv->workqueue, &priv->up);
979 static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
980 const char *buf, size_t count)
982 struct ipw_priv *priv = d->driver_data;
984 ipw_radio_kill_sw(priv, buf[0] == '1');
989 static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
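/*
 * Illustrative usage of the rf_kill device attribute (a sketch): writing
 * '1' invokes ipw_radio_kill_sw() to assert the SW kill, writing '0'
 * releases it (the radio only comes back if the HW switch allows it), and
 * reading returns the 0-3 state documented above show_rf_kill().
 *
 *	echo 1 > rf_kill
 *	echo 0 > rf_kill
 */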
991 static void ipw_irq_tasklet(struct ipw_priv *priv)
993 u32 inta, inta_mask, handled = 0;
997 spin_lock_irqsave(&priv->lock, flags);
999 inta = ipw_read32(priv, CX2_INTA_RW);
1000 inta_mask = ipw_read32(priv, CX2_INTA_MASK_R);
1001 inta &= (CX2_INTA_MASK_ALL & inta_mask);
1003 /* Add any cached INTA values that need to be handled */
1004 inta |= priv->isr_inta;
1006 /* handle all the justifications for the interrupt */
1007 if (inta & CX2_INTA_BIT_RX_TRANSFER) {
1009 handled |= CX2_INTA_BIT_RX_TRANSFER;
1012 if (inta & CX2_INTA_BIT_TX_CMD_QUEUE) {
1013 IPW_DEBUG_HC("Command completed.\n");
1014 rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
1015 priv->status &= ~STATUS_HCMD_ACTIVE;
1016 wake_up_interruptible(&priv->wait_command_queue);
1017 handled |= CX2_INTA_BIT_TX_CMD_QUEUE;
1020 if (inta & CX2_INTA_BIT_TX_QUEUE_1) {
1021 IPW_DEBUG_TX("TX_QUEUE_1\n");
1022 rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
1023 handled |= CX2_INTA_BIT_TX_QUEUE_1;
1026 if (inta & CX2_INTA_BIT_TX_QUEUE_2) {
1027 IPW_DEBUG_TX("TX_QUEUE_2\n");
1028 rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
1029 handled |= CX2_INTA_BIT_TX_QUEUE_2;
1032 if (inta & CX2_INTA_BIT_TX_QUEUE_3) {
1033 IPW_DEBUG_TX("TX_QUEUE_3\n");
1034 rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
1035 handled |= CX2_INTA_BIT_TX_QUEUE_3;
1038 if (inta & CX2_INTA_BIT_TX_QUEUE_4) {
1039 IPW_DEBUG_TX("TX_QUEUE_4\n");
1040 rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
1041 handled |= CX2_INTA_BIT_TX_QUEUE_4;
1044 if (inta & CX2_INTA_BIT_STATUS_CHANGE) {
1045 IPW_WARNING("STATUS_CHANGE\n");
1046 handled |= CX2_INTA_BIT_STATUS_CHANGE;
1049 if (inta & CX2_INTA_BIT_BEACON_PERIOD_EXPIRED) {
1050 IPW_WARNING("TX_PERIOD_EXPIRED\n");
1051 handled |= CX2_INTA_BIT_BEACON_PERIOD_EXPIRED;
1054 if (inta & CX2_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
1055 IPW_WARNING("HOST_CMD_DONE\n");
1056 handled |= CX2_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
1059 if (inta & CX2_INTA_BIT_FW_INITIALIZATION_DONE) {
1060 IPW_WARNING("FW_INITIALIZATION_DONE\n");
1061 handled |= CX2_INTA_BIT_FW_INITIALIZATION_DONE;
1064 if (inta & CX2_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
1065 IPW_WARNING("PHY_OFF_DONE\n");
1066 handled |= CX2_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
1069 if (inta & CX2_INTA_BIT_RF_KILL_DONE) {
1070 IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
1071 priv->status |= STATUS_RF_KILL_HW;
1072 wake_up_interruptible(&priv->wait_command_queue);
1073 netif_carrier_off(priv->net_dev);
1074 netif_stop_queue(priv->net_dev);
1075 cancel_delayed_work(&priv->request_scan);
1076 queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
1077 handled |= CX2_INTA_BIT_RF_KILL_DONE;
1080 if (inta & CX2_INTA_BIT_FATAL_ERROR) {
1081 IPW_ERROR("Firmware error detected. Restarting.\n");
1082 #ifdef CONFIG_IPW_DEBUG
1083 if (ipw_debug_level & IPW_DL_FW_ERRORS) {
1084 ipw_dump_nic_error_log(priv);
1085 ipw_dump_nic_event_log(priv);
1088 queue_work(priv->workqueue, &priv->adapter_restart);
1089 handled |= CX2_INTA_BIT_FATAL_ERROR;
1092 if (inta & CX2_INTA_BIT_PARITY_ERROR) {
1093 IPW_ERROR("Parity error\n");
1094 handled |= CX2_INTA_BIT_PARITY_ERROR;
1097 if (handled != inta) {
1098 IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
1101 /* enable all interrupts */
1102 ipw_enable_interrupts(priv);
1104 spin_unlock_irqrestore(&priv->lock, flags);
1107 #ifdef CONFIG_IPW_DEBUG
1108 #define IPW_CMD(x) case IPW_CMD_ ## x : return #x
1109 static char *get_cmd_string(u8 cmd)
1112 IPW_CMD(HOST_COMPLETE);
1113 IPW_CMD(POWER_DOWN);
1114 IPW_CMD(SYSTEM_CONFIG);
1115 IPW_CMD(MULTICAST_ADDRESS);
1117 IPW_CMD(ADAPTER_ADDRESS);
1119 IPW_CMD(RTS_THRESHOLD);
1120 IPW_CMD(FRAG_THRESHOLD);
1121 IPW_CMD(POWER_MODE);
1123 IPW_CMD(TGI_TX_KEY);
1124 IPW_CMD(SCAN_REQUEST);
1125 IPW_CMD(SCAN_REQUEST_EXT);
1127 IPW_CMD(SUPPORTED_RATES);
1128 IPW_CMD(SCAN_ABORT);
1130 IPW_CMD(QOS_PARAMETERS);
1131 IPW_CMD(DINO_CONFIG);
1132 IPW_CMD(RSN_CAPABILITIES);
1134 IPW_CMD(CARD_DISABLE);
1135 IPW_CMD(SEED_NUMBER);
1137 IPW_CMD(COUNTRY_INFO);
1138 IPW_CMD(AIRONET_INFO);
1139 IPW_CMD(AP_TX_POWER);
1141 IPW_CMD(CCX_VER_INFO);
1142 IPW_CMD(SET_CALIBRATION);
1143 IPW_CMD(SENSITIVITY_CALIB);
1144 IPW_CMD(RETRY_LIMIT);
1145 IPW_CMD(IPW_PRE_POWER_DOWN);
1146 IPW_CMD(VAP_BEACON_TEMPLATE);
1147 IPW_CMD(VAP_DTIM_PERIOD);
1148 IPW_CMD(EXT_SUPPORTED_RATES);
1149 IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
1150 IPW_CMD(VAP_QUIET_INTERVALS);
1151 IPW_CMD(VAP_CHANNEL_SWITCH);
1152 IPW_CMD(VAP_MANDATORY_CHANNELS);
1153 IPW_CMD(VAP_CELL_PWR_LIMIT);
1154 IPW_CMD(VAP_CF_PARAM_SET);
1155 IPW_CMD(VAP_SET_BEACONING_STATE);
1156 IPW_CMD(MEASUREMENT);
1157 IPW_CMD(POWER_CAPABILITY);
1158 IPW_CMD(SUPPORTED_CHANNELS);
1159 IPW_CMD(TPC_REPORT);
1161 IPW_CMD(PRODUCTION_COMMAND);
1166 #endif /* CONFIG_IPW_DEBUG */
1168 #define HOST_COMPLETE_TIMEOUT HZ
1169 static int ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
1173 if (priv->status & STATUS_HCMD_ACTIVE) {
1174 IPW_ERROR("Already sending a command\n");
1178 priv->status |= STATUS_HCMD_ACTIVE;
1180 IPW_DEBUG_HC("Sending %s command (#%d), %d bytes\n",
1181 get_cmd_string(cmd->cmd), cmd->cmd, cmd->len);
1182 printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
1184 rc = ipw_queue_tx_hcmd(priv, cmd->cmd, &cmd->param, cmd->len, 0);
1188 rc = wait_event_interruptible_timeout(priv->wait_command_queue,
1190 status & STATUS_HCMD_ACTIVE),
1191 HOST_COMPLETE_TIMEOUT);
1193 IPW_DEBUG_INFO("Command completion timed out after %dms.\n",
1194 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
1195 priv->status &= ~STATUS_HCMD_ACTIVE;
1198 if (priv->status & STATUS_RF_KILL_MASK) {
1199 IPW_DEBUG_INFO("Command aborted due to RF Kill Switch\n");
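/*
 * All of the ipw_send_*() helpers below follow the same pattern: fill in a
 * struct host_cmd with the command id and parameter length, copy the
 * parameters into cmd.param, and hand it to ipw_send_cmd() above, which
 * queues it on the command TX queue and then sleeps for up to
 * HOST_COMPLETE_TIMEOUT waiting for the firmware to signal completion.
 */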
1206 static int ipw_send_host_complete(struct ipw_priv *priv)
1208 struct host_cmd cmd = {
1209 .cmd = IPW_CMD_HOST_COMPLETE,
1214 IPW_ERROR("Invalid args\n");
1218 if (ipw_send_cmd(priv, &cmd)) {
1219 IPW_ERROR("failed to send HOST_COMPLETE command\n");
1226 static int ipw_send_system_config(struct ipw_priv *priv,
1227 struct ipw_sys_config *config)
1229 struct host_cmd cmd = {
1230 .cmd = IPW_CMD_SYSTEM_CONFIG,
1231 .len = sizeof(*config)
1234 if (!priv || !config) {
1235 IPW_ERROR("Invalid args\n");
1239 memcpy(&cmd.param, config, sizeof(*config));
1240 if (ipw_send_cmd(priv, &cmd)) {
1241 IPW_ERROR("failed to send SYSTEM_CONFIG command\n");
1248 static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
1250 struct host_cmd cmd = {
1251 .cmd = IPW_CMD_SSID,
1252 .len = min(len, IW_ESSID_MAX_SIZE)
1255 if (!priv || !ssid) {
1256 IPW_ERROR("Invalid args\n");
1260 memcpy(&cmd.param, ssid, cmd.len);
1261 if (ipw_send_cmd(priv, &cmd)) {
1262 IPW_ERROR("failed to send SSID command\n");
1269 static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
1271 struct host_cmd cmd = {
1272 .cmd = IPW_CMD_ADAPTER_ADDRESS,
1276 if (!priv || !mac) {
1277 IPW_ERROR("Invalid args\n");
1281 IPW_DEBUG_INFO("%s: Setting MAC to " MAC_FMT "\n",
1282 priv->net_dev->name, MAC_ARG(mac));
1284 memcpy(&cmd.param, mac, ETH_ALEN);
1286 if (ipw_send_cmd(priv, &cmd)) {
1287 IPW_ERROR("failed to send ADAPTER_ADDRESS command\n");
1294 static void ipw_adapter_restart(void *adapter)
1296 struct ipw_priv *priv = adapter;
1298 if (priv->status & STATUS_RF_KILL_MASK)
1303 IPW_ERROR("Failed to up device\n");
1308 #define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
1310 static void ipw_scan_check(void *data)
1312 struct ipw_priv *priv = data;
1313 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
1314 IPW_DEBUG_SCAN("Scan completion watchdog resetting "
1315 "adapter (%dms).\n",
1316 IPW_SCAN_CHECK_WATCHDOG / 100);
1317 ipw_adapter_restart(priv);
1321 static int ipw_send_scan_request_ext(struct ipw_priv *priv,
1322 struct ipw_scan_request_ext *request)
1324 struct host_cmd cmd = {
1325 .cmd = IPW_CMD_SCAN_REQUEST_EXT,
1326 .len = sizeof(*request)
1329 if (!priv || !request) {
1330 IPW_ERROR("Invalid args\n");
1334 memcpy(&cmd.param, request, sizeof(*request));
1335 if (ipw_send_cmd(priv, &cmd)) {
1336 IPW_ERROR("failed to send SCAN_REQUEST_EXT command\n");
1340 queue_delayed_work(priv->workqueue, &priv->scan_check,
1341 IPW_SCAN_CHECK_WATCHDOG);
1345 static int ipw_send_scan_abort(struct ipw_priv *priv)
1347 struct host_cmd cmd = {
1348 .cmd = IPW_CMD_SCAN_ABORT,
1353 IPW_ERROR("Invalid args\n");
1357 if (ipw_send_cmd(priv, &cmd)) {
1358 IPW_ERROR("failed to send SCAN_ABORT command\n");
1365 static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
1367 struct host_cmd cmd = {
1368 .cmd = IPW_CMD_SENSITIVITY_CALIB,
1369 .len = sizeof(struct ipw_sensitivity_calib)
1371 struct ipw_sensitivity_calib *calib = (struct ipw_sensitivity_calib *)
1373 calib->beacon_rssi_raw = sens;
1374 if (ipw_send_cmd(priv, &cmd)) {
1375 IPW_ERROR("failed to send SENSITIVITY CALIB command\n");
1382 static int ipw_send_associate(struct ipw_priv *priv,
1383 struct ipw_associate *associate)
1385 struct host_cmd cmd = {
1386 .cmd = IPW_CMD_ASSOCIATE,
1387 .len = sizeof(*associate)
1390 if (!priv || !associate) {
1391 IPW_ERROR("Invalid args\n");
1395 memcpy(&cmd.param, associate, sizeof(*associate));
1396 if (ipw_send_cmd(priv, &cmd)) {
1397 IPW_ERROR("failed to send ASSOCIATE command\n");
1404 static int ipw_send_supported_rates(struct ipw_priv *priv,
1405 struct ipw_supported_rates *rates)
1407 struct host_cmd cmd = {
1408 .cmd = IPW_CMD_SUPPORTED_RATES,
1409 .len = sizeof(*rates)
1412 if (!priv || !rates) {
1413 IPW_ERROR("Invalid args\n");
1417 memcpy(&cmd.param, rates, sizeof(*rates));
1418 if (ipw_send_cmd(priv, &cmd)) {
1419 IPW_ERROR("failed to send SUPPORTED_RATES command\n");
1426 static int ipw_set_random_seed(struct ipw_priv *priv)
1428 struct host_cmd cmd = {
1429 .cmd = IPW_CMD_SEED_NUMBER,
1434 IPW_ERROR("Invalid args\n");
1438 get_random_bytes(&cmd.param, sizeof(u32));
1440 if (ipw_send_cmd(priv, &cmd)) {
1441 IPW_ERROR("failed to send SEED_NUMBER command\n");
1449 static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
1451 struct host_cmd cmd = {
1452 .cmd = IPW_CMD_CARD_DISABLE,
1457 IPW_ERROR("Invalid args\n");
1461 *((u32 *) & cmd.param) = phy_off;
1463 if (ipw_send_cmd(priv, &cmd)) {
1464 IPW_ERROR("failed to send CARD_DISABLE command\n");
1472 static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
1474 struct host_cmd cmd = {
1475 .cmd = IPW_CMD_TX_POWER,
1476 .len = sizeof(*power)
1479 if (!priv || !power) {
1480 IPW_ERROR("Invalid args\n");
1484 memcpy(&cmd.param, power, sizeof(*power));
1485 if (ipw_send_cmd(priv, &cmd)) {
1486 IPW_ERROR("failed to send TX_POWER command\n");
1493 static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
1495 struct ipw_rts_threshold rts_threshold = {
1496 .rts_threshold = rts,
1498 struct host_cmd cmd = {
1499 .cmd = IPW_CMD_RTS_THRESHOLD,
1500 .len = sizeof(rts_threshold)
1504 IPW_ERROR("Invalid args\n");
1508 memcpy(&cmd.param, &rts_threshold, sizeof(rts_threshold));
1509 if (ipw_send_cmd(priv, &cmd)) {
1510 IPW_ERROR("failed to send RTS_THRESHOLD command\n");
1517 static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
1519 struct ipw_frag_threshold frag_threshold = {
1520 .frag_threshold = frag,
1522 struct host_cmd cmd = {
1523 .cmd = IPW_CMD_FRAG_THRESHOLD,
1524 .len = sizeof(frag_threshold)
1528 IPW_ERROR("Invalid args\n");
1532 memcpy(&cmd.param, &frag_threshold, sizeof(frag_threshold));
1533 if (ipw_send_cmd(priv, &cmd)) {
1534 IPW_ERROR("failed to send FRAG_THRESHOLD command\n");
1541 static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
1543 struct host_cmd cmd = {
1544 .cmd = IPW_CMD_POWER_MODE,
1547 u32 *param = (u32 *) (&cmd.param);
1550 IPW_ERROR("Invalid args\n");
1554 /* If on battery, set to 3, if AC set to CAM, else user
1557 case IPW_POWER_BATTERY:
1558 *param = IPW_POWER_INDEX_3;
1561 *param = IPW_POWER_MODE_CAM;
1568 if (ipw_send_cmd(priv, &cmd)) {
1569 IPW_ERROR("failed to send POWER_MODE command\n");
1577 * The IPW device contains a Microwire compatible EEPROM that stores
1578 * various data like the MAC address. Usually the firmware has exclusive
1579 * access to the eeprom, but during device initialization (before the
1580 * device driver has sent the HostComplete command to the firmware) the
1581 * device driver has read access to the EEPROM by way of indirect addressing
1582 * through a couple of memory mapped registers.
1584 * The following is a simplified implementation for pulling data out of
1585 * the eeprom, along with some helper functions to find information in
1586 * the per device private data's copy of the eeprom.
1588 * NOTE: To better understand how these functions work (i.e. what is a chip
1589 * select and why do we have to keep driving the eeprom clock?), read
1590 * just about any data sheet for a Microwire compatible EEPROM.
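/*
 * For reference, a single 16 bit read as implemented below proceeds as
 * follows: assert the chip select, clock out the start bit, the two opcode
 * bits and eight address bits MSB first (eeprom_op), clock one dummy bit,
 * then clock sixteen more times sampling EEPROM_BIT_DO after each clock to
 * assemble the result MSB first (eeprom_read_u16), and finally release the
 * chip select (eeprom_disable_cs).
 */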
1593 /* write a 32 bit value into the indirect accessor register */
1594 static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
1596 ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
1598 /* the eeprom requires some time to complete the operation */
1599 udelay(p->eeprom_delay);
1604 /* perform a chip select operation */
1605 static inline void eeprom_cs(struct ipw_priv *priv)
1607 eeprom_write_reg(priv, 0);
1608 eeprom_write_reg(priv, EEPROM_BIT_CS);
1609 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
1610 eeprom_write_reg(priv, EEPROM_BIT_CS);
1613 /* release the eeprom chip select */
1614 static inline void eeprom_disable_cs(struct ipw_priv *priv)
1616 eeprom_write_reg(priv, EEPROM_BIT_CS);
1617 eeprom_write_reg(priv, 0);
1618 eeprom_write_reg(priv, EEPROM_BIT_SK);
1621 /* push a single bit down to the eeprom */
1622 static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
1624 int d = (bit ? EEPROM_BIT_DI : 0);
1625 eeprom_write_reg(p, EEPROM_BIT_CS | d);
1626 eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
1629 /* push an opcode followed by an address down to the eeprom */
1630 static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
1635 eeprom_write_bit(priv, 1);
1636 eeprom_write_bit(priv, op & 2);
1637 eeprom_write_bit(priv, op & 1);
1638 for (i = 7; i >= 0; i--) {
1639 eeprom_write_bit(priv, addr & (1 << i));
1643 /* pull 16 bits off the eeprom, one bit at a time */
1644 static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
1649 /* Send READ Opcode */
1650 eeprom_op(priv, EEPROM_CMD_READ, addr);
1652 /* Send dummy bit */
1653 eeprom_write_reg(priv, EEPROM_BIT_CS);
1655 /* Read the byte off the eeprom one bit at a time */
1656 for (i = 0; i < 16; i++) {
1658 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
1659 eeprom_write_reg(priv, EEPROM_BIT_CS);
1660 data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
1661 r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
1664 /* Send another dummy bit */
1665 eeprom_write_reg(priv, 0);
1666 eeprom_disable_cs(priv);
1671 /* helper function for pulling the mac address out of the private */
1672 /* data's copy of the eeprom data */
1673 static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
1675 u8 *ee = (u8 *) priv->eeprom;
1676 memcpy(mac, &ee[EEPROM_MAC_ADDRESS], 6);
1680 * Either the device driver (i.e. the host) or the firmware can
1681 * load eeprom data into the designated region in SRAM. If neither
1682 * happens then the FW will shutdown with a fatal error.
1684 * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE
1685 * region of shared SRAM needs to be non-zero.
1687 static void ipw_eeprom_init_sram(struct ipw_priv *priv)
1690 u16 *eeprom = (u16 *) priv->eeprom;
1692 IPW_DEBUG_TRACE(">>\n");
1694 /* read entire contents of eeprom into private buffer */
1695 for (i = 0; i < 128; i++)
1696 eeprom[i] = eeprom_read_u16(priv, (u8) i);
1699 If the data looks correct, then copy it to our private
1700 copy. Otherwise let the firmware know to perform the operation
1703 if (priv->eeprom[EEPROM_VERSION] != 0) {
1704 IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
1706 /* write the eeprom data to sram */
1707 for (i = 0; i < CX2_EEPROM_IMAGE_SIZE; i++)
1708 ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
1710 /* Do not load eeprom data on fatal error or suspend */
1711 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
1713 IPW_DEBUG_INFO("Enabling FW initialization of SRAM\n");
1715 /* Load eeprom data on fatal error or suspend */
1716 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
1719 IPW_DEBUG_TRACE("<<\n");
1722 static inline void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
1727 _ipw_write32(priv, CX2_AUTOINC_ADDR, start);
1729 _ipw_write32(priv, CX2_AUTOINC_DATA, 0);
1732 static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
1734 ipw_zero_memory(priv, CX2_SHARED_SRAM_DMA_CONTROL,
1735 CB_NUMBER_OF_ELEMENTS_SMALL *
1736 sizeof(struct command_block));
1739 static int ipw_fw_dma_enable(struct ipw_priv *priv)
1740 { /* start dma engine but no transfers yet */
1742 IPW_DEBUG_FW(">> : \n");
1745 ipw_fw_dma_reset_command_blocks(priv);
1747 /* Write CB base address */
1748 ipw_write_reg32(priv, CX2_DMA_I_CB_BASE, CX2_SHARED_SRAM_DMA_CONTROL);
1750 IPW_DEBUG_FW("<< : \n");
1754 static void ipw_fw_dma_abort(struct ipw_priv *priv)
1758 IPW_DEBUG_FW(">> :\n");
1760 /* set the Stop and Abort bit */
1761 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
1762 ipw_write_reg32(priv, CX2_DMA_I_DMA_CONTROL, control);
1763 priv->sram_desc.last_cb_index = 0;
1765 IPW_DEBUG_FW("<< \n");
1768 static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
1769 struct command_block *cb)
1772 CX2_SHARED_SRAM_DMA_CONTROL +
1773 (sizeof(struct command_block) * index);
1774 IPW_DEBUG_FW(">> :\n");
1776 ipw_write_indirect(priv, address, (u8 *) cb,
1777 (int)sizeof(struct command_block));
1779 IPW_DEBUG_FW("<< :\n");
1784 static int ipw_fw_dma_kick(struct ipw_priv *priv)
1789 IPW_DEBUG_FW(">> :\n");
1791 for (index = 0; index < priv->sram_desc.last_cb_index; index++)
1792 ipw_fw_dma_write_command_block(priv, index,
1793 &priv->sram_desc.cb_list[index]);
1795 /* Enable the DMA in the CSR register */
1796 ipw_clear_bit(priv, CX2_RESET_REG,
1797 CX2_RESET_REG_MASTER_DISABLED |
1798 CX2_RESET_REG_STOP_MASTER);
1800 /* Set the Start bit. */
1801 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
1802 ipw_write_reg32(priv, CX2_DMA_I_DMA_CONTROL, control);
1804 IPW_DEBUG_FW("<< :\n");
1808 static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
1811 u32 register_value = 0;
1812 u32 cb_fields_address = 0;
1814 IPW_DEBUG_FW(">> :\n");
1815 address = ipw_read_reg32(priv, CX2_DMA_I_CURRENT_CB);
1816 IPW_DEBUG_FW_INFO("Current CB is 0x%x \n", address);
1818 /* Read the DMA Control register */
1819 register_value = ipw_read_reg32(priv, CX2_DMA_I_DMA_CONTROL);
1820 IPW_DEBUG_FW_INFO("CX2_DMA_I_DMA_CONTROL is 0x%x \n", register_value);
1822 /* Print the CB values */
1823 cb_fields_address = address;
1824 register_value = ipw_read_reg32(priv, cb_fields_address);
1825 IPW_DEBUG_FW_INFO("Current CB ControlField is 0x%x \n", register_value);
1827 cb_fields_address += sizeof(u32);
1828 register_value = ipw_read_reg32(priv, cb_fields_address);
1829 IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x \n", register_value);
1831 cb_fields_address += sizeof(u32);
1832 register_value = ipw_read_reg32(priv, cb_fields_address);
1833 IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x \n",
1836 cb_fields_address += sizeof(u32);
1837 register_value = ipw_read_reg32(priv, cb_fields_address);
1838 IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x \n", register_value);
1840 IPW_DEBUG_FW("<< :\n");
1843 static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
1845 u32 current_cb_address = 0;
1846 u32 current_cb_index = 0;
1848 IPW_DEBUG_FW(">> :\n");
1849 current_cb_address = ipw_read_reg32(priv, CX2_DMA_I_CURRENT_CB);
1851 current_cb_index = (current_cb_address - CX2_SHARED_SRAM_DMA_CONTROL) /
1852 sizeof(struct command_block);
1854 IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X \n",
1855 current_cb_index, current_cb_address);
1857 IPW_DEBUG_FW("<< :\n");
1858 return current_cb_index;
1862 static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
1866 int interrupt_enabled, int is_last)
1869 u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
1870 CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
1872 struct command_block *cb;
1873 u32 last_cb_element = 0;
1875 IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
1876 src_address, dest_address, length);
1878 if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
1881 last_cb_element = priv->sram_desc.last_cb_index;
1882 cb = &priv->sram_desc.cb_list[last_cb_element];
1883 priv->sram_desc.last_cb_index++;
1885 /* Calculate the new CB control word */
1886 if (interrupt_enabled)
1887 control |= CB_INT_ENABLED;
1890 control |= CB_LAST_VALID;
1894 /* Calculate the CB Element's checksum value */
1895 cb->status = control ^ src_address ^ dest_address;
1897 /* Copy the Source and Destination addresses */
1898 cb->dest_addr = dest_address;
1899 cb->source_addr = src_address;
1901 /* Copy the Control Word last */
1902 cb->control = control;
1907 static int ipw_fw_dma_add_buffer(struct ipw_priv *priv,
1908 u32 src_phys, u32 dest_address, u32 length)
1910 u32 bytes_left = length;
1912 u32 dest_offset = 0;
1914 IPW_DEBUG_FW(">> \n");
1915 IPW_DEBUG_FW_INFO("src_phys=0x%x dest_address=0x%x length=0x%x\n",
1916 src_phys, dest_address, length);
1917 while (bytes_left > CB_MAX_LENGTH) {
1918 status = ipw_fw_dma_add_command_block(priv,
1919 src_phys + src_offset,
1922 CB_MAX_LENGTH, 0, 0);
1924 IPW_DEBUG_FW_INFO(": Failed\n");
1927 IPW_DEBUG_FW_INFO(": Added new cb\n");
1929 src_offset += CB_MAX_LENGTH;
1930 dest_offset += CB_MAX_LENGTH;
1931 bytes_left -= CB_MAX_LENGTH;
1934 /* add the buffer tail */
1935 if (bytes_left > 0) {
1937 ipw_fw_dma_add_command_block(priv, src_phys + src_offset,
1938 dest_address + dest_offset,
1941 IPW_DEBUG_FW_INFO(": Failed on the buffer tail\n");
1945 (": Adding new cb - the buffer tail\n");
1948 IPW_DEBUG_FW("<< \n");
1952 static int ipw_fw_dma_wait(struct ipw_priv *priv)
1954 u32 current_index = 0;
1957 IPW_DEBUG_FW(">> : \n");
1959 current_index = ipw_fw_dma_command_block_index(priv);
1960 IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%8X\n",
1961 (int)priv->sram_desc.last_cb_index);
1963 while (current_index < priv->sram_desc.last_cb_index) {
1965 current_index = ipw_fw_dma_command_block_index(priv);
1969 if (watchdog > 400) {
1970 IPW_DEBUG_FW_INFO("Timeout\n");
1971 ipw_fw_dma_dump_command_block(priv);
1972 ipw_fw_dma_abort(priv);
1977 ipw_fw_dma_abort(priv);
1979 /* Disable the DMA in the CSR register */
1980 ipw_set_bit(priv, CX2_RESET_REG,
1981 CX2_RESET_REG_MASTER_DISABLED | CX2_RESET_REG_STOP_MASTER);
1983 IPW_DEBUG_FW("<< dmaWaitSync \n");
1987 static void ipw_remove_current_network(struct ipw_priv *priv)
1989 struct list_head *element, *safe;
1990 struct ieee80211_network *network = NULL;
1991 list_for_each_safe(element, safe, &priv->ieee->network_list) {
1992 network = list_entry(element, struct ieee80211_network, list);
1993 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
1995 list_add_tail(&network->list,
1996 &priv->ieee->network_free_list);
2002 * Check that card is still alive.
2003 * Reads debug register from domain0.
2004 * If card is present, pre-defined value should
2008 * @return 1 if card is present, 0 otherwise
2010 static inline int ipw_alive(struct ipw_priv *priv)
2012 return ipw_read32(priv, 0x90) == 0xd55555d5;
2015 static inline int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
2021 if ((ipw_read32(priv, addr) & mask) == mask)
2025 } while (i < timeout);
2030 /* These functions load the firmware and micro code for the operation of
2031 * the ipw hardware. It assumes the buffer has all the bits for the
2032 * image and the caller is handling the memory allocation and clean up.
2035 static int ipw_stop_master(struct ipw_priv *priv)
2039 IPW_DEBUG_TRACE(">> \n");
2040 /* stop master. typical delay - 0 */
2041 ipw_set_bit(priv, CX2_RESET_REG, CX2_RESET_REG_STOP_MASTER);
2043 rc = ipw_poll_bit(priv, CX2_RESET_REG,
2044 CX2_RESET_REG_MASTER_DISABLED, 100);
2046 IPW_ERROR("stop master failed in 10ms\n");
2050 IPW_DEBUG_INFO("stop master %dms\n", rc);
2055 static void ipw_arc_release(struct ipw_priv *priv)
2057 IPW_DEBUG_TRACE(">> \n");
2060 ipw_clear_bit(priv, CX2_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
2062 /* no one knows the exact timing, so for safety add some delay */
2076 #define IPW_FW_MAJOR_VERSION 2
2077 #define IPW_FW_MINOR_VERSION 2
2079 #define IPW_FW_MINOR(x) (((x) >> 8) & 0xff)
2080 #define IPW_FW_MAJOR(x) (x & 0xff)
2082 #define IPW_FW_VERSION ((IPW_FW_MINOR_VERSION << 8) | \
2083 IPW_FW_MAJOR_VERSION)
2085 #define IPW_FW_PREFIX "ipw-" __stringify(IPW_FW_MAJOR_VERSION) \
2086 "." __stringify(IPW_FW_MINOR_VERSION) "-"
2088 #if IPW_FW_MAJOR_VERSION >= 2 && IPW_FW_MINOR_VERSION > 0
2089 #define IPW_FW_NAME(x) IPW_FW_PREFIX "" x ".fw"
2091 #define IPW_FW_NAME(x) "ipw2200_" x ".fw"
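/*
 * With the version macros above (major 2, minor 2) the first branch is
 * taken, so e.g. IPW_FW_NAME("boot") expands to "ipw-2.2-boot.fw"; the
 * fallback branch would instead yield "ipw2200_boot.fw".
 */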
2094 static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
2096 int rc = 0, i, addr;
2100 image = (u16 *) data;
2102 IPW_DEBUG_TRACE(">> \n");
2104 rc = ipw_stop_master(priv);
2109 // spin_lock_irqsave(&priv->lock, flags);
2111 for (addr = CX2_SHARED_LOWER_BOUND;
2112 addr < CX2_REGISTER_DOMAIN1_END; addr += 4) {
2113 ipw_write32(priv, addr, 0);
2116 /* no ucode (yet) */
2117 memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
2118 /* destroy DMA queues */
2119 /* reset sequence */
2121 ipw_write_reg32(priv, CX2_MEM_HALT_AND_RESET, CX2_BIT_HALT_RESET_ON);
2122 ipw_arc_release(priv);
2123 ipw_write_reg32(priv, CX2_MEM_HALT_AND_RESET, CX2_BIT_HALT_RESET_OFF);
2127 ipw_write_reg32(priv, CX2_INTERNAL_CMD_EVENT, CX2_BASEBAND_POWER_DOWN);
2130 ipw_write_reg32(priv, CX2_INTERNAL_CMD_EVENT, 0);
2133 /* enable ucode store */
2134 ipw_write_reg8(priv, DINO_CONTROL_REG, 0x0);
2135 ipw_write_reg8(priv, DINO_CONTROL_REG, DINO_ENABLE_CS);
2141 * Do NOT set indirect address register once and then
2142 * store data to indirect data register in the loop.
2143 * It seems very reasonable, but in this case DINO does not
2144 * accept the ucode. It is essential to set the address each time.
2146 /* load new ipw uCode */
2147 for (i = 0; i < len / 2; i++)
2148 ipw_write_reg16(priv, CX2_BASEBAND_CONTROL_STORE, image[i]);
2151 ipw_write_reg8(priv, CX2_BASEBAND_CONTROL_STATUS, 0);
2152 ipw_write_reg8(priv, CX2_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);
2154 /* this is where the igx / win driver deviates from the VAP driver. */
2156 /* wait for alive response */
2157 for (i = 0; i < 100; i++) {
2158 /* poll for incoming data */
2159 cr = ipw_read_reg8(priv, CX2_BASEBAND_CONTROL_STATUS);
2160 if (cr & DINO_RXFIFO_DATA)
2165 if (cr & DINO_RXFIFO_DATA) {
2166 /* alive_command_response size is NOT a multiple of 4 */
2167 u32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];
2169 for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
2170 response_buffer[i] =
2171 ipw_read_reg32(priv, CX2_BASEBAND_RX_FIFO_READ);
2172 memcpy(&priv->dino_alive, response_buffer,
2173 sizeof(priv->dino_alive));
2174 if (priv->dino_alive.alive_command == 1
2175 && priv->dino_alive.ucode_valid == 1) {
2178 ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
2179 "of %02d/%02d/%02d %02d:%02d\n",
2180 priv->dino_alive.software_revision,
2181 priv->dino_alive.software_revision,
2182 priv->dino_alive.device_identifier,
2183 priv->dino_alive.device_identifier,
2184 priv->dino_alive.time_stamp[0],
2185 priv->dino_alive.time_stamp[1],
2186 priv->dino_alive.time_stamp[2],
2187 priv->dino_alive.time_stamp[3],
2188 priv->dino_alive.time_stamp[4]);
2190 IPW_DEBUG_INFO("Microcode is not alive\n");
2194 IPW_DEBUG_INFO("No alive response from DINO\n");
2198 /* disable DINO, otherwise for some reason
2199 the firmware has problems getting an alive response. */
2200 ipw_write_reg8(priv, CX2_BASEBAND_CONTROL_STATUS, 0);
2202 // spin_unlock_irqrestore(&priv->lock, flags);
2207 static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
2211 struct fw_chunk *chunk;
2212 dma_addr_t shared_phys;
2215 IPW_DEBUG_TRACE(">> : \n");
2216 shared_virt = pci_alloc_consistent(priv->pci_dev, len, &shared_phys);
2221 memmove(shared_virt, data, len);
2224 rc = ipw_fw_dma_enable(priv);
2226 if (priv->sram_desc.last_cb_index > 0) {
2227 /* the DMA is already ready; this would be a bug. */
2233 chunk = (struct fw_chunk *)(data + offset);
2234 offset += sizeof(struct fw_chunk);
2235 /* build DMA packet and queue up for sending */
2236 /* dma to chunk->address, the chunk->length bytes from data +
2239 rc = ipw_fw_dma_add_buffer(priv, shared_phys + offset,
2240 chunk->address, chunk->length);
2242 IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
2246 offset += chunk->length;
2247 } while (offset < len);
2249 /* Run the DMA and wait for the answer */
2250 rc = ipw_fw_dma_kick(priv);
2252 IPW_ERROR("dmaKick Failed\n");
2256 rc = ipw_fw_dma_wait(priv);
2258 IPW_ERROR("dmaWaitSync Failed\n");
2262 pci_free_consistent(priv->pci_dev, len, shared_virt, shared_phys);
2267 static int ipw_stop_nic(struct ipw_priv *priv)
2272 ipw_write32(priv, CX2_RESET_REG, CX2_RESET_REG_STOP_MASTER);
2274 rc = ipw_poll_bit(priv, CX2_RESET_REG,
2275 CX2_RESET_REG_MASTER_DISABLED, 500);
2277 IPW_ERROR("wait for reg master disabled failed\n");
2281 ipw_set_bit(priv, CX2_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
2286 static void ipw_start_nic(struct ipw_priv *priv)
2288 IPW_DEBUG_TRACE(">>\n");
2290 /* prvHwStartNic release ARC */
2291 ipw_clear_bit(priv, CX2_RESET_REG,
2292 CX2_RESET_REG_MASTER_DISABLED |
2293 CX2_RESET_REG_STOP_MASTER |
2294 CBD_RESET_REG_PRINCETON_RESET);
2296 /* enable power management */
2297 ipw_set_bit(priv, CX2_GP_CNTRL_RW,
2298 CX2_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
2300 IPW_DEBUG_TRACE("<<\n");
2303 static int ipw_init_nic(struct ipw_priv *priv)
2307 IPW_DEBUG_TRACE(">>\n");
2310 /* set "initialization complete" bit to move adapter to D0 state */
2311 ipw_set_bit(priv, CX2_GP_CNTRL_RW, CX2_GP_CNTRL_BIT_INIT_DONE);
2313 /* low-level PLL activation */
2314 ipw_write32(priv, CX2_READ_INT_REGISTER,
2315 CX2_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
2317 /* wait for clock stabilization */
2318 rc = ipw_poll_bit(priv, CX2_GP_CNTRL_RW,
2319 CX2_GP_CNTRL_BIT_CLOCK_READY, 250);
2321 IPW_DEBUG_INFO("FAILED wait for clock stabilization\n");
2323 /* assert SW reset */
2324 ipw_set_bit(priv, CX2_RESET_REG, CX2_RESET_REG_SW_RESET);
2328 /* set "initialization complete" bit to move adapter to D0 state */
2329 ipw_set_bit(priv, CX2_GP_CNTRL_RW, CX2_GP_CNTRL_BIT_INIT_DONE);
2331 IPW_DEBUG_TRACE("<<\n");
2335 /* Call this function from process context; it will sleep in request_firmware.
2336 * Probe is an OK place to call this from.
2338 static int ipw_reset_nic(struct ipw_priv *priv)
2342 IPW_DEBUG_TRACE(">>\n");
2344 rc = ipw_init_nic(priv);
2346 /* Clear the 'host command active' bit... */
2347 priv->status &= ~STATUS_HCMD_ACTIVE;
2348 wake_up_interruptible(&priv->wait_command_queue);
2350 IPW_DEBUG_TRACE("<<\n");
2354 static int ipw_get_fw(struct ipw_priv *priv,
2355 const struct firmware **fw, const char *name)
2357 struct fw_header *header;
2360 /* ask firmware_class module to get the boot firmware off disk */
2361 rc = request_firmware(fw, name, &priv->pci_dev->dev);
2363 IPW_ERROR("%s load failed: Reason %d\n", name, rc);
2367 header = (struct fw_header *)(*fw)->data;
2368 if (IPW_FW_MAJOR(header->version) != IPW_FW_MAJOR_VERSION) {
2369 IPW_ERROR("'%s' firmware version not compatible (%d != %d)\n",
2371 IPW_FW_MAJOR(header->version), IPW_FW_MAJOR_VERSION);
2375 IPW_DEBUG_INFO("Loading firmware '%s' file v%d.%d (%zd bytes)\n",
2377 IPW_FW_MAJOR(header->version),
2378 IPW_FW_MINOR(header->version),
2379 (*fw)->size - sizeof(struct fw_header));
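/* Note: the firmware files are assumed to consist of a struct fw_header
 * immediately followed by the image payload, which is why the callers in
 * ipw_load() skip sizeof(struct fw_header) bytes when handing the data to
 * the load routines. */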
2383 #define CX2_RX_BUF_SIZE (3000)
2385 static inline void ipw_rx_queue_reset(struct ipw_priv *priv,
2386 struct ipw_rx_queue *rxq)
2388 unsigned long flags;
2391 spin_lock_irqsave(&rxq->lock, flags);
2393 INIT_LIST_HEAD(&rxq->rx_free);
2394 INIT_LIST_HEAD(&rxq->rx_used);
2396 /* Fill the rx_used queue with _all_ of the Rx buffers */
2397 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
2398 /* In the reset function, these buffers may have been allocated
2399 * to an SKB, so we need to unmap and free potential storage */
2400 if (rxq->pool[i].skb != NULL) {
2401 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
2402 CX2_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
2403 dev_kfree_skb(rxq->pool[i].skb);
2405 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
2408 /* Set us so that we have processed and used all buffers, but have
2409 * not restocked the Rx queue with fresh buffers */
2410 rxq->read = rxq->write = 0;
2411 rxq->processed = RX_QUEUE_SIZE - 1;
2412 rxq->free_count = 0;
2413 spin_unlock_irqrestore(&rxq->lock, flags);
2417 static int fw_loaded = 0;
2418 static const struct firmware *bootfw = NULL;
2419 static const struct firmware *firmware = NULL;
2420 static const struct firmware *ucode = NULL;
2423 static int ipw_load(struct ipw_priv *priv)
2426 const struct firmware *bootfw = NULL;
2427 const struct firmware *firmware = NULL;
2428 const struct firmware *ucode = NULL;
2430 int rc = 0, retries = 3;
2435 rc = ipw_get_fw(priv, &bootfw, IPW_FW_NAME("boot"));
2439 switch (priv->ieee->iw_mode) {
2441 rc = ipw_get_fw(priv, &ucode,
2442 IPW_FW_NAME("ibss_ucode"));
2446 rc = ipw_get_fw(priv, &firmware, IPW_FW_NAME("ibss"));
2449 #ifdef CONFIG_IPW_PROMISC
2450 case IW_MODE_MONITOR:
2451 rc = ipw_get_fw(priv, &ucode,
2452 IPW_FW_NAME("ibss_ucode"));
2456 rc = ipw_get_fw(priv, &firmware,
2457 IPW_FW_NAME("sniffer"));
2461 rc = ipw_get_fw(priv, &ucode, IPW_FW_NAME("bss_ucode"));
2465 rc = ipw_get_fw(priv, &firmware, IPW_FW_NAME("bss"));
2481 priv->rxq = ipw_rx_queue_alloc(priv);
2483 ipw_rx_queue_reset(priv, priv->rxq);
2485 IPW_ERROR("Unable to initialize Rx queue\n");
2490 /* Ensure interrupts are disabled */
2491 ipw_write32(priv, CX2_INTA_MASK_R, ~CX2_INTA_MASK_ALL);
2492 priv->status &= ~STATUS_INT_ENABLED;
2494 /* ack pending interrupts */
2495 ipw_write32(priv, CX2_INTA_RW, CX2_INTA_MASK_ALL);
2499 rc = ipw_reset_nic(priv);
2501 IPW_ERROR("Unable to reset NIC\n");
2505 ipw_zero_memory(priv, CX2_NIC_SRAM_LOWER_BOUND,
2506 CX2_NIC_SRAM_UPPER_BOUND - CX2_NIC_SRAM_LOWER_BOUND);
2508 /* DMA the initial boot firmware into the device */
2509 rc = ipw_load_firmware(priv, bootfw->data + sizeof(struct fw_header),
2510 bootfw->size - sizeof(struct fw_header));
2512 IPW_ERROR("Unable to load boot firmware\n");
2516 /* kick start the device */
2517 ipw_start_nic(priv);
2519 /* wait for the device to finish its initial startup sequence */
2520 rc = ipw_poll_bit(priv, CX2_INTA_RW,
2521 CX2_INTA_BIT_FW_INITIALIZATION_DONE, 500);
2523 IPW_ERROR("device failed to boot initial fw image\n");
2526 IPW_DEBUG_INFO("initial device response after %dms\n", rc);
2528 /* ack fw init done interrupt */
2529 ipw_write32(priv, CX2_INTA_RW, CX2_INTA_BIT_FW_INITIALIZATION_DONE);
2531 /* DMA the ucode into the device */
2532 rc = ipw_load_ucode(priv, ucode->data + sizeof(struct fw_header),
2533 ucode->size - sizeof(struct fw_header));
2535 IPW_ERROR("Unable to load ucode\n");
2542 /* DMA bss firmware into the device */
2543 rc = ipw_load_firmware(priv, firmware->data +
2544 sizeof(struct fw_header),
2545 firmware->size - sizeof(struct fw_header));
2547 IPW_ERROR("Unable to load firmware\n");
2551 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
2553 rc = ipw_queue_reset(priv);
2555 IPW_ERROR("Unable to initialize queues\n");
2559 /* Ensure interrupts are disabled */
2560 ipw_write32(priv, CX2_INTA_MASK_R, ~CX2_INTA_MASK_ALL);
2562 /* kick start the device */
2563 ipw_start_nic(priv);
2565 if (ipw_read32(priv, CX2_INTA_RW) & CX2_INTA_BIT_PARITY_ERROR) {
2567 IPW_WARNING("Parity error. Retrying init.\n");
2572 IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
2577 /* wait for the device */
2578 rc = ipw_poll_bit(priv, CX2_INTA_RW,
2579 CX2_INTA_BIT_FW_INITIALIZATION_DONE, 500);
2581 IPW_ERROR("device failed to start after 500ms\n");
2584 IPW_DEBUG_INFO("device response after %dms\n", rc);
2586 /* ack fw init done interrupt */
2587 ipw_write32(priv, CX2_INTA_RW, CX2_INTA_BIT_FW_INITIALIZATION_DONE);
2589 /* read eeprom data and initialize the eeprom region of sram */
2590 priv->eeprom_delay = 1;
2591 ipw_eeprom_init_sram(priv);
2593 /* enable interrupts */
2594 ipw_enable_interrupts(priv);
2596 /* Ensure our queue has valid packets */
2597 ipw_rx_queue_replenish(priv);
2599 ipw_write32(priv, CX2_RX_READ_INDEX, priv->rxq->read);
2601 /* ack pending interrupts */
2602 ipw_write32(priv, CX2_INTA_RW, CX2_INTA_MASK_ALL);
2605 release_firmware(bootfw);
2606 release_firmware(ucode);
2607 release_firmware(firmware);
2613 ipw_rx_queue_free(priv, priv->rxq);
2616 ipw_tx_queue_free(priv);
2618 release_firmware(bootfw);
2620 release_firmware(ucode);
2622 release_firmware(firmware);
2625 bootfw = ucode = firmware = NULL;
2634 * Theory of operation
2636 * A queue is a circular buffer with 'Read' and 'Write' pointers.
2637 * Two empty entries are always kept in the buffer to protect from overflow.
2639 * For the Tx queue, there are low mark and high mark limits. If, after queuing
2640 * a packet for Tx, free space becomes < low mark, the Tx queue is stopped. When
2641 * reclaiming packets (on the 'tx done' IRQ), if free space becomes > high mark,
2644 * The IPW operates with six queues, one receive queue in the device's
2645 * sram, one transmit queue for sending commands to the device firmware,
2646 * and four transmit queues for data.
2648 * The four transmit queues allow for performing quality of service (qos)
2649 * transmissions as per the 802.11 protocol. Currently Linux does not
2650 * provide a mechanism to the user for utilizing prioritized queues, so
2651 * we only utilize the first data transmit queue (queue1).
2655 * Driver allocates buffers of this size for Rx
2658 static inline int ipw_queue_space(const struct clx2_queue *q)
2660 int s = q->last_used - q->first_empty;
2663 s -= 2; /* keep some reserve to not confuse empty and full situations */
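/* Worked example (illustrative only, assuming the elided remainder of this
 * function wraps negative values by adding n_bd): with n_bd = 64,
 * last_used = 10 and first_empty = 60, s = 10 - 60 = -50, which wraps to
 * 14 and is then reduced by the two-entry reserve, leaving 12 free slots. */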
2669 static inline int ipw_queue_inc_wrap(int index, int n_bd)
2671 return (++index == n_bd) ? 0 : index;
2675 * Initialize common DMA queue structure
2677 * @param q queue to init
2678 * @param count Number of BD's to allocate. Should be power of 2
2679 * @param read_register Address for 'read' register
2680 * (not offset within BAR, full address)
2681 * @param write_register Address for 'write' register
2682 * (not offset within BAR, full address)
2683 * @param base_register Address for 'base' register
2684 * (not offset within BAR, full address)
2685 * @param size Address for 'size' register
2686 * (not offset within BAR, full address)
2688 static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
2689 int count, u32 read, u32 write, u32 base, u32 size)
2693 q->low_mark = q->n_bd / 4;
2694 if (q->low_mark < 4)
2697 q->high_mark = q->n_bd / 8;
2698 if (q->high_mark < 2)
2701 q->first_empty = q->last_used = 0;
2705 ipw_write32(priv, base, q->dma_addr);
2706 ipw_write32(priv, size, count);
2707 ipw_write32(priv, read, 0);
2708 ipw_write32(priv, write, 0);
2710 _ipw_read32(priv, 0x90);
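/* The dummy read of register 0x90 above is presumably there to flush the
 * posted PCI writes before the queue is used; the same idiom appears after
 * the write-index update in ipw_queue_tx_hcmd(). */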
2713 static int ipw_queue_tx_init(struct ipw_priv *priv,
2714 struct clx2_tx_queue *q,
2715 int count, u32 read, u32 write, u32 base, u32 size)
2717 struct pci_dev *dev = priv->pci_dev;
2719 q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
2721 IPW_ERROR("vmalloc for auxilary BD structures failed\n");
2726 pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
2728 IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
2729 sizeof(q->bd[0]) * count);
2735 ipw_queue_init(priv, &q->q, count, read, write, base, size);
2740 * Free one TFD, the one at index [txq->q.last_used].
2741 * Do NOT advance any indexes
2746 static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
2747 struct clx2_tx_queue *txq)
2749 struct tfd_frame *bd = &txq->bd[txq->q.last_used];
2750 struct pci_dev *dev = priv->pci_dev;
2754 if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
2755 /* nothing to cleanup after for host commands */
2759 if (bd->u.data.num_chunks > NUM_TFD_CHUNKS) {
2760 IPW_ERROR("Too many chunks: %i\n", bd->u.data.num_chunks);
2761 /** @todo issue a fatal error, it is quite a serious situation */
2765 /* unmap chunks if any */
2766 for (i = 0; i < bd->u.data.num_chunks; i++) {
2767 pci_unmap_single(dev, bd->u.data.chunk_ptr[i],
2768 bd->u.data.chunk_len[i], PCI_DMA_TODEVICE);
2769 if (txq->txb[txq->q.last_used]) {
2770 ieee80211_txb_free(txq->txb[txq->q.last_used]);
2771 txq->txb[txq->q.last_used] = NULL;
2777 * Deallocate DMA queue.
2779 * Empty queue by removing and destroying all BD's.
2785 static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
2787 struct clx2_queue *q = &txq->q;
2788 struct pci_dev *dev = priv->pci_dev;
2793 /* first, empty all BD's */
2794 for (; q->first_empty != q->last_used;
2795 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
2796 ipw_queue_tx_free_tfd(priv, txq);
2799 /* free buffers belonging to queue itself */
2800 pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
2804 /* 0 fill whole structure */
2805 memset(txq, 0, sizeof(*txq));
2809 * Destroy all DMA queues and structures
2813 static void ipw_tx_queue_free(struct ipw_priv *priv)
2816 ipw_queue_tx_free(priv, &priv->txq_cmd);
2819 ipw_queue_tx_free(priv, &priv->txq[0]);
2820 ipw_queue_tx_free(priv, &priv->txq[1]);
2821 ipw_queue_tx_free(priv, &priv->txq[2]);
2822 ipw_queue_tx_free(priv, &priv->txq[3]);
2825 static inline void __maybe_wake_tx(struct ipw_priv *priv)
2827 if (netif_running(priv->net_dev)) {
2828 switch (priv->port_type) {
2829 case DCR_TYPE_MU_BSS:
2830 case DCR_TYPE_MU_IBSS:
2831 if (!(priv->status & STATUS_ASSOCIATED)) {
2835 netif_wake_queue(priv->net_dev);
2840 static inline void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
2842 /* First 3 bytes are manufacturer */
2843 bssid[0] = priv->mac_addr[0];
2844 bssid[1] = priv->mac_addr[1];
2845 bssid[2] = priv->mac_addr[2];
2847 /* Last bytes are random */
2848 get_random_bytes(&bssid[3], ETH_ALEN - 3);
2850 bssid[0] &= 0xfe; /* clear multicast bit */
2851 bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */
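/* Illustrative example: for a NIC MAC address of 00:11:22:33:44:55 the
 * generated BSSID keeps the 00:11:22 manufacturer prefix but with the
 * locally-administered bit set, giving 02:11:22:xx:xx:xx where the last
 * three bytes are random. */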
2854 static inline u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
2856 struct ipw_station_entry entry;
2859 for (i = 0; i < priv->num_stations; i++) {
2860 if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) {
2861 /* Another node is active in network */
2862 priv->missed_adhoc_beacons = 0;
2863 if (!(priv->config & CFG_STATIC_CHANNEL))
2864 /* when other nodes drop out, we drop out */
2865 priv->config &= ~CFG_ADHOC_PERSIST;
2871 if (i == MAX_STATIONS)
2872 return IPW_INVALID_STATION;
2874 IPW_DEBUG_SCAN("Adding AdHoc station: " MAC_FMT "\n", MAC_ARG(bssid));
2877 entry.support_mode = 0;
2878 memcpy(entry.mac_addr, bssid, ETH_ALEN);
2879 memcpy(priv->stations[i], bssid, ETH_ALEN);
2880 ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
2881 &entry, sizeof(entry));
2882 priv->num_stations++;
2887 static inline u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
2891 for (i = 0; i < priv->num_stations; i++)
2892 if (!memcmp(priv->stations[i], bssid, ETH_ALEN))
2895 return IPW_INVALID_STATION;
2898 static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
2902 if (!(priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED))) {
2903 IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
2907 IPW_DEBUG_ASSOC("Disassocation attempt from " MAC_FMT " "
2909 MAC_ARG(priv->assoc_request.bssid),
2910 priv->assoc_request.channel);
2912 priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
2913 priv->status |= STATUS_DISASSOCIATING;
2916 priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
2918 priv->assoc_request.assoc_type = HC_DISASSOCIATE;
2919 err = ipw_send_associate(priv, &priv->assoc_request);
2921 IPW_DEBUG_HC("Attempt to send [dis]associate command "
2928 static void ipw_disassociate(void *data)
2930 ipw_send_disassociate(data, 0);
2933 static void notify_wx_assoc_event(struct ipw_priv *priv)
2935 union iwreq_data wrqu;
2936 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
2937 if (priv->status & STATUS_ASSOCIATED)
2938 memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
2940 memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
2941 wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
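/* A zeroed ap_addr in the SIOCGIWAP event is the wireless extensions
 * convention for "not associated", so user-space listeners (e.g. iwevent)
 * can track association state from this single event. */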
2944 struct ipw_status_code {
2949 static const struct ipw_status_code ipw_status_codes[] = {
2950 {0x00, "Successful"},
2951 {0x01, "Unspecified failure"},
2952 {0x0A, "Cannot support all requested capabilities in the "
2953 "Capability information field"},
2954 {0x0B, "Reassociation denied due to inability to confirm that "
2955 "association exists"},
2956 {0x0C, "Association denied due to reason outside the scope of this "
2959 "Responding station does not support the specified authentication "
2962 "Received an Authentication frame with authentication sequence "
2963 "transaction sequence number out of expected sequence"},
2964 {0x0F, "Authentication rejected because of challenge failure"},
2965 {0x10, "Authentication rejected due to timeout waiting for next "
2966 "frame in sequence"},
2967 {0x11, "Association denied because AP is unable to handle additional "
2968 "associated stations"},
2970 "Association denied due to requesting station not supporting all "
2971 "of the datarates in the BSSBasicServiceSet Parameter"},
2973 "Association denied due to requesting station not supporting "
2974 "short preamble operation"},
2976 "Association denied due to requesting station not supporting "
2979 "Association denied due to requesting station not supporting "
2982 "Association denied due to requesting station not supporting "
2983 "short slot operation"},
2985 "Association denied due to requesting station not supporting "
2986 "DSSS-OFDM operation"},
2987 {0x28, "Invalid Information Element"},
2988 {0x29, "Group Cipher is not valid"},
2989 {0x2A, "Pairwise Cipher is not valid"},
2990 {0x2B, "AKMP is not valid"},
2991 {0x2C, "Unsupported RSN IE version"},
2992 {0x2D, "Invalid RSN IE Capabilities"},
2993 {0x2E, "Cipher suite is rejected per security policy"},
2996 #ifdef CONFIG_IPW_DEBUG
2997 static const char *ipw_get_status_code(u16 status)
3000 for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
3001 if (ipw_status_codes[i].status == status)
3002 return ipw_status_codes[i].reason;
3003 return "Unknown status value.";
3007 static inline void average_init(struct average *avg)
3009 memset(avg, 0, sizeof(*avg));
3012 static inline void average_add(struct average *avg, s16 val)
3014 avg->sum -= avg->entries[avg->pos];
3016 avg->entries[avg->pos++] = val;
3017 if (unlikely(avg->pos == AVG_ENTRIES)) {
3023 static inline s16 average_value(struct average *avg)
3025 if (unlikely(!avg->init)) {
3027 return avg->sum / avg->pos;
3031 return avg->sum / AVG_ENTRIES;
3034 static void ipw_reset_stats(struct ipw_priv *priv)
3036 u32 len = sizeof(u32);
3040 average_init(&priv->average_missed_beacons);
3041 average_init(&priv->average_rssi);
3042 average_init(&priv->average_noise);
3044 priv->last_rate = 0;
3045 priv->last_missed_beacons = 0;
3046 priv->last_rx_packets = 0;
3047 priv->last_tx_packets = 0;
3048 priv->last_tx_failures = 0;
3050 /* Firmware managed, reset only when NIC is restarted, so we have to
3051 * normalize on the current value */
3052 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
3053 &priv->last_rx_err, &len);
3054 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
3055 &priv->last_tx_failures, &len);
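/* Example of the normalization above (illustrative): if the firmware
 * already reports, say, 1200 CRC errors when stats are reset, the deltas
 * computed in ipw_gather_stats() are taken against 1200 rather than 0, so
 * only errors accumulated since this reset are counted. */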
3057 /* Driver managed, reset with each association */
3058 priv->missed_adhoc_beacons = 0;
3059 priv->missed_beacons = 0;
3060 priv->tx_packets = 0;
3061 priv->rx_packets = 0;
3065 static inline u32 ipw_get_max_rate(struct ipw_priv *priv)
3068 u32 mask = priv->rates_mask;
3069 /* If currently associated in B mode, restrict the maximum
3070 * rate match to B rates */
3071 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
3072 mask &= IEEE80211_CCK_RATES_MASK;
3074 /* TODO: Verify that the rate is supported by the current rates
3077 while (i && !(mask & i))
3080 case IEEE80211_CCK_RATE_1MB_MASK: return 1000000;
3081 case IEEE80211_CCK_RATE_2MB_MASK: return 2000000;
3082 case IEEE80211_CCK_RATE_5MB_MASK: return 5500000;
3083 case IEEE80211_OFDM_RATE_6MB_MASK: return 6000000;
3084 case IEEE80211_OFDM_RATE_9MB_MASK: return 9000000;
3085 case IEEE80211_CCK_RATE_11MB_MASK: return 11000000;
3086 case IEEE80211_OFDM_RATE_12MB_MASK: return 12000000;
3087 case IEEE80211_OFDM_RATE_18MB_MASK: return 18000000;
3088 case IEEE80211_OFDM_RATE_24MB_MASK: return 24000000;
3089 case IEEE80211_OFDM_RATE_36MB_MASK: return 36000000;
3090 case IEEE80211_OFDM_RATE_48MB_MASK: return 48000000;
3091 case IEEE80211_OFDM_RATE_54MB_MASK: return 54000000;
3094 if (priv->ieee->mode == IEEE_B)
3100 static u32 ipw_get_current_rate(struct ipw_priv *priv)
3102 u32 rate, len = sizeof(rate);
3105 if (!(priv->status & STATUS_ASSOCIATED))
3108 if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
3109 err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
3112 IPW_DEBUG_INFO("failed querying ordinals.\n");
3116 return ipw_get_max_rate(priv);
3119 case IPW_TX_RATE_1MB: return 1000000;
3120 case IPW_TX_RATE_2MB: return 2000000;
3121 case IPW_TX_RATE_5MB: return 5500000;
3122 case IPW_TX_RATE_6MB: return 6000000;
3123 case IPW_TX_RATE_9MB: return 9000000;
3124 case IPW_TX_RATE_11MB: return 11000000;
3125 case IPW_TX_RATE_12MB: return 12000000;
3126 case IPW_TX_RATE_18MB: return 18000000;
3127 case IPW_TX_RATE_24MB: return 24000000;
3128 case IPW_TX_RATE_36MB: return 36000000;
3129 case IPW_TX_RATE_48MB: return 48000000;
3130 case IPW_TX_RATE_54MB: return 54000000;
3136 #define PERFECT_RSSI (-50)
3137 #define WORST_RSSI (-85)
3138 #define IPW_STATS_INTERVAL (2 * HZ)
3139 static void ipw_gather_stats(struct ipw_priv *priv)
3141 u32 rx_err, rx_err_delta, rx_packets_delta;
3142 u32 tx_failures, tx_failures_delta, tx_packets_delta;
3143 u32 missed_beacons_percent, missed_beacons_delta;
3145 u32 len = sizeof(u32);
3147 u32 beacon_quality, signal_quality, tx_quality, rx_quality,
3150 if (!(priv->status & STATUS_ASSOCIATED)) {
3155 /* Update the statistics */
3156 ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
3157 &priv->missed_beacons, &len);
3158 missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
3159 priv->last_missed_beacons = priv->missed_beacons;
3160 if (priv->assoc_request.beacon_interval) {
3161 missed_beacons_percent = missed_beacons_delta *
3162 (HZ * priv->assoc_request.beacon_interval) /
3163 (IPW_STATS_INTERVAL * 10);
3165 missed_beacons_percent = 0;
3167 average_add(&priv->average_missed_beacons, missed_beacons_percent);
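/* Illustrative arithmetic (assuming beacon_interval is in TU of roughly
 * 1 ms and IPW_STATS_INTERVAL is 2*HZ): about 2000 / beacon_interval
 * beacons are expected per stats interval, so the expression above reduces
 * to delta * beacon_interval / 20, e.g. 5 missed beacons at a 100 TU
 * interval give ~25%. */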
3169 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
3170 rx_err_delta = rx_err - priv->last_rx_err;
3171 priv->last_rx_err = rx_err;
3173 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
3174 tx_failures_delta = tx_failures - priv->last_tx_failures;
3175 priv->last_tx_failures = tx_failures;
3177 rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
3178 priv->last_rx_packets = priv->rx_packets;
3180 tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
3181 priv->last_tx_packets = priv->tx_packets;
3183 /* Calculate quality based on the following:
3185 * Missed beacon: 100% = 0, 0% = 70% missed
3186 * Rate: 60% = 1Mbs, 100% = Max
3187 * Rx and Tx errors represent a straight % of total Rx/Tx
3188 * RSSI: 100% = > -50, 0% = < -85
3189 * Rx errors: 100% = 0, 0% = 50% missed
3191 * The lowest computed quality is used.
3194 #define BEACON_THRESHOLD 5
3195 beacon_quality = 100 - missed_beacons_percent;
3196 if (beacon_quality < BEACON_THRESHOLD)
3199 beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
3200 (100 - BEACON_THRESHOLD);
3201 IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
3202 beacon_quality, missed_beacons_percent);
3204 priv->last_rate = ipw_get_current_rate(priv);
3205 rate_quality = priv->last_rate * 40 / ipw_get_max_rate(priv) + 60;
3206 IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
3207 rate_quality, priv->last_rate / 1000000);
3209 if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
3210 rx_quality = 100 - (rx_err_delta * 100) /
3211 (rx_packets_delta + rx_err_delta);
3214 IPW_DEBUG_STATS("Rx quality : %3d%% (%u errors, %u packets)\n",
3215 rx_quality, rx_err_delta, rx_packets_delta);
3217 if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
3218 tx_quality = 100 - (tx_failures_delta * 100) /
3219 (tx_packets_delta + tx_failures_delta);
3222 IPW_DEBUG_STATS("Tx quality : %3d%% (%u errors, %u packets)\n",
3223 tx_quality, tx_failures_delta, tx_packets_delta);
3225 rssi = average_value(&priv->average_rssi);
3226 if (rssi > PERFECT_RSSI)
3227 signal_quality = 100;
3228 else if (rssi < WORST_RSSI)
3231 signal_quality = (rssi - WORST_RSSI) * 100 /
3232 (PERFECT_RSSI - WORST_RSSI);
3233 IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
3234 signal_quality, rssi);
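/* Example of the mapping above (illustrative): an average RSSI of -60 dBm
 * gives (-60 - -85) * 100 / (-50 - -85) = 2500 / 35, i.e. roughly 71%. */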
3236 quality = min(beacon_quality,
3238 min(tx_quality, min(rx_quality, signal_quality))));
3239 if (quality == beacon_quality)
3240 IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
3242 if (quality == rate_quality)
3243 IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
3245 if (quality == tx_quality)
3246 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
3248 if (quality == rx_quality)
3249 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
3251 if (quality == signal_quality)
3252 IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
3255 priv->quality = quality;
3257 queue_delayed_work(priv->workqueue, &priv->gather_stats,
3258 IPW_STATS_INTERVAL);
3262 * Handle host notification packet.
3263 * Called from interrupt routine
3265 static inline void ipw_rx_notification(struct ipw_priv *priv,
3266 struct ipw_rx_notification *notif)
3268 IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, notif->size);
3270 switch (notif->subtype) {
3271 case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
3272 struct notif_association *assoc = ¬if->u.assoc;
3274 switch (assoc->state) {
3275 case CMAS_ASSOCIATED:{
3276 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3278 "associated: '%s' " MAC_FMT
3280 escape_essid(priv->essid,
3282 MAC_ARG(priv->bssid));
3284 switch (priv->ieee->iw_mode) {
3286 memcpy(priv->ieee->bssid,
3287 priv->bssid, ETH_ALEN);
3291 memcpy(priv->ieee->bssid,
3292 priv->bssid, ETH_ALEN);
3294 /* clear out the station table */
3295 priv->num_stations = 0;
3298 ("queueing adhoc check\n");
3299 queue_delayed_work(priv->
3309 priv->status &= ~STATUS_ASSOCIATING;
3310 priv->status |= STATUS_ASSOCIATED;
3312 netif_carrier_on(priv->net_dev);
3313 if (netif_queue_stopped(priv->net_dev)) {
3316 netif_wake_queue(priv->net_dev);
3319 ("starting queue\n");
3320 netif_start_queue(priv->
3324 ipw_reset_stats(priv);
3325 /* Ensure the rate is updated immediately */
3327 ipw_get_current_rate(priv);
3328 schedule_work(&priv->gather_stats);
3329 notify_wx_assoc_event(priv);
3331 /* queue_delayed_work(priv->workqueue,
3332 &priv->request_scan,
3333 SCAN_ASSOCIATED_INTERVAL);
3338 case CMAS_AUTHENTICATED:{
3340 status & (STATUS_ASSOCIATED |
3342 #ifdef CONFIG_IPW_DEBUG
3343 struct notif_authenticate *auth
3345 IPW_DEBUG(IPW_DL_NOTIF |
3348 "deauthenticated: '%s' "
3350 ": (0x%04X) - %s \n",
3355 MAC_ARG(priv->bssid),
3356 ntohs(auth->status),
3363 ~(STATUS_ASSOCIATING |
3367 netif_carrier_off(priv->
3369 netif_stop_queue(priv->net_dev);
3370 queue_work(priv->workqueue,
3371 &priv->request_scan);
3372 notify_wx_assoc_event(priv);
3376 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3378 "authenticated: '%s' " MAC_FMT
3380 escape_essid(priv->essid,
3382 MAC_ARG(priv->bssid));
3387 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3389 "disassociated: '%s' " MAC_FMT
3391 escape_essid(priv->essid,
3393 MAC_ARG(priv->bssid));
3396 ~(STATUS_DISASSOCIATING |
3397 STATUS_ASSOCIATING |
3398 STATUS_ASSOCIATED | STATUS_AUTH);
3400 netif_stop_queue(priv->net_dev);
3401 if (!(priv->status & STATUS_ROAMING)) {
3402 netif_carrier_off(priv->
3404 notify_wx_assoc_event(priv);
3406 /* Cancel any queued work ... */
3407 cancel_delayed_work(&priv->
3409 cancel_delayed_work(&priv->
3412 /* Queue up another scan... */
3413 queue_work(priv->workqueue,
3414 &priv->request_scan);
3416 cancel_delayed_work(&priv->
3419 priv->status |= STATUS_ROAMING;
3420 queue_work(priv->workqueue,
3421 &priv->request_scan);
3424 ipw_reset_stats(priv);
3429 IPW_ERROR("assoc: unknown (%d)\n",
3437 case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
3438 struct notif_authenticate *auth = ¬if->u.auth;
3439 switch (auth->state) {
3440 case CMAS_AUTHENTICATED:
3441 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
3442 "authenticated: '%s' " MAC_FMT " \n",
3443 escape_essid(priv->essid,
3445 MAC_ARG(priv->bssid));
3446 priv->status |= STATUS_AUTH;
3450 if (priv->status & STATUS_AUTH) {
3451 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3453 "authentication failed (0x%04X): %s\n",
3454 ntohs(auth->status),
3455 ipw_get_status_code(ntohs
3459 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3461 "deauthenticated: '%s' " MAC_FMT "\n",
3462 escape_essid(priv->essid,
3464 MAC_ARG(priv->bssid));
3466 priv->status &= ~(STATUS_ASSOCIATING |
3470 netif_carrier_off(priv->net_dev);
3471 netif_stop_queue(priv->net_dev);
3472 queue_work(priv->workqueue,
3473 &priv->request_scan);
3474 notify_wx_assoc_event(priv);
3477 case CMAS_TX_AUTH_SEQ_1:
3478 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3479 IPW_DL_ASSOC, "AUTH_SEQ_1\n");
3481 case CMAS_RX_AUTH_SEQ_2:
3482 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3483 IPW_DL_ASSOC, "AUTH_SEQ_2\n");
3485 case CMAS_AUTH_SEQ_1_PASS:
3486 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3487 IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
3489 case CMAS_AUTH_SEQ_1_FAIL:
3490 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3491 IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
3493 case CMAS_TX_AUTH_SEQ_3:
3494 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3495 IPW_DL_ASSOC, "AUTH_SEQ_3\n");
3497 case CMAS_RX_AUTH_SEQ_4:
3498 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3499 IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
3501 case CMAS_AUTH_SEQ_2_PASS:
3502 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3503 IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
3505 case CMAS_AUTH_SEQ_2_FAIL:
3506 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3507 IPW_DL_ASSOC, "AUT_SEQ_2_FAIL\n");
3510 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3511 IPW_DL_ASSOC, "TX_ASSOC\n");
3513 case CMAS_RX_ASSOC_RESP:
3514 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3515 IPW_DL_ASSOC, "RX_ASSOC_RESP\n");
3517 case CMAS_ASSOCIATED:
3518 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3519 IPW_DL_ASSOC, "ASSOCIATED\n");
3522 IPW_DEBUG_NOTIF("auth: failure - %d\n",
3529 case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
3530 struct notif_channel_result *x =
3531 ¬if->u.channel_result;
3533 if (notif->size == sizeof(*x)) {
3534 IPW_DEBUG_SCAN("Scan result for channel %d\n",
3537 IPW_DEBUG_SCAN("Scan result of wrong size %d "
3538 "(should be %zd)\n",
3539 notif->size, sizeof(*x));
3544 case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
3545 struct notif_scan_complete *x = ¬if->u.scan_complete;
3546 if (notif->size == sizeof(*x)) {
3548 ("Scan completed: type %d, %d channels, "
3549 "%d status\n", x->scan_type,
3550 x->num_channels, x->status);
3552 IPW_ERROR("Scan completed of wrong size %d "
3553 "(should be %zd)\n",
3554 notif->size, sizeof(*x));
3558 ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
3560 cancel_delayed_work(&priv->scan_check);
3562 if (!(priv->status & (STATUS_ASSOCIATED |
3563 STATUS_ASSOCIATING |
3565 STATUS_DISASSOCIATING)))
3566 queue_work(priv->workqueue, &priv->associate);
3567 else if (priv->status & STATUS_ROAMING) {
3568 /* If a scan completed and we are in roam mode, then
3569 * the scan that completed was the one requested as a
3570 * result of entering roam... so, schedule the
3572 queue_work(priv->workqueue, &priv->roam);
3573 } else if (priv->status & STATUS_SCAN_PENDING)
3574 queue_work(priv->workqueue,
3575 &priv->request_scan);
3577 priv->ieee->scans++;
3581 case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
3582 struct notif_frag_length *x = ¬if->u.frag_len;
3584 if (notif->size == sizeof(*x)) {
3585 IPW_ERROR("Frag length: %d\n", x->frag_length);
3587 IPW_ERROR("Frag length of wrong size %d "
3588 "(should be %zd)\n",
3589 notif->size, sizeof(*x));
3594 case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
3595 struct notif_link_deterioration *x =
3596 ¬if->u.link_deterioration;
3597 if (notif->size == sizeof(*x)) {
3598 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
3599 "link deterioration: '%s' " MAC_FMT
3600 " \n", escape_essid(priv->essid,
3602 MAC_ARG(priv->bssid));
3603 memcpy(&priv->last_link_deterioration, x,
3606 IPW_ERROR("Link Deterioration of wrong size %d "
3607 "(should be %zd)\n",
3608 notif->size, sizeof(*x));
3613 case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
3614 IPW_ERROR("Dino config\n");
3616 && priv->hcmd->cmd == HOST_CMD_DINO_CONFIG) {
3617 /* TODO: Do anything special? */
3619 IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
3624 case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
3625 struct notif_beacon_state *x = ¬if->u.beacon_state;
3626 if (notif->size != sizeof(*x)) {
3628 ("Beacon state of wrong size %d (should "
3629 "be %zd)\n", notif->size, sizeof(*x));
3633 if (x->state == HOST_NOTIFICATION_STATUS_BEACON_MISSING) {
3634 if (priv->status & STATUS_SCANNING) {
3635 /* Stop scan to keep fw from getting
3637 queue_work(priv->workqueue,
3641 if (x->number > priv->missed_beacon_threshold &&
3642 priv->status & STATUS_ASSOCIATED) {
3643 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
3645 "Missed beacon: %d - disassociate\n",
3647 queue_work(priv->workqueue,
3648 &priv->disassociate);
3649 } else if (x->number > priv->roaming_threshold) {
3650 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
3651 "Missed beacon: %d - initiate "
3652 "roaming\n", x->number);
3653 queue_work(priv->workqueue,
3656 IPW_DEBUG_NOTIF("Missed beacon: %d\n",
3660 priv->notif_missed_beacons = x->number;
3667 case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
3668 struct notif_tgi_tx_key *x = ¬if->u.tgi_tx_key;
3669 if (notif->size == sizeof(*x)) {
3670 IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
3671 "0x%02x station %d\n",
3672 x->key_state, x->security_type,
3678 ("TGi Tx Key of wrong size %d (should be %zd)\n",
3679 notif->size, sizeof(*x));
3683 case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
3684 struct notif_calibration *x = ¬if->u.calibration;
3686 if (notif->size == sizeof(*x)) {
3687 memcpy(&priv->calib, x, sizeof(*x));
3688 IPW_DEBUG_INFO("TODO: Calibration\n");
3693 ("Calibration of wrong size %d (should be %zd)\n",
3694 notif->size, sizeof(*x));
3698 case HOST_NOTIFICATION_NOISE_STATS:{
3699 if (notif->size == sizeof(u32)) {
3701 (u8) (notif->u.noise.value & 0xff);
3702 average_add(&priv->average_noise,
3708 ("Noise stat is wrong size %d (should be %zd)\n",
3709 notif->size, sizeof(u32));
3714 IPW_ERROR("Unknown notification: "
3715 "subtype=%d,flags=0x%2x,size=%d\n",
3716 notif->subtype, notif->flags, notif->size);
3721 * Destroys all DMA structures and initializes them again
3724 * @return error code
3726 static int ipw_queue_reset(struct ipw_priv *priv)
3729 /** @todo customize queue sizes */
3730 int nTx = 64, nTxCmd = 8;
3731 ipw_tx_queue_free(priv);
3733 rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
3734 CX2_TX_CMD_QUEUE_READ_INDEX,
3735 CX2_TX_CMD_QUEUE_WRITE_INDEX,
3736 CX2_TX_CMD_QUEUE_BD_BASE,
3737 CX2_TX_CMD_QUEUE_BD_SIZE);
3739 IPW_ERROR("Tx Cmd queue init failed\n");
3743 rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
3744 CX2_TX_QUEUE_0_READ_INDEX,
3745 CX2_TX_QUEUE_0_WRITE_INDEX,
3746 CX2_TX_QUEUE_0_BD_BASE, CX2_TX_QUEUE_0_BD_SIZE);
3748 IPW_ERROR("Tx 0 queue init failed\n");
3751 rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
3752 CX2_TX_QUEUE_1_READ_INDEX,
3753 CX2_TX_QUEUE_1_WRITE_INDEX,
3754 CX2_TX_QUEUE_1_BD_BASE, CX2_TX_QUEUE_1_BD_SIZE);
3756 IPW_ERROR("Tx 1 queue init failed\n");
3759 rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
3760 CX2_TX_QUEUE_2_READ_INDEX,
3761 CX2_TX_QUEUE_2_WRITE_INDEX,
3762 CX2_TX_QUEUE_2_BD_BASE, CX2_TX_QUEUE_2_BD_SIZE);
3764 IPW_ERROR("Tx 2 queue init failed\n");
3767 rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
3768 CX2_TX_QUEUE_3_READ_INDEX,
3769 CX2_TX_QUEUE_3_WRITE_INDEX,
3770 CX2_TX_QUEUE_3_BD_BASE, CX2_TX_QUEUE_3_BD_SIZE);
3772 IPW_ERROR("Tx 3 queue init failed\n");
3776 priv->rx_bufs_min = 0;
3777 priv->rx_pend_max = 0;
3781 ipw_tx_queue_free(priv);
3786 * Reclaim Tx queue entries that are no longer used by the NIC.
3788 * When the FW advances the 'R' index, all entries between the old and
3789 * new 'R' index need to be reclaimed. As a result, some free space
3790 * forms. If there is enough free space (> low mark), wake the Tx queue.
3792 * @note Need to protect against garbage in the 'R' index
3796 * @return Number of used entries remaining in the queue
3798 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
3799 struct clx2_tx_queue *txq, int qindex)
3803 struct clx2_queue *q = &txq->q;
3805 hw_tail = ipw_read32(priv, q->reg_r);
3806 if (hw_tail >= q->n_bd) {
3808 ("Read index for DMA queue (%d) is out of range [0-%d)\n",
3812 for (; q->last_used != hw_tail;
3813 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
3814 ipw_queue_tx_free_tfd(priv, txq);
3818 if (ipw_queue_space(q) > q->low_mark && qindex >= 0) {
3819 __maybe_wake_tx(priv);
3821 used = q->first_empty - q->last_used;
3828 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
3831 struct clx2_tx_queue *txq = &priv->txq_cmd;
3832 struct clx2_queue *q = &txq->q;
3833 struct tfd_frame *tfd;
3835 if (ipw_queue_space(q) < (sync ? 1 : 2)) {
3836 IPW_ERROR("No space for Tx\n");
3840 tfd = &txq->bd[q->first_empty];
3841 txq->txb[q->first_empty] = NULL;
3843 memset(tfd, 0, sizeof(*tfd));
3844 tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
3845 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
3847 tfd->u.cmd.index = hcmd;
3848 tfd->u.cmd.length = len;
3849 memcpy(tfd->u.cmd.payload, buf, len);
3850 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
3851 ipw_write32(priv, q->reg_w, q->first_empty);
3852 _ipw_read32(priv, 0x90);
3858 * Rx theory of operation
3860 * The host allocates 32 DMA target addresses and passes the host address
3861 * to the firmware at register CX2_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
3865 * The host/firmware share two index registers for managing the Rx buffers.
3867 * The READ index maps to the first position that the firmware may be writing
3868 * to -- the driver can read up to (but not including) this position and get
3870 * The READ index is managed by the firmware once the card is enabled.
3872 * The WRITE index maps to the last position the driver has read from -- the
3873 * position preceding WRITE is the last slot the firmware can place a packet.
3875 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
3878 * During initialization the host sets up the READ queue position to the first
3879 * INDEX position, and WRITE to the last (READ - 1 wrapped)
3881 * When the firmware places a packet in a buffer it will advance the READ index
3882 * and fire the RX interrupt. The driver can then query the READ index and
3883 * process as many packets as possible, moving the WRITE index forward as it
3884 * resets the Rx queue buffers with new memory.
3886 * The management in the driver is as follows:
3887 * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When
3888 * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
3889 * to replenish the ipw->rxq->rx_free.
3890 * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
3891 * ipw->rxq is replenished and the READ INDEX is updated (updating the
3892 * 'processed' and 'read' driver indexes as well)
3893 * + A received packet is processed and handed to the kernel network stack,
3894 * detached from the ipw->rxq. The driver 'processed' index is updated.
3895 * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
3896 * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
3897 * INDEX is not incremented and ipw->status(RX_STALLED) is set. If there
3898 * were enough free buffers and RX_STALLED is set, it is cleared.
3903 * ipw_rx_queue_alloc() Allocates rx_free
3904 * ipw_rx_queue_replenish() Replenishes rx_free list from rx_used, and calls
3905 * ipw_rx_queue_restock
3906 * ipw_rx_queue_restock() Moves available buffers from rx_free into Rx
3907 * queue, updates firmware pointers, and updates
3908 * the WRITE index. If insufficient rx_free buffers
3909 * are available, schedules ipw_rx_queue_replenish
3911 * -- enable interrupts --
3912 * ISR - ipw_rx() Detach ipw_rx_mem_buffers from pool up to the
3913 * READ INDEX, detaching the SKB from the pool.
3914 * Moves the packet buffer from queue to rx_used.
3915 * Calls ipw_rx_queue_restock to refill any empty
3922 * If there are slots in the RX queue that need to be restocked,
3923 * and we have free pre-allocated buffers, fill the ranks as much
3924 * as we can pulling from rx_free.
3926 * This moves the 'write' index forward to catch up with 'processed', and
3927 * also updates the memory address in the firmware to reference the new
3930 static void ipw_rx_queue_restock(struct ipw_priv *priv)
3932 struct ipw_rx_queue *rxq = priv->rxq;
3933 struct list_head *element;
3934 struct ipw_rx_mem_buffer *rxb;
3935 unsigned long flags;
3938 spin_lock_irqsave(&rxq->lock, flags);
3940 while ((rxq->write != rxq->processed) && (rxq->free_count)) {
3941 element = rxq->rx_free.next;
3942 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
3945 ipw_write32(priv, CX2_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
3947 rxq->queue[rxq->write] = rxb;
3948 rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
3951 spin_unlock_irqrestore(&rxq->lock, flags);
3953 /* If the pre-allocated buffer pool is dropping low, schedule to
3955 if (rxq->free_count <= RX_LOW_WATERMARK)
3956 queue_work(priv->workqueue, &priv->rx_replenish);
3958 /* If we've added more space for the firmware to place data, tell it */
3959 if (write != rxq->write)
3960 ipw_write32(priv, CX2_RX_WRITE_INDEX, rxq->write);
3964 * Move all used packet from rx_used to rx_free, allocating a new SKB for each.
3965 * Also restock the Rx queue via ipw_rx_queue_restock.
3967 * This is called as a scheduled work item (except during initialization)
3969 static void ipw_rx_queue_replenish(void *data)
3971 struct ipw_priv *priv = data;
3972 struct ipw_rx_queue *rxq = priv->rxq;
3973 struct list_head *element;
3974 struct ipw_rx_mem_buffer *rxb;
3975 unsigned long flags;
3977 spin_lock_irqsave(&rxq->lock, flags);
3978 while (!list_empty(&rxq->rx_used)) {
3979 element = rxq->rx_used.next;
3980 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
3981 rxb->skb = alloc_skb(CX2_RX_BUF_SIZE, GFP_ATOMIC);
3983 printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
3984 priv->net_dev->name);
3985 /* We don't reschedule replenish work here -- we will
3986 * call the restock method and if it still needs
3987 * more buffers it will schedule replenish */
3992 rxb->rxb = (struct ipw_rx_buffer *)rxb->skb->data;
3994 pci_map_single(priv->pci_dev, rxb->skb->data,
3995 CX2_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3997 list_add_tail(&rxb->list, &rxq->rx_free);
4000 spin_unlock_irqrestore(&rxq->lock, flags);
4002 ipw_rx_queue_restock(priv);
4005 /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
4006 * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
4007 * This free routine walks the list of POOL entries and if the SKB is
4008 * non-NULL, it is unmapped and freed.
4010 static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
4017 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
4018 if (rxq->pool[i].skb != NULL) {
4019 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
4020 CX2_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
4021 dev_kfree_skb(rxq->pool[i].skb);
4028 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
4030 struct ipw_rx_queue *rxq;
4033 rxq = kmalloc(sizeof(*rxq), GFP_KERNEL);
4034 if (unlikely(!rxq)) {
4035 IPW_ERROR("memory allocation failed\n");
4038 memset(rxq, 0, sizeof(*rxq));
4039 spin_lock_init(&rxq->lock);
4040 INIT_LIST_HEAD(&rxq->rx_free);
4041 INIT_LIST_HEAD(&rxq->rx_used);
4043 /* Fill the rx_used queue with _all_ of the Rx buffers */
4044 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
4045 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
4047 /* Set us so that we have processed and used all buffers, but have
4048 * not restocked the Rx queue with fresh buffers */
4049 rxq->read = rxq->write = 0;
4050 rxq->processed = RX_QUEUE_SIZE - 1;
4051 rxq->free_count = 0;
4056 static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
4058 rate &= ~IEEE80211_BASIC_RATE_MASK;
4059 if (ieee_mode == IEEE_A) {
4061 case IEEE80211_OFDM_RATE_6MB:
4062 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ?
4064 case IEEE80211_OFDM_RATE_9MB:
4065 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ?
4067 case IEEE80211_OFDM_RATE_12MB:
4069 rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
4070 case IEEE80211_OFDM_RATE_18MB:
4072 rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
4073 case IEEE80211_OFDM_RATE_24MB:
4075 rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
4076 case IEEE80211_OFDM_RATE_36MB:
4078 rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
4079 case IEEE80211_OFDM_RATE_48MB:
4081 rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
4082 case IEEE80211_OFDM_RATE_54MB:
4084 rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
4092 case IEEE80211_CCK_RATE_1MB:
4093 return priv->rates_mask & IEEE80211_CCK_RATE_1MB_MASK ? 1 : 0;
4094 case IEEE80211_CCK_RATE_2MB:
4095 return priv->rates_mask & IEEE80211_CCK_RATE_2MB_MASK ? 1 : 0;
4096 case IEEE80211_CCK_RATE_5MB:
4097 return priv->rates_mask & IEEE80211_CCK_RATE_5MB_MASK ? 1 : 0;
4098 case IEEE80211_CCK_RATE_11MB:
4099 return priv->rates_mask & IEEE80211_CCK_RATE_11MB_MASK ? 1 : 0;
4102 /* If we are limited to B modulations, bail at this point */
4103 if (ieee_mode == IEEE_B)
4108 case IEEE80211_OFDM_RATE_6MB:
4109 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ? 1 : 0;
4110 case IEEE80211_OFDM_RATE_9MB:
4111 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ? 1 : 0;
4112 case IEEE80211_OFDM_RATE_12MB:
4113 return priv->rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
4114 case IEEE80211_OFDM_RATE_18MB:
4115 return priv->rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
4116 case IEEE80211_OFDM_RATE_24MB:
4117 return priv->rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
4118 case IEEE80211_OFDM_RATE_36MB:
4119 return priv->rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
4120 case IEEE80211_OFDM_RATE_48MB:
4121 return priv->rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
4122 case IEEE80211_OFDM_RATE_54MB:
4123 return priv->rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
4129 static int ipw_compatible_rates(struct ipw_priv *priv,
4130 const struct ieee80211_network *network,
4131 struct ipw_supported_rates *rates)
4135 memset(rates, 0, sizeof(*rates));
4136 num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
4137 rates->num_rates = 0;
4138 for (i = 0; i < num_rates; i++) {
4139 if (!ipw_is_rate_in_mask
4140 (priv, network->mode, network->rates[i])) {
4141 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
4142 network->rates[i], priv->rates_mask);
4146 rates->supported_rates[rates->num_rates++] = network->rates[i];
4150 min(network->rates_ex_len, (u8) (IPW_MAX_RATES - num_rates));
4151 for (i = 0; i < num_rates; i++) {
4152 if (!ipw_is_rate_in_mask
4153 (priv, network->mode, network->rates_ex[i])) {
4154 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
4155 network->rates_ex[i], priv->rates_mask);
4159 rates->supported_rates[rates->num_rates++] =
4160 network->rates_ex[i];
4163 return rates->num_rates;
4166 static inline void ipw_copy_rates(struct ipw_supported_rates *dest,
4167 const struct ipw_supported_rates *src)
4170 for (i = 0; i < src->num_rates; i++)
4171 dest->supported_rates[i] = src->supported_rates[i];
4172 dest->num_rates = src->num_rates;
4175 /* TODO: Look at sniffed packets in the air to determine if the basic rate
4176 * mask should ever be used -- right now all callers that add the scan rates are
4177 * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */
4178 static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
4179 u8 modulation, u32 rate_mask)
4181 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
4182 IEEE80211_BASIC_RATE_MASK : 0;
4184 if (rate_mask & IEEE80211_CCK_RATE_1MB_MASK)
4185 rates->supported_rates[rates->num_rates++] =
4186 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
4188 if (rate_mask & IEEE80211_CCK_RATE_2MB_MASK)
4189 rates->supported_rates[rates->num_rates++] =
4190 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
4192 if (rate_mask & IEEE80211_CCK_RATE_5MB_MASK)
4193 rates->supported_rates[rates->num_rates++] = basic_mask |
4194 IEEE80211_CCK_RATE_5MB;
4196 if (rate_mask & IEEE80211_CCK_RATE_11MB_MASK)
4197 rates->supported_rates[rates->num_rates++] = basic_mask |
4198 IEEE80211_CCK_RATE_11MB;
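/* Illustrative example (assuming the standard 802.11 rate encoding of
 * 500 kbps units with the high bit marking a basic rate): the 1 Mb CCK
 * entry added above is always encoded as 0x82 (0x02 | 0x80), while the
 * 5.5 Mb entry becomes 0x8B in an OFDM (G) scan and 0x0B otherwise. */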
4201 static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
4202 u8 modulation, u32 rate_mask)
4204 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
4205 IEEE80211_BASIC_RATE_MASK : 0;
4207 if (rate_mask & IEEE80211_OFDM_RATE_6MB_MASK)
4208 rates->supported_rates[rates->num_rates++] = basic_mask |
4209 IEEE80211_OFDM_RATE_6MB;
4211 if (rate_mask & IEEE80211_OFDM_RATE_9MB_MASK)
4212 rates->supported_rates[rates->num_rates++] =
4213 IEEE80211_OFDM_RATE_9MB;
4215 if (rate_mask & IEEE80211_OFDM_RATE_12MB_MASK)
4216 rates->supported_rates[rates->num_rates++] = basic_mask |
4217 IEEE80211_OFDM_RATE_12MB;
4219 if (rate_mask & IEEE80211_OFDM_RATE_18MB_MASK)
4220 rates->supported_rates[rates->num_rates++] =
4221 IEEE80211_OFDM_RATE_18MB;
4223 if (rate_mask & IEEE80211_OFDM_RATE_24MB_MASK)
4224 rates->supported_rates[rates->num_rates++] = basic_mask |
4225 IEEE80211_OFDM_RATE_24MB;
4227 if (rate_mask & IEEE80211_OFDM_RATE_36MB_MASK)
4228 rates->supported_rates[rates->num_rates++] =
4229 IEEE80211_OFDM_RATE_36MB;
4231 if (rate_mask & IEEE80211_OFDM_RATE_48MB_MASK)
4232 rates->supported_rates[rates->num_rates++] =
4233 IEEE80211_OFDM_RATE_48MB;
4235 if (rate_mask & IEEE80211_OFDM_RATE_54MB_MASK)
4236 rates->supported_rates[rates->num_rates++] =
4237 IEEE80211_OFDM_RATE_54MB;
4240 struct ipw_network_match {
4241 struct ieee80211_network *network;
4242 struct ipw_supported_rates rates;
4245 static int ipw_best_network(struct ipw_priv *priv,
4246 struct ipw_network_match *match,
4247 struct ieee80211_network *network, int roaming)
4249 struct ipw_supported_rates rates;
4251 /* Verify that this network's capability is compatible with the
4252 * current mode (AdHoc or Infrastructure) */
4253 if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
4254 !(network->capability & WLAN_CAPABILITY_ESS)) ||
4255 (priv->ieee->iw_mode == IW_MODE_ADHOC &&
4256 !(network->capability & WLAN_CAPABILITY_IBSS))) {
4257 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded due to "
4258 "capability mismatch.\n",
4259 escape_essid(network->ssid, network->ssid_len),
4260 MAC_ARG(network->bssid));
4264 /* If we do not have an ESSID for this AP, we can not associate with
4266 if (network->flags & NETWORK_EMPTY_ESSID) {
4267 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4268 "because of hidden ESSID.\n",
4269 escape_essid(network->ssid, network->ssid_len),
4270 MAC_ARG(network->bssid));
4274 if (unlikely(roaming)) {
4275 /* If we are roaming, check whether this is a valid
4276 * network to try and roam to */
4277 if ((network->ssid_len != match->network->ssid_len) ||
4278 memcmp(network->ssid, match->network->ssid,
4279 network->ssid_len)) {
4280 IPW_DEBUG_ASSOC("Netowrk '%s (" MAC_FMT ")' excluded "
4281 "because of non-network ESSID.\n",
4282 escape_essid(network->ssid,
4284 MAC_ARG(network->bssid));
4288 /* If an ESSID has been configured then compare the broadcast
4290 if ((priv->config & CFG_STATIC_ESSID) &&
4291 ((network->ssid_len != priv->essid_len) ||
4292 memcmp(network->ssid, priv->essid,
4293 min(network->ssid_len, priv->essid_len)))) {
4294 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
4296 escape_essid(network->ssid, network->ssid_len),
4298 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4299 "because of ESSID mismatch: '%s'.\n",
4300 escaped, MAC_ARG(network->bssid),
4301 escape_essid(priv->essid,
4307 /* If the old network's signal is stronger than this one, don't bother
4308 * testing everything else. */
4309 if (match->network && match->network->stats.rssi > network->stats.rssi) {
4310 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
4312 escape_essid(network->ssid, network->ssid_len),
4314 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded because "
4315 "'%s (" MAC_FMT ")' has a stronger signal.\n",
4316 escaped, MAC_ARG(network->bssid),
4317 escape_essid(match->network->ssid,
4318 match->network->ssid_len),
4319 MAC_ARG(match->network->bssid));
4323 /* If this network has already had an association attempt within the
4324 * last 5 seconds, do not try to associate again... */
4325 if (network->last_associate &&
4326 time_after(network->last_associate + (HZ * 5UL), jiffies)) {
4327 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4328 "because of storming (%lu since last "
4329 "assoc attempt).\n",
4330 escape_essid(network->ssid, network->ssid_len),
4331 MAC_ARG(network->bssid),
4332 (jiffies - network->last_associate) / HZ);
4336 /* Now go through and see if the requested network is valid... */
4337 if (priv->ieee->scan_age != 0 &&
4338 jiffies - network->last_scanned > priv->ieee->scan_age) {
4339 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4340 "because of age: %lums.\n",
4341 escape_essid(network->ssid, network->ssid_len),
4342 MAC_ARG(network->bssid),
4343 (jiffies - network->last_scanned) * 1000 / HZ);
4347 if ((priv->config & CFG_STATIC_CHANNEL) &&
4348 (network->channel != priv->channel)) {
4349 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4350 "because of channel mismatch: %d != %d.\n",
4351 escape_essid(network->ssid, network->ssid_len),
4352 MAC_ARG(network->bssid),
4353 network->channel, priv->channel);
4357 /* Verify privacy compatibility */
4358 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
4359 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
4360 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4361 "because of privacy mismatch: %s != %s.\n",
4362 escape_essid(network->ssid, network->ssid_len),
4363 MAC_ARG(network->bssid),
4364 priv->capability & CAP_PRIVACY_ON ? "on" :
4366 network->capability &
4367 WLAN_CAPABILITY_PRIVACY ? "on" : "off");
4371 if ((priv->config & CFG_STATIC_BSSID) &&
4372 memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
4373 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4374 "because of BSSID mismatch: " MAC_FMT ".\n",
4375 escape_essid(network->ssid, network->ssid_len),
4376 MAC_ARG(network->bssid), MAC_ARG(priv->bssid));
4380 /* Filter out any incompatible freq / mode combinations */
4381 if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
4382 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4383 "because of invalid frequency/mode "
4385 escape_essid(network->ssid, network->ssid_len),
4386 MAC_ARG(network->bssid));
4390 ipw_compatible_rates(priv, network, &rates);
4391 if (rates.num_rates == 0) {
4392 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4393 "because of no compatible rates.\n",
4394 escape_essid(network->ssid, network->ssid_len),
4395 MAC_ARG(network->bssid));
4399 /* TODO: Perform any further minimal comparative tests. We do not
4400 * want to put too much policy logic here; intelligent scan selection
4401 * should occur within a generic IEEE 802.11 user space tool. */
4403 /* Set up 'new' AP to this network */
4404 ipw_copy_rates(&match->rates, &rates);
4405 match->network = network;
4407 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' is a viable match.\n",
4408 escape_essid(network->ssid, network->ssid_len),
4409 MAC_ARG(network->bssid));
4414 static void ipw_adhoc_create(struct ipw_priv *priv,
4415 struct ieee80211_network *network)
4418 * For the purposes of scanning, we can set our wireless mode
4419 * to trigger scans across combinations of bands, but when it
4420 * comes to creating a new ad-hoc network, we have to tell the FW
4421 * exactly which band to use.
4423 * We also have the possibility of an invalid channel for the
4424 * chosen band. Attempting to create a new ad-hoc network
4425 * with an invalid channel for the wireless mode will trigger a
4428 network->mode = is_valid_channel(priv->ieee->mode, priv->channel);
4429 if (network->mode) {
4430 network->channel = priv->channel;
4432 IPW_WARNING("Overriding invalid channel\n");
4433 if (priv->ieee->mode & IEEE_A) {
4434 network->mode = IEEE_A;
4435 priv->channel = band_a_active_channel[0];
4436 } else if (priv->ieee->mode & IEEE_G) {
4437 network->mode = IEEE_G;
4438 priv->channel = band_b_active_channel[0];
4440 network->mode = IEEE_B;
4441 priv->channel = band_b_active_channel[0];
4445 network->channel = priv->channel;
4446 priv->config |= CFG_ADHOC_PERSIST;
4447 ipw_create_bssid(priv, network->bssid);
4448 network->ssid_len = priv->essid_len;
4449 memcpy(network->ssid, priv->essid, priv->essid_len);
4450 memset(&network->stats, 0, sizeof(network->stats));
4451 network->capability = WLAN_CAPABILITY_IBSS;
4452 if (priv->capability & CAP_PRIVACY_ON)
4453 network->capability |= WLAN_CAPABILITY_PRIVACY;
4454 network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
4455 memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
4456 network->rates_ex_len = priv->rates.num_rates - network->rates_len;
4457 memcpy(network->rates_ex,
4458 &priv->rates.supported_rates[network->rates_len],
4459 network->rates_ex_len);
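/* The first MAX_RATES_LENGTH rates above go into the Supported Rates
 * element; whatever is left spills into rates_ex, which presumably maps to
 * the Extended Supported Rates element when the network is advertised. */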
4460 network->last_scanned = 0;
4462 network->last_associate = 0;
4463 network->time_stamp[0] = 0;
4464 network->time_stamp[1] = 0;
4465 network->beacon_interval = 100; /* Default */
4466 network->listen_interval = 10; /* Default */
4467 network->atim_window = 0; /* Default */
4468 #ifdef CONFIG_IEEE80211_WPA
4469 network->wpa_ie_len = 0;
4470 network->rsn_ie_len = 0;
4471 #endif /* CONFIG_IEEE80211_WPA */
4474 static void ipw_send_wep_keys(struct ipw_priv *priv)
4476 struct ipw_wep_key *key;
4478 struct host_cmd cmd = {
4479 .cmd = IPW_CMD_WEP_KEY,
4483 key = (struct ipw_wep_key *)&cmd.param;
4484 key->cmd_id = DINO_CMD_WEP_KEY;
4487 for (i = 0; i < 4; i++) {
4489 if (!(priv->sec.flags & (1 << i))) {
4492 key->key_size = priv->sec.key_sizes[i];
4493 memcpy(key->key, priv->sec.keys[i], key->key_size);
4496 if (ipw_send_cmd(priv, &cmd)) {
4497 IPW_ERROR("failed to send WEP_KEY command\n");
4503 static void ipw_adhoc_check(void *data)
4505 struct ipw_priv *priv = data;
4507 if (priv->missed_adhoc_beacons++ > priv->missed_beacon_threshold &&
4508 !(priv->config & CFG_ADHOC_PERSIST)) {
4509 IPW_DEBUG_SCAN("Disassociating due to missed beacons\n");
4510 ipw_remove_current_network(priv);
4511 ipw_disassociate(priv);
4515 queue_delayed_work(priv->workqueue, &priv->adhoc_check,
4516 priv->assoc_request.beacon_interval);
4519 #ifdef CONFIG_IPW_DEBUG
4520 static void ipw_debug_config(struct ipw_priv *priv)
4522 IPW_DEBUG_INFO("Scan completed, no valid APs matched "
4523 "[CFG 0x%08X]\n", priv->config);
4524 if (priv->config & CFG_STATIC_CHANNEL)
4525 IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
4527 IPW_DEBUG_INFO("Channel unlocked.\n");
4528 if (priv->config & CFG_STATIC_ESSID)
4529 IPW_DEBUG_INFO("ESSID locked to '%s'\n",
4530 escape_essid(priv->essid, priv->essid_len));
4532 IPW_DEBUG_INFO("ESSID unlocked.\n");
4533 if (priv->config & CFG_STATIC_BSSID)
4534 IPW_DEBUG_INFO("BSSID locked to %d\n", priv->channel);
4536 IPW_DEBUG_INFO("BSSID unlocked.\n");
4537 if (priv->capability & CAP_PRIVACY_ON)
4538 IPW_DEBUG_INFO("PRIVACY on\n");
4540 IPW_DEBUG_INFO("PRIVACY off\n");
4541 IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
4544 #define ipw_debug_config(x) do {} while (0)
4547 static inline void ipw_set_fixed_rate(struct ipw_priv *priv,
4548 struct ieee80211_network *network)
4550 /* TODO: Verify that this works... */
4551 struct ipw_fixed_rate fr = {
4552 .tx_rates = priv->rates_mask
4557 /* Identify 'current FW band' and match it with the fixed
4560 switch (priv->ieee->freq_band) {
4561 case IEEE80211_52GHZ_BAND: /* A only */
4563 if (priv->rates_mask & ~IEEE80211_OFDM_RATES_MASK) {
4564 /* Invalid fixed rate mask */
4569 fr.tx_rates >>= IEEE80211_OFDM_SHIFT_MASK_A;
4572 default: /* 2.4Ghz or Mixed */
4574 if (network->mode == IEEE_B) {
4575 if (fr.tx_rates & ~IEEE80211_CCK_RATES_MASK) {
4576 /* Invalid fixed rate mask */
4583 if (fr.tx_rates & ~(IEEE80211_CCK_RATES_MASK |
4584 IEEE80211_OFDM_RATES_MASK)) {
4585 /* Invalid fixed rate mask */
4590 if (IEEE80211_OFDM_RATE_6MB_MASK & fr.tx_rates) {
4591 mask |= (IEEE80211_OFDM_RATE_6MB_MASK >> 1);
4592 fr.tx_rates &= ~IEEE80211_OFDM_RATE_6MB_MASK;
4595 if (IEEE80211_OFDM_RATE_9MB_MASK & fr.tx_rates) {
4596 mask |= (IEEE80211_OFDM_RATE_9MB_MASK >> 1);
4597 fr.tx_rates &= ~IEEE80211_OFDM_RATE_9MB_MASK;
4600 if (IEEE80211_OFDM_RATE_12MB_MASK & fr.tx_rates) {
4601 mask |= (IEEE80211_OFDM_RATE_12MB_MASK >> 1);
4602 fr.tx_rates &= ~IEEE80211_OFDM_RATE_12MB_MASK;
4605 fr.tx_rates |= mask;
4609 reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
4610 ipw_write_reg32(priv, reg, *(u32 *) & fr);
4613 static int ipw_associate_network(struct ipw_priv *priv,
4614 struct ieee80211_network *network,
4615 struct ipw_supported_rates *rates, int roaming)
4619 if (priv->config & CFG_FIXED_RATE)
4620 ipw_set_fixed_rate(priv, network);
4622 if (!(priv->config & CFG_STATIC_ESSID)) {
4623 priv->essid_len = min(network->ssid_len,
4624 (u8) IW_ESSID_MAX_SIZE);
4625 memcpy(priv->essid, network->ssid, priv->essid_len);
4628 network->last_associate = jiffies;
4630 memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
4631 priv->assoc_request.channel = network->channel;
4632 if ((priv->capability & CAP_PRIVACY_ON) &&
4633 (priv->capability & CAP_SHARED_KEY)) {
4634 priv->assoc_request.auth_type = AUTH_SHARED_KEY;
4635 priv->assoc_request.auth_key = priv->sec.active_key;
4637 priv->assoc_request.auth_type = AUTH_OPEN;
4638 priv->assoc_request.auth_key = 0;
4641 if (priv->capability & CAP_PRIVACY_ON)
4642 ipw_send_wep_keys(priv);
4645 * It is valid for our ieee device to support multiple modes, but
4646 * when it comes to associating to a given network we have to choose
4649 if (network->mode & priv->ieee->mode & IEEE_A)
4650 priv->assoc_request.ieee_mode = IPW_A_MODE;
4651 else if (network->mode & priv->ieee->mode & IEEE_G)
4652 priv->assoc_request.ieee_mode = IPW_G_MODE;
4653 else if (network->mode & priv->ieee->mode & IEEE_B)
4654 priv->assoc_request.ieee_mode = IPW_B_MODE;
4656 IPW_DEBUG_ASSOC("%sssocation attempt: '%s', channel %d, "
4657 "802.11%c [%d], enc=%s%s%s%c%c\n",
4658 roaming ? "Rea" : "A",
4659 escape_essid(priv->essid, priv->essid_len),
4661 ipw_modes[priv->assoc_request.ieee_mode],
4663 priv->capability & CAP_PRIVACY_ON ? "on " : "off",
4664 priv->capability & CAP_PRIVACY_ON ?
4665 (priv->capability & CAP_SHARED_KEY ? "(shared)" :
4667 priv->capability & CAP_PRIVACY_ON ? " key=" : "",
4668 priv->capability & CAP_PRIVACY_ON ?
4669 '1' + priv->sec.active_key : '.',
4670 priv->capability & CAP_PRIVACY_ON ? '.' : ' ');
4672 priv->assoc_request.beacon_interval = network->beacon_interval;
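	/* Note (descriptive, added): a network whose TSF timestamp is still
	 * zero is one we populated locally in ipw_adhoc_create(), so we start
	 * a new IBSS here rather than joining an existing one. */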
4673 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
4674 (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
4675 priv->assoc_request.assoc_type = HC_IBSS_START;
4676 priv->assoc_request.assoc_tsf_msw = 0;
4677 priv->assoc_request.assoc_tsf_lsw = 0;
4679 if (unlikely(roaming))
4680 priv->assoc_request.assoc_type = HC_REASSOCIATE;
4682 priv->assoc_request.assoc_type = HC_ASSOCIATE;
4683 priv->assoc_request.assoc_tsf_msw = network->time_stamp[1];
4684 priv->assoc_request.assoc_tsf_lsw = network->time_stamp[0];
4687 memcpy(&priv->assoc_request.bssid, network->bssid, ETH_ALEN);
4689 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
4690 memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN);
4691 priv->assoc_request.atim_window = network->atim_window;
4693 memcpy(&priv->assoc_request.dest, network->bssid, ETH_ALEN);
4694 priv->assoc_request.atim_window = 0;
4697 priv->assoc_request.capability = network->capability;
4698 priv->assoc_request.listen_interval = network->listen_interval;
4700 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
4702 IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
4706 rates->ieee_mode = priv->assoc_request.ieee_mode;
4707 rates->purpose = IPW_RATE_CONNECT;
4708 ipw_send_supported_rates(priv, rates);
4710 if (priv->assoc_request.ieee_mode == IPW_G_MODE)
4711 priv->sys_config.dot11g_auto_detection = 1;
4713 priv->sys_config.dot11g_auto_detection = 0;
4714 err = ipw_send_system_config(priv, &priv->sys_config);
4716 IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
4720 IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
4721 err = ipw_set_sensitivity(priv, network->stats.rssi);
4723 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
4728 * If preemption is enabled, it is possible for the association
4729 * to complete before we return from ipw_send_associate. Therefore
4730 * we have to be sure to update our private data first.
4732 priv->channel = network->channel;
4733 memcpy(priv->bssid, network->bssid, ETH_ALEN);
4734 priv->status |= STATUS_ASSOCIATING;
4735 priv->status &= ~STATUS_SECURITY_UPDATED;
4737 priv->assoc_network = network;
4739 err = ipw_send_associate(priv, &priv->assoc_request);
4741 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
4745 IPW_DEBUG(IPW_DL_STATE, "associating: '%s' " MAC_FMT " \n",
4746 escape_essid(priv->essid, priv->essid_len),
4747 MAC_ARG(priv->bssid));
4752 static void ipw_roam(void *data)
4754 struct ipw_priv *priv = data;
4755 struct ieee80211_network *network = NULL;
4756 struct ipw_network_match match = {
4757 .network = priv->assoc_network
4760 /* The roaming process is as follows:
4762 * 1. Missed beacon threshold triggers the roaming process by
4763 * setting the status ROAM bit and requesting a scan.
4764 * 2. When the scan completes, it schedules the ROAM work
4765 * 3. The ROAM work looks at all of the known networks for one that
4766 * is a better network than the currently associated. If none
4767 * found, the ROAM process is over (ROAM bit cleared)
4768 * 4. If a better network is found, a disassociation request is
4770 * 5. When the disassociation completes, the roam work is again
4771 * scheduled. The second time through, the driver is no longer
4772 * associated, and the newly selected network is sent an
4773 * association request.
4774 * 6. At this point, the roaming process is complete and the ROAM
4775 * status bit is cleared.
4778 /* If we are no longer associated, and the roaming bit is no longer
4779 * set, then we are not actively roaming, so just return */
4780 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
4783 if (priv->status & STATUS_ASSOCIATED) {
4784 /* First pass through ROAM process -- look for a better
4786 u8 rssi = priv->assoc_network->stats.rssi;
4787 priv->assoc_network->stats.rssi = -128;
4788 list_for_each_entry(network, &priv->ieee->network_list, list) {
4789 if (network != priv->assoc_network)
4790 ipw_best_network(priv, &match, network, 1);
4792 priv->assoc_network->stats.rssi = rssi;
4794 if (match.network == priv->assoc_network) {
4795 IPW_DEBUG_ASSOC("No better APs in this network to "
4797 priv->status &= ~STATUS_ROAMING;
4798 ipw_debug_config(priv);
4802 ipw_send_disassociate(priv, 1);
4803 priv->assoc_network = match.network;
4808 /* Second pass through ROAM process -- request association */
4809 ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
4810 ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
4811 priv->status &= ~STATUS_ROAMING;
4814 static void ipw_associate(void *data)
4816 struct ipw_priv *priv = data;
4818 struct ieee80211_network *network = NULL;
4819 struct ipw_network_match match = {
4822 struct ipw_supported_rates *rates;
4823 struct list_head *element;
4825 if (!(priv->config & CFG_ASSOCIATE) &&
4826 !(priv->config & (CFG_STATIC_ESSID |
4827 CFG_STATIC_CHANNEL | CFG_STATIC_BSSID))) {
4828 IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
4832 list_for_each_entry(network, &priv->ieee->network_list, list)
4833 ipw_best_network(priv, &match, network, 0);
4835 network = match.network;
4836 rates = &match.rates;
4838 if (network == NULL &&
4839 priv->ieee->iw_mode == IW_MODE_ADHOC &&
4840 priv->config & CFG_ADHOC_CREATE &&
4841 priv->config & CFG_STATIC_ESSID &&
4842 !list_empty(&priv->ieee->network_free_list)) {
4843 element = priv->ieee->network_free_list.next;
4844 network = list_entry(element, struct ieee80211_network, list);
4845 ipw_adhoc_create(priv, network);
4846 rates = &priv->rates;
4848 list_add_tail(&network->list, &priv->ieee->network_list);
4851 /* If we reached the end of the list, then we don't have any valid
4854 ipw_debug_config(priv);
4856 queue_delayed_work(priv->workqueue, &priv->request_scan,
4862 ipw_associate_network(priv, network, rates, 0);
4865 static inline void ipw_handle_data_packet(struct ipw_priv *priv,
4866 struct ipw_rx_mem_buffer *rxb,
4867 struct ieee80211_rx_stats *stats)
4869 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
4871 /* We received data from the HW, so stop the watchdog */
4872 priv->net_dev->trans_start = jiffies;
4874 /* We only process data packets if the
4875 * interface is open */
4876 if (unlikely((pkt->u.frame.length + IPW_RX_FRAME_SIZE) >
4877 skb_tailroom(rxb->skb))) {
4878 priv->ieee->stats.rx_errors++;
4879 priv->wstats.discard.misc++;
4880 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
4882 } else if (unlikely(!netif_running(priv->net_dev))) {
4883 priv->ieee->stats.rx_dropped++;
4884 priv->wstats.discard.misc++;
4885 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
4889 /* Advance skb->data to the start of the actual payload */
4890 skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));
4892 /* Set the size of the skb to the size of the frame */
4893 skb_put(rxb->skb, pkt->u.frame.length);
4895 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
4897 if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
4898 priv->ieee->stats.rx_errors++;
4899 else /* ieee80211_rx succeeded, so it now owns the SKB */
4904 * Main entry function for receiving a packet with 802.11 headers. This
4905 * should be called whenever the FW has notified us that there is a new
4906 * skb in the receive queue.
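 *
 * Added note (descriptive): the receive path works off a circular ring of
 * RX_QUEUE_SIZE buffers. CX2_RX_READ_INDEX and CX2_RX_WRITE_INDEX are read
 * below to determine how far the firmware has advanced, and
 * priv->rxq->processed records the last entry this driver has consumed.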
4908 static void ipw_rx(struct ipw_priv *priv)
4910 struct ipw_rx_mem_buffer *rxb;
4911 struct ipw_rx_packet *pkt;
4912 struct ieee80211_hdr_4addr *header;
4916 r = ipw_read32(priv, CX2_RX_READ_INDEX);
4917 w = ipw_read32(priv, CX2_RX_WRITE_INDEX);
4918 i = (priv->rxq->processed + 1) % RX_QUEUE_SIZE;
4921 rxb = priv->rxq->queue[i];
4922 #ifdef CONFIG_IPW_DEBUG
4923 if (unlikely(rxb == NULL)) {
4924 printk(KERN_CRIT "Queue not allocated!\n");
4928 priv->rxq->queue[i] = NULL;
4930 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
4932 PCI_DMA_FROMDEVICE);
4934 pkt = (struct ipw_rx_packet *)rxb->skb->data;
4935 IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
4936 pkt->header.message_type,
4937 pkt->header.rx_seq_num, pkt->header.control_bits);
4939 switch (pkt->header.message_type) {
4940 case RX_FRAME_TYPE: /* 802.11 frame */ {
4941 struct ieee80211_rx_stats stats = {
4942 .rssi = pkt->u.frame.rssi_dbm -
4944 .signal = pkt->u.frame.signal,
4945 .rate = pkt->u.frame.rate,
4946 .mac_time = jiffies,
4948 pkt->u.frame.received_channel,
4951 control & (1 << 0)) ?
4952 IEEE80211_24GHZ_BAND :
4953 IEEE80211_52GHZ_BAND,
4954 .len = pkt->u.frame.length,
4957 if (stats.rssi != 0)
4958 stats.mask |= IEEE80211_STATMASK_RSSI;
4959 if (stats.signal != 0)
4960 stats.mask |= IEEE80211_STATMASK_SIGNAL;
4961 if (stats.rate != 0)
4962 stats.mask |= IEEE80211_STATMASK_RATE;
4966 #ifdef CONFIG_IPW_PROMISC
4967 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4968 ipw_handle_data_packet(priv, rxb,
4975 (struct ieee80211_hdr_4addr *)(rxb->skb->
4978 /* TODO: Check Ad-Hoc dest/source and make sure
4979 * that we are actually parsing these packets
4980 * correctly -- we should probably use the
4981 * frame control of the packet and disregard
4982 * the current iw_mode */
4983 switch (priv->ieee->iw_mode) {
4986 !memcmp(header->addr1,
4987 priv->net_dev->dev_addr,
4989 !memcmp(header->addr3,
4990 priv->bssid, ETH_ALEN) ||
4991 is_broadcast_ether_addr(header->
4993 || is_multicast_ether_addr(header->
5000 !memcmp(header->addr3,
5001 priv->bssid, ETH_ALEN) ||
5002 !memcmp(header->addr1,
5003 priv->net_dev->dev_addr,
5005 is_broadcast_ether_addr(header->
5007 || is_multicast_ether_addr(header->
5012 if (network_packet && priv->assoc_network) {
5013 priv->assoc_network->stats.rssi =
5015 average_add(&priv->average_rssi,
5017 priv->last_rx_rssi = stats.rssi;
5020 IPW_DEBUG_RX("Frame: len=%u\n",
5021 pkt->u.frame.length);
5023 if (pkt->u.frame.length < frame_hdr_len(header)) {
5025 ("Received packet is too small. "
5027 priv->ieee->stats.rx_errors++;
5028 priv->wstats.discard.misc++;
5032 switch (WLAN_FC_GET_TYPE(header->frame_ctl)) {
5033 case IEEE80211_FTYPE_MGMT:
5034 ieee80211_rx_mgt(priv->ieee, header,
5036 if (priv->ieee->iw_mode == IW_MODE_ADHOC
5039 (header->frame_ctl) ==
5040 IEEE80211_STYPE_PROBE_RESP)
5043 (header->frame_ctl) ==
5044 IEEE80211_STYPE_BEACON))
5045 && !memcmp(header->addr3,
5046 priv->bssid, ETH_ALEN))
5047 ipw_add_station(priv,
5051 case IEEE80211_FTYPE_CTL:
5054 case IEEE80211_FTYPE_DATA:
5056 ipw_handle_data_packet(priv,
5060 IPW_DEBUG_DROP("Dropping: "
5075 case RX_HOST_NOTIFICATION_TYPE:{
5077 ("Notification: subtype=%02X flags=%02X size=%d\n",
5078 pkt->u.notification.subtype,
5079 pkt->u.notification.flags,
5080 pkt->u.notification.size);
5081 ipw_rx_notification(priv, &pkt->u.notification);
5086 IPW_DEBUG_RX("Bad Rx packet of type %d\n",
5087 pkt->header.message_type);
5091 /* For now we just don't re-use anything. We can tweak this
5092 * later to try to re-use notification packets and SKBs that
5093 * fail to Rx correctly */
5094 if (rxb->skb != NULL) {
5095 dev_kfree_skb_any(rxb->skb);
5099 pci_unmap_single(priv->pci_dev, rxb->dma_addr,
5100 CX2_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5101 list_add_tail(&rxb->list, &priv->rxq->rx_used);
5103 i = (i + 1) % RX_QUEUE_SIZE;
5106 /* Backtrack one entry */
5107 priv->rxq->processed = (i ? i : RX_QUEUE_SIZE) - 1;
5109 ipw_rx_queue_restock(priv);
5112 static void ipw_abort_scan(struct ipw_priv *priv)
5116 if (priv->status & STATUS_SCAN_ABORTING) {
5117 IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
5120 priv->status |= STATUS_SCAN_ABORTING;
5122 err = ipw_send_scan_abort(priv);
5124 IPW_DEBUG_HC("Request to abort scan failed.\n");
5127 static int ipw_request_scan(struct ipw_priv *priv)
5129 struct ipw_scan_request_ext scan;
5130 int channel_index = 0;
5131 int i, err, scan_type;
5133 if (priv->status & STATUS_EXIT_PENDING) {
5134 IPW_DEBUG_SCAN("Aborting scan due to device shutdown\n");
5135 priv->status |= STATUS_SCAN_PENDING;
5139 if (priv->status & STATUS_SCANNING) {
5140 IPW_DEBUG_HC("Concurrent scan requested. Aborting first.\n");
5141 priv->status |= STATUS_SCAN_PENDING;
5142 ipw_abort_scan(priv);
5146 if (priv->status & STATUS_SCAN_ABORTING) {
5147 IPW_DEBUG_HC("Scan request while abort pending. Queuing.\n");
5148 priv->status |= STATUS_SCAN_PENDING;
5152 if (priv->status & STATUS_RF_KILL_MASK) {
5153 IPW_DEBUG_HC("Aborting scan due to RF Kill activation\n");
5154 priv->status |= STATUS_SCAN_PENDING;
5158 memset(&scan, 0, sizeof(scan));
5160 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] = 20;
5161 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] = 20;
5162 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = 20;
5164 scan.full_scan_index = ieee80211_get_scans(priv->ieee);
5165 /* If we are roaming, then make this a directed scan for the current
5166 * network. Otherwise, ensure that every other scan is a fast
5167 * channel hop scan */
5168 if ((priv->status & STATUS_ROAMING)
5169 || (!(priv->status & STATUS_ASSOCIATED)
5170 && (priv->config & CFG_STATIC_ESSID)
5171 && (scan.full_scan_index % 2))) {
5172 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
5174 IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
5178 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
5180 scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;
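	/* Note (descriptive, added): the channel list handed to the firmware
	 * is grouped per band. The slot reserved at 'start' in each band's
	 * group is patched below into a header byte carrying the mode in the
	 * top two bits and the number of following channels in the low six
	 * bits, e.g. (IPW_A_MODE << 6) | count. */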
5183 if (priv->ieee->freq_band & IEEE80211_52GHZ_BAND) {
5184 int start = channel_index;
5185 for (i = 0; i < MAX_A_CHANNELS; i++) {
5186 if (band_a_active_channel[i] == 0)
5188 if ((priv->status & STATUS_ASSOCIATED) &&
5189 band_a_active_channel[i] == priv->channel)
5192 scan.channels_list[channel_index] =
5193 band_a_active_channel[i];
5194 ipw_set_scan_type(&scan, channel_index, scan_type);
5197 if (start != channel_index) {
5198 scan.channels_list[start] = (u8) (IPW_A_MODE << 6) |
5199 (channel_index - start);
5204 if (priv->ieee->freq_band & IEEE80211_24GHZ_BAND) {
5205 int start = channel_index;
5206 for (i = 0; i < MAX_B_CHANNELS; i++) {
5207 if (band_b_active_channel[i] == 0)
5209 if ((priv->status & STATUS_ASSOCIATED) &&
5210 band_b_active_channel[i] == priv->channel)
5213 scan.channels_list[channel_index] =
5214 band_b_active_channel[i];
5215 ipw_set_scan_type(&scan, channel_index, scan_type);
5218 if (start != channel_index) {
5219 scan.channels_list[start] = (u8) (IPW_B_MODE << 6) |
5220 (channel_index - start);
5224 err = ipw_send_scan_request_ext(priv, &scan);
5226 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
5230 priv->status |= STATUS_SCANNING;
5231 priv->status &= ~STATUS_SCAN_PENDING;
5237 * This file defines the Wireless Extension handlers. It does not
5238 * define any methods of hardware manipulation and relies on the
5239 * functions defined in ipw_main to provide the HW interaction.
5241 * The exception to this is the use of the ipw_get_ordinal()
5242 * function used to poll the hardware vs. making unnecessary calls.
5246 static int ipw_wx_get_name(struct net_device *dev,
5247 struct iw_request_info *info,
5248 union iwreq_data *wrqu, char *extra)
5250 struct ipw_priv *priv = ieee80211_priv(dev);
5251 if (!(priv->status & STATUS_ASSOCIATED))
5252 strcpy(wrqu->name, "unassociated");
5254 snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11%c",
5255 ipw_modes[priv->assoc_request.ieee_mode]);
5256 IPW_DEBUG_WX("Name: %s\n", wrqu->name);
5260 static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
5263 IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
5264 priv->config &= ~CFG_STATIC_CHANNEL;
5265 if (!(priv->status & (STATUS_SCANNING | STATUS_ASSOCIATED |
5266 STATUS_ASSOCIATING))) {
5267 IPW_DEBUG_ASSOC("Attempting to associate with new "
5269 ipw_associate(priv);
5275 priv->config |= CFG_STATIC_CHANNEL;
5277 if (priv->channel == channel) {
5278 IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
5283 IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
5284 priv->channel = channel;
5286 /* If we are currently associated, or trying to associate
5287 * then see if this is a new channel (causing us to disassociate) */
5288 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
5289 IPW_DEBUG_ASSOC("Disassociating due to channel change.\n");
5290 ipw_disassociate(priv);
5292 ipw_associate(priv);
5298 static int ipw_wx_set_freq(struct net_device *dev,
5299 struct iw_request_info *info,
5300 union iwreq_data *wrqu, char *extra)
5302 struct ipw_priv *priv = ieee80211_priv(dev);
5303 struct iw_freq *fwrq = &wrqu->freq;
5305 /* if setting by frequency, convert to channel */
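/* Note (descriptive, added): Wireless Extensions encode a frequency as
 * m x 10^e Hz. Values that fall in the 2.4GHz range are mapped back to a
 * channel number via ipw_frequencies[] here, while small values
 * (m <= 1000, e == 0) are already channel numbers and are passed straight
 * through to ipw_set_channel() below. */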
5307 if ((fwrq->m >= (int)2.412e8 && fwrq->m <= (int)2.487e8)) {
5308 int f = fwrq->m / 100000;
5311 while ((c < REG_MAX_CHANNEL) &&
5312 (f != ipw_frequencies[c]))
5315 /* hack to fall through */
5321 if (fwrq->e > 0 || fwrq->m > 1000)
5324 IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
5325 return ipw_set_channel(priv, (u8) fwrq->m);
5328 static int ipw_wx_get_freq(struct net_device *dev,
5329 struct iw_request_info *info,
5330 union iwreq_data *wrqu, char *extra)
5332 struct ipw_priv *priv = ieee80211_priv(dev);
5336 /* If we are associated, trying to associate, or have a statically
5337 * configured CHANNEL then return that; otherwise return ANY */
5338 if (priv->config & CFG_STATIC_CHANNEL ||
5339 priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED))
5340 wrqu->freq.m = priv->channel;
5344 IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel);
5348 static int ipw_wx_set_mode(struct net_device *dev,
5349 struct iw_request_info *info,
5350 union iwreq_data *wrqu, char *extra)
5352 struct ipw_priv *priv = ieee80211_priv(dev);
5355 IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);
5357 if (wrqu->mode == priv->ieee->iw_mode)
5360 switch (wrqu->mode) {
5361 #ifdef CONFIG_IPW_PROMISC
5362 case IW_MODE_MONITOR:
5368 wrqu->mode = IW_MODE_INFRA;
5374 #ifdef CONFIG_IPW_PROMISC
5375 if (priv->ieee->iw_mode == IW_MODE_MONITOR)
5376 priv->net_dev->type = ARPHRD_ETHER;
5378 if (wrqu->mode == IW_MODE_MONITOR)
5379 priv->net_dev->type = ARPHRD_IEEE80211;
5380 #endif /* CONFIG_IPW_PROMISC */
5383 /* Free the existing firmware and reset the fw_loaded
5384 * flag so ipw_load() will bring in the new firmware */
5389 release_firmware(bootfw);
5390 release_firmware(ucode);
5391 release_firmware(firmware);
5392 bootfw = ucode = firmware = NULL;
5395 priv->ieee->iw_mode = wrqu->mode;
5396 ipw_adapter_restart(priv);
5401 static int ipw_wx_get_mode(struct net_device *dev,
5402 struct iw_request_info *info,
5403 union iwreq_data *wrqu, char *extra)
5405 struct ipw_priv *priv = ieee80211_priv(dev);
5407 wrqu->mode = priv->ieee->iw_mode;
5408 IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
5413 #define DEFAULT_RTS_THRESHOLD 2304U
5414 #define MIN_RTS_THRESHOLD 1U
5415 #define MAX_RTS_THRESHOLD 2304U
5416 #define DEFAULT_BEACON_INTERVAL 100U
5417 #define DEFAULT_SHORT_RETRY_LIMIT 7U
5418 #define DEFAULT_LONG_RETRY_LIMIT 4U
5420 /* Values are in microseconds */
5421 static const s32 timeout_duration[] = {
5429 static const s32 period_duration[] = {
5437 static int ipw_wx_get_range(struct net_device *dev,
5438 struct iw_request_info *info,
5439 union iwreq_data *wrqu, char *extra)
5441 struct ipw_priv *priv = ieee80211_priv(dev);
5442 struct iw_range *range = (struct iw_range *)extra;
5446 wrqu->data.length = sizeof(*range);
5447 memset(range, 0, sizeof(*range));
5449 /* 54 Mbps == ~27 Mb/s real (802.11g) */
5450 range->throughput = 27 * 1000 * 1000;
5452 range->max_qual.qual = 100;
5453 /* TODO: Find real max RSSI and stick here */
5454 range->max_qual.level = 0;
5455 range->max_qual.noise = 0;
5456 range->max_qual.updated = 7; /* Updated all three */
5458 range->avg_qual.qual = 70;
5459 /* TODO: Find real 'good' to 'bad' threshold value for RSSI */
5460 range->avg_qual.level = 0; /* FIXME to real average level */
5461 range->avg_qual.noise = 0;
5462 range->avg_qual.updated = 7; /* Updated all three */
5464 range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);
5466 for (i = 0; i < range->num_bitrates; i++)
5467 range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
5470 range->max_rts = DEFAULT_RTS_THRESHOLD;
5471 range->min_frag = MIN_FRAG_THRESHOLD;
5472 range->max_frag = MAX_FRAG_THRESHOLD;
5474 range->encoding_size[0] = 5;
5475 range->encoding_size[1] = 13;
5476 range->num_encoding_sizes = 2;
5477 range->max_encoding_tokens = WEP_KEYS;
5479 /* Set the Wireless Extension versions */
5480 range->we_version_compiled = WIRELESS_EXT;
5481 range->we_version_source = 16;
5483 range->num_channels = FREQ_COUNT;
5486 for (i = 0; i < FREQ_COUNT; i++) {
5487 range->freq[val].i = i + 1;
5488 range->freq[val].m = ipw_frequencies[i] * 100000;
5489 range->freq[val].e = 1;
5492 if (val == IW_MAX_FREQUENCIES)
5495 range->num_frequency = val;
5497 IPW_DEBUG_WX("GET Range\n");
5501 static int ipw_wx_set_wap(struct net_device *dev,
5502 struct iw_request_info *info,
5503 union iwreq_data *wrqu, char *extra)
5505 struct ipw_priv *priv = ieee80211_priv(dev);
5507 static const unsigned char any[] = {
5508 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
5510 static const unsigned char off[] = {
5511 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
5514 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
5517 if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
5518 !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
5519 /* we disable mandatory BSSID association */
5520 IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
5521 priv->config &= ~CFG_STATIC_BSSID;
5522 if (!(priv->status & (STATUS_SCANNING | STATUS_ASSOCIATED |
5523 STATUS_ASSOCIATING))) {
5524 IPW_DEBUG_ASSOC("Attempting to associate with new "
5526 ipw_associate(priv);
5532 priv->config |= CFG_STATIC_BSSID;
5533 if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) {
5534 IPW_DEBUG_WX("BSSID set to current BSSID.\n");
5538 IPW_DEBUG_WX("Setting mandatory BSSID to " MAC_FMT "\n",
5539 MAC_ARG(wrqu->ap_addr.sa_data));
5541 memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
5543 /* If we are currently associated, or trying to associate
5544 * then see if this is a new BSSID (causing us to disassociate) */
5545 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
5546 IPW_DEBUG_ASSOC("Disassociating due to BSSID change.\n");
5547 ipw_disassociate(priv);
5549 ipw_associate(priv);
5555 static int ipw_wx_get_wap(struct net_device *dev,
5556 struct iw_request_info *info,
5557 union iwreq_data *wrqu, char *extra)
5559 struct ipw_priv *priv = ieee80211_priv(dev);
5560 /* If we are associated, trying to associate, or have a statically
5561 * configured BSSID then return that; otherwise return ANY */
5562 if (priv->config & CFG_STATIC_BSSID ||
5563 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
5564 wrqu->ap_addr.sa_family = ARPHRD_ETHER;
5565 memcpy(wrqu->ap_addr.sa_data, &priv->bssid, ETH_ALEN);
5567 memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
5569 IPW_DEBUG_WX("Getting WAP BSSID: " MAC_FMT "\n",
5570 MAC_ARG(wrqu->ap_addr.sa_data));
5574 static int ipw_wx_set_essid(struct net_device *dev,
5575 struct iw_request_info *info,
5576 union iwreq_data *wrqu, char *extra)
5578 struct ipw_priv *priv = ieee80211_priv(dev);
5579 char *essid = ""; /* ANY */
5582 if (wrqu->essid.flags && wrqu->essid.length) {
5583 length = wrqu->essid.length - 1;
5587 IPW_DEBUG_WX("Setting ESSID to ANY\n");
5588 priv->config &= ~CFG_STATIC_ESSID;
5589 if (!(priv->status & (STATUS_SCANNING | STATUS_ASSOCIATED |
5590 STATUS_ASSOCIATING))) {
5591 IPW_DEBUG_ASSOC("Attempting to associate with new "
5593 ipw_associate(priv);
5599 length = min(length, IW_ESSID_MAX_SIZE);
5601 priv->config |= CFG_STATIC_ESSID;
5603 if (priv->essid_len == length && !memcmp(priv->essid, extra, length)) {
5604 IPW_DEBUG_WX("ESSID set to current ESSID.\n");
5608 IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n", escape_essid(essid, length),
5611 priv->essid_len = length;
5612 memcpy(priv->essid, essid, priv->essid_len);
5614 /* If we are currently associated, or trying to associate
5615 * then see if this is a new ESSID (causing us to disassociate) */
5616 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
5617 IPW_DEBUG_ASSOC("Disassociating due to ESSID change.\n");
5618 ipw_disassociate(priv);
5620 ipw_associate(priv);
5626 static int ipw_wx_get_essid(struct net_device *dev,
5627 struct iw_request_info *info,
5628 union iwreq_data *wrqu, char *extra)
5630 struct ipw_priv *priv = ieee80211_priv(dev);
5632 /* If we are associated, trying to associate, or have a statically
5633 * configured ESSID then return that; otherwise return ANY */
5634 if (priv->config & CFG_STATIC_ESSID ||
5635 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
5636 IPW_DEBUG_WX("Getting essid: '%s'\n",
5637 escape_essid(priv->essid, priv->essid_len));
5638 memcpy(extra, priv->essid, priv->essid_len);
5639 wrqu->essid.length = priv->essid_len;
5640 wrqu->essid.flags = 1; /* active */
5642 IPW_DEBUG_WX("Getting essid: ANY\n");
5643 wrqu->essid.length = 0;
5644 wrqu->essid.flags = 0; /* active */
5650 static int ipw_wx_set_nick(struct net_device *dev,
5651 struct iw_request_info *info,
5652 union iwreq_data *wrqu, char *extra)
5654 struct ipw_priv *priv = ieee80211_priv(dev);
5656 IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
5657 if (wrqu->data.length > IW_ESSID_MAX_SIZE)
5660 wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
5661 memset(priv->nick, 0, sizeof(priv->nick));
5662 memcpy(priv->nick, extra, wrqu->data.length);
5663 IPW_DEBUG_TRACE("<<\n");
5668 static int ipw_wx_get_nick(struct net_device *dev,
5669 struct iw_request_info *info,
5670 union iwreq_data *wrqu, char *extra)
5672 struct ipw_priv *priv = ieee80211_priv(dev);
5673 IPW_DEBUG_WX("Getting nick\n");
5674 wrqu->data.length = strlen(priv->nick) + 1;
5675 memcpy(extra, priv->nick, wrqu->data.length);
5676 wrqu->data.flags = 1; /* active */
5680 static int ipw_wx_set_rate(struct net_device *dev,
5681 struct iw_request_info *info,
5682 union iwreq_data *wrqu, char *extra)
5684 IPW_DEBUG_WX("0x%p, 0x%p, 0x%p\n", dev, info, wrqu);
5688 static int ipw_wx_get_rate(struct net_device *dev,
5689 struct iw_request_info *info,
5690 union iwreq_data *wrqu, char *extra)
5692 struct ipw_priv *priv = ieee80211_priv(dev);
5693 wrqu->bitrate.value = priv->last_rate;
5695 IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
5699 static int ipw_wx_set_rts(struct net_device *dev,
5700 struct iw_request_info *info,
5701 union iwreq_data *wrqu, char *extra)
5703 struct ipw_priv *priv = ieee80211_priv(dev);
5705 if (wrqu->rts.disabled)
5706 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
5708 if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
5709 wrqu->rts.value > MAX_RTS_THRESHOLD)
5712 priv->rts_threshold = wrqu->rts.value;
5715 ipw_send_rts_threshold(priv, priv->rts_threshold);
5716 IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold);
5720 static int ipw_wx_get_rts(struct net_device *dev,
5721 struct iw_request_info *info,
5722 union iwreq_data *wrqu, char *extra)
5724 struct ipw_priv *priv = ieee80211_priv(dev);
5725 wrqu->rts.value = priv->rts_threshold;
5726 wrqu->rts.fixed = 0; /* no auto select */
5727 wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
5729 IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value);
5733 static int ipw_wx_set_txpow(struct net_device *dev,
5734 struct iw_request_info *info,
5735 union iwreq_data *wrqu, char *extra)
5737 struct ipw_priv *priv = ieee80211_priv(dev);
5738 struct ipw_tx_power tx_power;
5741 if (ipw_radio_kill_sw(priv, wrqu->power.disabled))
5742 return -EINPROGRESS;
5744 if (wrqu->power.flags != IW_TXPOW_DBM)
5747 if ((wrqu->power.value > 20) || (wrqu->power.value < -12))
5750 priv->tx_power = wrqu->power.value;
5752 memset(&tx_power, 0, sizeof(tx_power));
5754 /* configure device for 'G' band */
5755 tx_power.ieee_mode = IPW_G_MODE;
5756 tx_power.num_channels = 11;
5757 for (i = 0; i < 11; i++) {
5758 tx_power.channels_tx_power[i].channel_number = i + 1;
5759 tx_power.channels_tx_power[i].tx_power = priv->tx_power;
5761 if (ipw_send_tx_power(priv, &tx_power))
5764 /* configure device to also handle 'B' band */
5765 tx_power.ieee_mode = IPW_B_MODE;
5766 if (ipw_send_tx_power(priv, &tx_power))
5775 static int ipw_wx_get_txpow(struct net_device *dev,
5776 struct iw_request_info *info,
5777 union iwreq_data *wrqu, char *extra)
5779 struct ipw_priv *priv = ieee80211_priv(dev);
5781 wrqu->power.value = priv->tx_power;
5782 wrqu->power.fixed = 1;
5783 wrqu->power.flags = IW_TXPOW_DBM;
5784 wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
5786 IPW_DEBUG_WX("GET TX Power -> %s %d \n",
5787 wrqu->power.disabled ? "ON" : "OFF", wrqu->power.value);
5792 static int ipw_wx_set_frag(struct net_device *dev,
5793 struct iw_request_info *info,
5794 union iwreq_data *wrqu, char *extra)
5796 struct ipw_priv *priv = ieee80211_priv(dev);
5798 if (wrqu->frag.disabled)
5799 priv->ieee->fts = DEFAULT_FTS;
5801 if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
5802 wrqu->frag.value > MAX_FRAG_THRESHOLD)
5805 priv->ieee->fts = wrqu->frag.value & ~0x1;
5808 ipw_send_frag_threshold(priv, wrqu->frag.value);
5809 IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value);
5813 static int ipw_wx_get_frag(struct net_device *dev,
5814 struct iw_request_info *info,
5815 union iwreq_data *wrqu, char *extra)
5817 struct ipw_priv *priv = ieee80211_priv(dev);
5818 wrqu->frag.value = priv->ieee->fts;
5819 wrqu->frag.fixed = 0; /* no auto select */
5820 wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
5822 IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value);
5827 static int ipw_wx_set_retry(struct net_device *dev,
5828 struct iw_request_info *info,
5829 union iwreq_data *wrqu, char *extra)
5831 IPW_DEBUG_WX("0x%p, 0x%p, 0x%p\n", dev, info, wrqu);
5835 static int ipw_wx_get_retry(struct net_device *dev,
5836 struct iw_request_info *info,
5837 union iwreq_data *wrqu, char *extra)
5839 IPW_DEBUG_WX("0x%p, 0x%p, 0x%p\n", dev, info, wrqu);
5843 static int ipw_wx_set_scan(struct net_device *dev,
5844 struct iw_request_info *info,
5845 union iwreq_data *wrqu, char *extra)
5847 struct ipw_priv *priv = ieee80211_priv(dev);
5848 IPW_DEBUG_WX("Start scan\n");
5849 if (ipw_request_scan(priv))
5854 static int ipw_wx_get_scan(struct net_device *dev,
5855 struct iw_request_info *info,
5856 union iwreq_data *wrqu, char *extra)
5858 struct ipw_priv *priv = ieee80211_priv(dev);
5859 return ieee80211_wx_get_scan(priv->ieee, info, wrqu, extra);
5862 static int ipw_wx_set_encode(struct net_device *dev,
5863 struct iw_request_info *info,
5864 union iwreq_data *wrqu, char *key)
5866 struct ipw_priv *priv = ieee80211_priv(dev);
5867 return ieee80211_wx_set_encode(priv->ieee, info, wrqu, key);
5870 static int ipw_wx_get_encode(struct net_device *dev,
5871 struct iw_request_info *info,
5872 union iwreq_data *wrqu, char *key)
5874 struct ipw_priv *priv = ieee80211_priv(dev);
5875 return ieee80211_wx_get_encode(priv->ieee, info, wrqu, key);
5878 static int ipw_wx_set_power(struct net_device *dev,
5879 struct iw_request_info *info,
5880 union iwreq_data *wrqu, char *extra)
5882 struct ipw_priv *priv = ieee80211_priv(dev);
5885 if (wrqu->power.disabled) {
5886 priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
5887 err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
5889 IPW_DEBUG_WX("failed setting power mode.\n");
5893 IPW_DEBUG_WX("SET Power Management Mode -> off\n");
5898 switch (wrqu->power.flags & IW_POWER_MODE) {
5899 case IW_POWER_ON: /* If not specified */
5900 case IW_POWER_MODE: /* If set all mask */
5901 case IW_POWER_ALL_R: /* If explicitly stated all */
5903 default: /* Otherwise we don't support it */
5904 IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
5909 /* If the user hasn't specified a power management mode yet, default
5911 if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
5912 priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
5914 priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
5915 err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
5917 IPW_DEBUG_WX("failed setting power mode.\n");
5921 IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
5926 static int ipw_wx_get_power(struct net_device *dev,
5927 struct iw_request_info *info,
5928 union iwreq_data *wrqu, char *extra)
5930 struct ipw_priv *priv = ieee80211_priv(dev);
5932 if (!(priv->power_mode & IPW_POWER_ENABLED)) {
5933 wrqu->power.disabled = 1;
5935 wrqu->power.disabled = 0;
5938 IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
5943 static int ipw_wx_set_powermode(struct net_device *dev,
5944 struct iw_request_info *info,
5945 union iwreq_data *wrqu, char *extra)
5947 struct ipw_priv *priv = ieee80211_priv(dev);
5948 int mode = *(int *)extra;
5951 if ((mode < 1) || (mode > IPW_POWER_LIMIT)) {
5952 mode = IPW_POWER_AC;
5953 priv->power_mode = mode;
5955 priv->power_mode = IPW_POWER_ENABLED | mode;
5958 if (priv->power_mode != mode) {
5959 err = ipw_send_power_mode(priv, mode);
5962 IPW_DEBUG_WX("failed setting power mode.\n");
5970 #define MAX_WX_STRING 80
5971 static int ipw_wx_get_powermode(struct net_device *dev,
5972 struct iw_request_info *info,
5973 union iwreq_data *wrqu, char *extra)
5975 struct ipw_priv *priv = ieee80211_priv(dev);
5976 int level = IPW_POWER_LEVEL(priv->power_mode);
5979 p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
5983 p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
5985 case IPW_POWER_BATTERY:
5986 p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
5989 p += snprintf(p, MAX_WX_STRING - (p - extra),
5990 "(Timeout %dms, Period %dms)",
5991 timeout_duration[level - 1] / 1000,
5992 period_duration[level - 1] / 1000);
5995 if (!(priv->power_mode & IPW_POWER_ENABLED))
5996 p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");
5998 wrqu->data.length = p - extra + 1;
6003 static int ipw_wx_set_wireless_mode(struct net_device *dev,
6004 struct iw_request_info *info,
6005 union iwreq_data *wrqu, char *extra)
6007 struct ipw_priv *priv = ieee80211_priv(dev);
6008 int mode = *(int *)extra;
6009 u8 band = 0, modulation = 0;
6011 if (mode == 0 || mode & ~IEEE_MODE_MASK) {
6012 IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
6016 if (priv->adapter == IPW_2915ABG) {
6017 priv->ieee->abg_true = 1;
6018 if (mode & IEEE_A) {
6019 band |= IEEE80211_52GHZ_BAND;
6020 modulation |= IEEE80211_OFDM_MODULATION;
6022 priv->ieee->abg_true = 0;
6024 if (mode & IEEE_A) {
6025 IPW_WARNING("Attempt to set 2200BG into "
6030 priv->ieee->abg_true = 0;
6033 if (mode & IEEE_B) {
6034 band |= IEEE80211_24GHZ_BAND;
6035 modulation |= IEEE80211_CCK_MODULATION;
6037 priv->ieee->abg_true = 0;
6039 if (mode & IEEE_G) {
6040 band |= IEEE80211_24GHZ_BAND;
6041 modulation |= IEEE80211_OFDM_MODULATION;
6043 priv->ieee->abg_true = 0;
6045 priv->ieee->mode = mode;
6046 priv->ieee->freq_band = band;
6047 priv->ieee->modulation = modulation;
6048 init_supported_rates(priv, &priv->rates);
6050 /* If we are currently associated, or trying to associate
6051 * then see if this is a new configuration (causing us to
6053 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
6054 /* The resulting association will trigger
6055 * the new rates to be sent to the device */
6056 IPW_DEBUG_ASSOC("Disassociating due to mode change.\n");
6057 ipw_disassociate(priv);
6059 ipw_send_supported_rates(priv, &priv->rates);
6061 IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
6062 mode & IEEE_A ? 'a' : '.',
6063 mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
6067 static int ipw_wx_get_wireless_mode(struct net_device *dev,
6068 struct iw_request_info *info,
6069 union iwreq_data *wrqu, char *extra)
6071 struct ipw_priv *priv = ieee80211_priv(dev);
6073 switch (priv->ieee->freq_band) {
6074 case IEEE80211_24GHZ_BAND:
6075 switch (priv->ieee->modulation) {
6076 case IEEE80211_CCK_MODULATION:
6077 strncpy(extra, "802.11b (2)", MAX_WX_STRING);
6079 case IEEE80211_OFDM_MODULATION:
6080 strncpy(extra, "802.11g (4)", MAX_WX_STRING);
6083 strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
6088 case IEEE80211_52GHZ_BAND:
6089 strncpy(extra, "802.11a (1)", MAX_WX_STRING);
6092 default: /* Mixed Band */
6093 switch (priv->ieee->modulation) {
6094 case IEEE80211_CCK_MODULATION:
6095 strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
6097 case IEEE80211_OFDM_MODULATION:
6098 strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
6101 strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
6107 IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
6109 wrqu->data.length = strlen(extra) + 1;
6114 #ifdef CONFIG_IPW_PROMISC
6115 static int ipw_wx_set_promisc(struct net_device *dev,
6116 struct iw_request_info *info,
6117 union iwreq_data *wrqu, char *extra)
6119 struct ipw_priv *priv = ieee80211_priv(dev);
6120 int *parms = (int *)extra;
6121 int enable = (parms[0] > 0);
6123 IPW_DEBUG_WX("SET PROMISC: %d %d\n", enable, parms[1]);
6125 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
6126 priv->net_dev->type = ARPHRD_IEEE80211;
6127 ipw_adapter_restart(priv);
6130 ipw_set_channel(priv, parms[1]);
6132 if (priv->ieee->iw_mode != IW_MODE_MONITOR)
6134 priv->net_dev->type = ARPHRD_ETHER;
6135 ipw_adapter_restart(priv);
6140 static int ipw_wx_reset(struct net_device *dev,
6141 struct iw_request_info *info,
6142 union iwreq_data *wrqu, char *extra)
6144 struct ipw_priv *priv = ieee80211_priv(dev);
6145 IPW_DEBUG_WX("RESET\n");
6146 ipw_adapter_restart(priv);
6149 #endif /* CONFIG_IPW_PROMISC */
6151 /* Rebase the WE IOCTLs to zero for the handler array */
6152 #define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
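/* Illustrative example (added): an entry written as
 *	IW_IOCTL(SIOCGIWNAME) = ipw_wx_get_name,
 * expands to the designated initializer
 *	[SIOCGIWNAME - SIOCSIWCOMMIT] = ipw_wx_get_name,
 * so the table below is indexed relative to the first wireless ioctl
 * (SIOCSIWCOMMIT) instead of by absolute ioctl number. */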
6153 static iw_handler ipw_wx_handlers[] = {
6154 IW_IOCTL(SIOCGIWNAME) = ipw_wx_get_name,
6155 IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq,
6156 IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq,
6157 IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode,
6158 IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode,
6159 IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range,
6160 IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap,
6161 IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap,
6162 IW_IOCTL(SIOCSIWSCAN) = ipw_wx_set_scan,
6163 IW_IOCTL(SIOCGIWSCAN) = ipw_wx_get_scan,
6164 IW_IOCTL(SIOCSIWESSID) = ipw_wx_set_essid,
6165 IW_IOCTL(SIOCGIWESSID) = ipw_wx_get_essid,
6166 IW_IOCTL(SIOCSIWNICKN) = ipw_wx_set_nick,
6167 IW_IOCTL(SIOCGIWNICKN) = ipw_wx_get_nick,
6168 IW_IOCTL(SIOCSIWRATE) = ipw_wx_set_rate,
6169 IW_IOCTL(SIOCGIWRATE) = ipw_wx_get_rate,
6170 IW_IOCTL(SIOCSIWRTS) = ipw_wx_set_rts,
6171 IW_IOCTL(SIOCGIWRTS) = ipw_wx_get_rts,
6172 IW_IOCTL(SIOCSIWFRAG) = ipw_wx_set_frag,
6173 IW_IOCTL(SIOCGIWFRAG) = ipw_wx_get_frag,
6174 IW_IOCTL(SIOCSIWTXPOW) = ipw_wx_set_txpow,
6175 IW_IOCTL(SIOCGIWTXPOW) = ipw_wx_get_txpow,
6176 IW_IOCTL(SIOCSIWRETRY) = ipw_wx_set_retry,
6177 IW_IOCTL(SIOCGIWRETRY) = ipw_wx_get_retry,
6178 IW_IOCTL(SIOCSIWENCODE) = ipw_wx_set_encode,
6179 IW_IOCTL(SIOCGIWENCODE) = ipw_wx_get_encode,
6180 IW_IOCTL(SIOCSIWPOWER) = ipw_wx_set_power,
6181 IW_IOCTL(SIOCGIWPOWER) = ipw_wx_get_power,
6184 #define IPW_PRIV_SET_POWER SIOCIWFIRSTPRIV
6185 #define IPW_PRIV_GET_POWER SIOCIWFIRSTPRIV+1
6186 #define IPW_PRIV_SET_MODE SIOCIWFIRSTPRIV+2
6187 #define IPW_PRIV_GET_MODE SIOCIWFIRSTPRIV+3
6188 #define IPW_PRIV_SET_PROMISC SIOCIWFIRSTPRIV+4
6189 #define IPW_PRIV_RESET SIOCIWFIRSTPRIV+5
6191 static struct iw_priv_args ipw_priv_args[] = {
6193 .cmd = IPW_PRIV_SET_POWER,
6194 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
6195 .name = "set_power"},
6197 .cmd = IPW_PRIV_GET_POWER,
6198 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
6199 .name = "get_power"},
6201 .cmd = IPW_PRIV_SET_MODE,
6202 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
6203 .name = "set_mode"},
6205 .cmd = IPW_PRIV_GET_MODE,
6206 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
6207 .name = "get_mode"},
6208 #ifdef CONFIG_IPW_PROMISC
6210 IPW_PRIV_SET_PROMISC,
6211 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
6214 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
6215 #endif /* CONFIG_IPW_PROMISC */
6218 static iw_handler ipw_priv_handler[] = {
6219 ipw_wx_set_powermode,
6220 ipw_wx_get_powermode,
6221 ipw_wx_set_wireless_mode,
6222 ipw_wx_get_wireless_mode,
6223 #ifdef CONFIG_IPW_PROMISC
6229 static struct iw_handler_def ipw_wx_handler_def = {
6230 .standard = ipw_wx_handlers,
6231 .num_standard = ARRAY_SIZE(ipw_wx_handlers),
6232 .num_private = ARRAY_SIZE(ipw_priv_handler),
6233 .num_private_args = ARRAY_SIZE(ipw_priv_args),
6234 .private = ipw_priv_handler,
6235 .private_args = ipw_priv_args,
6239 * Get wireless statistics.
6240 * Called by /proc/net/wireless
6241 * Also called by SIOCGIWSTATS
6243 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
6245 struct ipw_priv *priv = ieee80211_priv(dev);
6246 struct iw_statistics *wstats;
6248 wstats = &priv->wstats;
6250 /* if hw is disabled, then ipw2100_get_ordinal() can't be called.
6251 * ipw2100_wx_wireless_stats seems to be called before fw is
6252 * initialized. STATUS_ASSOCIATED will only be set if the hw is up
6253 * and associated; if not associated, the values are all meaningless
6254 * anyway, so zero them all and mark them INVALID */
6255 if (!(priv->status & STATUS_ASSOCIATED)) {
6256 wstats->miss.beacon = 0;
6257 wstats->discard.retries = 0;
6258 wstats->qual.qual = 0;
6259 wstats->qual.level = 0;
6260 wstats->qual.noise = 0;
6261 wstats->qual.updated = 7;
6262 wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
6263 IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
6267 wstats->qual.qual = priv->quality;
6268 wstats->qual.level = average_value(&priv->average_rssi);
6269 wstats->qual.noise = average_value(&priv->average_noise);
6270 wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
6271 IW_QUAL_NOISE_UPDATED;
6273 wstats->miss.beacon = average_value(&priv->average_missed_beacons);
6274 wstats->discard.retries = priv->last_tx_failures;
6275 wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;
6277 /* if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
6278 goto fail_get_ordinal;
6279 wstats->discard.retries += tx_retry; */
6284 /* net device stuff */
6286 static inline void init_sys_config(struct ipw_sys_config *sys_config)
6288 memset(sys_config, 0, sizeof(struct ipw_sys_config));
6289 sys_config->bt_coexistence = 1; /* We may need to look into prvStaBtConfig */
6290 sys_config->answer_broadcast_ssid_probe = 0;
6291 sys_config->accept_all_data_frames = 0;
6292 sys_config->accept_non_directed_frames = 1;
6293 sys_config->exclude_unicast_unencrypted = 0;
6294 sys_config->disable_unicast_decryption = 1;
6295 sys_config->exclude_multicast_unencrypted = 0;
6296 sys_config->disable_multicast_decryption = 1;
6297 sys_config->antenna_diversity = CFG_SYS_ANTENNA_BOTH;
6298 sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */
6299 sys_config->dot11g_auto_detection = 0;
6300 sys_config->enable_cts_to_self = 0;
6301 sys_config->bt_coexist_collision_thr = 0;
6302 sys_config->pass_noise_stats_to_host = 1;
6305 static int ipw_net_open(struct net_device *dev)
6307 struct ipw_priv *priv = ieee80211_priv(dev);
6308 IPW_DEBUG_INFO("dev->open\n");
6309 /* we should be verifying the device is ready to be opened */
6310 if (!(priv->status & STATUS_RF_KILL_MASK) &&
6311 (priv->status & STATUS_ASSOCIATED))
6312 netif_start_queue(dev);
6316 static int ipw_net_stop(struct net_device *dev)
6318 IPW_DEBUG_INFO("dev->close\n");
6319 netif_stop_queue(dev);
6326 Modify to send one TFD per fragment instead of using chunking; otherwise
6327 we need to heavily modify ieee80211_skb_to_txb.
6330 static inline void ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb)
6332 struct ieee80211_hdr_3addr *hdr = (struct ieee80211_hdr_3addr *)
6333 txb->fragments[0]->data;
6335 struct tfd_frame *tfd;
6336 struct clx2_tx_queue *txq = &priv->txq[0];
6337 struct clx2_queue *q = &txq->q;
6338 u8 id, hdr_len, unicast;
6339 u16 remaining_bytes;
6341 switch (priv->ieee->iw_mode) {
6343 hdr_len = IEEE80211_3ADDR_LEN;
6344 unicast = !is_broadcast_ether_addr(hdr->addr1) &&
6345 !is_multicast_ether_addr(hdr->addr1);
6346 id = ipw_find_station(priv, hdr->addr1);
6347 if (id == IPW_INVALID_STATION) {
6348 id = ipw_add_station(priv, hdr->addr1);
6349 if (id == IPW_INVALID_STATION) {
6350 IPW_WARNING("Attempt to send data to "
6351 "invalid cell: " MAC_FMT "\n",
6352 MAC_ARG(hdr->addr1));
6360 unicast = !is_broadcast_ether_addr(hdr->addr3) &&
6361 !is_multicast_ether_addr(hdr->addr3);
6362 hdr_len = IEEE80211_3ADDR_LEN;
6367 tfd = &txq->bd[q->first_empty];
6368 txq->txb[q->first_empty] = txb;
6369 memset(tfd, 0, sizeof(*tfd));
6370 tfd->u.data.station_number = id;
6372 tfd->control_flags.message_type = TX_FRAME_TYPE;
6373 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
6375 tfd->u.data.cmd_id = DINO_CMD_TX;
6376 tfd->u.data.len = txb->payload_size;
6377 remaining_bytes = txb->payload_size;
6378 if (unlikely(!unicast))
6379 tfd->u.data.tx_flags = DCT_FLAG_NO_WEP;
6381 tfd->u.data.tx_flags = DCT_FLAG_NO_WEP | DCT_FLAG_ACK_REQD;
6383 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
6384 tfd->u.data.tx_flags_ext = DCT_FLAG_EXT_MODE_CCK;
6386 tfd->u.data.tx_flags_ext = DCT_FLAG_EXT_MODE_OFDM;
6388 if (priv->config & CFG_PREAMBLE)
6389 tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREMBL;
6391 memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);
6394 tfd->u.data.num_chunks = min((u8) (NUM_TFD_CHUNKS - 2), txb->nr_frags);
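	/* Note (descriptive, added): at most NUM_TFD_CHUNKS - 2 fragments are
	 * mapped directly into the TFD; any fragments beyond that are
	 * coalesced further below into a single linear skb attached as one
	 * extra chunk. */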
6395 for (i = 0; i < tfd->u.data.num_chunks; i++) {
6396 IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
6397 i, tfd->u.data.num_chunks,
6398 txb->fragments[i]->len - hdr_len);
6399 printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
6400 txb->fragments[i]->len - hdr_len);
6402 tfd->u.data.chunk_ptr[i] =
6403 pci_map_single(priv->pci_dev,
6404 txb->fragments[i]->data + hdr_len,
6405 txb->fragments[i]->len - hdr_len,
6407 tfd->u.data.chunk_len[i] = txb->fragments[i]->len - hdr_len;
6410 if (i != txb->nr_frags) {
6411 struct sk_buff *skb;
6412 u16 remaining_bytes = 0;
6415 for (j = i; j < txb->nr_frags; j++)
6416 remaining_bytes += txb->fragments[j]->len - hdr_len;
6418 printk(KERN_INFO "Trying to reallocate for %d bytes\n",
6420 skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
6422 tfd->u.data.chunk_len[i] = remaining_bytes;
6423 for (j = i; j < txb->nr_frags; j++) {
6424 int size = txb->fragments[j]->len - hdr_len;
6425 printk(KERN_INFO "Adding frag %d %d...\n",
6427 memcpy(skb_put(skb, size),
6428 txb->fragments[j]->data + hdr_len, size);
6430 dev_kfree_skb_any(txb->fragments[i]);
6431 txb->fragments[i] = skb;
6432 tfd->u.data.chunk_ptr[i] =
6433 pci_map_single(priv->pci_dev, skb->data,
6434 tfd->u.data.chunk_len[i],
6436 tfd->u.data.num_chunks++;
6441 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
6442 ipw_write32(priv, q->reg_w, q->first_empty);
6444 if (ipw_queue_space(q) < q->high_mark)
6445 netif_stop_queue(priv->net_dev);
6450 IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
6451 ieee80211_txb_free(txb);
6454 static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb,
6455 struct net_device *dev, int pri)
6457 struct ipw_priv *priv = ieee80211_priv(dev);
6458 unsigned long flags;
6460 IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
6462 spin_lock_irqsave(&priv->lock, flags);
6464 if (!(priv->status & STATUS_ASSOCIATED)) {
6465 IPW_DEBUG_INFO("Tx attempt while not associated.\n");
6466 priv->ieee->stats.tx_carrier_errors++;
6467 netif_stop_queue(dev);
6471 ipw_tx_skb(priv, txb);
6473 spin_unlock_irqrestore(&priv->lock, flags);
6477 spin_unlock_irqrestore(&priv->lock, flags);
6481 static struct net_device_stats *ipw_net_get_stats(struct net_device *dev)
6483 struct ipw_priv *priv = ieee80211_priv(dev);
6485 priv->ieee->stats.tx_packets = priv->tx_packets;
6486 priv->ieee->stats.rx_packets = priv->rx_packets;
6487 return &priv->ieee->stats;
6490 static void ipw_net_set_multicast_list(struct net_device *dev)
6495 static int ipw_net_set_mac_address(struct net_device *dev, void *p)
6497 struct ipw_priv *priv = ieee80211_priv(dev);
6498 struct sockaddr *addr = p;
6499 if (!is_valid_ether_addr(addr->sa_data))
6500 return -EADDRNOTAVAIL;
6501 priv->config |= CFG_CUSTOM_MAC;
6502 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
6503 printk(KERN_INFO "%s: Setting MAC to " MAC_FMT "\n",
6504 priv->net_dev->name, MAC_ARG(priv->mac_addr));
6505 ipw_adapter_restart(priv);
6509 static void ipw_ethtool_get_drvinfo(struct net_device *dev,
6510 struct ethtool_drvinfo *info)
6512 struct ipw_priv *p = ieee80211_priv(dev);
6517 strcpy(info->driver, DRV_NAME);
6518 strcpy(info->version, DRV_VERSION);
6521 ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
6523 ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
6525 snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
6527 strcpy(info->bus_info, pci_name(p->pci_dev));
6528 info->eedump_len = CX2_EEPROM_IMAGE_SIZE;
6531 static u32 ipw_ethtool_get_link(struct net_device *dev)
6533 struct ipw_priv *priv = ieee80211_priv(dev);
6534 return (priv->status & STATUS_ASSOCIATED) != 0;
6537 static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
6539 return CX2_EEPROM_IMAGE_SIZE;
6542 static int ipw_ethtool_get_eeprom(struct net_device *dev,
6543 struct ethtool_eeprom *eeprom, u8 * bytes)
6545 struct ipw_priv *p = ieee80211_priv(dev);
6547 if (eeprom->offset + eeprom->len > CX2_EEPROM_IMAGE_SIZE)
6550 memcpy(bytes, &((u8 *) p->eeprom)[eeprom->offset], eeprom->len);
6554 static int ipw_ethtool_set_eeprom(struct net_device *dev,
6555 struct ethtool_eeprom *eeprom, u8 * bytes)
6557 struct ipw_priv *p = ieee80211_priv(dev);
6560 if (eeprom->offset + eeprom->len > CX2_EEPROM_IMAGE_SIZE)
6563 memcpy(&((u8 *) p->eeprom)[eeprom->offset], bytes, eeprom->len);
6564 for (i = IPW_EEPROM_DATA;
6565 i < IPW_EEPROM_DATA + CX2_EEPROM_IMAGE_SIZE; i++)
6566 ipw_write8(p, i, p->eeprom[i]);
6571 static struct ethtool_ops ipw_ethtool_ops = {
6572 .get_link = ipw_ethtool_get_link,
6573 .get_drvinfo = ipw_ethtool_get_drvinfo,
6574 .get_eeprom_len = ipw_ethtool_get_eeprom_len,
6575 .get_eeprom = ipw_ethtool_get_eeprom,
6576 .set_eeprom = ipw_ethtool_set_eeprom,
6579 static irqreturn_t ipw_isr(int irq, void *data, struct pt_regs *regs)
6581 struct ipw_priv *priv = data;
6582 u32 inta, inta_mask;
6587 spin_lock(&priv->lock);
6589 if (!(priv->status & STATUS_INT_ENABLED)) {
6594 inta = ipw_read32(priv, CX2_INTA_RW);
6595 inta_mask = ipw_read32(priv, CX2_INTA_MASK_R);
6597 if (inta == 0xFFFFFFFF) {
6598 /* Hardware disappeared */
6599 IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
6603 if (!(inta & (CX2_INTA_MASK_ALL & inta_mask))) {
6604 /* Shared interrupt */
6608 /* tell the device to stop sending interrupts */
6609 ipw_disable_interrupts(priv);
6611 /* ack current interrupts */
6612 inta &= (CX2_INTA_MASK_ALL & inta_mask);
6613 ipw_write32(priv, CX2_INTA_RW, inta);
6615 /* Cache INTA value for our tasklet */
6616 priv->isr_inta = inta;
6618 tasklet_schedule(&priv->irq_tasklet);
6620 spin_unlock(&priv->lock);
6624 spin_unlock(&priv->lock);
6628 static void ipw_rf_kill(void *adapter)
6630 struct ipw_priv *priv = adapter;
6631 unsigned long flags;
6633 spin_lock_irqsave(&priv->lock, flags);
6635 if (rf_kill_active(priv)) {
6636 IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
6637 if (priv->workqueue)
6638 queue_delayed_work(priv->workqueue,
6639 &priv->rf_kill, 2 * HZ);
6643 /* RF Kill is now disabled, so bring the device back up */
6645 if (!(priv->status & STATUS_RF_KILL_MASK)) {
6646 IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
6649 /* we can not do an adapter restart while inside an irq lock */
6650 queue_work(priv->workqueue, &priv->adapter_restart);
6652 IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
6656 spin_unlock_irqrestore(&priv->lock, flags);
static int ipw_setup_deferred_work(struct ipw_priv *priv)
{
	int ret = 0;

	priv->workqueue = create_workqueue(DRV_NAME);
	init_waitqueue_head(&priv->wait_command_queue);

	INIT_WORK(&priv->adhoc_check, ipw_adhoc_check, priv);
	INIT_WORK(&priv->associate, ipw_associate, priv);
	INIT_WORK(&priv->disassociate, ipw_disassociate, priv);
	INIT_WORK(&priv->rx_replenish, ipw_rx_queue_replenish, priv);
	INIT_WORK(&priv->adapter_restart, ipw_adapter_restart, priv);
	INIT_WORK(&priv->rf_kill, ipw_rf_kill, priv);
	INIT_WORK(&priv->up, (void (*)(void *))ipw_up, priv);
	INIT_WORK(&priv->down, (void (*)(void *))ipw_down, priv);
	INIT_WORK(&priv->request_scan,
		  (void (*)(void *))ipw_request_scan, priv);
	INIT_WORK(&priv->gather_stats,
		  (void (*)(void *))ipw_gather_stats, priv);
	INIT_WORK(&priv->abort_scan, (void (*)(void *))ipw_abort_scan, priv);
	INIT_WORK(&priv->roam, ipw_roam, priv);
	INIT_WORK(&priv->scan_check, ipw_scan_check, priv);

	tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
		     ipw_irq_tasklet, (unsigned long)priv);

	return ret;
}
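
/* ieee80211 set_security callback: copy the supplied WEP keys, active key
 * index, auth mode, privacy setting and security level into the driver's
 * private security state and flag it for upload to the firmware. */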
static void shim__set_security(struct net_device *dev,
			       struct ieee80211_security *sec)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	int i;

	for (i = 0; i < 4; i++) {
		if (sec->flags & (1 << i)) {
			priv->sec.key_sizes[i] = sec->key_sizes[i];
			if (sec->key_sizes[i] == 0)
				priv->sec.flags &= ~(1 << i);
			else
				memcpy(priv->sec.keys[i], sec->keys[i],
				       sec->key_sizes[i]);
			priv->sec.flags |= (1 << i);
			priv->status |= STATUS_SECURITY_UPDATED;
		}
	}

	if ((sec->flags & SEC_ACTIVE_KEY) &&
	    priv->sec.active_key != sec->active_key) {
		if (sec->active_key <= 3) {
			priv->sec.active_key = sec->active_key;
			priv->sec.flags |= SEC_ACTIVE_KEY;
		} else
			priv->sec.flags &= ~SEC_ACTIVE_KEY;
		priv->status |= STATUS_SECURITY_UPDATED;
	}

	if ((sec->flags & SEC_AUTH_MODE) &&
	    (priv->sec.auth_mode != sec->auth_mode)) {
		priv->sec.auth_mode = sec->auth_mode;
		priv->sec.flags |= SEC_AUTH_MODE;
		if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
			priv->capability |= CAP_SHARED_KEY;
		else
			priv->capability &= ~CAP_SHARED_KEY;
		priv->status |= STATUS_SECURITY_UPDATED;
	}

	if (sec->flags & SEC_ENABLED && priv->sec.enabled != sec->enabled) {
		priv->sec.flags |= SEC_ENABLED;
		priv->sec.enabled = sec->enabled;
		priv->status |= STATUS_SECURITY_UPDATED;
		if (sec->enabled)
			priv->capability |= CAP_PRIVACY_ON;
		else
			priv->capability &= ~CAP_PRIVACY_ON;
	}

	if (sec->flags & SEC_LEVEL && priv->sec.level != sec->level) {
		priv->sec.level = sec->level;
		priv->sec.flags |= SEC_LEVEL;
		priv->status |= STATUS_SECURITY_UPDATED;
	}

	/* To match current functionality of ipw2100 (which works well w/
	 * various supplicants), we don't force a disassociate if the
	 * privacy capability changes ... */
#if 0
	if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
	    (((priv->assoc_request.capability &
	       WLAN_CAPABILITY_PRIVACY) && !sec->enabled) ||
	     (!(priv->assoc_request.capability &
		WLAN_CAPABILITY_PRIVACY) && sec->enabled))) {
		IPW_DEBUG_ASSOC("Disassociating due to capability "
				"change.\n");
		ipw_disassociate(priv);
	}
#endif
}
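
/* Build the rate set advertised to the firmware, based on the band and
 * modulations the interface is configured for. */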
static int init_supported_rates(struct ipw_priv *priv,
				struct ipw_supported_rates *rates)
{
	/* TODO: Mask out rates based on priv->rates_mask */

	memset(rates, 0, sizeof(*rates));
	/* configure supported rates */
	switch (priv->ieee->freq_band) {
	case IEEE80211_52GHZ_BAND:
		rates->ieee_mode = IPW_A_MODE;
		rates->purpose = IPW_RATE_CAPABILITIES;
		ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
					IEEE80211_OFDM_DEFAULT_RATES_MASK);
		break;

	default:		/* Mixed or 2.4GHz */
		rates->ieee_mode = IPW_G_MODE;
		rates->purpose = IPW_RATE_CAPABILITIES;
		ipw_add_cck_scan_rates(rates, IEEE80211_CCK_MODULATION,
				       IEEE80211_CCK_DEFAULT_RATES_MASK);
		if (priv->ieee->modulation & IEEE80211_OFDM_MODULATION) {
			ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
						IEEE80211_OFDM_DEFAULT_RATES_MASK);
		}
		break;
	}

	return 0;
}
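
/* Push the full device configuration to the firmware: tx power, MAC
 * address, system config, supported rates and RTS threshold, then move the
 * firmware into the RUN state and optionally kick off a scan. */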
static int ipw_config(struct ipw_priv *priv)
{
	int i;
	struct ipw_tx_power tx_power;

	memset(&priv->sys_config, 0, sizeof(priv->sys_config));
	memset(&tx_power, 0, sizeof(tx_power));

	/* This is only called from ipw_up, which resets/reloads the firmware,
	   so we don't need to first disable the card before we configure
	   it */

	/* configure device for 'G' band */
	tx_power.ieee_mode = IPW_G_MODE;
	tx_power.num_channels = 11;
	for (i = 0; i < 11; i++) {
		tx_power.channels_tx_power[i].channel_number = i + 1;
		tx_power.channels_tx_power[i].tx_power = priv->tx_power;
	}
	if (ipw_send_tx_power(priv, &tx_power))
		goto error;

	/* configure device to also handle 'B' band */
	tx_power.ieee_mode = IPW_B_MODE;
	if (ipw_send_tx_power(priv, &tx_power))
		goto error;

	/* initialize adapter address */
	if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
		goto error;

	/* set basic system config settings */
	init_sys_config(&priv->sys_config);
	if (ipw_send_system_config(priv, &priv->sys_config))
		goto error;

	init_supported_rates(priv, &priv->rates);
	if (ipw_send_supported_rates(priv, &priv->rates))
		goto error;

	/* Set request-to-send threshold */
	if (priv->rts_threshold) {
		if (ipw_send_rts_threshold(priv, priv->rts_threshold))
			goto error;
	}

	if (ipw_set_random_seed(priv))
		goto error;

	/* final state transition to the RUN state */
	if (ipw_send_host_complete(priv))
		goto error;

	/* If configured to try and auto-associate, kick off a scan */
	if ((priv->config & CFG_ASSOCIATE) && ipw_request_scan(priv))
		goto error;

	return 0;

      error:
	return -EIO;
}
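
/* Bring the interface up: load firmware and program the configuration,
 * retrying the whole sequence a limited number of times on failure. */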
#define MAX_HW_RESTARTS 5
static int ipw_up(struct ipw_priv *priv)
{
	int rc, i;

	if (priv->status & STATUS_EXIT_PENDING)
		return -EIO;

	for (i = 0; i < MAX_HW_RESTARTS; i++) {
		/* Load the microcode, firmware, and eeprom.
		 * Also start the clocks. */
		rc = ipw_load(priv);
		if (rc) {
			IPW_ERROR("Unable to load firmware: 0x%08X\n", rc);
			return rc;
		}

		ipw_init_ordinals(priv);
		if (!(priv->config & CFG_CUSTOM_MAC))
			eeprom_parse_mac(priv, priv->mac_addr);
		memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);

		if (priv->status & STATUS_RF_KILL_MASK)
			return 0;

		rc = ipw_config(priv);
		if (!rc) {
			IPW_DEBUG_INFO("Configured device on count %i\n", i);
			priv->notif_missed_beacons = 0;
			netif_start_queue(priv->net_dev);
			return 0;
		}

		IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", rc);
		IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
			       i, MAX_HW_RESTARTS);

		/* We had an error bringing up the hardware, so take it
		 * all the way back down so we can try again */
		ipw_down(priv);
	}

	/* tried to restart and config the device for as long as our
	 * patience could withstand */
	IPW_ERROR("Unable to initialize device after %d attempts.\n", i);
	return -EIO;
}
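
/* Take the interface down: stop interrupts, clear all status bits except
 * RF kill, and stop the network queue. */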
static void ipw_down(struct ipw_priv *priv)
{
	/* Attempt to disable the card */
#if 0
	ipw_send_card_disable(priv, 0);
#endif

	/* tell the device to stop sending interrupts */
	ipw_disable_interrupts(priv);

	/* Clear all bits but the RF Kill */
	priv->status &= STATUS_RF_KILL_MASK;

	netif_carrier_off(priv->net_dev);
	netif_stop_queue(priv->net_dev);

	ipw_stop_nic(priv);
}
/* Called by register_netdev() */
static int ipw_net_init(struct net_device *dev)
{
	struct ipw_priv *priv = ieee80211_priv(dev);

	if (priv->status & STATUS_RF_KILL_SW) {
		IPW_WARNING("Radio disabled by module parameter.\n");
		return 0;
	} else if (rf_kill_active(priv)) {
		IPW_WARNING("Radio Frequency Kill Switch is On:\n"
			    "Kill switch must be turned off for "
			    "wireless networking to work.\n");
		queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
		return 0;
	}

	if (ipw_up(priv))
		return -EIO;

	return 0;
}
/* PCI driver stuff */
static struct pci_device_id card_ids[] = {
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x104f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x4220, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},	/* BG */
	{PCI_VENDOR_ID_INTEL, 0x4221, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},	/* 2225BG */
	{PCI_VENDOR_ID_INTEL, 0x4223, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},	/* ABG */
	{PCI_VENDOR_ID_INTEL, 0x4224, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},	/* ABG */

	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, card_ids);
static struct attribute *ipw_sysfs_entries[] = {
	&dev_attr_rf_kill.attr,
	&dev_attr_direct_dword.attr,
	&dev_attr_indirect_byte.attr,
	&dev_attr_indirect_dword.attr,
	&dev_attr_mem_gpio_reg.attr,
	&dev_attr_command_event_reg.attr,
	&dev_attr_nic_type.attr,
	&dev_attr_status.attr,
	&dev_attr_dump_errors.attr,
	&dev_attr_dump_events.attr,
	&dev_attr_eeprom_delay.attr,
	&dev_attr_ucode_version.attr,
	NULL
};

static struct attribute_group ipw_attribute_group = {
	.name = NULL,		/* put in device directory */
	.attrs = ipw_sysfs_entries,
};
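
/* PCI probe: allocate the ieee80211/net_device, map the device registers,
 * apply module parameters, identify the adapter (2200BG/2225BG/2915ABG),
 * hook up the netdev, wireless and ethtool handlers, and register with the
 * network and sysfs layers. */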
static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err = 0;
	struct net_device *net_dev;
	void __iomem *base;
	u32 length, val;
	struct ipw_priv *priv;
	int band, modulation;

	net_dev = alloc_ieee80211(sizeof(struct ipw_priv));
	if (net_dev == NULL) {
		err = -ENOMEM;
		goto out;
	}

	priv = ieee80211_priv(net_dev);
	priv->ieee = netdev_priv(net_dev);
	priv->net_dev = net_dev;
	priv->pci_dev = pdev;
#ifdef CONFIG_IPW_DEBUG
	ipw_debug_level = debug;
#endif
	spin_lock_init(&priv->lock);

	if (pci_enable_device(pdev)) {
		err = -ENODEV;
		goto out_free_ieee80211;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
	if (err) {
		printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
		goto out_pci_disable_device;
	}

	pci_set_drvdata(pdev, priv);

	err = pci_request_regions(pdev, DRV_NAME);
	if (err)
		goto out_pci_disable_device;

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_read_config_dword(pdev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);

	length = pci_resource_len(pdev, 0);
	priv->hw_len = length;

	base = ioremap_nocache(pci_resource_start(pdev, 0), length);
	if (!base) {
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	priv->hw_base = base;
	IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
	IPW_DEBUG_INFO("pci_resource_base = %p\n", base);

	err = ipw_setup_deferred_work(priv);
	if (err) {
		IPW_ERROR("Unable to setup deferred work\n");
		goto out_iounmap;
	}

	/* Initialize module parameter values here */

	strncpy(net_dev->name, ifname, IFNAMSIZ);

	if (associate)
		priv->config |= CFG_ASSOCIATE;
	else
		IPW_DEBUG_INFO("Auto associate disabled.\n");

	if (auto_create)
		priv->config |= CFG_ADHOC_CREATE;
	else
		IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");

	if (disable) {
		priv->status |= STATUS_RF_KILL_SW;
		IPW_DEBUG_INFO("Radio disabled.\n");
	}

	if (channel != 0) {
		priv->config |= CFG_STATIC_CHANNEL;
		priv->channel = channel;
		IPW_DEBUG_INFO("Bind to static channel %d\n", channel);
		/* TODO: Validate that provided channel is in range */
	}

	switch (mode) {
	case 1:
		priv->ieee->iw_mode = IW_MODE_ADHOC;
		break;
#ifdef CONFIG_IPW_PROMISC
	case 2:
		priv->ieee->iw_mode = IW_MODE_MONITOR;
		break;
#endif
	default:
		priv->ieee->iw_mode = IW_MODE_INFRA;
		break;
	}

	if ((priv->pci_dev->device == 0x4223) ||
	    (priv->pci_dev->device == 0x4224)) {
		printk(KERN_INFO DRV_NAME
		       ": Detected Intel PRO/Wireless 2915ABG Network "
		       "Connection\n");
		priv->ieee->abg_true = 1;
		band = IEEE80211_52GHZ_BAND | IEEE80211_24GHZ_BAND;
		modulation = IEEE80211_OFDM_MODULATION |
		    IEEE80211_CCK_MODULATION;
		priv->adapter = IPW_2915ABG;
		priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
	} else {
		if (priv->pci_dev->device == 0x4221)
			printk(KERN_INFO DRV_NAME
			       ": Detected Intel PRO/Wireless 2225BG Network "
			       "Connection\n");
		else
			printk(KERN_INFO DRV_NAME
			       ": Detected Intel PRO/Wireless 2200BG Network "
			       "Connection\n");

		priv->ieee->abg_true = 0;
		band = IEEE80211_24GHZ_BAND;
		modulation = IEEE80211_OFDM_MODULATION |
		    IEEE80211_CCK_MODULATION;
		priv->adapter = IPW_2200BG;
		priv->ieee->mode = IEEE_G | IEEE_B;
	}

	priv->ieee->freq_band = band;
	priv->ieee->modulation = modulation;

	priv->rates_mask = IEEE80211_DEFAULT_RATES_MASK;

	priv->missed_beacon_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
	priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;

	priv->rts_threshold = DEFAULT_RTS_THRESHOLD;

	/* If power management is turned on, default to AC mode */
	priv->power_mode = IPW_POWER_AC;
	priv->tx_power = IPW_DEFAULT_TX_POWER;

	err = request_irq(pdev->irq, ipw_isr, SA_SHIRQ, DRV_NAME, priv);
	if (err) {
		IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
		goto out_destroy_workqueue;
	}

	SET_MODULE_OWNER(net_dev);
	SET_NETDEV_DEV(net_dev, &pdev->dev);

	priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
	priv->ieee->set_security = shim__set_security;

	net_dev->open = ipw_net_open;
	net_dev->stop = ipw_net_stop;
	net_dev->init = ipw_net_init;
	net_dev->get_stats = ipw_net_get_stats;
	net_dev->set_multicast_list = ipw_net_set_multicast_list;
	net_dev->set_mac_address = ipw_net_set_mac_address;
	net_dev->get_wireless_stats = ipw_get_wireless_stats;
	net_dev->wireless_handlers = &ipw_wx_handler_def;
	net_dev->ethtool_ops = &ipw_ethtool_ops;
	net_dev->irq = pdev->irq;
	net_dev->base_addr = (unsigned long)priv->hw_base;
	net_dev->mem_start = pci_resource_start(pdev, 0);
	net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1;

	err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
	if (err) {
		IPW_ERROR("failed to create sysfs device attributes\n");
		goto out_release_irq;
	}

	err = register_netdev(net_dev);
	if (err) {
		IPW_ERROR("failed to register network device\n");
		goto out_remove_group;
	}

	return 0;

      out_remove_group:
	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
      out_release_irq:
	free_irq(pdev->irq, priv);
      out_destroy_workqueue:
	destroy_workqueue(priv->workqueue);
	priv->workqueue = NULL;
      out_iounmap:
	iounmap(priv->hw_base);
      out_pci_release_regions:
	pci_release_regions(pdev);
      out_pci_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
      out_free_ieee80211:
	free_ieee80211(priv->net_dev);
      out:
	return err;
}
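
/* PCI remove: tear down sysfs and the netdev, free the rx/tx queues, flush
 * and destroy the workqueue, release the IRQ and PCI resources, and drop
 * any cached firmware images. */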
static void ipw_pci_remove(struct pci_dev *pdev)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);
	if (!priv)
		return;

	priv->status |= STATUS_EXIT_PENDING;

	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);

	ipw_down(priv);

	unregister_netdev(priv->net_dev);

	if (priv->rxq) {
		ipw_rx_queue_free(priv, priv->rxq);
		priv->rxq = NULL;
	}
	ipw_tx_queue_free(priv);

	/* ipw_down will ensure that there is no more pending work
	 * in the workqueues, so we can safely remove them now. */
	if (priv->workqueue) {
		cancel_delayed_work(&priv->adhoc_check);
		cancel_delayed_work(&priv->gather_stats);
		cancel_delayed_work(&priv->request_scan);
		cancel_delayed_work(&priv->rf_kill);
		cancel_delayed_work(&priv->scan_check);
		destroy_workqueue(priv->workqueue);
		priv->workqueue = NULL;
	}

	free_irq(pdev->irq, priv);
	iounmap(priv->hw_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_ieee80211(priv->net_dev);

#ifdef CONFIG_PM
	if (fw_loaded) {
		release_firmware(bootfw);
		release_firmware(ucode);
		release_firmware(firmware);
		fw_loaded = 0;
	}
#endif
}
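
/* Power management: on suspend the device is brought all the way down and
 * detached before the PCI state is saved and the device powered off. */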
#ifdef CONFIG_PM
static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);
	struct net_device *dev = priv->net_dev;

	printk(KERN_INFO "%s: Going into suspend...\n", dev->name);

	/* Take down the device; powers it off, etc. */
	ipw_down(priv);

	/* Remove the PRESENT state of the device */
	netif_device_detach(dev);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
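
/* Power management: on resume the PCI state is restored (including the
 * RETRY_TIMEOUT workaround, which lives outside the standard config header),
 * the netdev is re-attached and the device is brought back up from the
 * workqueue. */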
static int ipw_pci_resume(struct pci_dev *pdev)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);
	struct net_device *dev = priv->net_dev;
	u32 val;

	printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);

	pci_set_power_state(pdev, 0);
	pci_enable_device(pdev);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
	pci_restore_state(pdev, priv->pm_state);
#else
	pci_restore_state(pdev);
#endif

	/*
	 * Suspend/Resume resets the PCI configuration space, so we have to
	 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
	 * from interfering with C3 CPU state.  pci_restore_state won't help
	 * here since it only restores the first 64 bytes of the PCI config
	 * header.
	 */
	pci_read_config_dword(pdev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);

	/* Set the device back into the PRESENT state; this will also wake
	 * the queue if needed */
	netif_device_attach(dev);

	/* Bring the device back up */
	queue_work(priv->workqueue, &priv->up);

	return 0;
}
#endif
/* driver initialization stuff */
static struct pci_driver ipw_driver = {
	.name = DRV_NAME,
	.id_table = card_ids,
	.probe = ipw_pci_probe,
	.remove = __devexit_p(ipw_pci_remove),
#ifdef CONFIG_PM
	.suspend = ipw_pci_suspend,
	.resume = ipw_pci_resume,
#endif
};
static int __init ipw_init(void)
{
	int ret;

	printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
	printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");

	ret = pci_module_init(&ipw_driver);
	if (ret) {
		IPW_ERROR("Unable to initialize PCI module\n");
		return ret;
	}

	ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
	if (ret) {
		IPW_ERROR("Unable to create driver sysfs file\n");
		pci_unregister_driver(&ipw_driver);
		return ret;
	}

	return ret;
}

static void __exit ipw_exit(void)
{
	driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
	pci_unregister_driver(&ipw_driver);
}
module_param(disable, int, 0444);
MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");

module_param(associate, int, 0444);
MODULE_PARM_DESC(associate, "auto associate when scanning (default on)");

module_param(auto_create, int, 0444);
MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");

module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "debug output mask");

module_param(channel, int, 0444);
MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");

module_param(ifname, charp, 0444);
MODULE_PARM_DESC(ifname, "network device name (default eth%d)");

#ifdef CONFIG_IPW_PROMISC
module_param(mode, int, 0444);
MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
#else
module_param(mode, int, 0444);
MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
#endif

module_exit(ipw_exit);
module_init(ipw_init);