1 /******************************************************************************
3 Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved.
5 802.11 status code portion of this file from ethereal-0.10.6:
6 Copyright 2000, Axis Communications AB
7 Ethereal - Network traffic analyzer
8 By Gerald Combs <gerald@ethereal.com>
9 Copyright 1998 Gerald Combs
11 This program is free software; you can redistribute it and/or modify it
12 under the terms of version 2 of the GNU General Public License as
13 published by the Free Software Foundation.
15 This program is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
20 You should have received a copy of the GNU General Public License along with
21 this program; if not, write to the Free Software Foundation, Inc., 59
22 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 The full GNU General Public License is included in this distribution in the
28 James P. Ketrenos <ipw2100-admin@linux.intel.com>
29 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 ******************************************************************************/
35 #define IPW2200_VERSION "1.0.0"
36 #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver"
37 #define DRV_COPYRIGHT "Copyright(c) 2003-2004 Intel Corporation"
38 #define DRV_VERSION IPW2200_VERSION
40 MODULE_DESCRIPTION(DRV_DESCRIPTION);
41 MODULE_VERSION(DRV_VERSION);
42 MODULE_AUTHOR(DRV_COPYRIGHT);
43 MODULE_LICENSE("GPL");
46 static int channel = 0;
50 static u32 ipw_debug_level;
51 static int associate = 1;
52 static int auto_create = 1;
53 static int disable = 0;
54 static const char ipw_modes[] = {
58 static void ipw_rx(struct ipw_priv *priv);
59 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
60 struct clx2_tx_queue *txq, int qindex);
61 static int ipw_queue_reset(struct ipw_priv *priv);
63 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
66 static void ipw_tx_queue_free(struct ipw_priv *);
68 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
69 static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
70 static void ipw_rx_queue_replenish(void *);
72 static int ipw_up(struct ipw_priv *);
73 static void ipw_down(struct ipw_priv *);
74 static int ipw_config(struct ipw_priv *);
75 static int init_supported_rates(struct ipw_priv *priv, struct ipw_supported_rates *prates);
77 static u8 band_b_active_channel[MAX_B_CHANNELS] = {
78 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 0
80 static u8 band_a_active_channel[MAX_A_CHANNELS] = {
81 36, 40, 44, 48, 149, 153, 157, 161, 165, 52, 56, 60, 64, 0
84 static int is_valid_channel(int mode_mask, int channel)
91 if (mode_mask & IEEE_A)
92 for (i = 0; i < MAX_A_CHANNELS; i++)
93 if (band_a_active_channel[i] == channel)
96 if (mode_mask & (IEEE_B | IEEE_G))
97 for (i = 0; i < MAX_B_CHANNELS; i++)
98 if (band_b_active_channel[i] == channel)
99 return mode_mask & (IEEE_B | IEEE_G);
104 static char *snprint_line(char *buf, size_t count,
105 const u8 *data, u32 len, u32 ofs)
110 out = snprintf(buf, count, "%08X", ofs);
112 for (l = 0, i = 0; i < 2; i++) {
113 out += snprintf(buf + out, count - out, " ");
114 for (j = 0; j < 8 && l < len; j++, l++)
115 out += snprintf(buf + out, count - out, "%02X ",
118 out += snprintf(buf + out, count - out, " ");
121 out += snprintf(buf + out, count - out, " ");
122 for (l = 0, i = 0; i < 2; i++) {
123 out += snprintf(buf + out, count - out, " ");
124 for (j = 0; j < 8 && l < len; j++, l++) {
125 c = data[(i * 8 + j)];
126 if (!isascii(c) || !isprint(c))
129 out += snprintf(buf + out, count - out, "%c", c);
133 out += snprintf(buf + out, count - out, " ");
139 static void printk_buf(int level, const u8 *data, u32 len)
143 if (!(ipw_debug_level & level))
147 printk(KERN_DEBUG "%s\n",
148 snprint_line(line, sizeof(line), &data[ofs],
149 min(len, 16U), ofs));
151 len -= min(len, 16U);
155 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
156 #define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
158 static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
159 #define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
161 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
162 static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
164 IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(b), (u32)(c));
165 _ipw_write_reg8(a, b, c);
168 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
169 static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
171 IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(b), (u32)(c));
172 _ipw_write_reg16(a, b, c);
175 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
176 static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
178 IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(b), (u32)(c));
179 _ipw_write_reg32(a, b, c);
182 #define _ipw_write8(ipw, ofs, val) writeb((val), (ipw)->hw_base + (ofs))
183 #define ipw_write8(ipw, ofs, val) do { \
184 IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
185 _ipw_write8(ipw, ofs, val); } while (0)
187 #define _ipw_write16(ipw, ofs, val) writew((val), (ipw)->hw_base + (ofs))
188 #define ipw_write16(ipw, ofs, val) do { \
189 IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
190 _ipw_write16(ipw, ofs, val); } while (0)
192 #define _ipw_write32(ipw, ofs, val) writel((val), (ipw)->hw_base + (ofs))
193 #define ipw_write32(ipw, ofs, val) do { \
194 IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
195 _ipw_write32(ipw, ofs, val); } while (0)
197 #define _ipw_read8(ipw, ofs) readb((ipw)->hw_base + (ofs))
198 static inline u8 __ipw_read8(char *f, u32 l, struct ipw_priv *ipw, u32 ofs) {
199 IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", f, l, (u32)(ofs));
200 return _ipw_read8(ipw, ofs);
202 #define ipw_read8(ipw, ofs) __ipw_read8(__FILE__, __LINE__, ipw, ofs)
204 #define _ipw_read16(ipw, ofs) readw((ipw)->hw_base + (ofs))
205 static inline u16 __ipw_read16(char *f, u32 l, struct ipw_priv *ipw, u32 ofs) {
206 IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", f, l, (u32)(ofs));
207 return _ipw_read16(ipw, ofs);
209 #define ipw_read16(ipw, ofs) __ipw_read16(__FILE__, __LINE__, ipw, ofs)
211 #define _ipw_read32(ipw, ofs) readl((ipw)->hw_base + (ofs))
212 static inline u32 __ipw_read32(char *f, u32 l, struct ipw_priv *ipw, u32 ofs) {
213 IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", f, l, (u32)(ofs));
214 return _ipw_read32(ipw, ofs);
216 #define ipw_read32(ipw, ofs) __ipw_read32(__FILE__, __LINE__, ipw, ofs)
218 static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
219 #define ipw_read_indirect(a, b, c, d) do { \
220 IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \
221 _ipw_read_indirect(a, b, c, d); } while (0)
223 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 *data, int num);
224 #define ipw_write_indirect(a, b, c, d) do { \
225 IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \
226 _ipw_write_indirect(a, b, c, d); } while (0)
228 /* indirect writes */
229 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg,
232 IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n",
234 _ipw_write32(priv, CX2_INDIRECT_ADDR, reg);
235 _ipw_write32(priv, CX2_INDIRECT_DATA, value);
239 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
241 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
242 _ipw_write32(priv, CX2_INDIRECT_ADDR, reg & CX2_INDIRECT_ADDR_MASK);
243 _ipw_write8(priv, CX2_INDIRECT_DATA, value);
244 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n",
245 (unsigned)(priv->hw_base + CX2_INDIRECT_DATA),
249 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg,
252 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
253 _ipw_write32(priv, CX2_INDIRECT_ADDR, reg & CX2_INDIRECT_ADDR_MASK);
254 _ipw_write16(priv, CX2_INDIRECT_DATA, value);
257 /* indirect reads */
259 static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
262 _ipw_write32(priv, CX2_INDIRECT_ADDR, reg & CX2_INDIRECT_ADDR_MASK);
263 IPW_DEBUG_IO(" reg = 0x%8X : \n", reg);
264 word = _ipw_read32(priv, CX2_INDIRECT_DATA);
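	/* the 32-bit read returns the whole aligned dword; shift and mask to
	 * extract the byte selected by the low two bits of the register address */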
265 return (word >> ((reg & 0x3)*8)) & 0xff;
268 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
272 IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
274 _ipw_write32(priv, CX2_INDIRECT_ADDR, reg);
275 value = _ipw_read32(priv, CX2_INDIRECT_DATA);
276 IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x \n", reg, value);
280 /* iterative/auto-increment 32 bit reads and writes */
281 static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
284 u32 aligned_addr = addr & CX2_INDIRECT_ADDR_MASK;
285 u32 dif_len = addr - aligned_addr;
289 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
291 /* Read the first dword (or portion) byte by byte */
292 if (unlikely(dif_len)) {
293 /* Start reading at aligned_addr + dif_len */
294 _ipw_write32(priv, CX2_INDIRECT_ADDR, aligned_addr);
295 for (i = dif_len; i < 4; i++, buf++)
296 *buf = _ipw_read8(priv, CX2_INDIRECT_DATA + i);
301 /* Read DWs through autoinc register */
302 _ipw_write32(priv, CX2_AUTOINC_ADDR, aligned_addr);
303 aligned_len = num & CX2_INDIRECT_ADDR_MASK;
304 for (i = 0; i < aligned_len; i += 4, buf += 4, aligned_addr += 4)
305 *(u32*)buf = ipw_read32(priv, CX2_AUTOINC_DATA);
307 /* Read the last dword (or portion) byte by byte */
308 dif_len = num - aligned_len;
309 _ipw_write32(priv, CX2_INDIRECT_ADDR, aligned_addr);
310 for (i = 0; i < dif_len; i++, buf++)
311 *buf = ipw_read8(priv, CX2_INDIRECT_DATA + i);
314 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 *buf,
317 u32 aligned_addr = addr & CX2_INDIRECT_ADDR_MASK;
318 u32 dif_len = addr - aligned_addr;
322 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
324 /* Write the first dword (or portion) byte by byte */
325 if (unlikely(dif_len)) {
326 /* Start writing at aligned_addr + dif_len */
327 _ipw_write32(priv, CX2_INDIRECT_ADDR, aligned_addr);
328 for (i = dif_len; i < 4; i++, buf++)
329 _ipw_write8(priv, CX2_INDIRECT_DATA + i, *buf);
334 /* Write DWs through autoinc register */
335 _ipw_write32(priv, CX2_AUTOINC_ADDR, aligned_addr);
336 aligned_len = num & CX2_INDIRECT_ADDR_MASK;
337 for (i = 0; i < aligned_len; i += 4, buf += 4, aligned_addr += 4)
338 _ipw_write32(priv, CX2_AUTOINC_DATA, *(u32*)buf);
340 /* Write the last dword (or portion) byte by byte */
341 dif_len = num - aligned_len;
342 _ipw_write32(priv, CX2_INDIRECT_ADDR, aligned_addr);
343 for (i = 0; i < dif_len; i++, buf++)
344 _ipw_write8(priv, CX2_INDIRECT_DATA + i, *buf);
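/*
 * Illustrative sketch only (not a call the driver makes as written):
 * copying a small host buffer into device memory through the indirect
 * window would look like
 *
 *	u8 tmp[16];	(filled in by the caller)
 *	ipw_write_indirect(priv, CX2_SHARED_LOWER_BOUND, tmp, sizeof(tmp));
 *
 * The helpers above handle the unaligned head/tail bytes and use the
 * CX2_AUTOINC_ADDR/CX2_AUTOINC_DATA registers for the aligned middle.
 */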
347 static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
350 memcpy_toio((priv->hw_base + addr), buf, num);
353 static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
355 ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
358 static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
360 ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
363 static inline void ipw_enable_interrupts(struct ipw_priv *priv)
365 if (priv->status & STATUS_INT_ENABLED)
367 priv->status |= STATUS_INT_ENABLED;
368 ipw_write32(priv, CX2_INTA_MASK_R, CX2_INTA_MASK_ALL);
371 static inline void ipw_disable_interrupts(struct ipw_priv *priv)
373 if (!(priv->status & STATUS_INT_ENABLED))
375 priv->status &= ~STATUS_INT_ENABLED;
376 ipw_write32(priv, CX2_INTA_MASK_R, ~CX2_INTA_MASK_ALL);
379 static char *ipw_error_desc(u32 val)
382 case IPW_FW_ERROR_OK:
384 case IPW_FW_ERROR_FAIL:
386 case IPW_FW_ERROR_MEMORY_UNDERFLOW:
387 return "MEMORY_UNDERFLOW";
388 case IPW_FW_ERROR_MEMORY_OVERFLOW:
389 return "MEMORY_OVERFLOW";
390 case IPW_FW_ERROR_BAD_PARAM:
391 return "ERROR_BAD_PARAM";
392 case IPW_FW_ERROR_BAD_CHECKSUM:
393 return "ERROR_BAD_CHECKSUM";
394 case IPW_FW_ERROR_NMI_INTERRUPT:
395 return "ERROR_NMI_INTERRUPT";
396 case IPW_FW_ERROR_BAD_DATABASE:
397 return "ERROR_BAD_DATABASE";
398 case IPW_FW_ERROR_ALLOC_FAIL:
399 return "ERROR_ALLOC_FAIL";
400 case IPW_FW_ERROR_DMA_UNDERRUN:
401 return "ERROR_DMA_UNDERRUN";
402 case IPW_FW_ERROR_DMA_STATUS:
403 return "ERROR_DMA_STATUS";
404 case IPW_FW_ERROR_DINOSTATUS_ERROR:
405 return "ERROR_DINOSTATUS_ERROR";
406 case IPW_FW_ERROR_EEPROMSTATUS_ERROR:
407 return "ERROR_EEPROMSTATUS_ERROR";
408 case IPW_FW_ERROR_SYSASSERT:
409 return "ERROR_SYSASSERT";
410 case IPW_FW_ERROR_FATAL_ERROR:
411 return "ERROR_FATALSTATUS_ERROR";
413 return "UNKNOWNSTATUS_ERROR";
417 static void ipw_dump_nic_error_log(struct ipw_priv *priv)
419 u32 desc, time, blink1, blink2, ilink1, ilink2, idata, i, count, base;
421 base = ipw_read32(priv, IPWSTATUS_ERROR_LOG);
422 count = ipw_read_reg32(priv, base);
424 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
425 IPW_ERROR("Start IPW Error Log Dump:\n");
426 IPW_ERROR("Status: 0x%08X, Config: %08X\n",
427 priv->status, priv->config);
430 for (i = ERROR_START_OFFSET;
431 i <= count * ERROR_ELEM_SIZE;
432 i += ERROR_ELEM_SIZE) {
433 desc = ipw_read_reg32(priv, base + i);
434 time = ipw_read_reg32(priv, base + i + 1*sizeof(u32));
435 blink1 = ipw_read_reg32(priv, base + i + 2*sizeof(u32));
436 blink2 = ipw_read_reg32(priv, base + i + 3*sizeof(u32));
437 ilink1 = ipw_read_reg32(priv, base + i + 4*sizeof(u32));
438 ilink2 = ipw_read_reg32(priv, base + i + 5*sizeof(u32));
439 idata = ipw_read_reg32(priv, base + i + 6*sizeof(u32));
442 "%s %i 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
443 ipw_error_desc(desc), time, blink1, blink2,
444 ilink1, ilink2, idata);
448 static void ipw_dump_nic_event_log(struct ipw_priv *priv)
450 u32 ev, time, data, i, count, base;
452 base = ipw_read32(priv, IPW_EVENT_LOG);
453 count = ipw_read_reg32(priv, base);
455 if (EVENT_START_OFFSET <= count * EVENT_ELEM_SIZE)
456 IPW_ERROR("Start IPW Event Log Dump:\n");
458 for (i = EVENT_START_OFFSET;
459 i <= count * EVENT_ELEM_SIZE;
460 i += EVENT_ELEM_SIZE) {
461 ev = ipw_read_reg32(priv, base + i);
462 time = ipw_read_reg32(priv, base + i + 1*sizeof(u32));
463 data = ipw_read_reg32(priv, base + i + 2*sizeof(u32));
465 #ifdef CONFIG_IPW_DEBUG
466 IPW_ERROR("%i\t0x%08x\t%i\n", time, data, ev);
471 static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val,
474 u32 addr, field_info, field_len, field_count, total_len;
476 IPW_DEBUG_ORD("ordinal = %i\n", ord);
478 if (!priv || !val || !len) {
479 IPW_DEBUG_ORD("Invalid argument\n");
483 /* verify device ordinal tables have been initialized */
484 if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
485 IPW_DEBUG_ORD("Access ordinals before initialization\n");
489 switch (IPW_ORD_TABLE_ID_MASK & ord) {
490 case IPW_ORD_TABLE_0_MASK:
492 * TABLE 0: Direct access to a table of 32 bit values
494 * This is a very simple table with the data directly
495 * read from the table
498 /* remove the table id from the ordinal */
499 ord &= IPW_ORD_TABLE_VALUE_MASK;
502 if (ord > priv->table0_len) {
503 IPW_DEBUG_ORD("ordinal value (%i) longer then "
504 "max (%i)\n", ord, priv->table0_len);
508 /* verify we have enough room to store the value */
509 if (*len < sizeof(u32)) {
510 IPW_DEBUG_ORD("ordinal buffer length too small, "
511 "need %d\n", sizeof(u32));
515 IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
516 ord, priv->table0_addr + (ord << 2));
520 *((u32 *)val) = ipw_read32(priv, priv->table0_addr + (ord << 2));
523 case IPW_ORD_TABLE_1_MASK:
525 * TABLE 1: Indirect access to a table of 32 bit values
527 * This is a fairly large table of u32 values each
528 * representing starting addr for the data (which is
532 /* remove the table id from the ordinal */
533 ord &= IPW_ORD_TABLE_VALUE_MASK;
536 if (ord > priv->table1_len) {
537 IPW_DEBUG_ORD("ordinal value too long\n");
541 /* verify we have enough room to store the value */
542 if (*len < sizeof(u32)) {
543 IPW_DEBUG_ORD("ordinal buffer length too small, "
544 "need %d\n", sizeof(u32));
548 *((u32 *)val) = ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
552 case IPW_ORD_TABLE_2_MASK:
554 * TABLE 2: Indirect access to a table of variable sized values
556 * This table consists of six values, each containing
557 * - dword containing the starting offset of the data
558 * - dword containing the length in the first 16 bits
559 * and the count in the second 16 bits
562 /* remove the table id from the ordinal */
563 ord &= IPW_ORD_TABLE_VALUE_MASK;
566 if (ord > priv->table2_len) {
567 IPW_DEBUG_ORD("ordinal value too long\n");
571 /* get the address of statistic */
572 addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));
574 /* get the second DW of statistics ;
575 * two 16-bit words - first is length, second is count */
576 field_info = ipw_read_reg32(priv, priv->table2_addr + (ord << 3) + sizeof(u32));
578 /* get each entry length */
579 field_len = *((u16 *)&field_info);
581 /* get number of entries */
582 field_count = *(((u16 *)&field_info) + 1);
584 /* abort if not enough memory */
585 total_len = field_len * field_count;
586 if (total_len > *len) {
595 IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
596 "field_info = 0x%08x\n",
597 addr, total_len, field_info);
598 ipw_read_indirect(priv, addr, val, total_len);
602 IPW_DEBUG_ORD("Invalid ordinal!\n");
611 static void ipw_init_ordinals(struct ipw_priv *priv)
613 priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
614 priv->table0_len = ipw_read32(priv, priv->table0_addr);
616 IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
617 priv->table0_addr, priv->table0_len);
619 priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
620 priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);
622 IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
623 priv->table1_addr, priv->table1_len);
625 priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
626 priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
627 priv->table2_len &= 0x0000ffff; /* the length is in the lower 16 bits */
629 IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
630 priv->table2_addr, priv->table2_len);
635 * The following adds a new attribute to the sysfs representation
636 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
637 * used for controlling the debug level.
639 * See the level definitions in ipw for details.
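 *
 * For example (the value is illustrative), the level can be changed at
 * runtime from a shell with
 *   echo 0x00000001 > /sys/bus/pci/drivers/ipw/debug_level
 * and read back by reading the same file; the store handler accepts both
 * hex (with or without a leading "0x"/"x") and decimal input.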
641 static ssize_t show_debug_level(struct device_driver *d, char *buf)
643 return sprintf(buf, "0x%08X\n", ipw_debug_level);
645 static ssize_t store_debug_level(struct device_driver *d, const char *buf,
648 char *p = (char *)buf;
651 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
653 if (p[0] == 'x' || p[0] == 'X')
655 val = simple_strtoul(p, &p, 16);
657 val = simple_strtoul(p, &p, 10);
659 printk(KERN_INFO DRV_NAME
660 ": %s is not in hex or decimal form.\n", buf);
662 ipw_debug_level = val;
664 return strnlen(buf, count);
667 static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
668 show_debug_level, store_debug_level);
670 static ssize_t show_status(struct device *d, char *buf)
672 struct ipw_priv *p = (struct ipw_priv *)d->driver_data;
673 return sprintf(buf, "0x%08x\n", (int)p->status);
675 static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
677 static ssize_t show_cfg(struct device *d, char *buf)
679 struct ipw_priv *p = (struct ipw_priv *)d->driver_data;
680 return sprintf(buf, "0x%08x\n", (int)p->config);
682 static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
684 static ssize_t show_nic_type(struct device *d, char *buf)
686 struct ipw_priv *p = (struct ipw_priv *)d->driver_data;
687 u8 type = p->eeprom[EEPROM_NIC_TYPE];
690 case EEPROM_NIC_TYPE_STANDARD:
691 return sprintf(buf, "STANDARD\n");
692 case EEPROM_NIC_TYPE_DELL:
693 return sprintf(buf, "DELL\n");
694 case EEPROM_NIC_TYPE_FUJITSU:
695 return sprintf(buf, "FUJITSU\n");
696 case EEPROM_NIC_TYPE_IBM:
697 return sprintf(buf, "IBM\n");
698 case EEPROM_NIC_TYPE_HP:
699 return sprintf(buf, "HP\n");
702 return sprintf(buf, "UNKNOWN\n");
704 static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL);
706 static ssize_t dump_error_log(struct device *d, const char *buf,
709 char *p = (char *)buf;
712 ipw_dump_nic_error_log((struct ipw_priv*)d->driver_data);
714 return strnlen(buf, count);
716 static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log);
718 static ssize_t dump_event_log(struct device *d, const char *buf,
721 char *p = (char *)buf;
724 ipw_dump_nic_event_log((struct ipw_priv*)d->driver_data);
726 return strnlen(buf, count);
728 static DEVICE_ATTR(dump_events, S_IWUSR, NULL, dump_event_log);
730 static ssize_t show_ucode_version(struct device *d, char *buf)
732 u32 len = sizeof(u32), tmp = 0;
733 struct ipw_priv *p = (struct ipw_priv*)d->driver_data;
735 if(ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
738 return sprintf(buf, "0x%08x\n", tmp);
740 static DEVICE_ATTR(ucode_version, S_IWUSR|S_IRUGO, show_ucode_version, NULL);
742 static ssize_t show_rtc(struct device *d, char *buf)
744 u32 len = sizeof(u32), tmp = 0;
745 struct ipw_priv *p = (struct ipw_priv*)d->driver_data;
747 if(ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
750 return sprintf(buf, "0x%08x\n", tmp);
752 static DEVICE_ATTR(rtc, S_IWUSR|S_IRUGO, show_rtc, NULL);
755 * Add a device attribute to view/control the delay between eeprom operations.
758 static ssize_t show_eeprom_delay(struct device *d, char *buf)
760 int n = ((struct ipw_priv*)d->driver_data)->eeprom_delay;
761 return sprintf(buf, "%i\n", n);
763 static ssize_t store_eeprom_delay(struct device *d, const char *buf,
766 struct ipw_priv *p = (struct ipw_priv*)d->driver_data;
767 sscanf(buf, "%i", &p->eeprom_delay);
768 return strnlen(buf, count);
770 static DEVICE_ATTR(eeprom_delay, S_IWUSR|S_IRUGO,
771 show_eeprom_delay,store_eeprom_delay);
773 static ssize_t show_command_event_reg(struct device *d, char *buf)
776 struct ipw_priv *p = (struct ipw_priv *)d->driver_data;
778 reg = ipw_read_reg32(p, CX2_INTERNAL_CMD_EVENT);
779 return sprintf(buf, "0x%08x\n", reg);
781 static ssize_t store_command_event_reg(struct device *d,
786 struct ipw_priv *p = (struct ipw_priv *)d->driver_data;
788 sscanf(buf, "%x", &reg);
789 ipw_write_reg32(p, CX2_INTERNAL_CMD_EVENT, reg);
790 return strnlen(buf, count);
792 static DEVICE_ATTR(command_event_reg, S_IWUSR|S_IRUGO,
793 show_command_event_reg,store_command_event_reg);
795 static ssize_t show_mem_gpio_reg(struct device *d, char *buf)
798 struct ipw_priv *p = (struct ipw_priv *)d->driver_data;
800 reg = ipw_read_reg32(p, 0x301100);
801 return sprintf(buf, "0x%08x\n", reg);
803 static ssize_t store_mem_gpio_reg(struct device *d,
808 struct ipw_priv *p = (struct ipw_priv *)d->driver_data;
810 sscanf(buf, "%x", &reg);
811 ipw_write_reg32(p, 0x301100, reg);
812 return strnlen(buf, count);
814 static DEVICE_ATTR(mem_gpio_reg, S_IWUSR|S_IRUGO,
815 show_mem_gpio_reg,store_mem_gpio_reg);
817 static ssize_t show_indirect_dword(struct device *d, char *buf)
820 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
821 if (priv->status & STATUS_INDIRECT_DWORD)
822 reg = ipw_read_reg32(priv, priv->indirect_dword);
826 return sprintf(buf, "0x%08x\n", reg);
828 static ssize_t store_indirect_dword(struct device *d,
832 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
834 sscanf(buf, "%x", &priv->indirect_dword);
835 priv->status |= STATUS_INDIRECT_DWORD;
836 return strnlen(buf, count);
838 static DEVICE_ATTR(indirect_dword, S_IWUSR|S_IRUGO,
839 show_indirect_dword,store_indirect_dword);
841 static ssize_t show_indirect_byte(struct device *d, char *buf)
844 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
845 if (priv->status & STATUS_INDIRECT_BYTE)
846 reg = ipw_read_reg8(priv, priv->indirect_byte);
850 return sprintf(buf, "0x%02x\n", reg);
852 static ssize_t store_indirect_byte(struct device *d,
856 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
858 sscanf(buf, "%x", &priv->indirect_byte);
859 priv->status |= STATUS_INDIRECT_BYTE;
860 return strnlen(buf, count);
862 static DEVICE_ATTR(indirect_byte, S_IWUSR|S_IRUGO,
863 show_indirect_byte, store_indirect_byte);
865 static ssize_t show_direct_dword(struct device *d, char *buf)
868 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
870 if (priv->status & STATUS_DIRECT_DWORD)
871 reg = ipw_read32(priv, priv->direct_dword);
875 return sprintf(buf, "0x%08x\n", reg);
877 static ssize_t store_direct_dword(struct device *d,
881 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
883 sscanf(buf, "%x", &priv->direct_dword);
884 priv->status |= STATUS_DIRECT_DWORD;
885 return strnlen(buf, count);
887 static DEVICE_ATTR(direct_dword, S_IWUSR|S_IRUGO,
888 show_direct_dword,store_direct_dword);
891 static inline int rf_kill_active(struct ipw_priv *priv)
893 if (0 == (ipw_read32(priv, 0x30) & 0x10000))
894 priv->status |= STATUS_RF_KILL_HW;
896 priv->status &= ~STATUS_RF_KILL_HW;
898 return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
901 static ssize_t show_rf_kill(struct device *d, char *buf)
903 /* 0 - RF kill not enabled
904 1 - SW based RF kill active (sysfs)
905 2 - HW based RF kill active
906 3 - Both HW and SW based RF kill active */
907 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
908 int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
909 (rf_kill_active(priv) ? 0x2 : 0x0);
910 return sprintf(buf, "%i\n", val);
913 static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
915 if ((disable_radio ? 1 : 0) ==
916 (priv->status & STATUS_RF_KILL_SW ? 1 : 0))
919 IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
920 disable_radio ? "OFF" : "ON");
923 priv->status |= STATUS_RF_KILL_SW;
925 if (priv->workqueue) {
926 cancel_delayed_work(&priv->request_scan);
928 wake_up_interruptible(&priv->wait_command_queue);
929 queue_work(priv->workqueue, &priv->down);
931 priv->status &= ~STATUS_RF_KILL_SW;
932 if (rf_kill_active(priv)) {
933 IPW_DEBUG_RF_KILL("Can not turn radio back on - "
934 "disabled by HW switch\n");
935 /* Make sure the RF_KILL check timer is running */
936 cancel_delayed_work(&priv->rf_kill);
937 queue_delayed_work(priv->workqueue, &priv->rf_kill,
940 queue_work(priv->workqueue, &priv->up);
946 static ssize_t store_rf_kill(struct device *d, const char *buf, size_t count)
948 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
950 ipw_radio_kill_sw(priv, buf[0] == '1');
954 static DEVICE_ATTR(rf_kill, S_IWUSR|S_IRUGO, show_rf_kill, store_rf_kill);
956 static void ipw_irq_tasklet(struct ipw_priv *priv)
958 u32 inta, inta_mask, handled = 0;
962 spin_lock_irqsave(&priv->lock, flags);
964 inta = ipw_read32(priv, CX2_INTA_RW);
965 inta_mask = ipw_read32(priv, CX2_INTA_MASK_R);
966 inta &= (CX2_INTA_MASK_ALL & inta_mask);
968 /* Add any cached INTA values that need to be handled */
969 inta |= priv->isr_inta;
971 /* handle all the justifications for the interrupt */
972 if (inta & CX2_INTA_BIT_RX_TRANSFER) {
974 handled |= CX2_INTA_BIT_RX_TRANSFER;
977 if (inta & CX2_INTA_BIT_TX_CMD_QUEUE) {
978 IPW_DEBUG_HC("Command completed.\n");
979 rc = ipw_queue_tx_reclaim( priv, &priv->txq_cmd, -1);
980 priv->status &= ~STATUS_HCMD_ACTIVE;
981 wake_up_interruptible(&priv->wait_command_queue);
982 handled |= CX2_INTA_BIT_TX_CMD_QUEUE;
985 if (inta & CX2_INTA_BIT_TX_QUEUE_1) {
986 IPW_DEBUG_TX("TX_QUEUE_1\n");
987 rc = ipw_queue_tx_reclaim( priv, &priv->txq[0], 0);
988 handled |= CX2_INTA_BIT_TX_QUEUE_1;
991 if (inta & CX2_INTA_BIT_TX_QUEUE_2) {
992 IPW_DEBUG_TX("TX_QUEUE_2\n");
993 rc = ipw_queue_tx_reclaim( priv, &priv->txq[1], 1);
994 handled |= CX2_INTA_BIT_TX_QUEUE_2;
997 if (inta & CX2_INTA_BIT_TX_QUEUE_3) {
998 IPW_DEBUG_TX("TX_QUEUE_3\n");
999 rc = ipw_queue_tx_reclaim( priv, &priv->txq[2], 2);
1000 handled |= CX2_INTA_BIT_TX_QUEUE_3;
1003 if (inta & CX2_INTA_BIT_TX_QUEUE_4) {
1004 IPW_DEBUG_TX("TX_QUEUE_4\n");
1005 rc = ipw_queue_tx_reclaim( priv, &priv->txq[3], 3);
1006 handled |= CX2_INTA_BIT_TX_QUEUE_4;
1009 if (inta & CX2_INTA_BIT_STATUS_CHANGE) {
1010 IPW_WARNING("STATUS_CHANGE\n");
1011 handled |= CX2_INTA_BIT_STATUS_CHANGE;
1014 if (inta & CX2_INTA_BIT_BEACON_PERIOD_EXPIRED) {
1015 IPW_WARNING("TX_PERIOD_EXPIRED\n");
1016 handled |= CX2_INTA_BIT_BEACON_PERIOD_EXPIRED;
1019 if (inta & CX2_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
1020 IPW_WARNING("HOST_CMD_DONE\n");
1021 handled |= CX2_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
1024 if (inta & CX2_INTA_BIT_FW_INITIALIZATION_DONE) {
1025 IPW_WARNING("FW_INITIALIZATION_DONE\n");
1026 handled |= CX2_INTA_BIT_FW_INITIALIZATION_DONE;
1029 if (inta & CX2_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
1030 IPW_WARNING("PHY_OFF_DONE\n");
1031 handled |= CX2_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
1034 if (inta & CX2_INTA_BIT_RF_KILL_DONE) {
1035 IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
1036 priv->status |= STATUS_RF_KILL_HW;
1037 wake_up_interruptible(&priv->wait_command_queue);
1038 netif_carrier_off(priv->net_dev);
1039 netif_stop_queue(priv->net_dev);
1040 cancel_delayed_work(&priv->request_scan);
1041 queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
1042 handled |= CX2_INTA_BIT_RF_KILL_DONE;
1045 if (inta & CX2_INTA_BIT_FATAL_ERROR) {
1046 IPW_ERROR("Firmware error detected. Restarting.\n");
1047 #ifdef CONFIG_IPW_DEBUG
1048 if (ipw_debug_level & IPW_DL_FW_ERRORS) {
1049 ipw_dump_nic_error_log(priv);
1050 ipw_dump_nic_event_log(priv);
1053 queue_work(priv->workqueue, &priv->adapter_restart);
1054 handled |= CX2_INTA_BIT_FATAL_ERROR;
1057 if (inta & CX2_INTA_BIT_PARITY_ERROR) {
1058 IPW_ERROR("Parity error\n");
1059 handled |= CX2_INTA_BIT_PARITY_ERROR;
1062 if (handled != inta) {
1063 IPW_ERROR("Unhandled INTA bits 0x%08x\n",
1067 /* enable all interrupts */
1068 ipw_enable_interrupts(priv);
1070 spin_unlock_irqrestore(&priv->lock, flags);
1073 #ifdef CONFIG_IPW_DEBUG
1074 #define IPW_CMD(x) case IPW_CMD_ ## x : return #x
1075 static char *get_cmd_string(u8 cmd)
1078 IPW_CMD(HOST_COMPLETE);
1079 IPW_CMD(POWER_DOWN);
1080 IPW_CMD(SYSTEM_CONFIG);
1081 IPW_CMD(MULTICAST_ADDRESS);
1083 IPW_CMD(ADAPTER_ADDRESS);
1085 IPW_CMD(RTS_THRESHOLD);
1086 IPW_CMD(FRAG_THRESHOLD);
1087 IPW_CMD(POWER_MODE);
1089 IPW_CMD(TGI_TX_KEY);
1090 IPW_CMD(SCAN_REQUEST);
1091 IPW_CMD(SCAN_REQUEST_EXT);
1093 IPW_CMD(SUPPORTED_RATES);
1094 IPW_CMD(SCAN_ABORT);
1096 IPW_CMD(QOS_PARAMETERS);
1097 IPW_CMD(DINO_CONFIG);
1098 IPW_CMD(RSN_CAPABILITIES);
1100 IPW_CMD(CARD_DISABLE);
1101 IPW_CMD(SEED_NUMBER);
1103 IPW_CMD(COUNTRY_INFO);
1104 IPW_CMD(AIRONET_INFO);
1105 IPW_CMD(AP_TX_POWER);
1107 IPW_CMD(CCX_VER_INFO);
1108 IPW_CMD(SET_CALIBRATION);
1109 IPW_CMD(SENSITIVITY_CALIB);
1110 IPW_CMD(RETRY_LIMIT);
1111 IPW_CMD(IPW_PRE_POWER_DOWN);
1112 IPW_CMD(VAP_BEACON_TEMPLATE);
1113 IPW_CMD(VAP_DTIM_PERIOD);
1114 IPW_CMD(EXT_SUPPORTED_RATES);
1115 IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
1116 IPW_CMD(VAP_QUIET_INTERVALS);
1117 IPW_CMD(VAP_CHANNEL_SWITCH);
1118 IPW_CMD(VAP_MANDATORY_CHANNELS);
1119 IPW_CMD(VAP_CELL_PWR_LIMIT);
1120 IPW_CMD(VAP_CF_PARAM_SET);
1121 IPW_CMD(VAP_SET_BEACONING_STATE);
1122 IPW_CMD(MEASUREMENT);
1123 IPW_CMD(POWER_CAPABILITY);
1124 IPW_CMD(SUPPORTED_CHANNELS);
1125 IPW_CMD(TPC_REPORT);
1127 IPW_CMD(PRODUCTION_COMMAND);
1132 #endif /* CONFIG_IPW_DEBUG */
1134 #define HOST_COMPLETE_TIMEOUT HZ
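/* HZ jiffies, i.e. the command-completion wait below times out after one second */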
1135 static int ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
1139 if (priv->status & STATUS_HCMD_ACTIVE) {
1140 IPW_ERROR("Already sending a command\n");
1144 priv->status |= STATUS_HCMD_ACTIVE;
1146 IPW_DEBUG_HC("Sending %s command (#%d), %d bytes\n",
1147 get_cmd_string(cmd->cmd), cmd->cmd, cmd->len);
1148 printk_buf(IPW_DL_HOST_COMMAND, (u8*)cmd->param, cmd->len);
1150 rc = ipw_queue_tx_hcmd(priv, cmd->cmd, &cmd->param, cmd->len, 0);
1154 rc = wait_event_interruptible_timeout(
1155 priv->wait_command_queue, !(priv->status & STATUS_HCMD_ACTIVE),
1156 HOST_COMPLETE_TIMEOUT);
1158 IPW_DEBUG_INFO("Command completion failed out after %dms.\n",
1159 HOST_COMPLETE_TIMEOUT / (HZ / 1000));
1160 priv->status &= ~STATUS_HCMD_ACTIVE;
1163 if (priv->status & STATUS_RF_KILL_MASK) {
1164 IPW_DEBUG_INFO("Command aborted due to RF Kill Switch\n");
1171 static int ipw_send_host_complete(struct ipw_priv *priv)
1173 struct host_cmd cmd = {
1174 .cmd = IPW_CMD_HOST_COMPLETE,
1179 IPW_ERROR("Invalid args\n");
1183 if (ipw_send_cmd(priv, &cmd)) {
1184 IPW_ERROR("failed to send HOST_COMPLETE command\n");
1191 static int ipw_send_system_config(struct ipw_priv *priv,
1192 struct ipw_sys_config *config)
1194 struct host_cmd cmd = {
1195 .cmd = IPW_CMD_SYSTEM_CONFIG,
1196 .len = sizeof(*config)
1199 if (!priv || !config) {
1200 IPW_ERROR("Invalid args\n");
1204 memcpy(&cmd.param,config,sizeof(*config));
1205 if (ipw_send_cmd(priv, &cmd)) {
1206 IPW_ERROR("failed to send SYSTEM_CONFIG command\n");
1213 static int ipw_send_ssid(struct ipw_priv *priv, u8 *ssid, int len)
1215 struct host_cmd cmd = {
1216 .cmd = IPW_CMD_SSID,
1217 .len = min(len, IW_ESSID_MAX_SIZE)
1220 if (!priv || !ssid) {
1221 IPW_ERROR("Invalid args\n");
1225 memcpy(&cmd.param, ssid, cmd.len);
1226 if (ipw_send_cmd(priv, &cmd)) {
1227 IPW_ERROR("failed to send SSID command\n");
1234 static int ipw_send_adapter_address(struct ipw_priv *priv, u8 *mac)
1236 struct host_cmd cmd = {
1237 .cmd = IPW_CMD_ADAPTER_ADDRESS,
1241 if (!priv || !mac) {
1242 IPW_ERROR("Invalid args\n");
1246 IPW_DEBUG_INFO("%s: Setting MAC to " MAC_FMT "\n",
1247 priv->net_dev->name, MAC_ARG(mac));
1249 memcpy(&cmd.param, mac, ETH_ALEN);
1251 if (ipw_send_cmd(priv, &cmd)) {
1252 IPW_ERROR("failed to send ADAPTER_ADDRESS command\n");
1259 static void ipw_adapter_restart(void *adapter)
1261 struct ipw_priv *priv = adapter;
1263 if (priv->status & STATUS_RF_KILL_MASK)
1268 IPW_ERROR("Failed to up device\n");
1276 #define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
1278 static void ipw_scan_check(void *data)
1280 struct ipw_priv *priv = data;
1281 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
1282 IPW_DEBUG_SCAN("Scan completion watchdog resetting "
1283 "adapter (%dms).\n",
1284 1000 * IPW_SCAN_CHECK_WATCHDOG / HZ);
1285 ipw_adapter_restart(priv);
1289 static int ipw_send_scan_request_ext(struct ipw_priv *priv,
1290 struct ipw_scan_request_ext *request)
1292 struct host_cmd cmd = {
1293 .cmd = IPW_CMD_SCAN_REQUEST_EXT,
1294 .len = sizeof(*request)
1297 if (!priv || !request) {
1298 IPW_ERROR("Invalid args\n");
1302 memcpy(&cmd.param,request,sizeof(*request));
1303 if (ipw_send_cmd(priv, &cmd)) {
1304 IPW_ERROR("failed to send SCAN_REQUEST_EXT command\n");
1308 queue_delayed_work(priv->workqueue, &priv->scan_check,
1309 IPW_SCAN_CHECK_WATCHDOG);
1313 static int ipw_send_scan_abort(struct ipw_priv *priv)
1315 struct host_cmd cmd = {
1316 .cmd = IPW_CMD_SCAN_ABORT,
1321 IPW_ERROR("Invalid args\n");
1325 if (ipw_send_cmd(priv, &cmd)) {
1326 IPW_ERROR("failed to send SCAN_ABORT command\n");
1333 static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
1335 struct host_cmd cmd = {
1336 .cmd = IPW_CMD_SENSITIVITY_CALIB,
1337 .len = sizeof(struct ipw_sensitivity_calib)
1339 struct ipw_sensitivity_calib *calib = (struct ipw_sensitivity_calib *)
1341 calib->beacon_rssi_raw = sens;
1342 if (ipw_send_cmd(priv, &cmd)) {
1343 IPW_ERROR("failed to send SENSITIVITY CALIB command\n");
1350 static int ipw_send_associate(struct ipw_priv *priv,
1351 struct ipw_associate *associate)
1353 struct host_cmd cmd = {
1354 .cmd = IPW_CMD_ASSOCIATE,
1355 .len = sizeof(*associate)
1358 if (!priv || !associate) {
1359 IPW_ERROR("Invalid args\n");
1363 memcpy(&cmd.param,associate,sizeof(*associate));
1364 if (ipw_send_cmd(priv, &cmd)) {
1365 IPW_ERROR("failed to send ASSOCIATE command\n");
1372 static int ipw_send_supported_rates(struct ipw_priv *priv,
1373 struct ipw_supported_rates *rates)
1375 struct host_cmd cmd = {
1376 .cmd = IPW_CMD_SUPPORTED_RATES,
1377 .len = sizeof(*rates)
1380 if (!priv || !rates) {
1381 IPW_ERROR("Invalid args\n");
1385 memcpy(&cmd.param,rates,sizeof(*rates));
1386 if (ipw_send_cmd(priv, &cmd)) {
1387 IPW_ERROR("failed to send SUPPORTED_RATES command\n");
1394 static int ipw_set_random_seed(struct ipw_priv *priv)
1396 struct host_cmd cmd = {
1397 .cmd = IPW_CMD_SEED_NUMBER,
1402 IPW_ERROR("Invalid args\n");
1406 get_random_bytes(&cmd.param, sizeof(u32));
1408 if (ipw_send_cmd(priv, &cmd)) {
1409 IPW_ERROR("failed to send SEED_NUMBER command\n");
1417 static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
1419 struct host_cmd cmd = {
1420 .cmd = IPW_CMD_CARD_DISABLE,
1425 IPW_ERROR("Invalid args\n");
1429 *((u32*)&cmd.param) = phy_off;
1431 if (ipw_send_cmd(priv, &cmd)) {
1432 IPW_ERROR("failed to send CARD_DISABLE command\n");
1440 static int ipw_send_tx_power(struct ipw_priv *priv,
1441 struct ipw_tx_power *power)
1443 struct host_cmd cmd = {
1444 .cmd = IPW_CMD_TX_POWER,
1445 .len = sizeof(*power)
1448 if (!priv || !power) {
1449 IPW_ERROR("Invalid args\n");
1453 memcpy(&cmd.param,power,sizeof(*power));
1454 if (ipw_send_cmd(priv, &cmd)) {
1455 IPW_ERROR("failed to send TX_POWER command\n");
1462 static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
1464 struct ipw_rts_threshold rts_threshold = {
1465 .rts_threshold = rts,
1467 struct host_cmd cmd = {
1468 .cmd = IPW_CMD_RTS_THRESHOLD,
1469 .len = sizeof(rts_threshold)
1473 IPW_ERROR("Invalid args\n");
1477 memcpy(&cmd.param, &rts_threshold, sizeof(rts_threshold));
1478 if (ipw_send_cmd(priv, &cmd)) {
1479 IPW_ERROR("failed to send RTS_THRESHOLD command\n");
1486 static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
1488 struct ipw_frag_threshold frag_threshold = {
1489 .frag_threshold = frag,
1491 struct host_cmd cmd = {
1492 .cmd = IPW_CMD_FRAG_THRESHOLD,
1493 .len = sizeof(frag_threshold)
1497 IPW_ERROR("Invalid args\n");
1501 memcpy(&cmd.param, &frag_threshold, sizeof(frag_threshold));
1502 if (ipw_send_cmd(priv, &cmd)) {
1503 IPW_ERROR("failed to send FRAG_THRESHOLD command\n");
1510 static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
1512 struct host_cmd cmd = {
1513 .cmd = IPW_CMD_POWER_MODE,
1516 u32 *param = (u32*)(&cmd.param);
1519 IPW_ERROR("Invalid args\n");
1523 /* If on battery, set to 3, if AC set to CAM, else user
1526 case IPW_POWER_BATTERY:
1527 *param = IPW_POWER_INDEX_3;
1530 *param = IPW_POWER_MODE_CAM;
1537 if (ipw_send_cmd(priv, &cmd)) {
1538 IPW_ERROR("failed to send POWER_MODE command\n");
1546 * The IPW device contains a Microwire compatible EEPROM that stores
1547 * various data like the MAC address. Usually the firmware has exclusive
1548 * access to the eeprom, but during device initialization (before the
1549 * device driver has sent the HostComplete command to the firmware) the
1550 * device driver has read access to the EEPROM by way of indirect addressing
1551 * through a couple of memory mapped registers.
1553 * The following is a simplified implementation for pulling data out of
1554 * the eeprom, along with some helper functions to find information in
1555 * the per device private data's copy of the eeprom.
1557 * NOTE: To better understand how these functions work (i.e. what is a chip
1558 * select and why do we have to keep driving the eeprom clock?), read
1559 * just about any data sheet for a Microwire compatible EEPROM.
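 *
 * As a concrete illustration of the sequence implemented below: reading
 * one 16-bit word (eeprom_read_u16) clocks out the start bit, READ opcode
 * and 8-bit address via eeprom_op(), then clocks in 16 data bits one SK
 * pulse at a time while sampling EEPROM_BIT_DO, and finally de-selects
 * the chip with eeprom_disable_cs().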
1562 /* write a 32 bit value into the indirect accessor register */
1563 static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
1565 ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
1567 /* the eeprom requires some time to complete the operation */
1568 udelay(p->eeprom_delay);
1573 /* perform a chip select operation */
1574 static inline void eeprom_cs(struct ipw_priv* priv)
1576 eeprom_write_reg(priv,0);
1577 eeprom_write_reg(priv,EEPROM_BIT_CS);
1578 eeprom_write_reg(priv,EEPROM_BIT_CS|EEPROM_BIT_SK);
1579 eeprom_write_reg(priv,EEPROM_BIT_CS);
1582 /* perform a chip de-select operation */
1583 static inline void eeprom_disable_cs(struct ipw_priv* priv)
1585 eeprom_write_reg(priv,EEPROM_BIT_CS);
1586 eeprom_write_reg(priv,0);
1587 eeprom_write_reg(priv,EEPROM_BIT_SK);
1590 /* push a single bit down to the eeprom */
1591 static inline void eeprom_write_bit(struct ipw_priv *p,u8 bit)
1593 int d = ( bit ? EEPROM_BIT_DI : 0);
1594 eeprom_write_reg(p,EEPROM_BIT_CS|d);
1595 eeprom_write_reg(p,EEPROM_BIT_CS|d|EEPROM_BIT_SK);
1598 /* push an opcode followed by an address down to the eeprom */
1599 static void eeprom_op(struct ipw_priv* priv, u8 op, u8 addr)
1604 eeprom_write_bit(priv,1);
1605 eeprom_write_bit(priv,op&2);
1606 eeprom_write_bit(priv,op&1);
1607 for ( i=7; i>=0; i-- ) {
1608 eeprom_write_bit(priv,addr&(1<<i));
1612 /* pull 16 bits off the eeprom, one bit at a time */
1613 static u16 eeprom_read_u16(struct ipw_priv* priv, u8 addr)
1618 /* Send READ Opcode */
1619 eeprom_op(priv,EEPROM_CMD_READ,addr);
1621 /* Send dummy bit */
1622 eeprom_write_reg(priv,EEPROM_BIT_CS);
1624 /* Read the 16-bit word off the eeprom one bit at a time */
1625 for ( i=0; i<16; i++ ) {
1627 eeprom_write_reg(priv,EEPROM_BIT_CS|EEPROM_BIT_SK);
1628 eeprom_write_reg(priv,EEPROM_BIT_CS);
1629 data = ipw_read_reg32(priv,FW_MEM_REG_EEPROM_ACCESS);
1630 r = (r<<1) | ((data & EEPROM_BIT_DO)?1:0);
1633 /* Send another dummy bit */
1634 eeprom_write_reg(priv,0);
1635 eeprom_disable_cs(priv);
1640 /* helper function for pulling the mac address out of the private */
1641 /* data's copy of the eeprom data */
1642 static void eeprom_parse_mac(struct ipw_priv* priv, u8* mac)
1644 u8* ee = (u8*)priv->eeprom;
1645 memcpy(mac, &ee[EEPROM_MAC_ADDRESS], 6);
1649 * Either the device driver (i.e. the host) or the firmware can
1650 * load eeprom data into the designated region in SRAM. If neither
1651 * happens then the FW will shutdown with a fatal error.
1653 * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE
1654 * bit in the designated region of shared SRAM needs to be non-zero.
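 *
 * (Concretely, ipw_eeprom_init_sram() below writes 0 to
 * IPW_EEPROM_LOAD_DISABLE after the host has copied its own EEPROM image
 * into SRAM, and writes 1 when the firmware should load the EEPROM data
 * itself.)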
1656 static void ipw_eeprom_init_sram(struct ipw_priv *priv)
1659 u16 *eeprom = (u16 *)priv->eeprom;
1661 IPW_DEBUG_TRACE(">>\n");
1663 /* read entire contents of eeprom into private buffer */
1664 for ( i=0; i<128; i++ )
1665 eeprom[i] = eeprom_read_u16(priv,(u8)i);
1668 If the data looks correct, then copy it to our private
1669 copy. Otherwise let the firmware know to perform the operation
1672 if (priv->eeprom[EEPROM_VERSION] != 0) {
1673 IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
1675 /* write the eeprom data to sram */
1676 for( i=0; i<CX2_EEPROM_IMAGE_SIZE; i++ )
1677 ipw_write8(priv, IPW_EEPROM_DATA + i,
1680 /* Do not load eeprom data on fatal error or suspend */
1681 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
1683 IPW_DEBUG_INFO("Enabling FW initializationg of SRAM\n");
1685 /* Load eeprom data on fatal error or suspend */
1686 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
1689 IPW_DEBUG_TRACE("<<\n");
1693 static inline void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
1697 _ipw_write32(priv, CX2_AUTOINC_ADDR, start);
1699 _ipw_write32(priv, CX2_AUTOINC_DATA, 0);
1702 static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
1704 ipw_zero_memory(priv, CX2_SHARED_SRAM_DMA_CONTROL,
1705 CB_NUMBER_OF_ELEMENTS_SMALL *
1706 sizeof(struct command_block));
1709 static int ipw_fw_dma_enable(struct ipw_priv *priv)
1710 { /* start dma engine but no transfers yet*/
1712 IPW_DEBUG_FW(">> : \n");
1715 ipw_fw_dma_reset_command_blocks(priv);
1717 /* Write CB base address */
1718 ipw_write_reg32(priv, CX2_DMA_I_CB_BASE, CX2_SHARED_SRAM_DMA_CONTROL);
1720 IPW_DEBUG_FW("<< : \n");
1724 static void ipw_fw_dma_abort(struct ipw_priv *priv)
1728 IPW_DEBUG_FW(">> :\n");
1730 //set the Stop and Abort bit
1731 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
1732 ipw_write_reg32(priv, CX2_DMA_I_DMA_CONTROL, control);
1733 priv->sram_desc.last_cb_index = 0;
1735 IPW_DEBUG_FW("<< \n");
1738 static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index, struct command_block *cb)
1740 u32 address = CX2_SHARED_SRAM_DMA_CONTROL + (sizeof(struct command_block) * index);
1741 IPW_DEBUG_FW(">> :\n");
1743 ipw_write_indirect(priv, address, (u8*)cb, sizeof(struct command_block));
1745 IPW_DEBUG_FW("<< :\n");
1750 static int ipw_fw_dma_kick(struct ipw_priv *priv)
1755 IPW_DEBUG_FW(">> :\n");
1757 for (index = 0; index < priv->sram_desc.last_cb_index; index++)
1758 ipw_fw_dma_write_command_block(priv, index, &priv->sram_desc.cb_list[index]);
1760 /* Enable the DMA in the CSR register */
1761 ipw_clear_bit(priv, CX2_RESET_REG,CX2_RESET_REG_MASTER_DISABLED | CX2_RESET_REG_STOP_MASTER);
1763 /* Set the Start bit. */
1764 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
1765 ipw_write_reg32(priv, CX2_DMA_I_DMA_CONTROL, control);
1767 IPW_DEBUG_FW("<< :\n");
1771 static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
1774 u32 register_value=0;
1775 u32 cb_fields_address=0;
1777 IPW_DEBUG_FW(">> :\n");
1778 address = ipw_read_reg32(priv,CX2_DMA_I_CURRENT_CB);
1779 IPW_DEBUG_FW_INFO("Current CB is 0x%x \n",address);
1781 /* Read the DMA Control register */
1782 register_value = ipw_read_reg32(priv, CX2_DMA_I_DMA_CONTROL);
1783 IPW_DEBUG_FW_INFO("CX2_DMA_I_DMA_CONTROL is 0x%x \n",register_value);
1785 /* Print the CB values*/
1786 cb_fields_address = address;
1787 register_value = ipw_read_reg32(priv, cb_fields_address);
1788 IPW_DEBUG_FW_INFO("Current CB ControlField is 0x%x \n",register_value);
1790 cb_fields_address += sizeof(u32);
1791 register_value = ipw_read_reg32(priv, cb_fields_address);
1792 IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x \n",register_value);
1794 cb_fields_address += sizeof(u32);
1795 register_value = ipw_read_reg32(priv, cb_fields_address);
1796 IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x \n",
1799 cb_fields_address += sizeof(u32);
1800 register_value = ipw_read_reg32(priv, cb_fields_address);
1801 IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x \n",register_value);
1803 IPW_DEBUG_FW(">> :\n");
1806 static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
1808 u32 current_cb_address = 0;
1809 u32 current_cb_index = 0;
1811 IPW_DEBUG_FW("<< :\n");
1812 current_cb_address= ipw_read_reg32(priv, CX2_DMA_I_CURRENT_CB);
1814 current_cb_index = (current_cb_address - CX2_SHARED_SRAM_DMA_CONTROL )/
1815 sizeof (struct command_block);
1817 IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X \n",
1818 current_cb_index, current_cb_address );
1820 IPW_DEBUG_FW(">> :\n");
1821 return current_cb_index;
1825 static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
1829 int interrupt_enabled,
1833 u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
1834 CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
1836 struct command_block *cb;
1837 u32 last_cb_element=0;
1839 IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
1840 src_address, dest_address, length);
1842 if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
1845 last_cb_element = priv->sram_desc.last_cb_index;
1846 cb = &priv->sram_desc.cb_list[last_cb_element];
1847 priv->sram_desc.last_cb_index++;
1849 /* Calculate the new CB control word */
1850 if (interrupt_enabled )
1851 control |= CB_INT_ENABLED;
1854 control |= CB_LAST_VALID;
1858 /* Calculate the CB Element's checksum value */
1859 cb->status = control ^src_address ^dest_address;
1861 /* Copy the Source and Destination addresses */
1862 cb->dest_addr = dest_address;
1863 cb->source_addr = src_address;
1865 /* Copy the Control Word last */
1866 cb->control = control;
1871 static int ipw_fw_dma_add_buffer(struct ipw_priv *priv,
1876 u32 bytes_left = length;
1880 IPW_DEBUG_FW(">> \n");
1881 IPW_DEBUG_FW_INFO("src_phys=0x%x dest_address=0x%x length=0x%x\n",
1882 src_phys, dest_address, length);
1883 while (bytes_left > CB_MAX_LENGTH) {
1884 status = ipw_fw_dma_add_command_block( priv,
1885 src_phys + src_offset,
1886 dest_address + dest_offset,
1887 CB_MAX_LENGTH, 0, 0);
1889 IPW_DEBUG_FW_INFO(": Failed\n");
1892 IPW_DEBUG_FW_INFO(": Added new cb\n");
1894 src_offset += CB_MAX_LENGTH;
1895 dest_offset += CB_MAX_LENGTH;
1896 bytes_left -= CB_MAX_LENGTH;
1899 /* add the buffer tail */
1900 if (bytes_left > 0) {
1901 status = ipw_fw_dma_add_command_block(
1902 priv, src_phys + src_offset,
1903 dest_address + dest_offset,
1906 IPW_DEBUG_FW_INFO(": Failed on the buffer tail\n");
1909 IPW_DEBUG_FW_INFO(": Adding new cb - the buffer tail\n");
1913 IPW_DEBUG_FW("<< \n");
1917 static int ipw_fw_dma_wait(struct ipw_priv *priv)
1919 u32 current_index = 0;
1922 IPW_DEBUG_FW(">> : \n");
1924 current_index = ipw_fw_dma_command_block_index(priv);
1925 IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%8X\n",
1926 (int) priv->sram_desc.last_cb_index);
1928 while (current_index < priv->sram_desc.last_cb_index) {
1930 current_index = ipw_fw_dma_command_block_index(priv);
1934 if (watchdog > 400) {
1935 IPW_DEBUG_FW_INFO("Timeout\n");
1936 ipw_fw_dma_dump_command_block(priv);
1937 ipw_fw_dma_abort(priv);
1942 ipw_fw_dma_abort(priv);
1944 /*Disable the DMA in the CSR register*/
1945 ipw_set_bit(priv, CX2_RESET_REG,
1946 CX2_RESET_REG_MASTER_DISABLED | CX2_RESET_REG_STOP_MASTER);
1948 IPW_DEBUG_FW("<< dmaWaitSync \n");
1952 static void ipw_remove_current_network(struct ipw_priv *priv)
1954 struct list_head *element, *safe;
1955 struct ieee80211_network *network = NULL;
1956 list_for_each_safe(element, safe, &priv->ieee->network_list) {
1957 network = list_entry(element, struct ieee80211_network, list);
1958 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
1960 list_add_tail(&network->list,
1961 &priv->ieee->network_free_list);
1967 * Check that card is still alive.
1968 * Reads debug register from domain0.
1969 * If card is present, pre-defined value should
1973 * @return 1 if card is present, 0 otherwise
1975 static inline int ipw_alive(struct ipw_priv *priv)
1977 return ipw_read32(priv, 0x90) == 0xd55555d5;
1980 static inline int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
1986 if ((ipw_read32(priv, addr) & mask) == mask)
1990 } while (i < timeout);
1995 /* These functions load the firmware and micro code for the operation of
1996 * the ipw hardware. It assumes the buffer has all the bits for the
1997 * image and the caller is handling the memory allocation and clean up.
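 *
 * (Roughly: ipw_load() further below fetches the boot, ucode and firmware
 * images with ipw_get_fw() and drives the overall load; ipw_load_ucode()
 * pushes the microcode through the DINO interface and ipw_load_firmware()
 * DMAs the firmware image into place.)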
2001 static int ipw_stop_master(struct ipw_priv * priv)
2005 IPW_DEBUG_TRACE(">> \n");
2006 /* stop master. typical delay - 0 */
2007 ipw_set_bit(priv, CX2_RESET_REG, CX2_RESET_REG_STOP_MASTER);
2009 rc = ipw_poll_bit(priv, CX2_RESET_REG,
2010 CX2_RESET_REG_MASTER_DISABLED, 100);
2012 IPW_ERROR("stop master failed in 10ms\n");
2016 IPW_DEBUG_INFO("stop master %dms\n", rc);
2021 static void ipw_arc_release(struct ipw_priv *priv)
2023 IPW_DEBUG_TRACE(">> \n");
2026 ipw_clear_bit(priv, CX2_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
2028 /* no one knows timing, for safety add some delay */
2042 #define IPW_FW_MAJOR_VERSION 2
2043 #define IPW_FW_MINOR_VERSION 2
2045 #define IPW_FW_MINOR(x) ((x >> 8) & 0xff)
2046 #define IPW_FW_MAJOR(x) (x & 0xff)
2048 #define IPW_FW_VERSION ((IPW_FW_MINOR_VERSION << 8) | \
2049 IPW_FW_MAJOR_VERSION)
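/* the packed version word carries the minor number in bits 15:8 and the major in bits 7:0 */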
2051 #define IPW_FW_PREFIX "ipw-" __stringify(IPW_FW_MAJOR_VERSION) \
2052 "." __stringify(IPW_FW_MINOR_VERSION) "-"
2054 #if IPW_FW_MAJOR_VERSION >= 2 && IPW_FW_MINOR_VERSION > 0
2055 #define IPW_FW_NAME(x) IPW_FW_PREFIX "" x ".fw"
2057 #define IPW_FW_NAME(x) "ipw2200_" x ".fw"
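/*
 * With the 2.2 version constants above the first branch is taken, so e.g.
 * IPW_FW_NAME("boot") expands to "ipw-2.2-boot.fw"; the plain
 * "ipw2200_boot.fw" form is only used when the version check fails.
 */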
2060 static int ipw_load_ucode(struct ipw_priv *priv, u8 * data,
2063 int rc = 0, i, addr;
2067 image = (u16 *)data;
2069 IPW_DEBUG_TRACE(">> \n");
2071 rc = ipw_stop_master(priv);
2076 // spin_lock_irqsave(&priv->lock, flags);
2078 for (addr = CX2_SHARED_LOWER_BOUND;
2079 addr < CX2_REGISTER_DOMAIN1_END; addr += 4) {
2080 ipw_write32(priv, addr, 0);
2083 /* no ucode (yet) */
2084 memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
2085 /* destroy DMA queues */
2086 /* reset sequence */
2088 ipw_write_reg32(priv, CX2_MEM_HALT_AND_RESET ,CX2_BIT_HALT_RESET_ON);
2089 ipw_arc_release(priv);
2090 ipw_write_reg32(priv, CX2_MEM_HALT_AND_RESET, CX2_BIT_HALT_RESET_OFF);
2094 ipw_write_reg32(priv, CX2_INTERNAL_CMD_EVENT, CX2_BASEBAND_POWER_DOWN);
2097 ipw_write_reg32(priv, CX2_INTERNAL_CMD_EVENT, 0);
2100 /* enable ucode store */
2101 ipw_write_reg8(priv, DINO_CONTROL_REG, 0x0);
2102 ipw_write_reg8(priv, DINO_CONTROL_REG, DINO_ENABLE_CS);
2108 * Do NOT set indirect address register once and then
2109 * store data to indirect data register in the loop.
2110 * It seems very reasonable, but in this case DINO do not
2111 * accept ucode. It is essential to set address each time.
2113 /* load new ipw uCode */
2114 for (i = 0; i < len / 2; i++)
2115 ipw_write_reg16(priv, CX2_BASEBAND_CONTROL_STORE, image[i]);
2119 ipw_write_reg8(priv, CX2_BASEBAND_CONTROL_STATUS, 0);
2120 ipw_write_reg8(priv, CX2_BASEBAND_CONTROL_STATUS,
2121 DINO_ENABLE_SYSTEM );
2123 /* this is where the igx / win driver deviates from the VAP driver. */
2125 /* wait for alive response */
2126 for (i = 0; i < 100; i++) {
2127 /* poll for incoming data */
2128 cr = ipw_read_reg8(priv, CX2_BASEBAND_CONTROL_STATUS);
2129 if (cr & DINO_RXFIFO_DATA)
2134 if (cr & DINO_RXFIFO_DATA) {
2135 /* alive_command_response size is NOT a multiple of 4 */
2136 u32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];
2138 for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
2139 response_buffer[i] =
2140 ipw_read_reg32(priv,
2141 CX2_BASEBAND_RX_FIFO_READ);
2142 memcpy(&priv->dino_alive, response_buffer,
2143 sizeof(priv->dino_alive));
2144 if (priv->dino_alive.alive_command == 1
2145 && priv->dino_alive.ucode_valid == 1) {
2148 "Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
2149 "of %02d/%02d/%02d %02d:%02d\n",
2150 priv->dino_alive.software_revision,
2151 priv->dino_alive.software_revision,
2152 priv->dino_alive.device_identifier,
2153 priv->dino_alive.device_identifier,
2154 priv->dino_alive.time_stamp[0],
2155 priv->dino_alive.time_stamp[1],
2156 priv->dino_alive.time_stamp[2],
2157 priv->dino_alive.time_stamp[3],
2158 priv->dino_alive.time_stamp[4]);
2160 IPW_DEBUG_INFO("Microcode is not alive\n");
2164 IPW_DEBUG_INFO("No alive response from DINO\n");
2168 /* disable DINO, otherwise for some reason
2169 the firmware has problems getting the alive response. */
2170 ipw_write_reg8(priv, CX2_BASEBAND_CONTROL_STATUS, 0);
2172 // spin_unlock_irqrestore(&priv->lock, flags);
2177 static int ipw_load_firmware(struct ipw_priv *priv, u8 * data,
2182 struct fw_chunk *chunk;
2183 dma_addr_t shared_phys;
2186 IPW_DEBUG_TRACE("<< : \n");
2187 shared_virt = pci_alloc_consistent(priv->pci_dev, len, &shared_phys);
2192 memmove(shared_virt, data, len);
2195 rc = ipw_fw_dma_enable(priv);
2197 if (priv->sram_desc.last_cb_index > 0) {
2198 /* the DMA is already ready; this would be a bug. */
2204 chunk = (struct fw_chunk *)(data + offset);
2205 offset += sizeof(struct fw_chunk);
2206 /* build DMA packet and queue up for sending */
2207 /* dma to chunk->address, the chunk->length bytes from data +
2210 rc = ipw_fw_dma_add_buffer(priv, shared_phys + offset,
2211 chunk->address, chunk->length);
2213 IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
2217 offset += chunk->length;
2218 } while (offset < len);
2220 /* Run the DMA and wait for the answer*/
2221 rc = ipw_fw_dma_kick(priv);
2223 IPW_ERROR("dmaKick Failed\n");
2227 rc = ipw_fw_dma_wait(priv);
2229 IPW_ERROR("dmaWaitSync Failed\n");
2233 pci_free_consistent( priv->pci_dev, len, shared_virt, shared_phys);
2238 static int ipw_stop_nic(struct ipw_priv *priv)
2243 ipw_write32(priv, CX2_RESET_REG, CX2_RESET_REG_STOP_MASTER);
2245 rc = ipw_poll_bit(priv, CX2_RESET_REG,
2246 CX2_RESET_REG_MASTER_DISABLED, 500);
2248 IPW_ERROR("wait for reg master disabled failed\n");
2252 ipw_set_bit(priv, CX2_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
2257 static void ipw_start_nic(struct ipw_priv *priv)
2259 IPW_DEBUG_TRACE(">>\n");
2261 /* prvHwStartNic release ARC*/
2262 ipw_clear_bit(priv, CX2_RESET_REG,
2263 CX2_RESET_REG_MASTER_DISABLED |
2264 CX2_RESET_REG_STOP_MASTER |
2265 CBD_RESET_REG_PRINCETON_RESET);
2267 /* enable power management */
2268 ipw_set_bit(priv, CX2_GP_CNTRL_RW, CX2_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
2270 IPW_DEBUG_TRACE("<<\n");
2273 static int ipw_init_nic(struct ipw_priv *priv)
2277 IPW_DEBUG_TRACE(">>\n");
2280 /* set "initialization complete" bit to move adapter to D0 state */
2281 ipw_set_bit(priv, CX2_GP_CNTRL_RW, CX2_GP_CNTRL_BIT_INIT_DONE);
2283 /* low-level PLL activation */
2284 ipw_write32(priv, CX2_READ_INT_REGISTER, CX2_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
2286 /* wait for clock stabilization */
2287 rc = ipw_poll_bit(priv, CX2_GP_CNTRL_RW,
2288 CX2_GP_CNTRL_BIT_CLOCK_READY, 250);
2290 IPW_DEBUG_INFO("FAILED wait for clock stablization\n");
2292 /* assert SW reset */
2293 ipw_set_bit(priv, CX2_RESET_REG, CX2_RESET_REG_SW_RESET);
2297 /* set "initialization complete" bit to move adapter to D0 state */
2298 ipw_set_bit(priv, CX2_GP_CNTRL_RW, CX2_GP_CNTRL_BIT_INIT_DONE);
2300 IPW_DEBUG_TRACE("<<\n");
2305 /* Call this function from process context; it will sleep in request_firmware.
2306  * Probe is an OK place to call this from.
2308 static int ipw_reset_nic(struct ipw_priv *priv)
2312 IPW_DEBUG_TRACE(">>\n");
2314 rc = ipw_init_nic(priv);
2316 /* Clear the 'host command active' bit... */
2317 priv->status &= ~STATUS_HCMD_ACTIVE;
2318 wake_up_interruptible(&priv->wait_command_queue);
2320 IPW_DEBUG_TRACE("<<\n");
2324 static int ipw_get_fw(struct ipw_priv *priv,
2325 const struct firmware **fw, const char *name)
2327 struct fw_header *header;
2330 /* ask firmware_class module to get the boot firmware off disk */
2331 rc = request_firmware(fw, name, &priv->pci_dev->dev);
2333 IPW_ERROR("%s load failed: Reason %d\n", name, rc);
2337 header = (struct fw_header *)(*fw)->data;
2338 if (IPW_FW_MAJOR(header->version) != IPW_FW_MAJOR_VERSION) {
2339 IPW_ERROR("'%s' firmware version not compatible (%d != %d)\n",
2341 IPW_FW_MAJOR(header->version), IPW_FW_MAJOR_VERSION);
2345 IPW_DEBUG_INFO("Loading firmware '%s' file v%d.%d (%d bytes)\n",
2347 IPW_FW_MAJOR(header->version),
2348 IPW_FW_MINOR(header->version),
2349 (*fw)->size - sizeof(struct fw_header));
2353 #define CX2_RX_BUF_SIZE (3000)
2355 static inline void ipw_rx_queue_reset(struct ipw_priv *priv,
2356 struct ipw_rx_queue *rxq)
2358 unsigned long flags;
2361 spin_lock_irqsave(&rxq->lock, flags);
2363 INIT_LIST_HEAD(&rxq->rx_free);
2364 INIT_LIST_HEAD(&rxq->rx_used);
2366 /* Fill the rx_used queue with _all_ of the Rx buffers */
2367 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
2368 /* In the reset function, these buffers may have been allocated
2369 * to an SKB, so we need to unmap and free potential storage */
2370 if (rxq->pool[i].skb != NULL) {
2371 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
2373 PCI_DMA_FROMDEVICE);
2374 dev_kfree_skb(rxq->pool[i].skb);
2376 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
2379 /* Set us so that we have processed and used all buffers, but have
2380 * not restocked the Rx queue with fresh buffers */
2381 rxq->read = rxq->write = 0;
2382 rxq->processed = RX_QUEUE_SIZE - 1;
2383 rxq->free_count = 0;
2384 spin_unlock_irqrestore(&rxq->lock, flags);
2388 static int fw_loaded = 0;
2389 static const struct firmware *bootfw = NULL;
2390 static const struct firmware *firmware = NULL;
2391 static const struct firmware *ucode = NULL;
2394 static int ipw_load(struct ipw_priv *priv)
2397 const struct firmware *bootfw = NULL;
2398 const struct firmware *firmware = NULL;
2399 const struct firmware *ucode = NULL;
2401 int rc = 0, retries = 3;
2406 rc = ipw_get_fw(priv, &bootfw, IPW_FW_NAME("boot"));
2410 switch (priv->ieee->iw_mode) {
2412 rc = ipw_get_fw(priv, &ucode,
2413 IPW_FW_NAME("ibss_ucode"));
2417 rc = ipw_get_fw(priv, &firmware, IPW_FW_NAME("ibss"));
2420 #ifdef CONFIG_IPW_PROMISC
2421 case IW_MODE_MONITOR:
2422 rc = ipw_get_fw(priv, &ucode,
2423 IPW_FW_NAME("ibss_ucode"));
2427 rc = ipw_get_fw(priv, &firmware, IPW_FW_NAME("sniffer"));
2431 rc = ipw_get_fw(priv, &ucode,
2432 IPW_FW_NAME("bss_ucode"));
2436 rc = ipw_get_fw(priv, &firmware, IPW_FW_NAME("bss"));
2452 priv->rxq = ipw_rx_queue_alloc(priv);
2454 ipw_rx_queue_reset(priv, priv->rxq);
2456 IPW_ERROR("Unable to initialize Rx queue\n");
2461 /* Ensure interrupts are disabled */
2462 ipw_write32(priv, CX2_INTA_MASK_R, ~CX2_INTA_MASK_ALL);
2463 priv->status &= ~STATUS_INT_ENABLED;
2465 /* ack pending interrupts */
2466 ipw_write32(priv, CX2_INTA_RW, CX2_INTA_MASK_ALL);
2470 rc = ipw_reset_nic(priv);
2472 IPW_ERROR("Unable to reset NIC\n");
2476 ipw_zero_memory(priv, CX2_NIC_SRAM_LOWER_BOUND,
2477 CX2_NIC_SRAM_UPPER_BOUND - CX2_NIC_SRAM_LOWER_BOUND);
2479 /* DMA the initial boot firmware into the device */
2480 rc = ipw_load_firmware(priv, bootfw->data + sizeof(struct fw_header),
2481 bootfw->size - sizeof(struct fw_header));
2483 IPW_ERROR("Unable to load boot firmware\n");
2487 /* kick start the device */
2488 ipw_start_nic(priv);
2490 /* wait for the device to finish its initial startup sequence */
2491 rc = ipw_poll_bit(priv, CX2_INTA_RW,
2492 CX2_INTA_BIT_FW_INITIALIZATION_DONE, 500);
2494 IPW_ERROR("device failed to boot initial fw image\n");
2497 IPW_DEBUG_INFO("initial device response after %dms\n", rc);
2499 /* ack fw init done interrupt */
2500 ipw_write32(priv, CX2_INTA_RW, CX2_INTA_BIT_FW_INITIALIZATION_DONE);
2502 /* DMA the ucode into the device */
2503 rc = ipw_load_ucode(priv, ucode->data + sizeof(struct fw_header),
2504 ucode->size - sizeof(struct fw_header));
2506 IPW_ERROR("Unable to load ucode\n");
2513 /* DMA bss firmware into the device */
2514 rc = ipw_load_firmware(priv, firmware->data +
2515 sizeof(struct fw_header),
2516 firmware->size - sizeof(struct fw_header));
2518 IPW_ERROR("Unable to load firmware\n");
2522 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
2524 rc = ipw_queue_reset(priv);
2526 IPW_ERROR("Unable to initialize queues\n");
2530 /* Ensure interrupts are disabled */
2531 ipw_write32(priv, CX2_INTA_MASK_R, ~CX2_INTA_MASK_ALL);
2533 /* kick start the device */
2534 ipw_start_nic(priv);
2536 if (ipw_read32(priv, CX2_INTA_RW) & CX2_INTA_BIT_PARITY_ERROR) {
2538 IPW_WARNING("Parity error. Retrying init.\n");
2543 IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
2548 /* wait for the device */
2549 rc = ipw_poll_bit(priv, CX2_INTA_RW,
2550 CX2_INTA_BIT_FW_INITIALIZATION_DONE, 500);
2552 IPW_ERROR("device failed to start after 500ms\n");
2555 IPW_DEBUG_INFO("device response after %dms\n", rc);
2557 /* ack fw init done interrupt */
2558 ipw_write32(priv, CX2_INTA_RW, CX2_INTA_BIT_FW_INITIALIZATION_DONE);
2560 /* read eeprom data and initialize the eeprom region of sram */
2561 priv->eeprom_delay = 1;
2562 ipw_eeprom_init_sram(priv);
2564 /* enable interrupts */
2565 ipw_enable_interrupts(priv);
2567 /* Ensure our queue has valid packets */
2568 ipw_rx_queue_replenish(priv);
2570 ipw_write32(priv, CX2_RX_READ_INDEX, priv->rxq->read);
2572 /* ack pending interrupts */
2573 ipw_write32(priv, CX2_INTA_RW, CX2_INTA_MASK_ALL);
2576 release_firmware(bootfw);
2577 release_firmware(ucode);
2578 release_firmware(firmware);
2584 ipw_rx_queue_free(priv, priv->rxq);
2587 ipw_tx_queue_free(priv);
2589 release_firmware(bootfw);
2591 release_firmware(ucode);
2593 release_firmware(firmware);
2596 bootfw = ucode = firmware = NULL;
2605 * Theory of operation
2607 * A queue is a circular buffer with 'Read' and 'Write' pointers.
2608 * Two empty entries are always kept in the buffer to protect against overflow.
2610 * For the Tx queue, there are low-mark and high-mark limits. If, after queuing
2611 * a packet for Tx, free space becomes < low mark, the Tx queue is stopped. When
2612 * reclaiming packets (on the 'tx done' IRQ), if free space becomes > high mark,
2615 * The IPW operates with six queues, one receive queue in the device's
2616 * sram, one transmit queue for sending commands to the device firmware,
2617 * and four transmit queues for data.
2619 * The four transmit queues allow for performing quality of service (qos)
2620 * transmissions as per the 802.11 protocol. Currently Linux does not
2621 * provide a mechanism to the user for utilizing prioritized queues, so
2622 * we only utilize the first data transmit queue (queue1).
2626 * Driver allocates buffers of this size for Rx
2629 static inline int ipw_queue_space(const struct clx2_queue *q)
2631 int s = q->last_used - q->first_empty;
2634 s -= 2; /* keep some reserve to not confuse empty and full situations */
2640 static inline int ipw_queue_inc_wrap(int index, int n_bd)
2642 return (++index == n_bd) ? 0 : index;
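/*
 * Illustrative sketch (not driver code; the numbers are made up): walking
 * through the circular-queue arithmetic used by ipw_queue_space() and
 * ipw_queue_inc_wrap() with n_bd = 64, last_used = 10, first_empty = 60:
 *
 *	int s = 10 - 60;		// -50
 *	if (s <= 0)
 *		s += 64;		// 14 descriptors between write and read
 *	s -= 2;				// keep the 2-entry reserve -> 12 usable
 *	first_empty = ipw_queue_inc_wrap(first_empty, 64);	// advance with wrap
 */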
2646 * Initialize common DMA queue structure
2648 * @param q queue to init
2649 * @param count Number of BD's to allocate. Should be power of 2
2650 * @param read_register Address for 'read' register
2651 * (not offset within BAR, full address)
2652 * @param write_register Address for 'write' register
2653 * (not offset within BAR, full address)
2654 * @param base_register Address for 'base' register
2655 * (not offset within BAR, full address)
2656 * @param size Address for 'size' register
2657 * (not offset within BAR, full address)
2659 static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
2660 int count, u32 read, u32 write,
2665 q->low_mark = q->n_bd / 4;
2666 if (q->low_mark < 4)
2669 q->high_mark = q->n_bd / 8;
2670 if (q->high_mark < 2)
2673 q->first_empty = q->last_used = 0;
2677 ipw_write32(priv, base, q->dma_addr);
2678 ipw_write32(priv, size, count);
2679 ipw_write32(priv, read, 0);
2680 ipw_write32(priv, write, 0);
2682 _ipw_read32(priv, 0x90);
2685 static int ipw_queue_tx_init(struct ipw_priv *priv,
2686 struct clx2_tx_queue *q,
2687 int count, u32 read, u32 write,
2690 struct pci_dev *dev = priv->pci_dev;
2692 q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
2694 IPW_ERROR("vmalloc for auxilary BD structures failed\n");
2698 q->bd = pci_alloc_consistent(dev,sizeof(q->bd[0])*count, &q->q.dma_addr);
2700 IPW_ERROR("pci_alloc_consistent(%d) failed\n",
2701 sizeof(q->bd[0]) * count);
2707 ipw_queue_init(priv, &q->q, count, read, write, base, size);
2712 * Free one TFD, those at index [txq->q.last_used].
2713 * Do NOT advance any indexes
2718 static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
2719 struct clx2_tx_queue *txq)
2721 struct tfd_frame *bd = &txq->bd[txq->q.last_used];
2722 struct pci_dev *dev = priv->pci_dev;
2726 if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
2727 /* nothing to cleanup after for host commands */
2731 if (bd->u.data.num_chunks > NUM_TFD_CHUNKS) {
2732 IPW_ERROR("Too many chunks: %i\n", bd->u.data.num_chunks);
2733 /** @todo issue fatal error, it is quite a serious situation */
2737 /* unmap chunks if any */
2738 for (i = 0; i < bd->u.data.num_chunks; i++) {
2739 pci_unmap_single(dev, bd->u.data.chunk_ptr[i],
2740 bd->u.data.chunk_len[i], PCI_DMA_TODEVICE);
2741 if (txq->txb[txq->q.last_used]) {
2742 ieee80211_txb_free(txq->txb[txq->q.last_used]);
2743 txq->txb[txq->q.last_used] = NULL;
2749 * Deallocate DMA queue.
2751 * Empty queue by removing and destroying all BD's.
2757 static void ipw_queue_tx_free(struct ipw_priv *priv,
2758 struct clx2_tx_queue *txq)
2760 struct clx2_queue *q = &txq->q;
2761 struct pci_dev *dev = priv->pci_dev;
2766 /* first, empty all BD's */
2767 for (; q->first_empty != q->last_used;
2768 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
2769 ipw_queue_tx_free_tfd(priv, txq);
2772 /* free buffers belonging to queue itself */
2773 pci_free_consistent(dev, sizeof(txq->bd[0])*q->n_bd, txq->bd,
2777 /* 0 fill whole structure */
2778 memset(txq, 0, sizeof(*txq));
2783 * Destroy all DMA queues and structures
2787 static void ipw_tx_queue_free(struct ipw_priv *priv)
2790 ipw_queue_tx_free(priv, &priv->txq_cmd);
2793 ipw_queue_tx_free(priv, &priv->txq[0]);
2794 ipw_queue_tx_free(priv, &priv->txq[1]);
2795 ipw_queue_tx_free(priv, &priv->txq[2]);
2796 ipw_queue_tx_free(priv, &priv->txq[3]);
2799 static inline void __maybe_wake_tx(struct ipw_priv *priv)
2801 if (netif_running(priv->net_dev)) {
2802 switch (priv->port_type) {
2803 case DCR_TYPE_MU_BSS:
2804 case DCR_TYPE_MU_IBSS:
2805 if (!(priv->status & STATUS_ASSOCIATED)) {
2809 netif_wake_queue(priv->net_dev);
2814 static inline void ipw_create_bssid(struct ipw_priv *priv, u8 *bssid)
2816 /* First 3 bytes are manufacturer */
2817 bssid[0] = priv->mac_addr[0];
2818 bssid[1] = priv->mac_addr[1];
2819 bssid[2] = priv->mac_addr[2];
2821 /* Last bytes are random */
2822 get_random_bytes(&bssid[3], ETH_ALEN-3);
2824 bssid[0] &= 0xfe; /* clear multicast bit */
2825 bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */
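	/*
	 * Example (addresses are illustrative only): a device MAC of
	 * 00:0E:35:12:34:56 yields an ad-hoc BSSID of the form 02:0E:35:RR:RR:RR,
	 * where the multicast bit is cleared, the locally-administered bit is
	 * set, and RR:RR:RR comes from get_random_bytes().
	 */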
2828 static inline u8 ipw_add_station(struct ipw_priv *priv, u8 *bssid)
2830 struct ipw_station_entry entry;
2833 for (i = 0; i < priv->num_stations; i++) {
2834 if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) {
2835 /* Another node is active in network */
2836 priv->missed_adhoc_beacons = 0;
2837 if (!(priv->config & CFG_STATIC_CHANNEL))
2838 /* when other nodes drop out, we drop out */
2839 priv->config &= ~CFG_ADHOC_PERSIST;
2845 if (i == MAX_STATIONS)
2846 return IPW_INVALID_STATION;
2848 IPW_DEBUG_SCAN("Adding AdHoc station: " MAC_FMT "\n", MAC_ARG(bssid));
2851 entry.support_mode = 0;
2852 memcpy(entry.mac_addr, bssid, ETH_ALEN);
2853 memcpy(priv->stations[i], bssid, ETH_ALEN);
2854 ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
2857 priv->num_stations++;
2862 static inline u8 ipw_find_station(struct ipw_priv *priv, u8 *bssid)
2866 for (i = 0; i < priv->num_stations; i++)
2867 if (!memcmp(priv->stations[i], bssid, ETH_ALEN))
2870 return IPW_INVALID_STATION;
2873 static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
2877 if (!(priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED))) {
2878 IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
2882 IPW_DEBUG_ASSOC("Disassocation attempt from " MAC_FMT " "
2884 MAC_ARG(priv->assoc_request.bssid),
2885 priv->assoc_request.channel);
2887 priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
2888 priv->status |= STATUS_DISASSOCIATING;
2891 priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
2893 priv->assoc_request.assoc_type = HC_DISASSOCIATE;
2894 err = ipw_send_associate(priv, &priv->assoc_request);
2896 IPW_DEBUG_HC("Attempt to send [dis]associate command "
2903 static void ipw_disassociate(void *data)
2905 ipw_send_disassociate(data, 0);
2908 static void notify_wx_assoc_event(struct ipw_priv *priv)
2910 union iwreq_data wrqu;
2911 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
2912 if (priv->status & STATUS_ASSOCIATED)
2913 memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
2915 memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
2916 wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
2919 struct ipw_status_code {
2924 static const struct ipw_status_code ipw_status_codes[] = {
2925 {0x00, "Successful"},
2926 {0x01, "Unspecified failure"},
2927 {0x0A, "Cannot support all requested capabilities in the "
2928 "Capability information field"},
2929 {0x0B, "Reassociation denied due to inability to confirm that "
2930 "association exists"},
2931 {0x0C, "Association denied due to reason outside the scope of this "
2933 {0x0D, "Responding station does not support the specified authentication "
2935 {0x0E, "Received an Authentication frame with authentication sequence "
2936 "transaction sequence number out of expected sequence"},
2937 {0x0F, "Authentication rejected because of challenge failure"},
2938 {0x10, "Authentication rejected due to timeout waiting for next "
2939 "frame in sequence"},
2940 {0x11, "Association denied because AP is unable to handle additional "
2941 "associated stations"},
2942 {0x12, "Association denied due to requesting station not supporting all "
2943 "of the datarates in the BSSBasicServiceSet Parameter"},
2944 {0x13, "Association denied due to requesting station not supporting "
2945 "short preamble operation"},
2946 {0x14, "Association denied due to requesting station not supporting "
2948 {0x15, "Association denied due to requesting station not supporting "
2950 {0x19, "Association denied due to requesting station not supporting "
2951 "short slot operation"},
2952 {0x1A, "Association denied due to requesting station not supporting "
2953 "DSSS-OFDM operation"},
2954 {0x28, "Invalid Information Element"},
2955 {0x29, "Group Cipher is not valid"},
2956 {0x2A, "Pairwise Cipher is not valid"},
2957 {0x2B, "AKMP is not valid"},
2958 {0x2C, "Unsupported RSN IE version"},
2959 {0x2D, "Invalid RSN IE Capabilities"},
2960 {0x2E, "Cipher suite is rejected per security policy"},
2963 #ifdef CONFIG_IPW_DEBUG
2964 static const char *ipw_get_status_code(u16 status)
2967 for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
2968 if (ipw_status_codes[i].status == status)
2969 return ipw_status_codes[i].reason;
2970 return "Unknown status value.";
2974 static inline void average_init(struct average *avg)
2976 memset(avg, 0, sizeof(*avg));
2979 static inline void average_add(struct average *avg, s16 val)
2981 avg->sum -= avg->entries[avg->pos];
2983 avg->entries[avg->pos++] = val;
2984 if (unlikely(avg->pos == AVG_ENTRIES)) {
2990 static inline s16 average_value(struct average *avg)
2992 if (!unlikely(avg->init)) {
2994 return avg->sum / avg->pos;
2998 return avg->sum / AVG_ENTRIES;
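/*
 * Usage sketch (hypothetical values; assumes the AVG_ENTRIES ring size
 * defined elsewhere in this driver):
 *
 *	struct average avg;
 *	average_init(&avg);
 *	average_add(&avg, -60);
 *	average_add(&avg, -70);
 *	s16 v = average_value(&avg);	// -65 until the ring wraps; after that,
 *					// the mean of the last AVG_ENTRIES samples
 */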
3001 static void ipw_reset_stats(struct ipw_priv *priv)
3003 u32 len = sizeof(u32);
3007 average_init(&priv->average_missed_beacons);
3008 average_init(&priv->average_rssi);
3009 average_init(&priv->average_noise);
3011 priv->last_rate = 0;
3012 priv->last_missed_beacons = 0;
3013 priv->last_rx_packets = 0;
3014 priv->last_tx_packets = 0;
3015 priv->last_tx_failures = 0;
3017 /* Firmware managed, reset only when NIC is restarted, so we have to
3018 * normalize on the current value */
3019 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
3020 &priv->last_rx_err, &len);
3021 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
3022 &priv->last_tx_failures, &len);
3024 /* Driver managed, reset with each association */
3025 priv->missed_adhoc_beacons = 0;
3026 priv->missed_beacons = 0;
3027 priv->tx_packets = 0;
3028 priv->rx_packets = 0;
3033 static inline u32 ipw_get_max_rate(struct ipw_priv *priv)
3036 u32 mask = priv->rates_mask;
3037 /* If currently associated in B mode, restrict the maximum
3038 * rate match to B rates */
3039 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
3040 mask &= IEEE80211_CCK_RATES_MASK;
3042 /* TODO: Verify that the rate is supported by the current rates
3045 while (i && !(mask & i)) i >>= 1;
3047 case IEEE80211_CCK_RATE_1MB_MASK: return 1000000;
3048 case IEEE80211_CCK_RATE_2MB_MASK: return 2000000;
3049 case IEEE80211_CCK_RATE_5MB_MASK: return 5500000;
3050 case IEEE80211_OFDM_RATE_6MB_MASK: return 6000000;
3051 case IEEE80211_OFDM_RATE_9MB_MASK: return 9000000;
3052 case IEEE80211_CCK_RATE_11MB_MASK: return 11000000;
3053 case IEEE80211_OFDM_RATE_12MB_MASK: return 12000000;
3054 case IEEE80211_OFDM_RATE_18MB_MASK: return 18000000;
3055 case IEEE80211_OFDM_RATE_24MB_MASK: return 24000000;
3056 case IEEE80211_OFDM_RATE_36MB_MASK: return 36000000;
3057 case IEEE80211_OFDM_RATE_48MB_MASK: return 48000000;
3058 case IEEE80211_OFDM_RATE_54MB_MASK: return 54000000;
3061 if (priv->ieee->mode == IEEE_B)
3067 static u32 ipw_get_current_rate(struct ipw_priv *priv)
3069 u32 rate, len = sizeof(rate);
3072 if (!(priv->status & STATUS_ASSOCIATED))
3075 if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
3076 err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
3079 IPW_DEBUG_INFO("failed querying ordinals.\n");
3083 return ipw_get_max_rate(priv);
3086 case IPW_TX_RATE_1MB: return 1000000;
3087 case IPW_TX_RATE_2MB: return 2000000;
3088 case IPW_TX_RATE_5MB: return 5500000;
3089 case IPW_TX_RATE_6MB: return 6000000;
3090 case IPW_TX_RATE_9MB: return 9000000;
3091 case IPW_TX_RATE_11MB: return 11000000;
3092 case IPW_TX_RATE_12MB: return 12000000;
3093 case IPW_TX_RATE_18MB: return 18000000;
3094 case IPW_TX_RATE_24MB: return 24000000;
3095 case IPW_TX_RATE_36MB: return 36000000;
3096 case IPW_TX_RATE_48MB: return 48000000;
3097 case IPW_TX_RATE_54MB: return 54000000;
3103 #define PERFECT_RSSI (-50)
3104 #define WORST_RSSI (-85)
3105 #define IPW_STATS_INTERVAL (2 * HZ)
3106 static void ipw_gather_stats(struct ipw_priv *priv)
3108 u32 rx_err, rx_err_delta, rx_packets_delta;
3109 u32 tx_failures, tx_failures_delta, tx_packets_delta;
3110 u32 missed_beacons_percent, missed_beacons_delta;
3112 u32 len = sizeof(u32);
3114 u32 beacon_quality, signal_quality, tx_quality, rx_quality,
3117 if (!(priv->status & STATUS_ASSOCIATED)) {
3122 /* Update the statistics */
3123 ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
3124 &priv->missed_beacons, &len);
3125 missed_beacons_delta = priv->missed_beacons -
3126 priv->last_missed_beacons;
3127 priv->last_missed_beacons = priv->missed_beacons;
3128 if (priv->assoc_request.beacon_interval) {
3129 missed_beacons_percent = missed_beacons_delta *
3130 (HZ * priv->assoc_request.beacon_interval) /
3131 (IPW_STATS_INTERVAL * 10);
3133 missed_beacons_percent = 0;
3135 average_add(&priv->average_missed_beacons, missed_beacons_percent);
3137 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
3138 rx_err_delta = rx_err - priv->last_rx_err;
3139 priv->last_rx_err = rx_err;
3141 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
3142 tx_failures_delta = tx_failures - priv->last_tx_failures;
3143 priv->last_tx_failures = tx_failures;
3145 rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
3146 priv->last_rx_packets = priv->rx_packets;
3148 tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
3149 priv->last_tx_packets = priv->tx_packets;
3151 /* Calculate quality based on the following:
3153 * Missed beacon: 100% = 0, 0% = 70% missed
3154 * Rate: 60% = 1Mbs, 100% = Max
3155 * Rx and Tx errors represent a straight % of total Rx/Tx
3156 * RSSI: 100% = > -50, 0% = < -80
3157 * Rx errors: 100% = 0, 0% = 50% missed
3159 * The lowest computed quality is used.
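	 *
	 * Worked example (made-up numbers): rx_packets_delta = 400 and
	 * rx_err_delta = 100 give rx_quality = 100 - (100 * 100) / 500 = 80%;
	 * an average RSSI of -64 dBm gives signal_quality =
	 * (-64 - (-85)) * 100 / ((-50) - (-85)) = 60%. The reported quality is
	 * then the minimum of the individual metrics.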
3162 #define BEACON_THRESHOLD 5
3163 beacon_quality = 100 - missed_beacons_percent;
3164 if (beacon_quality < BEACON_THRESHOLD)
3167 beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
3168 (100 - BEACON_THRESHOLD);
3169 IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
3170 beacon_quality, missed_beacons_percent);
3172 priv->last_rate = ipw_get_current_rate(priv);
3173 rate_quality = priv->last_rate * 40 / ipw_get_max_rate(priv) + 60; /* ~60% at 1 Mbps, 100% at the max supported rate */
3174 IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
3175 rate_quality, priv->last_rate / 1000000);
3177 if (rx_packets_delta > 100 &&
3178 rx_packets_delta + rx_err_delta)
3179 rx_quality = 100 - (rx_err_delta * 100) /
3180 (rx_packets_delta + rx_err_delta);
3183 IPW_DEBUG_STATS("Rx quality : %3d%% (%u errors, %u packets)\n",
3184 rx_quality, rx_err_delta, rx_packets_delta);
3186 if (tx_packets_delta > 100 &&
3187 tx_packets_delta + tx_failures_delta)
3188 tx_quality = 100 - (tx_failures_delta * 100) /
3189 (tx_packets_delta + tx_failures_delta);
3192 IPW_DEBUG_STATS("Tx quality : %3d%% (%u errors, %u packets)\n",
3193 tx_quality, tx_failures_delta, tx_packets_delta);
3195 rssi = average_value(&priv->average_rssi);
3196 if (rssi > PERFECT_RSSI)
3197 signal_quality = 100;
3198 else if (rssi < WORST_RSSI)
3201 signal_quality = (rssi - WORST_RSSI) * 100 /
3202 (PERFECT_RSSI - WORST_RSSI);
3203 IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
3204 signal_quality, rssi);
3206 quality = min(beacon_quality,
3208 min(tx_quality, min(rx_quality, signal_quality))));
3209 if (quality == beacon_quality)
3211 "Quality (%d%%): Clamped to missed beacons.\n",
3213 if (quality == rate_quality)
3215 "Quality (%d%%): Clamped to rate quality.\n",
3217 if (quality == tx_quality)
3219 "Quality (%d%%): Clamped to Tx quality.\n",
3221 if (quality == rx_quality)
3223 "Quality (%d%%): Clamped to Rx quality.\n",
3225 if (quality == signal_quality)
3227 "Quality (%d%%): Clamped to signal quality.\n",
3230 priv->quality = quality;
3232 queue_delayed_work(priv->workqueue, &priv->gather_stats,
3233 IPW_STATS_INTERVAL);
3237 * Handle host notification packet.
3238 * Called from interrupt routine
3240 static inline void ipw_rx_notification(struct ipw_priv* priv,
3241 struct ipw_rx_notification *notif)
3243 IPW_DEBUG_NOTIF("type = %i (%d bytes)\n",
3244 notif->subtype, notif->size);
3246 switch (notif->subtype) {
3247 case HOST_NOTIFICATION_STATUS_ASSOCIATED: {
3248 struct notif_association *assoc = ¬if->u.assoc;
3250 switch (assoc->state) {
3251 case CMAS_ASSOCIATED: {
3252 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
3253 "associated: '%s' " MAC_FMT " \n",
3254 escape_essid(priv->essid, priv->essid_len),
3255 MAC_ARG(priv->bssid));
3257 switch (priv->ieee->iw_mode) {
3259 memcpy(priv->ieee->bssid, priv->bssid,
3264 memcpy(priv->ieee->bssid, priv->bssid,
3267 /* clear out the station table */
3268 priv->num_stations = 0;
3270 IPW_DEBUG_ASSOC("queueing adhoc check\n");
3271 queue_delayed_work(priv->workqueue,
3273 priv->assoc_request.beacon_interval);
3277 priv->status &= ~STATUS_ASSOCIATING;
3278 priv->status |= STATUS_ASSOCIATED;
3280 netif_carrier_on(priv->net_dev);
3281 if (netif_queue_stopped(priv->net_dev)) {
3282 IPW_DEBUG_NOTIF("waking queue\n");
3283 netif_wake_queue(priv->net_dev);
3285 IPW_DEBUG_NOTIF("starting queue\n");
3286 netif_start_queue(priv->net_dev);
3289 ipw_reset_stats(priv);
3290 /* Ensure the rate is updated immediately */
3291 priv->last_rate = ipw_get_current_rate(priv);
3292 schedule_work(&priv->gather_stats);
3293 notify_wx_assoc_event(priv);
3295 /* queue_delayed_work(priv->workqueue,
3296 &priv->request_scan,
3297 SCAN_ASSOCIATED_INTERVAL);
3302 case CMAS_AUTHENTICATED: {
3303 if (priv->status & (STATUS_ASSOCIATED | STATUS_AUTH)) {
3304 #ifdef CONFIG_IPW_DEBUG
3305 struct notif_authenticate *auth = ¬if->u.auth;
3306 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
3307 "deauthenticated: '%s' " MAC_FMT ": (0x%04X) - %s \n",
3308 escape_essid(priv->essid, priv->essid_len),
3309 MAC_ARG(priv->bssid),
3310 ntohs(auth->status),
3311 ipw_get_status_code(ntohs(auth->status)));
3314 priv->status &= ~(STATUS_ASSOCIATING |
3318 netif_carrier_off(priv->net_dev);
3319 netif_stop_queue(priv->net_dev);
3320 queue_work(priv->workqueue, &priv->request_scan);
3321 notify_wx_assoc_event(priv);
3325 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
3326 "authenticated: '%s' " MAC_FMT "\n",
3327 escape_essid(priv->essid, priv->essid_len),
3328 MAC_ARG(priv->bssid));
3333 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
3334 "disassociated: '%s' " MAC_FMT " \n",
3335 escape_essid(priv->essid, priv->essid_len),
3336 MAC_ARG(priv->bssid));
3339 STATUS_DISASSOCIATING |
3340 STATUS_ASSOCIATING |
3344 netif_stop_queue(priv->net_dev);
3345 if (!(priv->status & STATUS_ROAMING)) {
3346 netif_carrier_off(priv->net_dev);
3347 notify_wx_assoc_event(priv);
3349 /* Cancel any queued work ... */
3350 cancel_delayed_work(&priv->request_scan);
3351 cancel_delayed_work(&priv->adhoc_check);
3353 /* Queue up another scan... */
3354 queue_work(priv->workqueue,
3355 &priv->request_scan);
3357 cancel_delayed_work(&priv->gather_stats);
3359 priv->status |= STATUS_ROAMING;
3360 queue_work(priv->workqueue,
3361 &priv->request_scan);
3364 ipw_reset_stats(priv);
3369 IPW_ERROR("assoc: unknown (%d)\n",
3377 case HOST_NOTIFICATION_STATUS_AUTHENTICATE: {
3378 struct notif_authenticate *auth = ¬if->u.auth;
3379 switch (auth->state) {
3380 case CMAS_AUTHENTICATED:
3381 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
3382 "authenticated: '%s' " MAC_FMT " \n",
3383 escape_essid(priv->essid, priv->essid_len),
3384 MAC_ARG(priv->bssid));
3385 priv->status |= STATUS_AUTH;
3389 if (priv->status & STATUS_AUTH) {
3390 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
3391 "authentication failed (0x%04X): %s\n",
3392 ntohs(auth->status),
3393 ipw_get_status_code(ntohs(auth->status)));
3395 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
3396 "deauthenticated: '%s' " MAC_FMT "\n",
3397 escape_essid(priv->essid, priv->essid_len),
3398 MAC_ARG(priv->bssid));
3400 priv->status &= ~(STATUS_ASSOCIATING |
3404 netif_carrier_off(priv->net_dev);
3405 netif_stop_queue(priv->net_dev);
3406 queue_work(priv->workqueue, &priv->request_scan);
3407 notify_wx_assoc_event(priv);
3410 case CMAS_TX_AUTH_SEQ_1:
3411 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
3414 case CMAS_RX_AUTH_SEQ_2:
3415 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
3418 case CMAS_AUTH_SEQ_1_PASS:
3419 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
3420 "AUTH_SEQ_1_PASS\n");
3422 case CMAS_AUTH_SEQ_1_FAIL:
3423 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
3424 "AUTH_SEQ_1_FAIL\n");
3426 case CMAS_TX_AUTH_SEQ_3:
3427 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
3430 case CMAS_RX_AUTH_SEQ_4:
3431 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
3434 case CMAS_AUTH_SEQ_2_PASS:
3435 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
3436 "AUTH_SEQ_2_PASS\n");
3438 case CMAS_AUTH_SEQ_2_FAIL:
3439 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
3440 "AUT_SEQ_2_FAIL\n");
3443 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
3446 case CMAS_RX_ASSOC_RESP:
3447 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
3450 case CMAS_ASSOCIATED:
3451 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
3455 IPW_DEBUG_NOTIF("auth: failure - %d\n", auth->state);
3461 case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT: {
3462 struct notif_channel_result *x = ¬if->u.channel_result;
3464 if (notif->size == sizeof(*x)) {
3465 IPW_DEBUG_SCAN("Scan result for channel %d\n",
3468 IPW_DEBUG_SCAN("Scan result of wrong size %d "
3470 notif->size,sizeof(*x));
3475 case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED: {
3476 struct notif_scan_complete* x = ¬if->u.scan_complete;
3477 if (notif->size == sizeof(*x)) {
3478 IPW_DEBUG_SCAN("Scan completed: type %d, %d channels, "
3484 IPW_ERROR("Scan completed of wrong size %d "
3486 notif->size,sizeof(*x));
3489 priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
3491 cancel_delayed_work(&priv->scan_check);
3493 if (!(priv->status & (STATUS_ASSOCIATED |
3494 STATUS_ASSOCIATING |
3496 STATUS_DISASSOCIATING)))
3497 queue_work(priv->workqueue, &priv->associate);
3498 else if (priv->status & STATUS_ROAMING) {
3499 /* If a scan completed and we are in roam mode, then
3500 * the scan that completed was the one requested as a
3501 * result of entering roam... so, schedule the
3503 queue_work(priv->workqueue, &priv->roam);
3504 } else if (priv->status & STATUS_SCAN_PENDING)
3505 queue_work(priv->workqueue, &priv->request_scan);
3507 priv->ieee->scans++;
3511 case HOST_NOTIFICATION_STATUS_FRAG_LENGTH: {
3512 struct notif_frag_length *x = ¬if->u.frag_len;
3514 if (notif->size == sizeof(*x)) {
3515 IPW_ERROR("Frag length: %d\n", x->frag_length);
3517 IPW_ERROR("Frag length of wrong size %d "
3519 notif->size, sizeof(*x));
3524 case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION: {
3525 struct notif_link_deterioration *x =
3526 ¬if->u.link_deterioration;
3527 if (notif->size==sizeof(*x)) {
3528 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
3529 "link deterioration: '%s' " MAC_FMT " \n",
3530 escape_essid(priv->essid, priv->essid_len),
3531 MAC_ARG(priv->bssid));
3532 memcpy(&priv->last_link_deterioration, x, sizeof(*x));
3534 IPW_ERROR("Link Deterioration of wrong size %d "
3536 notif->size,sizeof(*x));
3541 case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE: {
3542 IPW_ERROR("Dino config\n");
3543 if (priv->hcmd && priv->hcmd->cmd == HOST_CMD_DINO_CONFIG) {
3544 /* TODO: Do anything special? */
3546 IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
3551 case HOST_NOTIFICATION_STATUS_BEACON_STATE: {
3552 struct notif_beacon_state *x = ¬if->u.beacon_state;
3553 if (notif->size != sizeof(*x)) {
3554 IPW_ERROR("Beacon state of wrong size %d (should "
3555 "be %d)\n", notif->size, sizeof(*x));
3559 if (x->state == HOST_NOTIFICATION_STATUS_BEACON_MISSING) {
3560 if (priv->status & STATUS_SCANNING) {
3561 /* Stop scan to keep fw from getting
3563 queue_work(priv->workqueue,
3567 if (x->number > priv->missed_beacon_threshold &&
3568 priv->status & STATUS_ASSOCIATED) {
3569 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
3571 "Missed beacon: %d - disassociate\n",
3573 queue_work(priv->workqueue,
3574 &priv->disassociate);
3575 } else if (x->number > priv->roaming_threshold) {
3576 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
3577 "Missed beacon: %d - initiate "
3580 queue_work(priv->workqueue,
3583 IPW_DEBUG_NOTIF("Missed beacon: %d\n",
3587 priv->notif_missed_beacons = x->number;
3595 case HOST_NOTIFICATION_STATUS_TGI_TX_KEY: {
3596 struct notif_tgi_tx_key *x = ¬if->u.tgi_tx_key;
3597 if (notif->size==sizeof(*x)) {
3598 IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
3599 "0x%02x station %d\n",
3600 x->key_state,x->security_type,
3605 IPW_ERROR("TGi Tx Key of wrong size %d (should be %d)\n",
3606 notif->size,sizeof(*x));
3610 case HOST_NOTIFICATION_CALIB_KEEP_RESULTS: {
3611 struct notif_calibration *x = ¬if->u.calibration;
3613 if (notif->size == sizeof(*x)) {
3614 memcpy(&priv->calib, x, sizeof(*x));
3615 IPW_DEBUG_INFO("TODO: Calibration\n");
3619 IPW_ERROR("Calibration of wrong size %d (should be %d)\n",
3620 notif->size,sizeof(*x));
3624 case HOST_NOTIFICATION_NOISE_STATS: {
3625 if (notif->size == sizeof(u32)) {
3626 priv->last_noise = (u8)(notif->u.noise.value & 0xff);
3627 average_add(&priv->average_noise, priv->last_noise);
3631 IPW_ERROR("Noise stat is wrong size %d (should be %d)\n",
3632 notif->size, sizeof(u32));
3637 IPW_ERROR("Unknown notification: "
3638 "subtype=%d,flags=0x%2x,size=%d\n",
3639 notif->subtype, notif->flags, notif->size);
3644 * Destroys all DMA structures and initializes them again
3647 * @return error code
3649 static int ipw_queue_reset(struct ipw_priv *priv)
3652 /** @todo customize queue sizes */
3653 int nTx = 64, nTxCmd = 8;
3654 ipw_tx_queue_free(priv);
3656 rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
3657 CX2_TX_CMD_QUEUE_READ_INDEX,
3658 CX2_TX_CMD_QUEUE_WRITE_INDEX,
3659 CX2_TX_CMD_QUEUE_BD_BASE,
3660 CX2_TX_CMD_QUEUE_BD_SIZE);
3662 IPW_ERROR("Tx Cmd queue init failed\n");
3666 rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
3667 CX2_TX_QUEUE_0_READ_INDEX,
3668 CX2_TX_QUEUE_0_WRITE_INDEX,
3669 CX2_TX_QUEUE_0_BD_BASE,
3670 CX2_TX_QUEUE_0_BD_SIZE);
3672 IPW_ERROR("Tx 0 queue init failed\n");
3675 rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
3676 CX2_TX_QUEUE_1_READ_INDEX,
3677 CX2_TX_QUEUE_1_WRITE_INDEX,
3678 CX2_TX_QUEUE_1_BD_BASE,
3679 CX2_TX_QUEUE_1_BD_SIZE);
3681 IPW_ERROR("Tx 1 queue init failed\n");
3684 rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
3685 CX2_TX_QUEUE_2_READ_INDEX,
3686 CX2_TX_QUEUE_2_WRITE_INDEX,
3687 CX2_TX_QUEUE_2_BD_BASE,
3688 CX2_TX_QUEUE_2_BD_SIZE);
3690 IPW_ERROR("Tx 2 queue init failed\n");
3693 rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
3694 CX2_TX_QUEUE_3_READ_INDEX,
3695 CX2_TX_QUEUE_3_WRITE_INDEX,
3696 CX2_TX_QUEUE_3_BD_BASE,
3697 CX2_TX_QUEUE_3_BD_SIZE);
3699 IPW_ERROR("Tx 3 queue init failed\n");
3703 priv->rx_bufs_min = 0;
3704 priv->rx_pend_max = 0;
3708 ipw_tx_queue_free(priv);
3713 * Reclaim Tx queue entries no more used by NIC.
3715 * When the FW advances the 'R' index, all entries between the old and
3716 * new 'R' index need to be reclaimed. As a result, some free space
3717 * forms. If there is enough free space (> low mark), wake the Tx queue.
3719 * @note Need to protect against garbage in 'R' index
3723 * @return Number of used entries remaining in the queue
3725 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
3726 struct clx2_tx_queue *txq, int qindex)
3730 struct clx2_queue *q = &txq->q;
3732 hw_tail = ipw_read32(priv, q->reg_r);
3733 if (hw_tail >= q->n_bd) {
3735 ("Read index for DMA queue (%d) is out of range [0-%d)\n",
3739 for (; q->last_used != hw_tail;
3740 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
3741 ipw_queue_tx_free_tfd(priv, txq);
3745 if (ipw_queue_space(q) > q->low_mark && qindex >= 0) {
3746 __maybe_wake_tx(priv);
3748 used = q->first_empty - q->last_used;
3755 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
3758 struct clx2_tx_queue *txq = &priv->txq_cmd;
3759 struct clx2_queue *q = &txq->q;
3760 struct tfd_frame *tfd;
3762 if (ipw_queue_space(q) < (sync ? 1 : 2)) {
3763 IPW_ERROR("No space for Tx\n");
3767 tfd = &txq->bd[q->first_empty];
3768 txq->txb[q->first_empty] = NULL;
3770 memset(tfd, 0, sizeof(*tfd));
3771 tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
3772 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
3774 tfd->u.cmd.index = hcmd;
3775 tfd->u.cmd.length = len;
3776 memcpy(tfd->u.cmd.payload, buf, len);
3777 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
3778 ipw_write32(priv, q->reg_w, q->first_empty);
3779 _ipw_read32(priv, 0x90);
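	/* The read above is assumed to act as a dummy register read that flushes
	 * the posted write of the new 'write' index to the device before the
	 * command is considered queued. */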
3787 * Rx theory of operation
3789 * The host allocates 32 DMA target addresses and passes the host address
3790 * to the firmware at register CX2_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
3794 * The host/firmware share two index registers for managing the Rx buffers.
3796 * The READ index maps to the first position that the firmware may be writing
3797 * to -- the driver can read up to (but not including) this position and get
3799 * The READ index is managed by the firmware once the card is enabled.
3801 * The WRITE index maps to the last position the driver has read from -- the
3802 * position preceding WRITE is the last slot the firmware can place a packet.
3804 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
3807 * During initialization the host sets up the READ queue position to the first
3808 * INDEX position, and WRITE to the last (READ - 1 wrapped)
3810 * When the firmware places a packet in a buffer it will advance the READ index
3811 * and fire the RX interrupt. The driver can then query the READ index and
3812 * process as many packets as possible, moving the WRITE index forward as it
3813 * resets the Rx queue buffers with new memory.
3815 * The management in the driver is as follows:
3816 * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When
3817 * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
3818 * to replenish the ipw->rxq->rx_free.
3819 * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
3820 * ipw->rxq is replenished and the READ INDEX is updated (updating the
3821 * 'processed' and 'read' driver indexes as well)
3822 * + A received packet is processed and handed to the kernel network stack,
3823 * detached from the ipw->rxq. The driver 'processed' index is updated.
3824 * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
3825 * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
3826 * INDEX is not incremented and ipw->status(RX_STALLED) is set. If there
3827 * were enough free buffers and RX_STALLED is set it is cleared.
3832 * ipw_rx_queue_alloc() Allocates rx_free
3833 * ipw_rx_queue_replenish() Replenishes rx_free list from rx_used, and calls
3834 * ipw_rx_queue_restock
3835 * ipw_rx_queue_restock() Moves available buffers from rx_free into Rx
3836 * queue, updates firmware pointers, and updates
3837 * the WRITE index. If insufficient rx_free buffers
3838 * are available, schedules ipw_rx_queue_replenish
3840 * -- enable interrupts --
3841 * ISR - ipw_rx() Detach ipw_rx_mem_buffers from pool up to the
3842 * READ INDEX, detaching the SKB from the pool.
3843 * Moves the packet buffer from queue to rx_used.
3844 * Calls ipw_rx_queue_restock to refill any empty
3851 * If there are slots in the RX queue that need to be restocked,
3852 * and we have free pre-allocated buffers, fill the ranks as much
3853 * as we can pulling from rx_free.
3855 * This moves the 'write' index forward to catch up with 'processed', and
3856 * also updates the memory address in the firmware to reference the new
3859 static void ipw_rx_queue_restock(struct ipw_priv *priv)
3861 struct ipw_rx_queue *rxq = priv->rxq;
3862 struct list_head *element;
3863 struct ipw_rx_mem_buffer *rxb;
3864 unsigned long flags;
3867 spin_lock_irqsave(&rxq->lock, flags);
3869 while ((rxq->write != rxq->processed) && (rxq->free_count)) {
3870 element = rxq->rx_free.next;
3871 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
3874 ipw_write32(priv, CX2_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
3876 rxq->queue[rxq->write] = rxb;
3877 rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
3880 spin_unlock_irqrestore(&rxq->lock, flags);
3882 /* If the pre-allocated buffer pool is dropping low, schedule to
3884 if (rxq->free_count <= RX_LOW_WATERMARK)
3885 queue_work(priv->workqueue, &priv->rx_replenish);
3887 /* If we've added more space for the firmware to place data, tell it */
3888 if (write != rxq->write)
3889 ipw_write32(priv, CX2_RX_WRITE_INDEX, rxq->write);
3893 * Move all used packets from rx_used to rx_free, allocating a new SKB for each.
3894 * Also restock the Rx queue via ipw_rx_queue_restock.
3896 * This is called as a scheduled work item (except during initialization)
3898 static void ipw_rx_queue_replenish(void *data)
3900 struct ipw_priv *priv = data;
3901 struct ipw_rx_queue *rxq = priv->rxq;
3902 struct list_head *element;
3903 struct ipw_rx_mem_buffer *rxb;
3904 unsigned long flags;
3906 spin_lock_irqsave(&rxq->lock, flags);
3907 while (!list_empty(&rxq->rx_used)) {
3908 element = rxq->rx_used.next;
3909 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
3910 rxb->skb = alloc_skb(CX2_RX_BUF_SIZE, GFP_ATOMIC);
3912 printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
3913 priv->net_dev->name);
3914 /* We don't reschedule replenish work here -- we will
3915 * call the restock method and if it still needs
3916 * more buffers it will schedule replenish */
3921 rxb->rxb = (struct ipw_rx_buffer *)rxb->skb->data;
3922 rxb->dma_addr = pci_map_single(
3923 priv->pci_dev, rxb->skb->data, CX2_RX_BUF_SIZE,
3924 PCI_DMA_FROMDEVICE);
3926 list_add_tail(&rxb->list, &rxq->rx_free);
3929 spin_unlock_irqrestore(&rxq->lock, flags);
3931 ipw_rx_queue_restock(priv);
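/*
 * Consumer-side sketch (hypothetical, not the driver's actual ipw_rx()):
 * roughly how a tasklet would walk the ring between the driver's 'processed'
 * index and the hardware READ index, using the same fields programmed above.
 *
 *	u32 r = ipw_read32(priv, CX2_RX_READ_INDEX);
 *	u32 i = (priv->rxq->processed + 1) % RX_QUEUE_SIZE;
 *	while (i != r) {
 *		struct ipw_rx_mem_buffer *rxb = priv->rxq->queue[i];
 *		// hand rxb->skb to the stack, then recycle the slot
 *		list_add_tail(&rxb->list, &priv->rxq->rx_used);
 *		priv->rxq->processed = i;
 *		i = (i + 1) % RX_QUEUE_SIZE;
 *	}
 *	ipw_rx_queue_restock(priv);	// move rx_free buffers back into the
 *					// RFD table and advance the WRITE index
 */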
3934 /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
3935 * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
3936 * This free routine walks the list of POOL entries and if SKB is set to
3937 * non-NULL it is unmapped and freed.
3939 static void ipw_rx_queue_free(struct ipw_priv *priv,
3940 struct ipw_rx_queue *rxq)
3947 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
3948 if (rxq->pool[i].skb != NULL) {
3949 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
3951 PCI_DMA_FROMDEVICE);
3952 dev_kfree_skb(rxq->pool[i].skb);
3959 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
3961 struct ipw_rx_queue *rxq;
3964 rxq = kmalloc(sizeof(*rxq), GFP_KERNEL);
3965 if (!rxq)
	return NULL;
 memset(rxq, 0, sizeof(*rxq));
3966 spin_lock_init(&rxq->lock);
3967 INIT_LIST_HEAD(&rxq->rx_free);
3968 INIT_LIST_HEAD(&rxq->rx_used);
3970 /* Fill the rx_used queue with _all_ of the Rx buffers */
3971 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
3972 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3974 /* Set us so that we have processed and used all buffers, but have
3975 * not restocked the Rx queue with fresh buffers */
3976 rxq->read = rxq->write = 0;
3977 rxq->processed = RX_QUEUE_SIZE - 1;
3978 rxq->free_count = 0;
3983 static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
3985 rate &= ~IEEE80211_BASIC_RATE_MASK;
3986 if (ieee_mode == IEEE_A) {
3988 case IEEE80211_OFDM_RATE_6MB:
3989 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ?
3991 case IEEE80211_OFDM_RATE_9MB:
3992 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ?
3994 case IEEE80211_OFDM_RATE_12MB:
3995 return priv->rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ?
3997 case IEEE80211_OFDM_RATE_18MB:
3998 return priv->rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ?
4000 case IEEE80211_OFDM_RATE_24MB:
4001 return priv->rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ?
4003 case IEEE80211_OFDM_RATE_36MB:
4004 return priv->rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ?
4006 case IEEE80211_OFDM_RATE_48MB:
4007 return priv->rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ?
4009 case IEEE80211_OFDM_RATE_54MB:
4010 return priv->rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ?
4019 case IEEE80211_CCK_RATE_1MB:
4020 return priv->rates_mask & IEEE80211_CCK_RATE_1MB_MASK ? 1 : 0;
4021 case IEEE80211_CCK_RATE_2MB:
4022 return priv->rates_mask & IEEE80211_CCK_RATE_2MB_MASK ? 1 : 0;
4023 case IEEE80211_CCK_RATE_5MB:
4024 return priv->rates_mask & IEEE80211_CCK_RATE_5MB_MASK ? 1 : 0;
4025 case IEEE80211_CCK_RATE_11MB:
4026 return priv->rates_mask & IEEE80211_CCK_RATE_11MB_MASK ? 1 : 0;
4029 /* If we are limited to B modulations, bail at this point */
4030 if (ieee_mode == IEEE_B)
4035 case IEEE80211_OFDM_RATE_6MB:
4036 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ? 1 : 0;
4037 case IEEE80211_OFDM_RATE_9MB:
4038 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ? 1 : 0;
4039 case IEEE80211_OFDM_RATE_12MB:
4040 return priv->rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
4041 case IEEE80211_OFDM_RATE_18MB:
4042 return priv->rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
4043 case IEEE80211_OFDM_RATE_24MB:
4044 return priv->rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
4045 case IEEE80211_OFDM_RATE_36MB:
4046 return priv->rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
4047 case IEEE80211_OFDM_RATE_48MB:
4048 return priv->rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
4049 case IEEE80211_OFDM_RATE_54MB:
4050 return priv->rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
4056 static int ipw_compatible_rates(struct ipw_priv *priv,
4057 const struct ieee80211_network *network,
4058 struct ipw_supported_rates *rates)
4062 memset(rates, 0, sizeof(*rates));
4063 num_rates = min(network->rates_len, (u8)IPW_MAX_RATES);
4064 rates->num_rates = 0;
4065 for (i = 0; i < num_rates; i++) {
4066 if (!ipw_is_rate_in_mask(priv, network->mode, network->rates[i])) {
4067 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
4068 network->rates[i], priv->rates_mask);
4072 rates->supported_rates[rates->num_rates++] = network->rates[i];
4075 num_rates = min(network->rates_ex_len, (u8)(IPW_MAX_RATES - num_rates));
4076 for (i = 0; i < num_rates; i++) {
4077 if (!ipw_is_rate_in_mask(priv, network->mode, network->rates_ex[i])) {
4078 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
4079 network->rates_ex[i], priv->rates_mask);
4083 rates->supported_rates[rates->num_rates++] = network->rates_ex[i];
4086 return rates->num_rates;
4089 static inline void ipw_copy_rates(struct ipw_supported_rates *dest,
4090 const struct ipw_supported_rates *src)
4093 for (i = 0; i < src->num_rates; i++)
4094 dest->supported_rates[i] = src->supported_rates[i];
4095 dest->num_rates = src->num_rates;
4098 /* TODO: Look at sniffed packets in the air to determine if the basic rate
4099 * mask should ever be used -- right now all callers to add the scan rates are
4100 * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */
4101 static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
4102 u8 modulation, u32 rate_mask)
4104 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
4105 IEEE80211_BASIC_RATE_MASK : 0;
4107 if (rate_mask & IEEE80211_CCK_RATE_1MB_MASK)
4108 rates->supported_rates[rates->num_rates++] =
4109 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
4111 if (rate_mask & IEEE80211_CCK_RATE_2MB_MASK)
4112 rates->supported_rates[rates->num_rates++] =
4113 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
4115 if (rate_mask & IEEE80211_CCK_RATE_5MB_MASK)
4116 rates->supported_rates[rates->num_rates++] = basic_mask |
4117 IEEE80211_CCK_RATE_5MB;
4119 if (rate_mask & IEEE80211_CCK_RATE_11MB_MASK)
4120 rates->supported_rates[rates->num_rates++] = basic_mask |
4121 IEEE80211_CCK_RATE_11MB;
4124 static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
4125 u8 modulation, u32 rate_mask)
4127 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
4128 IEEE80211_BASIC_RATE_MASK : 0;
4130 if (rate_mask & IEEE80211_OFDM_RATE_6MB_MASK)
4131 rates->supported_rates[rates->num_rates++] = basic_mask |
4132 IEEE80211_OFDM_RATE_6MB;
4134 if (rate_mask & IEEE80211_OFDM_RATE_9MB_MASK)
4135 rates->supported_rates[rates->num_rates++] =
4136 IEEE80211_OFDM_RATE_9MB;
4138 if (rate_mask & IEEE80211_OFDM_RATE_12MB_MASK)
4139 rates->supported_rates[rates->num_rates++] = basic_mask |
4140 IEEE80211_OFDM_RATE_12MB;
4142 if (rate_mask & IEEE80211_OFDM_RATE_18MB_MASK)
4143 rates->supported_rates[rates->num_rates++] =
4144 IEEE80211_OFDM_RATE_18MB;
4146 if (rate_mask & IEEE80211_OFDM_RATE_24MB_MASK)
4147 rates->supported_rates[rates->num_rates++] = basic_mask |
4148 IEEE80211_OFDM_RATE_24MB;
4150 if (rate_mask & IEEE80211_OFDM_RATE_36MB_MASK)
4151 rates->supported_rates[rates->num_rates++] =
4152 IEEE80211_OFDM_RATE_36MB;
4154 if (rate_mask & IEEE80211_OFDM_RATE_48MB_MASK)
4155 rates->supported_rates[rates->num_rates++] =
4156 IEEE80211_OFDM_RATE_48MB;
4158 if (rate_mask & IEEE80211_OFDM_RATE_54MB_MASK)
4159 rates->supported_rates[rates->num_rates++] =
4160 IEEE80211_OFDM_RATE_54MB;
4163 struct ipw_network_match {
4164 struct ieee80211_network *network;
4165 struct ipw_supported_rates rates;
4168 static int ipw_best_network(
4169 struct ipw_priv *priv,
4170 struct ipw_network_match *match,
4171 struct ieee80211_network *network,
4174 struct ipw_supported_rates rates;
4176 /* Verify that this network's capability is compatible with the
4177 * current mode (AdHoc or Infrastructure) */
4178 if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
4179 !(network->capability & WLAN_CAPABILITY_BSS)) ||
4180 (priv->ieee->iw_mode == IW_MODE_ADHOC &&
4181 !(network->capability & WLAN_CAPABILITY_IBSS))) {
4182 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded due to "
4183 "capability mismatch.\n",
4184 escape_essid(network->ssid, network->ssid_len),
4185 MAC_ARG(network->bssid));
4189 /* If we do not have an ESSID for this AP, we can not associate with
4191 if (network->flags & NETWORK_EMPTY_ESSID) {
4192 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4193 "because of hidden ESSID.\n",
4194 escape_essid(network->ssid, network->ssid_len),
4195 MAC_ARG(network->bssid));
4199 if (unlikely(roaming)) {
4200 /* If we are roaming, then check that this is a valid
4201  * network to try to roam to */
4202 if ((network->ssid_len != match->network->ssid_len) ||
4203 memcmp(network->ssid, match->network->ssid,
4204 network->ssid_len)) {
4205 IPW_DEBUG_ASSOC("Netowrk '%s (" MAC_FMT ")' excluded "
4206 "because of non-network ESSID.\n",
4207 escape_essid(network->ssid,
4209 MAC_ARG(network->bssid));
4213 /* If an ESSID has been configured then compare the broadcast
4215 if ((priv->config & CFG_STATIC_ESSID) &&
4216 ((network->ssid_len != priv->essid_len) ||
4217 memcmp(network->ssid, priv->essid,
4218 min(network->ssid_len, priv->essid_len)))) {
4219 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
4220 strncpy(escaped, escape_essid(
4221 network->ssid, network->ssid_len),
4223 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4224 "because of ESSID mismatch: '%s'.\n",
4225 escaped, MAC_ARG(network->bssid),
4226 escape_essid(priv->essid, priv->essid_len));
4231 /* If the old network's signal (RSSI) is stronger than this one's, don't
4232  * bother testing everything else. */
4233 if (match->network && match->network->stats.rssi >
4234 network->stats.rssi) {
4235 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
4237 escape_essid(network->ssid, network->ssid_len),
4239 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded because "
4240 "'%s (" MAC_FMT ")' has a stronger signal.\n",
4241 escaped, MAC_ARG(network->bssid),
4242 escape_essid(match->network->ssid,
4243 match->network->ssid_len),
4244 MAC_ARG(match->network->bssid));
4248 /* If this network has already had an association attempt within the
4249  * last 5 seconds, do not try to associate again... */
4250 if (network->last_associate &&
4251 time_after(network->last_associate + (HZ * 5UL), jiffies)) {
4252 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4253 "because of storming (%lu since last "
4254 "assoc attempt).\n",
4255 escape_essid(network->ssid, network->ssid_len),
4256 MAC_ARG(network->bssid),
4257 (jiffies - network->last_associate) / HZ);
4261 /* Now go through and see if the requested network is valid... */
4262 if (priv->ieee->scan_age != 0 &&
4263 jiffies - network->last_scanned > priv->ieee->scan_age) {
4264 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4265 "because of age: %lums.\n",
4266 escape_essid(network->ssid, network->ssid_len),
4267 MAC_ARG(network->bssid),
4268 (jiffies - network->last_scanned) / (HZ / 100));
4272 if ((priv->config & CFG_STATIC_CHANNEL) &&
4273 (network->channel != priv->channel)) {
4274 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4275 "because of channel mismatch: %d != %d.\n",
4276 escape_essid(network->ssid, network->ssid_len),
4277 MAC_ARG(network->bssid),
4278 network->channel, priv->channel);
4282 /* Verify privacy compatibility */
4283 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
4284 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
4285 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4286 "because of privacy mismatch: %s != %s.\n",
4287 escape_essid(network->ssid, network->ssid_len),
4288 MAC_ARG(network->bssid),
4289 priv->capability & CAP_PRIVACY_ON ? "on" :
4291 network->capability &
4292 WLAN_CAPABILITY_PRIVACY ?"on" : "off");
4296 if ((priv->config & CFG_STATIC_BSSID) &&
4297 memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
4298 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4299 "because of BSSID mismatch: " MAC_FMT ".\n",
4300 escape_essid(network->ssid, network->ssid_len),
4301 MAC_ARG(network->bssid),
4302 MAC_ARG(priv->bssid));
4306 /* Filter out any incompatible freq / mode combinations */
4307 if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
4308 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4309 "because of invalid frequency/mode "
4311 escape_essid(network->ssid, network->ssid_len),
4312 MAC_ARG(network->bssid));
4316 ipw_compatible_rates(priv, network, &rates);
4317 if (rates.num_rates == 0) {
4318 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4319 "because of no compatible rates.\n",
4320 escape_essid(network->ssid, network->ssid_len),
4321 MAC_ARG(network->bssid));
4325 /* TODO: Perform any further minimal comparative tests. We do not
4326 * want to put too much policy logic here; intelligent scan selection
4327 * should occur within a generic IEEE 802.11 user space tool. */
4329 /* Set up 'new' AP to this network */
4330 ipw_copy_rates(&match->rates, &rates);
4331 match->network = network;
4333 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' is a viable match.\n",
4334 escape_essid(network->ssid, network->ssid_len),
4335 MAC_ARG(network->bssid));
4341 static void ipw_adhoc_create(struct ipw_priv *priv,
4342 struct ieee80211_network *network)
4345 * For the purposes of scanning, we can set our wireless mode
4346 * to trigger scans across combinations of bands, but when it
4347 * comes to creating a new ad-hoc network, we have to tell the FW
4348 * exactly which band to use.
4350 * We also have the possibility of an invalid channel for the
4351 * chosen band. Attempting to create a new ad-hoc network
4352 * with an invalid channel for wireless mode will trigger a
4355 network->mode = is_valid_channel(priv->ieee->mode, priv->channel);
4356 if (network->mode) {
4357 network->channel = priv->channel;
4359 IPW_WARNING("Overriding invalid channel\n");
4360 if (priv->ieee->mode & IEEE_A) {
4361 network->mode = IEEE_A;
4362 priv->channel = band_a_active_channel[0];
4363 } else if (priv->ieee->mode & IEEE_G) {
4364 network->mode = IEEE_G;
4365 priv->channel = band_b_active_channel[0];
4367 network->mode = IEEE_B;
4368 priv->channel = band_b_active_channel[0];
4372 network->channel = priv->channel;
4373 priv->config |= CFG_ADHOC_PERSIST;
4374 ipw_create_bssid(priv, network->bssid);
4375 network->ssid_len = priv->essid_len;
4376 memcpy(network->ssid, priv->essid, priv->essid_len);
4377 memset(&network->stats, 0, sizeof(network->stats));
4378 network->capability = WLAN_CAPABILITY_IBSS;
4379 if (priv->capability & CAP_PRIVACY_ON)
4380 network->capability |= WLAN_CAPABILITY_PRIVACY;
4381 network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
4382 memcpy(network->rates, priv->rates.supported_rates,
4383 network->rates_len);
4384 network->rates_ex_len = priv->rates.num_rates - network->rates_len;
4385 memcpy(network->rates_ex,
4386 &priv->rates.supported_rates[network->rates_len],
4387 network->rates_ex_len);
4388 network->last_scanned = 0;
4390 network->last_associate = 0;
4391 network->time_stamp[0] = 0;
4392 network->time_stamp[1] = 0;
4393 network->beacon_interval = 100; /* Default */
4394 network->listen_interval = 10; /* Default */
4395 network->atim_window = 0; /* Default */
4396 #ifdef CONFIG_IEEE80211_WPA
4397 network->wpa_ie_len = 0;
4398 network->rsn_ie_len = 0;
4399 #endif /* CONFIG_IEEE80211_WPA */
4402 static void ipw_send_wep_keys(struct ipw_priv *priv)
4404 struct ipw_wep_key *key;
4406 struct host_cmd cmd = {
4407 .cmd = IPW_CMD_WEP_KEY,
4411 key = (struct ipw_wep_key *)&cmd.param;
4412 key->cmd_id = DINO_CMD_WEP_KEY;
4415 for (i = 0; i < 4; i++) {
4417 if (!(priv->sec.flags & (1 << i))) {
4420 key->key_size = priv->sec.key_sizes[i];
4421 memcpy(key->key, priv->sec.keys[i], key->key_size);
4424 if (ipw_send_cmd(priv, &cmd)) {
4425 IPW_ERROR("failed to send WEP_KEY command\n");
4431 static void ipw_adhoc_check(void *data)
4433 struct ipw_priv *priv = data;
4435 if (priv->missed_adhoc_beacons++ > priv->missed_beacon_threshold &&
4436 !(priv->config & CFG_ADHOC_PERSIST)) {
4437 IPW_DEBUG_SCAN("Disassociating due to missed beacons\n");
4438 ipw_remove_current_network(priv);
4439 ipw_disassociate(priv);
4443 queue_delayed_work(priv->workqueue, &priv->adhoc_check,
4444 priv->assoc_request.beacon_interval);
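/* Note: the delay passed to queue_delayed_work() is in jiffies, while
 * beacon_interval is presumably the negotiated IBSS beacon interval, so
 * this reschedule is only a rough approximation of one beacon period. */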
4447 #ifdef CONFIG_IPW_DEBUG
4448 static void ipw_debug_config(struct ipw_priv *priv)
4450 IPW_DEBUG_INFO("Scan completed, no valid APs matched "
4451 "[CFG 0x%08X]\n", priv->config);
4452 if (priv->config & CFG_STATIC_CHANNEL)
4453 IPW_DEBUG_INFO("Channel locked to %d\n",
4456 IPW_DEBUG_INFO("Channel unlocked.\n");
4457 if (priv->config & CFG_STATIC_ESSID)
4458 IPW_DEBUG_INFO("ESSID locked to '%s'\n",
4459 escape_essid(priv->essid,
4462 IPW_DEBUG_INFO("ESSID unlocked.\n");
4463 if (priv->config & CFG_STATIC_BSSID)
4464 IPW_DEBUG_INFO("BSSID locked to " MAC_FMT "\n", MAC_ARG(priv->bssid));
4466 IPW_DEBUG_INFO("BSSID unlocked.\n");
4467 if (priv->capability & CAP_PRIVACY_ON)
4468 IPW_DEBUG_INFO("PRIVACY on\n");
4470 IPW_DEBUG_INFO("PRIVACY off\n");
4471 IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
4474 #define ipw_debug_config(x) do {} while (0)
4477 static inline void ipw_set_fixed_rate(struct ipw_priv *priv,
4478 struct ieee80211_network *network)
4480 /* TODO: Verify that this works... */
4481 struct ipw_fixed_rate fr = {
4482 .tx_rates = priv->rates_mask
4487 /* Identify 'current FW band' and match it with the fixed
4490 switch (priv->ieee->freq_band) {
4491 case IEEE80211_52GHZ_BAND: /* A only */
4493 if (priv->rates_mask & ~IEEE80211_OFDM_RATES_MASK) {
4494 /* Invalid fixed rate mask */
4499 fr.tx_rates >>= IEEE80211_OFDM_SHIFT_MASK_A;
4502 default: /* 2.4Ghz or Mixed */
4504 if (network->mode == IEEE_B) {
4505 if (fr.tx_rates & ~IEEE80211_CCK_RATES_MASK) {
4506 /* Invalid fixed rate mask */
4513 if (fr.tx_rates & ~(IEEE80211_CCK_RATES_MASK |
4514 IEEE80211_OFDM_RATES_MASK)) {
4515 /* Invalid fixed rate mask */
4520 if (IEEE80211_OFDM_RATE_6MB_MASK & fr.tx_rates) {
4521 mask |= (IEEE80211_OFDM_RATE_6MB_MASK >> 1);
4522 fr.tx_rates &= ~IEEE80211_OFDM_RATE_6MB_MASK;
4525 if (IEEE80211_OFDM_RATE_9MB_MASK & fr.tx_rates) {
4526 mask |= (IEEE80211_OFDM_RATE_9MB_MASK >> 1);
4527 fr.tx_rates &= ~IEEE80211_OFDM_RATE_9MB_MASK;
4530 if (IEEE80211_OFDM_RATE_12MB_MASK & fr.tx_rates) {
4531 mask |= (IEEE80211_OFDM_RATE_12MB_MASK >> 1);
4532 fr.tx_rates &= ~IEEE80211_OFDM_RATE_12MB_MASK;
4535 fr.tx_rates |= mask;
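/* The remapping above appears to shift the 6, 9 and 12 Mb OFDM bits down
 * one position so the fixed-rate mask lines up with the rate table used
 * for mixed B/G operation; each remapped rate is cleared from its
 * original position first. */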
4539 reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
4540 ipw_write_reg32(priv, reg, *(u32*)&fr);
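/* IPW_MEM_FIXED_OVERRIDE presumably holds the device address of the
 * fixed-rate override block; the packed ipw_fixed_rate structure is
 * written there as a single 32-bit value. */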
4543 static int ipw_associate_network(struct ipw_priv *priv,
4544 struct ieee80211_network *network,
4545 struct ipw_supported_rates *rates,
4550 if (priv->config & CFG_FIXED_RATE)
4551 ipw_set_fixed_rate(priv, network);
4553 if (!(priv->config & CFG_STATIC_ESSID)) {
4554 priv->essid_len = min(network->ssid_len,
4555 (u8)IW_ESSID_MAX_SIZE);
4556 memcpy(priv->essid, network->ssid, priv->essid_len);
4559 network->last_associate = jiffies;
4561 memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
4562 priv->assoc_request.channel = network->channel;
4563 if ((priv->capability & CAP_PRIVACY_ON) &&
4564 (priv->capability & CAP_SHARED_KEY)) {
4565 priv->assoc_request.auth_type = AUTH_SHARED_KEY;
4566 priv->assoc_request.auth_key = priv->sec.active_key;
4568 priv->assoc_request.auth_type = AUTH_OPEN;
4569 priv->assoc_request.auth_key = 0;
4572 if (priv->capability & CAP_PRIVACY_ON)
4573 ipw_send_wep_keys(priv);
4576 * It is valid for our ieee device to support multiple modes, but
4577 * when it comes to associating to a given network we have to choose
4580 if (network->mode & priv->ieee->mode & IEEE_A)
4581 priv->assoc_request.ieee_mode = IPW_A_MODE;
4582 else if (network->mode & priv->ieee->mode & IEEE_G)
4583 priv->assoc_request.ieee_mode = IPW_G_MODE;
4584 else if (network->mode & priv->ieee->mode & IEEE_B)
4585 priv->assoc_request.ieee_mode = IPW_B_MODE;
4587 IPW_DEBUG_ASSOC("%sssociation attempt: '%s', channel %d, "
4588 "802.11%c [%d], enc=%s%s%s%c%c\n",
4589 roaming ? "Rea" : "A",
4590 escape_essid(priv->essid, priv->essid_len),
4592 ipw_modes[priv->assoc_request.ieee_mode],
4594 priv->capability & CAP_PRIVACY_ON ? "on " : "off",
4595 priv->capability & CAP_PRIVACY_ON ?
4596 (priv->capability & CAP_SHARED_KEY ? "(shared)" :
4598 priv->capability & CAP_PRIVACY_ON ? " key=" : "",
4599 priv->capability & CAP_PRIVACY_ON ?
4600 '1' + priv->sec.active_key : '.',
4601 priv->capability & CAP_PRIVACY_ON ?
4604 priv->assoc_request.beacon_interval = network->beacon_interval;
4605 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
4606 (network->time_stamp[0] == 0) &&
4607 (network->time_stamp[1] == 0)) {
4608 priv->assoc_request.assoc_type = HC_IBSS_START;
4609 priv->assoc_request.assoc_tsf_msw = 0;
4610 priv->assoc_request.assoc_tsf_lsw = 0;
4612 if (unlikely(roaming))
4613 priv->assoc_request.assoc_type = HC_REASSOCIATE;
4615 priv->assoc_request.assoc_type = HC_ASSOCIATE;
4616 priv->assoc_request.assoc_tsf_msw = network->time_stamp[1];
4617 priv->assoc_request.assoc_tsf_lsw = network->time_stamp[0];
4620 memcpy(&priv->assoc_request.bssid, network->bssid, ETH_ALEN);
4622 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
4623 memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN);
4624 priv->assoc_request.atim_window = network->atim_window;
4626 memcpy(&priv->assoc_request.dest, network->bssid,
4628 priv->assoc_request.atim_window = 0;
4631 priv->assoc_request.capability = network->capability;
4632 priv->assoc_request.listen_interval = network->listen_interval;
4634 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
4636 IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
4640 rates->ieee_mode = priv->assoc_request.ieee_mode;
4641 rates->purpose = IPW_RATE_CONNECT;
4642 ipw_send_supported_rates(priv, rates);
4644 if (priv->assoc_request.ieee_mode == IPW_G_MODE)
4645 priv->sys_config.dot11g_auto_detection = 1;
4647 priv->sys_config.dot11g_auto_detection = 0;
4648 err = ipw_send_system_config(priv, &priv->sys_config);
4650 IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
4654 IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
4655 err = ipw_set_sensitivity(priv, network->stats.rssi);
4657 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
4662 * If preemption is enabled, it is possible for the association
4663 * to complete before we return from ipw_send_associate. Therefore
4664 * we have to be sure to update our private data first.
4666 priv->channel = network->channel;
4667 memcpy(priv->bssid, network->bssid, ETH_ALEN);
4668 priv->status |= STATUS_ASSOCIATING;
4669 priv->status &= ~STATUS_SECURITY_UPDATED;
4671 priv->assoc_network = network;
4673 err = ipw_send_associate(priv, &priv->assoc_request);
4675 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
4679 IPW_DEBUG(IPW_DL_STATE, "associating: '%s' " MAC_FMT " \n",
4680 escape_essid(priv->essid, priv->essid_len),
4681 MAC_ARG(priv->bssid));
4686 static void ipw_roam(void *data)
4688 struct ipw_priv *priv = data;
4689 struct ieee80211_network *network = NULL;
4690 struct ipw_network_match match = {
4691 .network = priv->assoc_network
4694 /* The roaming process is as follows:
4696 * 1. Missed beacon threshold triggers the roaming process by
4697 * setting the status ROAM bit and requesting a scan.
4698 * 2. When the scan completes, it schedules the ROAM work
4699 * 3. The ROAM work looks at all of the known networks for one that
4700 * is a better network than the currently associated. If none
4701 * found, the ROAM process is over (ROAM bit cleared)
4702 * 4. If a better network is found, a disassociation request is
4704 * 5. When the disassociation completes, the roam work is again
4705 * scheduled. The second time through, the driver is no longer
4706 * associated, and the newly selected network is sent an
4707 * association request.
4708 * 6. At this point, the roaming process is complete and the ROAM
4709 * status bit is cleared.
4712 /* If we are no longer associated, and the roaming bit is no longer
4713 * set, then we are not actively roaming, so just return */
4714 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
4717 if (priv->status & STATUS_ASSOCIATED) {
4718 /* First pass through ROAM process -- look for a better
4720 u8 rssi = priv->assoc_network->stats.rssi;
4721 priv->assoc_network->stats.rssi = -128;
4722 list_for_each_entry(network, &priv->ieee->network_list, list) {
4723 if (network != priv->assoc_network)
4724 ipw_best_network(priv, &match, network, 1);
4726 priv->assoc_network->stats.rssi = rssi;
4728 if (match.network == priv->assoc_network) {
4729 IPW_DEBUG_ASSOC("No better APs in this network to "
4731 priv->status &= ~STATUS_ROAMING;
4732 ipw_debug_config(priv);
4736 ipw_send_disassociate(priv, 1);
4737 priv->assoc_network = match.network;
4742 /* Second pass through ROAM process -- request association */
4743 ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
4744 ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
4745 priv->status &= ~STATUS_ROAMING;
4748 static void ipw_associate(void *data)
4750 struct ipw_priv *priv = data;
4752 struct ieee80211_network *network = NULL;
4753 struct ipw_network_match match = {
4756 struct ipw_supported_rates *rates;
4757 struct list_head *element;
4759 if (!(priv->config & CFG_ASSOCIATE) &&
4760 !(priv->config & (CFG_STATIC_ESSID |
4761 CFG_STATIC_CHANNEL |
4762 CFG_STATIC_BSSID))) {
4763 IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
4767 list_for_each_entry(network, &priv->ieee->network_list, list)
4768 ipw_best_network(priv, &match, network, 0);
4770 network = match.network;
4771 rates = &match.rates;
4773 if (network == NULL &&
4774 priv->ieee->iw_mode == IW_MODE_ADHOC &&
4775 priv->config & CFG_ADHOC_CREATE &&
4776 priv->config & CFG_STATIC_ESSID &&
4777 !list_empty(&priv->ieee->network_free_list)) {
4778 element = priv->ieee->network_free_list.next;
4779 network = list_entry(element, struct ieee80211_network,
4781 ipw_adhoc_create(priv, network);
4782 rates = &priv->rates;
4784 list_add_tail(&network->list, &priv->ieee->network_list);
4787 /* If we reached the end of the list, then we don't have any valid
4790 ipw_debug_config(priv);
4792 queue_delayed_work(priv->workqueue, &priv->request_scan,
4798 ipw_associate_network(priv, network, rates, 0);
4801 static inline void ipw_handle_data_packet(struct ipw_priv *priv,
4802 struct ipw_rx_mem_buffer *rxb,
4803 struct ieee80211_rx_stats *stats)
4805 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
4807 /* We received data from the HW, so stop the watchdog */
4808 priv->net_dev->trans_start = jiffies;
4810 /* We only process data packets if the
4811 * interface is open */
4812 if (unlikely((pkt->u.frame.length + IPW_RX_FRAME_SIZE) >
4813 skb_tailroom(rxb->skb))) {
4814 priv->ieee->stats.rx_errors++;
4815 priv->wstats.discard.misc++;
4816 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
4818 } else if (unlikely(!netif_running(priv->net_dev))) {
4819 priv->ieee->stats.rx_dropped++;
4820 priv->wstats.discard.misc++;
4821 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
4825 /* Advance skb->data to the start of the actual payload */
4826 skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));
4828 /* Set the size of the skb to the size of the frame */
4829 skb_put(rxb->skb, pkt->u.frame.length);
4831 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
4833 if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
4834 priv->ieee->stats.rx_errors++;
4835 else /* ieee80211_rx succeeded, so it now owns the SKB */
4841 * Main entry function for receiving a packet with 802.11 headers. This
4842 * should be called whenever the FW has notified us that there is a new
4843 * skb in the receive queue.
4845 static void ipw_rx(struct ipw_priv *priv)
4847 struct ipw_rx_mem_buffer *rxb;
4848 struct ipw_rx_packet *pkt;
4849 struct ieee80211_hdr *header;
4853 r = ipw_read32(priv, CX2_RX_READ_INDEX);
4854 w = ipw_read32(priv, CX2_RX_WRITE_INDEX);
4855 i = (priv->rxq->processed + 1) % RX_QUEUE_SIZE;
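/* CX2_RX_READ_INDEX and CX2_RX_WRITE_INDEX appear to be the firmware's
 * read and write pointers into the RX ring; every slot after the last one
 * we processed and before the read index is handled in order below. */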
4858 rxb = priv->rxq->queue[i];
4859 #ifdef CONFIG_IPW_DEBUG
4860 if (unlikely(rxb == NULL)) {
4861 printk(KERN_CRIT "Queue not allocated!\n");
4865 priv->rxq->queue[i] = NULL;
4867 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
4869 PCI_DMA_FROMDEVICE);
4871 pkt = (struct ipw_rx_packet *)rxb->skb->data;
4872 IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
4873 pkt->header.message_type,
4874 pkt->header.rx_seq_num,
4875 pkt->header.control_bits);
4877 switch (pkt->header.message_type) {
4878 case RX_FRAME_TYPE: /* 802.11 frame */ {
4879 struct ieee80211_rx_stats stats = {
4880 .rssi = pkt->u.frame.rssi_dbm -
4882 .signal = pkt->u.frame.signal,
4883 .rate = pkt->u.frame.rate,
4884 .mac_time = jiffies,
4886 pkt->u.frame.received_channel,
4887 .freq = (pkt->u.frame.control & (1<<0)) ?
4888 IEEE80211_24GHZ_BAND : IEEE80211_52GHZ_BAND,
4889 .len = pkt->u.frame.length,
4892 if (stats.rssi != 0)
4893 stats.mask |= IEEE80211_STATMASK_RSSI;
4894 if (stats.signal != 0)
4895 stats.mask |= IEEE80211_STATMASK_SIGNAL;
4896 if (stats.rate != 0)
4897 stats.mask |= IEEE80211_STATMASK_RATE;
4901 #ifdef CONFIG_IPW_PROMISC
4902 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4903 ipw_handle_data_packet(priv, rxb, &stats);
4908 header = (struct ieee80211_hdr *)(rxb->skb->data +
4910 /* TODO: Check Ad-Hoc dest/source and make sure
4911 * that we are actually parsing these packets
4912 * correctly -- we should probably use the
4913 * frame control of the packet and disregard
4914 * the current iw_mode */
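/* In the cases below a frame is counted as belonging to our network when
 * its destination (addr1) is our MAC address or a broadcast/multicast
 * address, or when addr3 matches the BSSID we are currently using. */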
4915 switch (priv->ieee->iw_mode) {
4918 !memcmp(header->addr1,
4919 priv->net_dev->dev_addr,
4921 !memcmp(header->addr3,
4922 priv->bssid, ETH_ALEN) ||
4923 is_broadcast_ether_addr(header->addr1) ||
4924 is_multicast_ether_addr(header->addr1);
4930 !memcmp(header->addr3,
4931 priv->bssid, ETH_ALEN) ||
4932 !memcmp(header->addr1,
4933 priv->net_dev->dev_addr,
4935 is_broadcast_ether_addr(header->addr1) ||
4936 is_multicast_ether_addr(header->addr1);
4940 if (network_packet && priv->assoc_network) {
4941 priv->assoc_network->stats.rssi = stats.rssi;
4942 average_add(&priv->average_rssi,
4944 priv->last_rx_rssi = stats.rssi;
4947 IPW_DEBUG_RX("Frame: len=%u\n", pkt->u.frame.length);
4949 if (pkt->u.frame.length < frame_hdr_len(header)) {
4950 IPW_DEBUG_DROP("Received packet is too small. "
4952 priv->ieee->stats.rx_errors++;
4953 priv->wstats.discard.misc++;
4957 switch (WLAN_FC_GET_TYPE(header->frame_ctl)) {
4958 case IEEE80211_FTYPE_MGMT:
4959 ieee80211_rx_mgt(priv->ieee, header, &stats);
4960 if (priv->ieee->iw_mode == IW_MODE_ADHOC &&
4961 ((WLAN_FC_GET_STYPE(header->frame_ctl) ==
4962 IEEE80211_STYPE_PROBE_RESP) ||
4963 (WLAN_FC_GET_STYPE(header->frame_ctl) ==
4964 IEEE80211_STYPE_BEACON)) &&
4965 !memcmp(header->addr3, priv->bssid, ETH_ALEN))
4966 ipw_add_station(priv, header->addr2);
4969 case IEEE80211_FTYPE_CTL:
4972 case IEEE80211_FTYPE_DATA:
4974 ipw_handle_data_packet(priv, rxb, &stats);
4976 IPW_DEBUG_DROP("Dropping: " MAC_FMT
4977 ", " MAC_FMT ", " MAC_FMT "\n",
4978 MAC_ARG(header->addr1), MAC_ARG(header->addr2),
4979 MAC_ARG(header->addr3));
4985 case RX_HOST_NOTIFICATION_TYPE: {
4986 IPW_DEBUG_RX("Notification: subtype=%02X flags=%02X size=%d\n",
4987 pkt->u.notification.subtype,
4988 pkt->u.notification.flags,
4989 pkt->u.notification.size);
4990 ipw_rx_notification(priv, &pkt->u.notification);
4995 IPW_DEBUG_RX("Bad Rx packet of type %d\n",
4996 pkt->header.message_type);
5000 /* For now we just don't re-use anything. We can tweak this
5001 * later to try to re-use notification packets and SKBs that
5002 * fail to Rx correctly */
5003 if (rxb->skb != NULL) {
5004 dev_kfree_skb_any(rxb->skb);
5008 pci_unmap_single(priv->pci_dev, rxb->dma_addr,
5009 CX2_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5010 list_add_tail(&rxb->list, &priv->rxq->rx_used);
5012 i = (i + 1) % RX_QUEUE_SIZE;
5015 /* Backtrack one entry */
5016 priv->rxq->processed = (i ? i : RX_QUEUE_SIZE) - 1;
5018 ipw_rx_queue_restock(priv);
5021 static void ipw_abort_scan(struct ipw_priv *priv)
5025 if (priv->status & STATUS_SCAN_ABORTING) {
5026 IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
5029 priv->status |= STATUS_SCAN_ABORTING;
5031 err = ipw_send_scan_abort(priv);
5033 IPW_DEBUG_HC("Request to abort scan failed.\n");
5036 static int ipw_request_scan(struct ipw_priv *priv)
5038 struct ipw_scan_request_ext scan;
5039 int channel_index = 0;
5040 int i, err, scan_type;
5042 if (priv->status & STATUS_EXIT_PENDING) {
5043 IPW_DEBUG_SCAN("Aborting scan due to device shutdown\n");
5044 priv->status |= STATUS_SCAN_PENDING;
5048 if (priv->status & STATUS_SCANNING) {
5049 IPW_DEBUG_HC("Concurrent scan requested. Aborting first.\n");
5050 priv->status |= STATUS_SCAN_PENDING;
5051 ipw_abort_scan(priv);
5055 if (priv->status & STATUS_SCAN_ABORTING) {
5056 IPW_DEBUG_HC("Scan request while abort pending. Queuing.\n");
5057 priv->status |= STATUS_SCAN_PENDING;
5061 if (priv->status & STATUS_RF_KILL_MASK) {
5062 IPW_DEBUG_HC("Aborting scan due to RF Kill activation\n");
5063 priv->status |= STATUS_SCAN_PENDING;
5067 memset(&scan, 0, sizeof(scan));
5069 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] = 20;
5070 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] = 20;
5071 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = 20;
5073 scan.full_scan_index = ieee80211_get_scans(priv->ieee);
5074 /* If we are roaming, then make this a directed scan for the current
5075 * network. Otherwise, ensure that every other scan is a fast
5076 * channel hop scan */
5077 if ((priv->status & STATUS_ROAMING) || (
5078 !(priv->status & STATUS_ASSOCIATED) &&
5079 (priv->config & CFG_STATIC_ESSID) &&
5080 (scan.full_scan_index % 2))) {
5081 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
5083 IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
5087 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
5089 scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;
5092 if (priv->ieee->freq_band & IEEE80211_52GHZ_BAND) {
5093 int start = channel_index;
5094 for (i = 0; i < MAX_A_CHANNELS; i++) {
5095 if (band_a_active_channel[i] == 0)
5097 if ((priv->status & STATUS_ASSOCIATED) &&
5098 band_a_active_channel[i] == priv->channel)
5101 scan.channels_list[channel_index] =
5102 band_a_active_channel[i];
5103 ipw_set_scan_type(&scan, channel_index, scan_type);
5106 if (start != channel_index) {
5107 scan.channels_list[start] = (u8)(IPW_A_MODE << 6) |
5108 (channel_index - start);
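/* The slot at 'start' appears to act as a band tag for the firmware: the
 * top two bits carry the IEEE mode and the low bits the number of channel
 * entries that follow it in channels_list[]. */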
5113 if (priv->ieee->freq_band & IEEE80211_24GHZ_BAND) {
5114 int start = channel_index;
5115 for (i = 0; i < MAX_B_CHANNELS; i++) {
5116 if (band_b_active_channel[i] == 0)
5118 if ((priv->status & STATUS_ASSOCIATED) &&
5119 band_b_active_channel[i] == priv->channel)
5122 scan.channels_list[channel_index] =
5123 band_b_active_channel[i];
5124 ipw_set_scan_type(&scan, channel_index, scan_type);
5127 if (start != channel_index) {
5128 scan.channels_list[start] = (u8)(IPW_B_MODE << 6) |
5129 (channel_index - start);
5133 err = ipw_send_scan_request_ext(priv, &scan);
5135 IPW_DEBUG_HC("Sending scan command failed: %08X\n",
5140 priv->status |= STATUS_SCANNING;
5141 priv->status &= ~STATUS_SCAN_PENDING;
5147 * This file defines the Wireless Extension handlers. It does not
5148 * define any methods of hardware manipulation and relies on the
5149 * functions defined in ipw_main to provide the HW interaction.
5151 * The exception to this is the use of the ipw_get_ordinal()
5152 * function used to poll the hardware vs. making unnecessary calls.
5156 static int ipw_wx_get_name(struct net_device *dev,
5157 struct iw_request_info *info,
5158 union iwreq_data *wrqu, char *extra)
5160 struct ipw_priv *priv = ieee80211_priv(dev);
5161 if (!(priv->status & STATUS_ASSOCIATED))
5162 strcpy(wrqu->name, "unassociated");
5164 snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11%c",
5165 ipw_modes[priv->assoc_request.ieee_mode]);
5166 IPW_DEBUG_WX("Name: %s\n", wrqu->name);
5170 static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
5173 IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
5174 priv->config &= ~CFG_STATIC_CHANNEL;
5175 if (!(priv->status & (STATUS_SCANNING | STATUS_ASSOCIATED |
5176 STATUS_ASSOCIATING))) {
5177 IPW_DEBUG_ASSOC("Attempting to associate with new "
5179 ipw_associate(priv);
5185 priv->config |= CFG_STATIC_CHANNEL;
5187 if (priv->channel == channel) {
5189 "Request to set channel to current value (%d)\n",
5194 IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
5195 priv->channel = channel;
5197 /* If we are currently associated, or trying to associate
5198 * then see if this is a new channel (causing us to disassociate) */
5199 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
5200 IPW_DEBUG_ASSOC("Disassociating due to channel change.\n");
5201 ipw_disassociate(priv);
5203 ipw_associate(priv);
5209 static int ipw_wx_set_freq(struct net_device *dev,
5210 struct iw_request_info *info,
5211 union iwreq_data *wrqu, char *extra)
5213 struct ipw_priv *priv = ieee80211_priv(dev);
5214 struct iw_freq *fwrq = &wrqu->freq;
5216 /* if setting by freq convert to channel */
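/* Wireless extensions encode frequency as m * 10^e Hz; values in the
 * 2.4 GHz range are mapped back to a channel number by matching against
 * ipw_frequencies[], while small values are treated as channel numbers
 * and handed to ipw_set_channel() directly. */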
5218 if ((fwrq->m >= (int) 2.412e8 &&
5219 fwrq->m <= (int) 2.487e8)) {
5220 int f = fwrq->m / 100000;
5223 while ((c < REG_MAX_CHANNEL) &&
5224 (f != ipw_frequencies[c]))
5227 /* hack to fall through */
5233 if (fwrq->e > 0 || fwrq->m > 1000)
5236 IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
5237 return ipw_set_channel(priv, (u8)fwrq->m);
5243 static int ipw_wx_get_freq(struct net_device *dev,
5244 struct iw_request_info *info,
5245 union iwreq_data *wrqu, char *extra)
5247 struct ipw_priv *priv = ieee80211_priv(dev);
5251 /* If we are associated, trying to associate, or have a statically
5252 * configured CHANNEL then return that; otherwise return ANY */
5253 if (priv->config & CFG_STATIC_CHANNEL ||
5254 priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED))
5255 wrqu->freq.m = priv->channel;
5259 IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel);
5263 static int ipw_wx_set_mode(struct net_device *dev,
5264 struct iw_request_info *info,
5265 union iwreq_data *wrqu, char *extra)
5267 struct ipw_priv *priv = ieee80211_priv(dev);
5270 IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);
5272 if (wrqu->mode == priv->ieee->iw_mode)
5275 switch (wrqu->mode) {
5276 #ifdef CONFIG_IPW_PROMISC
5277 case IW_MODE_MONITOR:
5283 wrqu->mode = IW_MODE_INFRA;
5289 #ifdef CONFIG_IPW_PROMISC
5290 if (priv->ieee->iw_mode == IW_MODE_MONITOR)
5291 priv->net_dev->type = ARPHRD_ETHER;
5293 if (wrqu->mode == IW_MODE_MONITOR)
5294 priv->net_dev->type = ARPHRD_IEEE80211;
5295 #endif /* CONFIG_IPW_PROMISC */
5298 /* Free the existing firmware and reset the fw_loaded
5299 * flag so ipw_load() will bring in the new firmware */
5304 release_firmware(bootfw);
5305 release_firmware(ucode);
5306 release_firmware(firmware);
5307 bootfw = ucode = firmware = NULL;
5310 priv->ieee->iw_mode = wrqu->mode;
5311 ipw_adapter_restart(priv);
5316 static int ipw_wx_get_mode(struct net_device *dev,
5317 struct iw_request_info *info,
5318 union iwreq_data *wrqu, char *extra)
5320 struct ipw_priv *priv = ieee80211_priv(dev);
5322 wrqu->mode = priv->ieee->iw_mode;
5323 IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
5329 #define DEFAULT_RTS_THRESHOLD 2304U
5330 #define MIN_RTS_THRESHOLD 1U
5331 #define MAX_RTS_THRESHOLD 2304U
5332 #define DEFAULT_BEACON_INTERVAL 100U
5333 #define DEFAULT_SHORT_RETRY_LIMIT 7U
5334 #define DEFAULT_LONG_RETRY_LIMIT 4U
5336 /* Values are in microseconds */
5337 static const s32 timeout_duration[] = {
5345 static const s32 period_duration[] = {
5353 static int ipw_wx_get_range(struct net_device *dev,
5354 struct iw_request_info *info,
5355 union iwreq_data *wrqu, char *extra)
5357 struct ipw_priv *priv = ieee80211_priv(dev);
5358 struct iw_range *range = (struct iw_range *)extra;
5362 wrqu->data.length = sizeof(*range);
5363 memset(range, 0, sizeof(*range));
5365 /* 54 Mbps == ~27 Mb/s real (802.11g) */
5366 range->throughput = 27 * 1000 * 1000;
5368 range->max_qual.qual = 100;
5369 /* TODO: Find real max RSSI and stick here */
5370 range->max_qual.level = 0;
5371 range->max_qual.noise = 0;
5372 range->max_qual.updated = 7; /* Updated all three */
5374 range->avg_qual.qual = 70;
5375 /* TODO: Find real 'good' to 'bad' threshold value for RSSI */
5376 range->avg_qual.level = 0; /* FIXME to real average level */
5377 range->avg_qual.noise = 0;
5378 range->avg_qual.updated = 7; /* Updated all three */
5380 range->num_bitrates = min(priv->rates.num_rates, (u8)IW_MAX_BITRATES);
5382 for (i = 0; i < range->num_bitrates; i++)
5383 range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
5386 range->max_rts = DEFAULT_RTS_THRESHOLD;
5387 range->min_frag = MIN_FRAG_THRESHOLD;
5388 range->max_frag = MAX_FRAG_THRESHOLD;
5390 range->encoding_size[0] = 5;
5391 range->encoding_size[1] = 13;
5392 range->num_encoding_sizes = 2;
5393 range->max_encoding_tokens = WEP_KEYS;
5395 /* Set the Wireless Extension versions */
5396 range->we_version_compiled = WIRELESS_EXT;
5397 range->we_version_source = 16;
5399 range->num_channels = FREQ_COUNT;
5402 for (i = 0; i < FREQ_COUNT; i++) {
5403 range->freq[val].i = i + 1;
5404 range->freq[val].m = ipw_frequencies[i] * 100000;
5405 range->freq[val].e = 1;
5408 if (val == IW_MAX_FREQUENCIES)
5411 range->num_frequency = val;
5413 IPW_DEBUG_WX("GET Range\n");
5417 static int ipw_wx_set_wap(struct net_device *dev,
5418 struct iw_request_info *info,
5419 union iwreq_data *wrqu, char *extra)
5421 struct ipw_priv *priv = ieee80211_priv(dev);
5423 static const unsigned char any[] = {
5424 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
5426 static const unsigned char off[] = {
5427 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
5430 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
5433 if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
5434 !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
5435 /* we disable mandatory BSSID association */
5436 IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
5437 priv->config &= ~CFG_STATIC_BSSID;
5438 if (!(priv->status & (STATUS_SCANNING | STATUS_ASSOCIATED |
5439 STATUS_ASSOCIATING))) {
5440 IPW_DEBUG_ASSOC("Attempting to associate with new "
5442 ipw_associate(priv);
5448 priv->config |= CFG_STATIC_BSSID;
5449 if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) {
5450 IPW_DEBUG_WX("BSSID set to current BSSID.\n");
5454 IPW_DEBUG_WX("Setting mandatory BSSID to " MAC_FMT "\n",
5455 MAC_ARG(wrqu->ap_addr.sa_data));
5457 memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
5459 /* If we are currently associated, or trying to associate
5460 * then see if this is a new BSSID (causing us to disassociate) */
5461 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
5462 IPW_DEBUG_ASSOC("Disassociating due to BSSID change.\n");
5463 ipw_disassociate(priv);
5465 ipw_associate(priv);
5471 static int ipw_wx_get_wap(struct net_device *dev,
5472 struct iw_request_info *info,
5473 union iwreq_data *wrqu, char *extra)
5475 struct ipw_priv *priv = ieee80211_priv(dev);
5476 /* If we are associated, trying to associate, or have a statically
5477 * configured BSSID then return that; otherwise return ANY */
5478 if (priv->config & CFG_STATIC_BSSID ||
5479 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
5480 wrqu->ap_addr.sa_family = ARPHRD_ETHER;
5481 memcpy(wrqu->ap_addr.sa_data, &priv->bssid, ETH_ALEN);
5483 memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
5485 IPW_DEBUG_WX("Getting WAP BSSID: " MAC_FMT "\n",
5486 MAC_ARG(wrqu->ap_addr.sa_data));
5490 static int ipw_wx_set_essid(struct net_device *dev,
5491 struct iw_request_info *info,
5492 union iwreq_data *wrqu, char *extra)
5494 struct ipw_priv *priv = ieee80211_priv(dev);
5495 char *essid = ""; /* ANY */
5498 if (wrqu->essid.flags && wrqu->essid.length) {
5499 length = wrqu->essid.length - 1;
5503 IPW_DEBUG_WX("Setting ESSID to ANY\n");
5504 priv->config &= ~CFG_STATIC_ESSID;
5505 if (!(priv->status & (STATUS_SCANNING | STATUS_ASSOCIATED |
5506 STATUS_ASSOCIATING))) {
5507 IPW_DEBUG_ASSOC("Attempting to associate with new "
5509 ipw_associate(priv);
5515 length = min(length, IW_ESSID_MAX_SIZE);
5517 priv->config |= CFG_STATIC_ESSID;
5519 if (priv->essid_len == length && !memcmp(priv->essid, extra, length)) {
5520 IPW_DEBUG_WX("ESSID set to current ESSID.\n");
5524 IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n", escape_essid(essid, length),
5527 priv->essid_len = length;
5528 memcpy(priv->essid, essid, priv->essid_len);
5530 /* If we are currently associated, or trying to associate
5531 * then see if this is a new ESSID (causing us to disassociate) */
5532 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
5533 IPW_DEBUG_ASSOC("Disassociating due to ESSID change.\n");
5534 ipw_disassociate(priv);
5536 ipw_associate(priv);
5542 static int ipw_wx_get_essid(struct net_device *dev,
5543 struct iw_request_info *info,
5544 union iwreq_data *wrqu, char *extra)
5546 struct ipw_priv *priv = ieee80211_priv(dev);
5548 /* If we are associated, trying to associate, or have a statically
5549 * configured ESSID then return that; otherwise return ANY */
5550 if (priv->config & CFG_STATIC_ESSID ||
5551 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
5552 IPW_DEBUG_WX("Getting essid: '%s'\n",
5553 escape_essid(priv->essid, priv->essid_len));
5554 memcpy(extra, priv->essid, priv->essid_len);
5555 wrqu->essid.length = priv->essid_len;
5556 wrqu->essid.flags = 1; /* active */
5558 IPW_DEBUG_WX("Getting essid: ANY\n");
5559 wrqu->essid.length = 0;
5560 wrqu->essid.flags = 0; /* active */
5566 static int ipw_wx_set_nick(struct net_device *dev,
5567 struct iw_request_info *info,
5568 union iwreq_data *wrqu, char *extra)
5570 struct ipw_priv *priv = ieee80211_priv(dev);
5572 IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
5573 if (wrqu->data.length > IW_ESSID_MAX_SIZE)
5576 wrqu->data.length = min((size_t)wrqu->data.length, sizeof(priv->nick));
5577 memset(priv->nick, 0, sizeof(priv->nick));
5578 memcpy(priv->nick, extra, wrqu->data.length);
5579 IPW_DEBUG_TRACE("<<\n");
5585 static int ipw_wx_get_nick(struct net_device *dev,
5586 struct iw_request_info *info,
5587 union iwreq_data *wrqu, char *extra)
5589 struct ipw_priv *priv = ieee80211_priv(dev);
5590 IPW_DEBUG_WX("Getting nick\n");
5591 wrqu->data.length = strlen(priv->nick) + 1;
5592 memcpy(extra, priv->nick, wrqu->data.length);
5593 wrqu->data.flags = 1; /* active */
5598 static int ipw_wx_set_rate(struct net_device *dev,
5599 struct iw_request_info *info,
5600 union iwreq_data *wrqu, char *extra)
5602 IPW_DEBUG_WX("0x%p, 0x%p, 0x%p\n", dev, info, wrqu);
5606 static int ipw_wx_get_rate(struct net_device *dev,
5607 struct iw_request_info *info,
5608 union iwreq_data *wrqu, char *extra)
5610 struct ipw_priv * priv = ieee80211_priv(dev);
5611 wrqu->bitrate.value = priv->last_rate;
5613 IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
5618 static int ipw_wx_set_rts(struct net_device *dev,
5619 struct iw_request_info *info,
5620 union iwreq_data *wrqu, char *extra)
5622 struct ipw_priv *priv = ieee80211_priv(dev);
5624 if (wrqu->rts.disabled)
5625 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
5627 if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
5628 wrqu->rts.value > MAX_RTS_THRESHOLD)
5631 priv->rts_threshold = wrqu->rts.value;
5634 ipw_send_rts_threshold(priv, priv->rts_threshold);
5635 IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold);
5639 static int ipw_wx_get_rts(struct net_device *dev,
5640 struct iw_request_info *info,
5641 union iwreq_data *wrqu, char *extra)
5643 struct ipw_priv *priv = ieee80211_priv(dev);
5644 wrqu->rts.value = priv->rts_threshold;
5645 wrqu->rts.fixed = 0; /* no auto select */
5646 wrqu->rts.disabled =
5647 (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
5649 IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value);
5654 static int ipw_wx_set_txpow(struct net_device *dev,
5655 struct iw_request_info *info,
5656 union iwreq_data *wrqu, char *extra)
5658 struct ipw_priv *priv = ieee80211_priv(dev);
5659 struct ipw_tx_power tx_power;
5662 if (ipw_radio_kill_sw(priv, wrqu->power.disabled))
5663 return -EINPROGRESS;
5665 if (wrqu->power.flags != IW_TXPOW_DBM)
5668 if ((wrqu->power.value > 20) ||
5669 (wrqu->power.value < -12))
5672 priv->tx_power = wrqu->power.value;
5674 memset(&tx_power, 0, sizeof(tx_power));
5676 /* configure device for 'G' band */
5677 tx_power.ieee_mode = IPW_G_MODE;
5678 tx_power.num_channels = 11;
5679 for (i = 0; i < 11; i++) {
5680 tx_power.channels_tx_power[i].channel_number = i + 1;
5681 tx_power.channels_tx_power[i].tx_power = priv->tx_power;
5683 if (ipw_send_tx_power(priv, &tx_power))
5686 /* configure device to also handle 'B' band */
5687 tx_power.ieee_mode = IPW_B_MODE;
5688 if (ipw_send_tx_power(priv, &tx_power))
5698 static int ipw_wx_get_txpow(struct net_device *dev,
5699 struct iw_request_info *info,
5700 union iwreq_data *wrqu, char *extra)
5702 struct ipw_priv *priv = ieee80211_priv(dev);
5704 wrqu->power.value = priv->tx_power;
5705 wrqu->power.fixed = 1;
5706 wrqu->power.flags = IW_TXPOW_DBM;
5707 wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
5709 IPW_DEBUG_WX("GET TX Power -> %s %d \n",
5710 wrqu->power.disabled ? "ON" : "OFF",
5716 static int ipw_wx_set_frag(struct net_device *dev,
5717 struct iw_request_info *info,
5718 union iwreq_data *wrqu, char *extra)
5720 struct ipw_priv *priv = ieee80211_priv(dev);
5722 if (wrqu->frag.disabled)
5723 priv->ieee->fts = DEFAULT_FTS;
5725 if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
5726 wrqu->frag.value > MAX_FRAG_THRESHOLD)
5729 priv->ieee->fts = wrqu->frag.value & ~0x1;
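/* Masking off bit 0 keeps the stored fragmentation threshold even, which
 * is presumably what the firmware expects. */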
5732 ipw_send_frag_threshold(priv, wrqu->frag.value);
5733 IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value);
5737 static int ipw_wx_get_frag(struct net_device *dev,
5738 struct iw_request_info *info,
5739 union iwreq_data *wrqu, char *extra)
5741 struct ipw_priv *priv = ieee80211_priv(dev);
5742 wrqu->frag.value = priv->ieee->fts;
5743 wrqu->frag.fixed = 0; /* no auto select */
5744 wrqu->frag.disabled =
5745 (wrqu->frag.value == DEFAULT_FTS);
5747 IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value);
5752 static int ipw_wx_set_retry(struct net_device *dev,
5753 struct iw_request_info *info,
5754 union iwreq_data *wrqu, char *extra)
5756 IPW_DEBUG_WX("0x%p, 0x%p, 0x%p\n", dev, info, wrqu);
5761 static int ipw_wx_get_retry(struct net_device *dev,
5762 struct iw_request_info *info,
5763 union iwreq_data *wrqu, char *extra)
5765 IPW_DEBUG_WX("0x%p, 0x%p, 0x%p\n", dev, info, wrqu);
5770 static int ipw_wx_set_scan(struct net_device *dev,
5771 struct iw_request_info *info,
5772 union iwreq_data *wrqu, char *extra)
5774 struct ipw_priv *priv = ieee80211_priv(dev);
5775 IPW_DEBUG_WX("Start scan\n");
5776 if (ipw_request_scan(priv))
5781 static int ipw_wx_get_scan(struct net_device *dev,
5782 struct iw_request_info *info,
5783 union iwreq_data *wrqu, char *extra)
5785 struct ipw_priv *priv = ieee80211_priv(dev);
5786 return ieee80211_wx_get_scan(priv->ieee, info, wrqu, extra);
5789 static int ipw_wx_set_encode(struct net_device *dev,
5790 struct iw_request_info *info,
5791 union iwreq_data *wrqu, char *key)
5793 struct ipw_priv *priv = ieee80211_priv(dev);
5794 return ieee80211_wx_set_encode(priv->ieee, info, wrqu, key);
5797 static int ipw_wx_get_encode(struct net_device *dev,
5798 struct iw_request_info *info,
5799 union iwreq_data *wrqu, char *key)
5801 struct ipw_priv *priv = ieee80211_priv(dev);
5802 return ieee80211_wx_get_encode(priv->ieee, info, wrqu, key);
5805 static int ipw_wx_set_power(struct net_device *dev,
5806 struct iw_request_info *info,
5807 union iwreq_data *wrqu, char *extra)
5809 struct ipw_priv *priv = ieee80211_priv(dev);
5812 if (wrqu->power.disabled) {
5813 priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
5814 err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
5816 IPW_DEBUG_WX("failed setting power mode.\n");
5820 IPW_DEBUG_WX("SET Power Management Mode -> off\n");
5825 switch (wrqu->power.flags & IW_POWER_MODE) {
5826 case IW_POWER_ON: /* If not specified */
5827 case IW_POWER_MODE: /* If set all mask */
5828 case IW_POWER_ALL_R: /* If explicitly set to all */
5830 default: /* Otherwise we don't support it */
5831 IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
5836 /* If the user hasn't specified a power management mode yet, default
5838 if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
5839 priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
5841 priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
5842 err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
5844 IPW_DEBUG_WX("failed setting power mode.\n");
5848 IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n",
5854 static int ipw_wx_get_power(struct net_device *dev,
5855 struct iw_request_info *info,
5856 union iwreq_data *wrqu, char *extra)
5858 struct ipw_priv *priv = ieee80211_priv(dev);
5860 if (!(priv->power_mode & IPW_POWER_ENABLED)) {
5861 wrqu->power.disabled = 1;
5863 wrqu->power.disabled = 0;
5866 IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
5871 static int ipw_wx_set_powermode(struct net_device *dev,
5872 struct iw_request_info *info,
5873 union iwreq_data *wrqu, char *extra)
5875 struct ipw_priv *priv = ieee80211_priv(dev);
5876 int mode = *(int *)extra;
5879 if ((mode < 1) || (mode > IPW_POWER_LIMIT)) {
5880 mode = IPW_POWER_AC;
5881 priv->power_mode = mode;
5883 priv->power_mode = IPW_POWER_ENABLED | mode;
5886 if (priv->power_mode != mode) {
5887 err = ipw_send_power_mode(priv, mode);
5890 IPW_DEBUG_WX("failed setting power mode.\n");
5898 #define MAX_WX_STRING 80
5899 static int ipw_wx_get_powermode(struct net_device *dev,
5900 struct iw_request_info *info,
5901 union iwreq_data *wrqu, char *extra)
5903 struct ipw_priv *priv = ieee80211_priv(dev);
5904 int level = IPW_POWER_LEVEL(priv->power_mode);
5907 p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
5911 p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
5913 case IPW_POWER_BATTERY:
5914 p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
5917 p += snprintf(p, MAX_WX_STRING - (p - extra),
5918 "(Timeout %dms, Period %dms)",
5919 timeout_duration[level - 1] / 1000,
5920 period_duration[level - 1] / 1000);
5923 if (!(priv->power_mode & IPW_POWER_ENABLED))
5924 p += snprintf(p, MAX_WX_STRING - (p - extra)," OFF");
5926 wrqu->data.length = p - extra + 1;
5931 static int ipw_wx_set_wireless_mode(struct net_device *dev,
5932 struct iw_request_info *info,
5933 union iwreq_data *wrqu, char *extra)
5935 struct ipw_priv *priv = ieee80211_priv(dev);
5936 int mode = *(int *)extra;
5937 u8 band = 0, modulation = 0;
5939 if (mode == 0 || mode & ~IEEE_MODE_MASK) {
5940 IPW_WARNING("Attempt to set invalid wireless mode: %d\n",
5945 if (priv->adapter == IPW_2915ABG) {
5946 priv->ieee->abg_ture = 1;
5947 if (mode & IEEE_A) {
5948 band |= IEEE80211_52GHZ_BAND;
5949 modulation |= IEEE80211_OFDM_MODULATION;
5951 priv->ieee->abg_ture = 0;
5953 if (mode & IEEE_A) {
5954 IPW_WARNING("Attempt to set 2200BG into "
5959 priv->ieee->abg_ture = 0;
5962 if (mode & IEEE_B) {
5963 band |= IEEE80211_24GHZ_BAND;
5964 modulation |= IEEE80211_CCK_MODULATION;
5966 priv->ieee->abg_ture = 0;
5968 if (mode & IEEE_G) {
5969 band |= IEEE80211_24GHZ_BAND;
5970 modulation |= IEEE80211_OFDM_MODULATION;
5972 priv->ieee->abg_ture = 0;
5974 priv->ieee->mode = mode;
5975 priv->ieee->freq_band = band;
5976 priv->ieee->modulation = modulation;
5977 init_supported_rates(priv, &priv->rates);
5979 /* If we are currently associated, or trying to associate
5980 * then see if this is a new configuration (causing us to
5982 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
5983 /* The resulting association will trigger
5984 * the new rates to be sent to the device */
5985 IPW_DEBUG_ASSOC("Disassociating due to mode change.\n");
5986 ipw_disassociate(priv);
5988 ipw_send_supported_rates(priv, &priv->rates);
5990 IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
5991 mode & IEEE_A ? 'a' : '.',
5992 mode & IEEE_B ? 'b' : '.',
5993 mode & IEEE_G ? 'g' : '.');
5997 static int ipw_wx_get_wireless_mode(struct net_device *dev,
5998 struct iw_request_info *info,
5999 union iwreq_data *wrqu, char *extra)
6001 struct ipw_priv *priv = ieee80211_priv(dev);
6003 switch (priv->ieee->freq_band) {
6004 case IEEE80211_24GHZ_BAND:
6005 switch (priv->ieee->modulation) {
6006 case IEEE80211_CCK_MODULATION:
6007 strncpy(extra, "802.11b (2)", MAX_WX_STRING);
6009 case IEEE80211_OFDM_MODULATION:
6010 strncpy(extra, "802.11g (4)", MAX_WX_STRING);
6013 strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
6018 case IEEE80211_52GHZ_BAND:
6019 strncpy(extra, "802.11a (1)", MAX_WX_STRING);
6022 default: /* Mixed Band */
6023 switch (priv->ieee->modulation) {
6024 case IEEE80211_CCK_MODULATION:
6025 strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
6027 case IEEE80211_OFDM_MODULATION:
6028 strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
6031 strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
6037 IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
6039 wrqu->data.length = strlen(extra) + 1;
6044 #ifdef CONFIG_IPW_PROMISC
6045 static int ipw_wx_set_promisc(struct net_device *dev,
6046 struct iw_request_info *info,
6047 union iwreq_data *wrqu, char *extra)
6049 struct ipw_priv *priv = ieee80211_priv(dev);
6050 int *parms = (int *)extra;
6051 int enable = (parms[0] > 0);
6053 IPW_DEBUG_WX("SET PROMISC: %d %d\n", enable, parms[1]);
6055 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
6056 priv->net_dev->type = ARPHRD_IEEE80211;
6057 ipw_adapter_restart(priv);
6060 ipw_set_channel(priv, parms[1]);
6062 if (priv->ieee->iw_mode != IW_MODE_MONITOR)
6064 priv->net_dev->type = ARPHRD_ETHER;
6065 ipw_adapter_restart(priv);
6071 static int ipw_wx_reset(struct net_device *dev,
6072 struct iw_request_info *info,
6073 union iwreq_data *wrqu, char *extra)
6075 struct ipw_priv *priv = ieee80211_priv(dev);
6076 IPW_DEBUG_WX("RESET\n");
6077 ipw_adapter_restart(priv);
6080 #endif // CONFIG_IPW_PROMISC
6082 /* Rebase the WE IOCTLs to zero for the handler array */
6083 #define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
6084 static iw_handler ipw_wx_handlers[] =
6086 IW_IOCTL(SIOCGIWNAME) = ipw_wx_get_name,
6087 IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq,
6088 IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq,
6089 IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode,
6090 IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode,
6091 IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range,
6092 IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap,
6093 IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap,
6094 IW_IOCTL(SIOCSIWSCAN) = ipw_wx_set_scan,
6095 IW_IOCTL(SIOCGIWSCAN) = ipw_wx_get_scan,
6096 IW_IOCTL(SIOCSIWESSID) = ipw_wx_set_essid,
6097 IW_IOCTL(SIOCGIWESSID) = ipw_wx_get_essid,
6098 IW_IOCTL(SIOCSIWNICKN) = ipw_wx_set_nick,
6099 IW_IOCTL(SIOCGIWNICKN) = ipw_wx_get_nick,
6100 IW_IOCTL(SIOCSIWRATE) = ipw_wx_set_rate,
6101 IW_IOCTL(SIOCGIWRATE) = ipw_wx_get_rate,
6102 IW_IOCTL(SIOCSIWRTS) = ipw_wx_set_rts,
6103 IW_IOCTL(SIOCGIWRTS) = ipw_wx_get_rts,
6104 IW_IOCTL(SIOCSIWFRAG) = ipw_wx_set_frag,
6105 IW_IOCTL(SIOCGIWFRAG) = ipw_wx_get_frag,
6106 IW_IOCTL(SIOCSIWTXPOW) = ipw_wx_set_txpow,
6107 IW_IOCTL(SIOCGIWTXPOW) = ipw_wx_get_txpow,
6108 IW_IOCTL(SIOCSIWRETRY) = ipw_wx_set_retry,
6109 IW_IOCTL(SIOCGIWRETRY) = ipw_wx_get_retry,
6110 IW_IOCTL(SIOCSIWENCODE) = ipw_wx_set_encode,
6111 IW_IOCTL(SIOCGIWENCODE) = ipw_wx_get_encode,
6112 IW_IOCTL(SIOCSIWPOWER) = ipw_wx_set_power,
6113 IW_IOCTL(SIOCGIWPOWER) = ipw_wx_get_power,
6116 #define IPW_PRIV_SET_POWER SIOCIWFIRSTPRIV
6117 #define IPW_PRIV_GET_POWER SIOCIWFIRSTPRIV+1
6118 #define IPW_PRIV_SET_MODE SIOCIWFIRSTPRIV+2
6119 #define IPW_PRIV_GET_MODE SIOCIWFIRSTPRIV+3
6120 #define IPW_PRIV_SET_PROMISC SIOCIWFIRSTPRIV+4
6121 #define IPW_PRIV_RESET SIOCIWFIRSTPRIV+5
6124 static struct iw_priv_args ipw_priv_args[] = {
6126 .cmd = IPW_PRIV_SET_POWER,
6127 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
6131 .cmd = IPW_PRIV_GET_POWER,
6132 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
6136 .cmd = IPW_PRIV_SET_MODE,
6137 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
6141 .cmd = IPW_PRIV_GET_MODE,
6142 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
6145 #ifdef CONFIG_IPW_PROMISC
6147 IPW_PRIV_SET_PROMISC,
6148 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"
6152 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"
6154 #endif /* CONFIG_IPW_PROMISC */
6157 static iw_handler ipw_priv_handler[] = {
6158 ipw_wx_set_powermode,
6159 ipw_wx_get_powermode,
6160 ipw_wx_set_wireless_mode,
6161 ipw_wx_get_wireless_mode,
6162 #ifdef CONFIG_IPW_PROMISC
6168 static struct iw_handler_def ipw_wx_handler_def =
6170 .standard = ipw_wx_handlers,
6171 .num_standard = ARRAY_SIZE(ipw_wx_handlers),
6172 .num_private = ARRAY_SIZE(ipw_priv_handler),
6173 .num_private_args = ARRAY_SIZE(ipw_priv_args),
6174 .private = ipw_priv_handler,
6175 .private_args = ipw_priv_args,
6182 * Get wireless statistics.
6183 * Called by /proc/net/wireless
6184 * Also called by SIOCGIWSTATS
6186 static struct iw_statistics *ipw_get_wireless_stats(struct net_device * dev)
6188 struct ipw_priv *priv = ieee80211_priv(dev);
6189 struct iw_statistics *wstats;
6191 wstats = &priv->wstats;
6193 /* if hw is disabled, then ipw2100_get_ordinal() can't be called.
6194 * ipw2100_wx_wireless_stats seems to be called before fw is
6195 * initialized. STATUS_ASSOCIATED will only be set if the hw is up
6196 * and associated; if not associated, the values are all meaningless
6197 * anyway, so set them all to NULL and INVALID */
6198 if (!(priv->status & STATUS_ASSOCIATED)) {
6199 wstats->miss.beacon = 0;
6200 wstats->discard.retries = 0;
6201 wstats->qual.qual = 0;
6202 wstats->qual.level = 0;
6203 wstats->qual.noise = 0;
6204 wstats->qual.updated = 7;
6205 wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
6206 IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
6210 wstats->qual.qual = priv->quality;
6211 wstats->qual.level = average_value(&priv->average_rssi);
6212 wstats->qual.noise = average_value(&priv->average_noise);
6213 wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
6214 IW_QUAL_NOISE_UPDATED;
6216 wstats->miss.beacon = average_value(&priv->average_missed_beacons);
6217 wstats->discard.retries = priv->last_tx_failures;
6218 wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;
6220 /* if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
6221 goto fail_get_ordinal;
6222 wstats->discard.retries += tx_retry; */
6228 /* net device stuff */
6230 static inline void init_sys_config(struct ipw_sys_config *sys_config)
6232 memset(sys_config, 0, sizeof(struct ipw_sys_config));
6233 sys_config->bt_coexistence = 1; /* We may need to look into prvStaBtConfig */
6234 sys_config->answer_broadcast_ssid_probe = 0;
6235 sys_config->accept_all_data_frames = 0;
6236 sys_config->accept_non_directed_frames = 1;
6237 sys_config->exclude_unicast_unencrypted = 0;
6238 sys_config->disable_unicast_decryption = 1;
6239 sys_config->exclude_multicast_unencrypted = 0;
6240 sys_config->disable_multicast_decryption = 1;
6241 sys_config->antenna_diversity = CFG_SYS_ANTENNA_BOTH;
6242 sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */
6243 sys_config->dot11g_auto_detection = 0;
6244 sys_config->enable_cts_to_self = 0;
6245 sys_config->bt_coexist_collision_thr = 0;
6246 sys_config->pass_noise_stats_to_host = 1;
6249 static int ipw_net_open(struct net_device *dev)
6251 struct ipw_priv *priv = ieee80211_priv(dev);
6252 IPW_DEBUG_INFO("dev->open\n");
6253 /* we should be verifying the device is ready to be opened */
6254 if (!(priv->status & STATUS_RF_KILL_MASK) &&
6255 (priv->status & STATUS_ASSOCIATED))
6256 netif_start_queue(dev);
6260 static int ipw_net_stop(struct net_device *dev)
6262 IPW_DEBUG_INFO("dev->close\n");
6263 netif_stop_queue(dev);
6270 Modify to send one TFD per fragment instead of using chunking; otherwise
6271 we need to heavily modify ieee80211_skb_to_txb().
6274 static inline void ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb)
6276 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)
6277 txb->fragments[0]->data;
6279 struct tfd_frame *tfd;
6280 struct clx2_tx_queue *txq = &priv->txq[0];
6281 struct clx2_queue *q = &txq->q;
6282 u8 id, hdr_len, unicast;
6283 u16 remaining_bytes;
6285 switch (priv->ieee->iw_mode) {
6287 hdr_len = IEEE80211_3ADDR_LEN;
6288 unicast = !is_broadcast_ether_addr(hdr->addr1) &&
6289 !is_multicast_ether_addr(hdr->addr1);
6290 id = ipw_find_station(priv, hdr->addr1);
6291 if (id == IPW_INVALID_STATION) {
6292 id = ipw_add_station(priv, hdr->addr1);
6293 if (id == IPW_INVALID_STATION) {
6294 IPW_WARNING("Attempt to send data to "
6295 "invalid cell: " MAC_FMT "\n",
6296 MAC_ARG(hdr->addr1));
6304 unicast = !is_broadcast_ether_addr(hdr->addr3) &&
6305 !is_multicast_ether_addr(hdr->addr3);
6306 hdr_len = IEEE80211_3ADDR_LEN;
6311 tfd = &txq->bd[q->first_empty];
6312 txq->txb[q->first_empty] = txb;
6313 memset(tfd, 0, sizeof(*tfd));
6314 tfd->u.data.station_number = id;
6316 tfd->control_flags.message_type = TX_FRAME_TYPE;
6317 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
6319 tfd->u.data.cmd_id = DINO_CMD_TX;
6320 tfd->u.data.len = txb->payload_size;
6321 remaining_bytes = txb->payload_size;
6322 if (unlikely(!unicast))
6323 tfd->u.data.tx_flags = DCT_FLAG_NO_WEP;
6325 tfd->u.data.tx_flags = DCT_FLAG_NO_WEP | DCT_FLAG_ACK_REQD;
6327 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
6328 tfd->u.data.tx_flags_ext = DCT_FLAG_EXT_MODE_CCK;
6330 tfd->u.data.tx_flags_ext = DCT_FLAG_EXT_MODE_OFDM;
6332 if (priv->config & CFG_PREAMBLE)
6333 tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREMBL;
6335 memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);
6338 tfd->u.data.num_chunks = min((u8)(NUM_TFD_CHUNKS - 2), txb->nr_frags);
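/* Up to NUM_TFD_CHUNKS - 2 fragments get their own DMA chunk in this TFD;
 * any fragments left over are coalesced into a single newly allocated skb
 * and mapped as one extra chunk in the block following this loop. */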
6339 for (i = 0; i < tfd->u.data.num_chunks; i++) {
6340 IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
6341 i, tfd->u.data.num_chunks,
6342 txb->fragments[i]->len - hdr_len);
6343 printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
6344 txb->fragments[i]->len - hdr_len);
6346 tfd->u.data.chunk_ptr[i] = pci_map_single(
6347 priv->pci_dev, txb->fragments[i]->data + hdr_len,
6348 txb->fragments[i]->len - hdr_len, PCI_DMA_TODEVICE);
6349 tfd->u.data.chunk_len[i] = txb->fragments[i]->len - hdr_len;
6352 if (i != txb->nr_frags) {
6353 struct sk_buff *skb;
6354 u16 remaining_bytes = 0;
6357 for (j = i; j < txb->nr_frags; j++)
6358 remaining_bytes += txb->fragments[j]->len - hdr_len;
6360 printk(KERN_INFO "Trying to reallocate for %d bytes\n",
6362 skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
6364 tfd->u.data.chunk_len[i] = remaining_bytes;
6365 for (j = i; j < txb->nr_frags; j++) {
6366 int size = txb->fragments[j]->len - hdr_len;
6367 printk(KERN_INFO "Adding frag %d %d...\n",
6369 memcpy(skb_put(skb, size),
6370 txb->fragments[j]->data + hdr_len,
6373 dev_kfree_skb_any(txb->fragments[i]);
6374 txb->fragments[i] = skb;
6375 tfd->u.data.chunk_ptr[i] = pci_map_single(
6376 priv->pci_dev, skb->data,
6377 tfd->u.data.chunk_len[i], PCI_DMA_TODEVICE);
6378 tfd->u.data.num_chunks++;
6383 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
6384 ipw_write32(priv, q->reg_w, q->first_empty);
6386 if (ipw_queue_space(q) < q->high_mark)
6387 netif_stop_queue(priv->net_dev);
6392 IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
6393 ieee80211_txb_free(txb);
6396 static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb,
6397 struct net_device *dev)
6399 struct ipw_priv *priv = ieee80211_priv(dev);
6400 unsigned long flags;
6402 IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
6404 spin_lock_irqsave(&priv->lock, flags);
6406 if (!(priv->status & STATUS_ASSOCIATED)) {
6407 IPW_DEBUG_INFO("Tx attempt while not associated.\n");
6408 priv->ieee->stats.tx_carrier_errors++;
6409 netif_stop_queue(dev);
6413 ipw_tx_skb(priv, txb);
6415 spin_unlock_irqrestore(&priv->lock, flags);
6419 spin_unlock_irqrestore(&priv->lock, flags);
6423 static struct net_device_stats *ipw_net_get_stats(struct net_device *dev)
6425 struct ipw_priv *priv = ieee80211_priv(dev);
6427 priv->ieee->stats.tx_packets = priv->tx_packets;
6428 priv->ieee->stats.rx_packets = priv->rx_packets;
6429 return &priv->ieee->stats;
6432 static void ipw_net_set_multicast_list(struct net_device *dev)
6437 static int ipw_net_set_mac_address(struct net_device *dev, void *p)
6439 struct ipw_priv *priv = ieee80211_priv(dev);
6440 struct sockaddr *addr = p;
6441 if (!is_valid_ether_addr(addr->sa_data))
6442 return -EADDRNOTAVAIL;
6443 priv->config |= CFG_CUSTOM_MAC;
6444 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
6445 printk(KERN_INFO "%s: Setting MAC to " MAC_FMT "\n",
6446 priv->net_dev->name, MAC_ARG(priv->mac_addr));
6447 ipw_adapter_restart(priv);
6451 static void ipw_ethtool_get_drvinfo(struct net_device *dev,
6452 struct ethtool_drvinfo *info)
6454 struct ipw_priv *p = ieee80211_priv(dev);
6459 strcpy(info->driver, DRV_NAME);
6460 strcpy(info->version, DRV_VERSION);
6463 ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
6465 ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
6467 snprintf(info->fw_version, sizeof(info->fw_version),"%s (%s)",
6469 strcpy(info->bus_info, pci_name(p->pci_dev));
6470 info->eedump_len = CX2_EEPROM_IMAGE_SIZE;
6473 static u32 ipw_ethtool_get_link(struct net_device *dev)
6475 struct ipw_priv *priv = ieee80211_priv(dev);
6476 return (priv->status & STATUS_ASSOCIATED) != 0;
6479 static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
6481 return CX2_EEPROM_IMAGE_SIZE;
6484 static int ipw_ethtool_get_eeprom(struct net_device *dev,
6485 struct ethtool_eeprom *eeprom, u8 *bytes)
6487 struct ipw_priv *p = ieee80211_priv(dev);
6489 if (eeprom->offset + eeprom->len > CX2_EEPROM_IMAGE_SIZE)
6492 memcpy(bytes, &((u8 *)p->eeprom)[eeprom->offset], eeprom->len);
6496 static int ipw_ethtool_set_eeprom(struct net_device *dev,
6497 struct ethtool_eeprom *eeprom, u8 *bytes)
6499 struct ipw_priv *p = ieee80211_priv(dev);
6502 if (eeprom->offset + eeprom->len > CX2_EEPROM_IMAGE_SIZE)
6505 memcpy(&((u8 *)p->eeprom)[eeprom->offset], bytes, eeprom->len);
6506 for (i = IPW_EEPROM_DATA;
6507 i < IPW_EEPROM_DATA + CX2_EEPROM_IMAGE_SIZE;
6509 ipw_write8(p, i, p->eeprom[i]);
6514 static struct ethtool_ops ipw_ethtool_ops = {
6515 .get_link = ipw_ethtool_get_link,
6516 .get_drvinfo = ipw_ethtool_get_drvinfo,
6517 .get_eeprom_len = ipw_ethtool_get_eeprom_len,
6518 .get_eeprom = ipw_ethtool_get_eeprom,
6519 .set_eeprom = ipw_ethtool_set_eeprom,
6522 static irqreturn_t ipw_isr(int irq, void *data, struct pt_regs *regs)
6524 struct ipw_priv *priv = data;
6525 u32 inta, inta_mask;
6530 spin_lock(&priv->lock);
6532 if (!(priv->status & STATUS_INT_ENABLED)) {
6537 inta = ipw_read32(priv, CX2_INTA_RW);
6538 inta_mask = ipw_read32(priv, CX2_INTA_MASK_R);
6540 if (inta == 0xFFFFFFFF) {
6541 /* Hardware disappeared */
6542 IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
6546 if (!(inta & (CX2_INTA_MASK_ALL & inta_mask))) {
6547 /* Shared interrupt */
6551 /* tell the device to stop sending interrupts */
6552 ipw_disable_interrupts(priv);
6554 /* ack current interrupts */
6555 inta &= (CX2_INTA_MASK_ALL & inta_mask);
6556 ipw_write32(priv, CX2_INTA_RW, inta);
6558 /* Cache INTA value for our tasklet */
6559 priv->isr_inta = inta;
6561 tasklet_schedule(&priv->irq_tasklet);
6563 spin_unlock(&priv->lock);
6567 spin_unlock(&priv->lock);
6571 static void ipw_rf_kill(void *adapter)
6573 struct ipw_priv *priv = adapter;
6574 unsigned long flags;
6576 spin_lock_irqsave(&priv->lock, flags);
6578 if (rf_kill_active(priv)) {
6579 IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
6580 if (priv->workqueue)
6581 queue_delayed_work(priv->workqueue,
6582 &priv->rf_kill, 2 * HZ);
6586 /* RF Kill is now disabled, so bring the device back up */
6588 if (!(priv->status & STATUS_RF_KILL_MASK)) {
6589 IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
6592 /* we cannot do an adapter restart while inside an irq lock */
6593 queue_work(priv->workqueue, &priv->adapter_restart);
6595 IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
6599 spin_unlock_irqrestore(&priv->lock, flags);
6602 static int ipw_setup_deferred_work(struct ipw_priv *priv)
6606 #ifdef CONFIG_SOFTWARE_SUSPEND2
6607 priv->workqueue = create_workqueue(DRV_NAME, 0);
6609 priv->workqueue = create_workqueue(DRV_NAME);
6611 init_waitqueue_head(&priv->wait_command_queue);
6613 INIT_WORK(&priv->adhoc_check, ipw_adhoc_check, priv);
6614 INIT_WORK(&priv->associate, ipw_associate, priv);
6615 INIT_WORK(&priv->disassociate, ipw_disassociate, priv);
6616 INIT_WORK(&priv->rx_replenish, ipw_rx_queue_replenish, priv);
6617 INIT_WORK(&priv->adapter_restart, ipw_adapter_restart, priv);
6618 INIT_WORK(&priv->rf_kill, ipw_rf_kill, priv);
6619 INIT_WORK(&priv->up, (void (*)(void *))ipw_up, priv);
6620 INIT_WORK(&priv->down, (void (*)(void *))ipw_down, priv);
6621 INIT_WORK(&priv->request_scan,
6622 (void (*)(void *))ipw_request_scan, priv);
6623 INIT_WORK(&priv->gather_stats,
6624 (void (*)(void *))ipw_gather_stats, priv);
6625 INIT_WORK(&priv->abort_scan, (void (*)(void *))ipw_abort_scan, priv);
6626 INIT_WORK(&priv->roam, ipw_roam, priv);
6627 INIT_WORK(&priv->scan_check, ipw_scan_check, priv);
6629 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
6630 ipw_irq_tasklet, (unsigned long)priv);
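/* ieee80211 set_security callback: mirror the requested WEP keys, active key
 * index, auth mode, privacy enable and security level into priv->sec, and set
 * STATUS_SECURITY_UPDATED so the rest of the driver picks up the change. */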
6636 static void shim__set_security(struct net_device *dev,
6637 struct ieee80211_security *sec)
6639 struct ipw_priv *priv = ieee80211_priv(dev);
6642 for (i = 0; i < 4; i++) {
6643 if (sec->flags & (1 << i)) {
6644 priv->sec.key_sizes[i] = sec->key_sizes[i];
6645 if (sec->key_sizes[i] == 0)
6646 priv->sec.flags &= ~(1 << i);
6648 memcpy(priv->sec.keys[i], sec->keys[i],
6650 priv->sec.flags |= (1 << i);
6651 priv->status |= STATUS_SECURITY_UPDATED;
6655 if ((sec->flags & SEC_ACTIVE_KEY) &&
6656 priv->sec.active_key != sec->active_key) {
6657 if (sec->active_key <= 3) {
6658 priv->sec.active_key = sec->active_key;
6659 priv->sec.flags |= SEC_ACTIVE_KEY;
6661 priv->sec.flags &= ~SEC_ACTIVE_KEY;
6662 priv->status |= STATUS_SECURITY_UPDATED;
6665 if ((sec->flags & SEC_AUTH_MODE) &&
6666 (priv->sec.auth_mode != sec->auth_mode)) {
6667 priv->sec.auth_mode = sec->auth_mode;
6668 priv->sec.flags |= SEC_AUTH_MODE;
6669 if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
6670 priv->capability |= CAP_SHARED_KEY;
6672 priv->capability &= ~CAP_SHARED_KEY;
6673 priv->status |= STATUS_SECURITY_UPDATED;
6676 if (sec->flags & SEC_ENABLED &&
6677 priv->sec.enabled != sec->enabled) {
6678 priv->sec.flags |= SEC_ENABLED;
6679 priv->sec.enabled = sec->enabled;
6680 priv->status |= STATUS_SECURITY_UPDATED;
6682 priv->capability |= CAP_PRIVACY_ON;
6684 priv->capability &= ~CAP_PRIVACY_ON;
6687 if (sec->flags & SEC_LEVEL &&
6688 priv->sec.level != sec->level) {
6689 priv->sec.level = sec->level;
6690 priv->sec.flags |= SEC_LEVEL;
6691 priv->status |= STATUS_SECURITY_UPDATED;
6694 /* To match the current functionality of ipw2100 (which works well with
6695 * various supplicants), we don't force a disassociation if the
6696 * privacy capability changes ... */
6698 if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
6699 (((priv->assoc_request.capability &
6700 WLAN_CAPABILITY_PRIVACY) && !sec->enabled) ||
6701 (!(priv->assoc_request.capability &
6702 WLAN_CAPABILITY_PRIVACY) && sec->enabled))) {
6703 IPW_DEBUG_ASSOC("Disassociating due to capability "
6705 ipw_disassociate(priv);
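/* Build the supported-rates table advertised to the firmware: OFDM only for
 * the 5.2GHz band, CCK plus optional OFDM for 2.4GHz / mixed mode. */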
6710 static int init_supported_rates(struct ipw_priv *priv,
6711 struct ipw_supported_rates *rates)
6713 /* TODO: Mask out rates based on priv->rates_mask */
6715 memset(rates, 0, sizeof(*rates));
6716 /* configure supported rates */
6717 switch (priv->ieee->freq_band) {
6718 case IEEE80211_52GHZ_BAND:
6719 rates->ieee_mode = IPW_A_MODE;
6720 rates->purpose = IPW_RATE_CAPABILITIES;
6721 ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
6722 IEEE80211_OFDM_DEFAULT_RATES_MASK);
6725 default: /* Mixed or 2.4 GHz */
6726 rates->ieee_mode = IPW_G_MODE;
6727 rates->purpose = IPW_RATE_CAPABILITIES;
6728 ipw_add_cck_scan_rates(rates, IEEE80211_CCK_MODULATION,
6729 IEEE80211_CCK_DEFAULT_RATES_MASK);
6730 if (priv->ieee->modulation & IEEE80211_OFDM_MODULATION) {
6731 ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
6732 IEEE80211_OFDM_DEFAULT_RATES_MASK);
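/* Push the runtime configuration to the firmware after each (re)load: TX
 * power for the 'G' and 'B' channel tables, the adapter MAC address, system
 * config, supported rates, an optional RTS threshold and the random seed,
 * then HOST_COMPLETE to move the firmware into the RUN state. */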
6740 static int ipw_config(struct ipw_priv *priv)
6743 struct ipw_tx_power tx_power;
6745 memset(&priv->sys_config, 0, sizeof(priv->sys_config));
6746 memset(&tx_power, 0, sizeof(tx_power));
6748 /* This is only called from ipw_up, which resets/reloads the firmware,
6749 so we don't need to disable the card before we configure it. */
6752 /* configure device for 'G' band */
6753 tx_power.ieee_mode = IPW_G_MODE;
6754 tx_power.num_channels = 11;
6755 for (i = 0; i < 11; i++) {
6756 tx_power.channels_tx_power[i].channel_number = i + 1;
6757 tx_power.channels_tx_power[i].tx_power = priv->tx_power;
6759 if (ipw_send_tx_power(priv, &tx_power))
6762 /* configure device to also handle 'B' band */
6763 tx_power.ieee_mode = IPW_B_MODE;
6764 if (ipw_send_tx_power(priv, &tx_power))
6767 /* initialize adapter address */
6768 if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
6771 /* set basic system config settings */
6772 init_sys_config(&priv->sys_config);
6773 if (ipw_send_system_config(priv, &priv->sys_config))
6776 init_supported_rates(priv, &priv->rates);
6777 if (ipw_send_supported_rates(priv, &priv->rates))
6780 /* Set request-to-send threshold */
6781 if (priv->rts_threshold) {
6782 if (ipw_send_rts_threshold(priv, priv->rts_threshold))
6786 if (ipw_set_random_seed(priv))
6789 /* final state transition to the RUN state */
6790 if (ipw_send_host_complete(priv))
6793 /* If configured to try and auto-associate, kick off a scan */
6794 if ((priv->config & CFG_ASSOCIATE) && ipw_request_scan(priv))
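/* Bring the interface up: load microcode/firmware/EEPROM, start the clocks
 * and configure the device, retrying up to MAX_HW_RESTARTS times before
 * giving up. */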
6803 #define MAX_HW_RESTARTS 5
6804 static int ipw_up(struct ipw_priv *priv)
6808 if (priv->status & STATUS_EXIT_PENDING)
6811 for (i = 0; i < MAX_HW_RESTARTS; i++) {
6812 /* Load the microcode, firmware, and eeprom.
6813 * Also start the clocks. */
6814 rc = ipw_load(priv);
6816 IPW_ERROR("Unable to load firmware: 0x%08X\n",
6821 ipw_init_ordinals(priv);
6822 if (!(priv->config & CFG_CUSTOM_MAC))
6823 eeprom_parse_mac(priv, priv->mac_addr);
6824 memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
6826 if (priv->status & STATUS_RF_KILL_MASK)
6829 rc = ipw_config(priv);
6831 IPW_DEBUG_INFO("Configured device on count %i\n", i);
6832 priv->notif_missed_beacons = 0;
6833 netif_start_queue(priv->net_dev);
6836 IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n",
6840 IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
6841 i, MAX_HW_RESTARTS);
6843 /* We had an error bringing up the hardware, so take it
6844 * all the way back down so we can try again */
6848 /* We tried to restart and configure the device for as long as our
6849 * patience could withstand */
6850 IPW_ERROR("Unable to initialize device after %d attempts.\n",
6855 static void ipw_down(struct ipw_priv *priv)
6857 /* Attempt to disable the card */
6859 ipw_send_card_disable(priv, 0);
6862 /* tell the device to stop sending interrupts */
6863 ipw_disable_interrupts(priv);
6865 /* Clear all bits but the RF Kill */
6866 priv->status &= STATUS_RF_KILL_MASK;
6868 netif_carrier_off(priv->net_dev);
6869 netif_stop_queue(priv->net_dev);
6874 /* Called by register_netdev() */
6875 static int ipw_net_init(struct net_device *dev)
6877 struct ipw_priv *priv = ieee80211_priv(dev);
6879 if (priv->status & STATUS_RF_KILL_SW) {
6880 IPW_WARNING("Radio disabled by module parameter.\n");
6882 } else if (rf_kill_active(priv)) {
6883 IPW_WARNING("Radio Frequency Kill Switch is On:\n"
6884 "Kill switch must be turned off for "
6885 "wireless networking to work.\n");
6886 queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
6896 /* PCI driver stuff */
6897 static struct pci_device_id card_ids[] = {
6898 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
6899 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
6900 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
6901 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
6902 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
6903 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
6904 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
6905 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
6906 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
6907 {PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
6908 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
6909 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
6910 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
6911 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
6912 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
6913 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
6914 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
6915 {PCI_VENDOR_ID_INTEL, 0x104f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
6916 {PCI_VENDOR_ID_INTEL, 0x4220, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* BG */
6917 {PCI_VENDOR_ID_INTEL, 0x4221, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* 2225BG */
6918 {PCI_VENDOR_ID_INTEL, 0x4223, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* ABG */
6919 {PCI_VENDOR_ID_INTEL, 0x4224, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* ABG */
6921 /* required last entry */
6925 MODULE_DEVICE_TABLE(pci, card_ids);
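/* Debug and diagnostic attributes exported via sysfs on the PCI device. */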
6927 static struct attribute *ipw_sysfs_entries[] = {
6928 &dev_attr_rf_kill.attr,
6929 &dev_attr_direct_dword.attr,
6930 &dev_attr_indirect_byte.attr,
6931 &dev_attr_indirect_dword.attr,
6932 &dev_attr_mem_gpio_reg.attr,
6933 &dev_attr_command_event_reg.attr,
6934 &dev_attr_nic_type.attr,
6935 &dev_attr_status.attr,
6937 &dev_attr_dump_errors.attr,
6938 &dev_attr_dump_events.attr,
6939 &dev_attr_eeprom_delay.attr,
6940 &dev_attr_ucode_version.attr,
6945 static struct attribute_group ipw_attribute_group = {
6946 .name = NULL, /* put in device directory */
6947 .attrs = ipw_sysfs_entries,
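/* PCI probe: allocate the ieee80211/net device, map BAR 0, set up deferred
 * work, apply module parameters, detect the 2200BG/2225BG/2915ABG variant,
 * hook up the net_device, wireless and ethtool handlers, then register the
 * sysfs attributes and the network device.  Error paths unwind in reverse. */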
6950 static int ipw_pci_probe(struct pci_dev *pdev,
6951 const struct pci_device_id *ent)
6954 struct net_device *net_dev;
6957 struct ipw_priv *priv;
6958 int band, modulation;
6960 net_dev = alloc_ieee80211(sizeof(struct ipw_priv));
6961 if (net_dev == NULL) {
6966 priv = ieee80211_priv(net_dev);
6967 priv->ieee = netdev_priv(net_dev);
6968 priv->net_dev = net_dev;
6969 priv->pci_dev = pdev;
6970 #ifdef CONFIG_IPW_DEBUG
6971 ipw_debug_level = debug;
6973 spin_lock_init(&priv->lock);
6975 if (pci_enable_device(pdev)) {
6977 goto out_free_ieee80211;
6980 pci_set_master(pdev);
6982 #define PCI_DMA_32BIT 0x00000000ffffffffULL
6983 err = pci_set_dma_mask(pdev, PCI_DMA_32BIT);
6985 err = pci_set_consistent_dma_mask(pdev, PCI_DMA_32BIT);
6987 printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
6988 goto out_pci_disable_device;
6991 pci_set_drvdata(pdev, priv);
6993 err = pci_request_regions(pdev, DRV_NAME);
6995 goto out_pci_disable_device;
6997 /* We disable the RETRY_TIMEOUT register (0x41) to keep
6998 * PCI Tx retries from interfering with C3 CPU state */
6999 pci_read_config_dword(pdev, 0x40, &val);
7000 if ((val & 0x0000ff00) != 0)
7001 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
7003 length = pci_resource_len(pdev, 0);
7004 priv->hw_len = length;
7006 base = ioremap_nocache(pci_resource_start(pdev, 0), length);
7009 goto out_pci_release_regions;
7012 priv->hw_base = base;
7013 IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
7014 IPW_DEBUG_INFO("pci_resource_base = %p\n", base);
7016 err = ipw_setup_deferred_work(priv);
7018 IPW_ERROR("Unable to setup deferred work\n");
7022 /* Initialize module parameter values here */
7024 strncpy(net_dev->name, ifname, IFNAMSIZ);
7027 priv->config |= CFG_ASSOCIATE;
7029 IPW_DEBUG_INFO("Auto associate disabled.\n");
7032 priv->config |= CFG_ADHOC_CREATE;
7034 IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");
7037 priv->status |= STATUS_RF_KILL_SW;
7038 IPW_DEBUG_INFO("Radio disabled.\n");
7042 priv->config |= CFG_STATIC_CHANNEL;
7043 priv->channel = channel;
7044 IPW_DEBUG_INFO("Bind to static channel %d\n", channel);
7046 /* TODO: Validate that provided channel is in range */
7051 priv->ieee->iw_mode = IW_MODE_ADHOC;
7053 #ifdef CONFIG_IPW_PROMISC
7055 priv->ieee->iw_mode = IW_MODE_MONITOR;
7060 priv->ieee->iw_mode = IW_MODE_INFRA;
7064 if ((priv->pci_dev->device == 0x4223) ||
7065 (priv->pci_dev->device == 0x4224)) {
7066 printk(KERN_INFO DRV_NAME
7067 ": Detected Intel PRO/Wireless 2915ABG Network "
7069 priv->ieee->abg_ture = 1;
7070 band = IEEE80211_52GHZ_BAND | IEEE80211_24GHZ_BAND;
7071 modulation = IEEE80211_OFDM_MODULATION |
7072 IEEE80211_CCK_MODULATION;
7073 priv->adapter = IPW_2915ABG;
7074 priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
7076 if (priv->pci_dev->device == 0x4221)
7077 printk(KERN_INFO DRV_NAME
7078 ": Detected Intel PRO/Wireless 2225BG Network "
7081 printk(KERN_INFO DRV_NAME
7082 ": Detected Intel PRO/Wireless 2200BG Network "
7085 priv->ieee->abg_ture = 0;
7086 band = IEEE80211_24GHZ_BAND;
7087 modulation = IEEE80211_OFDM_MODULATION |
7088 IEEE80211_CCK_MODULATION;
7089 priv->adapter = IPW_2200BG;
7090 priv->ieee->mode = IEEE_G | IEEE_B;
7093 priv->ieee->freq_band = band;
7094 priv->ieee->modulation = modulation;
7096 priv->rates_mask = IEEE80211_DEFAULT_RATES_MASK;
7098 priv->missed_beacon_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
7099 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
7101 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
7103 /* If power management is turned on, default to AC mode */
7104 priv->power_mode = IPW_POWER_AC;
7105 priv->tx_power = IPW_DEFAULT_TX_POWER;
7107 err = request_irq(pdev->irq, ipw_isr, SA_SHIRQ, DRV_NAME,
7110 IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
7111 goto out_destroy_workqueue;
7114 SET_MODULE_OWNER(net_dev);
7115 SET_NETDEV_DEV(net_dev, &pdev->dev);
7117 priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
7118 priv->ieee->set_security = shim__set_security;
7120 net_dev->open = ipw_net_open;
7121 net_dev->stop = ipw_net_stop;
7122 net_dev->init = ipw_net_init;
7123 net_dev->get_stats = ipw_net_get_stats;
7124 net_dev->set_multicast_list = ipw_net_set_multicast_list;
7125 net_dev->set_mac_address = ipw_net_set_mac_address;
7126 net_dev->get_wireless_stats = ipw_get_wireless_stats;
7127 net_dev->wireless_handlers = &ipw_wx_handler_def;
7128 net_dev->ethtool_ops = &ipw_ethtool_ops;
7129 net_dev->irq = pdev->irq;
7130 net_dev->base_addr = (unsigned long)priv->hw_base;
7131 net_dev->mem_start = pci_resource_start(pdev, 0);
7132 net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1;
7134 err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
7136 IPW_ERROR("failed to create sysfs device attributes\n");
7137 goto out_release_irq;
7140 err = register_netdev(net_dev);
7142 IPW_ERROR("failed to register network device\n");
7143 goto out_remove_group;
7149 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
7151 free_irq(pdev->irq, priv);
7152 out_destroy_workqueue:
7153 destroy_workqueue(priv->workqueue);
7154 priv->workqueue = NULL;
7156 iounmap(priv->hw_base);
7157 out_pci_release_regions:
7158 pci_release_regions(pdev);
7159 out_pci_disable_device:
7160 pci_disable_device(pdev);
7161 pci_set_drvdata(pdev, NULL);
7163 free_ieee80211(priv->net_dev);
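/* PCI remove: mark the driver as exiting, take the interface down, free the
 * RX/TX queues and the workqueue, then release the IRQ, MMIO mapping, PCI
 * resources and any firmware images still held. */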
7168 static void ipw_pci_remove(struct pci_dev *pdev)
7170 struct ipw_priv *priv = pci_get_drvdata(pdev);
7174 priv->status |= STATUS_EXIT_PENDING;
7176 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
7180 unregister_netdev(priv->net_dev);
7183 ipw_rx_queue_free(priv, priv->rxq);
7186 ipw_tx_queue_free(priv);
7188 /* ipw_down will ensure that there is no more pending work
7189 * in the work queues, so we can safely remove them now. */
7190 if (priv->workqueue) {
7191 cancel_delayed_work(&priv->adhoc_check);
7192 cancel_delayed_work(&priv->gather_stats);
7193 cancel_delayed_work(&priv->request_scan);
7194 cancel_delayed_work(&priv->rf_kill);
7195 cancel_delayed_work(&priv->scan_check);
7196 destroy_workqueue(priv->workqueue);
7197 priv->workqueue = NULL;
7200 free_irq(pdev->irq, priv);
7201 iounmap(priv->hw_base);
7202 pci_release_regions(pdev);
7203 pci_disable_device(pdev);
7204 pci_set_drvdata(pdev, NULL);
7205 free_ieee80211(priv->net_dev);
7209 release_firmware(bootfw);
7210 release_firmware(ucode);
7211 release_firmware(firmware);
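/* Power management: on suspend, take the device down, detach the net device
 * and save PCI state; on resume, restore PCI state, re-apply the
 * RETRY_TIMEOUT workaround and queue ipw_up to bring the device back. */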
7219 static int ipw_pci_suspend(struct pci_dev *pdev, u32 state)
7221 struct ipw_priv *priv = pci_get_drvdata(pdev);
7222 struct net_device *dev = priv->net_dev;
7224 printk(KERN_INFO "%s: Going into suspend...\n", dev->name);
7226 /* Take down the device; powers it off, etc. */
7229 /* Remove the PRESENT state of the device */
7230 netif_device_detach(dev);
7232 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
7233 pci_save_state(pdev, priv->pm_state);
7235 pci_save_state(pdev);
7237 pci_disable_device(pdev);
7238 pci_set_power_state(pdev, state);
7243 static int ipw_pci_resume(struct pci_dev *pdev)
7245 struct ipw_priv *priv = pci_get_drvdata(pdev);
7246 struct net_device *dev = priv->net_dev;
7249 printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);
7251 pci_set_power_state(pdev, 0);
7252 pci_enable_device(pdev);
7253 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
7254 pci_restore_state(pdev, priv->pm_state);
7256 pci_restore_state(pdev);
7259 * Suspend/Resume resets the PCI configuration space, so we have to
7260 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
7261 * from interfering with C3 CPU state. pci_restore_state won't help
7262 * here since it only restores the first 64 bytes of the PCI config header.
7264 pci_read_config_dword(pdev, 0x40, &val);
7265 if ((val & 0x0000ff00) != 0)
7266 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
7268 /* Set the device back into the PRESENT state; this will also wake
7269 * the queue if needed */
7270 netif_device_attach(dev);
7272 /* Bring the device back up */
7273 queue_work(priv->workqueue, &priv->up);
7279 /* driver initialization stuff */
7280 static struct pci_driver ipw_driver = {
7282 .id_table = card_ids,
7283 .probe = ipw_pci_probe,
7284 .remove = __devexit_p(ipw_pci_remove),
7286 .suspend = ipw_pci_suspend,
7287 .resume = ipw_pci_resume,
7291 static int __init ipw_init(void)
7295 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
7296 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
7298 ret = pci_module_init(&ipw_driver);
7300 IPW_ERROR("Unable to initialize PCI module\n");
7304 ret = driver_create_file(&ipw_driver.driver,
7305 &driver_attr_debug_level);
7307 IPW_ERROR("Unable to create driver sysfs file\n");
7308 pci_unregister_driver(&ipw_driver);
7315 static void __exit ipw_exit(void)
7317 driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
7318 pci_unregister_driver(&ipw_driver);
7321 module_param(disable, int, 0444);
7322 MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
7324 module_param(associate, int, 0444);
7325 MODULE_PARM_DESC(associate, "auto associate when scanning (default on)");
7327 module_param(auto_create, int, 0444);
7328 MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
7330 module_param(debug, int, 0444);
7331 MODULE_PARM_DESC(debug, "debug output mask");
7333 module_param(channel, int, 0444);
7334 MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");
7336 module_param(ifname, charp, 0444);
7337 MODULE_PARM_DESC(ifname, "network device name (default eth%d)");
7339 #ifdef CONFIG_IPW_PROMISC
7340 module_param(mode, int, 0444);
7341 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
7343 module_param(mode, int, 0444);
7344 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
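/* Example module load (illustrative values; assumes the module is built as
 * ipw2200):
 *   modprobe ipw2200 channel=6 associate=1 ifname=wlan%d
 */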
7347 module_exit(ipw_exit);
7348 module_init(ipw_init);