/*
 * Intel IXP4xx HSS (synchronous serial port) driver for Linux
 *
 * Copyright (C) 2007-2008 Krzysztof Hałasa <khc@pm.waw.pl>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */
#include <linux/bitops.h>
#include <linux/cdev.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/hdlc.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <mach/qmgr.h>
#define DEBUG_PKT_BYTES 0

#define DRV_NAME "ixp4xx_hss"

#define PKT_EXTRA_FLAGS 0 /* orig 1 */
#define PKT_NUM_PIPES 1 /* 1, 2 or 4 */
#define PKT_PIPE_FIFO_SIZEW 4 /* total 4 dwords per HSS */

#define RX_DESCS 16 /* also length of all RX queues */
#define TX_DESCS 16 /* also length of all TX queues */

#define POOL_ALLOC_SIZE (sizeof(struct desc) * (RX_DESCS + TX_DESCS))
#define RX_SIZE (HDLC_MAX_MRU + 4) /* NPE needs more space */
#define MAX_CLOSE_WAIT 1000 /* microseconds */

#define FRAME_SIZE 256 /* doesn't matter at this point */
#define FRAME_OFFSET 0
#define MAX_CHANNELS (FRAME_SIZE / 8)

#define NAPI_WEIGHT 16
#define HSS0_CHL_RXTRIG_QUEUE 12 /* orig size = 32 dwords */
#define HSS0_PKT_RX_QUEUE 13 /* orig size = 32 dwords */
#define HSS0_PKT_TX0_QUEUE 14 /* orig size = 16 dwords */
#define HSS0_PKT_TX1_QUEUE 15
#define HSS0_PKT_TX2_QUEUE 16
#define HSS0_PKT_TX3_QUEUE 17
#define HSS0_PKT_RXFREE0_QUEUE 18 /* orig size = 16 dwords */
#define HSS0_PKT_RXFREE1_QUEUE 19
#define HSS0_PKT_RXFREE2_QUEUE 20
#define HSS0_PKT_RXFREE3_QUEUE 21
#define HSS0_PKT_TXDONE_QUEUE 22 /* orig size = 64 dwords */

#define HSS1_CHL_RXTRIG_QUEUE 10
#define HSS1_PKT_RX_QUEUE 0
#define HSS1_PKT_TX0_QUEUE 5
#define HSS1_PKT_TX1_QUEUE 6
#define HSS1_PKT_TX2_QUEUE 7
#define HSS1_PKT_TX3_QUEUE 8
#define HSS1_PKT_RXFREE0_QUEUE 1
#define HSS1_PKT_RXFREE1_QUEUE 2
#define HSS1_PKT_RXFREE2_QUEUE 3
#define HSS1_PKT_RXFREE3_QUEUE 4
#define HSS1_PKT_TXDONE_QUEUE 9

#define NPE_PKT_MODE_HDLC 0
#define NPE_PKT_MODE_RAW 1
#define NPE_PKT_MODE_56KMODE 2
#define NPE_PKT_MODE_56KENDIAN_MSB 4
/* PKT_PIPE_HDLC_CFG_WRITE flags */
#define PKT_HDLC_IDLE_ONES 0x1 /* default = flags */
#define PKT_HDLC_CRC_32 0x2 /* default = CRC-16 */
#define PKT_HDLC_MSB_ENDIAN 0x4 /* default = LE */
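/* Example: a CRC-32, MSB-first configuration would be
 * (PKT_HDLC_CRC_32 | PKT_HDLC_MSB_ENDIAN); hss_hdlc_attach() below only
 * ever sets PKT_HDLC_CRC_32 and leaves the other options at their defaults.
 */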
/* hss_config, PCRs */
/* Frame sync sampling, default = active low */
#define PCR_FRM_SYNC_ACTIVE_HIGH 0x40000000
#define PCR_FRM_SYNC_FALLINGEDGE 0x80000000
#define PCR_FRM_SYNC_RISINGEDGE 0xC0000000

/* Frame sync pin: input (default) or output generated off a given clk edge */
#define PCR_FRM_SYNC_OUTPUT_FALLING 0x20000000
#define PCR_FRM_SYNC_OUTPUT_RISING 0x30000000

/* Frame and data clock sampling on edge, default = falling */
#define PCR_FCLK_EDGE_RISING 0x08000000
#define PCR_DCLK_EDGE_RISING 0x04000000

/* Clock direction, default = input */
#define PCR_SYNC_CLK_DIR_OUTPUT 0x02000000

/* Generate/Receive frame pulses, default = enabled */
#define PCR_FRM_PULSE_DISABLED 0x01000000

/* Data rate is full (default) or half the configured clk speed */
#define PCR_HALF_CLK_RATE 0x00200000

/* Invert data between NPE and HSS FIFOs? (default = no) */
#define PCR_DATA_POLARITY_INVERT 0x00100000

/* TX/RX endianness, default = LSB */
#define PCR_MSB_ENDIAN 0x00080000

/* Normal (default) / open drain mode (TX only) */
#define PCR_TX_PINS_OPEN_DRAIN 0x00040000

/* No framing bit transmitted and expected on RX? (default = framing bit) */
#define PCR_SOF_NO_FBIT 0x00020000

/* Drive data pins? */
#define PCR_TX_DATA_ENABLE 0x00010000

/* Voice 56k type: drive the data pins low (default), high, high Z */
#define PCR_TX_V56K_HIGH 0x00002000
#define PCR_TX_V56K_HIGH_IMP 0x00004000

/* Unassigned type: drive the data pins low (default), high, high Z */
#define PCR_TX_UNASS_HIGH 0x00000800
#define PCR_TX_UNASS_HIGH_IMP 0x00001000

/* T1 @ 1.544MHz only: Fbit dictated in FIFO (default) or high Z */
#define PCR_TX_FB_HIGH_IMP 0x00000400
/* 56k data endianness - which bit unused: high (default) or low */
#define PCR_TX_56KE_BIT_0_UNUSED 0x00000200

/* 56k data transmission type: 32/8 bit data (default) or 56K data */
#define PCR_TX_56KS_56K_DATA 0x00000100
/* hss_config, cCR */
/* Number of packetized clients, default = 1 */
#define CCR_NPE_HFIFO_2_HDLC 0x04000000
#define CCR_NPE_HFIFO_3_OR_4HDLC 0x08000000

/* default = no loopback */
#define CCR_LOOPBACK 0x02000000

/* HSS number, default = 0 (first) */
#define CCR_SECOND_HSS 0x01000000
/* hss_config, clkCR: main:10, num:10, denom:12 */
#define CLK42X_SPEED_EXP ((0x3FF << 22) | ( 2 << 12) | 15) /*65 KHz*/

#define CLK42X_SPEED_512KHZ (( 130 << 22) | ( 2 << 12) | 15)
#define CLK42X_SPEED_1536KHZ (( 43 << 22) | ( 18 << 12) | 47)
#define CLK42X_SPEED_1544KHZ (( 43 << 22) | ( 33 << 12) | 192)
#define CLK42X_SPEED_2048KHZ (( 32 << 22) | ( 34 << 12) | 63)
#define CLK42X_SPEED_4096KHZ (( 16 << 22) | ( 34 << 12) | 127)
#define CLK42X_SPEED_8192KHZ (( 8 << 22) | ( 34 << 12) | 255)

#define CLK46X_SPEED_512KHZ (( 130 << 22) | ( 24 << 12) | 127)
#define CLK46X_SPEED_1536KHZ (( 43 << 22) | (152 << 12) | 383)
#define CLK46X_SPEED_1544KHZ (( 43 << 22) | ( 66 << 12) | 385)
#define CLK46X_SPEED_2048KHZ (( 32 << 22) | (280 << 12) | 511)
#define CLK46X_SPEED_4096KHZ (( 16 << 22) | (280 << 12) | 1023)
#define CLK46X_SPEED_8192KHZ (( 8 << 22) | (280 << 12) | 2047)
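/* Each CLK42X/CLK46X word above packs the three clkCR fields
 * (main:10, num:10, denom:12); an equivalent helper (illustrative only,
 * not used by the driver) would be:
 *
 *	#define CLK_CR(main, num, denom) \
 *		(((main) << 22) | ((num) << 12) | (denom))
 *
 * e.g. CLK42X_SPEED_2048KHZ == CLK_CR(32, 34, 63).
 */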
/* hss_config, LUT entries */
#define TDMMAP_UNASSIGNED 0
#define TDMMAP_HDLC 1 /* HDLC - packetized */
#define TDMMAP_VOICE56K 2 /* Voice56K - 7-bit channelized */
#define TDMMAP_VOICE64K 3 /* Voice64K - 8-bit channelized */
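/* Each LUT entry is one of the 2-bit TDMMAP_* codes above, so one 32-bit
 * configuration word holds 16 timeslot entries (see hss_config_set_lut()).
 */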
/* offsets into HSS config */
#define HSS_CONFIG_TX_PCR 0x00 /* port configuration registers */
#define HSS_CONFIG_RX_PCR 0x04
#define HSS_CONFIG_CORE_CR 0x08 /* loopback control, HSS# */
#define HSS_CONFIG_CLOCK_CR 0x0C /* clock generator control */
#define HSS_CONFIG_TX_FCR 0x10 /* frame configuration registers */
#define HSS_CONFIG_RX_FCR 0x14
#define HSS_CONFIG_TX_LUT 0x18 /* channel look-up tables */
#define HSS_CONFIG_RX_LUT 0x38
/* NPE command codes */
/* writes the ConfigWord value to the location specified by offset */
#define PORT_CONFIG_WRITE 0x40

/* triggers the NPE to load the contents of the configuration table */
#define PORT_CONFIG_LOAD 0x41

/* triggers the NPE to return an HssErrorReadResponse message */
#define PORT_ERROR_READ 0x42

/* triggers the NPE to reset internal status and enable the HssPacketized
   operation for the flow specified by pPipe */
#define PKT_PIPE_FLOW_ENABLE 0x50
#define PKT_PIPE_FLOW_DISABLE 0x51
#define PKT_NUM_PIPES_WRITE 0x52
#define PKT_PIPE_FIFO_SIZEW_WRITE 0x53
#define PKT_PIPE_HDLC_CFG_WRITE 0x54
#define PKT_PIPE_IDLE_PATTERN_WRITE 0x55
#define PKT_PIPE_RX_SIZE_WRITE 0x56
#define PKT_PIPE_MODE_WRITE 0x57
/* HDLC packet status values - desc->status */
#define ERR_SHUTDOWN 1 /* stop or shutdown occurrence */
#define ERR_HDLC_ALIGN 2 /* HDLC alignment error */
#define ERR_HDLC_FCS 3 /* HDLC Frame Check Sum error */
#define ERR_RXFREE_Q_EMPTY 4 /* RX-free queue became empty while receiving
				 this packet (if buf_len < pkt_len) */
#define ERR_HDLC_TOO_LONG 5 /* HDLC frame size too long */
#define ERR_HDLC_ABORT 6 /* abort sequence received */
#define ERR_DISCONNECTING 7 /* disconnect is in progress */
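/* Buffer handling depends on CPU endianness: big-endian (__ARMEB__) builds
 * hand sk_buffs to the NPE directly, while little-endian builds stage data
 * in plain kmalloc'd buffers and byte-swap words with memcpy_swab32().
 */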
#ifdef __ARMEB__
typedef struct sk_buff buffer_t;
#define free_buffer dev_kfree_skb
#define free_buffer_irq dev_kfree_skb_irq
#else
typedef void buffer_t;
#define free_buffer kfree
#define free_buffer_irq kfree
#endif
	struct net_device *netdev;
	struct napi_struct napi;
	struct hss_plat_info *plat;
	buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
	struct desc *desc_tab;	/* coherent */

	unsigned int clock_type, clock_rate, loopback;
	unsigned int initialized, carrier;
/* NPE message structure */
	u8 cmd, unused, hss_port, index;
	struct { u8 data8a, data8b, data8c, data8d; };
	struct { u16 data16a, data16b; };
	struct { u32 data32; };

	u8 index, hss_port, unused, cmd;
	struct { u8 data8d, data8c, data8b, data8a; };
	struct { u16 data16b, data16a; };
	struct { u32 data32; };
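/* The reversed field order above (presumably selected by the same
 * endianness conditional as buffer_t) keeps the 8-byte message image seen
 * by the NPE identical on big- and little-endian CPUs.
 */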
/* HDLC packet descriptor */
	u32 next;	/* pointer to next buffer, unused */

	u16 buf_len;	/* buffer length */
	u16 pkt_len;	/* packet length */
	u32 data;	/* pointer to data buffer in RAM */

	u16 pkt_len;	/* packet length */
	u16 buf_len;	/* buffer length */
	u32 data;	/* pointer to data buffer in RAM */
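/* Likewise, buf_len and pkt_len swap places between the two layouts so the
 * 32-bit descriptor words match what the NPE expects on either endianness.
 */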
#define rx_desc_phys(port, n)	((port)->desc_tab_phys + \
				 (n) * sizeof(struct desc))
#define rx_desc_ptr(port, n)	(&(port)->desc_tab[n])

#define tx_desc_phys(port, n)	((port)->desc_tab_phys + \
				 ((n) + RX_DESCS) * sizeof(struct desc))
#define tx_desc_ptr(port, n)	(&(port)->desc_tab[(n) + RX_DESCS])
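/* desc_tab is one coherent allocation of RX_DESCS RX descriptors followed
 * by TX_DESCS TX descriptors (POOL_ALLOC_SIZE bytes); these macros convert
 * a descriptor index to its virtual pointer or to the physical address
 * passed through the queue manager.
 */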
/*****************************************************************************
 ****************************************************************************/

static int ports_open;
static struct dma_pool *dma_pool;
static spinlock_t npe_lock;
static const struct {
	int tx, txdone, rx, rxfree;
} queue_ids[2] = {{HSS0_PKT_TX0_QUEUE, HSS0_PKT_TXDONE_QUEUE, HSS0_PKT_RX_QUEUE,
		   HSS0_PKT_RXFREE0_QUEUE},
		  {HSS1_PKT_TX0_QUEUE, HSS1_PKT_TXDONE_QUEUE, HSS1_PKT_RX_QUEUE,
		   HSS1_PKT_RXFREE0_QUEUE},
/*****************************************************************************
 ****************************************************************************/

static inline struct port* dev_to_port(struct net_device *dev)
	return dev_to_hdlc(dev)->priv;
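/* Copy cnt 32-bit words while swapping the byte order of each one
 * (0x01020304 becomes 0x04030201); used on little-endian builds to match
 * the NPE's byte order.
 */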
static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
	for (i = 0; i < cnt; i++)
		dest[i] = swab32(src[i]);
/*****************************************************************************
 ****************************************************************************/

static void hss_npe_send(struct port *port, struct msg *msg, const char* what)
	u32 *val = (u32*)msg;
	if (npe_send_message(port->npe, msg, what)) {
		printk(KERN_CRIT "HSS-%i: unable to send command [%08X:%08X]"
		       " to %s\n", port->id, val[0], val[1],
		       npe_name(port->npe));
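/* Program the channel look-up tables: every timeslot is mapped to
 * TDMMAP_HDLC, and each completed 32-bit LUT word is written first to the
 * TX table and then, at the corresponding offset, to the RX table.
 */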
static void hss_config_set_lut(struct port *port)
	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_WRITE;
	msg.hss_port = port->id;

	for (ch = 0; ch < MAX_CHANNELS; ch++) {
		msg.data32 |= TDMMAP_HDLC << 30;

		msg.index = HSS_CONFIG_TX_LUT + ((ch / 4) & ~3);
		hss_npe_send(port, &msg, "HSS_SET_TX_LUT");

		msg.index += HSS_CONFIG_RX_LUT - HSS_CONFIG_TX_LUT;
		hss_npe_send(port, &msg, "HSS_SET_RX_LUT");
static void hss_config(struct port *port)
	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_WRITE;
	msg.hss_port = port->id;
	msg.index = HSS_CONFIG_TX_PCR;
	msg.data32 = PCR_FRM_SYNC_OUTPUT_RISING | PCR_MSB_ENDIAN |
		PCR_TX_DATA_ENABLE | PCR_SOF_NO_FBIT;
	if (port->clock_type == CLOCK_INT)
		msg.data32 |= PCR_SYNC_CLK_DIR_OUTPUT;
	hss_npe_send(port, &msg, "HSS_SET_TX_PCR");
	msg.index = HSS_CONFIG_RX_PCR;
	msg.data32 ^= PCR_TX_DATA_ENABLE | PCR_DCLK_EDGE_RISING;
	hss_npe_send(port, &msg, "HSS_SET_RX_PCR");

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_WRITE;
	msg.hss_port = port->id;
	msg.index = HSS_CONFIG_CORE_CR;
	msg.data32 = (port->loopback ? CCR_LOOPBACK : 0) |
		(port->id ? CCR_SECOND_HSS : 0);
	hss_npe_send(port, &msg, "HSS_SET_CORE_CR");
	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_WRITE;
	msg.hss_port = port->id;
	msg.index = HSS_CONFIG_CLOCK_CR;
	msg.data32 = CLK42X_SPEED_2048KHZ /* FIXME */;
	hss_npe_send(port, &msg, "HSS_SET_CLOCK_CR");
	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_WRITE;
	msg.hss_port = port->id;
	msg.index = HSS_CONFIG_TX_FCR;
	msg.data16a = FRAME_OFFSET;
	msg.data16b = FRAME_SIZE - 1;
	hss_npe_send(port, &msg, "HSS_SET_TX_FCR");

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_WRITE;
	msg.hss_port = port->id;
	msg.index = HSS_CONFIG_RX_FCR;
	msg.data16a = FRAME_OFFSET;
	msg.data16b = FRAME_SIZE - 1;
	hss_npe_send(port, &msg, "HSS_SET_RX_FCR");

	hss_config_set_lut(port);

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_LOAD;
	msg.hss_port = port->id;
	hss_npe_send(port, &msg, "HSS_LOAD_CONFIG");

	if (npe_recv_message(port->npe, &msg, "HSS_LOAD_CONFIG") ||
	    /* HSS_LOAD_CONFIG for port #1 returns port_id = #4 */
	    msg.cmd != PORT_CONFIG_LOAD || msg.data32) {
		printk(KERN_CRIT "HSS-%i: HSS_LOAD_CONFIG failed\n",

	/* HDLC may stop working without this - check FIXME */
	npe_recv_message(port->npe, &msg, "FLUSH_IT");
static void hss_set_hdlc_cfg(struct port *port)
	memset(&msg, 0, sizeof(msg));
	msg.cmd = PKT_PIPE_HDLC_CFG_WRITE;
	msg.hss_port = port->id;
	msg.data8a = port->hdlc_cfg; /* rx_cfg */
	msg.data8b = port->hdlc_cfg | (PKT_EXTRA_FLAGS << 3); /* tx_cfg */
	hss_npe_send(port, &msg, "HSS_SET_HDLC_CFG");
static u32 hss_get_status(struct port *port)
	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_ERROR_READ;
	msg.hss_port = port->id;
	hss_npe_send(port, &msg, "PORT_ERROR_READ");
	if (npe_recv_message(port->npe, &msg, "PORT_ERROR_READ")) {
		printk(KERN_CRIT "HSS-%i: unable to read HSS status\n",
static void hss_start_hdlc(struct port *port)
	memset(&msg, 0, sizeof(msg));
	msg.cmd = PKT_PIPE_FLOW_ENABLE;
	msg.hss_port = port->id;
	hss_npe_send(port, &msg, "HSS_ENABLE_PKT_PIPE");

static void hss_stop_hdlc(struct port *port)
	memset(&msg, 0, sizeof(msg));
	msg.cmd = PKT_PIPE_FLOW_DISABLE;
	msg.hss_port = port->id;
	hss_npe_send(port, &msg, "HSS_DISABLE_PKT_PIPE");
	hss_get_status(port); /* make sure it's halted */
static int hss_load_firmware(struct port *port)
	if (port->initialized)

	if (!npe_running(port->npe) &&
	    (err = npe_load_firmware(port->npe, npe_name(port->npe),

	/* HDLC mode configuration */
	memset(&msg, 0, sizeof(msg));
	msg.cmd = PKT_NUM_PIPES_WRITE;
	msg.hss_port = port->id;
	msg.data8a = PKT_NUM_PIPES;
	hss_npe_send(port, &msg, "HSS_SET_PKT_PIPES");

	msg.cmd = PKT_PIPE_FIFO_SIZEW_WRITE;
	msg.data8a = PKT_PIPE_FIFO_SIZEW;
	hss_npe_send(port, &msg, "HSS_SET_PKT_FIFO");

	msg.cmd = PKT_PIPE_MODE_WRITE;
	msg.data8a = NPE_PKT_MODE_HDLC;
	/* msg.data8b = inv_mask */
	/* msg.data8c = or_mask */
	hss_npe_send(port, &msg, "HSS_SET_PKT_MODE");

	msg.cmd = PKT_PIPE_RX_SIZE_WRITE;
	msg.data16a = HDLC_MAX_MRU; /* including CRC */
	hss_npe_send(port, &msg, "HSS_SET_PKT_RX_SIZE");

	msg.cmd = PKT_PIPE_IDLE_PATTERN_WRITE;
	msg.data32 = 0x7F7F7F7F; /* ??? FIXME */
	hss_npe_send(port, &msg, "HSS_SET_PKT_IDLE");

	port->initialized = 1;
/*****************************************************************************
 * packetized (HDLC) operation
 ****************************************************************************/

static inline void debug_pkt(struct net_device *dev, const char *func,
	printk(KERN_DEBUG "%s: %s(%i)", dev->name, func, len);
	for (i = 0; i < len; i++) {
		if (i >= DEBUG_PKT_BYTES)
		printk("%s%02X", !(i % 4) ? " " : "", data[i]);

static inline void debug_desc(u32 phys, struct desc *desc)
	printk(KERN_DEBUG "%X: %X %3X %3X %08X %X %X\n",
	       phys, desc->next, desc->buf_len, desc->pkt_len,
	       desc->data, desc->status, desc->error_count);
static inline int queue_get_desc(unsigned int queue, struct port *port,
	u32 phys, tab_phys, n_desc;

	if (!(phys = qmgr_get_entry(queue)))

	tab_phys = is_tx ? tx_desc_phys(port, 0) : rx_desc_phys(port, 0);
	tab = is_tx ? tx_desc_ptr(port, 0) : rx_desc_ptr(port, 0);
	n_desc = (phys - tab_phys) / sizeof(struct desc);
	BUG_ON(n_desc >= (is_tx ? TX_DESCS : RX_DESCS));
	debug_desc(phys, &tab[n_desc]);
	BUG_ON(tab[n_desc].next);

static inline void queue_put_desc(unsigned int queue, u32 phys,
	debug_desc(phys, desc);
	qmgr_put_entry(queue, phys);
	/* Don't check for queue overflow here, we've allocated sufficient
	   length and queues >= 32 don't support this check anyway. */
static inline void dma_unmap_tx(struct port *port, struct desc *desc)
	dma_unmap_single(&port->netdev->dev, desc->data,
			 desc->buf_len, DMA_TO_DEVICE);
	dma_unmap_single(&port->netdev->dev, desc->data & ~3,
			 ALIGN((desc->data & 3) + desc->buf_len, 4),
static void hss_hdlc_set_carrier(void *pdev, int carrier)
	struct net_device *netdev = pdev;
	struct port *port = dev_to_port(netdev);

	spin_lock_irqsave(&npe_lock, flags);
	port->carrier = carrier;
	if (!port->loopback) {
		netif_carrier_on(netdev);
		netif_carrier_off(netdev);
	spin_unlock_irqrestore(&npe_lock, flags);
static void hss_hdlc_rx_irq(void *pdev)
	struct net_device *dev = pdev;
	struct port *port = dev_to_port(dev);

	printk(KERN_DEBUG "%s: hss_hdlc_rx_irq\n", dev->name);
	qmgr_disable_irq(queue_ids[port->id].rx);
	napi_schedule(&port->napi);
static int hss_hdlc_poll(struct napi_struct *napi, int budget)
	struct port *port = container_of(napi, struct port, napi);
	struct net_device *dev = port->netdev;
	unsigned int rxq = queue_ids[port->id].rx;
	unsigned int rxfreeq = queue_ids[port->id].rxfree;

	printk(KERN_DEBUG "%s: hss_hdlc_poll\n", dev->name);

	while (received < budget) {
		struct sk_buff *temp;

		if ((n = queue_get_desc(rxq, port, 0)) < 0) {
			printk(KERN_DEBUG "%s: hss_hdlc_poll"
			       " napi_complete\n", dev->name);
			qmgr_enable_irq(rxq);
			if (!qmgr_stat_empty(rxq) &&
			    napi_reschedule(napi)) {
				printk(KERN_DEBUG "%s: hss_hdlc_poll"
				       " napi_reschedule succeeded\n",
				qmgr_disable_irq(rxq);
			printk(KERN_DEBUG "%s: hss_hdlc_poll all done\n",
			return received; /* all work done */
		desc = rx_desc_ptr(port, n);
#if 0 /* FIXME - error_count counts modulo 256, perhaps we should use it */
		if (desc->error_count)
			printk(KERN_DEBUG "%s: hss_hdlc_poll status 0x%02X"
			       " errors %u\n", dev->name, desc->status,

		switch (desc->status) {
			if ((skb = netdev_alloc_skb(dev, RX_SIZE)) != NULL) {
				phys = dma_map_single(&dev->dev, skb->data,
				if (dma_mapping_error(&dev->dev, phys)) {
			skb = netdev_alloc_skb(dev, desc->pkt_len);
				dev->stats.rx_dropped++;
			dev->stats.rx_frame_errors++;
			dev->stats.rx_errors++;
			dev->stats.rx_crc_errors++;
			dev->stats.rx_errors++;
		case ERR_HDLC_TOO_LONG:
			dev->stats.rx_length_errors++;
			dev->stats.rx_errors++;
		default: /* FIXME - remove printk */
			printk(KERN_ERR "%s: hss_hdlc_poll: status 0x%02X"
			       " errors %u\n", dev->name, desc->status,
			dev->stats.rx_errors++;

		/* put the desc back on RX-ready queue */
		desc->buf_len = RX_SIZE;
		desc->pkt_len = desc->status = 0;
		queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
		/* process received frame */
		skb = port->rx_buff_tab[n];
		dma_unmap_single(&dev->dev, desc->data,
				 RX_SIZE, DMA_FROM_DEVICE);
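		/* Little-endian path: the received words are byte-swapped
		 * into the freshly allocated skb while the original
		 * kmalloc'd buffer stays mapped and is put back on the
		 * RX-free queue. */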
		dma_sync_single(&dev->dev, desc->data,
				RX_SIZE, DMA_FROM_DEVICE);
		memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
			      ALIGN(desc->pkt_len, 4) / 4);
		skb_put(skb, desc->pkt_len);

		debug_pkt(dev, "hss_hdlc_poll", skb->data, skb->len);

		skb->protocol = hdlc_type_trans(skb, dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		netif_receive_skb(skb);

		/* put the new buffer on RX-free queue */
		port->rx_buff_tab[n] = temp;
		desc->buf_len = RX_SIZE;
		queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);

	printk(KERN_DEBUG "hss_hdlc_poll: end, not all work done\n");
	return received; /* not all work done */
static void hss_hdlc_txdone_irq(void *pdev)
	struct net_device *dev = pdev;
	struct port *port = dev_to_port(dev);

	printk(KERN_DEBUG DRV_NAME ": hss_hdlc_txdone_irq\n");

	while ((n_desc = queue_get_desc(queue_ids[port->id].txdone,
		desc = tx_desc_ptr(port, n_desc);

		dev->stats.tx_packets++;
		dev->stats.tx_bytes += desc->pkt_len;

		dma_unmap_tx(port, desc);
		printk(KERN_DEBUG "%s: hss_hdlc_txdone_irq free %p\n",
		       dev->name, port->tx_buff_tab[n_desc]);
		free_buffer_irq(port->tx_buff_tab[n_desc]);
		port->tx_buff_tab[n_desc] = NULL;
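		/* If TX-ready looked nearly empty before this descriptor is
		 * returned to it, it was in fact empty, so the netif queue
		 * may have been stopped by hss_hdlc_xmit() and is woken
		 * below. */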
		/* really empty in fact */
		start = qmgr_stat_nearly_empty(port->plat->txreadyq);
		queue_put_desc(port->plat->txreadyq,
			       tx_desc_phys(port, n_desc), desc);
			printk(KERN_DEBUG "%s: hss_hdlc_txdone_irq xmit"
			       " ready\n", dev->name);
			netif_wake_queue(dev);
static int hss_hdlc_xmit(struct sk_buff *skb, struct net_device *dev)
	struct port *port = dev_to_port(dev);
	unsigned int txreadyq = port->plat->txreadyq;
	int len, offset, bytes, n;

	printk(KERN_DEBUG "%s: hss_hdlc_xmit\n", dev->name);

	if (unlikely(skb->len > HDLC_MAX_MRU)) {
		dev->stats.tx_errors++;

	debug_pkt(dev, "hss_hdlc_xmit", skb->data, skb->len);

	offset = 0; /* no need to keep alignment */
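	/* Little-endian path: copy the frame with memcpy_swab32() into a
	 * word-aligned kmalloc'd bounce buffer (the skb can be freed once
	 * the copy is made); big-endian builds map the skb data directly. */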
	offset = (int)skb->data & 3; /* keep 32-bit alignment */
	bytes = ALIGN(offset + len, 4);
	if (!(mem = kmalloc(bytes, GFP_ATOMIC))) {
		dev->stats.tx_dropped++;
	memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);

	phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(&dev->dev, phys)) {
		dev->stats.tx_dropped++;

	n = queue_get_desc(txreadyq, port, 1);
	desc = tx_desc_ptr(port, n);

	port->tx_buff_tab[n] = skb;
	port->tx_buff_tab[n] = mem;
	desc->data = phys + offset;
	desc->buf_len = desc->pkt_len = len;

	queue_put_desc(queue_ids[port->id].tx, tx_desc_phys(port, n), desc);
	dev->trans_start = jiffies;

	if (qmgr_stat_nearly_empty(txreadyq)) { /* really empty in fact */
		printk(KERN_DEBUG "%s: hss_hdlc_xmit queue full\n", dev->name);
		netif_stop_queue(dev);
		/* we could miss TX ready interrupt */
		if (!qmgr_stat_nearly_empty(txreadyq)) {
			printk(KERN_DEBUG "%s: hss_hdlc_xmit ready again\n",
			netif_wake_queue(dev);

	printk(KERN_DEBUG "%s: hss_hdlc_xmit end\n", dev->name);
static int request_hdlc_queues(struct port *port)
	err = qmgr_request_queue(queue_ids[port->id].rxfree, RX_DESCS, 0, 0,
				 "%s:RX-free", port->netdev->name);

	err = qmgr_request_queue(queue_ids[port->id].rx, RX_DESCS, 0, 0,
				 "%s:RX", port->netdev->name);

	err = qmgr_request_queue(queue_ids[port->id].tx, TX_DESCS, 0, 0,
				 "%s:TX", port->netdev->name);

	err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0,
				 "%s:TX-ready", port->netdev->name);

	err = qmgr_request_queue(queue_ids[port->id].txdone, TX_DESCS, 0, 0,
				 "%s:TX-done", port->netdev->name);

	qmgr_release_queue(port->plat->txreadyq);
	qmgr_release_queue(queue_ids[port->id].tx);
	qmgr_release_queue(queue_ids[port->id].rx);
	qmgr_release_queue(queue_ids[port->id].rxfree);
	printk(KERN_DEBUG "%s: unable to request hardware queues\n",
static void release_hdlc_queues(struct port *port)
	qmgr_release_queue(queue_ids[port->id].rxfree);
	qmgr_release_queue(queue_ids[port->id].rx);
	qmgr_release_queue(queue_ids[port->id].txdone);
	qmgr_release_queue(queue_ids[port->id].tx);
	qmgr_release_queue(port->plat->txreadyq);
static int init_hdlc_queues(struct port *port)
	if (!(dma_pool = dma_pool_create(DRV_NAME, NULL,
					 POOL_ALLOC_SIZE, 32, 0)))

	if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
					      &port->desc_tab_phys)))
	memset(port->desc_tab, 0, POOL_ALLOC_SIZE);
	memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */
	memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab));

	/* Setup RX buffers */
	for (i = 0; i < RX_DESCS; i++) {
		struct desc *desc = rx_desc_ptr(port, i);

		if (!(buff = netdev_alloc_skb(port->netdev, RX_SIZE)))
		if (!(buff = kmalloc(RX_SIZE, GFP_KERNEL)))

		desc->buf_len = RX_SIZE;
		desc->data = dma_map_single(&port->netdev->dev, data,
					    RX_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&port->netdev->dev, desc->data)) {
		port->rx_buff_tab[i] = buff;
static void destroy_hdlc_queues(struct port *port)
	if (port->desc_tab) {
		for (i = 0; i < RX_DESCS; i++) {
			struct desc *desc = rx_desc_ptr(port, i);
			buffer_t *buff = port->rx_buff_tab[i];
				dma_unmap_single(&port->netdev->dev,
		for (i = 0; i < TX_DESCS; i++) {
			struct desc *desc = tx_desc_ptr(port, i);
			buffer_t *buff = port->tx_buff_tab[i];
				dma_unmap_tx(port, desc);
		dma_pool_free(dma_pool, port->desc_tab, port->desc_tab_phys);
		port->desc_tab = NULL;

	if (!ports_open && dma_pool) {
		dma_pool_destroy(dma_pool);
static int hss_hdlc_open(struct net_device *dev)
	struct port *port = dev_to_port(dev);
	unsigned long flags;

	if ((err = hdlc_open(dev)))

	if ((err = hss_load_firmware(port)))
		goto err_hdlc_close;

	if ((err = request_hdlc_queues(port)))
		goto err_hdlc_close;

	if ((err = init_hdlc_queues(port)))
		goto err_destroy_queues;

	spin_lock_irqsave(&npe_lock, flags);
	if (port->plat->open)
		if ((err = port->plat->open(port->id, dev,
					    hss_hdlc_set_carrier)))
	spin_unlock_irqrestore(&npe_lock, flags);

	/* Populate queues with buffers, no failure after this point */
	for (i = 0; i < TX_DESCS; i++)
		queue_put_desc(port->plat->txreadyq,
			       tx_desc_phys(port, i), tx_desc_ptr(port, i));

	for (i = 0; i < RX_DESCS; i++)
		queue_put_desc(queue_ids[port->id].rxfree,
			       rx_desc_phys(port, i), rx_desc_ptr(port, i));

	napi_enable(&port->napi);
	netif_start_queue(dev);

	qmgr_set_irq(queue_ids[port->id].rx, QUEUE_IRQ_SRC_NOT_EMPTY,
		     hss_hdlc_rx_irq, dev);

	qmgr_set_irq(queue_ids[port->id].txdone, QUEUE_IRQ_SRC_NOT_EMPTY,
		     hss_hdlc_txdone_irq, dev);
	qmgr_enable_irq(queue_ids[port->id].txdone);

	hss_set_hdlc_cfg(port);

	hss_start_hdlc(port);

	/* we may already have RX data, enables IRQ */
	napi_schedule(&port->napi);

	spin_unlock_irqrestore(&npe_lock, flags);
	destroy_hdlc_queues(port);
	release_hdlc_queues(port);
static int hss_hdlc_close(struct net_device *dev)
	struct port *port = dev_to_port(dev);
	unsigned long flags;
	int i, buffs = RX_DESCS; /* allocated RX buffers */

	spin_lock_irqsave(&npe_lock, flags);
	qmgr_disable_irq(queue_ids[port->id].rx);
	netif_stop_queue(dev);
	napi_disable(&port->napi);

	hss_stop_hdlc(port);

	while (queue_get_desc(queue_ids[port->id].rxfree, port, 0) >= 0)
	while (queue_get_desc(queue_ids[port->id].rx, port, 0) >= 0)

		printk(KERN_CRIT "%s: unable to drain RX queue, %i buffer(s)"
		       " left in NPE\n", dev->name, buffs);

	while (queue_get_desc(queue_ids[port->id].tx, port, 1) >= 0)
		buffs--; /* cancel TX */

	while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0)
	} while (++i < MAX_CLOSE_WAIT);

		printk(KERN_CRIT "%s: unable to drain TX queue, %i buffer(s) "
		       "left in NPE\n", dev->name, buffs);
	printk(KERN_DEBUG "Draining TX queues took %i cycles\n", i);

	qmgr_disable_irq(queue_ids[port->id].txdone);

	if (port->plat->close)
		port->plat->close(port->id, dev);
	spin_unlock_irqrestore(&npe_lock, flags);

	destroy_hdlc_queues(port);
	release_hdlc_queues(port);
static int hss_hdlc_attach(struct net_device *dev, unsigned short encoding,
			   unsigned short parity)
	struct port *port = dev_to_port(dev);

	if (encoding != ENCODING_NRZ)

	case PARITY_CRC16_PR1_CCITT:
	case PARITY_CRC32_PR1_CCITT:
		port->hdlc_cfg = PKT_HDLC_CRC_32;
static int hss_hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
	const size_t size = sizeof(sync_serial_settings);
	sync_serial_settings new_line;
	sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
	struct port *port = dev_to_port(dev);
	unsigned long flags;

	if (cmd != SIOCWANDEV)
		return hdlc_ioctl(dev, ifr, cmd);

	switch (ifr->ifr_settings.type) {
		ifr->ifr_settings.type = IF_IFACE_V35;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
		memset(&new_line, 0, sizeof(new_line));
		new_line.clock_type = port->clock_type;
		new_line.clock_rate = 2048000; /* FIXME */
		new_line.loopback = port->loopback;
		if (copy_to_user(line, &new_line, size))

	case IF_IFACE_SYNC_SERIAL:
		if (!capable(CAP_NET_ADMIN))
		if (copy_from_user(&new_line, line, size))

		clk = new_line.clock_type;
		if (port->plat->set_clock)
			clk = port->plat->set_clock(port->id, clk);

		if (clk != CLOCK_EXT && clk != CLOCK_INT)
			return -EINVAL; /* No such clock setting */

		if (new_line.loopback != 0 && new_line.loopback != 1)

		port->clock_type = clk; /* Update settings */
		/* FIXME port->clock_rate = new_line.clock_rate */;
		port->loopback = new_line.loopback;

		spin_lock_irqsave(&npe_lock, flags);

		if (dev->flags & IFF_UP)

		if (port->loopback || port->carrier)
			netif_carrier_on(port->netdev);
			netif_carrier_off(port->netdev);
		spin_unlock_irqrestore(&npe_lock, flags);

		return hdlc_ioctl(dev, ifr, cmd);
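/* Userspace configuration sketch (illustrative only, not part of the
 * driver), assuming a port named "hdlc0" and an already-open socket fd:
 *
 *	sync_serial_settings set = { .clock_rate = 2048000,
 *				     .clock_type = CLOCK_INT,
 *				     .loopback = 0 };
 *	struct ifreq ifr;
 *
 *	strcpy(ifr.ifr_name, "hdlc0");
 *	ifr.ifr_settings.type = IF_IFACE_SYNC_SERIAL;
 *	ifr.ifr_settings.size = sizeof(set);
 *	ifr.ifr_settings.ifs_ifsu.sync = &set;
 *	ioctl(fd, SIOCWANDEV, &ifr);
 *
 * The same ioctl with type IF_GET_IFACE reads the current settings back.
 */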
/*****************************************************************************
 ****************************************************************************/

static const struct net_device_ops hss_hdlc_ops = {
	.ndo_open       = hss_hdlc_open,
	.ndo_stop       = hss_hdlc_close,
	.ndo_change_mtu = hdlc_change_mtu,
	.ndo_start_xmit = hdlc_start_xmit,
	.ndo_do_ioctl   = hss_hdlc_ioctl,
static int __devinit hss_init_one(struct platform_device *pdev)
	struct net_device *dev;

	if ((port = kzalloc(sizeof(*port), GFP_KERNEL)) == NULL)

	if ((port->npe = npe_request(0)) == NULL) {

	if ((port->netdev = dev = alloc_hdlcdev(port)) == NULL) {

	SET_NETDEV_DEV(dev, &pdev->dev);
	hdlc = dev_to_hdlc(dev);
	hdlc->attach = hss_hdlc_attach;
	hdlc->xmit = hss_hdlc_xmit;
	dev->netdev_ops = &hss_hdlc_ops;
	dev->tx_queue_len = 100;
	port->clock_type = CLOCK_EXT;
	port->clock_rate = 2048000;
	port->id = pdev->id;
	port->dev = &pdev->dev;
	port->plat = pdev->dev.platform_data;
	netif_napi_add(dev, &port->napi, hss_hdlc_poll, NAPI_WEIGHT);

	if ((err = register_hdlc_device(dev)))
		goto err_free_netdev;

	platform_set_drvdata(pdev, port);

	printk(KERN_INFO "%s: HSS-%i\n", dev->name, port->id);
	npe_release(port->npe);

static int __devexit hss_remove_one(struct platform_device *pdev)
	struct port *port = platform_get_drvdata(pdev);

	unregister_hdlc_device(port->netdev);
	free_netdev(port->netdev);
	npe_release(port->npe);
	platform_set_drvdata(pdev, NULL);
static struct platform_driver ixp4xx_hss_driver = {
	.driver.name	= DRV_NAME,
	.probe		= hss_init_one,
	.remove		= hss_remove_one,

static int __init hss_init_module(void)
	if ((ixp4xx_read_feature_bits() &
	     (IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS)) !=
	    (IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS))

	spin_lock_init(&npe_lock);

	return platform_driver_register(&ixp4xx_hss_driver);
static void __exit hss_cleanup_module(void)
	platform_driver_unregister(&ixp4xx_hss_driver);

MODULE_AUTHOR("Krzysztof Halasa");
MODULE_DESCRIPTION("Intel IXP4xx HSS driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:ixp4xx_hss");
module_init(hss_init_module);
module_exit(hss_cleanup_module);