/*
 * HighPoint RR3xxx/4xxx controller driver for Linux
 * Copyright (C) 2006-2007 HighPoint Technologies, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Please report bugs/comments/suggestions to linux@highpoint-tech.com
 *
 * For more information, visit http://www.highpoint-tech.com
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/spinlock.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/div64.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>

#include "hptiop.h"

MODULE_AUTHOR("HighPoint Technologies, Inc.");
MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx/4xxx Controller Driver");

static char driver_name[] = "hptiop";
static const char driver_name_long[] = "RocketRAID 3xxx/4xxx Controller driver";
static const char driver_ver[] = "v1.3 (071203)";

static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec);
static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
                                struct hpt_iop_request_scsi_command *req);
static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 tag);
static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag);
static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg);

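/*
 * Wait for the ITL firmware to report ready: poll the inbound queue
 * for up to @millisec ms.  A ready IOP posts a request there, which we
 * hand straight back through the outbound queue to acknowledge it.
 */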
static int iop_wait_ready_itl(struct hptiop_hba *hba, u32 millisec)
{
        u32 req = 0;
        int i;

        for (i = 0; i < millisec; i++) {
                req = readl(&hba->u.itl.iop->inbound_queue);
                if (req != IOPMU_QUEUE_EMPTY)
                        break;
                msleep(1);
        }

        if (req != IOPMU_QUEUE_EMPTY) {
                writel(req, &hba->u.itl.iop->outbound_queue);
                readl(&hba->u.itl.iop->outbound_intstatus);
                return 0;
        }

        return -1;
}

static int iop_wait_ready_mv(struct hptiop_hba *hba, u32 millisec)
{
        return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec);
}

static void hptiop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
{
        if (tag & IOPMU_QUEUE_ADDR_HOST_BIT)
                hptiop_host_request_callback_itl(hba,
                                tag & ~IOPMU_QUEUE_ADDR_HOST_BIT);
        else
                hptiop_iop_request_callback_itl(hba, tag);
}

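/*
 * Drain the ITL outbound queue.  Host-owned tags are dispatched to the
 * request callbacks; for synchronous IOP-memory requests the context
 * word doubles as a handshake flag, so a completion that arrives before
 * the synchronous poller runs is not lost.
 */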
static void hptiop_drain_outbound_queue_itl(struct hptiop_hba *hba)
{
        u32 req;

        while ((req = readl(&hba->u.itl.iop->outbound_queue)) !=
                                                IOPMU_QUEUE_EMPTY) {

                if (req & IOPMU_QUEUE_MASK_HOST_BITS)
                        hptiop_request_callback_itl(hba, req);
                else {
                        struct hpt_iop_request_header __iomem *p;

                        p = (struct hpt_iop_request_header __iomem *)
                                ((char __iomem *)hba->u.itl.iop + req);

                        if (readl(&p->flags) & IOP_REQUEST_FLAG_SYNC_REQUEST) {
                                if (readl(&p->context))
                                        hptiop_request_callback_itl(hba, req);
                                else
                                        writel(1, &p->context);
                        } else
                                hptiop_request_callback_itl(hba, req);
                }
        }
}

static int iop_intr_itl(struct hptiop_hba *hba)
{
        struct hpt_iopmu_itl __iomem *iop = hba->u.itl.iop;
        u32 status;
        int ret = 0;

        status = readl(&iop->outbound_intstatus);

        if (status & IOPMU_OUTBOUND_INT_MSG0) {
                u32 msg = readl(&iop->outbound_msgaddr0);

                dprintk("received outbound msg %x\n", msg);
                writel(IOPMU_OUTBOUND_INT_MSG0, &iop->outbound_intstatus);
                hptiop_message_callback(hba, msg);
                ret = 1;
        }

        if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
                hptiop_drain_outbound_queue_itl(hba);
                ret = 1;
        }

        return ret;
}

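/*
 * The MV message unit is a ring of 64-bit entries indexed by head/tail
 * registers.  Read one entry from the outbound ring, advancing the
 * tail with wrap-around; returns 0 when the ring is empty.
 */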
static u64 mv_outbound_read(struct hpt_iopmu_mv __iomem *mu)
{
        u32 outbound_tail = readl(&mu->outbound_tail);
        u32 outbound_head = readl(&mu->outbound_head);

        if (outbound_tail != outbound_head) {
                u64 p;

                memcpy_fromio(&p, &mu->outbound_q[outbound_tail], 8);
                outbound_tail++;

                if (outbound_tail == MVIOP_QUEUE_LEN)
                        outbound_tail = 0;
                writel(outbound_tail, &mu->outbound_tail);
                return p;
        } else
                return 0;
}

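/*
 * Post one 64-bit entry to the MV inbound ring and ring the doorbell
 * so the IOP picks it up.
 */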
static void mv_inbound_write(u64 p, struct hptiop_hba *hba)
{
        u32 inbound_head = readl(&hba->u.mv.mu->inbound_head);
        u32 head = inbound_head + 1;

        if (head == MVIOP_QUEUE_LEN)
                head = 0;

        memcpy_toio(&hba->u.mv.mu->inbound_q[inbound_head], &p, 8);
        writel(head, &hba->u.mv.mu->inbound_head);
        writel(MVIOP_MU_INBOUND_INT_POSTQUEUE,
                        &hba->u.mv.regs->inbound_doorbell);
}

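/*
 * An MV completion tag packs several fields: the request type in bits
 * 7:5, the request index above bit 8, and low-order flag bits marking
 * a returned context and a successful result.
 */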
static void hptiop_request_callback_mv(struct hptiop_hba *hba, u64 tag)
{
        u32 req_type = (tag >> 5) & 0x7;
        struct hpt_iop_request_scsi_command *req;

        dprintk("hptiop_request_callback_mv: tag=%llx\n", tag);

        BUG_ON((tag & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) == 0);

        switch (req_type) {
        case IOP_REQUEST_TYPE_GET_CONFIG:
        case IOP_REQUEST_TYPE_SET_CONFIG:
                hba->msg_done = 1;
                break;

        case IOP_REQUEST_TYPE_SCSI_COMMAND:
                req = hba->reqs[tag >> 8].req_virt;
                if (likely(tag & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT))
                        req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);

                hptiop_finish_scsi_req(hba, tag >> 8, req);
                break;

        default:
                break;
        }
}

static int iop_intr_mv(struct hptiop_hba *hba)
{
        u32 status;
        int ret = 0;

        status = readl(&hba->u.mv.regs->outbound_doorbell);
        writel(~status, &hba->u.mv.regs->outbound_doorbell);

        if (status & MVIOP_MU_OUTBOUND_INT_MSG) {
                u32 msg;
                msg = readl(&hba->u.mv.mu->outbound_msg);
                dprintk("received outbound msg %x\n", msg);
                hptiop_message_callback(hba, msg);
                ret = 1;
        }

        if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) {
                u64 tag;

                while ((tag = mv_outbound_read(hba->u.mv.mu)))
                        hptiop_request_callback_mv(hba, tag);
                ret = 1;
        }

        return ret;
}

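/*
 * Post a request that lives in IOP memory and busy-wait (with 1 ms
 * sleeps) for the outbound drain logic to flip its context word, for
 * up to @millisec ms.
 */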
static int iop_send_sync_request_itl(struct hptiop_hba *hba,
                                        void __iomem *_req, u32 millisec)
{
        struct hpt_iop_request_header __iomem *req = _req;
        u32 i;

        writel(readl(&req->flags) | IOP_REQUEST_FLAG_SYNC_REQUEST, &req->flags);
        writel(0, &req->context);
        writel((unsigned long)req - (unsigned long)hba->u.itl.iop,
                        &hba->u.itl.iop->inbound_queue);
        readl(&hba->u.itl.iop->outbound_intstatus);

        for (i = 0; i < millisec; i++) {
                iop_intr_itl(hba);
                if (readl(&req->context))
                        return 0;
                msleep(1);
        }

        return -1;
}

static int iop_send_sync_request_mv(struct hptiop_hba *hba,
                                        u32 size_bits, u32 millisec)
{
        struct hpt_iop_request_header *reqhdr = hba->u.mv.internal_req;
        u32 i;

        hba->msg_done = 0;
        reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_SYNC_REQUEST);
        mv_inbound_write(hba->u.mv.internal_req_phy |
                        MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bits, hba);

        for (i = 0; i < millisec; i++) {
                iop_intr_mv(hba);
                if (hba->msg_done)
                        return 0;
                msleep(1);
        }
        return -1;
}

static void hptiop_post_msg_itl(struct hptiop_hba *hba, u32 msg)
{
        writel(msg, &hba->u.itl.iop->inbound_msgaddr0);
        readl(&hba->u.itl.iop->outbound_intstatus);
}

static void hptiop_post_msg_mv(struct hptiop_hba *hba, u32 msg)
{
        writel(msg, &hba->u.mv.mu->inbound_msg);
        writel(MVIOP_MU_INBOUND_INT_MSG, &hba->u.mv.regs->inbound_doorbell);
        readl(&hba->u.mv.regs->inbound_doorbell);
}

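/*
 * Send a message-register command and poll the interrupt routine until
 * the firmware acknowledges it (hba->msg_done) or @millisec ms elapse.
 * The host lock is taken around each poll because iop_intr may also
 * run from the interrupt handler.
 */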
static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
{
        u32 i;

        hba->msg_done = 0;
        hba->ops->post_msg(hba, msg);

        for (i = 0; i < millisec; i++) {
                spin_lock_irq(hba->host->host_lock);
                hba->ops->iop_intr(hba);
                spin_unlock_irq(hba->host->host_lock);
                if (hba->msg_done)
                        break;
                msleep(1);
        }

        return hba->msg_done ? 0 : -1;
}

static int iop_get_config_itl(struct hptiop_hba *hba,
                                struct hpt_iop_request_get_config *config)
{
        u32 req32;
        struct hpt_iop_request_get_config __iomem *req;

        req32 = readl(&hba->u.itl.iop->inbound_queue);
        if (req32 == IOPMU_QUEUE_EMPTY)
                return -1;

        req = (struct hpt_iop_request_get_config __iomem *)
                        ((unsigned long)hba->u.itl.iop + req32);

        writel(0, &req->header.flags);
        writel(IOP_REQUEST_TYPE_GET_CONFIG, &req->header.type);
        writel(sizeof(struct hpt_iop_request_get_config), &req->header.size);
        writel(IOP_RESULT_PENDING, &req->header.result);

        if (iop_send_sync_request_itl(hba, req, 20000)) {
                dprintk("Get config send cmd failed\n");
                return -1;
        }

        memcpy_fromio(config, req, sizeof(*config));
        writel(req32, &hba->u.itl.iop->outbound_queue);
        return 0;
}

static int iop_get_config_mv(struct hptiop_hba *hba,
                                struct hpt_iop_request_get_config *config)
{
        struct hpt_iop_request_get_config *req = hba->u.mv.internal_req;

        req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
        req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG);
        req->header.size =
                cpu_to_le32(sizeof(struct hpt_iop_request_get_config));
        req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
        req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG << 5);
        req->header.context_hi32 = 0;

        if (iop_send_sync_request_mv(hba, 0, 20000)) {
                dprintk("Get config send cmd failed\n");
                return -1;
        }

        memcpy(config, req, sizeof(struct hpt_iop_request_get_config));
        return 0;
}

static int iop_set_config_itl(struct hptiop_hba *hba,
                                struct hpt_iop_request_set_config *config)
{
        u32 req32;
        struct hpt_iop_request_set_config __iomem *req;

        req32 = readl(&hba->u.itl.iop->inbound_queue);
        if (req32 == IOPMU_QUEUE_EMPTY)
                return -1;

        req = (struct hpt_iop_request_set_config __iomem *)
                        ((unsigned long)hba->u.itl.iop + req32);

        memcpy_toio((u8 __iomem *)req + sizeof(struct hpt_iop_request_header),
                (u8 *)config + sizeof(struct hpt_iop_request_header),
                sizeof(struct hpt_iop_request_set_config) -
                        sizeof(struct hpt_iop_request_header));

        writel(0, &req->header.flags);
        writel(IOP_REQUEST_TYPE_SET_CONFIG, &req->header.type);
        writel(sizeof(struct hpt_iop_request_set_config), &req->header.size);
        writel(IOP_RESULT_PENDING, &req->header.result);

        if (iop_send_sync_request_itl(hba, req, 20000)) {
                dprintk("Set config send cmd failed\n");
                return -1;
        }

        writel(req32, &hba->u.itl.iop->outbound_queue);
        return 0;
}

static int iop_set_config_mv(struct hptiop_hba *hba,
                                struct hpt_iop_request_set_config *config)
{
        struct hpt_iop_request_set_config *req = hba->u.mv.internal_req;

        memcpy(req, config, sizeof(struct hpt_iop_request_set_config));
        req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
        req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG);
        req->header.size =
                cpu_to_le32(sizeof(struct hpt_iop_request_set_config));
        req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
        req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG << 5);
        req->header.context_hi32 = 0;

        if (iop_send_sync_request_mv(hba, 0, 20000)) {
                dprintk("Set config send cmd failed\n");
                return -1;
        }

        return 0;
}

static void hptiop_enable_intr_itl(struct hptiop_hba *hba)
{
        writel(~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0),
                &hba->u.itl.iop->outbound_intmask);
}

static void hptiop_enable_intr_mv(struct hptiop_hba *hba)
{
        writel(MVIOP_MU_OUTBOUND_INT_POSTQUEUE | MVIOP_MU_OUTBOUND_INT_MSG,
                &hba->u.mv.regs->outbound_intmask);
}

static int hptiop_initialize_iop(struct hptiop_hba *hba)
{
        /* enable interrupts */
        hba->ops->enable_intr(hba);

        hba->initialized = 1;

        /* start background tasks */
        if (iop_send_sync_msg(hba,
                        IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
                printk(KERN_ERR "scsi%d: fail to start background task\n",
                        hba->host->host_no);
                return -1;
        }
        return 0;
}

static void __iomem *hptiop_map_pci_bar(struct hptiop_hba *hba, int index)
{
        u32 mem_base_phy, length;
        void __iomem *mem_base_virt;

        struct pci_dev *pcidev = hba->pcidev;

        if (!(pci_resource_flags(pcidev, index) & IORESOURCE_MEM)) {
                printk(KERN_ERR "scsi%d: pci resource invalid\n",
                                hba->host->host_no);
                return NULL;
        }

        mem_base_phy = pci_resource_start(pcidev, index);
        length = pci_resource_len(pcidev, index);
        mem_base_virt = ioremap(mem_base_phy, length);

        if (!mem_base_virt) {
                printk(KERN_ERR "scsi%d: Fail to ioremap memory space\n",
                                hba->host->host_no);
                return NULL;
        }
        return mem_base_virt;
}

static int hptiop_map_pci_bar_itl(struct hptiop_hba *hba)
{
        hba->u.itl.iop = hptiop_map_pci_bar(hba, 0);
        if (hba->u.itl.iop)
                return 0;
        else
                return -1;
}

static void hptiop_unmap_pci_bar_itl(struct hptiop_hba *hba)
{
        iounmap(hba->u.itl.iop);
}

static int hptiop_map_pci_bar_mv(struct hptiop_hba *hba)
{
        hba->u.mv.regs = hptiop_map_pci_bar(hba, 0);
        if (hba->u.mv.regs == NULL)
                return -1;

        hba->u.mv.mu = hptiop_map_pci_bar(hba, 2);
        if (hba->u.mv.mu == NULL) {
                iounmap(hba->u.mv.regs);
                return -1;
        }

        return 0;
}

static void hptiop_unmap_pci_bar_mv(struct hptiop_hba *hba)
{
        iounmap(hba->u.mv.regs);
        iounmap(hba->u.mv.mu);
}

static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg)
{
        dprintk("iop message 0x%x\n", msg);

        if (msg == IOPMU_INBOUND_MSG0_NOP)
                hba->msg_done = 1;

        if (!hba->initialized)
                return;

        if (msg == IOPMU_INBOUND_MSG0_RESET) {
                atomic_set(&hba->resetting, 0);
                wake_up(&hba->reset_wq);
        } else if (msg <= IOPMU_INBOUND_MSG0_MAX)
                hba->msg_done = 1;
}

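/*
 * Request slots are kept on a simple singly-linked free list.  Callers
 * serialize access through the SCSI host lock, so no extra locking is
 * needed here.
 */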
static struct hptiop_request *get_req(struct hptiop_hba *hba)
{
        struct hptiop_request *ret;

        dprintk("get_req : req=%p\n", hba->req_list);

        ret = hba->req_list;
        if (ret)
                hba->req_list = ret->next;

        return ret;
}

static void free_req(struct hptiop_hba *hba, struct hptiop_request *req)
{
        dprintk("free_req(%d, %p)\n", req->index, req);
        req->next = hba->req_list;
        hba->req_list = req;
}

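/*
 * Common completion path for both interfaces: translate the IOP result
 * code into a SCSI midlayer result, copy sense data on check
 * conditions, then complete the command and recycle the request slot.
 */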
static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
                                struct hpt_iop_request_scsi_command *req)
{
        struct scsi_cmnd *scp;

        dprintk("hptiop_finish_scsi_req: req=%p, type=%d, "
                        "result=%d, context=0x%x tag=%d\n",
                        req, req->header.type, req->header.result,
                        req->header.context, tag);

        BUG_ON(!req->header.result);
        BUG_ON(req->header.type != cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND));

        scp = hba->reqs[tag].scp;

        if (HPT_SCP(scp)->mapped)
                scsi_dma_unmap(scp);

        switch (le32_to_cpu(req->header.result)) {
        case IOP_RESULT_SUCCESS:
                scsi_set_resid(scp,
                        scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
                scp->result = (DID_OK<<16);
                break;
        case IOP_RESULT_BAD_TARGET:
                scp->result = (DID_BAD_TARGET<<16);
                break;
        case IOP_RESULT_BUSY:
                scp->result = (DID_BUS_BUSY<<16);
                break;
        case IOP_RESULT_RESET:
                scp->result = (DID_RESET<<16);
                break;
        case IOP_RESULT_FAIL:
                scp->result = (DID_ERROR<<16);
                break;
        case IOP_RESULT_INVALID_REQUEST:
                scp->result = (DID_ABORT<<16);
                break;
        case IOP_RESULT_CHECK_CONDITION:
                scsi_set_resid(scp,
                        scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
                scp->result = SAM_STAT_CHECK_CONDITION;
                memcpy(scp->sense_buffer, &req->sg_list,
                                min_t(size_t, SCSI_SENSE_BUFFERSIZE,
                                        le32_to_cpu(req->dataxfer_length)));
                break;

        default:
                scp->result = ((DRIVER_INVALID|SUGGEST_ABORT)<<24) |
                                        (DID_ABORT<<16);
                break;
        }

        dprintk("scsi_done(%p)\n", scp);
        scp->scsi_done(scp);
        free_req(hba, &hba->reqs[tag]);
}

static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 _tag)
{
        struct hpt_iop_request_scsi_command *req;
        u32 tag;

        if (hba->iopintf_v2) {
                tag = _tag & ~IOPMU_QUEUE_REQUEST_RESULT_BIT;
                req = hba->reqs[tag].req_virt;
                if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT))
                        req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);
        } else {
                tag = _tag;
                req = hba->reqs[tag].req_virt;
        }

        hptiop_finish_scsi_req(hba, tag, req);
}

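/*
 * Completion handler for ioctl requests that live in IOP memory: the
 * context words carry a pointer back to the originating hpt_ioctl_k,
 * whose done() callback is invoked after any output data is copied.
 */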
static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
{
        struct hpt_iop_request_header __iomem *req;
        struct hpt_iop_request_ioctl_command __iomem *p;
        struct hpt_ioctl_k *arg;

        req = (struct hpt_iop_request_header __iomem *)
                        ((unsigned long)hba->u.itl.iop + tag);
        dprintk("hptiop_iop_request_callback_itl: req=%p, type=%d, "
                        "result=%d, context=0x%x tag=%d\n",
                        req, readl(&req->type), readl(&req->result),
                        readl(&req->context), tag);

        BUG_ON(!readl(&req->result));
        BUG_ON(readl(&req->type) != IOP_REQUEST_TYPE_IOCTL_COMMAND);

        p = (struct hpt_iop_request_ioctl_command __iomem *)req;
        arg = (struct hpt_ioctl_k *)(unsigned long)
                (readl(&req->context) |
                        ((u64)readl(&req->context_hi32)<<32));

        if (readl(&req->result) == IOP_RESULT_SUCCESS) {
                arg->result = HPT_IOCTL_RESULT_OK;

                if (arg->outbuf_size)
                        memcpy_fromio(arg->outbuf,
                                &p->buf[(readl(&p->inbuf_size) + 3) & ~3],
                                arg->outbuf_size);

                if (arg->bytes_returned)
                        *arg->bytes_returned = arg->outbuf_size;
        } else
                arg->result = HPT_IOCTL_RESULT_FAILED;

        arg->done(arg);
        writel(tag, &hba->u.itl.iop->outbound_queue);
}

static irqreturn_t hptiop_intr(int irq, void *dev_id)
{
        struct hptiop_hba *hba = dev_id;
        int handled;
        unsigned long flags;

        spin_lock_irqsave(hba->host->host_lock, flags);
        handled = hba->ops->iop_intr(hba);
        spin_unlock_irqrestore(hba->host->host_lock, flags);

        return handled;
}

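/*
 * Map the command's data buffer for DMA and fill the request's
 * hardware scatter/gather list.  Returns the number of SG entries
 * used, or 0 for commands without data.
 */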
static int hptiop_buildsgl(struct scsi_cmnd *scp, struct hpt_iopsg *psg)
{
        struct Scsi_Host *host = scp->device->host;
        struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
        struct scatterlist *sg;
        int idx, nseg;

        nseg = scsi_dma_map(scp);
        BUG_ON(nseg < 0);
        if (!nseg)
                return 0;

        HPT_SCP(scp)->sgcnt = nseg;
        HPT_SCP(scp)->mapped = 1;

        BUG_ON(HPT_SCP(scp)->sgcnt > hba->max_sg_descriptors);

        scsi_for_each_sg(scp, sg, HPT_SCP(scp)->sgcnt, idx) {
                psg[idx].pci_address = cpu_to_le64(sg_dma_address(sg));
                psg[idx].size = cpu_to_le32(sg_dma_len(sg));
                psg[idx].eot = (idx == HPT_SCP(scp)->sgcnt - 1) ?
                        cpu_to_le32(1) : 0;
        }
        return HPT_SCP(scp)->sgcnt;
}

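/*
 * Post a host-memory request to the ITL inbound queue.  On v2
 * interfaces the low bits of the posted address encode a request size
 * class so the firmware knows how much to fetch.
 */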
static void hptiop_post_req_itl(struct hptiop_hba *hba,
                                        struct hptiop_request *_req)
{
        struct hpt_iop_request_header *reqhdr = _req->req_virt;

        reqhdr->context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
                                                        (u32)_req->index);
        reqhdr->context_hi32 = 0;

        if (hba->iopintf_v2) {
                u32 size, size_bits;

                size = le32_to_cpu(reqhdr->size);
                if (size < 256)
                        size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
                else if (size < 512)
                        size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
                else
                        size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT |
                                                IOPMU_QUEUE_ADDR_HOST_BIT;
                writel(_req->req_shifted_phy | size_bits,
                        &hba->u.itl.iop->inbound_queue);
        } else
                writel(_req->req_shifted_phy | IOPMU_QUEUE_ADDR_HOST_BIT,
                                        &hba->u.itl.iop->inbound_queue);
}

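/*
 * Post a host-memory request to the MV inbound ring.  The two low bits
 * select one of four 256-byte size classes; the request's physical
 * address was pre-shifted right by 5, so shift it back before posting.
 */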
static void hptiop_post_req_mv(struct hptiop_hba *hba,
                                        struct hptiop_request *_req)
{
        struct hpt_iop_request_header *reqhdr = _req->req_virt;
        u32 size, size_bit;

        reqhdr->context = cpu_to_le32(_req->index<<8 |
                                        IOP_REQUEST_TYPE_SCSI_COMMAND<<5);
        reqhdr->context_hi32 = 0;
        size = le32_to_cpu(reqhdr->size);

        if (size <= 256)
                size_bit = 0;
        else if (size <= 256*2)
                size_bit = 1;
        else if (size <= 256*3)
                size_bit = 2;
        else
                size_bit = 3;

        mv_inbound_write((_req->req_shifted_phy << 5) |
                MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bit, hba);
}

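/*
 * queuecommand entry point (old-style, called with the host lock
 * held): grab a free request slot, validate the target address, build
 * the SG list and hand the request to the interface-specific post
 * routine.
 */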
static int hptiop_queuecommand(struct scsi_cmnd *scp,
                                void (*done)(struct scsi_cmnd *))
{
        struct Scsi_Host *host = scp->device->host;
        struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
        struct hpt_iop_request_scsi_command *req;
        int sg_count = 0;
        struct hptiop_request *_req;

        BUG_ON(!done);
        scp->scsi_done = done;

        _req = get_req(hba);
        if (_req == NULL) {
                dprintk("hptiop_queuecmd : no free req\n");
                return SCSI_MLQUEUE_HOST_BUSY;
        }

        _req->scp = scp;

        dprintk("hptiop_queuecmd(scp=%p) %d/%d/%d/%d cdb=(%x-%x-%x) "
                        "req_index=%d, req=%p\n",
                        scp,
                        host->host_no, scp->device->channel,
                        scp->device->id, scp->device->lun,
                        ((u32 *)scp->cmnd)[0],
                        ((u32 *)scp->cmnd)[1],
                        ((u32 *)scp->cmnd)[2],
                        _req->index, _req->req_virt);

        scp->result = 0;

        if (scp->device->channel || scp->device->lun ||
                        scp->device->id > hba->max_devices) {
                scp->result = DID_BAD_TARGET << 16;
                free_req(hba, _req);
                goto cmd_done;
        }

        req = _req->req_virt;

        /* build S/G table */
        sg_count = hptiop_buildsgl(scp, req->sg_list);
        if (!sg_count)
                HPT_SCP(scp)->mapped = 0;

        req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
        req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND);
        req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
        req->dataxfer_length = cpu_to_le32(scsi_bufflen(scp));
        req->channel = scp->device->channel;
        req->target = scp->device->id;
        req->lun = scp->device->lun;
        req->header.size = cpu_to_le32(
                                sizeof(struct hpt_iop_request_scsi_command)
                                 - sizeof(struct hpt_iopsg)
                                 + sg_count * sizeof(struct hpt_iopsg));

        memcpy(req->cdb, scp->cmnd, sizeof(req->cdb));
        hba->ops->post_req(hba, _req);
        return 0;

cmd_done:
        dprintk("scsi_done(scp=%p)\n", scp);
        scp->scsi_done(scp);
        return 0;
}

static const char *hptiop_info(struct Scsi_Host *host)
{
        return driver_name_long;
}

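/*
 * Ask the firmware to reset itself, then wait up to 60 s for the reset
 * message to come back.  On success the background task is restarted.
 */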
static int hptiop_reset_hba(struct hptiop_hba *hba)
{
        if (atomic_xchg(&hba->resetting, 1) == 0) {
                atomic_inc(&hba->reset_count);
                hba->ops->post_msg(hba, IOPMU_INBOUND_MSG0_RESET);
        }

        wait_event_timeout(hba->reset_wq,
                        atomic_read(&hba->resetting) == 0, 60 * HZ);

        if (atomic_read(&hba->resetting)) {
                /* IOP is in unknown state, abort reset */
                printk(KERN_ERR "scsi%d: reset failed\n", hba->host->host_no);
                return -1;
        }

        if (iop_send_sync_msg(hba,
                IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
                dprintk("scsi%d: fail to start background task\n",
                                hba->host->host_no);
        }

        return 0;
}

static int hptiop_reset(struct scsi_cmnd *scp)
{
        struct Scsi_Host *host = scp->device->host;
        struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

        printk(KERN_WARNING "hptiop_reset(%d/%d/%d) scp=%p\n",
                        scp->device->host->host_no, scp->device->channel,
                        scp->device->id, scp);

        return hptiop_reset_hba(hba) ? FAILED : SUCCESS;
}

static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev,
                                                int queue_depth)
{
        struct hptiop_hba *hba = (struct hptiop_hba *)sdev->host->hostdata;

        if (queue_depth > hba->max_requests)
                queue_depth = hba->max_requests;
        scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
        return queue_depth;
}

static ssize_t hptiop_show_version(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        return snprintf(buf, PAGE_SIZE, "%s\n", driver_ver);
}

static ssize_t hptiop_show_fw_version(struct device *dev,
                                      struct device_attribute *attr, char *buf)
{
        struct Scsi_Host *host = class_to_shost(dev);
        struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

        return snprintf(buf, PAGE_SIZE, "%d.%d.%d.%d\n",
                                hba->firmware_version >> 24,
                                (hba->firmware_version >> 16) & 0xff,
                                (hba->firmware_version >> 8) & 0xff,
                                hba->firmware_version & 0xff);
}

static struct device_attribute hptiop_attr_version = {
        .attr = {
                .name = "driver-version",
                .mode = S_IRUGO,
        },
        .show = hptiop_show_version,
};

static struct device_attribute hptiop_attr_fw_version = {
        .attr = {
                .name = "firmware-version",
                .mode = S_IRUGO,
        },
        .show = hptiop_show_fw_version,
};

static struct device_attribute *hptiop_attrs[] = {
        &hptiop_attr_version,
        &hptiop_attr_fw_version,
        NULL
};

static struct scsi_host_template driver_template = {
        .module                     = THIS_MODULE,
        .name                       = driver_name,
        .queuecommand               = hptiop_queuecommand,
        .eh_device_reset_handler    = hptiop_reset,
        .eh_bus_reset_handler       = hptiop_reset,
        .info                       = hptiop_info,
        .emulated                   = 0,
        .use_clustering             = ENABLE_CLUSTERING,
        .proc_name                  = driver_name,
        .shost_attrs                = hptiop_attrs,
        .this_id                    = -1,
        .change_queue_depth         = hptiop_adjust_disk_queue_depth,
};

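/*
 * MV controllers need a 2 KB DMA-coherent buffer for the synchronous
 * get/set-config requests that are exchanged before the normal request
 * pool exists.
 */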
static int hptiop_internal_memalloc_mv(struct hptiop_hba *hba)
{
        hba->u.mv.internal_req = dma_alloc_coherent(&hba->pcidev->dev,
                        0x800, &hba->u.mv.internal_req_phy, GFP_KERNEL);
        if (hba->u.mv.internal_req)
                return 0;
        else
                return -1;
}

static int hptiop_internal_memfree_mv(struct hptiop_hba *hba)
{
        if (hba->u.mv.internal_req) {
                dma_free_coherent(&hba->pcidev->dev, 0x800,
                        hba->u.mv.internal_req, hba->u.mv.internal_req_phy);
                return 0;
        } else
                return -1;
}

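/*
 * Probe: enable the PCI device, map its BARs, wait for the firmware,
 * negotiate limits via get/set-config, then carve a DMA-coherent
 * region into 32-byte-aligned request slots before registering the
 * SCSI host.
 */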
static int __devinit hptiop_probe(struct pci_dev *pcidev,
                                        const struct pci_device_id *id)
{
        struct Scsi_Host *host = NULL;
        struct hptiop_hba *hba;
        struct hpt_iop_request_get_config iop_config;
        struct hpt_iop_request_set_config set_config;
        dma_addr_t start_phy;
        void *start_virt;
        u32 offset, i, req_size;

        dprintk("hptiop_probe(%p)\n", pcidev);

        if (pci_enable_device(pcidev)) {
                printk(KERN_ERR "hptiop: fail to enable pci device\n");
                return -ENODEV;
        }

        printk(KERN_INFO "adapter at PCI %d:%d:%d, IRQ %d\n",
                pcidev->bus->number, pcidev->devfn >> 3, pcidev->devfn & 7,
                pcidev->irq);

        pci_set_master(pcidev);

        /* Enable 64bit DMA if possible */
        if (pci_set_dma_mask(pcidev, DMA_64BIT_MASK)) {
                if (pci_set_dma_mask(pcidev, DMA_32BIT_MASK)) {
                        printk(KERN_ERR "hptiop: fail to set dma_mask\n");
                        goto disable_pci_device;
                }
        }

        if (pci_request_regions(pcidev, driver_name)) {
                printk(KERN_ERR "hptiop: pci_request_regions failed\n");
                goto disable_pci_device;
        }

        host = scsi_host_alloc(&driver_template, sizeof(struct hptiop_hba));
        if (!host) {
                printk(KERN_ERR "hptiop: fail to alloc scsi host\n");
                goto free_pci_regions;
        }

        hba = (struct hptiop_hba *)host->hostdata;

        hba->ops = (struct hptiop_adapter_ops *)id->driver_data;
        hba->pcidev = pcidev;
        hba->host = host;
        hba->initialized = 0;
        hba->iopintf_v2 = 0;

        atomic_set(&hba->resetting, 0);
        atomic_set(&hba->reset_count, 0);

        init_waitqueue_head(&hba->reset_wq);
        init_waitqueue_head(&hba->ioctl_wq);

        host->max_lun = 1;
        host->max_channel = 0;
        host->io_port = 0;
        host->n_io_port = 0;
        host->irq = pcidev->irq;

        if (hba->ops->map_pci_bar(hba))
                goto free_scsi_host;

        if (hba->ops->iop_wait_ready(hba, 20000)) {
                printk(KERN_ERR "scsi%d: firmware not ready\n",
                                hba->host->host_no);
                goto unmap_pci_bar;
        }

        if (hba->ops->internal_memalloc) {
                if (hba->ops->internal_memalloc(hba)) {
                        printk(KERN_ERR "scsi%d: internal_memalloc failed\n",
                                hba->host->host_no);
                        goto unmap_pci_bar;
                }
        }

        if (hba->ops->get_config(hba, &iop_config)) {
                printk(KERN_ERR "scsi%d: get config failed\n",
                                hba->host->host_no);
                goto unmap_pci_bar;
        }

        hba->max_requests = min(le32_to_cpu(iop_config.max_requests),
                                HPTIOP_MAX_REQUESTS);
        hba->max_devices = le32_to_cpu(iop_config.max_devices);
        hba->max_request_size = le32_to_cpu(iop_config.request_size);
        hba->max_sg_descriptors = le32_to_cpu(iop_config.max_sg_count);
        hba->firmware_version = le32_to_cpu(iop_config.firmware_version);
        hba->interface_version = le32_to_cpu(iop_config.interface_version);
        hba->sdram_size = le32_to_cpu(iop_config.sdram_size);

        if (hba->firmware_version > 0x01020000 ||
                        hba->interface_version > 0x01020000)
                hba->iopintf_v2 = 1;

        host->max_sectors = le32_to_cpu(iop_config.data_transfer_length) >> 9;
        host->max_id = le32_to_cpu(iop_config.max_devices);
        host->sg_tablesize = le32_to_cpu(iop_config.max_sg_count);
        host->can_queue = le32_to_cpu(iop_config.max_requests);
        host->cmd_per_lun = le32_to_cpu(iop_config.max_requests);
        host->max_cmd_len = 16;

        req_size = sizeof(struct hpt_iop_request_scsi_command)
                + sizeof(struct hpt_iopsg) * (hba->max_sg_descriptors - 1);
        if ((req_size & 0x1f) != 0)
                req_size = (req_size + 0x1f) & ~0x1f;

        memset(&set_config, 0, sizeof(struct hpt_iop_request_set_config));
        set_config.iop_id = cpu_to_le32(host->host_no);
        set_config.vbus_id = cpu_to_le16(host->host_no);
        set_config.max_host_request_size = cpu_to_le16(req_size);

        if (hba->ops->set_config(hba, &set_config)) {
                printk(KERN_ERR "scsi%d: set config failed\n",
                                hba->host->host_no);
                goto unmap_pci_bar;
        }

        pci_set_drvdata(pcidev, host);

        if (request_irq(pcidev->irq, hptiop_intr, IRQF_SHARED,
                                        driver_name, hba)) {
                printk(KERN_ERR "scsi%d: request irq %d failed\n",
                                        hba->host->host_no, pcidev->irq);
                goto unmap_pci_bar;
        }

        /* Allocate request mem */

        dprintk("req_size=%d, max_requests=%d\n", req_size, hba->max_requests);

        hba->req_size = req_size;
        start_virt = dma_alloc_coherent(&pcidev->dev,
                                hba->req_size*hba->max_requests + 0x20,
                                &start_phy, GFP_KERNEL);

        if (!start_virt) {
                printk(KERN_ERR "scsi%d: fail to alloc request mem\n",
                                        hba->host->host_no);
                goto free_request_irq;
        }

        hba->dma_coherent = start_virt;
        hba->dma_coherent_handle = start_phy;

        if ((start_phy & 0x1f) != 0) {
                offset = ((start_phy + 0x1f) & ~0x1f) - start_phy;
                start_phy += offset;
                start_virt += offset;
        }

        hba->req_list = start_virt;
        for (i = 0; i < hba->max_requests; i++) {
                hba->reqs[i].next = NULL;
                hba->reqs[i].req_virt = start_virt;
                hba->reqs[i].req_shifted_phy = start_phy >> 5;
                hba->reqs[i].index = i;
                free_req(hba, &hba->reqs[i]);
                start_virt = (char *)start_virt + hba->req_size;
                start_phy = start_phy + hba->req_size;
        }

        /* Enable Interrupt and start background task */
        if (hptiop_initialize_iop(hba))
                goto free_request_mem;

        if (scsi_add_host(host, &pcidev->dev)) {
                printk(KERN_ERR "scsi%d: scsi_add_host failed\n",
                                        hba->host->host_no);
                goto free_request_mem;
        }

        scsi_scan_host(host);

        dprintk("scsi%d: hptiop_probe successful\n", hba->host->host_no);
        return 0;

free_request_mem:
        dma_free_coherent(&hba->pcidev->dev,
                        hba->req_size * hba->max_requests + 0x20,
                        hba->dma_coherent, hba->dma_coherent_handle);

free_request_irq:
        free_irq(hba->pcidev->irq, hba);

unmap_pci_bar:
        if (hba->ops->internal_memfree)
                hba->ops->internal_memfree(hba);

        hba->ops->unmap_pci_bar(hba);

free_scsi_host:
        scsi_host_put(host);

free_pci_regions:
        pci_release_regions(pcidev);

disable_pci_device:
        pci_disable_device(pcidev);

        /* host is still NULL if we failed before scsi_host_alloc */
        dprintk("scsi%d: hptiop_probe fail\n", host ? host->host_no : 0);
        return -ENODEV;
}

static void hptiop_shutdown(struct pci_dev *pcidev)
{
        struct Scsi_Host *host = pci_get_drvdata(pcidev);
        struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

        dprintk("hptiop_shutdown(%p)\n", hba);

        /* stop the iop */
        if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
                printk(KERN_ERR "scsi%d: shutdown the iop timed out\n",
                                        hba->host->host_no);

        /* disable all outbound interrupts */
        hba->ops->disable_intr(hba);
}

static void hptiop_disable_intr_itl(struct hptiop_hba *hba)
{
        u32 int_mask;

        int_mask = readl(&hba->u.itl.iop->outbound_intmask);
        writel(int_mask |
                IOPMU_OUTBOUND_INT_MSG0 | IOPMU_OUTBOUND_INT_POSTQUEUE,
                &hba->u.itl.iop->outbound_intmask);
        readl(&hba->u.itl.iop->outbound_intmask);
}

static void hptiop_disable_intr_mv(struct hptiop_hba *hba)
{
        writel(0, &hba->u.mv.regs->outbound_intmask);
        readl(&hba->u.mv.regs->outbound_intmask);
}

static void hptiop_remove(struct pci_dev *pcidev)
{
        struct Scsi_Host *host = pci_get_drvdata(pcidev);
        struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

        dprintk("scsi%d: hptiop_remove\n", hba->host->host_no);

        scsi_remove_host(host);

        hptiop_shutdown(pcidev);

        free_irq(hba->pcidev->irq, hba);

        dma_free_coherent(&hba->pcidev->dev,
                        hba->req_size * hba->max_requests + 0x20,
                        hba->dma_coherent,
                        hba->dma_coherent_handle);

        if (hba->ops->internal_memfree)
                hba->ops->internal_memfree(hba);

        hba->ops->unmap_pci_bar(hba);

        pci_release_regions(hba->pcidev);
        pci_set_drvdata(hba->pcidev, NULL);
        pci_disable_device(hba->pcidev);

        scsi_host_put(host);
}

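/*
 * Per-interface operation tables: hptiop_itl_ops drives the original
 * ITL message interface, hptiop_mv_ops the Marvell-based boards, which
 * additionally need the internal request buffer.  The PCI match table
 * below selects the right set through driver_data.
 */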
static struct hptiop_adapter_ops hptiop_itl_ops = {
        .iop_wait_ready    = iop_wait_ready_itl,
        .internal_memalloc = NULL,
        .internal_memfree  = NULL,
        .map_pci_bar       = hptiop_map_pci_bar_itl,
        .unmap_pci_bar     = hptiop_unmap_pci_bar_itl,
        .enable_intr       = hptiop_enable_intr_itl,
        .disable_intr      = hptiop_disable_intr_itl,
        .get_config        = iop_get_config_itl,
        .set_config        = iop_set_config_itl,
        .iop_intr          = iop_intr_itl,
        .post_msg          = hptiop_post_msg_itl,
        .post_req          = hptiop_post_req_itl,
};

static struct hptiop_adapter_ops hptiop_mv_ops = {
        .iop_wait_ready    = iop_wait_ready_mv,
        .internal_memalloc = hptiop_internal_memalloc_mv,
        .internal_memfree  = hptiop_internal_memfree_mv,
        .map_pci_bar       = hptiop_map_pci_bar_mv,
        .unmap_pci_bar     = hptiop_unmap_pci_bar_mv,
        .enable_intr       = hptiop_enable_intr_mv,
        .disable_intr      = hptiop_disable_intr_mv,
        .get_config        = iop_get_config_mv,
        .set_config        = iop_set_config_mv,
        .iop_intr          = iop_intr_mv,
        .post_msg          = hptiop_post_msg_mv,
        .post_req          = hptiop_post_req_mv,
};

static struct pci_device_id hptiop_id_table[] = {
        { PCI_VDEVICE(TTI, 0x3220), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x3320), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x3520), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x4320), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x3510), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x3511), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x3521), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x3522), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x3410), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x3540), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x3530), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x3560), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x4322), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x4210), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x4211), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x4310), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x4311), (kernel_ulong_t)&hptiop_itl_ops },
        { PCI_VDEVICE(TTI, 0x3120), (kernel_ulong_t)&hptiop_mv_ops },
        { PCI_VDEVICE(TTI, 0x3122), (kernel_ulong_t)&hptiop_mv_ops },
        { PCI_VDEVICE(TTI, 0x3020), (kernel_ulong_t)&hptiop_mv_ops },
        {},
};

MODULE_DEVICE_TABLE(pci, hptiop_id_table);

static struct pci_driver hptiop_pci_driver = {
        .name       = driver_name,
        .id_table   = hptiop_id_table,
        .probe      = hptiop_probe,
        .remove     = hptiop_remove,
        .shutdown   = hptiop_shutdown,
};

static int __init hptiop_module_init(void)
{
        printk(KERN_INFO "%s %s\n", driver_name_long, driver_ver);
        return pci_register_driver(&hptiop_pci_driver);
}

static void __exit hptiop_module_exit(void)
{
        pci_unregister_driver(&hptiop_pci_driver);
}

module_init(hptiop_module_init);
module_exit(hptiop_module_exit);

MODULE_LICENSE("GPL");