/*
 * HighPoint RR3xxx/4xxx controller driver for Linux
 * Copyright (C) 2006-2007 HighPoint Technologies, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Please report bugs/comments/suggestions to linux@highpoint-tech.com
 *
 * For more information, visit http://www.highpoint-tech.com
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/spinlock.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/div64.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>

#include "hptiop.h"

MODULE_AUTHOR("HighPoint Technologies, Inc.");
MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx/4xxx Controller Driver");

static char driver_name[] = "hptiop";
static const char driver_name_long[] = "RocketRAID 3xxx/4xxx Controller driver";
static const char driver_ver[] = "v1.3 (071203)";

static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec);
static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
				struct hpt_iop_request_scsi_command *req);
static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 tag);
static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag);
static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg);

static int iop_wait_ready_itl(struct hptiop_hba *hba, u32 millisec)
{
	u32 req = 0;
	int i;

	for (i = 0; i < millisec; i++) {
		req = readl(&hba->u.itl.iop->inbound_queue);
		if (req != IOPMU_QUEUE_EMPTY)
			break;
		msleep(1);
	}

	if (req != IOPMU_QUEUE_EMPTY) {
		writel(req, &hba->u.itl.iop->outbound_queue);
		readl(&hba->u.itl.iop->outbound_intstatus);
		return 0;
	}

	return -1;
}

static int iop_wait_ready_mv(struct hptiop_hba *hba, u32 millisec)
{
	return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec);
}

static void hptiop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
{
	if (tag & IOPMU_QUEUE_ADDR_HOST_BIT)
		hptiop_host_request_callback_itl(hba,
				tag & ~IOPMU_QUEUE_ADDR_HOST_BIT);
	else
		hptiop_iop_request_callback_itl(hba, tag);
}

static void hptiop_drain_outbound_queue_itl(struct hptiop_hba *hba)
{
	u32 req;

	while ((req = readl(&hba->u.itl.iop->outbound_queue)) !=
			IOPMU_QUEUE_EMPTY) {

		if (req & IOPMU_QUEUE_MASK_HOST_BITS)
			hptiop_request_callback_itl(hba, req);
		else {
			struct hpt_iop_request_header __iomem *p;

			p = (struct hpt_iop_request_header __iomem *)
				((char __iomem *)hba->u.itl.iop + req);

			if (readl(&p->flags) & IOP_REQUEST_FLAG_SYNC_REQUEST) {
				if (readl(&p->context))
					hptiop_request_callback_itl(hba, req);
				else
					writel(1, &p->context);
			} else
				hptiop_request_callback_itl(hba, req);
		}
	}
}

static int iop_intr_itl(struct hptiop_hba *hba)
{
	struct hpt_iopmu_itl __iomem *iop = hba->u.itl.iop;
	u32 status;
	int ret = 0;

	status = readl(&iop->outbound_intstatus);

	if (status & IOPMU_OUTBOUND_INT_MSG0) {
		u32 msg = readl(&iop->outbound_msgaddr0);

		dprintk("received outbound msg %x\n", msg);
		writel(IOPMU_OUTBOUND_INT_MSG0, &iop->outbound_intstatus);
		hptiop_message_callback(hba, msg);
		ret = 1;
	}

	if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
		hptiop_drain_outbound_queue_itl(hba);
		ret = 1;
	}

	return ret;
}

static u64 mv_outbound_read(struct hpt_iopmu_mv __iomem *mu)
{
	u32 outbound_tail = readl(&mu->outbound_tail);
	u32 outbound_head = readl(&mu->outbound_head);

	if (outbound_tail != outbound_head) {
		u64 p;

		memcpy_fromio(&p, &mu->outbound_q[mu->outbound_tail], 8);
		outbound_tail++;

		if (outbound_tail == MVIOP_QUEUE_LEN)
			outbound_tail = 0;
		writel(outbound_tail, &mu->outbound_tail);
		return p;
	} else
		return 0;
}

static void mv_inbound_write(u64 p, struct hptiop_hba *hba)
{
	u32 inbound_head = readl(&hba->u.mv.mu->inbound_head);
	u32 head = inbound_head + 1;

	if (head == MVIOP_QUEUE_LEN)
		head = 0;

	memcpy_toio(&hba->u.mv.mu->inbound_q[inbound_head], &p, 8);
	writel(head, &hba->u.mv.mu->inbound_head);
	writel(MVIOP_MU_INBOUND_INT_POSTQUEUE,
		&hba->u.mv.regs->inbound_doorbell);
}

static void hptiop_request_callback_mv(struct hptiop_hba *hba, u64 tag)
{
	u32 req_type = (tag >> 5) & 0x7;
	struct hpt_iop_request_scsi_command *req;

	dprintk("hptiop_request_callback_mv: tag=%llx\n", tag);

	BUG_ON((tag & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) == 0);

	switch (req_type) {
	case IOP_REQUEST_TYPE_GET_CONFIG:
	case IOP_REQUEST_TYPE_SET_CONFIG:
		hba->msg_done = 1;
		break;

	case IOP_REQUEST_TYPE_SCSI_COMMAND:
		req = hba->reqs[tag >> 8].req_virt;
		if (likely(tag & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT))
			req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);

		hptiop_finish_scsi_req(hba, tag >> 8, req);
		break;

	default:
		break;
	}
}

static int iop_intr_mv(struct hptiop_hba *hba)
{
	u32 status;
	int ret = 0;

	status = readl(&hba->u.mv.regs->outbound_doorbell);
	writel(~status, &hba->u.mv.regs->outbound_doorbell);

	if (status & MVIOP_MU_OUTBOUND_INT_MSG) {
		u32 msg;

		msg = readl(&hba->u.mv.mu->outbound_msg);
		dprintk("received outbound msg %x\n", msg);
		hptiop_message_callback(hba, msg);
		ret = 1;
	}

	if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) {
		u64 tag;

		while ((tag = mv_outbound_read(hba->u.mv.mu)))
			hptiop_request_callback_mv(hba, tag);
		ret = 1;
	}

	return ret;
}

static int iop_send_sync_request_itl(struct hptiop_hba *hba,
					void __iomem *_req, u32 millisec)
{
	struct hpt_iop_request_header __iomem *req = _req;
	u32 i;

	writel(readl(&req->flags) | IOP_REQUEST_FLAG_SYNC_REQUEST, &req->flags);
	writel(0, &req->context);
	writel((unsigned long)req - (unsigned long)hba->u.itl.iop,
			&hba->u.itl.iop->inbound_queue);
	readl(&hba->u.itl.iop->outbound_intstatus);

	for (i = 0; i < millisec; i++) {
		iop_intr_itl(hba);
		if (readl(&req->context))
			return 0;
		msleep(1);
	}

	return -1;
}

static int iop_send_sync_request_mv(struct hptiop_hba *hba,
					u32 size_bits, u32 millisec)
{
	struct hpt_iop_request_header *reqhdr = hba->u.mv.internal_req;
	u32 i;

	hba->msg_done = 0;
	reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_SYNC_REQUEST);
	mv_inbound_write(hba->u.mv.internal_req_phy |
			MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bits, hba);

	for (i = 0; i < millisec; i++) {
		iop_intr_mv(hba);
		if (hba->msg_done)
			return 0;
		msleep(1);
	}
	return -1;
}

static void hptiop_post_msg_itl(struct hptiop_hba *hba, u32 msg)
{
	writel(msg, &hba->u.itl.iop->inbound_msgaddr0);
	readl(&hba->u.itl.iop->outbound_intstatus);
}

static void hptiop_post_msg_mv(struct hptiop_hba *hba, u32 msg)
{
	writel(msg, &hba->u.mv.mu->inbound_msg);
	writel(MVIOP_MU_INBOUND_INT_MSG, &hba->u.mv.regs->inbound_doorbell);
	readl(&hba->u.mv.regs->inbound_doorbell);
}

static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
{
	u32 i;

	hba->msg_done = 0;
	hba->ops->post_msg(hba, msg);

	for (i = 0; i < millisec; i++) {
		spin_lock_irq(hba->host->host_lock);
		hba->ops->iop_intr(hba);
		spin_unlock_irq(hba->host->host_lock);
		if (hba->msg_done)
			break;
		msleep(1);
	}

	return hba->msg_done ? 0 : -1;
}

static int iop_get_config_itl(struct hptiop_hba *hba,
				struct hpt_iop_request_get_config *config)
{
	u32 req32;
	struct hpt_iop_request_get_config __iomem *req;

	req32 = readl(&hba->u.itl.iop->inbound_queue);
	if (req32 == IOPMU_QUEUE_EMPTY)
		return -1;

	req = (struct hpt_iop_request_get_config __iomem *)
			((unsigned long)hba->u.itl.iop + req32);

	writel(0, &req->header.flags);
	writel(IOP_REQUEST_TYPE_GET_CONFIG, &req->header.type);
	writel(sizeof(struct hpt_iop_request_get_config), &req->header.size);
	writel(IOP_RESULT_PENDING, &req->header.result);

	if (iop_send_sync_request_itl(hba, req, 20000)) {
		dprintk("Get config send cmd failed\n");
		return -1;
	}

	memcpy_fromio(config, req, sizeof(*config));
	writel(req32, &hba->u.itl.iop->outbound_queue);
	return 0;
}

static int iop_get_config_mv(struct hptiop_hba *hba,
				struct hpt_iop_request_get_config *config)
{
	struct hpt_iop_request_get_config *req = hba->u.mv.internal_req;

	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG);
	req->header.size =
		cpu_to_le32(sizeof(struct hpt_iop_request_get_config));
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG << 5);
	req->header.context_hi32 = 0;

	if (iop_send_sync_request_mv(hba, 0, 20000)) {
		dprintk("Get config send cmd failed\n");
		return -1;
	}

	memcpy(config, req, sizeof(struct hpt_iop_request_get_config));
	return 0;
}

static int iop_set_config_itl(struct hptiop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	u32 req32;
	struct hpt_iop_request_set_config __iomem *req;

	req32 = readl(&hba->u.itl.iop->inbound_queue);
	if (req32 == IOPMU_QUEUE_EMPTY)
		return -1;

	req = (struct hpt_iop_request_set_config __iomem *)
			((unsigned long)hba->u.itl.iop + req32);

	memcpy_toio((u8 __iomem *)req + sizeof(struct hpt_iop_request_header),
		(u8 *)config + sizeof(struct hpt_iop_request_header),
		sizeof(struct hpt_iop_request_set_config) -
			sizeof(struct hpt_iop_request_header));

	writel(0, &req->header.flags);
	writel(IOP_REQUEST_TYPE_SET_CONFIG, &req->header.type);
	writel(sizeof(struct hpt_iop_request_set_config), &req->header.size);
	writel(IOP_RESULT_PENDING, &req->header.result);

	if (iop_send_sync_request_itl(hba, req, 20000)) {
		dprintk("Set config send cmd failed\n");
		return -1;
	}

	writel(req32, &hba->u.itl.iop->outbound_queue);
	return 0;
}

static int iop_set_config_mv(struct hptiop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	struct hpt_iop_request_set_config *req = hba->u.mv.internal_req;

	memcpy(req, config, sizeof(struct hpt_iop_request_set_config));
	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG);
	req->header.size =
		cpu_to_le32(sizeof(struct hpt_iop_request_set_config));
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG << 5);
	req->header.context_hi32 = 0;

	if (iop_send_sync_request_mv(hba, 0, 20000)) {
		dprintk("Set config send cmd failed\n");
		return -1;
	}

	return 0;
}

static void hptiop_enable_intr_itl(struct hptiop_hba *hba)
{
	writel(~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0),
		&hba->u.itl.iop->outbound_intmask);
}

static void hptiop_enable_intr_mv(struct hptiop_hba *hba)
{
	writel(MVIOP_MU_OUTBOUND_INT_POSTQUEUE | MVIOP_MU_OUTBOUND_INT_MSG,
		&hba->u.mv.regs->outbound_intmask);
}

static int hptiop_initialize_iop(struct hptiop_hba *hba)
{
	/* enable interrupts */
	hba->ops->enable_intr(hba);

	hba->initialized = 1;

	/* start background tasks */
	if (iop_send_sync_msg(hba,
			IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
		printk(KERN_ERR "scsi%d: failed to start background task\n",
			hba->host->host_no);
		return -1;
	}
	return 0;
}

static void __iomem *hptiop_map_pci_bar(struct hptiop_hba *hba, int index)
{
	u32 mem_base_phy, length;
	void __iomem *mem_base_virt;
	struct pci_dev *pcidev = hba->pcidev;

	if (!(pci_resource_flags(pcidev, index) & IORESOURCE_MEM)) {
		printk(KERN_ERR "scsi%d: pci resource invalid\n",
				hba->host->host_no);
		return NULL;
	}

	mem_base_phy = pci_resource_start(pcidev, index);
	length = pci_resource_len(pcidev, index);
	mem_base_virt = ioremap(mem_base_phy, length);

	if (!mem_base_virt) {
		printk(KERN_ERR "scsi%d: failed to ioremap memory space\n",
				hba->host->host_no);
		return NULL;
	}
	return mem_base_virt;
}

static int hptiop_map_pci_bar_itl(struct hptiop_hba *hba)
{
	hba->u.itl.iop = hptiop_map_pci_bar(hba, 0);
	if (hba->u.itl.iop)
		return 0;
	else
		return -1;
}

static void hptiop_unmap_pci_bar_itl(struct hptiop_hba *hba)
{
	iounmap(hba->u.itl.iop);
}

static int hptiop_map_pci_bar_mv(struct hptiop_hba *hba)
{
	hba->u.mv.regs = hptiop_map_pci_bar(hba, 0);
	if (hba->u.mv.regs == NULL)
		return -1;

	hba->u.mv.mu = hptiop_map_pci_bar(hba, 2);
	if (hba->u.mv.mu == NULL) {
		iounmap(hba->u.mv.regs);
		return -1;
	}

	return 0;
}

static void hptiop_unmap_pci_bar_mv(struct hptiop_hba *hba)
{
	iounmap(hba->u.mv.regs);
	iounmap(hba->u.mv.mu);
}

static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg)
{
	dprintk("iop message 0x%x\n", msg);

	if (msg == IOPMU_INBOUND_MSG0_NOP)
		hba->msg_done = 1;

	if (!hba->initialized)
		return;

	if (msg == IOPMU_INBOUND_MSG0_RESET) {
		atomic_set(&hba->resetting, 0);
		wake_up(&hba->reset_wq);
	} else if (msg <= IOPMU_INBOUND_MSG0_MAX)
		hba->msg_done = 1;
}

static struct hptiop_request *get_req(struct hptiop_hba *hba)
{
	struct hptiop_request *ret;

	dprintk("get_req : req=%p\n", hba->req_list);

	ret = hba->req_list;
	if (ret)
		hba->req_list = ret->next;

	return ret;
}

static void free_req(struct hptiop_hba *hba, struct hptiop_request *req)
{
	dprintk("free_req(%d, %p)\n", req->index, req);
	req->next = hba->req_list;
	hba->req_list = req;
}

static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
				struct hpt_iop_request_scsi_command *req)
{
	struct scsi_cmnd *scp;

	dprintk("hptiop_finish_scsi_req: req=%p, type=%d, "
			"result=%d, context=0x%x tag=%d\n",
			req, req->header.type, req->header.result,
			req->header.context, tag);

	BUG_ON(!req->header.result);
	BUG_ON(req->header.type != cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND));

	scp = hba->reqs[tag].scp;

	if (HPT_SCP(scp)->mapped)
		scsi_dma_unmap(scp);

	switch (le32_to_cpu(req->header.result)) {
	case IOP_RESULT_SUCCESS:
		scsi_set_resid(scp,
			scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
		scp->result = (DID_OK << 16);
		break;
	case IOP_RESULT_BAD_TARGET:
		scp->result = (DID_BAD_TARGET << 16);
		break;
	case IOP_RESULT_BUSY:
		scp->result = (DID_BUS_BUSY << 16);
		break;
	case IOP_RESULT_RESET:
		scp->result = (DID_RESET << 16);
		break;
	case IOP_RESULT_FAIL:
		scp->result = (DID_ERROR << 16);
		break;
	case IOP_RESULT_INVALID_REQUEST:
		scp->result = (DID_ABORT << 16);
		break;
	case IOP_RESULT_CHECK_CONDITION:
		scsi_set_resid(scp,
			scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
		scp->result = SAM_STAT_CHECK_CONDITION;
		memcpy(scp->sense_buffer, &req->sg_list,
			min_t(size_t, SCSI_SENSE_BUFFERSIZE,
				le32_to_cpu(req->dataxfer_length)));
		break;

	default:
		scp->result = DRIVER_INVALID << 24 | DID_ABORT << 16;
		break;
	}

	dprintk("scsi_done(%p)\n", scp);
	scp->scsi_done(scp);
	free_req(hba, &hba->reqs[tag]);
}

static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 _tag)
{
	struct hpt_iop_request_scsi_command *req;
	u32 tag;

	if (hba->iopintf_v2) {
		tag = _tag & ~IOPMU_QUEUE_REQUEST_RESULT_BIT;
		req = hba->reqs[tag].req_virt;
		if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT))
			req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);
	} else {
		tag = _tag;
		req = hba->reqs[tag].req_virt;
	}

	hptiop_finish_scsi_req(hba, tag, req);
}

static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
{
	struct hpt_iop_request_header __iomem *req;
	struct hpt_iop_request_ioctl_command __iomem *p;
	struct hpt_ioctl_k *arg;

	req = (struct hpt_iop_request_header __iomem *)
			((unsigned long)hba->u.itl.iop + tag);
	dprintk("hptiop_iop_request_callback_itl: req=%p, type=%d, "
			"result=%d, context=0x%x tag=%d\n",
			req, readl(&req->type), readl(&req->result),
			readl(&req->context), tag);

	BUG_ON(!readl(&req->result));
	BUG_ON(readl(&req->type) != IOP_REQUEST_TYPE_IOCTL_COMMAND);

	p = (struct hpt_iop_request_ioctl_command __iomem *)req;
	arg = (struct hpt_ioctl_k *)(unsigned long)
		(readl(&req->context) |
			((u64)readl(&req->context_hi32) << 32));

	if (readl(&req->result) == IOP_RESULT_SUCCESS) {
		arg->result = HPT_IOCTL_RESULT_OK;

		if (arg->outbuf_size)
			memcpy_fromio(arg->outbuf,
				&p->buf[(readl(&p->inbuf_size) + 3) & ~3],
				arg->outbuf_size);

		if (arg->bytes_returned)
			*arg->bytes_returned = arg->outbuf_size;
	} else
		arg->result = HPT_IOCTL_RESULT_FAILED;

	arg->done(arg);
	writel(tag, &hba->u.itl.iop->outbound_queue);
}

static irqreturn_t hptiop_intr(int irq, void *dev_id)
{
	struct hptiop_hba *hba = dev_id;
	int handled;
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	handled = hba->ops->iop_intr(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return handled;
}

static int hptiop_buildsgl(struct scsi_cmnd *scp, struct hpt_iopsg *psg)
{
	struct Scsi_Host *host = scp->device->host;
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
	struct scatterlist *sg;
	int idx, nseg;

	nseg = scsi_dma_map(scp);
	BUG_ON(nseg < 0);
	if (!nseg)
		return 0;

	HPT_SCP(scp)->sgcnt = nseg;
	HPT_SCP(scp)->mapped = 1;

	BUG_ON(HPT_SCP(scp)->sgcnt > hba->max_sg_descriptors);

	scsi_for_each_sg(scp, sg, HPT_SCP(scp)->sgcnt, idx) {
		psg[idx].pci_address = cpu_to_le64(sg_dma_address(sg));
		psg[idx].size = cpu_to_le32(sg_dma_len(sg));
		psg[idx].eot = (idx == HPT_SCP(scp)->sgcnt - 1) ?
			cpu_to_le32(1) : 0;
	}
	return HPT_SCP(scp)->sgcnt;
}

static void hptiop_post_req_itl(struct hptiop_hba *hba,
					struct hptiop_request *_req)
{
	struct hpt_iop_request_header *reqhdr = _req->req_virt;

	reqhdr->context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
					(u32)_req->index);
	reqhdr->context_hi32 = 0;

	if (hba->iopintf_v2) {
		u32 size, size_bits;

		size = le32_to_cpu(reqhdr->size);
		if (size < 256)
			size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
		else if (size < 512)
			size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
		else
			size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT |
						IOPMU_QUEUE_ADDR_HOST_BIT;
		writel(_req->req_shifted_phy | size_bits,
			&hba->u.itl.iop->inbound_queue);
	} else
		writel(_req->req_shifted_phy | IOPMU_QUEUE_ADDR_HOST_BIT,
			&hba->u.itl.iop->inbound_queue);
}

static void hptiop_post_req_mv(struct hptiop_hba *hba,
					struct hptiop_request *_req)
{
	struct hpt_iop_request_header *reqhdr = _req->req_virt;
	u32 size, size_bit;

	reqhdr->context = cpu_to_le32(_req->index << 8 |
					IOP_REQUEST_TYPE_SCSI_COMMAND << 5);
	reqhdr->context_hi32 = 0;
	size = le32_to_cpu(reqhdr->size);

	if (size <= 256)
		size_bit = 0;
	else if (size <= 256*2)
		size_bit = 1;
	else if (size <= 256*3)
		size_bit = 2;
	else
		size_bit = 3;

	mv_inbound_write((_req->req_shifted_phy << 5) |
		MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bit, hba);
}

static int hptiop_queuecommand(struct scsi_cmnd *scp,
				void (*done)(struct scsi_cmnd *))
{
	struct Scsi_Host *host = scp->device->host;
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
	struct hpt_iop_request_scsi_command *req;
	int sg_count = 0;
	struct hptiop_request *_req;

	BUG_ON(!done);
	scp->scsi_done = done;

	_req = get_req(hba);
	if (_req == NULL) {
		dprintk("hptiop_queuecmd : no free req\n");
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	_req->scp = scp;

	dprintk("hptiop_queuecmd(scp=%p) %d/%d/%d/%d cdb=(%x-%x-%x) "
			"req_index=%d, req=%p\n",
			scp,
			host->host_no, scp->device->channel,
			scp->device->id, scp->device->lun,
			((u32 *)scp->cmnd)[0],
			((u32 *)scp->cmnd)[1],
			((u32 *)scp->cmnd)[2],
			_req->index, _req->req_virt);

	scp->result = 0;

	if (scp->device->channel || scp->device->lun ||
			scp->device->id > hba->max_devices) {
		scp->result = DID_BAD_TARGET << 16;
		free_req(hba, _req);
		goto cmd_done;
	}

	req = _req->req_virt;

	/* build S/G table */
	sg_count = hptiop_buildsgl(scp, req->sg_list);
	if (!sg_count)
		HPT_SCP(scp)->mapped = 0;

	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND);
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->dataxfer_length = cpu_to_le32(scsi_bufflen(scp));
	req->channel = scp->device->channel;
	req->target = scp->device->id;
	req->lun = scp->device->lun;
	req->header.size = cpu_to_le32(
				sizeof(struct hpt_iop_request_scsi_command)
				- sizeof(struct hpt_iopsg)
				+ sg_count * sizeof(struct hpt_iopsg));

	memcpy(req->cdb, scp->cmnd, sizeof(req->cdb));
	hba->ops->post_req(hba, _req);
	return 0;

cmd_done:
	dprintk("scsi_done(scp=%p)\n", scp);
	scp->scsi_done(scp);
	return 0;
}

static const char *hptiop_info(struct Scsi_Host *host)
{
	return driver_name_long;
}

static int hptiop_reset_hba(struct hptiop_hba *hba)
{
	if (atomic_xchg(&hba->resetting, 1) == 0) {
		atomic_inc(&hba->reset_count);
		hba->ops->post_msg(hba, IOPMU_INBOUND_MSG0_RESET);
	}

	wait_event_timeout(hba->reset_wq,
			atomic_read(&hba->resetting) == 0, 60 * HZ);

	if (atomic_read(&hba->resetting)) {
		/* IOP is in unknown state, abort reset */
		printk(KERN_ERR "scsi%d: reset failed\n", hba->host->host_no);
		return -1;
	}

	if (iop_send_sync_msg(hba,
		IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
		dprintk("scsi%d: failed to start background task\n",
				hba->host->host_no);
	}

	return 0;
}

static int hptiop_reset(struct scsi_cmnd *scp)
{
	struct Scsi_Host *host = scp->device->host;
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

	printk(KERN_WARNING "hptiop_reset(%d/%d/%d) scp=%p\n",
			scp->device->host->host_no, scp->device->channel,
			scp->device->id, scp);

	return hptiop_reset_hba(hba) ? FAILED : SUCCESS;
}

static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev,
						int queue_depth)
{
	struct hptiop_hba *hba = (struct hptiop_hba *)sdev->host->hostdata;

	if (queue_depth > hba->max_requests)
		queue_depth = hba->max_requests;
	scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
	return queue_depth;
}

static ssize_t hptiop_show_version(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", driver_ver);
}

static ssize_t hptiop_show_fw_version(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *host = class_to_shost(dev);
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

	return snprintf(buf, PAGE_SIZE, "%d.%d.%d.%d\n",
			hba->firmware_version >> 24,
			(hba->firmware_version >> 16) & 0xff,
			(hba->firmware_version >> 8) & 0xff,
			hba->firmware_version & 0xff);
}

static struct device_attribute hptiop_attr_version = {
	.attr = {
		.name = "driver-version",
		.mode = S_IRUGO,
	},
	.show = hptiop_show_version,
};

static struct device_attribute hptiop_attr_fw_version = {
	.attr = {
		.name = "firmware-version",
		.mode = S_IRUGO,
	},
	.show = hptiop_show_fw_version,
};

static struct device_attribute *hptiop_attrs[] = {
	&hptiop_attr_version,
	&hptiop_attr_fw_version,
	NULL
};

static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = driver_name,
	.queuecommand = hptiop_queuecommand,
	.eh_device_reset_handler = hptiop_reset,
	.eh_bus_reset_handler = hptiop_reset,
	.info = hptiop_info,
	.emulated = 0,
	.use_clustering = ENABLE_CLUSTERING,
	.proc_name = driver_name,
	.shost_attrs = hptiop_attrs,
	.this_id = -1,
	.change_queue_depth = hptiop_adjust_disk_queue_depth,
};

static int hptiop_internal_memalloc_mv(struct hptiop_hba *hba)
{
	hba->u.mv.internal_req = dma_alloc_coherent(&hba->pcidev->dev,
			0x800, &hba->u.mv.internal_req_phy, GFP_KERNEL);
	if (hba->u.mv.internal_req)
		return 0;
	else
		return -1;
}

static int hptiop_internal_memfree_mv(struct hptiop_hba *hba)
{
	if (hba->u.mv.internal_req) {
		dma_free_coherent(&hba->pcidev->dev, 0x800,
			hba->u.mv.internal_req, hba->u.mv.internal_req_phy);
		return 0;
	} else
		return -1;
}

static int __devinit hptiop_probe(struct pci_dev *pcidev,
					const struct pci_device_id *id)
{
	struct Scsi_Host *host = NULL;
	struct hptiop_hba *hba;
	struct hpt_iop_request_get_config iop_config;
	struct hpt_iop_request_set_config set_config;
	dma_addr_t start_phy;
	void *start_virt;
	u32 offset, i, req_size;

	dprintk("hptiop_probe(%p)\n", pcidev);

	if (pci_enable_device(pcidev)) {
		printk(KERN_ERR "hptiop: failed to enable pci device\n");
		return -ENODEV;
	}

	printk(KERN_INFO "adapter at PCI %d:%d:%d, IRQ %d\n",
		pcidev->bus->number, pcidev->devfn >> 3, pcidev->devfn & 7,
		pcidev->irq);

	pci_set_master(pcidev);

	/* Enable 64bit DMA if possible */
	if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(64))) {
		if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(32))) {
			printk(KERN_ERR "hptiop: failed to set dma_mask\n");
			goto disable_pci_device;
		}
	}

	if (pci_request_regions(pcidev, driver_name)) {
		printk(KERN_ERR "hptiop: pci_request_regions failed\n");
		goto disable_pci_device;
	}

	host = scsi_host_alloc(&driver_template, sizeof(struct hptiop_hba));
	if (!host) {
		printk(KERN_ERR "hptiop: failed to alloc scsi host\n");
		goto free_pci_regions;
	}

	hba = (struct hptiop_hba *)host->hostdata;

	hba->ops = (struct hptiop_adapter_ops *)id->driver_data;
	hba->pcidev = pcidev;
	hba->host = host;
	hba->initialized = 0;
	hba->iopintf_v2 = 0;

	atomic_set(&hba->resetting, 0);
	atomic_set(&hba->reset_count, 0);

	init_waitqueue_head(&hba->reset_wq);
	init_waitqueue_head(&hba->ioctl_wq);

	host->max_lun = 1;
	host->max_channel = 0;
	host->io_port = 0;
	host->n_io_port = 0;
	host->irq = pcidev->irq;

	if (hba->ops->map_pci_bar(hba))
		goto free_scsi_host;

	if (hba->ops->iop_wait_ready(hba, 20000)) {
		printk(KERN_ERR "scsi%d: firmware not ready\n",
				hba->host->host_no);
		goto unmap_pci_bar;
	}

	if (hba->ops->internal_memalloc) {
		if (hba->ops->internal_memalloc(hba)) {
			printk(KERN_ERR "scsi%d: internal_memalloc failed\n",
				hba->host->host_no);
			goto unmap_pci_bar;
		}
	}

	if (hba->ops->get_config(hba, &iop_config)) {
		printk(KERN_ERR "scsi%d: get config failed\n",
				hba->host->host_no);
		goto unmap_pci_bar;
	}

	hba->max_requests = min(le32_to_cpu(iop_config.max_requests),
				HPTIOP_MAX_REQUESTS);
	hba->max_devices = le32_to_cpu(iop_config.max_devices);
	hba->max_request_size = le32_to_cpu(iop_config.request_size);
	hba->max_sg_descriptors = le32_to_cpu(iop_config.max_sg_count);
	hba->firmware_version = le32_to_cpu(iop_config.firmware_version);
	hba->interface_version = le32_to_cpu(iop_config.interface_version);
	hba->sdram_size = le32_to_cpu(iop_config.sdram_size);

	if (hba->firmware_version > 0x01020000 ||
			hba->interface_version > 0x01020000)
		hba->iopintf_v2 = 1;

	host->max_sectors = le32_to_cpu(iop_config.data_transfer_length) >> 9;
	host->max_id = le32_to_cpu(iop_config.max_devices);
	host->sg_tablesize = le32_to_cpu(iop_config.max_sg_count);
	host->can_queue = le32_to_cpu(iop_config.max_requests);
	host->cmd_per_lun = le32_to_cpu(iop_config.max_requests);
	host->max_cmd_len = 16;

	req_size = sizeof(struct hpt_iop_request_scsi_command)
		+ sizeof(struct hpt_iopsg) * (hba->max_sg_descriptors - 1);
	if ((req_size & 0x1f) != 0)
		req_size = (req_size + 0x1f) & ~0x1f;

	memset(&set_config, 0, sizeof(struct hpt_iop_request_set_config));
	set_config.iop_id = cpu_to_le32(host->host_no);
	set_config.vbus_id = cpu_to_le16(host->host_no);
	set_config.max_host_request_size = cpu_to_le16(req_size);

	if (hba->ops->set_config(hba, &set_config)) {
		printk(KERN_ERR "scsi%d: set config failed\n",
				hba->host->host_no);
		goto unmap_pci_bar;
	}

	pci_set_drvdata(pcidev, host);

	if (request_irq(pcidev->irq, hptiop_intr, IRQF_SHARED,
					driver_name, hba)) {
		printk(KERN_ERR "scsi%d: request irq %d failed\n",
					hba->host->host_no, pcidev->irq);
		goto unmap_pci_bar;
	}

	/* Allocate request mem */

	dprintk("req_size=%d, max_requests=%d\n", req_size, hba->max_requests);

	hba->req_size = req_size;
	start_virt = dma_alloc_coherent(&pcidev->dev,
				hba->req_size*hba->max_requests + 0x20,
				&start_phy, GFP_KERNEL);

	if (!start_virt) {
		printk(KERN_ERR "scsi%d: failed to alloc request mem\n",
					hba->host->host_no);
		goto free_request_irq;
	}

	hba->dma_coherent = start_virt;
	hba->dma_coherent_handle = start_phy;

	if ((start_phy & 0x1f) != 0) {
		offset = ((start_phy + 0x1f) & ~0x1f) - start_phy;
		start_phy += offset;
		start_virt += offset;
	}

	hba->req_list = start_virt;
	for (i = 0; i < hba->max_requests; i++) {
		hba->reqs[i].next = NULL;
		hba->reqs[i].req_virt = start_virt;
		hba->reqs[i].req_shifted_phy = start_phy >> 5;
		hba->reqs[i].index = i;
		free_req(hba, &hba->reqs[i]);
		start_virt = (char *)start_virt + hba->req_size;
		start_phy = start_phy + hba->req_size;
	}

	/* Enable Interrupt and start background task */
	if (hptiop_initialize_iop(hba))
		goto free_request_mem;

	if (scsi_add_host(host, &pcidev->dev)) {
		printk(KERN_ERR "scsi%d: scsi_add_host failed\n",
					hba->host->host_no);
		goto free_request_mem;
	}

	scsi_scan_host(host);

	dprintk("scsi%d: hptiop_probe successful\n", hba->host->host_no);
	return 0;

free_request_mem:
	dma_free_coherent(&hba->pcidev->dev,
		hba->req_size * hba->max_requests + 0x20,
		hba->dma_coherent, hba->dma_coherent_handle);

free_request_irq:
	free_irq(hba->pcidev->irq, hba);

unmap_pci_bar:
	if (hba->ops->internal_memfree)
		hba->ops->internal_memfree(hba);

	hba->ops->unmap_pci_bar(hba);

free_scsi_host:
	scsi_host_put(host);

free_pci_regions:
	pci_release_regions(pcidev);

disable_pci_device:
	pci_disable_device(pcidev);

	dprintk("scsi%d: hptiop_probe failed\n", host->host_no);
	return -ENODEV;
}

static void hptiop_shutdown(struct pci_dev *pcidev)
{
	struct Scsi_Host *host = pci_get_drvdata(pcidev);
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

	dprintk("hptiop_shutdown(%p)\n", hba);

	/* stop the iop */
	if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
		printk(KERN_ERR "scsi%d: iop shutdown timed out\n",
				hba->host->host_no);

	/* disable all outbound interrupts */
	hba->ops->disable_intr(hba);
}

static void hptiop_disable_intr_itl(struct hptiop_hba *hba)
{
	u32 int_mask;

	int_mask = readl(&hba->u.itl.iop->outbound_intmask);
	writel(int_mask |
		IOPMU_OUTBOUND_INT_MSG0 | IOPMU_OUTBOUND_INT_POSTQUEUE,
		&hba->u.itl.iop->outbound_intmask);
	readl(&hba->u.itl.iop->outbound_intmask);
}

static void hptiop_disable_intr_mv(struct hptiop_hba *hba)
{
	writel(0, &hba->u.mv.regs->outbound_intmask);
	readl(&hba->u.mv.regs->outbound_intmask);
}

static void hptiop_remove(struct pci_dev *pcidev)
{
	struct Scsi_Host *host = pci_get_drvdata(pcidev);
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

	dprintk("scsi%d: hptiop_remove\n", hba->host->host_no);

	scsi_remove_host(host);

	hptiop_shutdown(pcidev);

	free_irq(hba->pcidev->irq, hba);

	dma_free_coherent(&hba->pcidev->dev,
			hba->req_size * hba->max_requests + 0x20,
			hba->dma_coherent,
			hba->dma_coherent_handle);

	if (hba->ops->internal_memfree)
		hba->ops->internal_memfree(hba);

	hba->ops->unmap_pci_bar(hba);

	pci_release_regions(hba->pcidev);
	pci_set_drvdata(hba->pcidev, NULL);
	pci_disable_device(hba->pcidev);

	scsi_host_put(host);
}

static struct hptiop_adapter_ops hptiop_itl_ops = {
	.iop_wait_ready = iop_wait_ready_itl,
	.internal_memalloc = NULL,
	.internal_memfree = NULL,
	.map_pci_bar = hptiop_map_pci_bar_itl,
	.unmap_pci_bar = hptiop_unmap_pci_bar_itl,
	.enable_intr = hptiop_enable_intr_itl,
	.disable_intr = hptiop_disable_intr_itl,
	.get_config = iop_get_config_itl,
	.set_config = iop_set_config_itl,
	.iop_intr = iop_intr_itl,
	.post_msg = hptiop_post_msg_itl,
	.post_req = hptiop_post_req_itl,
};

static struct hptiop_adapter_ops hptiop_mv_ops = {
	.iop_wait_ready = iop_wait_ready_mv,
	.internal_memalloc = hptiop_internal_memalloc_mv,
	.internal_memfree = hptiop_internal_memfree_mv,
	.map_pci_bar = hptiop_map_pci_bar_mv,
	.unmap_pci_bar = hptiop_unmap_pci_bar_mv,
	.enable_intr = hptiop_enable_intr_mv,
	.disable_intr = hptiop_disable_intr_mv,
	.get_config = iop_get_config_mv,
	.set_config = iop_set_config_mv,
	.iop_intr = iop_intr_mv,
	.post_msg = hptiop_post_msg_mv,
	.post_req = hptiop_post_req_mv,
};

static struct pci_device_id hptiop_id_table[] = {
	{ PCI_VDEVICE(TTI, 0x3220), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3320), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3520), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4320), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3510), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3511), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3521), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3522), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3410), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3540), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3530), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3560), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4322), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4321), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4210), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4211), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4310), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4311), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3120), (kernel_ulong_t)&hptiop_mv_ops },
	{ PCI_VDEVICE(TTI, 0x3122), (kernel_ulong_t)&hptiop_mv_ops },
	{ PCI_VDEVICE(TTI, 0x3020), (kernel_ulong_t)&hptiop_mv_ops },
	{},
};

MODULE_DEVICE_TABLE(pci, hptiop_id_table);

static struct pci_driver hptiop_pci_driver = {
	.name = driver_name,
	.id_table = hptiop_id_table,
	.probe = hptiop_probe,
	.remove = hptiop_remove,
	.shutdown = hptiop_shutdown,
};

static int __init hptiop_module_init(void)
{
	printk(KERN_INFO "%s %s\n", driver_name_long, driver_ver);
	return pci_register_driver(&hptiop_pci_driver);
}

static void __exit hptiop_module_exit(void)
{
	pci_unregister_driver(&hptiop_pci_driver);
}

module_init(hptiop_module_init);
module_exit(hptiop_module_exit);

MODULE_LICENSE("GPL");