 * Copyright (C) 2005 - 2008 ServerEngines
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 * Contact Information:
 * linux-drivers@serverengines.com
 * 209 N. Fair Oaks Ave
be_function_internal_query_firmware_config(struct be_function_object *pfob,
				struct BE_FIRMWARE_CONFIG *config)
	struct FWCMD_COMMON_FIRMWARE_CONFIG *fwcmd = NULL;
	struct MCC_WRB_AMAP *wrb = NULL;
	struct be_mcc_wrb_response_copy rc;
	spin_lock_irqsave(&pfob->post_lock, irql);
	wrb = be_function_peek_mcc_wrb(pfob);
		TRACE(DL_ERR, "MCC wrb peek failed.");
		status = BE_STATUS_NO_MCC_WRB;
	/* Prepares an embedded fwcmd, including request/response sizes. */
	fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_FIRMWARE_CONFIG);
	rc.length = FIELD_SIZEOF(struct FWCMD_COMMON_FIRMWARE_CONFIG,
	rc.fwcmd_offset = offsetof(struct FWCMD_COMMON_FIRMWARE_CONFIG,
	/* Post the f/w command */
	status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL,
					NULL, NULL, NULL, fwcmd, &rc);
	spin_unlock_irqrestore(&pfob->post_lock, irql);
	if (pfob->pend_queue_driving && pfob->mcc) {
		pfob->pend_queue_driving = 0;
		be_drive_mcc_wrb_queue(pfob->mcc);
    This allocates and initializes a function object based on the information
    provided by upper layer drivers.
    Returns BE_SUCCESS on success and an appropriate int on failure.
    A function object represents a single BladeEngine (logical) PCI function;
    that is, a function object represents either the networking side of
    BladeEngine or the iSCSI side of BladeEngine.
    This routine will also detect and create an appropriate PD object for the
    PCI function as needed.
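/*
 * Illustrative usage sketch (not part of the original driver source): an
 * upper-layer driver would map the CSR/doorbell/PCI BARs, supply a mailbox
 * ring_desc, then create the function object and eventually clean it up.
 * The names my_csr_va, my_db_va, my_pci_va and my_mailbox below are
 * hypothetical, caller-owned values.
 *
 *	struct be_function_object pfob;
 *	int status;
 *
 *	status = be_function_object_create(my_csr_va, my_db_va, my_pci_va,
 *				BE_FUNCTION_TYPE_NETWORK, &my_mailbox, &pfob);
 *	if (status != BE_SUCCESS)
 *		return status;
 *	...
 *	be_function_cleanup(&pfob);	-- also destroys the object via
 *					   be_function_object_destroy()
 */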
be_function_object_create(u8 __iomem *csr_va, u8 __iomem *db_va,
		u8 __iomem *pci_va, u32 function_type,
		struct ring_desc *mailbox, struct be_function_object *pfob)
	ASSERT(pfob);	/* not a magic assert */
	ASSERT(function_type <= 2);
	TRACE(DL_INFO, "Create function object. type:%s object:0x%p",
		(function_type == BE_FUNCTION_TYPE_ISCSI ? "iSCSI" :
		(function_type == BE_FUNCTION_TYPE_NETWORK ? "Network" :
	memset(pfob, 0, sizeof(*pfob));
	pfob->type = function_type;
	pfob->csr_va = csr_va;
	pfob->pci_va = pci_va;
	spin_lock_init(&pfob->cq_lock);
	spin_lock_init(&pfob->post_lock);
	spin_lock_init(&pfob->mcc_context_lock);
	pfob->pci_function_number = 1;
	pfob->emulate = false;
	TRACE(DL_NOTE, "Non-emulation mode");
	status = be_drive_POST(pfob);
	if (status != BE_SUCCESS) {
		TRACE(DL_ERR, "BladeEngine POST failed.");
	/* Initialize the mailbox */
	status = be_mpu_init_mailbox(pfob, mailbox);
	if (status != BE_SUCCESS) {
		TRACE(DL_ERR, "Failed to initialize mailbox.");
	 * Cache the firmware config for ASSERTs in hwclib and later
	status = be_function_internal_query_firmware_config(pfob,
	if (status != BE_SUCCESS) {
		TRACE(DL_ERR, "Failed to query firmware config.");
	if (status != BE_SUCCESS) {
		/* No cleanup necessary */
		TRACE(DL_ERR, "Failed to create function.");
		memset(pfob, 0, sizeof(*pfob));
    This routine drops the reference count on a given function object. Once
    the reference count falls to zero, the function object is destroyed and all
    resources held are freed.
    FunctionObject - The function object to drop the reference to.
int be_function_object_destroy(struct be_function_object *pfob)
	TRACE(DL_INFO, "Destroy pfob. Object:0x%p",
	ASSERT(pfob->mcc == NULL);
int be_function_cleanup(struct be_function_object *pfob)
	struct PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP ctrl;
	if (pfob->type == BE_FUNCTION_TYPE_NETWORK) {
		status = be_rxf_multicast_config(pfob, false, 0,
						NULL, NULL, NULL, NULL);
		ASSERT(status == BE_SUCCESS);
	status = be_rxf_vlan_config(pfob, false, 0, NULL, NULL, NULL, NULL);
	ASSERT(status == BE_SUCCESS);
	 * MCC Queue -- Switches to mailbox mode. May want to destroy
	 * all but the MCC CQ before this call if polling CQ is much better
	 * performance than polling mailbox register.
	status = be_mcc_ring_destroy(pfob->mcc);
	 * If interrupts are disabled, clear any CEV interrupt assertions that
	 * fired after we stopped processing EQs.
	ctrl.dw[0] = PCICFG1_READ(pfob, host_timer_int_ctrl);
	host_intr = AMAP_GET_BITS_PTR(PCICFG_HOST_TIMER_INT_CTRL_CSR,
	if (pfob->type == BE_FUNCTION_TYPE_NETWORK)
		isr = CSR_READ(pfob, cev.isr1);
		isr = CSR_READ(pfob, cev.isr0);
		/* This should never happen... */
		TRACE(DL_ERR, "function_cleanup called with interrupt enabled");
	/* Function object destroy */
	status = be_function_object_destroy(pfob);
	ASSERT(status == BE_SUCCESS);
be_function_prepare_embedded_fwcmd(struct be_function_object *pfob,
	struct MCC_WRB_AMAP *wrb, u32 payld_len, u32 request_length,
	u32 response_length, u32 opcode, u32 subsystem)
	struct FWCMD_REQUEST_HEADER *header = NULL;
	n = offsetof(struct BE_MCC_WRB_AMAP, payload)/8;
	AMAP_SET_BITS_PTR(MCC_WRB, embedded, wrb, 1);
	AMAP_SET_BITS_PTR(MCC_WRB, payload_length, wrb, min(payld_len, n));
	header = (struct FWCMD_REQUEST_HEADER *)((u8 *)wrb + n);
	header->request_length = max(request_length, response_length);
	header->opcode = opcode;
	header->subsystem = subsystem;
be_function_prepare_nonembedded_fwcmd(struct be_function_object *pfob,
	struct MCC_WRB_AMAP *wrb,
	void *fwcmd_va, u64 fwcmd_pa,
	u32 opcode, u32 subsystem)
	struct FWCMD_REQUEST_HEADER *header = NULL;
	struct MCC_WRB_PAYLOAD_AMAP *plp;
	header = (struct FWCMD_REQUEST_HEADER *) fwcmd_va;
	AMAP_SET_BITS_PTR(MCC_WRB, embedded, wrb, 0);
	AMAP_SET_BITS_PTR(MCC_WRB, payload_length, wrb, payld_len);
	 * Assume one fragment. The caller may override the SGL by
	 * rewriting the 0th length and adding more entries. The caller
	 * must then also update the sge_count.
	AMAP_SET_BITS_PTR(MCC_WRB, sge_count, wrb, 1);
	n = offsetof(struct BE_MCC_WRB_AMAP, payload)/8;
	plp = (struct MCC_WRB_PAYLOAD_AMAP *)((u8 *)wrb + n);
	AMAP_SET_BITS_PTR(MCC_WRB_PAYLOAD, sgl[0].length, plp, payld_len);
	AMAP_SET_BITS_PTR(MCC_WRB_PAYLOAD, sgl[0].pa_lo, plp, (u32)fwcmd_pa);
	AMAP_SET_BITS_PTR(MCC_WRB_PAYLOAD, sgl[0].pa_hi, plp,
					upper_32_bits(fwcmd_pa));
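	/*
	 * Caller-side sketch (illustrative, not in the original source): to
	 * split the payload across two fragments as the comment above allows,
	 * a caller could rewrite the first SGL entry, fill in a second one,
	 * and bump sge_count. frag0_len, frag1_len and frag1_pa are
	 * hypothetical caller-owned values, and the sgl[1] field names are
	 * assumed to mirror sgl[0].
	 *
	 *	AMAP_SET_BITS_PTR(MCC_WRB_PAYLOAD, sgl[0].length, plp, frag0_len);
	 *	AMAP_SET_BITS_PTR(MCC_WRB_PAYLOAD, sgl[1].length, plp, frag1_len);
	 *	AMAP_SET_BITS_PTR(MCC_WRB_PAYLOAD, sgl[1].pa_lo, plp, (u32)frag1_pa);
	 *	AMAP_SET_BITS_PTR(MCC_WRB_PAYLOAD, sgl[1].pa_hi, plp,
	 *				upper_32_bits(frag1_pa));
	 *	AMAP_SET_BITS_PTR(MCC_WRB, sge_count, wrb, 2);
	 */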
	header->request_length = max(request_length, response_length);
	header->opcode = opcode;
	header->subsystem = subsystem;
struct MCC_WRB_AMAP *
be_function_peek_mcc_wrb(struct be_function_object *pfob)
	struct MCC_WRB_AMAP *wrb = NULL;
		wrb = _be_mpu_peek_ring_wrb(pfob->mcc, false);
		offset = offsetof(struct BE_MCC_MAILBOX_AMAP, wrb)/8;
		wrb = (struct MCC_WRB_AMAP *) ((u8 *) pfob->mailbox.va +
	memset(wrb, 0, sizeof(struct MCC_WRB_AMAP));
#if defined(BE_DEBUG)
void be_function_debug_print_wrb(struct be_function_object *pfob,
		struct MCC_WRB_AMAP *wrb, void *optional_fwcmd_va,
		struct be_mcc_wrb_context *wrb_context)
	struct FWCMD_REQUEST_HEADER *header = NULL;
	embedded = AMAP_GET_BITS_PTR(MCC_WRB, embedded, wrb);
		n = offsetof(struct BE_MCC_WRB_AMAP, payload)/8;
		header = (struct FWCMD_REQUEST_HEADER *)((u8 *)wrb + n);
		header = (struct FWCMD_REQUEST_HEADER *) optional_fwcmd_va;
	/* Save the completed count before posting for a debug assert. */
		wrb_context->opcode = header->opcode;
		wrb_context->subsystem = header->subsystem;
		wrb_context->opcode = 0;
		wrb_context->subsystem = 0;
#define be_function_debug_print_wrb(a_, b_, c_, d_)
be_function_post_mcc_wrb(struct be_function_object *pfob,
		struct MCC_WRB_AMAP *wrb,
		struct be_generic_q_ctxt *q_ctxt,
		mcc_wrb_cqe_callback cb, void *cb_context,
		mcc_wrb_cqe_callback internal_cb,
		void *internal_cb_context, void *optional_fwcmd_va,
		struct be_mcc_wrb_response_copy *rc)
	struct be_mcc_wrb_context *wrb_context = NULL;
		/* Initialize context. */
		q_ctxt->context.internal_cb = internal_cb;
		q_ctxt->context.internal_cb_context = internal_cb_context;
		q_ctxt->context.cb = cb;
		q_ctxt->context.cb_context = cb_context;
			q_ctxt->context.copy.length = rc->length;
			q_ctxt->context.copy.fwcmd_offset = rc->fwcmd_offset;
			q_ctxt->context.copy.va = rc->va;
			q_ctxt->context.copy.length = 0;
		q_ctxt->context.optional_fwcmd_va = optional_fwcmd_va;
		/* Queue this request */
		status = be_function_queue_mcc_wrb(pfob, q_ctxt);
	 * Allocate a WRB context struct to hold the callback pointers,
	 * status, etc. This is required if commands complete out of order.
	wrb_context = _be_mcc_allocate_wrb_context(pfob);
		TRACE(DL_WARN, "Failed to allocate MCC WRB context.");
		status = BE_STATUS_SYSTEM_RESOURCES;
	/* Initialize context. */
	memset(wrb_context, 0, sizeof(*wrb_context));
	wrb_context->internal_cb = internal_cb;
	wrb_context->internal_cb_context = internal_cb_context;
	wrb_context->cb = cb;
	wrb_context->cb_context = cb_context;
		wrb_context->copy.length = rc->length;
		wrb_context->copy.fwcmd_offset = rc->fwcmd_offset;
		wrb_context->copy.va = rc->va;
		wrb_context->copy.length = 0;
	wrb_context->wrb = wrb;
	 * Copy the context pointer into the WRB opaque tag field.
	 * Verify assumption of 64-bit tag with a compile time assert.
	p = (u64 *) ((u8 *)wrb + offsetof(struct BE_MCC_WRB_AMAP, tag)/8);
	*p = (u64)(size_t)wrb_context;
	/* Print info about this FWCMD for debug builds. */
	be_function_debug_print_wrb(pfob, wrb, optional_fwcmd_va, wrb_context);
	 * issue the WRB to the MPU as appropriate
		 * we're in WRB mode, pass to the mcc layer
		status = _be_mpu_post_wrb_ring(pfob->mcc, wrb, wrb_context);
		 * we're in mailbox mode
		status = _be_mpu_post_wrb_mailbox(pfob, wrb, wrb_context);
		/* mailbox mode always completes synchronously */
		ASSERT(status != BE_STATUS_PENDING);
be_function_ring_destroy(struct be_function_object *pfob,
		u32 id, u32 ring_type, mcc_wrb_cqe_callback cb,
		void *cb_context, mcc_wrb_cqe_callback internal_cb,
		void *internal_cb_context)
	struct FWCMD_COMMON_RING_DESTROY *fwcmd = NULL;
	struct MCC_WRB_AMAP *wrb = NULL;
	spin_lock_irqsave(&pfob->post_lock, irql);
	TRACE(DL_INFO, "Destroy ring id:%d type:%d", id, ring_type);
	wrb = be_function_peek_mcc_wrb(pfob);
		TRACE(DL_ERR, "No free MCC WRBs in destroy ring.");
		status = BE_STATUS_NO_MCC_WRB;
	/* Prepares an embedded fwcmd, including request/response sizes. */
	fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_RING_DESTROY);
	fwcmd->params.request.id = id;
	fwcmd->params.request.ring_type = ring_type;
	/* Post the f/w command */
	status = be_function_post_mcc_wrb(pfob, wrb, NULL, cb, cb_context,
				internal_cb, internal_cb_context, fwcmd, NULL);
	if (status != BE_SUCCESS && status != BE_PENDING) {
		TRACE(DL_ERR, "Ring destroy fwcmd failed. id:%d ring_type:%d",
	spin_unlock_irqrestore(&pfob->post_lock, irql);
	if (pfob->pend_queue_driving && pfob->mcc) {
		pfob->pend_queue_driving = 0;
		be_drive_mcc_wrb_queue(pfob->mcc);
be_rd_to_pa_list(struct ring_desc *rd, struct PHYS_ADDR *pa_list, u32 max_num)
	u32 num_pages = PAGES_SPANNED(rd->va, rd->length);
	for (i = 0; i < min(num_pages, max_num); i++) {
		lepa = cpu_to_le64(pa);
		pa_list[i].lo = (u32)lepa;
		pa_list[i].hi = upper_32_bits(lepa);
/*-----------------------------------------------------------------------------
 * Function: be_function_get_fw_version
 *   Retrieves the firmware version on the adapter. If the callback is
 *   NULL, this call executes synchronously. If the callback is not NULL,
 *   the returned status will be BE_PENDING if the command was issued
 *   fwv         - Pointer to response buffer if callback is NULL.
 *   cb          - Callback function invoked when the FWCMD completes.
 *   cb_context  - Passed to the callback function.
 * return pend_status - BE_SUCCESS (0) on success.
 *			BE_PENDING (positive value) if the FWCMD
 *			completion is pending. Negative error code on failure.
 *---------------------------------------------------------------------------
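/*
 * Illustrative usage sketch (not part of the original driver source): for a
 * synchronous query, pass a response buffer and a NULL callback; the FWCMD
 * completes before the call returns. fw_version below is a hypothetical
 * caller-owned buffer.
 *
 *	struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD fw_version;
 *	int status;
 *
 *	status = be_function_get_fw_version(pfob, &fw_version, NULL, NULL);
 *	if (status != BE_SUCCESS)
 *		TRACE(DL_ERR, "GET_FW_VERSION failed.");
 *
 * For an asynchronous query, pass a non-NULL callback instead; a BE_PENDING
 * return means the completion will be delivered to that callback later.
 */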
be_function_get_fw_version(struct be_function_object *pfob,
		struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD *fwv,
		mcc_wrb_cqe_callback cb, void *cb_context)
	int status = BE_SUCCESS;
	struct MCC_WRB_AMAP *wrb = NULL;
	struct FWCMD_COMMON_GET_FW_VERSION *fwcmd = NULL;
	struct be_mcc_wrb_response_copy rc;
	spin_lock_irqsave(&pfob->post_lock, irql);
	wrb = be_function_peek_mcc_wrb(pfob);
		TRACE(DL_ERR, "MCC wrb peek failed.");
		status = BE_STATUS_NO_MCC_WRB;
		TRACE(DL_ERR, "callback and response buffer NULL!");
	/* Prepares an embedded fwcmd, including request/response sizes. */
	fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_GET_FW_VERSION);
	rc.length = FIELD_SIZEOF(struct FWCMD_COMMON_GET_FW_VERSION,
	rc.fwcmd_offset = offsetof(struct FWCMD_COMMON_GET_FW_VERSION,
	/* Post the f/w command */
	status = be_function_post_mcc_wrb(pfob, wrb, NULL, cb,
				cb_context, NULL, NULL, fwcmd, &rc);
	spin_unlock_irqrestore(&pfob->post_lock, irql);
	if (pfob->pend_queue_driving && pfob->mcc) {
		pfob->pend_queue_driving = 0;
		be_drive_mcc_wrb_queue(pfob->mcc);
be_function_queue_mcc_wrb(struct be_function_object *pfob,
		struct be_generic_q_ctxt *q_ctxt)
	 * issue the WRB to the MPU as appropriate
	/* We're in ring mode. Queue this item. */
	pfob->mcc->backlog_length++;
	list_add_tail(&q_ctxt->context.list, &pfob->mcc->backlog);