2 * csr1212.c -- IEEE 1212 Control and Status Register support for Linux
4 * Copyright (C) 2003 Francois Retief <fgretief@sun.ac.za>
5 * Steve Kinneberg <kinnebergsteve@acmsystems.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
19 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
21 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
23 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
24 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
25 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
26 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
27 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 * - Verify interface consistency: i.e., public functions that take a size
33 * parameter expect size to be in bytes.
36 #include <linux/errno.h>
37 #include <linux/string.h>
39 #include <asm/byteorder.h>
44 /* Permitted key type for each key id */
45 #define __I (1 << CSR1212_KV_TYPE_IMMEDIATE)
46 #define __C (1 << CSR1212_KV_TYPE_CSR_OFFSET)
47 #define __D (1 << CSR1212_KV_TYPE_DIRECTORY)
48 #define __L (1 << CSR1212_KV_TYPE_LEAF)
/* Bitmask of value types legal for each key id below 0x30.
 * csr1212_new_keyval() rejects any type/id combination whose bit is clear
 * here; ids >= 0x30 are not checked. */
49 static const u8 csr1212_key_id_type_map[0x30] = {
50 __C, /* used by Apple iSight */
51 __D | __L, /* Descriptor */
52 __I | __D | __L, /* Bus_Dependent_Info */
53 __I | __D | __L, /* Vendor */
54 __I, /* Hardware_Version */
56 __D | __L | __I, /* Module */
57 __I, 0, 0, 0, /* used by Apple iSight, Reserved */
58 __I, /* Node_Capabilities */
60 0, 0, 0, /* Reserved */
62 __I, /* Specifier_ID */
64 __I | __C | __D | __L, /* Dependent_Info */
65 __L, /* Unit_Location */
71 __L, /* Extended_ROM */
72 __I, /* Extended_Key_Specifier_ID */
73 __I, /* Extended_Key */
74 __I | __C | __D | __L, /* Extended_Data */
75 __L, /* Modifiable_Descriptor */
76 __I, /* Directory_ID */
/* Convert between byte counts and 32-bit quadlet counts (rounding up). */
85 #define quads_to_bytes(_q) ((_q) * sizeof(u32))
86 #define bytes_to_quads(_b) (((_b) + sizeof(u32) - 1) / sizeof(u32))
/* Free a keyval's owned storage.  Leaf payloads are owned by the keyval
 * except for Extended ROM leaves, whose data pointer aliases a rom cache
 * buffer and is freed with the cache instead. */
88 static void free_keyval(struct csr1212_keyval *kv)
90 if ((kv->key.type == CSR1212_KV_TYPE_LEAF) &&
91 (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM))
92 CSR1212_FREE(kv->value.leaf.data);
/* Serial CRC-16 over 'length' big-endian quadlets, per IEEE 1212.
 * NOTE(review): the result is byte-swapped with cpu_to_be16() although the
 * return type is plain u16 -- callers compare it directly against
 * big-endian on-the-wire CRC fields. */
97 static u16 csr1212_crc16(const u32 *buffer, size_t length)
103 for (; length; length--) {
104 data = be32_to_cpu(*buffer);
106 for (shift = 28; shift >= 0; shift -= 4 ) {
107 sum = ((crc >> 12) ^ (data >> shift)) & 0xf;
108 crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ (sum);
113 return cpu_to_be16(crc);
117 /* Microsoft computes the CRC with the bytes in reverse order. Therefore we
118 * have a special version of the CRC algorithm to account for their buggy
 * software: identical to csr1212_crc16() except each quadlet is read
 * little-endian. */
120 static u16 csr1212_msft_crc16(const u32 *buffer, size_t length)
126 for (; length; length--) {
127 data = le32_to_cpu(*buffer);
129 for (shift = 28; shift >= 0; shift -= 4 ) {
130 sum = ((crc >> 12) ^ (data >> shift)) & 0xf;
131 crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ (sum);
136 return cpu_to_be16(crc);
/* Find the dentry in 'dir' that references 'kv' (used by
 * csr1212_detach_keyval_from_directory() to unlink an entry). */
140 static struct csr1212_dentry *
141 csr1212_find_keyval(struct csr1212_keyval *dir, struct csr1212_keyval *kv)
143 struct csr1212_dentry *pos;
145 for (pos = dir->value.directory.dentries_head;
146 pos != NULL; pos = pos->next)
/* Search the circular keyval list, starting after 'kv_list', for a keyval
 * located at ConfigROM byte offset 'offset'.  Used while parsing directory
 * entries that point at already-seen leaves or directories. */
152 static struct csr1212_keyval *
153 csr1212_find_keyval_offset(struct csr1212_keyval *kv_list, u32 offset)
155 struct csr1212_keyval *kv;
157 for (kv = kv_list->next; kv && (kv != kv_list); kv = kv->next)
158 if (kv->offset == offset)
164 /* Creation Routines */
/* Allocate a csr1212_csr with its initial ConfigROM-space cache and an
 * empty root directory keyval.  bus_info_size is in bytes; the bus info
 * block lives at the start of the head cache. */
166 struct csr1212_csr *csr1212_create_csr(struct csr1212_bus_ops *ops,
167 size_t bus_info_size, void *private)
169 struct csr1212_csr *csr;
171 csr = CSR1212_MALLOC(sizeof(*csr));
176 csr1212_rom_cache_malloc(CSR1212_CONFIG_ROM_SPACE_OFFSET,
177 CSR1212_CONFIG_ROM_SPACE_SIZE);
178 if (!csr->cache_head) {
183 /* The keyval key id is not used for the root node, but a valid key id
184 * that can be used for a directory needs to be passed to
185 * csr1212_new_directory(). */
186 csr->root_kv = csr1212_new_directory(CSR1212_KV_ID_VENDOR);
188 CSR1212_FREE(csr->cache_head);
193 csr->bus_info_data = csr->cache_head->data;
194 csr->bus_info_len = bus_info_size;
195 csr->crc_len = bus_info_size;
197 csr->private = private;
198 csr->cache_tail = csr->cache_head;
/* Install bus info data for a locally managed CSR.  'max_rom' is the
 * 2-bit encoded maximum read size (0..3 -> 4/64/1024/0 bytes via mr_map);
 * values outside 0..3 trip the BUG_ON. */
203 void csr1212_init_local_csr(struct csr1212_csr *csr,
204 const u32 *bus_info_data, int max_rom)
206 static const int mr_map[] = { 4, 64, 1024, 0 };
208 BUG_ON(max_rom & ~0x3);
209 csr->max_rom = mr_map[max_rom];
210 memcpy(csr->bus_info_data, bus_info_data, csr->bus_info_len);
/* Allocate a keyval of the given type and key id, rejecting combinations
 * that csr1212_key_id_type_map forbids (only ids < 0x30 are checked). */
213 static struct csr1212_keyval *csr1212_new_keyval(u8 type, u8 key)
215 struct csr1212_keyval *kv;
217 if (key < 0x30 && ((csr1212_key_id_type_map[key] & (1 << type)) == 0))
220 kv = CSR1212_MALLOC(sizeof(*kv));
227 kv->associate = NULL;
/* Create a keyval holding an immediate value. */
237 struct csr1212_keyval *csr1212_new_immediate(u8 key, u32 value)
239 struct csr1212_keyval *kv;
241 kv = csr1212_new_keyval(CSR1212_KV_TYPE_IMMEDIATE, key);
245 kv->value.immediate = value;
/* Create a leaf keyval.  'data_len' is in bytes; the payload is copied
 * into freshly allocated storage and the stored length is in quadlets
 * (rounded up). */
250 static struct csr1212_keyval *
251 csr1212_new_leaf(u8 key, const void *data, size_t data_len)
253 struct csr1212_keyval *kv;
255 kv = csr1212_new_keyval(CSR1212_KV_TYPE_LEAF, key);
260 kv->value.leaf.data = CSR1212_MALLOC(data_len);
261 if (!kv->value.leaf.data) {
267 memcpy(kv->value.leaf.data, data, data_len);
269 kv->value.leaf.data = NULL;
272 kv->value.leaf.len = bytes_to_quads(data_len);
/* Create a keyval holding a CSR register offset. */
279 static struct csr1212_keyval *
280 csr1212_new_csr_offset(u8 key, u32 csr_offset)
282 struct csr1212_keyval *kv;
284 kv = csr1212_new_keyval(CSR1212_KV_TYPE_CSR_OFFSET, key);
288 kv->value.csr_offset = csr_offset;
/* Create an empty directory keyval (no entries, zero length). */
295 struct csr1212_keyval *csr1212_new_directory(u8 key)
297 struct csr1212_keyval *kv;
299 kv = csr1212_new_keyval(CSR1212_KV_TYPE_DIRECTORY, key);
303 kv->value.directory.len = 0;
305 kv->value.directory.dentries_head = NULL;
306 kv->value.directory.dentries_tail = NULL;
/* Chain 'associate' onto 'kv' so the pair is emitted together in a
 * directory.  The BUG_ON enforces the legal pairings (Descriptor,
 * Dependent_Info, Extended_Key/Extended_Data sequencing); any previously
 * installed associate is released first. */
311 void csr1212_associate_keyval(struct csr1212_keyval *kv,
312 struct csr1212_keyval *associate)
314 BUG_ON(!kv || !associate || kv->key.id == CSR1212_KV_ID_DESCRIPTOR ||
315 (associate->key.id != CSR1212_KV_ID_DESCRIPTOR &&
316 associate->key.id != CSR1212_KV_ID_DEPENDENT_INFO &&
317 associate->key.id != CSR1212_KV_ID_EXTENDED_KEY &&
318 associate->key.id != CSR1212_KV_ID_EXTENDED_DATA &&
319 associate->key.id < 0x30) ||
320 (kv->key.id == CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID &&
321 associate->key.id != CSR1212_KV_ID_EXTENDED_KEY) ||
322 (kv->key.id == CSR1212_KV_ID_EXTENDED_KEY &&
323 associate->key.id != CSR1212_KV_ID_EXTENDED_DATA) ||
324 (associate->key.id == CSR1212_KV_ID_EXTENDED_KEY &&
325 kv->key.id != CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) ||
326 (associate->key.id == CSR1212_KV_ID_EXTENDED_DATA &&
327 kv->key.id != CSR1212_KV_ID_EXTENDED_KEY));
330 csr1212_release_keyval(kv->associate);
333 kv->associate = associate;
/* Append a new dentry referencing 'kv' at the tail of directory 'dir''s
 * entry list. */
336 int csr1212_attach_keyval_to_directory(struct csr1212_keyval *dir,
337 struct csr1212_keyval *kv)
339 struct csr1212_dentry *dentry;
341 BUG_ON(!kv || !dir || dir->key.type != CSR1212_KV_TYPE_DIRECTORY);
343 dentry = CSR1212_MALLOC(sizeof(*dentry));
352 dentry->prev = dir->value.directory.dentries_tail;
354 if (!dir->value.directory.dentries_head)
355 dir->value.directory.dentries_head = dentry;
357 if (dir->value.directory.dentries_tail)
358 dir->value.directory.dentries_tail->next = dentry;
359 dir->value.directory.dentries_tail = dentry;
361 return CSR1212_SUCCESS;
/* Accessors for the first quadlet of a Descriptor leaf, which packs the
 * descriptor type and specifier_id fields (stored big-endian).  Each
 * setter rebuilds the quadlet, preserving the other field. */
364 #define CSR1212_DESCRIPTOR_LEAF_DATA(kv) \
365 (&((kv)->value.leaf.data[1]))
367 #define CSR1212_DESCRIPTOR_LEAF_SET_TYPE(kv, type) \
368 ((kv)->value.leaf.data[0] = \
369 cpu_to_be32(CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID(kv) | \
370 ((type) << CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT)))
371 #define CSR1212_DESCRIPTOR_LEAF_SET_SPECIFIER_ID(kv, spec_id) \
372 ((kv)->value.leaf.data[0] = \
373 cpu_to_be32((CSR1212_DESCRIPTOR_LEAF_TYPE(kv) << \
374 CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT) | \
375 ((spec_id) & CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID_MASK)))
/* Build a Descriptor leaf: a header quadlet carrying type/specifier_id
 * followed by 'data_len' bytes of payload. */
377 static struct csr1212_keyval *
378 csr1212_new_descriptor_leaf(u8 dtype, u32 specifier_id,
379 const void *data, size_t data_len)
381 struct csr1212_keyval *kv;
383 kv = csr1212_new_leaf(CSR1212_KV_ID_DESCRIPTOR, NULL,
384 data_len + CSR1212_DESCRIPTOR_LEAF_OVERHEAD);
388 CSR1212_DESCRIPTOR_LEAF_SET_TYPE(kv, dtype);
389 CSR1212_DESCRIPTOR_LEAF_SET_SPECIFIER_ID(kv, specifier_id);
392 memcpy(CSR1212_DESCRIPTOR_LEAF_DATA(kv), data, data_len);
/* Field setters for the second quadlet of a Textual Descriptor leaf:
 * character width, character set and language id.  Each mask-and-or
 * updates one field in place, big-endian. */
398 #define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_WIDTH(kv, width) \
399 ((kv)->value.leaf.data[1] = \
400 ((kv)->value.leaf.data[1] & \
401 cpu_to_be32(~(CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_MASK << \
402 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_SHIFT))) | \
403 cpu_to_be32(((width) & CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_MASK) << \
404 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_SHIFT))
406 #define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_CHAR_SET(kv, char_set) \
407 ((kv)->value.leaf.data[1] = \
408 ((kv)->value.leaf.data[1] & \
409 cpu_to_be32(~(CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_MASK << \
410 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_SHIFT))) | \
411 cpu_to_be32(((char_set) & \
412 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_MASK) << \
413 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_SHIFT))
415 #define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_LANGUAGE(kv, language) \
416 ((kv)->value.leaf.data[1] = \
417 ((kv)->value.leaf.data[1] & \
418 cpu_to_be32(~(CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE_MASK))) | \
419 cpu_to_be32(((language) & \
420 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE_MASK)))
/* Build a textual descriptor leaf (descriptor type/specifier 0): set the
 * width/charset/language fields, pre-zero the final quadlet so padding
 * bytes are deterministic, then copy the string payload. */
422 static struct csr1212_keyval *
423 csr1212_new_textual_descriptor_leaf(u8 cwidth, u16 cset, u16 language,
424 const void *data, size_t data_len)
426 struct csr1212_keyval *kv;
429 kv = csr1212_new_descriptor_leaf(0, 0, NULL, data_len +
430 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_OVERHEAD);
434 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_WIDTH(kv, cwidth);
435 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_CHAR_SET(kv, cset);
436 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_LANGUAGE(kv, language);
438 lstr = (char*)CSR1212_TEXTUAL_DESCRIPTOR_LEAF_DATA(kv);
440 /* make sure last quadlet is zeroed out */
441 *((u32*)&(lstr[(data_len - 1) & ~0x3])) = 0;
443 /* don't copy the NUL terminator */
444 memcpy(lstr, data, data_len);
/* Validate that 's' contains only "minimal ASCII" characters as specified
 * by IEEE 1212, par. 7.5.4.3 (the character repertoire allowed in textual
 * descriptor leaves).
 *
 * The lookup table maps each 7-bit code to itself when the character is
 * permitted and to 0x00 otherwise, so one compare per character suffices.
 * Bytes with the high bit set always fail: the masked table entry (0..127)
 * can never equal them.
 *
 * Returns 0 when the string conforms, -1 on the first offending character.
 */
static int csr1212_check_minimal_ascii(const char *s)
{
	static const char minimal_ascii_table[] = {
		/* control characters: only bell, newline, formfeed, CR */
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07,
		0x00, 0x00, 0x0a, 0x00, 0x0C, 0x0D, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		/* printable punctuation and digits ('#' and '$' excluded) */
		0x20, 0x21, 0x22, 0x00, 0x00, 0x25, 0x26, 0x27,
		0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
		0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
		0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
		/* '@', upper case, underscore; brackets/backslash/caret out */
		0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
		0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
		0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
		0x58, 0x59, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x5f,
		/* lower case; backquote, braces, pipe, tilde, DEL out */
		0x00, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
		0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
		0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
		0x78, 0x79, 0x7a, 0x00, 0x00, 0x00, 0x00, 0x00,
	};

	for (; *s; s++)
		if (minimal_ascii_table[*s & 0x7F] != *s)
			return -1; /* failed */

	/* String conforms to minimal-ascii, as specified by IEEE 1212,
	 * par. 7.5.4.3 */
	return 0;
}
/* Wrap a minimal-ASCII string in a textual descriptor leaf; strings with
 * characters outside the minimal-ASCII repertoire are rejected. */
478 struct csr1212_keyval *csr1212_new_string_descriptor_leaf(const char *s)
480 /* Check if string conform to minimal_ascii format */
481 if (csr1212_check_minimal_ascii(s))
484 /* IEEE 1212, par. 7.5.4.1 Textual descriptors (minimal ASCII) */
485 return csr1212_new_textual_descriptor_leaf(0, 0, 0, s, strlen(s));
489 /* Destruction Routines */
/* Unlink 'kv' from directory 'dir': locate its dentry, splice it out of
 * the list (fixing head/tail), free the dentry and drop the directory's
 * reference on 'kv'. */
491 void csr1212_detach_keyval_from_directory(struct csr1212_keyval *dir,
492 struct csr1212_keyval *kv)
494 struct csr1212_dentry *dentry;
496 if (!kv || !dir || dir->key.type != CSR1212_KV_TYPE_DIRECTORY)
499 dentry = csr1212_find_keyval(dir, kv);
505 dentry->prev->next = dentry->next;
507 dentry->next->prev = dentry->prev;
508 if (dir->value.directory.dentries_head == dentry)
509 dir->value.directory.dentries_head = dentry->next;
510 if (dir->value.directory.dentries_tail == dentry)
511 dir->value.directory.dentries_tail = dentry->prev;
513 CSR1212_FREE(dentry);
515 csr1212_release_keyval(kv);
518 /* This function is used to free the memory taken by a keyval. If the given
519 * keyval is a directory type, then any keyvals contained in that directory
520 * will be destroyed as well if their respective refcnts are 0. By means of
521 * list manipulation, this routine will descend a directory structure in a
522 * non-recursive manner. */
523 static void csr1212_destroy_keyval(struct csr1212_keyval *kv)
525 struct csr1212_keyval *k, *a;
526 struct csr1212_dentry dentry;
527 struct csr1212_dentry *head, *tail;
547 if (k->key.type == CSR1212_KV_TYPE_DIRECTORY) {
548 /* If the current entry is a directory, move all
549 * the entries to the destruction list. */
550 if (k->value.directory.dentries_head) {
552 k->value.directory.dentries_head;
553 k->value.directory.dentries_head->prev =
555 tail = k->value.directory.dentries_tail;
/* Free the previously visited dentry, but never the on-stack
 * seed entry declared above. */
564 if (head->prev && head->prev != &dentry)
565 CSR1212_FREE(head->prev);
567 } else if (tail != &dentry) {
/* Public release: destroys 'kv' (and anything reachable from it) once no
 * references remain. */
573 void csr1212_release_keyval(struct csr1212_keyval *kv)
578 csr1212_destroy_keyval(kv);
/* Tear down an entire CSR, starting by releasing the root keyval tree;
 * the cache and filled-region lists are freed in the remainder of the
 * function. */
581 void csr1212_destroy_csr(struct csr1212_csr *csr)
583 struct csr1212_csr_rom_cache *c, *oc;
584 struct csr1212_cache_region *cr, *ocr;
586 csr1212_release_keyval(csr->root_kv);
605 /* CSR Image Creation */
/* Allocate a bus address range for a new Extended ROM cache, create its
 * Extended ROM leaf (whose data aliases the cache buffer), attach the
 * leaf to the root directory and append the cache to the csr's cache
 * list.  Every failure path releases the allocated address range. */
607 static int csr1212_append_new_cache(struct csr1212_csr *csr, size_t romsize)
609 struct csr1212_csr_rom_cache *cache;
612 BUG_ON(!csr || !csr->ops || !csr->ops->allocate_addr_range ||
613 !csr->ops->release_addr || csr->max_rom < 1);
615 /* ROM size must be a multiple of csr->max_rom */
616 romsize = (romsize + (csr->max_rom - 1)) & ~(csr->max_rom - 1);
618 csr_addr = csr->ops->allocate_addr_range(romsize, csr->max_rom,
620 if (csr_addr == CSR1212_INVALID_ADDR_SPACE)
623 if (csr_addr < CSR1212_REGISTER_SPACE_BASE) {
624 /* Invalid address returned from allocate_addr_range(). */
625 csr->ops->release_addr(csr_addr, csr->private);
629 cache = csr1212_rom_cache_malloc(csr_addr - CSR1212_REGISTER_SPACE_BASE,
632 csr->ops->release_addr(csr_addr, csr->private);
636 cache->ext_rom = csr1212_new_keyval(CSR1212_KV_TYPE_LEAF,
637 CSR1212_KV_ID_EXTENDED_ROM);
638 if (!cache->ext_rom) {
639 csr->ops->release_addr(csr_addr, csr->private);
644 if (csr1212_attach_keyval_to_directory(csr->root_kv, cache->ext_rom) !=
646 csr1212_release_keyval(cache->ext_rom);
647 csr->ops->release_addr(csr_addr, csr->private);
651 cache->ext_rom->offset = csr_addr - CSR1212_REGISTER_SPACE_BASE;
652 cache->ext_rom->value.leaf.len = -1;
653 cache->ext_rom->value.leaf.data = cache->data;
655 /* Add cache to tail of cache list */
656 cache->prev = csr->cache_tail;
657 csr->cache_tail->next = cache;
658 csr->cache_tail = cache;
659 return CSR1212_SUCCESS;
/* Unlink 'cache' from the csr's cache list (fixing head/tail) and, for
 * Extended ROM caches, detach and release the associated leaf keyval. */
662 static void csr1212_remove_cache(struct csr1212_csr *csr,
663 struct csr1212_csr_rom_cache *cache)
665 if (csr->cache_head == cache)
666 csr->cache_head = cache->next;
667 if (csr->cache_tail == cache)
668 csr->cache_tail = cache->prev;
671 cache->prev->next = cache->next;
673 cache->next->prev = cache->prev;
675 if (cache->ext_rom) {
676 csr1212_detach_keyval_from_directory(csr->root_kv,
678 csr1212_release_keyval(cache->ext_rom);
/* Flatten one directory's entries (and their associate chains) onto the
 * layout list at *layout_tail.  Repeated Extended Key Specifier_ID /
 * Extended Key immediates are de-duplicated, and Extended ROM leaves are
 * kept off the layout list since they are positioned per-cache. */
684 static int csr1212_generate_layout_subdir(struct csr1212_keyval *dir,
685 struct csr1212_keyval **layout_tail)
687 struct csr1212_dentry *dentry;
688 struct csr1212_keyval *dkv;
689 struct csr1212_keyval *last_extkey_spec = NULL;
690 struct csr1212_keyval *last_extkey = NULL;
693 for (dentry = dir->value.directory.dentries_head; dentry;
694 dentry = dentry->next) {
695 for (dkv = dentry->kv; dkv; dkv = dkv->associate) {
696 /* Special Case: Extended Key Specifier_ID */
698 CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) {
699 if (last_extkey_spec == NULL)
700 last_extkey_spec = dkv;
701 else if (dkv->value.immediate !=
702 last_extkey_spec->value.immediate)
703 last_extkey_spec = dkv;
706 /* Special Case: Extended Key */
707 } else if (dkv->key.id == CSR1212_KV_ID_EXTENDED_KEY) {
708 if (last_extkey == NULL)
710 else if (dkv->value.immediate !=
711 last_extkey->value.immediate)
719 switch (dkv->key.type) {
721 case CSR1212_KV_TYPE_IMMEDIATE:
722 case CSR1212_KV_TYPE_CSR_OFFSET:
724 case CSR1212_KV_TYPE_LEAF:
725 case CSR1212_KV_TYPE_DIRECTORY:
726 /* Remove from list */
727 if (dkv->prev && (dkv->prev->next == dkv))
728 dkv->prev->next = dkv->next;
729 if (dkv->next && (dkv->next->prev == dkv))
730 dkv->next->prev = dkv->prev;
731 //if (dkv == *layout_tail)
732 // *layout_tail = dkv->prev;
734 /* Special case: Extended ROM leafs */
735 if (dkv->key.id == CSR1212_KV_ID_EXTENDED_ROM) {
736 dkv->value.leaf.len = -1;
737 /* Don't add Extended ROM leafs in the
738 * layout list, they are handled
 * separately by the per-cache
 * positioning code. */
743 /* Add to tail of list */
745 dkv->prev = *layout_tail;
746 (*layout_tail)->next = dkv;
/* Walk from 'kv' appending every reachable leaf/directory onto the layout
 * list (threaded through ltail via csr1212_generate_layout_subdir) and
 * return the aggregate image size in bytes.  Each entry contributes one
 * extra quadlet for its CRC/length header.
 *
 * Fix: the '&ltail' argument had been mangled to '<ail' by an HTML-entity
 * decoding pass ('&lt' -> '<'); restored to pass the layout-tail pointer
 * by address as the subdir walker requires. */
755 static size_t csr1212_generate_layout_order(struct csr1212_keyval *kv)
757 struct csr1212_keyval *ltail = kv;
761 switch (kv->key.type) {
762 case CSR1212_KV_TYPE_LEAF:
763 /* Add 1 quadlet for crc/len field */
764 agg_size += kv->value.leaf.len + 1;
767 case CSR1212_KV_TYPE_DIRECTORY:
768 kv->value.directory.len =
769 csr1212_generate_layout_subdir(kv, &ltail);
770 /* Add 1 quadlet for crc/len field */
771 agg_size += kv->value.directory.len + 1;
776 return quads_to_bytes(agg_size);
/* Assign ConfigROM byte offsets to consecutive layout-list keyvals until
 * 'cache' runs out of room, recording the cache's layout head/tail and
 * used length.  The return value seeds the next cache's layout pass in
 * csr1212_generate_csr_image(). */
779 static struct csr1212_keyval *
780 csr1212_generate_positions(struct csr1212_csr_rom_cache *cache,
781 struct csr1212_keyval *start_kv, int start_pos)
783 struct csr1212_keyval *kv = start_kv;
784 struct csr1212_keyval *okv = start_kv;
786 int kv_len = 0, okv_len = 0;
788 cache->layout_head = kv;
790 while (kv && pos < cache->size) {
791 /* Special case: Extended ROM leafs */
792 if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM)
793 kv->offset = cache->offset + pos;
795 switch (kv->key.type) {
796 case CSR1212_KV_TYPE_LEAF:
797 kv_len = kv->value.leaf.len;
800 case CSR1212_KV_TYPE_DIRECTORY:
801 kv_len = kv->value.directory.len;
805 /* Should never get here */
809 pos += quads_to_bytes(kv_len + 1);
811 if (pos <= cache->size) {
818 cache->layout_tail = okv;
819 cache->len = okv->offset - cache->offset + quads_to_bytes(okv_len + 1);
/* Field layout of a serialized directory entry quadlet:
 * bits 31..24 hold the key (id | type), bits 23..0 hold the value. */
824 #define CSR1212_KV_KEY_SHIFT 24
825 #define CSR1212_KV_KEY_TYPE_SHIFT 6
826 #define CSR1212_KV_KEY_ID_MASK 0x3f
827 #define CSR1212_KV_KEY_TYPE_MASK 0x3 /* after shift */
/* Serialize one directory's entries into data_buffer as big-endian entry
 * quadlets.  Immediates and CSR offsets are emitted inline; leaf and
 * directory values become quadlet offsets relative to the entry's own
 * position.  De-duplicates repeated Extended Key (Specifier_ID) entries
 * just like the layout pass. */
830 csr1212_generate_tree_subdir(struct csr1212_keyval *dir, u32 *data_buffer)
832 struct csr1212_dentry *dentry;
833 struct csr1212_keyval *last_extkey_spec = NULL;
834 struct csr1212_keyval *last_extkey = NULL;
837 for (dentry = dir->value.directory.dentries_head;
839 dentry = dentry->next) {
840 struct csr1212_keyval *a;
842 for (a = dentry->kv; a; a = a->associate) {
845 /* Special Case: Extended Key Specifier_ID */
847 CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) {
848 if (last_extkey_spec == NULL)
849 last_extkey_spec = a;
850 else if (a->value.immediate !=
851 last_extkey_spec->value.immediate)
852 last_extkey_spec = a;
856 /* Special Case: Extended Key */
857 } else if (a->key.id == CSR1212_KV_ID_EXTENDED_KEY) {
858 if (last_extkey == NULL)
860 else if (a->value.immediate !=
861 last_extkey->value.immediate)
867 switch (a->key.type) {
868 case CSR1212_KV_TYPE_IMMEDIATE:
869 value = a->value.immediate;
871 case CSR1212_KV_TYPE_CSR_OFFSET:
872 value = a->value.csr_offset;
874 case CSR1212_KV_TYPE_LEAF:
876 value -= dir->offset + quads_to_bytes(1+index);
877 value = bytes_to_quads(value);
879 case CSR1212_KV_TYPE_DIRECTORY:
881 value -= dir->offset + quads_to_bytes(1+index);
882 value = bytes_to_quads(value);
885 /* Should never get here */
886 break; /* GDB breakpoint */
889 value |= (a->key.id & CSR1212_KV_KEY_ID_MASK) <<
890 CSR1212_KV_KEY_SHIFT;
891 value |= (a->key.type & CSR1212_KV_KEY_TYPE_MASK) <<
892 (CSR1212_KV_KEY_SHIFT +
893 CSR1212_KV_KEY_TYPE_SHIFT);
894 data_buffer[index] = cpu_to_be32(value);
/* On-the-wire header of a leaf or directory block: 16-bit length and
 * 16-bit CRC quadlet followed by the payload quadlets. */
900 struct csr1212_keyval_img {
905 u32 data[0]; /* older gcc can't handle [] which is standard */
/* Write each keyval on this cache's layout list into the cache buffer:
 * payload first, then the length/CRC header.  Extended ROM areas are
 * skipped (already filled); the layout list links are severed at the end. */
908 static void csr1212_fill_cache(struct csr1212_csr_rom_cache *cache)
910 struct csr1212_keyval *kv, *nkv;
911 struct csr1212_keyval_img *kvi;
913 for (kv = cache->layout_head;
914 kv != cache->layout_tail->next;
916 kvi = (struct csr1212_keyval_img *)(cache->data +
917 bytes_to_quads(kv->offset - cache->offset));
918 switch (kv->key.type) {
920 case CSR1212_KV_TYPE_IMMEDIATE:
921 case CSR1212_KV_TYPE_CSR_OFFSET:
922 /* Should never get here */
923 break; /* GDB breakpoint */
925 case CSR1212_KV_TYPE_LEAF:
926 /* Don't copy over Extended ROM areas, they are
927 * already filled out! */
928 if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM)
929 memcpy(kvi->data, kv->value.leaf.data,
930 quads_to_bytes(kv->value.leaf.len));
932 kvi->length = cpu_to_be16(kv->value.leaf.len);
933 kvi->crc = csr1212_crc16(kvi->data, kv->value.leaf.len);
936 case CSR1212_KV_TYPE_DIRECTORY:
937 csr1212_generate_tree_subdir(kv, kvi->data);
939 kvi->length = cpu_to_be16(kv->value.directory.len);
940 kvi->crc = csr1212_crc16(kvi->data,
941 kv->value.directory.len);
947 kv->prev->next = NULL;
949 kv->next->prev = NULL;
955 /* This size is arbitrarily chosen.
956 * The struct overhead is subtracted for more economic allocations. */
957 #define CSR1212_EXTENDED_ROM_SIZE (2048 - sizeof(struct csr1212_csr_rom_cache))
/* Rebuild the complete ConfigROM image: recompute the bus info block CRC,
 * lay all keyvals out across the caches (growing the Extended ROM cache
 * list when the image does not fit, shrinking it afterwards), fill each
 * cache, and finally fix up Extended ROM lengths/CRCs walking tail-first
 * so nested CRCs come out correct. */
959 int csr1212_generate_csr_image(struct csr1212_csr *csr)
961 struct csr1212_bus_info_block_img *bi;
962 struct csr1212_csr_rom_cache *cache;
963 struct csr1212_keyval *kv;
970 cache = csr->cache_head;
972 bi = (struct csr1212_bus_info_block_img*)cache->data;
974 bi->length = bytes_to_quads(csr->bus_info_len) - 1;
975 bi->crc_length = bi->length;
976 bi->crc = csr1212_crc16(bi->data, bi->crc_length);
978 csr->root_kv->next = NULL;
979 csr->root_kv->prev = NULL;
981 agg_size = csr1212_generate_layout_order(csr->root_kv);
983 init_offset = csr->bus_info_len;
985 for (kv = csr->root_kv, cache = csr->cache_head;
987 cache = cache->next) {
989 /* Estimate approximate number of additional cache
990 * regions needed (it assumes that the cache holding
991 * the first 1K Config ROM space always exists). */
992 int est_c = agg_size / (CSR1212_EXTENDED_ROM_SIZE -
993 (2 * sizeof(u32))) + 1;
995 /* Add additional cache regions, extras will be
 * removed later once the final layout is known. */
997 for (; est_c; est_c--) {
998 ret = csr1212_append_new_cache(csr,
999 CSR1212_EXTENDED_ROM_SIZE);
1000 if (ret != CSR1212_SUCCESS)
1003 /* Need to re-layout for additional cache regions */
1004 agg_size = csr1212_generate_layout_order(csr->root_kv);
1006 cache = csr->cache_head;
1007 init_offset = csr->bus_info_len;
1009 kv = csr1212_generate_positions(cache, kv, init_offset);
1010 agg_size -= cache->len;
1011 init_offset = sizeof(u32);
1014 /* Remove unused, excess cache regions */
1016 struct csr1212_csr_rom_cache *oc = cache;
1018 cache = cache->next;
1019 csr1212_remove_cache(csr, oc);
1022 /* Go through the list backward so that when done, the correct CRC
1023 * will be calculated for the Extended ROM areas. */
1024 for (cache = csr->cache_tail; cache; cache = cache->prev) {
1025 /* Only Extended ROM caches should have this set. */
1026 if (cache->ext_rom) {
1029 /* Make sure the Extended ROM leaf is a multiple of
1030 * max_rom in size. */
1031 BUG_ON(csr->max_rom < 1);
1032 leaf_size = (cache->len + (csr->max_rom - 1)) &
1033 ~(csr->max_rom - 1);
1035 /* Zero out the unused ROM region */
1036 memset(cache->data + bytes_to_quads(cache->len), 0x00,
1037 leaf_size - cache->len);
1039 /* Subtract leaf header */
1040 leaf_size -= sizeof(u32);
1042 /* Update the Extended ROM leaf length */
1043 cache->ext_rom->value.leaf.len =
1044 bytes_to_quads(leaf_size);
1046 /* Zero out the unused ROM region */
1047 memset(cache->data + bytes_to_quads(cache->len), 0x00,
1048 cache->size - cache->len);
1051 /* Copy the data into the cache buffer */
1052 csr1212_fill_cache(cache);
1054 if (cache != csr->cache_head) {
1055 /* Set the length and CRC of the extended ROM. */
1056 struct csr1212_keyval_img *kvi =
1057 (struct csr1212_keyval_img*)cache->data;
1058 u16 len = bytes_to_quads(cache->len) - 1;
1060 kvi->length = cpu_to_be16(len);
1061 kvi->crc = csr1212_crc16(kvi->data, len);
1065 return CSR1212_SUCCESS;
/* Copy 'len' bytes at ConfigROM byte 'offset' out of whichever cache
 * fully contains the requested range. */
1068 int csr1212_read(struct csr1212_csr *csr, u32 offset, void *buffer, u32 len)
1070 struct csr1212_csr_rom_cache *cache;
1072 for (cache = csr->cache_head; cache; cache = cache->next)
1073 if (offset >= cache->offset &&
1074 (offset + len) <= (cache->offset + cache->size))
1075 memcpy(buffer, &cache->data[
1076 bytes_to_quads(offset - cache->offset)],
1078 return CSR1212_SUCCESS;
1085 /* Parse a chunk of data as a Config ROM */
/* Read and validate the bus info block, then record the head cache's
 * initial filled region covering it. */
1087 static int csr1212_parse_bus_info_block(struct csr1212_csr *csr)
1089 struct csr1212_bus_info_block_img *bi;
1090 struct csr1212_cache_region *cr;
1094 /* IEEE 1212 says that the entire bus info block should be readable in
1095 * a single transaction regardless of the max_rom value.
1096 * Unfortunately, many IEEE 1394 devices do not abide by that, so the
1097 * bus info block will be read 1 quadlet at a time. The rest of the
1098 * ConfigROM will be read according to the max_rom field. */
1099 for (i = 0; i < csr->bus_info_len; i += sizeof(u32)) {
1100 ret = csr->ops->bus_read(csr, CSR1212_CONFIG_ROM_SPACE_BASE + i,
1101 sizeof(u32), &csr->cache_head->data[bytes_to_quads(i)],
1103 if (ret != CSR1212_SUCCESS)
1106 /* check ROM header's info_length */
1108 be32_to_cpu(csr->cache_head->data[0]) >> 24 !=
1109 bytes_to_quads(csr->bus_info_len) - 1)
1113 bi = (struct csr1212_bus_info_block_img*)csr->cache_head->data;
1114 csr->crc_len = quads_to_bytes(bi->crc_length);
1116 /* IEEE 1212 recommends that crc_len be equal to bus_info_len, but that
1117 * is not always the case, so read the rest of the crc area 1 quadlet at
 * a time. */
1119 for (i = csr->bus_info_len; i <= csr->crc_len; i += sizeof(u32)) {
1120 ret = csr->ops->bus_read(csr, CSR1212_CONFIG_ROM_SPACE_BASE + i,
1121 sizeof(u32), &csr->cache_head->data[bytes_to_quads(i)],
1123 if (ret != CSR1212_SUCCESS)
1128 /* Apparently there are too many different wrong implementations of the
1129 * CRC algorithm that verifying them is moot. */
1130 if ((csr1212_crc16(bi->data, bi->crc_length) != bi->crc) &&
1131 (csr1212_msft_crc16(bi->data, bi->crc_length) != bi->crc))
1135 cr = CSR1212_MALLOC(sizeof(*cr));
1141 cr->offset_start = 0;
1142 cr->offset_end = csr->crc_len + 4;
1144 csr->cache_head->filled_head = cr;
1145 csr->cache_head->filled_tail = cr;
1147 return CSR1212_SUCCESS;
/* Decoding helpers for serialized directory entry quadlets (inverse of
 * the CSR1212_KV_KEY_* packing above). */
1150 #define CSR1212_KV_KEY(q) (be32_to_cpu(q) >> CSR1212_KV_KEY_SHIFT)
1151 #define CSR1212_KV_KEY_TYPE(q) (CSR1212_KV_KEY(q) >> CSR1212_KV_KEY_TYPE_SHIFT)
1152 #define CSR1212_KV_KEY_ID(q) (CSR1212_KV_KEY(q) & CSR1212_KV_KEY_ID_MASK)
1153 #define CSR1212_KV_VAL_MASK 0xffffff
1154 #define CSR1212_KV_VAL(q) (be32_to_cpu(q) & CSR1212_KV_VAL_MASK)
/* Parse one directory entry quadlet 'ki' located at ConfigROM offset
 * 'kv_pos'.  Immediates and CSR offsets become fresh keyvals; leaf and
 * directory entries are resolved against already-parsed keyvals by
 * offset, or created as unread placeholders, then attached to 'dir'. */
1157 csr1212_parse_dir_entry(struct csr1212_keyval *dir, u32 ki, u32 kv_pos)
1159 int ret = CSR1212_SUCCESS;
1160 struct csr1212_keyval *k = NULL;
1163 switch (CSR1212_KV_KEY_TYPE(ki)) {
1164 case CSR1212_KV_TYPE_IMMEDIATE:
1165 k = csr1212_new_immediate(CSR1212_KV_KEY_ID(ki),
1166 CSR1212_KV_VAL(ki));
1172 k->refcnt = 0; /* Don't keep local reference when parsing. */
1175 case CSR1212_KV_TYPE_CSR_OFFSET:
1176 k = csr1212_new_csr_offset(CSR1212_KV_KEY_ID(ki),
1177 CSR1212_KV_VAL(ki));
1182 k->refcnt = 0; /* Don't keep local reference when parsing. */
1186 /* Compute the offset from 0xffff f000 0000. */
1187 offset = quads_to_bytes(CSR1212_KV_VAL(ki)) + kv_pos;
1188 if (offset == kv_pos) {
1189 /* Uh-oh. Can't have a relative offset of 0 for Leaves
1190 * or Directories. The Config ROM image is most likely
1191 * messed up, so we'll just abort here. */
1196 k = csr1212_find_keyval_offset(dir, offset);
1199 break; /* Found it. */
1201 if (CSR1212_KV_KEY_TYPE(ki) == CSR1212_KV_TYPE_DIRECTORY)
1202 k = csr1212_new_directory(CSR1212_KV_KEY_ID(ki));
1204 k = csr1212_new_leaf(CSR1212_KV_KEY_ID(ki), NULL, 0);
1210 k->refcnt = 0; /* Don't keep local reference when parsing. */
1211 k->valid = 0; /* Contents not read yet so it's not valid. */
1215 k->next = dir->next;
1216 dir->next->prev = k;
1219 ret = csr1212_attach_keyval_to_directory(dir, k);
1222 if (ret != CSR1212_SUCCESS && k != NULL)
/* Interpret the cached quadlets at kv->offset as keyval 'kv': verify the
 * block CRC (accepting either CRC variant), then either parse each
 * directory entry or copy the leaf payload.  Extended ROM leaf data stays
 * backed by its own cache and is not copied here. */
1227 int csr1212_parse_keyval(struct csr1212_keyval *kv,
1228 struct csr1212_csr_rom_cache *cache)
1230 struct csr1212_keyval_img *kvi;
1232 int ret = CSR1212_SUCCESS;
1235 kvi = (struct csr1212_keyval_img*)
1236 &cache->data[bytes_to_quads(kv->offset - cache->offset)];
1237 kvi_len = be16_to_cpu(kvi->length);
1240 /* Apparently there are too many different wrong implementations of the
1241 * CRC algorithm that verifying them is moot. */
1242 if ((csr1212_crc16(kvi->data, kvi_len) != kvi->crc) &&
1243 (csr1212_msft_crc16(kvi->data, kvi_len) != kvi->crc)) {
1249 switch (kv->key.type) {
1250 case CSR1212_KV_TYPE_DIRECTORY:
1251 for (i = 0; i < kvi_len; i++) {
1252 u32 ki = kvi->data[i];
1254 /* Some devices put null entries in their unit
1255 * directories. If we come across such an entry,
 * simply skip it. */
1259 ret = csr1212_parse_dir_entry(kv, ki,
1260 kv->offset + quads_to_bytes(i + 1));
1262 kv->value.directory.len = kvi_len;
1265 case CSR1212_KV_TYPE_LEAF:
1266 if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM) {
1267 size_t size = quads_to_bytes(kvi_len);
1269 kv->value.leaf.data = CSR1212_MALLOC(size);
1270 if (!kv->value.leaf.data) {
1275 kv->value.leaf.len = kvi_len;
1276 memcpy(kv->value.leaf.data, kvi->data, size);
/* Ensure the quadlets backing 'kv' are present in a cache -- creating a
 * new cache for Extended ROM leaves -- reading from the bus in
 * max_rom-sized chunks while maintaining the cache's list of filled
 * regions, then hand the keyval to csr1212_parse_keyval().  Falls back
 * to quadlet-sized reads if the advertised max_rom proves wrong. */
1288 csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
1290 struct csr1212_cache_region *cr, *ncr, *newcr = NULL;
1291 struct csr1212_keyval_img *kvi = NULL;
1292 struct csr1212_csr_rom_cache *cache;
1298 BUG_ON(!csr || !kv || csr->max_rom < 1);
1300 /* First find which cache the data should be in (or go in if not read
 * yet). */
1302 for (cache = csr->cache_head; cache; cache = cache->next)
1303 if (kv->offset >= cache->offset &&
1304 kv->offset < (cache->offset + cache->size))
1310 /* Only create a new cache for Extended ROM leaves. */
1311 if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM)
1314 if (csr->ops->bus_read(csr,
1315 CSR1212_REGISTER_SPACE_BASE + kv->offset,
1316 sizeof(u32), &q, csr->private))
1319 kv->value.leaf.len = be32_to_cpu(q) >> 16;
1321 cache_size = (quads_to_bytes(kv->value.leaf.len + 1) +
1322 (csr->max_rom - 1)) & ~(csr->max_rom - 1);
1324 cache = csr1212_rom_cache_malloc(kv->offset, cache_size);
1328 kv->value.leaf.data = &cache->data[1];
1329 csr->cache_tail->next = cache;
1330 cache->prev = csr->cache_tail;
1332 csr->cache_tail = cache;
1333 cache->filled_head =
1334 CSR1212_MALLOC(sizeof(*cache->filled_head));
1335 if (!cache->filled_head)
1338 cache->filled_head->offset_start = 0;
1339 cache->filled_head->offset_end = sizeof(u32);
1340 cache->filled_tail = cache->filled_head;
1341 cache->filled_head->next = NULL;
1342 cache->filled_head->prev = NULL;
1345 /* Don't read the entire extended ROM now. Pieces of it will
1346 * be read when entries inside it are read. */
1347 return csr1212_parse_keyval(kv, cache);
1350 cache_index = kv->offset - cache->offset;
1352 /* Now search read portions of the cache to see if it is there. */
1353 for (cr = cache->filled_head; cr; cr = cr->next) {
1354 if (cache_index < cr->offset_start) {
1355 newcr = CSR1212_MALLOC(sizeof(*newcr));
1359 newcr->offset_start = cache_index & ~(csr->max_rom - 1);
1360 newcr->offset_end = newcr->offset_start;
1362 newcr->prev = cr->prev;
1366 } else if ((cache_index >= cr->offset_start) &&
1367 (cache_index < cr->offset_end)) {
1368 kvi = (struct csr1212_keyval_img*)
1369 (&cache->data[bytes_to_quads(cache_index)]);
1370 kv_len = quads_to_bytes(be16_to_cpu(kvi->length) + 1);
1372 } else if (cache_index == cr->offset_end) {
1378 cr = cache->filled_tail;
1379 newcr = CSR1212_MALLOC(sizeof(*newcr));
1383 newcr->offset_start = cache_index & ~(csr->max_rom - 1);
1384 newcr->offset_end = newcr->offset_start;
1386 newcr->next = cr->next;
1389 cache->filled_tail = newcr;
1392 while(!kvi || cr->offset_end < cache_index + kv_len) {
1393 cache_ptr = &cache->data[bytes_to_quads(cr->offset_end &
1394 ~(csr->max_rom - 1))];
1396 addr = (CSR1212_CSR_ARCH_REG_SPACE_BASE + cache->offset +
1397 cr->offset_end) & ~(csr->max_rom - 1);
1399 if (csr->ops->bus_read(csr, addr, csr->max_rom, cache_ptr,
1401 if (csr->max_rom == 4)
1402 /* We've got problems! */
1405 /* Apparently the max_rom value was a lie, set it to
1406 * do quadlet reads and try again. */
1411 cr->offset_end += csr->max_rom - (cr->offset_end &
1412 (csr->max_rom - 1));
1414 if (!kvi && (cr->offset_end > cache_index)) {
1415 kvi = (struct csr1212_keyval_img*)
1416 (&cache->data[bytes_to_quads(cache_index)]);
1417 kv_len = quads_to_bytes(be16_to_cpu(kvi->length) + 1);
1420 if ((kv_len + (kv->offset - cache->offset)) > cache->size) {
1421 /* The Leaf or Directory claims its length extends
1422 * beyond the ConfigROM image region and thus beyond the
1423 * end of our cache region. Therefore, we abort now
1424 * rather than seg faulting later. */
1430 if (ncr && (cr->offset_end >= ncr->offset_start)) {
1431 /* consolidate region entries */
1432 ncr->offset_start = cr->offset_start;
1435 cr->prev->next = cr->next;
1436 ncr->prev = cr->prev;
1437 if (cache->filled_head == cr)
1438 cache->filled_head = ncr;
1444 return csr1212_parse_keyval(kv, cache);
/* On-demand accessor: read 'kv' from the bus via csr1212_read_keyval()
 * when it has not been read yet. */
1447 struct csr1212_keyval *
1448 csr1212_get_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
1453 if (csr1212_read_keyval(csr, kv) != CSR1212_SUCCESS)
/* Top-level ConfigROM parse: read the bus info block, determine max_rom
 * (via ops->get_max_rom when available, else the 4-byte default), read
 * the root directory -- kept on a circular keyval list so offsets can be
 * resolved -- then pull in every Extended ROM region it references. */
1458 int csr1212_parse_csr(struct csr1212_csr *csr)
1460 static const int mr_map[] = { 4, 64, 1024, 0 };
1461 struct csr1212_dentry *dentry;
1464 BUG_ON(!csr || !csr->ops || !csr->ops->bus_read);
1466 ret = csr1212_parse_bus_info_block(csr);
1467 if (ret != CSR1212_SUCCESS)
1470 if (!csr->ops->get_max_rom) {
1471 csr->max_rom = mr_map[0]; /* default value */
1473 int i = csr->ops->get_max_rom(csr->bus_info_data,
1477 csr->max_rom = mr_map[i];
1480 csr->cache_head->layout_head = csr->root_kv;
1481 csr->cache_head->layout_tail = csr->root_kv;
1483 csr->root_kv->offset = (CSR1212_CONFIG_ROM_SPACE_BASE & 0xffff) +
1486 csr->root_kv->valid = 0;
1487 csr->root_kv->next = csr->root_kv;
1488 csr->root_kv->prev = csr->root_kv;
1489 ret = csr1212_read_keyval(csr, csr->root_kv);
1490 if (ret != CSR1212_SUCCESS)
1493 /* Scan through the Root directory finding all extended ROM regions
1494 * and make cache regions for them */
1495 for (dentry = csr->root_kv->value.directory.dentries_head;
1496 dentry; dentry = dentry->next) {
1497 if (dentry->kv->key.id == CSR1212_KV_ID_EXTENDED_ROM &&
1498 !dentry->kv->valid) {
1499 ret = csr1212_read_keyval(csr, dentry->kv);
1500 if (ret != CSR1212_SUCCESS)
1505 return CSR1212_SUCCESS;