/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>

#include "rds.h"
#include "rdma.h"

static DECLARE_WAIT_QUEUE_HEAD(rds_message_flush_waitq);

static unsigned int rds_exthdr_size[__RDS_EXTHDR_MAX] = {
        [RDS_EXTHDR_NONE]       = 0,
        [RDS_EXTHDR_VERSION]    = sizeof(struct rds_ext_header_version),
        [RDS_EXTHDR_RDMA]       = sizeof(struct rds_ext_header_rdma),
        [RDS_EXTHDR_RDMA_DEST]  = sizeof(struct rds_ext_header_rdma_dest),
};
void rds_message_addref(struct rds_message *rm)
{
        rdsdebug("addref rm %p ref %d\n", rm, atomic_read(&rm->m_refcount));
        atomic_inc(&rm->m_refcount);
}
/*
 * This relies on dma_map_sg() not touching sg[].page during merging.
 */
static void rds_message_purge(struct rds_message *rm)
{
        unsigned long i;

        if (unlikely(test_bit(RDS_MSG_PAGEVEC, &rm->m_flags)))
                return;

        for (i = 0; i < rm->m_nents; i++) {
                rdsdebug("putting data page %p\n", (void *)sg_page(&rm->m_sg[i]));
                /* XXX will have to put_page for page refs */
                __free_page(sg_page(&rm->m_sg[i]));
        }
        rm->m_nents = 0;

        if (rm->m_rdma_op)
                rds_rdma_free_op(rm->m_rdma_op);
        if (rm->m_rdma_mr)
                rds_mr_put(rm->m_rdma_mr);
}
void rds_message_inc_purge(struct rds_incoming *inc)
{
        struct rds_message *rm = container_of(inc, struct rds_message, m_inc);
        rds_message_purge(rm);
}
void rds_message_put(struct rds_message *rm)
{
        rdsdebug("put rm %p ref %d\n", rm, atomic_read(&rm->m_refcount));

        if (atomic_dec_and_test(&rm->m_refcount)) {
                BUG_ON(!list_empty(&rm->m_sock_item));
                BUG_ON(!list_empty(&rm->m_conn_item));
                rds_message_purge(rm);

                kfree(rm);
        }
}
void rds_message_inc_free(struct rds_incoming *inc)
{
        struct rds_message *rm = container_of(inc, struct rds_message, m_inc);
        rds_message_put(rm);
}
void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
                                 __be16 dport, u64 seq)
{
        hdr->h_sport = sport;
        hdr->h_dport = dport;
        hdr->h_sequence = cpu_to_be64(seq);
        hdr->h_exthdr[0] = RDS_EXTHDR_NONE;
}
int rds_message_add_extension(struct rds_header *hdr,
                unsigned int type, const void *data, unsigned int len)
{
        unsigned int ext_len = sizeof(u8) + len;
        unsigned char *dst;

        /* For now, refuse to add more than one extension header */
        if (hdr->h_exthdr[0] != RDS_EXTHDR_NONE)
                return 0;

        if (type >= __RDS_EXTHDR_MAX || len != rds_exthdr_size[type])
                return 0;

        if (ext_len >= RDS_HEADER_EXT_SPACE)
                return 0;

        /* Lay down the type byte, then the payload, then a terminating
         * RDS_EXTHDR_NONE byte. */
        dst = hdr->h_exthdr;
        *dst++ = type;
        memcpy(dst, data, len);
        dst[len] = RDS_EXTHDR_NONE;

        return 1;
}
/*
 * If a message has extension headers, retrieve them here.
 * Iterate over the extensions along these lines:
 *
 *      unsigned int pos = 0, buflen;
 *      int type;
 *
 *      while (1) {
 *              buflen = sizeof(buffer);
 *              type = rds_message_next_extension(hdr, &pos, buffer, &buflen);
 *              if (type == RDS_EXTHDR_NONE)
 *                      break;
 *              ...
 *      }
 */
int rds_message_next_extension(struct rds_header *hdr,
                unsigned int *pos, void *buf, unsigned int *buflen)
{
        unsigned int offset, ext_type, ext_len;
        u8 *src = hdr->h_exthdr;

        offset = *pos;
        if (offset >= RDS_HEADER_EXT_SPACE)
                goto none;

        /* Get the extension type and length. For now, the
         * length is implied by the extension type. */
        ext_type = src[offset++];

        if (ext_type == RDS_EXTHDR_NONE || ext_type >= __RDS_EXTHDR_MAX)
                goto none;
        ext_len = rds_exthdr_size[ext_type];
        if (offset + ext_len > RDS_HEADER_EXT_SPACE)
                goto none;

        /* Copy out the payload, truncated to the caller's buffer size. */
        *pos = offset + ext_len;
        if (ext_len < *buflen)
                *buflen = ext_len;
        memcpy(buf, src + offset, *buflen);
        return ext_type;

none:
        *pos = RDS_HEADER_EXT_SPACE;
        *buflen = 0;
        return RDS_EXTHDR_NONE;
}
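
/*
 * Illustrative sketch only, not part of the original file: one way a caller
 * might walk every extension header with rds_message_next_extension().  The
 * helper name is hypothetical; the union is sized for the largest known
 * extension so no payload is truncated.
 */
static void __maybe_unused rds_message_walk_extensions_example(struct rds_header *hdr)
{
        union {
                struct rds_ext_header_version version;
                struct rds_ext_header_rdma rdma;
                struct rds_ext_header_rdma_dest rdma_dest;
        } buf;
        unsigned int pos = 0;
        unsigned int buflen;
        int type;

        while (1) {
                /* rds_message_next_extension() shrinks buflen to the payload size */
                buflen = sizeof(buf);
                type = rds_message_next_extension(hdr, &pos, &buf, &buflen);
                if (type == RDS_EXTHDR_NONE)
                        break;
                rdsdebug("extension type %d, %u bytes\n", type, buflen);
        }
}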
int rds_message_add_version_extension(struct rds_header *hdr, unsigned int version)
{
        struct rds_ext_header_version ext_hdr;

        ext_hdr.h_version = cpu_to_be32(version);
        return rds_message_add_extension(hdr, RDS_EXTHDR_VERSION, &ext_hdr, sizeof(ext_hdr));
}
int rds_message_get_version_extension(struct rds_header *hdr, unsigned int *version)
{
        struct rds_ext_header_version ext_hdr;
        unsigned int pos = 0, len = sizeof(ext_hdr);

        /* We assume the version extension is the only one present */
        if (rds_message_next_extension(hdr, &pos, &ext_hdr, &len) != RDS_EXTHDR_VERSION)
                return 0;
        *version = be32_to_cpu(ext_hdr.h_version);
        return 1;
}
int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 offset)
{
        struct rds_ext_header_rdma_dest ext_hdr;

        ext_hdr.h_rdma_rkey = cpu_to_be32(r_key);
        ext_hdr.h_rdma_offset = cpu_to_be32(offset);
        return rds_message_add_extension(hdr, RDS_EXTHDR_RDMA_DEST, &ext_hdr, sizeof(ext_hdr));
}
struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp)
{
        struct rds_message *rm;

        rm = kzalloc(sizeof(struct rds_message) +
                     (nents * sizeof(struct scatterlist)), gfp);
        if (!rm)
                goto out;

        sg_init_table(rm->m_sg, nents);
        atomic_set(&rm->m_refcount, 1);
        INIT_LIST_HEAD(&rm->m_sock_item);
        INIT_LIST_HEAD(&rm->m_conn_item);
        spin_lock_init(&rm->m_rs_lock);

out:
        return rm;
}
struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len)
{
        struct rds_message *rm;
        unsigned int i;

        rm = rds_message_alloc(ceil(total_len, PAGE_SIZE), GFP_KERNEL);
        if (!rm)
                return ERR_PTR(-ENOMEM);

        set_bit(RDS_MSG_PAGEVEC, &rm->m_flags);
        rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);
        rm->m_nents = ceil(total_len, PAGE_SIZE);

        for (i = 0; i < rm->m_nents; ++i) {
                sg_set_page(&rm->m_sg[i],
                            virt_to_page(page_addrs[i]),
                            PAGE_SIZE, 0);
        }

        return rm;
}
struct rds_message *rds_message_copy_from_user(struct iovec *first_iov,
                                               size_t total_len)
{
        unsigned long to_copy;
        unsigned long iov_off;
        unsigned long sg_off;
        struct rds_message *rm;
        struct iovec *iov;
        struct scatterlist *sg;
        int ret = 0;

        rm = rds_message_alloc(ceil(total_len, PAGE_SIZE), GFP_KERNEL);
        if (!rm) {
                ret = -ENOMEM;
                goto out;
        }

        rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);

        /*
         * now allocate and copy in the data payload.
         */
        sg = rm->m_sg;
        iov = first_iov;
        iov_off = 0;
        sg_off = 0; /* Dear gcc, sg->page will be null from kzalloc. */

        while (total_len) {
                if (sg_page(sg) == NULL) {
                        ret = rds_page_remainder_alloc(sg, total_len,
                                                       GFP_HIGHUSER);
                        if (ret)
                                goto out;
                        rm->m_nents++;
                        sg_off = 0;
                }

                while (iov_off == iov->iov_len) {
                        iov_off = 0;
                        iov++;
                }

                to_copy = min(iov->iov_len - iov_off, sg->length - sg_off);
                to_copy = min_t(size_t, to_copy, total_len);

                rdsdebug("copying %lu bytes from user iov [%p, %zu] + %lu to "
                         "sg [%p, %u, %u] + %lu\n",
                         to_copy, iov->iov_base, iov->iov_len, iov_off,
                         (void *)sg_page(sg), sg->offset, sg->length, sg_off);

                ret = rds_page_copy_from_user(sg_page(sg), sg->offset + sg_off,
                                              iov->iov_base + iov_off,
                                              to_copy);
                if (ret)
                        goto out;

                iov_off += to_copy;
                total_len -= to_copy;
                sg_off += to_copy;

                if (sg_off == sg->length)
                        sg++;
        }

out:
        if (ret) {
                if (rm)
                        rds_message_put(rm);
                rm = ERR_PTR(ret);
        }
        return rm;
}
int rds_message_inc_copy_to_user(struct rds_incoming *inc,
                                 struct iovec *first_iov, size_t size)
{
        struct rds_message *rm;
        struct iovec *iov;
        struct scatterlist *sg;
        unsigned long to_copy;
        unsigned long iov_off;
        unsigned long vec_off;
        int copied;
        int ret;
        u32 len;

        rm = container_of(inc, struct rds_message, m_inc);
        len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

        iov = first_iov;
        iov_off = 0;
        sg = rm->m_sg;
        vec_off = 0;
        copied = 0;

        while (copied < size && copied < len) {
                while (iov_off == iov->iov_len) {
                        iov_off = 0;
                        iov++;
                }

                to_copy = min(iov->iov_len - iov_off, sg->length - vec_off);
                to_copy = min_t(size_t, to_copy, size - copied);
                to_copy = min_t(unsigned long, to_copy, len - copied);

                rdsdebug("copying %lu bytes to user iov [%p, %zu] + %lu to "
                         "sg [%p, %u, %u] + %lu\n",
                         to_copy, iov->iov_base, iov->iov_len, iov_off,
                         sg_page(sg), sg->offset, sg->length, vec_off);

                ret = rds_page_copy_to_user(sg_page(sg), sg->offset + vec_off,
                                            iov->iov_base + iov_off,
                                            to_copy);
                if (ret) {
                        copied = ret;
                        break;
                }

                iov_off += to_copy;
                vec_off += to_copy;
                copied += to_copy;

                if (vec_off == sg->length) {
                        vec_off = 0;
                        sg++;
                }
        }

        return copied;
}
/*
 * If the message is still on the send queue, wait until the transport
 * is done with it. This is particularly important for RDMA operations.
 */
void rds_message_wait(struct rds_message *rm)
{
        wait_event(rds_message_flush_waitq,
                   !test_bit(RDS_MSG_MAPPED, &rm->m_flags));
}
void rds_message_unmapped(struct rds_message *rm)
{
        clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
        if (waitqueue_active(&rds_message_flush_waitq))
                wake_up(&rds_message_flush_waitq);
}
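
/*
 * Illustrative sketch only, not part of the original file: how the two
 * functions above are meant to pair up.  The transport is assumed to set
 * RDS_MSG_MAPPED while a message's pages are DMA-mapped; a (hypothetical)
 * send-completion handler then unmaps and calls rds_message_unmapped() so
 * that a sender blocked in rds_message_wait() can continue.
 */
static void __maybe_unused rds_message_send_complete_example(struct rds_message *rm)
{
        /* ... transport-specific DMA unmapping of rm would happen here ... */

        /* Clear RDS_MSG_MAPPED and wake anyone sleeping in rds_message_wait(). */
        rds_message_unmapped(rm);

        /* Drop the reference the transport held while the send was in flight. */
        rds_message_put(rm);
}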