/* $Id: checksum.S,v 1.10 2001/07/06 13:11:32 gniibe Exp $
 *
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IP/TCP/UDP checksumming routines
 *
 * Authors:	Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Tom May, <ftom@netcom.com>
 *		Pentium Pro/II routines:
 *		Alexander Kjeldaas <astor@guardian.no>
 *		Finn Arne Gangstad <finnag@guardian.no>
 *		Lots of code moved from tcp.c and ip.c; see those files
 *		for more names.
 *
 * Changes:	Ingo Molnar, converted csum_partial_copy() to 2.1 exception
 *		handling.
 *		Andi Kleen, add zeroing on error
 *		converted to pure assembler
 *
 * SuperH version:  Copyright (C) 1999  Niibe Yutaka
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/errno.h>
#include <linux/linkage.h>
/*
 * Computes a partial checksum, e.g. for TCP/UDP fragments.
 *
 * unsigned int csum_partial(const unsigned char *buf, int len,
 *			     unsigned int sum);
 *
 * Experiments with Ethernet and SLIP connections show that buf
 * is aligned on either a 2-byte or 4-byte boundary.  We get at
 * least a twofold speedup on 486 and Pentium if it is 4-byte aligned.
 * Fortunately, it is easy to convert 2-byte alignment to 4-byte
 * alignment for the unrolled loop.
 */
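/*
 * For reference, a minimal C sketch of the algorithm implemented below
 * (the function name csum_partial_ref is hypothetical and the code is
 * illustrative only: it ignores the alignment and unrolling tricks used
 * by the assembly and assumes buf is at least 2-byte aligned):
 *
 *	static unsigned int csum_partial_ref(const unsigned char *buf,
 *					     int len, unsigned int sum)
 *	{
 *		unsigned long long acc = sum;
 *
 *		while (len > 1) {
 *			acc += *(const unsigned short *)buf;
 *			buf += 2;
 *			len -= 2;
 *		}
 *		if (len == 1)		// trailing odd byte; on big-endian
 *			acc += *buf;	// it belongs in the high byte instead
 *		acc = (acc & 0xffffffffULL) + (acc >> 32);
 *		acc = (acc & 0xffffffffULL) + (acc >> 32);
 *		return (unsigned int)acc;
 *	}
 */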
	tst	#2, r0		! Check alignment.
	bt	2f		! Jump if alignment is ok.
	add	#-2, r5		! Alignment uses up two bytes.
	bt/s	1f		! Jump if we had at least two bytes.
	add	#2, r5		! r5 was < 2.  Deal with it.
	mov	r5, r1		! Save new len for later use.
	bt/s	4f		! If it is zero, go to 4f.
	addc	r5, r6		! Add carry to r6.
	addc	r5, r6		! r5 == 0 here, so this just adds the carry bit.
	bt	9f		! If it is zero, go to 9f.
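	! Note: the running checksum is kept in r6.  addc adds its operands
	! plus the T bit and leaves the carry out in T, so adding a register
	! that is known to be zero (r5 above) is simply a way of folding a
	! pending carry back into the sum.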
#ifndef __LITTLE_ENDIAN__
/*
 * unsigned int csum_partial_copy_generic (const char *src, char *dst, int len,
 *					    int sum, int *src_err_ptr, int *dst_err_ptr)
 *
 * Copy from src while checksumming, otherwise like csum_partial.
 *
 * The macros SRC and DST specify the type of access for the instruction,
 * so we can call a custom exception handler for each access type.
 *
 * FIXME: could someone double-check whether I haven't mixed up some SRC and
 *	  DST definitions?  It's damn hard to trigger all cases.  I hope I got
 *	  them all but there's no guarantee.
 */
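/*
 * Conceptually the routine behaves like the hedged C sketch below (the
 * helper name csum_partial_copy_sketch and the use of memcpy are purely
 * illustrative; the real code fuses the copy and the checksum into a
 * single pass and reports faults through the two error pointers):
 *
 *	static unsigned int csum_partial_copy_sketch(const char *src,
 *						     char *dst, int len,
 *						     int sum,
 *						     int *src_err_ptr,
 *						     int *dst_err_ptr)
 *	{
 *		// On a source fault: *src_err_ptr = -EFAULT and the
 *		// remainder of dst is zeroed.  On a destination fault:
 *		// *dst_err_ptr = -EFAULT.
 *		memcpy(dst, src, len);
 *		return csum_partial((const unsigned char *)dst, len, sum);
 *	}
 */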
9999: __VA_ARGS__ ;			\
	.section __ex_table, "a";	\
	.long 9999b, 6001f	;	\
9999: __VA_ARGS__ ;			\
	.section __ex_table, "a";	\
	.long 9999b, 6002f	;	\
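/*
 * Each SRC()/DST() wrapper tags the wrapped access with a local label
 * (9999) and emits an __ex_table entry pairing that address with a
 * fixup address: 6001f for faulting source accesses, 6002f for faulting
 * destination accesses.  The fault handler uses this table to resume at
 * the matching recovery code in the .fixup section below.
 */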
! r4:	const char *SRC
ENTRY(csum_partial_copy_generic)
	mov	#3,r0		! Check src and dest are equally aligned.
	bf	3f		! Different alignments, use slow version.
	tst	#1,r0		! Check dest word aligned.
	bf	3f		! If not, do it the slow way.
	tst	r0,r5		! Check dest alignment.
	bt	2f		! Jump if alignment is ok.
	add	#-2,r6		! Alignment uses up two bytes.
	cmp/pz	r6		! Do we have at least two bytes left?
	add	#2,r6		! r6 was < 2.  Deal with it.
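	! As in csum_partial above: when src and dest are only 2-byte
	! aligned, the first two bytes are handled separately so that the
	! main copy loop can run with 4-byte alignment.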
3:	! Handle different src and dest alignments.
	! This is not common, so a simple byte-by-byte copy will do.
DST(	mov.b	r0,@(1,r5)	)
#ifdef __LITTLE_ENDIAN__
	! src and dest equally aligned, but to a two-byte boundary.
	! Handle the first two bytes as a special case.
DST(	mov.l	r1,@(4,r5)	)
DST(	mov.l	r0,@(8,r5)	)
DST(	mov.l	r1,@(12,r5)	)
DST(	mov.l	r0,@(16,r5)	)
DST(	mov.l	r1,@(20,r5)	)
DST(	mov.l	r0,@(24,r5)	)
DST(	mov.l	r1,@(28,r5)	)
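	! (The mov.l stores above belong to the unrolled main loop: each
	! pass copies a 32-byte block while the matching loads feed addc,
	! so the checksum is accumulated in the same pass as the copy.)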
#ifndef __LITTLE_ENDIAN__
	.section .fixup, "ax"
	mov.l	@(8,r15),r0		! src_err_ptr
	! Zero the complete destination - computing the rest is too much work.
	mov.l	@(4,r15),r5		! dst
	mov.l	@(12,r15),r0		! dst_err_ptr
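	! Note on the stack offsets used in the fixup code: with the SH C
	! calling convention the first four arguments (src, dst, len, sum)
	! arrive in r4-r7, so the two error pointers are passed on the
	! caller's stack.  Assuming the prologue saved dst and len (two
	! longwords) before any faulting access, len ends up at 0(r15),
	! dst at 4(r15), and the stacked error pointers at 8(r15) and
	! 12(r15); on a fault, -EFAULT is stored through the matching
	! pointer.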