/*
 * This file contains assembly-language implementations
 * of IP-style 1's complement checksum routines.
 *
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Severely hacked about by Paul Mackerras (paulus@cs.anu.edu.au).
 */
#include <linux/sys.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/ppc_asm.h>
/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * csum_partial(buff, len, sum)
 */
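/*
 * For orientation, a hedged C sketch of what the routine below computes.
 * The helper name is invented and the alignment games the assembly plays
 * are elided; the real code also accumulates with the carry flag (adde)
 * rather than widening to 64 bits:
 *
 *	unsigned int csum_partial_sketch(const unsigned char *buff,
 *					 int len, unsigned int sum)
 *	{
 *		unsigned long long acc = sum;
 *
 *		for (; len >= 4; len -= 4, buff += 4)
 *			acc += *(const unsigned int *)buff;
 *		if (len >= 2) {
 *			acc += *(const unsigned short *)buff;
 *			buff += 2;
 *			len -= 2;
 *		}
 *		if (len)			// trailing odd byte lands in
 *			acc += *buff << 8;	// the upper byte, as below
 *		while (acc >> 32)		// fold carries back in
 *			acc = (acc & 0xffffffffULL) + (acc >> 32);
 *		return (unsigned int)acc;
 *	}
 */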
	beq	3f		/* if we're doing < 4 bytes */
	andi.	r5,r3,2		/* Align buffer to longword boundary */
	lhz	r5,4(r3)	/* do 2 bytes to get aligned */
	srwi.	r6,r4,2		/* # words to do */
2:	lwzu	r5,4(r3)	/* the bdnz has zero overhead, so it should */
	adde	r0,r0,r5	/* be unnecessary to unroll this loop */
	slwi	r5,r5,8		/* Upper byte of word */
5:	addze	r3,r0		/* add in final carry */
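/*
 * The 32-bit partial sum (with the last carry folded in by addze) is
 * returned in r3.  Reducing it to the final 16-bit Internet checksum is
 * left to the caller; roughly, in illustrative C:
 *
 *	sum = (sum & 0xffff) + (sum >> 16);	// add high half into low
 *	sum += sum >> 16;			// absorb the carry-out
 *	return (unsigned short)~sum;		// one's complement
 */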
/*
 * Computes the checksum of a memory block at src, length len,
 * and adds in "sum" (32-bit), while copying the block to dst.
 * If an access exception occurs on src or dst, it stores -EFAULT
 * to *src_err or *dst_err respectively, and (for an error on
 * src) zeroes the rest of dst.
 *
 * csum_partial_copy_generic(src, dst, len, sum, src_err, dst_err)
 */
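/*
 * A hedged C sketch of the no-fault fast path of the contract above
 * (the name is invented, the sub-word tail and alignment handling are
 * elided, and the -EFAULT/zero-fill behaviour is implemented by the
 * exception-table machinery below rather than by explicit checks):
 *
 *	unsigned int csum_copy_sketch(const unsigned int *src,
 *				      unsigned int *dst, int len,
 *				      unsigned int sum)
 *	{
 *		unsigned long long acc = sum;
 *
 *		for (; len >= 4; len -= 4)	// copy and sum one word
 *			acc += *dst++ = *src++;
 *		while (acc >> 32)		// end-around carry fold
 *			acc = (acc & 0xffffffffULL) + (acc >> 32);
 *		return (unsigned int)acc;
 *	}
 *
 * On a faulting source access the real code stores -EFAULT through
 * src_err and zeroes the rest of dst; on a faulting destination access
 * it stores -EFAULT through dst_err.
 */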
#define CSUM_COPY_16_BYTES_WITHEX(n)	\
#define CSUM_COPY_16_BYTES_EXCODE(n)		\
	.section __ex_table,"a";		\
	.long	8 ## n ## 0b,src_error;		\
	.long	8 ## n ## 1b,src_error;		\
	.long	8 ## n ## 2b,src_error;		\
	.long	8 ## n ## 3b,src_error;		\
	.long	8 ## n ## 4b,dst_error;		\
	.long	8 ## n ## 5b,dst_error;		\
	.long	8 ## n ## 6b,dst_error;		\
	.long	8 ## n ## 7b,dst_error;		\
	.text
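/*
 * Illustrative expansion for n = 0: CSUM_COPY_16_BYTES_WITHEX(0) places
 * local labels 800: through 803: on its loads from src and 804: through
 * 807: on its stores to dst, and CSUM_COPY_16_BYTES_EXCODE(0) pastes the
 * same digits to emit one __ex_table entry per label, e.g.
 *
 *	.long	800b,src_error		// fault on a load
 *	.long	804b,dst_error		// fault on a store
 *
 * so a fault anywhere in the unrolled loop lands on the right fixup.
 */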
	.stabs	"arch/powerpc/lib/",N_SO,0,0,0f
	.stabs	"checksum_32.S",N_SO,0,0,0f
0:
CACHELINE_BYTES = L1_CACHE_BYTES
LG_CACHELINE_BYTES = L1_CACHE_SHIFT
CACHELINE_MASK = (L1_CACHE_BYTES-1)
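/*
 * Worked example, assuming a 32-byte L1 line (L1_CACHE_SHIFT == 5):
 * CACHELINE_BYTES = 32, LG_CACHELINE_BYTES = 5, CACHELINE_MASK = 0x1f,
 * so andi. with CACHELINE_MASK extracts an offset within a line and
 * srwi. by LG_CACHELINE_BYTES counts whole lines.
 */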
_GLOBAL(csum_partial_copy_generic)
	andi.	r0,r4,1			/* is destination address even ? */
	andi.	r0,r0,CACHELINE_MASK	/* # bytes to start of cache line */
	cmplw	0,r5,r0			/* is this more than total to do? */
	blt	63f			/* if not much to do */
	andi.	r8,r0,3			/* get it word-aligned first */
70:	lbz	r9,4(r4)		/* do some bytes */
72:	lwzu	r9,4(r4)		/* do some words */
58:	srwi.	r0,r5,LG_CACHELINE_BYTES /* # complete cachelines */
	clrlwi	r5,r5,32-LG_CACHELINE_BYTES
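/*
 * Example of the split above, assuming 32-byte lines and r5 = 200:
 * srwi. leaves r0 = 200 >> 5 = 6 complete cachelines, and clrlwi
 * (clear all but the low 5 bits) leaves r5 = 200 & 0x1f = 8 trailing
 * bytes for the cleanup code.
 */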
	/* Here we decide how far ahead to prefetch the source */
#if MAX_COPY_PREFETCH > 1
	/* Heuristically, for large transfers we prefetch
	   MAX_COPY_PREFETCH cachelines ahead.  For small transfers
	   we prefetch 1 cacheline ahead. */
	cmpwi	r0,MAX_COPY_PREFETCH
	li	r7,MAX_COPY_PREFETCH
	addi	r3,r3,CACHELINE_BYTES
	addi	r3,r3,CACHELINE_BYTES
#endif /* MAX_COPY_PREFETCH > 1 */
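/*
 * The prefetch-depth choice above, as a hedged C sketch (nlines stands
 * for the cacheline count in r0, depth for the distance kept in r7):
 *
 *	int depth = (nlines > MAX_COPY_PREFETCH) ? MAX_COPY_PREFETCH : 1;
 *
 * Each dcbt then touches the source line depth*CACHELINE_BYTES ahead,
 * which is what the addi r3,r3,CACHELINE_BYTES steps maintain.
 */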
	/* the main body of the cacheline loop */
	CSUM_COPY_16_BYTES_WITHEX(0)
#if L1_CACHE_BYTES >= 32
	CSUM_COPY_16_BYTES_WITHEX(1)
#if L1_CACHE_BYTES >= 64
	CSUM_COPY_16_BYTES_WITHEX(2)
	CSUM_COPY_16_BYTES_WITHEX(3)
#if L1_CACHE_BYTES >= 128
	CSUM_COPY_16_BYTES_WITHEX(4)
	CSUM_COPY_16_BYTES_WITHEX(5)
	CSUM_COPY_16_BYTES_WITHEX(6)
	CSUM_COPY_16_BYTES_WITHEX(7)
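/*
 * Each CSUM_COPY_16_BYTES_WITHEX(n) copies and sums 16 bytes, so this
 * #if ladder instantiates L1_CACHE_BYTES/16 of them: two for 32-byte
 * lines, four for 64-byte, eight for 128-byte.  One trip around the
 * loop therefore handles exactly one cacheline.
 */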
	rlwinm	r3,r3,8,0,31	/* swap bytes for odd destination */
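/*
 * With an odd destination address every byte was stored one lane off
 * within its halfword, so the accumulated sum is byte-rotated relative
 * to what the caller expects.  The rlwinm above is a rotate left by 8;
 * in illustrative C:
 *
 *	sum = (sum << 8) | (sum >> 24);
 *
 * which is safe for a one's complement sum because rotation only
 * permutes byte lanes.
 */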
	.section __ex_table,"a"
/*
 * this stuff handles faults in the cacheline loop and branches to either
 * src_error (if in read part) or dst_error (if in write part)
 */
	CSUM_COPY_16_BYTES_EXCODE(0)
#if L1_CACHE_BYTES >= 32
	CSUM_COPY_16_BYTES_EXCODE(1)
#if L1_CACHE_BYTES >= 64
	CSUM_COPY_16_BYTES_EXCODE(2)
	CSUM_COPY_16_BYTES_EXCODE(3)
#if L1_CACHE_BYTES >= 128
	CSUM_COPY_16_BYTES_EXCODE(4)
	CSUM_COPY_16_BYTES_EXCODE(5)
	CSUM_COPY_16_BYTES_EXCODE(6)
	CSUM_COPY_16_BYTES_EXCODE(7)
	.section __ex_table,"a"