/* Optimized version of the standard memset() function.

   Copyright (c) 2002 Hewlett-Packard Co/CERN
	Sverre Jarp <Sverre.Jarp@cern.ch>

   The algorithm is fairly straightforward: set byte by byte until we
   get to a 16B-aligned address, then loop on 128B chunks using an
   early store as prefetching, then loop on 32B chunks, then clear the
   remaining words, and finally clear the remaining bytes.
   Since one stf.spill of f0 can store 16B in one go, we use that
   instruction to get peak speed when value = 0.  */
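/* Roughly, the store strategy corresponds to the following C sketch.
   This is illustrative only: the function name and layout are ours, the
   32B loop is folded into the word loop here, and the real code below
   overlaps these phases with predication, software prefetching, and a
   separate stf.spill path for value = 0.

	static void *memset_sketch(void *dest, int value, unsigned long cnt)
	{
		unsigned char *p = dest;
		unsigned long v8 = 0x0101010101010101UL * (unsigned char)value;

		while (cnt > 0 && ((unsigned long)p & 15)) {	// head: byte stores
			*p++ = value;
			cnt--;
		}
		while (cnt >= 128) {				// 128B line chunks
			unsigned long *q = (unsigned long *)p;
			for (int i = 0; i < 16; i++)
				q[i] = v8;
			p += 128;
			cnt -= 128;
		}
		while (cnt >= 8) {				// remaining words
			*(unsigned long *)p = v8;
			p += 8;
			cnt -= 8;
		}
		while (cnt-- > 0)				// remaining bytes
			*p++ = value;
		return dest;
	}
 */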
#include <asm/asmmacro.h>
#include <asm/export.h>
// This routine uses only scratch predicate registers (p6 - p15)
#define p_scr		p6		// default register for same-cycle branches

#define LSIZE_SH	7		// shift amount: log2(LINE_SIZE)
GLOBAL_ENTRY(memset)
	alloc	tmp = ar.pfs, 3, 0, 0, 0
	lfetch.nt1 [dest]			// prefetch the first line early
	mov.i	save_lc = ar.lc			// save the loop counter
	mov	ret0 = dest			// return value
	cmp.ne	p_nz, p_zr = value, r0		// use stf.spill if value is zero
	cmp.eq	p_scr, p0 = cnt, r0		// count = 0 ?
	and	ptr2 = -(MIN1+1), dest		// aligned address
	and	tmp = MIN1, dest		// prepare to check for correct alignment
	tbit.nz	p_y, p_n = dest, 0		// Do we have an odd address? (M_B_U)
	mov	ptr1 = dest
	mux1	value = value, @brcst		// create 8 identical bytes in word
(p_scr)	br.ret.dpnt.many rp			// return immediately if count = 0
	cmp.ne	p_unalgn, p0 = tmp, r0		// is dest unaligned?
	sub	bytecnt = (MIN1+1), tmp		// NB: # of bytes to move is 1 higher than loopcnt
	cmp.gt	p_scr, p0 = 16, cnt		// fewer than 16 bytes in total?
(p_scr)	br.cond.dptk.many .move_bytes_unaligned	// go move just a few (M_B_U)
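// Alignment prologue: bytecnt is the number of bytes up to the next 16B
// boundary (1..15).  Its bits 3..0 select a predicated st8/st4/st2/st1
// chain that peels the head from the boundary downward to dest; after a
// taken store the pointer steps back (or forward past a skipped slot),
// so it always lands on the start of the next smaller slot.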
(p_unalgn) add	ptr1 = (MIN1+1), ptr2		// after alignment
(p_unalgn) add	ptr2 = MIN1P1HALF, ptr2		// after alignment
(p_unalgn) tbit.nz.unc p_y, p_n = bytecnt, 3	// should we do a st8 ?
(p_y)	add	cnt = -8, cnt			// 8 bytes covered by the st8
(p_unalgn) tbit.nz.unc p_yy, p_nn = bytecnt, 2	// should we do a st4 ?
(p_y)	st8	[ptr2] = value, -4		// store 8B, step back to the st4 slot
(p_n)	add	ptr2 = 4, ptr2			// no st8: step forward to the st4 slot
(p_yy)	add	cnt = -4, cnt			// 4 bytes covered by the st4
(p_unalgn) tbit.nz.unc p_y, p_n = bytecnt, 1	// should we do a st2 ?
(p_yy)	st4	[ptr2] = value, -2		// store 4B, step back to the st2 slot
(p_nn)	add	ptr2 = 2, ptr2			// no st4: step forward to the st2 slot
	mov	tmp = LINE_SIZE+1		// for the line-size compare below
(p_y)	add	cnt = -2, cnt			// 2 bytes covered by the st2
(p_unalgn) tbit.nz.unc p_yy, p_nn = bytecnt, 0	// should we do a st1 ?
	setf.sig fvalue = value			// transfer value to FLP side
(p_y)	st2	[ptr2] = value, -1		// store 2B, step back to the st1 slot
(p_n)	add	ptr2 = 1, ptr2			// no st2: step forward to the st1 slot
(p_yy)	st1	[ptr2] = value			// store the final alignment byte
	cmp.gt	p_scr, p0 = tmp, cnt		// at most one full line (128B) left?
(p_yy)	add	cnt = -1, cnt			// 1 byte covered by the st1
(p_scr)	br.cond.dpnt.many .fraction_of_line	// go move just a few
	shr.u	linecnt = cnt, LSIZE_SH		// number of full 128B lines
(p_zr)	br.cond.dptk.many .l1b			// Jump to use stf.spill
	TEXT_ALIGN(32) // --------------------- // L1A: store ahead into cache lines; fill later
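// The prefetch loop below first drops a single stf8 at the start of up
// to PREF_AHEAD lines, which both allocates each line in cache and
// stores its first 8 bytes.  The main loop (.l1ax) then fills the
// remaining 120B of every line with 15 stf8 through two pointers, while
// ptr9 keeps streaming one line ahead until the whole range is covered.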
	and	tmp = -(LINE_SIZE), cnt		// compute end of range
	mov	ptr9 = ptr1			// used for prefetching
	and	cnt = (LINE_SIZE-1), cnt	// remainder
	mov	loopcnt = PREF_AHEAD-1		// default prefetch loop
	cmp.gt	p_scr, p0 = PREF_AHEAD, linecnt	// check against actual value
(p_scr)	add	loopcnt = -1, linecnt		// prefetch fewer lines if the range is short
	add	ptr2 = 8, ptr1			// start of stores (beyond prefetch stores)
	add	ptr1 = tmp, ptr1		// first address beyond total range
	add	tmp = -1, linecnt		// next loop count
	mov.i	ar.lc = loopcnt
.pref_l1a:
	stf8	[ptr9] = fvalue, 128		// Do stores one cache line apart
	br.cloop.dptk.few .pref_l1a
	add	ptr0 = 16, ptr2			// Two stores in parallel
	mov.i	ar.lc = tmp			// loop count for the main store loop
.l1ax:
	stf8	[ptr2] = fvalue, 8
	stf8	[ptr0] = fvalue, 8
	stf8	[ptr2] = fvalue, 24
	stf8	[ptr0] = fvalue, 24
	stf8	[ptr2] = fvalue, 8
	stf8	[ptr0] = fvalue, 8
	stf8	[ptr2] = fvalue, 24
	stf8	[ptr0] = fvalue, 24
	stf8	[ptr2] = fvalue, 8
	stf8	[ptr0] = fvalue, 8
	stf8	[ptr2] = fvalue, 24
	stf8	[ptr0] = fvalue, 24
	stf8	[ptr2] = fvalue, 8
	stf8	[ptr0] = fvalue, 32
	cmp.lt	p_scr, p0 = ptr9, ptr1		// do we need more prefetching?
	stf8	[ptr2] = fvalue, 24
(p_scr)	stf8	[ptr9] = fvalue, 128		// store a line ahead as prefetch
	br.cloop.dptk.few .l1ax
	cmp.le	p_scr, p0 = 8, cnt		// at least 8 bytes left ?
(p_scr)	br.cond.dpnt.many .fraction_of_line	// Branch no. 2
	br.cond.dpnt.many .move_bytes_from_alignment	// Branch no. 3
.l1b:	// ------------------------------------ // L1B: store ahead into cache lines; fill later
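// Same store-ahead scheme as L1A, but since value = 0 here, f0 (which
// always reads as +0.0 on the FP side) can be spilled directly: each
// stf.spill clears 16B, so seven spills plus the prefetch store cover
// a full 128B line.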
	and	tmp = -(LINE_SIZE), cnt		// compute end of range
	mov	ptr9 = ptr1			// used for prefetching
	and	cnt = (LINE_SIZE-1), cnt	// remainder
	mov	loopcnt = PREF_AHEAD-1		// default prefetch loop
	cmp.gt	p_scr, p0 = PREF_AHEAD, linecnt	// check against actual value
(p_scr)	add	loopcnt = -1, linecnt		// prefetch fewer lines if the range is short
	add	ptr2 = 16, ptr1			// start of stores (beyond prefetch stores)
	add	ptr1 = tmp, ptr1		// first address beyond total range
	add	tmp = -1, linecnt		// next loop count
	mov.i	ar.lc = loopcnt
.pref_l1b:
	stf.spill [ptr9] = f0, 128		// Do stores one cache line apart
	br.cloop.dptk.few .pref_l1b
	add	ptr0 = 16, ptr2			// Two stores in parallel
	mov.i	ar.lc = tmp			// loop count for the main store loop
.l1bx:
	stf.spill [ptr2] = f0, 32
	stf.spill [ptr0] = f0, 32
	stf.spill [ptr2] = f0, 32
	stf.spill [ptr0] = f0, 32
	stf.spill [ptr2] = f0, 32
	stf.spill [ptr0] = f0, 64
	cmp.lt	p_scr, p0 = ptr9, ptr1		// do we need more prefetching?
	stf.spill [ptr2] = f0, 32
(p_scr)	stf.spill [ptr9] = f0, 128		// store a line ahead as prefetch
	br.cloop.dptk.few .l1bx
	cmp.gt	p_scr, p0 = 8, cnt		// just a few bytes left ?
(p_scr)	br.cond.dpnt.many .move_bytes_from_alignment
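// Fraction of a line (at most 128B) left: clear 32B per pass in the .l2
// loop below, then up to three single words, then the byte tail.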
.fraction_of_line:
	add	ptr2 = 16, ptr1			// second store pointer, 16B apart
	shr.u	loopcnt = cnt, 5		// loopcnt = cnt / 32
	cmp.eq	p_scr, p0 = loopcnt, r0		// no full 32B chunk left?
	add	loopcnt = -1, loopcnt
(p_scr)	br.cond.dpnt.many .store_words
	and	cnt = 0x1f, cnt			// compute the remaining cnt
	mov.i	ar.lc = loopcnt
.l2:	// ------------------------------------ // L2A: store 32B in 2 cycles
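// Two stf8 issued together through pointers 16B apart; each pointer
// advances by 8 and then 24, so every pass through the body clears 32B.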
	stf8	[ptr1] = fvalue, 8
	stf8	[ptr2] = fvalue, 8
	stf8	[ptr1] = fvalue, 24
	stf8	[ptr2] = fvalue, 24
	br.cloop.dptk.many .l2
.store_words:
	cmp.gt	p_scr, p0 = 8, cnt		// just a few bytes left ?
(p_scr)	br.cond.dpnt.many .move_bytes_from_alignment	// Branch
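// 8 <= cnt <= 31 here: the first stf8 is unconditional, the second and
// third are predicated on how much of the count survives each subtract.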
	stf8	[ptr1] = fvalue, 8		// store first word
	cmp.le	p_y, p_n = 16, cnt		// another 8 bytes after this one?
	add	cnt = -8, cnt			// subtract
(p_y)	stf8	[ptr1] = fvalue, 8		// store second word
(p_y)	cmp.le.unc p_yy, p_nn = 16, cnt		// and another one after that?
(p_y)	add	cnt = -8, cnt			// subtract
(p_yy)	stf8	[ptr1] = fvalue, 8		// store third word
(p_yy)	add	cnt = -8, cnt			// subtract
.move_bytes_from_alignment:
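// At most 7 bytes remain; bits 2..0 of cnt select the final st4, st2
// and st1 (the pointer is aligned, so each store is naturally aligned).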
	cmp.eq	p_scr, p0 = cnt, r0		// anything left at all?
	tbit.nz.unc p_y, p0 = cnt, 2		// should we terminate with a st4 ?
(p_scr)	br.cond.dpnt.few .restore_and_exit
(p_y)	st4	[ptr1] = value, 4
	tbit.nz.unc p_yy, p0 = cnt, 1		// should we terminate with a st2 ?
(p_yy)	st2	[ptr1] = value, 2
	tbit.nz.unc p_y, p0 = cnt, 0		// should we terminate with a st1 ?
(p_y)	st1	[ptr1] = value
.restore_and_exit:
	mov.i	ar.lc = save_lc			// restore the loop counter
	br.ret.sptk.many rp
.move_bytes_unaligned:
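// Short move (cnt < 16), dest possibly unaligned: p_y/p_n still hold
// the odd-address test from above.  Store one leading byte if the
// address is odd, then pairs of st2 through two pointers; ptr3 marks
// the last byte for a final st1 when cnt is odd.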
	.pred.rel "mutex", p_y, p_n
	.pred.rel "mutex", p_yy, p_nn
(p_n)	cmp.le	p_yy, p_nn = 4, cnt
(p_y)	cmp.le	p_yy, p_nn = 5, cnt
(p_n)	add	ptr2 = 2, ptr1
(p_y)	add	ptr2 = 3, ptr1
(p_y)	st1	[ptr1] = value, 1		// fill 1 (odd-aligned) byte [15, 14 (or less) left]
(p_y)	add	cnt = -1, cnt
(p_yy)	cmp.le.unc p_y, p0 = 8, cnt
	add	ptr3 = ptr1, cnt		// prepare last store
	mov.i	ar.lc = save_lc
(p_yy)	st2	[ptr1] = value, 4		// fill 2 (aligned) bytes
(p_yy)	st2	[ptr2] = value, 4		// fill 2 (aligned) bytes [11, 10 (or less) left]
(p_yy)	add	cnt = -4, cnt
(p_y)	cmp.le.unc p_yy, p0 = 8, cnt
	add	ptr3 = -1, ptr3			// last store
	tbit.nz	p_scr, p0 = cnt, 1		// will there be a st2 at the end ?
(p_y)	st2	[ptr1] = value, 4		// fill 2 (aligned) bytes
(p_y)	st2	[ptr2] = value, 4		// fill 2 (aligned) bytes [7, 6 (or less) left]
(p_y)	add	cnt = -4, cnt
(p_yy)	st2	[ptr1] = value, 4		// fill 2 (aligned) bytes
(p_yy)	st2	[ptr2] = value, 4		// fill 2 (aligned) bytes [3, 2 (or less) left]
	tbit.nz	p_y, p0 = cnt, 0		// will there be a st1 at the end ?
(p_yy)	add	cnt = -4, cnt
(p_scr)	st2	[ptr1] = value			// fill 2 (aligned) bytes
(p_y)	st1	[ptr3] = value			// fill last byte (using ptr3)
	br.ret.sptk.many rp
END(memset)
EXPORT_SYMBOL(memset)