/*
 * memchr - find a character in a memory zone
 *
 * Copyright (c) 2020, Arm Limited.
 * SPDX-License-Identifier: MIT
 */

/* Assumptions:
 *
 * ARMv8-a, AArch64, Advanced SIMD.
 * MTE compatible.
 */

#include "../asmdefs.h"

#define srcin		x0
#define chrin		w1
#define cntin		x2
#define result		x0

#define src		x3
#define cntrem		x4
#define synd		x5
#define shift		x6
#define tmp		x7
#define wtmp		w7

#define vrepchr		v0
#define qdata		q1
#define vdata		v1
#define vhas_chr	v2
#define vrepmask	v3
#define vend		v4
#define dend		d4
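/* Note on the aliases above: result reuses srcin (both x0), qdata and vdata
   are the q- and v-register views of the same vector register, and dend is
   the low 64 bits of vend, which fmov moves to a general register below.  */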
/*
   Core algorithm:

   For each 16-byte chunk we calculate a 64-bit syndrome value with four bits
   per byte. For even bytes, bits 0-3 are set if the relevant byte matched the
   requested character. Bits 4-7 must be zero. Bits 4-7 are set likewise for
   odd bytes so that adjacent bytes can be merged. Since the bits in the
   syndrome reflect the order in which things occur in the original string,
   counting trailing zeros identifies exactly which byte matched.  */
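/* Illustrative example (little-endian): suppose only byte 5 of a chunk
   matches.  cmeq sets lane 5 to 0xff; ANDing with vrepmask (0xf00f per
   halfword, i.e. 0x0f in even byte lanes and 0xf0 in odd ones) leaves 0xf0
   in lane 5.  The pairwise addp then folds lanes 2i and 2i+1 into byte i of
   the syndrome, so byte 2 becomes 0xf0 and bits 20-23 are set.  rbit + clz
   counts 20 trailing zeros, and 20 >> 2 recovers byte index 5.  */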
ENTRY (__memchr_aarch64_mte)
	PTR_ARG (0)
	SIZE_ARG (2)
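	/* Search the first, possibly misaligned, 16-byte chunk.  src is
	   rounded down to a 16-byte boundary; the syndrome is shifted right
	   by four bits per byte of misalignment (a 64-bit shift uses the
	   amount modulo 64, so only the low four bits of srcin matter),
	   discarding any matches that precede srcin.  */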
	bic	src, srcin, 15
	cbz	cntin, L(nomatch)
	ld1	{vdata.16b}, [src]
	dup	vrepchr.16b, chrin
	mov	wtmp, 0xf00f
	dup	vrepmask.8h, wtmp
	cmeq	vhas_chr.16b, vdata.16b, vrepchr.16b
	lsl	shift, srcin, 2
	and	vhas_chr.16b, vhas_chr.16b, vrepmask.16b
	addp	vend.16b, vhas_chr.16b, vhas_chr.16b		/* 128->64 */
	fmov	synd, dend
	lsr	synd, synd, shift
	cbz	synd, L(start_loop)
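	/* Match found in the first chunk: convert the syndrome to a byte
	   offset and bounds-check it, so a match at or beyond srcin + cntin
	   returns NULL instead.  */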
	rbit	synd, synd
	clz	synd, synd
	add	result, srcin, synd, lsr 2
	cmp	cntin, synd, lsr 2
	csel	result, result, xzr, hi
	ret
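	/* No match in the first chunk: work out how many bytes remain past
	   it and give up if the count is already exhausted.  */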
L(start_loop):
	sub	tmp, src, srcin
	add	tmp, tmp, 16
	subs	cntrem, cntin, tmp
	b.ls	L(nomatch)
	/* Make sure that the loop won't overread by a 16-byte chunk.  It
	   consumes 32 bytes per iteration in two halves, and only the second
	   half checks the count, so when an odd number of 16-byte chunks
	   remains we enter at L(loop32_2) to keep the unchecked half away
	   from the final chunk.  */
	add	tmp, cntrem, 15
	tbnz	tmp, 4, L(loop32_2)
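	/* Inside the loops a plain umaxp reduction serves as a cheap "any
	   byte matched" test; the masked four-bits-per-byte syndrome is only
	   rebuilt once a match is known, at L(end).  */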
	.p2align 4
L(loop32):
	ldr	qdata, [src, 16]!
	cmeq	vhas_chr.16b, vdata.16b, vrepchr.16b
	umaxp	vend.16b, vhas_chr.16b, vhas_chr.16b		/* 128->64 */
	fmov	synd, dend
	cbnz	synd, L(end)
L(loop32_2):
	ldr	qdata, [src, 16]!
	subs	cntrem, cntrem, 32
	cmeq	vhas_chr.16b, vdata.16b, vrepchr.16b
	b.ls	L(end)
	umaxp	vend.16b, vhas_chr.16b, vhas_chr.16b		/* 128->64 */
	fmov	synd, dend
	cbz	synd, L(loop32)
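	/* Rebuild the masked syndrome for the last chunk examined, convert
	   it to a byte offset (rbit + clz on little-endian, clz alone on
	   big-endian) and compare against the bytes actually remaining, so a
	   match beyond srcin + cntin yields NULL.  */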
L(end):
	and	vhas_chr.16b, vhas_chr.16b, vrepmask.16b
	addp	vend.16b, vhas_chr.16b, vhas_chr.16b		/* 128->64 */
	fmov	synd, dend
	add	tmp, srcin, cntin
	sub	cntrem, tmp, src
#ifndef __AARCH64EB__
	rbit	synd, synd
#endif
	clz	synd, synd
	cmp	cntrem, synd, lsr 2
	add	result, src, synd, lsr 2
	csel	result, result, xzr, hi
	ret
L(nomatch):
	mov	result, 0
	ret

END (__memchr_aarch64_mte)