Fix CVE-2019-7309 <https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-7309>:
on x32, memcmp can incorrectly return zero for unequal inputs because the
x86-64 implementation uses signed comparison instructions on the size
argument and does not clear the undefined upper 32 bits of RDX.
Taken from this upstream commit, sans ChangeLog updates and tests:
<https://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=3f635fb43389b54f682fc9ed2acc0b2aaf4a923d>.
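
As a minimal sketch (not part of the applied patch), the C program below
illustrates the signed-versus-unsigned hazard that the jle->jbe and jge->jae
changes address. The size constant is a hypothetical value with the most
significant bit set, standing in for garbage upper bits in RDX:

    #include <stdint.h>
    #include <stdio.h>

    int
    main (void)
    {
      /* Hypothetical size with the most significant bit set, standing in
         for garbage in the upper bits of RDX on x32.  */
      uint64_t n = 0x8000000000000001ULL;

      /* Signed view: n is negative, so the old "cmpq $1, %rdx; jle" path
         treats it as <= 1 and compares only one byte, which can yield a
         spurious zero (equal) result.  */
      printf ("signed:   n <= 1 -> %d\n", (int64_t) n <= 1);

      /* Unsigned view: n is huge, so the new "jbe" is not taken.  */
      printf ("unsigned: n <= 1 -> %d\n", n <= 1);
      return 0;
    }

On x32 the practical trigger is garbage upper bits rather than a genuinely
huge size; the movl %edx, %edx added under __ILP32__ clears those bits before
the size is used, and the unsigned branches harden the remaining paths.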
diff --git a/sysdeps/x86_64/memcmp.S b/sysdeps/x86_64/memcmp.S
index 1fc487caa5..1322bb3b92 100644
--- a/sysdeps/x86_64/memcmp.S
+++ b/sysdeps/x86_64/memcmp.S
@@ -21,14 +21,18 @@
.text
ENTRY (memcmp)
- test %rdx, %rdx
+#ifdef __ILP32__
+ /* Clear the upper 32 bits. */
+ movl %edx, %edx
+#endif
+ test %RDX_LP, %RDX_LP
jz L(finz)
cmpq $1, %rdx
- jle L(finr1b)
+ jbe L(finr1b)
subq %rdi, %rsi
movq %rdx, %r10
cmpq $32, %r10
- jge L(gt32)
+ jae L(gt32)
/* Handle small chunks and last block of less than 32 bytes. */
L(small):
testq $1, %r10
@@ -156,7 +160,7 @@ L(A32):
movq %r11, %r10
andq $-32, %r10
cmpq %r10, %rdi
- jge L(mt16)
+ jae L(mt16)
/* Pre-unroll to be ready for unrolled 64B loop. */
testq $32, %rdi
jz L(A64)
@@ -178,7 +182,7 @@ L(A64):
movq %r11, %r10
andq $-64, %r10
cmpq %r10, %rdi
- jge L(mt32)
+ jae L(mt32)
L(A64main):
movdqu (%rdi,%rsi), %xmm0
@@ -216,7 +220,7 @@ L(mt32):
movq %r11, %r10
andq $-32, %r10
cmpq %r10, %rdi
- jge L(mt16)
+ jae L(mt16)
L(A32main):
movdqu (%rdi,%rsi), %xmm0
@@ -254,7 +258,7 @@ L(ATR):
movq %r11, %r10
andq $-32, %r10
cmpq %r10, %rdi
- jge L(mt16)
+ jae L(mt16)
testq $16, %rdi
jz L(ATR32)
@@ -325,7 +329,7 @@ L(ATR64main):
movq %r11, %r10
andq $-32, %r10
cmpq %r10, %rdi
- jge L(mt16)
+ jae L(mt16)
L(ATR32res):
movdqa (%rdi,%rsi), %xmm0