and-01.ll revision fa487e83a83c260d6a50f3df00a0eb012553a912
; Test 32-bit ANDs in which the second operand is variable.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s

declare i32 @foo()

; Check NR.
define i32 @f1(i32 %a, i32 %b) {
; CHECK: f1:
; CHECK: nr %r2, %r3
; CHECK: br %r14
  %and = and i32 %a, %b
  ret i32 %and
}

; Check the low end of the N range.
define i32 @f2(i32 %a, i32 *%src) {
; CHECK: f2:
; CHECK: n %r2, 0(%r3)
; CHECK: br %r14
  %b = load i32 *%src
  %and = and i32 %a, %b
  ret i32 %and
}

; Check the high end of the aligned N range.
define i32 @f3(i32 %a, i32 *%src) {
; CHECK: f3:
; CHECK: n %r2, 4092(%r3)
; CHECK: br %r14
  %ptr = getelementptr i32 *%src, i64 1023
  %b = load i32 *%ptr
  %and = and i32 %a, %b
  ret i32 %and
}

; Check the next word up, which should use NY instead of N.
define i32 @f4(i32 %a, i32 *%src) {
; CHECK: f4:
; CHECK: ny %r2, 4096(%r3)
; CHECK: br %r14
  %ptr = getelementptr i32 *%src, i64 1024
  %b = load i32 *%ptr
  %and = and i32 %a, %b
  ret i32 %and
}

; Check the high end of the aligned NY range.
define i32 @f5(i32 %a, i32 *%src) {
; CHECK: f5:
; CHECK: ny %r2, 524284(%r3)
; CHECK: br %r14
  %ptr = getelementptr i32 *%src, i64 131071
  %b = load i32 *%ptr
  %and = and i32 %a, %b
  ret i32 %and
}

; Check the next word up, which needs separate address logic.
; Other sequences besides this one would be OK.
define i32 @f6(i32 %a, i32 *%src) {
; CHECK: f6:
; CHECK: agfi %r3, 524288
; CHECK: n %r2, 0(%r3)
; CHECK: br %r14
  %ptr = getelementptr i32 *%src, i64 131072
  %b = load i32 *%ptr
  %and = and i32 %a, %b
  ret i32 %and
}

; Check the high end of the negative aligned NY range.
define i32 @f7(i32 %a, i32 *%src) {
; CHECK: f7:
; CHECK: ny %r2, -4(%r3)
; CHECK: br %r14
  %ptr = getelementptr i32 *%src, i64 -1
  %b = load i32 *%ptr
  %and = and i32 %a, %b
  ret i32 %and
}

; Check the low end of the NY range.
define i32 @f8(i32 %a, i32 *%src) {
; CHECK: f8:
; CHECK: ny %r2, -524288(%r3)
; CHECK: br %r14
  %ptr = getelementptr i32 *%src, i64 -131072
  %b = load i32 *%ptr
  %and = and i32 %a, %b
  ret i32 %and
}

; Check the next word down, which needs separate address logic.
; Other sequences besides this one would be OK.
define i32 @f9(i32 %a, i32 *%src) {
; CHECK: f9:
; CHECK: agfi %r3, -524292
; CHECK: n %r2, 0(%r3)
; CHECK: br %r14
  %ptr = getelementptr i32 *%src, i64 -131073
  %b = load i32 *%ptr
  %and = and i32 %a, %b
  ret i32 %and
}

; Check that N allows an index.
define i32 @f10(i32 %a, i64 %src, i64 %index) {
; CHECK: f10:
; CHECK: n %r2, 4092({{%r4,%r3|%r3,%r4}})
; CHECK: br %r14
  %add1 = add i64 %src, %index
  %add2 = add i64 %add1, 4092
  %ptr = inttoptr i64 %add2 to i32 *
  %b = load i32 *%ptr
  %and = and i32 %a, %b
  ret i32 %and
}

; Check that NY allows an index.
define i32 @f11(i32 %a, i64 %src, i64 %index) {
; CHECK: f11:
; CHECK: ny %r2, 4096({{%r4,%r3|%r3,%r4}})
; CHECK: br %r14
  %add1 = add i64 %src, %index
  %add2 = add i64 %add1, 4096
  %ptr = inttoptr i64 %add2 to i32 *
  %b = load i32 *%ptr
  %and = and i32 %a, %b
  ret i32 %and
}

; Check that ANDs of spilled values can use N rather than NR.
define i32 @f12(i32 *%ptr0) {
; CHECK: f12:
; CHECK: brasl %r14, foo@PLT
; CHECK: n %r2, 16{{[04]}}(%r15)
; CHECK: br %r14
  %ptr1 = getelementptr i32 *%ptr0, i64 2
  %ptr2 = getelementptr i32 *%ptr0, i64 4
  %ptr3 = getelementptr i32 *%ptr0, i64 6
  %ptr4 = getelementptr i32 *%ptr0, i64 8
  %ptr5 = getelementptr i32 *%ptr0, i64 10
  %ptr6 = getelementptr i32 *%ptr0, i64 12
  %ptr7 = getelementptr i32 *%ptr0, i64 14
  %ptr8 = getelementptr i32 *%ptr0, i64 16
  %ptr9 = getelementptr i32 *%ptr0, i64 18

  %val0 = load i32 *%ptr0
  %val1 = load i32 *%ptr1
  %val2 = load i32 *%ptr2
  %val3 = load i32 *%ptr3
  %val4 = load i32 *%ptr4
  %val5 = load i32 *%ptr5
  %val6 = load i32 *%ptr6
  %val7 = load i32 *%ptr7
  %val8 = load i32 *%ptr8
  %val9 = load i32 *%ptr9

  %ret = call i32 @foo()

  %and0 = and i32 %ret, %val0
  %and1 = and i32 %and0, %val1
  %and2 = and i32 %and1, %val2
  %and3 = and i32 %and2, %val3
  %and4 = and i32 %and3, %val4
  %and5 = and i32 %and4, %val5
  %and6 = and i32 %and5, %val6
  %and7 = and i32 %and6, %val7
  %and8 = and i32 %and7, %val8
  %and9 = and i32 %and8, %val9

  ret i32 %and9
}