// RUN: %clang_cc1 -Wall -Werror -triple thumbv7-eabi -target-cpu cortex-a8 -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s

// CodeGen test: verifies that ARM-specific Clang builtins lower to the
// expected LLVM intrinsics. The file is compiled to IR (run through mem2reg
// to simplify the output) and FileCheck matches the CHECK lines below
// against that IR; the code tokens and CHECK patterns must stay in sync.

#include <stdint.h>

void *f0()
{
  return __builtin_thread_pointer();
}

void f1(char *a, char *b) {
  __clear_cache(a,b);
}

// CHECK: call {{.*}} @__clear_cache

void test_eh_return_data_regno()
{
  // volatile keeps the stores observable in the IR so FileCheck can see them.
  volatile int res;
  res = __builtin_eh_return_data_regno(0);  // CHECK: store volatile i32 0
  res = __builtin_eh_return_data_regno(1);  // CHECK: store volatile i32 1
}

// The hint builtins below (nop/yield/wfe/wfi/sev/sevl) all lower to the
// single @llvm.arm.hint intrinsic, distinguished by an immediate 0..5.

void nop() {
  __builtin_arm_nop();
}

// CHECK: call {{.*}} @llvm.arm.hint(i32 0)

void yield() {
  __builtin_arm_yield();
}

// CHECK: call {{.*}} @llvm.arm.hint(i32 1)

void wfe() {
  __builtin_arm_wfe();
}

// CHECK: call {{.*}} @llvm.arm.hint(i32 2)

void wfi() {
  __builtin_arm_wfi();
}

// CHECK: call {{.*}} @llvm.arm.hint(i32 3)

void sev() {
  __builtin_arm_sev();
}

// CHECK: call {{.*}} @llvm.arm.hint(i32 4)

void sevl() {
  __builtin_arm_sevl();
}

// CHECK: call {{.*}} @llvm.arm.hint(i32 5)

void dbg() {
  __builtin_arm_dbg(0);
}

// CHECK: call {{.*}} @llvm.arm.dbg(i32 0)

void test_barrier() {
  __builtin_arm_dmb(1); //CHECK: call {{.*}} @llvm.arm.dmb(i32 1)
  __builtin_arm_dsb(2); //CHECK: call {{.*}} @llvm.arm.dsb(i32 2)
  __builtin_arm_isb(3); //CHECK: call {{.*}} @llvm.arm.isb(i32 3)
}

// CHECK: call {{.*}} @llvm.arm.rbit(i32 %a)

unsigned rbit(unsigned a) {
  return __builtin_arm_rbit(a);
}

void prefetch(int i) {
  // Arguments: (address, rw, cache-level); lowered to @llvm.prefetch with
  // locality 3 and the rw/instruction flags shown in the CHECK lines.
  __builtin_arm_prefetch(&i, 0, 1);
// CHECK: call {{.*}} @llvm.prefetch(i8* %{{.*}}, i32 0, i32 3, i32 1)

  __builtin_arm_prefetch(&i, 1, 1);
// CHECK: call {{.*}} @llvm.prefetch(i8* %{{.*}}, i32 1, i32 3, i32 1)

  __builtin_arm_prefetch(&i, 1, 0);
// CHECK: call {{.*}} @llvm.prefetch(i8* %{{.*}}, i32 1, i32 3, i32 0)
}

// Coprocessor data-transfer builtins (ldc/ldcl/ldc2/ldc2l and the stc
// variants) each map 1:1 onto the like-named @llvm.arm.* intrinsic,
// passing the coprocessor number, register, and pointer through unchanged.

void ldc(const void *i) {
  // CHECK: define void @ldc(i8* %i)
  // CHECK: call void @llvm.arm.ldc(i32 1, i32 2, i8* %i)
  // CHECK-NEXT: ret void
  __builtin_arm_ldc(1, 2, i);
}

void ldcl(const void *i) {
  // CHECK: define void @ldcl(i8* %i)
  // CHECK: call void @llvm.arm.ldcl(i32 1, i32 2, i8* %i)
  // CHECK-NEXT: ret void
  __builtin_arm_ldcl(1, 2, i);
}

void ldc2(const void *i) {
  // CHECK: define void @ldc2(i8* %i)
  // CHECK: call void @llvm.arm.ldc2(i32 1, i32 2, i8* %i)
  // CHECK-NEXT: ret void
  __builtin_arm_ldc2(1, 2, i);
}

void ldc2l(const void *i) {
  // CHECK: define void @ldc2l(i8* %i)
  // CHECK: call void @llvm.arm.ldc2l(i32 1, i32 2, i8* %i)
  // CHECK-NEXT: ret void
  __builtin_arm_ldc2l(1, 2, i);
}

void stc(void *i) {
  // CHECK: define void @stc(i8* %i)
  // CHECK: call void @llvm.arm.stc(i32 1, i32 2, i8* %i)
  // CHECK-NEXT: ret void
  __builtin_arm_stc(1, 2, i);
}

void stcl(void *i) {
  // CHECK: define void @stcl(i8* %i)
  // CHECK: call void @llvm.arm.stcl(i32 1, i32 2, i8* %i)
  // CHECK-NEXT: ret void
  __builtin_arm_stcl(1, 2, i);
}

void stc2(void *i) {
  // CHECK: define void @stc2(i8* %i)
  // CHECK: call void @llvm.arm.stc2(i32 1, i32 2, i8* %i)
  // CHECK-NEXT: ret void
  __builtin_arm_stc2(1, 2, i);
}

void stc2l(void *i) {
  // CHECK: define void @stc2l(i8* %i)
  // CHECK: call void @llvm.arm.stc2l(i32 1, i32 2, i8* %i)
  // CHECK-NEXT: ret void
  __builtin_arm_stc2l(1, 2, i);
}

void cdp() {
  // CHECK: define void @cdp()
  // CHECK: call void @llvm.arm.cdp(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6)
  // CHECK-NEXT: ret void
  __builtin_arm_cdp(1, 2, 3, 4, 5, 6);
}

void cdp2() {
  // CHECK: define void @cdp2()
  // CHECK: call void @llvm.arm.cdp2(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6)
  // CHECK-NEXT: ret void
  __builtin_arm_cdp2(1, 2, 3, 4, 5, 6);
}

// Coprocessor register moves: mrc/mrc2 read a 32-bit value, mcr/mcr2 write
// one, mcrr/mcrr2 write a 64-bit value split into two i32 halves, and
// mrrc/mrrc2 read a {i32, i32} pair reassembled into a 64-bit result.

unsigned mrc() {
  // CHECK: define i32 @mrc()
  // CHECK: [[R:%.*]] = call i32 @llvm.arm.mrc(i32 15, i32 0, i32 13, i32 0, i32 3)
  // CHECK-NEXT: ret i32 [[R]]
  return __builtin_arm_mrc(15, 0, 13, 0, 3);
}

unsigned mrc2() {
  // CHECK: define i32 @mrc2()
  // CHECK: [[R:%.*]] = call i32 @llvm.arm.mrc2(i32 15, i32 0, i32 13, i32 0, i32 3)
  // CHECK-NEXT: ret i32 [[R]]
  return __builtin_arm_mrc2(15, 0, 13, 0, 3);
}

void mcr(unsigned a) {
  // CHECK: define void @mcr(i32 [[A:%.*]])
  // CHECK: call void @llvm.arm.mcr(i32 15, i32 0, i32 [[A]], i32 13, i32 0, i32 3)
  __builtin_arm_mcr(15, 0, a, 13, 0, 3);
}

void mcr2(unsigned a) {
  // CHECK: define void @mcr2(i32 [[A:%.*]])
  // CHECK: call void @llvm.arm.mcr2(i32 15, i32 0, i32 [[A]], i32 13, i32 0, i32 3)
  __builtin_arm_mcr2(15, 0, a, 13, 0, 3);
}

void mcrr(uint64_t a) {
  // CHECK: define void @mcrr(i64 %{{.*}})
  // CHECK: call void @llvm.arm.mcrr(i32 15, i32 0, i32 %{{[0-9]+}}, i32 %{{[0-9]+}}, i32 0)
  __builtin_arm_mcrr(15, 0, a, 0);
}

void mcrr2(uint64_t a) {
  // CHECK: define void @mcrr2(i64 %{{.*}})
  // CHECK: call void @llvm.arm.mcrr2(i32 15, i32 0, i32 %{{[0-9]+}}, i32 %{{[0-9]+}}, i32 0)
  __builtin_arm_mcrr2(15, 0, a, 0);
}

uint64_t mrrc() {
  // CHECK: define i64 @mrrc()
  // CHECK: call { i32, i32 } @llvm.arm.mrrc(i32 15, i32 0, i32 0)
  return __builtin_arm_mrrc(15, 0, 0);
}

uint64_t mrrc2() {
  // CHECK: define i64 @mrrc2()
  // CHECK: call { i32, i32 } @llvm.arm.mrrc2(i32 15, i32 0, i32 0)
  return __builtin_arm_mrrc2(15, 0, 0);
}

// System/special register access: __builtin_arm_rsr*/wsr* lower to the
// generic @llvm.read_register/@llvm.write_register intrinsics, with the
// register name string captured as metadata (checked as M0/M1/M2 below).
// Pointer variants round-trip through inttoptr/ptrtoint at i32 width.

unsigned rsr() {
  // CHECK: [[V0:[%A-Za-z0-9.]+]] = call i32 @llvm.read_register.i32(metadata ![[M0:.*]])
  // CHECK-NEXT: ret i32 [[V0]]
  return __builtin_arm_rsr("cp1:2:c3:c4:5");
}

unsigned long long rsr64() {
  // CHECK: [[V0:[%A-Za-z0-9.]+]] = call i64 @llvm.read_register.i64(metadata ![[M1:.*]])
  // CHECK-NEXT: ret i64 [[V0]]
  return __builtin_arm_rsr64("cp1:2:c3");
}

void *rsrp() {
  // CHECK: [[V0:[%A-Za-z0-9.]+]] = call i32 @llvm.read_register.i32(metadata ![[M2:.*]])
  // CHECK-NEXT: [[V1:[%A-Za-z0-9.]+]] = inttoptr i32 [[V0]] to i8*
  // CHECK-NEXT: ret i8* [[V1]]
  return __builtin_arm_rsrp("sysreg");
}

void wsr(unsigned v) {
  // CHECK: call void @llvm.write_register.i32(metadata ![[M0]], i32 %v)
  __builtin_arm_wsr("cp1:2:c3:c4:5", v);
}

void wsr64(unsigned long long v) {
  // CHECK: call void @llvm.write_register.i64(metadata ![[M1]], i64 %v)
  __builtin_arm_wsr64("cp1:2:c3", v);
}

void wsrp(void *v) {
  // CHECK: [[V0:[%A-Za-z0-9.]+]] = ptrtoint i8* %v to i32
  // CHECK-NEXT: call void @llvm.write_register.i32(metadata ![[M2]], i32 [[V0]])
  __builtin_arm_wsrp("sysreg", v);
}

// The register-name metadata nodes referenced by the rsr/wsr checks above.
// CHECK: ![[M0]] = !{!"cp1:2:c3:c4:5"}
// CHECK: ![[M1]] = !{!"cp1:2:c3"}
// CHECK: ![[M2]] = !{!"sysreg"}