// REQUIRES: xcore-registered-target
// RUN: %clang_cc1 -triple xcore -verify %s
_Static_assert(sizeof(long long) == 8, "sizeof long long is wrong");
_Static_assert(_Alignof(long long) == 4, "alignof long long is wrong");

_Static_assert(sizeof(double) == 8, "sizeof double is wrong");
_Static_assert(_Alignof(double) == 4, "alignof double is wrong");

// RUN: %clang_cc1 -triple xcore-unknown-unknown -fno-signed-char -fno-common -emit-llvm -o - %s | FileCheck %s

// CHECK: target triple = "xcore-unknown-unknown"

// CHECK: @cgx = external constant i32, section ".cp.rodata"
extern const int cgx;
int fcgx() { return cgx;}
// CHECK: @g1 = global i32 0, align 4
int g1;
// CHECK: @cg1 = constant i32 0, section ".cp.rodata", align 4
const int cg1;

#include <stdarg.h>
struct x { int a[5]; };
void f(void*);
void testva (int n, ...) {
  // CHECK-LABEL: testva
  va_list ap;
  va_start(ap,n);
  // CHECK: [[AP:%[a-z0-9]+]] = alloca i8*, align 4
  // CHECK: [[AP1:%[a-z0-9]+]] = bitcast i8** [[AP]] to i8*
  // CHECK: call void @llvm.va_start(i8* [[AP1]])

  char* v1 = va_arg (ap, char*);
  f(v1);
  // CHECK: [[I:%[a-z0-9]+]] = load i8** [[AP]]
  // CHECK: [[P:%[a-z0-9]+]] = bitcast i8* [[I]] to i8**
  // CHECK: [[IN:%[a-z0-9]+]] = getelementptr i8* [[I]], i32 4
  // CHECK: store i8* [[IN]], i8** [[AP]]
  // CHECK: [[V1:%[a-z0-9]+]] = load i8** [[P]]
  // CHECK: store i8* [[V1]], i8** [[V:%[a-z0-9]+]], align 4
  // CHECK: [[V2:%[a-z0-9]+]] = load i8** [[V]], align 4
  // CHECK: call void @f(i8* [[V2]])

  char v2 = va_arg (ap, char); // expected-warning{{second argument to 'va_arg' is of promotable type 'char'}}
  f(&v2);
  // CHECK: [[I:%[a-z0-9]+]] = load i8** [[AP]]
  // CHECK: [[IN:%[a-z0-9]+]] = getelementptr i8* [[I]], i32 4
  // CHECK: store i8* [[IN]], i8** [[AP]]
  // CHECK: [[V1:%[a-z0-9]+]] = load i8* [[I]]
  // CHECK: store i8 [[V1]], i8* [[V:%[a-z0-9]+]], align 1
  // CHECK: call void @f(i8* [[V]])

  int v3 = va_arg (ap, int);
  f(&v3);
  // CHECK: [[I:%[a-z0-9]+]] = load i8** [[AP]]
  // CHECK: [[P:%[a-z0-9]+]] = bitcast i8* [[I]] to i32*
  // CHECK: [[IN:%[a-z0-9]+]] = getelementptr i8* [[I]], i32 4
  // CHECK: store i8* [[IN]], i8** [[AP]]
  // CHECK: [[V1:%[a-z0-9]+]] = load i32* [[P]]
  // CHECK: store i32 [[V1]], i32* [[V:%[a-z0-9]+]], align 4
  // CHECK: [[V2:%[a-z0-9]+]] = bitcast i32* [[V]] to i8*
  // CHECK: call void @f(i8* [[V2]])

  long long int v4 = va_arg (ap, long long int);
  f(&v4);
  // CHECK: [[I:%[a-z0-9]+]] = load i8** [[AP]]
  // CHECK: [[P:%[a-z0-9]+]] = bitcast i8* [[I]] to i64*
  // CHECK: [[IN:%[a-z0-9]+]] = getelementptr i8* [[I]], i32 8
  // CHECK: store i8* [[IN]], i8** [[AP]]
  // CHECK: [[V1:%[a-z0-9]+]] = load i64* [[P]]
  // CHECK: store i64 [[V1]], i64* [[V:%[a-z0-9]+]], align 4
  // CHECK: [[V2:%[a-z0-9]+]] = bitcast i64* [[V]] to i8*
  // CHECK: call void @f(i8* [[V2]])

  struct x v5 = va_arg (ap, struct x); // typical aggregate type
  f(&v5);
  // CHECK: [[I:%[a-z0-9]+]] = load i8** [[AP]]
  // CHECK: [[I2:%[a-z0-9]+]] = bitcast i8* [[I]] to %struct.x**
  // CHECK: [[P:%[a-z0-9]+]] = load %struct.x** [[I2]]
  // CHECK: [[IN:%[a-z0-9]+]] = getelementptr i8* [[I]], i32 4
  // CHECK: store i8* [[IN]], i8** [[AP]]
  // CHECK: [[V1:%[a-z0-9]+]] = bitcast %struct.x* [[V:%[a-z0-9]+]] to i8*
  // CHECK: [[P1:%[a-z0-9]+]] = bitcast %struct.x* [[P]] to i8*
  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[V1]], i8* [[P1]], i32 20, i32 4, i1 false)
  // CHECK: [[V2:%[a-z0-9]+]] = bitcast %struct.x* [[V]] to i8*
  // CHECK: call void @f(i8* [[V2]])

  int* v6 = va_arg (ap, int[4]); // an unusual aggregate type
  f(v6);
  // CHECK: [[I:%[a-z0-9]+]] = load i8** [[AP]]
  // CHECK: [[I2:%[a-z0-9]+]] = bitcast i8* [[I]] to [4 x i32]**
  // CHECK: [[P:%[a-z0-9]+]] = load [4 x i32]** [[I2]]
  // CHECK: [[IN:%[a-z0-9]+]] = getelementptr i8* [[I]], i32 4
  // CHECK: store i8* [[IN]], i8** [[AP]]
  // CHECK: [[V1:%[a-z0-9]+]] = bitcast [4 x i32]* [[V0:%[a-z0-9]+]] to i8*
  // CHECK: [[P1:%[a-z0-9]+]] = bitcast [4 x i32]* [[P]] to i8*
  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[V1]], i8* [[P1]], i32 16, i32 4, i1 false)
  // CHECK: [[V2:%[a-z0-9]+]] = getelementptr inbounds [4 x i32]* [[V0]], i32 0, i32 0
  // CHECK: store i32* [[V2]], i32** [[V:%[a-z0-9]+]], align 4
  // CHECK: [[V3:%[a-z0-9]+]] = load i32** [[V]], align 4
  // CHECK: [[V4:%[a-z0-9]+]] = bitcast i32* [[V3]] to i8*
  // CHECK: call void @f(i8* [[V4]])

  double v7 = va_arg (ap, double);
  f(&v7);
  // CHECK: [[I:%[a-z0-9]+]] = load i8** [[AP]]
  // CHECK: [[P:%[a-z0-9]+]] = bitcast i8* [[I]] to double*
  // CHECK: [[IN:%[a-z0-9]+]] = getelementptr i8* [[I]], i32 8
  // CHECK: store i8* [[IN]], i8** [[AP]]
  // CHECK: [[V1:%[a-z0-9]+]] = load double* [[P]]
  // CHECK: store double [[V1]], double* [[V:%[a-z0-9]+]], align 4
  // CHECK: [[V2:%[a-z0-9]+]] = bitcast double* [[V]] to i8*
  // CHECK: call void @f(i8* [[V2]])
}

void testbuiltin (void) {
  // CHECK-LABEL: testbuiltin
  // CHECK: call i32 @llvm.xcore.getid()
  // CHECK: call i32 @llvm.xcore.getps(i32 {{%[a-z0-9]+}})
  // CHECK: call i32 @llvm.xcore.bitrev(i32 {{%[a-z0-9]+}})
  // CHECK: call void @llvm.xcore.setps(i32 {{%[a-z0-9]+}}, i32 {{%[a-z0-9]+}})
  volatile int i = __builtin_getid();
  volatile unsigned int ui = __builtin_getps(i);
  ui = __builtin_bitrev(ui);
  __builtin_setps(i,ui);

  // CHECK: store volatile i32 0, i32* {{%[a-z0-9]+}}, align 4
  // CHECK: store volatile i32 1, i32* {{%[a-z0-9]+}}, align 4
  // CHECK: store volatile i32 -1, i32* {{%[a-z0-9]+}}, align 4
  volatile int res;
  res = __builtin_eh_return_data_regno(0);
  res = __builtin_eh_return_data_regno(1);
  res = __builtin_eh_return_data_regno(2);
}

// CHECK-LABEL: define zeroext i8 @testchar()
// CHECK: ret i8 -1
char testchar (void) {
  return (char)-1;
}

// CHECK: "no-frame-pointer-elim"="false"
// CHECK-NOT: "no-frame-pointer-elim-non-leaf"