; RUN: llc < %s -march=ppc32 | \
; RUN:   grep eqv | count 3
; RUN: llc < %s -march=ppc32 -mcpu=g5 | \
; RUN:   grep andc | count 3
; RUN: llc < %s -march=ppc32 | \
; RUN:   grep orc | count 2
; RUN: llc < %s -march=ppc32 -mcpu=g5 | \
; RUN:   grep nor | count 3
; RUN: llc < %s -march=ppc32 | \
; RUN:   grep nand | count 1

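; EQV: ~(X ^ Y), written with the complement applied to the result or to one
; operand, should select the eqv instruction.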
define i32 @EQV1(i32 %X, i32 %Y) nounwind {
	%A = xor i32 %X, %Y		; <i32> [#uses=1]
	%B = xor i32 %A, -1		; <i32> [#uses=1]
	ret i32 %B
}

define i32 @EQV2(i32 %X, i32 %Y) nounwind {
	%A = xor i32 %X, -1		; <i32> [#uses=1]
	%B = xor i32 %A, %Y		; <i32> [#uses=1]
	ret i32 %B
}

define i32 @EQV3(i32 %X, i32 %Y) nounwind {
	%A = xor i32 %X, -1		; <i32> [#uses=1]
	%B = xor i32 %Y, %A		; <i32> [#uses=1]
	ret i32 %B
}

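; ANDC: X & ~Y, in either operand order, should select andc.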
define i32 @ANDC1(i32 %X, i32 %Y) nounwind {
	%A = xor i32 %Y, -1		; <i32> [#uses=1]
	%B = and i32 %X, %A		; <i32> [#uses=1]
	ret i32 %B
}

define i32 @ANDC2(i32 %X, i32 %Y) nounwind {
	%A = xor i32 %X, -1		; <i32> [#uses=1]
	%B = and i32 %A, %Y		; <i32> [#uses=1]
	ret i32 %B
}

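; ORC: X | ~Y, in either operand order, should select orc.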
define i32 @ORC1(i32 %X, i32 %Y) nounwind {
	%A = xor i32 %Y, -1		; <i32> [#uses=1]
	%B = or i32 %X, %A		; <i32> [#uses=1]
	ret i32 %B
}

define i32 @ORC2(i32 %X, i32 %Y) nounwind {
	%A = xor i32 %X, -1		; <i32> [#uses=1]
	%B = or i32 %A, %Y		; <i32> [#uses=1]
	ret i32 %B
}

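; NOR: a plain complement (~X) and ~(X | Y) should both select nor.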
define i32 @NOR1(i32 %X) nounwind {
	%Y = xor i32 %X, -1		; <i32> [#uses=1]
	ret i32 %Y
}

define i32 @NOR2(i32 %X, i32 %Y) nounwind {
	%Z = or i32 %X, %Y		; <i32> [#uses=1]
	%R = xor i32 %Z, -1		; <i32> [#uses=1]
	ret i32 %R
}

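; NAND: ~(X & Y) should select nand.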
define i32 @NAND1(i32 %X, i32 %Y) nounwind {
	%Z = and i32 %X, %Y		; <i32> [#uses=1]
	%W = xor i32 %Z, -1		; <i32> [#uses=1]
	ret i32 %W
}

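; Altivec forms: ~(X | Y) and X & ~Y on <4 x i32> should select vnor and vandc,
; which is why the andc and nor RUN lines above use -mcpu=g5.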
define void @VNOR(<4 x float>* %P, <4 x float>* %Q) nounwind {
	%tmp = load <4 x float>, <4 x float>* %P		; <<4 x float>> [#uses=1]
	%tmp.upgrd.1 = bitcast <4 x float> %tmp to <4 x i32>		; <<4 x i32>> [#uses=1]
	%tmp2 = load <4 x float>, <4 x float>* %Q		; <<4 x float>> [#uses=1]
	%tmp2.upgrd.2 = bitcast <4 x float> %tmp2 to <4 x i32>		; <<4 x i32>> [#uses=1]
	%tmp3 = or <4 x i32> %tmp.upgrd.1, %tmp2.upgrd.2		; <<4 x i32>> [#uses=1]
	%tmp4 = xor <4 x i32> %tmp3, < i32 -1, i32 -1, i32 -1, i32 -1 >		; <<4 x i32>> [#uses=1]
	%tmp4.upgrd.3 = bitcast <4 x i32> %tmp4 to <4 x float>		; <<4 x float>> [#uses=1]
	store <4 x float> %tmp4.upgrd.3, <4 x float>* %P
	ret void
}

define void @VANDC(<4 x float>* %P, <4 x float>* %Q) nounwind {
	%tmp = load <4 x float>, <4 x float>* %P		; <<4 x float>> [#uses=1]
	%tmp.upgrd.4 = bitcast <4 x float> %tmp to <4 x i32>		; <<4 x i32>> [#uses=1]
	%tmp2 = load <4 x float>, <4 x float>* %Q		; <<4 x float>> [#uses=1]
	%tmp2.upgrd.5 = bitcast <4 x float> %tmp2 to <4 x i32>		; <<4 x i32>> [#uses=1]
	%tmp4 = xor <4 x i32> %tmp2.upgrd.5, < i32 -1, i32 -1, i32 -1, i32 -1 >		; <<4 x i32>> [#uses=1]
	%tmp3 = and <4 x i32> %tmp.upgrd.4, %tmp4		; <<4 x i32>> [#uses=1]
	%tmp4.upgrd.6 = bitcast <4 x i32> %tmp3 to <4 x float>		; <<4 x float>> [#uses=1]
	store <4 x float> %tmp4.upgrd.6, <4 x float>* %P
	ret void
}