/* atomic-inline.h — revision 8dfa47da8cb33ebaf7aae6db6548e75ed86e8f1e */
/*
 * Copyright (C) 2010 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_CUTILS_ATOMIC_INLINE_H
#define ANDROID_CUTILS_ATOMIC_INLINE_H

/*
 * Inline declarations and macros for some special-purpose atomic
 * operations.  These are intended for rare circumstances where a
 * memory barrier needs to be issued inline rather than as a function
 * call.
 *
 * Most code should not use these.
 *
 * Anything that does include this file must set ANDROID_SMP to either
 * 0 or 1, indicating compilation for UP or SMP, respectively.
 *
 * Macros defined in this header:
 *
 * void ANDROID_MEMBAR_FULL(void)
 *   Full memory barrier.  Provides a compiler reordering barrier, and
 *   on SMP systems emits an appropriate instruction.
 */

#if !defined(ANDROID_SMP)
# error "Must define ANDROID_SMP before including atomic-inline.h"
#endif

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Define the full memory barrier for an SMP system.  This is
 * platform-specific.
 */

#ifdef __arm__
#include <machine/cpu-features.h>

/*
 * For ARMv6K we need to issue a specific MCR instead of the DMB, since
 * that wasn't added until v7.  For anything older, SMP isn't relevant.
 * Since we don't have an ARMv6K to test with, we're not going to deal
 * with that now.
 *
 * The DMB instruction is found in the ARM and Thumb2 instruction sets.
 * This will fail on plain 16-bit Thumb.
 */
#if defined(__ARM_HAVE_DMB)
/* ARMv7+: DMB is a full data memory barrier; the "memory" clobber also
 * stops the compiler from reordering accesses across the asm. */
# define _ANDROID_MEMBAR_FULL_SMP() \
    do { __asm__ __volatile__ ("dmb" ::: "memory"); } while (0)
#else
/* Deliberate link-time failure: an SMP ARM build without DMB support is
 * unsupported, so expand to a call to a function that does not exist. */
# define _ANDROID_MEMBAR_FULL_SMP() ARM_SMP_defined_but_no_DMB()
#endif

#elif defined(__i386__) || defined(__x86_64__)
/*
 * For recent x86, we can use the SSE2 mfence instruction.
 */
# define _ANDROID_MEMBAR_FULL_SMP() \
    do { __asm__ __volatile__ ("mfence" ::: "memory"); } while (0)

#else
/*
 * Implementation not defined for this platform.  Hopefully we're building
 * in uniprocessor mode.  Any SMP use expands to a call to an undefined
 * function so the build fails loudly rather than silently lacking a barrier.
 */
# define _ANDROID_MEMBAR_FULL_SMP() SMP_barrier_not_defined_for_platform()
#endif


/*
 * Full barrier.  On uniprocessors this is just a compiler reorder barrier,
 * which ensures that the statements appearing above the barrier in the C/C++
 * code will be issued after the statements appearing below the barrier.
 *
 * For SMP this also includes a memory barrier instruction.  On an ARM
 * CPU this means that the current core will flush pending writes, wait
 * for pending reads to complete, and discard any cached reads that could
 * be stale.  Other CPUs may do less, but the end result is equivalent.
 */
#if ANDROID_SMP != 0
# define ANDROID_MEMBAR_FULL() _ANDROID_MEMBAR_FULL_SMP()
#else
/* UP build: an empty asm with a "memory" clobber is a pure compiler
 * reordering barrier — no instruction is emitted. */
# define ANDROID_MEMBAR_FULL() \
    do { __asm__ __volatile__ ("" ::: "memory"); } while (0)
#endif

#ifdef __cplusplus
} // extern "C"
#endif

#endif // ANDROID_CUTILS_ATOMIC_INLINE_H