/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation.
 */

/*
 * Inspired from FreeBSD src/sys/amd64/include/atomic.h
 * Copyright (c) 1998 Doug Rabson
 * Copyright (c) 2019 Intel Corporation
 * All rights reserved.
 */

#ifndef _RTE_ATOMIC_X86_H_
#error do not include this file directly, use <rte_atomic.h> instead
#endif

#ifndef _RTE_ATOMIC_X86_64_H_
#define _RTE_ATOMIC_X86_64_H_

#include <stdint.h>
#include <rte_common.h>
#include <rte_compat.h>
#include <rte_atomic.h>

/*------------------------- 64 bit atomic operations -------------------------*/

#ifndef RTE_FORCE_INTRINSICS
static inline int
rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)
{
	uint8_t res;

	asm volatile(
			MPLOCKED
			"cmpxchgq %[src], %[dst];"
			"sete %[res];"
			: [res] "=a" (res),     /* output */
			  [dst] "=m" (*dst)
			: [src] "r" (src),      /* input */
			  "a" (exp),
			  "m" (*dst)
			: "memory");            /* no-clobber list */

	return res;
}

static inline uint64_t
rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val)
{
	asm volatile(
			MPLOCKED
			"xchgq %0, %1;"
			: "=r" (val), "=m" (*dst)
			: "0" (val),  "m" (*dst)
			: "memory");            /* no-clobber list */
	return val;
}

static inline void
rte_atomic64_init(rte_atomic64_t *v)
{
	v->cnt = 0;
}

static inline int64_t
rte_atomic64_read(rte_atomic64_t *v)
{
	return v->cnt;
}

static inline void
rte_atomic64_set(rte_atomic64_t *v, int64_t new_value)
{
	v->cnt = new_value;
}

static inline void
rte_atomic64_add(rte_atomic64_t *v, int64_t inc)
{
	asm volatile(
			MPLOCKED
			"addq %[inc], %[cnt]"
			: [cnt] "=m" (v->cnt)   /* output */
			: [inc] "ir" (inc),     /* input */
			  "m" (v->cnt)
			);
}

static inline void
rte_atomic64_sub(rte_atomic64_t *v, int64_t dec)
{
	asm volatile(
			MPLOCKED
			"subq %[dec], %[cnt]"
			: [cnt] "=m" (v->cnt)   /* output */
			: [dec] "ir" (dec),     /* input */
			  "m" (v->cnt)
			);
}

static inline void
rte_atomic64_inc(rte_atomic64_t *v)
{
	asm volatile(
			MPLOCKED
			"incq %[cnt]"
			: [cnt] "=m" (v->cnt)   /* output */
			: "m" (v->cnt)          /* input */
			);
}

static inline void
rte_atomic64_dec(rte_atomic64_t *v)
{
	asm volatile(
			MPLOCKED
			"decq %[cnt]"
			: [cnt] "=m" (v->cnt)   /* output */
			: "m" (v->cnt)          /* input */
			);
}

static inline int64_t
rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc)
{
	int64_t prev = inc;

	asm volatile(
			MPLOCKED
			"xaddq %[prev], %[cnt]"
			: [prev] "+r" (prev),   /* output */
			  [cnt] "=m" (v->cnt)
			: "m" (v->cnt)          /* input */
			);
	return prev + inc;
}

static inline int64_t
rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec)
{
	return rte_atomic64_add_return(v, -dec);
}

static inline int
rte_atomic64_inc_and_test(rte_atomic64_t *v)
{
	uint8_t ret;

	asm volatile(
			MPLOCKED
			"incq %[cnt] ; "
			"sete %[ret]"
			: [cnt] "+m" (v->cnt),  /* output */
			  [ret] "=qm" (ret)
			);

	return ret != 0;
}

static inline int
rte_atomic64_dec_and_test(rte_atomic64_t *v)
{
	uint8_t ret;

	asm volatile(
			MPLOCKED
			"decq %[cnt] ; "
			"sete %[ret]"
			: [cnt] "+m" (v->cnt),  /* output */
			  [ret] "=qm" (ret)
			);
	return ret != 0;
}

static inline int
rte_atomic64_test_and_set(rte_atomic64_t *v)
{
	return rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, 0, 1);
}

static inline void
rte_atomic64_clear(rte_atomic64_t *v)
{
	v->cnt = 0;
}
#endif
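/*
 * Illustrative sketch (not part of this header): typical use of the legacy
 * 64-bit counter API above, e.g. a reference count shared across lcores.
 * The names "refcnt", "obj_acquire", "obj_release" and "free_object" are
 * hypothetical.
 *
 *	static rte_atomic64_t refcnt;
 *
 *	static void
 *	obj_create(void)
 *	{
 *		rte_atomic64_set(&refcnt, 1);	// creator holds one reference
 *	}
 *
 *	static void
 *	obj_acquire(void)
 *	{
 *		rte_atomic64_inc(&refcnt);	// lock incq
 *	}
 *
 *	static void
 *	obj_release(void)
 *	{
 *		// dec_and_test() returns non-zero only when the counter hits
 *		// zero, so exactly one caller observes the final release.
 *		if (rte_atomic64_dec_and_test(&refcnt))
 *			free_object();		// hypothetical cleanup
 *	}
 */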
/*------------------------ 128 bit atomic operations -------------------------*/

__rte_experimental
static inline int
rte_atomic128_cmp_exchange(rte_int128_t *dst,
			   rte_int128_t *exp,
			   const rte_int128_t *src,
			   unsigned int weak,
			   int success,
			   int failure)
{
	RTE_SET_USED(weak);
	RTE_SET_USED(success);
	RTE_SET_USED(failure);
	uint8_t res;

	asm volatile (
		      MPLOCKED
		      "cmpxchg16b %[dst];"
		      " sete %[res]"
		      : [dst] "=m" (dst->val[0]),
			"=a" (exp->val[0]),
			"=d" (exp->val[1]),
			[res] "=r" (res)
		      : "b" (src->val[0]),
			"c" (src->val[1]),
			"a" (exp->val[0]),
			"d" (exp->val[1]),
			"m" (dst->val[0])
		      : "memory");

	return res;
}

#endif /* _RTE_ATOMIC_X86_64_H_ */
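/*
 * Illustrative sketch (not part of this header): a compare-and-swap loop
 * built on rte_atomic128_cmp_exchange(). On x86-64 the weak/success/failure
 * arguments are ignored (see RTE_SET_USED above): the operation is always a
 * strong CAS with the full-barrier semantics of "lock cmpxchg16b", and on
 * failure *exp is refreshed with the value observed in *dst (via the
 * "=a"/"=d" outputs). The destination must be 16-byte aligned, since
 * cmpxchg16b faults on unaligned operands. The function name "pair_add" is
 * hypothetical.
 *
 *	static void
 *	pair_add(rte_int128_t *dst, uint64_t lo, uint64_t hi)
 *	{
 *		rte_int128_t exp = *dst;	// racy snapshot; CAS validates it
 *		rte_int128_t src;
 *
 *		do {
 *			src.val[0] = exp.val[0] + lo;
 *			src.val[1] = exp.val[1] + hi;
 *			// On failure, exp now holds the current *dst, so the
 *			// next iteration recomputes from fresh values.
 *		} while (!rte_atomic128_cmp_exchange(dst, &exp, &src, 0,
 *				__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST));
 *	}
 */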