/*
 * Bitops Module
 *
 * Copyright (C) 2010 Corentin Chary <corentin.chary@gmail.com>
 *
 * Mostly inspired by (stolen from) linux/bitmap.h and linux/bitops.h
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
 * See the COPYING.LIB file in the top-level directory.
 */

#ifndef BITOPS_H
#define BITOPS_H

#include "host-utils.h"
#include "atomic.h"

#define BITS_PER_BYTE           CHAR_BIT
#define BITS_PER_LONG           (sizeof (unsigned long) * BITS_PER_BYTE)

#define BIT(nr)                 (1UL << (nr))
#define BIT_ULL(nr)             (1ULL << (nr))
#define BIT_MASK(nr)            (1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr)            ((nr) / BITS_PER_LONG)
#define BITS_TO_LONGS(nr)       DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
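
/*
 * Illustrative note (not part of the original header), assuming a 64-bit
 * unsigned long: bit number 70 of a bitmap lives in word BIT_WORD(70) == 1,
 * at position 70 % 64 == 6 within that word, so BIT_MASK(70) == 1UL << 6.
 * BITS_TO_LONGS(70) == 2 gives the array size needed to hold 70 bits.
 */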

#define MAKE_64BIT_MASK(shift, length) \
    (((~0ULL) >> (64 - (length))) << (shift))
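
/*
 * Worked example (illustrative, not part of the original header): the macro
 * builds a contiguous mask of @length set bits starting at bit @shift, e.g.
 *
 *     MAKE_64BIT_MASK(8, 4)  == 0x0000000000000f00ULL
 *     MAKE_64BIT_MASK(0, 64) == 0xffffffffffffffffULL
 *
 * @length must be between 1 and 64: a length of 0 would shift right by 64,
 * which is undefined behaviour.
 */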

/**
 * set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 */
static inline void set_bit(long nr, unsigned long *addr)
{
    unsigned long mask = BIT_MASK(nr);
    unsigned long *p = addr + BIT_WORD(nr);

    *p |= mask;
}

/**
 * set_bit_atomic - Set a bit in memory atomically
 * @nr: the bit to set
 * @addr: the address to start counting from
 */
static inline void set_bit_atomic(long nr, unsigned long *addr)
{
    unsigned long mask = BIT_MASK(nr);
    unsigned long *p = addr + BIT_WORD(nr);

    qatomic_or(p, mask);
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 */
static inline void clear_bit(long nr, unsigned long *addr)
{
    unsigned long mask = BIT_MASK(nr);
    unsigned long *p = addr + BIT_WORD(nr);

    *p &= ~mask;
}

/**
 * clear_bit_atomic - Clears a bit in memory atomically
 * @nr: Bit to clear
 * @addr: Address to start counting from
 */
static inline void clear_bit_atomic(long nr, unsigned long *addr)
{
    unsigned long mask = BIT_MASK(nr);
    unsigned long *p = addr + BIT_WORD(nr);

    return qatomic_and(p, ~mask);
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 */
static inline void change_bit(long nr, unsigned long *addr)
{
    unsigned long mask = BIT_MASK(nr);
    unsigned long *p = addr + BIT_WORD(nr);

    *p ^= mask;
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 */
static inline int test_and_set_bit(long nr, unsigned long *addr)
{
    unsigned long mask = BIT_MASK(nr);
    unsigned long *p = addr + BIT_WORD(nr);
    unsigned long old = *p;

    *p = old | mask;
    return (old & mask) != 0;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 */
static inline int test_and_clear_bit(long nr, unsigned long *addr)
{
    unsigned long mask = BIT_MASK(nr);
    unsigned long *p = addr + BIT_WORD(nr);
    unsigned long old = *p;

    *p = old & ~mask;
    return (old & mask) != 0;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 */
static inline int test_and_change_bit(long nr, unsigned long *addr)
{
    unsigned long mask = BIT_MASK(nr);
    unsigned long *p = addr + BIT_WORD(nr);
    unsigned long old = *p;

    *p = old ^ mask;
    return (old & mask) != 0;
}

/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static inline int test_bit(long nr, const unsigned long *addr)
{
    return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
}
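
/*
 * Usage sketch (illustrative, not part of the original header): a bitmap is
 * simply an array of unsigned long sized with BITS_TO_LONGS(), and the
 * helpers above address individual bits within it:
 *
 *     unsigned long map[BITS_TO_LONGS(128)] = { 0 };
 *
 *     set_bit(5, map);                     // map[0] |= 1UL << 5
 *     assert(test_bit(5, map));
 *     assert(test_and_clear_bit(5, map));  // returns the old value, then clears
 *     assert(!test_bit(5, map));
 *
 * The plain helpers are not thread-safe; concurrent updates to the same word
 * need set_bit_atomic()/clear_bit_atomic().
 */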

/**
 * find_last_bit - find the last set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit number of the last set bit,
 * or @size if there is no set bit in the bitmap.
 */
unsigned long find_last_bit(const unsigned long *addr,
                            unsigned long size);

/**
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bit number to start searching at
 * @size: The bitmap size in bits
 *
 * Returns the bit number of the next set bit,
 * or @size if there are no further set bits in the bitmap.
 */
unsigned long find_next_bit(const unsigned long *addr,
                            unsigned long size,
                            unsigned long offset);

/**
 * find_next_zero_bit - find the next cleared bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bit number to start searching at
 * @size: The bitmap size in bits
 *
 * Returns the bit number of the next cleared bit,
 * or @size if there are no further clear bits in the bitmap.
 */
unsigned long find_next_zero_bit(const unsigned long *addr,
                                 unsigned long size,
                                 unsigned long offset);

/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit number of the first set bit,
 * or @size if there is no set bit in the bitmap.
 */
static inline unsigned long find_first_bit(const unsigned long *addr,
                                           unsigned long size)
{
    unsigned long result, tmp;

    for (result = 0; result < size; result += BITS_PER_LONG) {
        tmp = *addr++;
        if (tmp) {
            result += ctzl(tmp);
            return result < size ? result : size;
        }
    }
    /* Not found */
    return size;
}
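
/*
 * Illustrative loop (not part of the original header): visiting every set bit
 * of a bitmap with the find_*_bit() helpers:
 *
 *     unsigned long bit;
 *
 *     for (bit = find_first_bit(map, size);
 *          bit < size;
 *          bit = find_next_bit(map, size, bit + 1)) {
 *         // ... use 'bit' ...
 *     }
 *
 * Both functions return @size when no (further) set bit exists, which is what
 * terminates the loop.
 */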

/**
 * find_first_zero_bit - find the first cleared bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit number of the first cleared bit,
 * or @size if there is no clear bit in the bitmap.
 */
static inline unsigned long find_first_zero_bit(const unsigned long *addr,
                                                unsigned long size)
{
    return find_next_zero_bit(addr, size, 0);
}

/**
 * rol8 - rotate an 8-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline uint8_t rol8(uint8_t word, unsigned int shift)
{
    return (word << (shift & 7)) | (word >> (-shift & 7));
}

/**
 * ror8 - rotate an 8-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline uint8_t ror8(uint8_t word, unsigned int shift)
{
    return (word >> (shift & 7)) | (word << (-shift & 7));
}

/**
 * rol16 - rotate a 16-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline uint16_t rol16(uint16_t word, unsigned int shift)
{
    return (word << (shift & 15)) | (word >> (-shift & 15));
}

/**
 * ror16 - rotate a 16-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline uint16_t ror16(uint16_t word, unsigned int shift)
{
    return (word >> (shift & 15)) | (word << (-shift & 15));
}

/**
 * rol32 - rotate a 32-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline uint32_t rol32(uint32_t word, unsigned int shift)
{
    return (word << (shift & 31)) | (word >> (-shift & 31));
}

/**
 * ror32 - rotate a 32-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline uint32_t ror32(uint32_t word, unsigned int shift)
{
    return (word >> (shift & 31)) | (word << (-shift & 31));
}

/**
 * rol64 - rotate a 64-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline uint64_t rol64(uint64_t word, unsigned int shift)
{
    return (word << (shift & 63)) | (word >> (-shift & 63));
}

/**
 * ror64 - rotate a 64-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline uint64_t ror64(uint64_t word, unsigned int shift)
{
    return (word >> (shift & 63)) | (word << (-shift & 63));
}
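
/*
 * Worked examples (illustrative, not part of the original header):
 *
 *     rol8(0x81, 1)        == 0x03
 *     ror32(0x00000001, 1) == 0x80000000
 *
 * Masking the shift count (& 7, & 15, & 31, & 63) keeps both shift amounts
 * inside the operand width, so rotating by 0 or by a multiple of the width is
 * well defined and returns the input unchanged.
 */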

/**
 * hswap32 - swap 16-bit halfwords within a 32-bit value
 * @h: value to swap
 */
static inline uint32_t hswap32(uint32_t h)
{
    return rol32(h, 16);
}

/**
 * hswap64 - swap 16-bit halfwords within a 64-bit value
 * @h: value to swap
 */
static inline uint64_t hswap64(uint64_t h)
{
    uint64_t m = 0x0000ffff0000ffffull;
    h = rol64(h, 32);
    return ((h & m) << 16) | ((h >> 16) & m);
}

/**
 * wswap64 - swap 32-bit words within a 64-bit value
 * @h: value to swap
 */
static inline uint64_t wswap64(uint64_t h)
{
    return rol64(h, 32);
}
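
/*
 * Illustrative values (not part of the original header):
 *
 *     hswap32(0x11223344)            == 0x33441122
 *     hswap64(0x1122334455667788ULL) == 0x7788556633441122ULL
 *     wswap64(0x1122334455667788ULL) == 0x5566778811223344ULL
 */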

/**
 * extract32:
 * @value: the value to extract the bit field from
 * @start: the lowest bit in the bit field (numbered from 0)
 * @length: the length of the bit field
 *
 * Extract from the 32 bit input @value the bit field specified by the
 * @start and @length parameters, and return it. The bit field must
 * lie entirely within the 32 bit word. It is valid to request that
 * all 32 bits are returned (ie @length 32 and @start 0).
 *
 * Returns: the value of the bit field extracted from the input value.
 */
static inline uint32_t extract32(uint32_t value, int start, int length)
{
    assert(start >= 0 && length > 0 && length <= 32 - start);
    return (value >> start) & (~0U >> (32 - length));
}
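
/*
 * Worked examples (illustrative, not part of the original header):
 *
 *     extract32(0x12345678, 8, 8)  == 0x56          // bits [15:8]
 *     extract32(0x12345678, 0, 4)  == 0x8           // bits [3:0]
 *     extract32(0x12345678, 0, 32) == 0x12345678    // the whole word
 */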

/**
 * extract8:
 * @value: the value to extract the bit field from
 * @start: the lowest bit in the bit field (numbered from 0)
 * @length: the length of the bit field
 *
 * Extract from the 8 bit input @value the bit field specified by the
 * @start and @length parameters, and return it. The bit field must
 * lie entirely within the 8 bit word. It is valid to request that
 * all 8 bits are returned (ie @length 8 and @start 0).
 *
 * Returns: the value of the bit field extracted from the input value.
 */
static inline uint8_t extract8(uint8_t value, int start, int length)
{
    assert(start >= 0 && length > 0 && length <= 8 - start);
    return extract32(value, start, length);
}

/**
 * extract16:
 * @value: the value to extract the bit field from
 * @start: the lowest bit in the bit field (numbered from 0)
 * @length: the length of the bit field
 *
 * Extract from the 16 bit input @value the bit field specified by the
 * @start and @length parameters, and return it. The bit field must
 * lie entirely within the 16 bit word. It is valid to request that
 * all 16 bits are returned (ie @length 16 and @start 0).
 *
 * Returns: the value of the bit field extracted from the input value.
 */
static inline uint16_t extract16(uint16_t value, int start, int length)
{
    assert(start >= 0 && length > 0 && length <= 16 - start);
    return extract32(value, start, length);
}

/**
 * extract64:
 * @value: the value to extract the bit field from
 * @start: the lowest bit in the bit field (numbered from 0)
 * @length: the length of the bit field
 *
 * Extract from the 64 bit input @value the bit field specified by the
 * @start and @length parameters, and return it. The bit field must
 * lie entirely within the 64 bit word. It is valid to request that
 * all 64 bits are returned (ie @length 64 and @start 0).
 *
 * Returns: the value of the bit field extracted from the input value.
 */
static inline uint64_t extract64(uint64_t value, int start, int length)
{
    assert(start >= 0 && length > 0 && length <= 64 - start);
    return (value >> start) & (~0ULL >> (64 - length));
}

/**
 * sextract32:
 * @value: the value to extract the bit field from
 * @start: the lowest bit in the bit field (numbered from 0)
 * @length: the length of the bit field
 *
 * Extract from the 32 bit input @value the bit field specified by the
 * @start and @length parameters, and return it, sign extended to
 * an int32_t (ie with the most significant bit of the field propagated
 * to all the upper bits of the return value). The bit field must lie
 * entirely within the 32 bit word. It is valid to request that
 * all 32 bits are returned (ie @length 32 and @start 0).
 *
 * Returns: the sign extended value of the bit field extracted from the
 * input value.
 */
static inline int32_t sextract32(uint32_t value, int start, int length)
{
    assert(start >= 0 && length > 0 && length <= 32 - start);
    /* Note that this implementation relies on right shift of signed
     * integers being an arithmetic shift.
     */
    return ((int32_t)(value << (32 - length - start))) >> (32 - length);
}
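
/*
 * Worked examples (illustrative, not part of the original header):
 *
 *     sextract32(0x000000f0, 4, 4) == -1    // field is 0xf, top bit set
 *     sextract32(0x00000070, 4, 4) == 7     // field is 0x7, top bit clear
 */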

/**
 * sextract64:
 * @value: the value to extract the bit field from
 * @start: the lowest bit in the bit field (numbered from 0)
 * @length: the length of the bit field
 *
 * Extract from the 64 bit input @value the bit field specified by the
 * @start and @length parameters, and return it, sign extended to
 * an int64_t (ie with the most significant bit of the field propagated
 * to all the upper bits of the return value). The bit field must lie
 * entirely within the 64 bit word. It is valid to request that
 * all 64 bits are returned (ie @length 64 and @start 0).
 *
 * Returns: the sign extended value of the bit field extracted from the
 * input value.
 */
static inline int64_t sextract64(uint64_t value, int start, int length)
{
    assert(start >= 0 && length > 0 && length <= 64 - start);
    /* Note that this implementation relies on right shift of signed
     * integers being an arithmetic shift.
     */
    return ((int64_t)(value << (64 - length - start))) >> (64 - length);
}

/**
 * deposit32:
 * @value: initial value to insert bit field into
 * @start: the lowest bit in the bit field (numbered from 0)
 * @length: the length of the bit field
 * @fieldval: the value to insert into the bit field
 *
 * Deposit @fieldval into the 32 bit @value at the bit field specified
 * by the @start and @length parameters, and return the modified
 * @value. Bits of @value outside the bit field are not modified.
 * Bits of @fieldval above the least significant @length bits are
 * ignored. The bit field must lie entirely within the 32 bit word.
 * It is valid to request that all 32 bits are modified (ie @length
 * 32 and @start 0).
 *
 * Returns: the modified @value.
 */
static inline uint32_t deposit32(uint32_t value, int start, int length,
                                 uint32_t fieldval)
{
    uint32_t mask;
    assert(start >= 0 && length > 0 && length <= 32 - start);
    mask = (~0U >> (32 - length)) << start;
    return (value & ~mask) | ((fieldval << start) & mask);
}
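
/*
 * Worked examples (illustrative, not part of the original header):
 *
 *     deposit32(0x12345678, 8, 8, 0xab)  == 0x1234ab78
 *     deposit32(0x12345678, 8, 8, 0x1ab) == 0x1234ab78   // excess bits ignored
 */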

/**
 * deposit64:
 * @value: initial value to insert bit field into
 * @start: the lowest bit in the bit field (numbered from 0)
 * @length: the length of the bit field
 * @fieldval: the value to insert into the bit field
 *
 * Deposit @fieldval into the 64 bit @value at the bit field specified
 * by the @start and @length parameters, and return the modified
 * @value. Bits of @value outside the bit field are not modified.
 * Bits of @fieldval above the least significant @length bits are
 * ignored. The bit field must lie entirely within the 64 bit word.
 * It is valid to request that all 64 bits are modified (ie @length
 * 64 and @start 0).
 *
 * Returns: the modified @value.
 */
static inline uint64_t deposit64(uint64_t value, int start, int length,
                                 uint64_t fieldval)
{
    uint64_t mask;
    assert(start >= 0 && length > 0 && length <= 64 - start);
    mask = (~0ULL >> (64 - length)) << start;
    return (value & ~mask) | ((fieldval << start) & mask);
}

/**
 * half_shuffle32:
 * @x: 32-bit value (of which only the bottom 16 bits are of interest)
 *
 * Given an input value::
 *
 *  xxxx xxxx xxxx xxxx ABCD EFGH IJKL MNOP
 *
 * return the value where the bottom 16 bits are spread out into
 * the odd bits in the word, and the even bits are zeroed::
 *
 *  0A0B 0C0D 0E0F 0G0H 0I0J 0K0L 0M0N 0O0P
 *
 * Any bits set in the top half of the input are ignored.
 *
 * Returns: the shuffled bits.
 */
static inline uint32_t half_shuffle32(uint32_t x)
{
    /* This algorithm is from _Hacker's Delight_ section 7-2 "Shuffling Bits".
     * It ignores any bits set in the top half of the input.
     */
    x = ((x & 0xFF00) << 8) | (x & 0x00FF);
    x = ((x << 4) | x) & 0x0F0F0F0F;
    x = ((x << 2) | x) & 0x33333333;
    x = ((x << 1) | x) & 0x55555555;
    return x;
}
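
/*
 * Illustrative values (not part of the original header):
 *
 *     half_shuffle32(0x0000ffff) == 0x55555555
 *     half_shuffle32(0x00000003) == 0x00000005
 *
 * half_unshuffle32() below inverts the transformation, so
 * half_unshuffle32(half_shuffle32(x)) == (x & 0xffff).
 */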

/**
 * half_shuffle64:
 * @x: 64-bit value (of which only the bottom 32 bits are of interest)
 *
 * Given an input value::
 *
 *  xxxx xxxx xxxx .... xxxx xxxx ABCD EFGH IJKL MNOP QRST UVWX YZab cdef
 *
 * return the value where the bottom 32 bits are spread out into
 * the odd bits in the word, and the even bits are zeroed::
 *
 *  0A0B 0C0D 0E0F 0G0H 0I0J 0K0L 0M0N .... 0U0V 0W0X 0Y0Z 0a0b 0c0d 0e0f
 *
 * Any bits set in the top half of the input are ignored.
 *
 * Returns: the shuffled bits.
 */
static inline uint64_t half_shuffle64(uint64_t x)
{
    /* This algorithm is from _Hacker's Delight_ section 7-2 "Shuffling Bits".
     * It ignores any bits set in the top half of the input.
     */
    x = ((x & 0xFFFF0000ULL) << 16) | (x & 0xFFFF);
    x = ((x << 8) | x) & 0x00FF00FF00FF00FFULL;
    x = ((x << 4) | x) & 0x0F0F0F0F0F0F0F0FULL;
    x = ((x << 2) | x) & 0x3333333333333333ULL;
    x = ((x << 1) | x) & 0x5555555555555555ULL;
    return x;
}

/**
 * half_unshuffle32:
 * @x: 32-bit value (of which only the odd bits are of interest)
 *
 * Given an input value::
 *
 *  xAxB xCxD xExF xGxH xIxJ xKxL xMxN xOxP
 *
 * return the value where all the odd bits are compressed down
 * into the low half of the word, and the high half is zeroed::
 *
 *  0000 0000 0000 0000 ABCD EFGH IJKL MNOP
 *
 * Any even bits set in the input are ignored.
 *
 * Returns: the unshuffled bits.
 */
static inline uint32_t half_unshuffle32(uint32_t x)
{
    /* This algorithm is from _Hacker's Delight_ section 7-2 "Shuffling Bits",
     * where it is called an inverse half shuffle.
     */
    x &= 0x55555555;
    x = ((x >> 1) | x) & 0x33333333;
    x = ((x >> 2) | x) & 0x0F0F0F0F;
    x = ((x >> 4) | x) & 0x00FF00FF;
    x = ((x >> 8) | x) & 0x0000FFFF;
    return x;
}

/**
 * half_unshuffle64:
 * @x: 64-bit value (of which only the odd bits are of interest)
 *
 * Given an input value::
 *
 *  xAxB xCxD xExF xGxH xIxJ xKxL xMxN .... xUxV xWxX xYxZ xaxb xcxd xexf
 *
 * return the value where all the odd bits are compressed down
 * into the low half of the word, and the high half is zeroed::
 *
 *  0000 0000 0000 .... 0000 0000 ABCD EFGH IJKL MNOP QRST UVWX YZab cdef
 *
 * Any even bits set in the input are ignored.
 *
 * Returns: the unshuffled bits.
 */
static inline uint64_t half_unshuffle64(uint64_t x)
{
    /* This algorithm is from _Hacker's Delight_ section 7-2 "Shuffling Bits",
     * where it is called an inverse half shuffle.
     */
    x &= 0x5555555555555555ULL;
    x = ((x >> 1) | x) & 0x3333333333333333ULL;
    x = ((x >> 2) | x) & 0x0F0F0F0F0F0F0F0FULL;
    x = ((x >> 4) | x) & 0x00FF00FF00FF00FFULL;
    x = ((x >> 8) | x) & 0x0000FFFF0000FFFFULL;
    x = ((x >> 16) | x) & 0x00000000FFFFFFFFULL;
    return x;
}

#endif