netfilter: nf_tables: Introduce new 64-bit helper register functions
Introduce new helper functions to load/store 64-bit values onto/from registers:

- nft_reg_store64
- nft_reg_load64

This commit also re-orders all these helpers from smallest to largest target bit size.

Signed-off-by: Ander Juaristi <a@juaristi.eus>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
This commit is contained in:
parent
0846e1616f
commit
a1b840adaf
|
@ -2,6 +2,7 @@
|
||||||
#ifndef _NET_NF_TABLES_H
#define _NET_NF_TABLES_H

#include <asm/unaligned.h>
#include <linux/list.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nfnetlink.h>
@ -102,33 +103,43 @@ struct nft_regs {
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
/* Store/load an u8, u16 or u64 integer to/from the u32 data register.
 *
 * Note, when using concatenations, register allocation happens at 32-bit
 * level. So for store instruction, pad the rest part with zero to avoid
 * garbage values.
 */
|
||||||
|
|
||||||
static inline void nft_reg_store16(u32 *dreg, u16 val)
|
|
||||||
{
|
|
||||||
*dreg = 0;
|
|
||||||
*(u16 *)dreg = val;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Write an 8-bit value into a 32-bit data register.
 *
 * The whole register is cleared first so that, when registers are used
 * in concatenations, the unused upper bytes carry no garbage.
 */
static inline void nft_reg_store8(u32 *dreg, u8 val)
{
	u8 *store = (u8 *)dreg;

	*dreg = 0;
	*store = val;
}
|
||||||
|
|
||||||
|
/* Read back an 8-bit value previously stored in a 32-bit data register. */
static inline u8 nft_reg_load8(u32 *sreg)
{
	const u8 *load = (const u8 *)sreg;

	return *load;
}
|
||||||
|
|
||||||
|
/* Write a 16-bit value into a 32-bit data register.
 *
 * The whole register is cleared first so that, when registers are used
 * in concatenations, the unused upper bytes carry no garbage.
 */
static inline void nft_reg_store16(u32 *dreg, u16 val)
{
	u16 *store = (u16 *)dreg;

	*dreg = 0;
	*store = val;
}
|
||||||
|
|
||||||
/* Read back a 16-bit value previously stored in a 32-bit data register. */
static inline u16 nft_reg_load16(u32 *sreg)
{
	const u16 *load = (const u16 *)sreg;

	return *load;
}
|
||||||
|
|
||||||
static inline u8 nft_reg_load8(u32 *sreg)
|
static inline void nft_reg_store64(u32 *dreg, u64 val)
|
||||||
{
|
{
|
||||||
return *(u8 *)sreg;
|
put_unaligned(val, (u64 *)dreg);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Read back a 64-bit value spanning a pair of adjacent 32-bit data
 * registers; uses get_unaligned() since only 32-bit alignment is
 * guaranteed for the register array.
 */
static inline u64 nft_reg_load64(u32 *sreg)
{
	const u64 *load = (const u64 *)sreg;

	return get_unaligned(load);
}
|
||||||
|
|
||||||
static inline void nft_data_copy(u32 *dst, const struct nft_data *src,
|
static inline void nft_data_copy(u32 *dst, const struct nft_data *src,
|
||||||
|
|
|
@ -43,14 +43,15 @@ void nft_byteorder_eval(const struct nft_expr *expr,
|
||||||
switch (priv->op) {
|
switch (priv->op) {
|
||||||
case NFT_BYTEORDER_NTOH:
|
case NFT_BYTEORDER_NTOH:
|
||||||
for (i = 0; i < priv->len / 8; i++) {
|
for (i = 0; i < priv->len / 8; i++) {
|
||||||
src64 = get_unaligned((u64 *)&src[i]);
|
src64 = nft_reg_load64(&src[i]);
|
||||||
put_unaligned_be64(src64, &dst[i]);
|
nft_reg_store64(&dst[i], be64_to_cpu(src64));
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
case NFT_BYTEORDER_HTON:
|
case NFT_BYTEORDER_HTON:
|
||||||
for (i = 0; i < priv->len / 8; i++) {
|
for (i = 0; i < priv->len / 8; i++) {
|
||||||
src64 = get_unaligned_be64(&src[i]);
|
src64 = (__force __u64)
|
||||||
put_unaligned(src64, (u64 *)&dst[i]);
|
cpu_to_be64(nft_reg_load64(&src[i]));
|
||||||
|
nft_reg_store64(&dst[i], src64);
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in New Issue