Compare commits


25 Commits
v2.8 ... v2.12

Author SHA1 Message Date
Jan Engelhardt
cd410aefe7 Xtables-addons 2.12 2017-01-11 01:39:53 +01:00
Jan Engelhardt
e4b5cef8f0 build: mark Linux 4.10 as supported 2017-01-04 02:44:43 +01:00
Ralph Sennhauser
a8af97b8fa build: support for Linux 4.10
Commit 613dbd95723aee7abd16860745691b6c7bda20dc (netfilter:
x_tables: move hook state into xt_action_param structure) changes
struct xt_action_param; adapt to it.

Signed-off-by: Ralph Sennhauser <ralph.sennhauser@gmail.com>
2017-01-04 02:40:41 +01:00
Ralph Sennhauser
db234c30cd build: support for Linux 4.9
Commit f330a7fdbe1611104622faff7e614a246a7d20f0 (netfilter: conntrack:
get rid of conntrack timer) replaces the timer_list with a u32; use the
helper from commit c8607e020014cf11a61601a0005270bad81cabdf (netfilter:
nft_ct: fix expiration getter).

Signed-off-by: Ralph Sennhauser <ralph.sennhauser@gmail.com>
2017-01-04 02:04:14 +01:00
Jan Engelhardt
2e6fb73e85 Xtables-addons 2.11 2016-05-20 14:35:46 +02:00
Jan Engelhardt
6024758b28 xt_ECHO: ensure IP header length is set 2016-05-20 10:48:27 +02:00
Jan Engelhardt
69f3f21a32 xt_ECHO: handle fragments
Since everything is echoed back verbatim, supporting fragments seems
easy.
2016-05-20 10:48:00 +02:00
Your Name
7af1b9737c xt_pknock: use shash crypto API
The old hash API is dropped as of Linux 4.6.
Only build-tested.
2016-05-20 04:46:31 -04:00
Jan Engelhardt
f5e95f35a7 xt_pknock: replace nemesis by socat
Use a utility that is much more widely available.
2016-04-22 22:51:24 +02:00
Jan Engelhardt
80bed0655f xt_pknock: import digest generation utility 2016-04-22 22:48:56 +02:00
Jan Engelhardt
bc6aaf74d8 xt_pknock: remove reference to non-existing documentation
Even in the old pknock-0.5.tar.gz tarball, there is no doc/pknock/
directory.
2016-04-22 22:43:17 +02:00
Jan Engelhardt
192243483a xt_SYSRQ: use new shash crypto API
The "shash" API is not exactly new (Linux 2.6.27), but the "hash" API
was finally thrown out for Linux 4.6.
2016-04-22 11:11:57 +02:00
Andreas Schultz
e3114d60d5 xt_ACCOUNT: make it namespace aware
xt_ACCOUNT objects created in one network namespace could be
read from all namespaces. Also, objects with the same name in
different namespaces would collide.

Signed-off-by: Andreas Schultz <aschultz@tpip.net>
2016-04-05 13:30:57 +02:00
Matthias Schiffer
e6f20befad build: fix configure compatibility with POSIX shells
The kernel version detection code uses some bashisms, which makes the
build fail on Debian systems where /bin/sh links to dash. Replace with
POSIX-conforming commands at the cost of requiring awk.
2016-04-05 12:25:13 +02:00
Jan Engelhardt
5038e160f8 Xtables-addons 2.10 2015-11-20 23:30:33 +01:00
Jan Engelhardt
a6289ec3ff build: silence compiler warning in xt_quota2
xt_quota2.c:67:6: warning: unused variable "ret" [-Wunused-variable]
2015-11-20 23:17:40 +01:00
Jan Engelhardt
01e7128a80 build: support for Linux 4.4 2015-11-20 23:17:39 +01:00
Jan Engelhardt
1dc2a1c2de xt_ACCOUNT: remove redundant braces
For single-line statements, the {} are not strictly needed.
2015-11-09 22:33:49 +01:00
Jan Engelhardt
60b6b1dbef xt_ACCOUNT: indent reduction
Invert early terminating conditions so the rest of the block can be
de-indented.
2015-11-09 22:33:49 +01:00
Jan Engelhardt
fcb19403bc xt_ACCOUNT: call free_pages(x,2) (doc)
Below is the patch with the *rest* of the free_page(X) calls changed
to free_pages(X, 2). xt_ACCOUNT should always allocate memory in page
pairs. And always *free* memory in page pairs.

References: http://www.spinics.net/lists/netfilter-devel/msg39025.html
2015-11-09 22:33:47 +01:00
Neil P. Murphy
f89f10bbe9 xt_ACCOUNT: call free_pages(x,2)
Below is the patch with the *rest* of the free_page(X) calls changed
to free_pages(X, 2). xt_ACCOUNT should always allocate memory in page
pairs. And always *free* memory in page pairs.

References: http://www.spinics.net/lists/netfilter-devel/msg39025.html
2015-11-09 22:25:16 +01:00
Jan Engelhardt
a9358542fe Xtables-addons 2.9 2015-10-12 16:27:59 +02:00
Jan Engelhardt
237fe7c660 build: support for Linux 4.3 2015-10-12 16:27:08 +02:00
Sam Liddicott
939fc901c1 xt_quota2: allow incremental value to be written to quota proc file
As well as writing absolute numeric values to the quota file, you
can now also write numbers preceded by a + sign or a - sign, e.g.

* "+30" would increase the quota by 30
* "+-20" would increase the quota by negative 20,
  which is the same as decrease by 20
* "-5" would decrease the quota by 5
2015-09-29 20:54:18 +02:00
Boris Figovsky
a4a077ff86 xt_DHCPMAC: correct L2addr set and compare 2015-09-29 20:40:24 +02:00
21 changed files with 491 additions and 285 deletions

View File

@@ -1,4 +1,4 @@
AC_INIT([xtables-addons], [2.8])
AC_INIT([xtables-addons], [2.12])
AC_CONFIG_AUX_DIR([build-aux])
AC_CONFIG_HEADERS([config.h])
AC_CONFIG_MACRO_DIR([m4])
@@ -44,28 +44,22 @@ regular_CFLAGS="-Wall -Waggregate-return -Wmissing-declarations \
if test -n "$kbuilddir"; then
AC_MSG_CHECKING([kernel version that we will build against])
krel="$(make -sC "$kbuilddir" M=$PWD kernelrelease)";
kmajor="${krel%%[[^0-9]]*}";
kmajor="$(($kmajor+0))";
krel="${krel:${#kmajor}}";
krel="${krel#.}";
kminor="${krel%%[[^0-9]]*}";
kminor="$(($kminor+0))";
krel="${krel:${#kminor}}";
krel="${krel#.}";
kmicro="${krel%%[[^0-9]]*}";
kmicro="$(($kmicro+0))";
krel="${krel:${#kmicro}}";
krel="${krel#.}";
kstable="${krel%%[[^0-9]]*}";
kstable="$(($kstable+0))";
krel="$(make -sC "$kbuilddir" M=$PWD kernelrelease | $AWK -v 'FS=[[^0-9.]]' '{print $1; exit}')"
save_IFS="$IFS"
IFS='.'
set x $krel
IFS="$save_IFS"
kmajor="$(($2+0))"
kminor="$(($3+0))"
kmicro="$(($4+0))"
kstable="$(($5+0))"
if test -z "$kmajor" -o -z "$kminor" -o -z "$kmicro"; then
echo "WARNING: Version detection did not succeed. Continue at own luck.";
else
echo "$kmajor.$kminor.$kmicro.$kstable in $kbuilddir";
if test "$kmajor" -gt 4 -o "$kmajor" -eq 4 -a "$kminor" -gt 1; then
if test "$kmajor" -gt 4 -o "$kmajor" -eq 4 -a "$kminor" -gt 10; then
echo "WARNING: That kernel version is not officially supported yet. Continue at own luck.";
elif test "$kmajor" -eq 4 -a "$kminor" -le 1; then
elif test "$kmajor" -eq 4 -a "$kminor" -le 10; then
:;
elif test "$kmajor" -eq 3 -a "$kminor" -ge 7; then
:;
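(The replacement has make print kernelrelease once, uses awk to cut the string at the first character that is neither a digit nor a dot, and then splits the remainder on "." via IFS/set, so kmajor/kminor/kmicro/kstable are obtained with plain POSIX constructs instead of bash-only substring expansion.)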

View File

@@ -3,6 +3,33 @@ HEAD
====
v2.12 (2017-01-11)
==================
Enhancements:
- support for Linux up to 4.10
v2.11 (2016-05-20)
==================
Enhancements:
- support for Linux 4.5, 4.6
- xt_ECHO: tentatively support responding to fragments
v2.10 (2015-11-20)
==================
Enhancements:
- Support for Linux 4.4
Fixes:
- xt_ACCOUNT: call free_page with the right amount of pages
v2.9 (2015-10-12)
=================
Enhancements:
- Support for Linux 4.3
v2.8 (2015-08-19)
=================
Enhancements:

View File

@@ -15,6 +15,7 @@
//#define DEBUG 1
#include <linux/module.h>
#include <linux/version.h>
#include <net/net_namespace.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <net/icmp.h>
@@ -29,6 +30,7 @@
#include <linux/string.h>
#include <linux/spinlock.h>
#include <asm/uaccess.h>
#include <net/netns/generic.h>
#include <net/route.h>
#include "xt_ACCOUNT.h"
@@ -100,14 +102,19 @@ struct ipt_acc_mask_8 {
struct ipt_acc_mask_16 *mask_16[256];
};
static struct ipt_acc_table *ipt_acc_tables;
static struct ipt_acc_handle *ipt_acc_handles;
static void *ipt_acc_tmpbuf;
static int ipt_acc_net_id __read_mostly;
/* Spinlock used for manipulating the current accounting tables/data */
static DEFINE_SPINLOCK(ipt_acc_lock);
/* Mutex (semaphore) used for manipulating userspace handles/snapshot data */
static struct semaphore ipt_acc_userspace_mutex;
struct ipt_acc_net {
/* Spinlock used for manipulating the current accounting tables/data */
spinlock_t ipt_acc_lock;
/* Mutex (semaphore) used for manipulating userspace handles/snapshot data */
struct semaphore ipt_acc_userspace_mutex;
struct ipt_acc_table *ipt_acc_tables;
struct ipt_acc_handle *ipt_acc_handles;
void *ipt_acc_tmpbuf;
};
/* Allocates a page pair and clears it */
static void *ipt_acc_zalloc_page(void)
@@ -115,10 +122,8 @@ static void *ipt_acc_zalloc_page(void)
// Don't use get_zeroed_page until it's fixed in the kernel.
// get_zeroed_page(GFP_ATOMIC)
void *mem = (void *)__get_free_pages(GFP_ATOMIC, 2);
if (mem) {
if (mem != NULL)
memset(mem, 0, 2 *PAGE_SIZE);
}
return mem;
}
@@ -139,11 +144,9 @@ static void ipt_acc_data_free(void *data, uint8_t depth)
if (depth == 1) {
struct ipt_acc_mask_16 *mask_16 = data;
unsigned int b;
for (b = 0; b <= 255; b++) {
if (mask_16->mask_24[b]) {
free_page((unsigned long)mask_16->mask_24[b]);
}
}
for (b = 0; b <= 255; ++b)
if (mask_16->mask_24[b])
free_pages((unsigned long)mask_16->mask_24[b], 2);
free_pages((unsigned long)data, 2);
return;
}
@@ -156,12 +159,10 @@ static void ipt_acc_data_free(void *data, uint8_t depth)
struct ipt_acc_mask_16 *mask_16 =
((struct ipt_acc_mask_8 *)data)->mask_16[a];
for (b = 0; b <= 255; b++) {
if (mask_16->mask_24[b]) {
free_page((unsigned long)mask_16->mask_24[b]);
}
}
free_page((unsigned long)mask_16);
for (b = 0; b <= 255; ++b)
if (mask_16->mask_24[b])
free_pages((unsigned long)mask_16->mask_24[b], 2);
free_pages((unsigned long)mask_16, 2);
}
}
free_pages((unsigned long)data, 2);
@@ -175,7 +176,8 @@ static void ipt_acc_data_free(void *data, uint8_t depth)
/* Look for existing table / insert new one.
Return internal ID or -1 on error */
static int ipt_acc_table_insert(const char *name, __be32 ip, __be32 netmask)
static int ipt_acc_table_insert(struct ipt_acc_table *ipt_acc_tables,
const char *name, __be32 ip, __be32 netmask)
{
unsigned int i;
@@ -262,13 +264,15 @@ static int ipt_acc_table_insert(const char *name, __be32 ip, __be32 netmask)
static int ipt_acc_checkentry(const struct xt_tgchk_param *par)
{
struct ipt_acc_net *ian = net_generic(par->net, ipt_acc_net_id);
struct ipt_acc_info *info = par->targinfo;
int table_nr;
spin_lock_bh(&ipt_acc_lock);
table_nr = ipt_acc_table_insert(info->table_name, info->net_ip,
spin_lock_bh(&ian->ipt_acc_lock);
table_nr = ipt_acc_table_insert(ian->ipt_acc_tables,
info->table_name, info->net_ip,
info->net_mask);
spin_unlock_bh(&ipt_acc_lock);
spin_unlock_bh(&ian->ipt_acc_lock);
if (table_nr == -1) {
printk("ACCOUNT: Table insert problem. Aborting\n");
@@ -283,10 +287,11 @@ static int ipt_acc_checkentry(const struct xt_tgchk_param *par)
static void ipt_acc_destroy(const struct xt_tgdtor_param *par)
{
struct ipt_acc_net *ian = net_generic(par->net, ipt_acc_net_id);
unsigned int i;
struct ipt_acc_info *info = par->targinfo;
spin_lock_bh(&ipt_acc_lock);
spin_lock_bh(&ian->ipt_acc_lock);
pr_debug("ACCOUNT: ipt_acc_deleteentry called for table: %s (#%d)\n",
info->table_name, info->table_nr);
@@ -295,31 +300,31 @@ static void ipt_acc_destroy(const struct xt_tgdtor_param *par)
/* Look for table */
for (i = 0; i < ACCOUNT_MAX_TABLES; i++) {
if (strncmp(ipt_acc_tables[i].name, info->table_name,
if (strncmp(ian->ipt_acc_tables[i].name, info->table_name,
ACCOUNT_TABLE_NAME_LEN) == 0) {
pr_debug("ACCOUNT: Found table at slot: %d\n", i);
ipt_acc_tables[i].refcount--;
ian->ipt_acc_tables[i].refcount--;
pr_debug("ACCOUNT: Refcount left: %d\n",
ipt_acc_tables[i].refcount);
ian->ipt_acc_tables[i].refcount);
/* Table not needed anymore? */
if (ipt_acc_tables[i].refcount == 0) {
if (ian->ipt_acc_tables[i].refcount == 0) {
pr_debug("ACCOUNT: Destroying table at slot: %d\n", i);
ipt_acc_data_free(ipt_acc_tables[i].data,
ipt_acc_tables[i].depth);
memset(&ipt_acc_tables[i], 0,
ipt_acc_data_free(ian->ipt_acc_tables[i].data,
ian->ipt_acc_tables[i].depth);
memset(&ian->ipt_acc_tables[i], 0,
sizeof(struct ipt_acc_table));
}
spin_unlock_bh(&ipt_acc_lock);
spin_unlock_bh(&ian->ipt_acc_lock);
return;
}
}
/* Table not found */
printk("ACCOUNT: Table %s not found for destroy\n", info->table_name);
spin_unlock_bh(&ipt_acc_lock);
spin_unlock_bh(&ian->ipt_acc_lock);
}
static void ipt_acc_depth0_insert(struct ipt_acc_mask_24 *mask_24,
@@ -477,6 +482,17 @@ static void ipt_acc_depth2_insert(struct ipt_acc_mask_8 *mask_8,
static unsigned int
ipt_acc_target(struct sk_buff *skb, const struct xt_action_param *par)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)
struct ipt_acc_net *ian = net_generic(par->state->net, ipt_acc_net_id);
#else
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0)
struct ipt_acc_net *ian = net_generic(par->net, ipt_acc_net_id);
#else
struct net *net = dev_net(par->in ? par->in : par->out);
struct ipt_acc_net *ian = net_generic(net, ipt_acc_net_id);
#endif
#endif
struct ipt_acc_table *ipt_acc_tables = ian->ipt_acc_tables;
const struct ipt_acc_info *info =
par->targinfo;
@@ -484,13 +500,13 @@ ipt_acc_target(struct sk_buff *skb, const struct xt_action_param *par)
__be32 dst_ip = ip_hdr(skb)->daddr;
uint32_t size = ntohs(ip_hdr(skb)->tot_len);
spin_lock_bh(&ipt_acc_lock);
spin_lock_bh(&ian->ipt_acc_lock);
if (ipt_acc_tables[info->table_nr].name[0] == 0) {
printk("ACCOUNT: ipt_acc_target: Invalid table id %u. "
"IPs %u.%u.%u.%u/%u.%u.%u.%u\n", info->table_nr,
NIPQUAD(src_ip), NIPQUAD(dst_ip));
spin_unlock_bh(&ipt_acc_lock);
spin_unlock_bh(&ian->ipt_acc_lock);
return XT_CONTINUE;
}
@@ -502,7 +518,7 @@ ipt_acc_target(struct sk_buff *skb, const struct xt_action_param *par)
ipt_acc_tables[info->table_nr].ip,
ipt_acc_tables[info->table_nr].netmask,
src_ip, dst_ip, size, &ipt_acc_tables[info->table_nr].itemcount);
spin_unlock_bh(&ipt_acc_lock);
spin_unlock_bh(&ian->ipt_acc_lock);
return XT_CONTINUE;
}
@@ -513,7 +529,7 @@ ipt_acc_target(struct sk_buff *skb, const struct xt_action_param *par)
ipt_acc_tables[info->table_nr].ip,
ipt_acc_tables[info->table_nr].netmask,
src_ip, dst_ip, size, &ipt_acc_tables[info->table_nr].itemcount);
spin_unlock_bh(&ipt_acc_lock);
spin_unlock_bh(&ian->ipt_acc_lock);
return XT_CONTINUE;
}
@@ -524,7 +540,7 @@ ipt_acc_target(struct sk_buff *skb, const struct xt_action_param *par)
ipt_acc_tables[info->table_nr].ip,
ipt_acc_tables[info->table_nr].netmask,
src_ip, dst_ip, size, &ipt_acc_tables[info->table_nr].itemcount);
spin_unlock_bh(&ipt_acc_lock);
spin_unlock_bh(&ian->ipt_acc_lock);
return XT_CONTINUE;
}
@@ -532,7 +548,7 @@ ipt_acc_target(struct sk_buff *skb, const struct xt_action_param *par)
"Table id %u. IPs %u.%u.%u.%u/%u.%u.%u.%u\n",
info->table_nr, NIPQUAD(src_ip), NIPQUAD(dst_ip));
spin_unlock_bh(&ipt_acc_lock);
spin_unlock_bh(&ian->ipt_acc_lock);
return XT_CONTINUE;
}
@@ -553,7 +569,7 @@ ipt_acc_target(struct sk_buff *skb, const struct xt_action_param *par)
but there could be two or more applications accessing the data
at the same time.
*/
static int ipt_acc_handle_find_slot(void)
static int ipt_acc_handle_find_slot(struct ipt_acc_handle *ipt_acc_handles)
{
unsigned int i;
/* Insert new table */
@@ -573,7 +589,8 @@ static int ipt_acc_handle_find_slot(void)
return -1;
}
static int ipt_acc_handle_free(unsigned int handle)
static int ipt_acc_handle_free(struct ipt_acc_handle *ipt_acc_handles,
unsigned int handle)
{
if (handle >= ACCOUNT_MAX_HANDLES) {
printk("ACCOUNT: Invalid handle for ipt_acc_handle_free() specified:"
@@ -589,7 +606,8 @@ static int ipt_acc_handle_free(unsigned int handle)
/* Prepare data for read without flush. Use only for debugging!
Real applications should use read&flush as it's way more efficent */
static int ipt_acc_handle_prepare_read(char *tablename,
static int ipt_acc_handle_prepare_read(struct ipt_acc_table *ipt_acc_tables,
char *tablename,
struct ipt_acc_handle *dest, uint32_t *count)
{
int table_nr = -1;
@@ -631,18 +649,18 @@ static int ipt_acc_handle_prepare_read(char *tablename,
unsigned int b;
for (b = 0; b <= 255; b++) {
if (src_16->mask_24[b]) {
if ((network_16->mask_24[b] =
ipt_acc_zalloc_page()) == NULL) {
printk("ACCOUNT: out of memory during copy of 16 bit "
"network in ipt_acc_handle_prepare_read()\n");
ipt_acc_data_free(dest->data, depth);
return -1;
}
memcpy(network_16->mask_24[b], src_16->mask_24[b],
sizeof(struct ipt_acc_mask_24));
if (src_16->mask_24[b] == NULL)
continue;
if ((network_16->mask_24[b] =
ipt_acc_zalloc_page()) == NULL) {
printk("ACCOUNT: out of memory during copy of 16 bit "
"network in ipt_acc_handle_prepare_read()\n");
ipt_acc_data_free(dest->data, depth);
return -1;
}
memcpy(network_16->mask_24[b], src_16->mask_24[b],
sizeof(struct ipt_acc_mask_24));
}
} else if (depth == 2) {
struct ipt_acc_mask_8 *src_8 =
@@ -652,35 +670,35 @@ static int ipt_acc_handle_prepare_read(char *tablename,
unsigned int a, b;
for (a = 0; a <= 255; a++) {
if (src_8->mask_16[a]) {
if ((network_8->mask_16[a] =
if (src_8->mask_16[a] == NULL)
continue;
if ((network_8->mask_16[a] =
ipt_acc_zalloc_page()) == NULL) {
printk("ACCOUNT: out of memory during copy of 24 bit network"
" in ipt_acc_handle_prepare_read()\n");
ipt_acc_data_free(dest->data, depth);
return -1;
}
memcpy(network_8->mask_16[a], src_8->mask_16[a],
sizeof(struct ipt_acc_mask_16));
src_16 = src_8->mask_16[a];
network_16 = network_8->mask_16[a];
for (b = 0; b <= 255; b++) {
if (src_16->mask_24[b] == NULL)
continue;
if ((network_16->mask_24[b] =
ipt_acc_zalloc_page()) == NULL) {
printk("ACCOUNT: out of memory during copy of 24 bit network"
" in ipt_acc_handle_prepare_read()\n");
printk("ACCOUNT: out of memory during copy of 16 bit"
" network in ipt_acc_handle_prepare_read()\n");
ipt_acc_data_free(dest->data, depth);
return -1;
}
memcpy(network_8->mask_16[a], src_8->mask_16[a],
sizeof(struct ipt_acc_mask_16));
src_16 = src_8->mask_16[a];
network_16 = network_8->mask_16[a];
for (b = 0; b <= 255; b++) {
if (src_16->mask_24[b]) {
if ((network_16->mask_24[b] =
ipt_acc_zalloc_page()) == NULL) {
printk("ACCOUNT: out of memory during copy of 16 bit"
" network in ipt_acc_handle_prepare_read()\n");
ipt_acc_data_free(dest->data, depth);
return -1;
}
memcpy(network_16->mask_24[b], src_16->mask_24[b],
sizeof(struct ipt_acc_mask_24));
}
}
memcpy(network_16->mask_24[b], src_16->mask_24[b],
sizeof(struct ipt_acc_mask_24));
}
}
}
@@ -691,7 +709,8 @@ static int ipt_acc_handle_prepare_read(char *tablename,
}
/* Prepare data for read and flush it */
static int ipt_acc_handle_prepare_read_flush(char *tablename,
static int ipt_acc_handle_prepare_read_flush(struct ipt_acc_table *ipt_acc_tables,
char *tablename,
struct ipt_acc_handle *dest, uint32_t *count)
{
int table_nr;
@@ -732,7 +751,8 @@ static int ipt_acc_handle_prepare_read_flush(char *tablename,
/* Copy 8 bit network data into a prepared buffer.
We only copy entries != 0 to increase performance.
*/
static int ipt_acc_handle_copy_data(void *to_user, unsigned long *to_user_pos,
static int ipt_acc_handle_copy_data(struct ipt_acc_net *ian,
void *to_user, unsigned long *to_user_pos,
unsigned long *tmpbuf_pos,
struct ipt_acc_mask_24 *data,
uint32_t net_ip, uint32_t net_OR_mask)
@@ -742,25 +762,26 @@ static int ipt_acc_handle_copy_data(void *to_user, unsigned long *to_user_pos,
unsigned int i;
for (i = 0; i <= 255; i++) {
if (data->ip[i].src_packets || data->ip[i].dst_packets) {
handle_ip.ip = net_ip | net_OR_mask | i;
if (data->ip[i].src_packets == 0 &&
data->ip[i].dst_packets == 0)
continue;
handle_ip.src_packets = data->ip[i].src_packets;
handle_ip.src_bytes = data->ip[i].src_bytes;
handle_ip.dst_packets = data->ip[i].dst_packets;
handle_ip.dst_bytes = data->ip[i].dst_bytes;
handle_ip.ip = net_ip | net_OR_mask | i;
handle_ip.src_packets = data->ip[i].src_packets;
handle_ip.src_bytes = data->ip[i].src_bytes;
handle_ip.dst_packets = data->ip[i].dst_packets;
handle_ip.dst_bytes = data->ip[i].dst_bytes;
/* Temporary buffer full? Flush to userspace */
if (*tmpbuf_pos + handle_ip_size >= PAGE_SIZE) {
if (copy_to_user(to_user + *to_user_pos, ipt_acc_tmpbuf,
*tmpbuf_pos))
return -EFAULT;
*to_user_pos = *to_user_pos + *tmpbuf_pos;
*tmpbuf_pos = 0;
}
memcpy(ipt_acc_tmpbuf + *tmpbuf_pos, &handle_ip, handle_ip_size);
*tmpbuf_pos += handle_ip_size;
/* Temporary buffer full? Flush to userspace */
if (*tmpbuf_pos + handle_ip_size >= PAGE_SIZE) {
if (copy_to_user(to_user + *to_user_pos, ian->ipt_acc_tmpbuf,
*tmpbuf_pos))
return -EFAULT;
*to_user_pos = *to_user_pos + *tmpbuf_pos;
*tmpbuf_pos = 0;
}
memcpy(ian->ipt_acc_tmpbuf + *tmpbuf_pos, &handle_ip, handle_ip_size);
*tmpbuf_pos += handle_ip_size;
}
return 0;
@@ -770,7 +791,8 @@ static int ipt_acc_handle_copy_data(void *to_user, unsigned long *to_user_pos,
We only copy entries != 0 to increase performance.
Overwrites ipt_acc_tmpbuf.
*/
static int ipt_acc_handle_get_data(uint32_t handle, void *to_user)
static int ipt_acc_handle_get_data(struct ipt_acc_net *ian,
uint32_t handle, void *to_user)
{
unsigned long to_user_pos = 0, tmpbuf_pos = 0;
uint32_t net_ip;
@@ -782,25 +804,25 @@ static int ipt_acc_handle_get_data(uint32_t handle, void *to_user)
return -1;
}
if (ipt_acc_handles[handle].data == NULL) {
if (ian->ipt_acc_handles[handle].data == NULL) {
printk("ACCOUNT: handle %u is BROKEN: Contains no data\n", handle);
return -1;
}
net_ip = ntohl(ipt_acc_handles[handle].ip);
depth = ipt_acc_handles[handle].depth;
net_ip = ntohl(ian->ipt_acc_handles[handle].ip);
depth = ian->ipt_acc_handles[handle].depth;
/* 8 bit network */
if (depth == 0) {
struct ipt_acc_mask_24 *network =
ipt_acc_handles[handle].data;
if (ipt_acc_handle_copy_data(to_user, &to_user_pos, &tmpbuf_pos,
ian->ipt_acc_handles[handle].data;
if (ipt_acc_handle_copy_data(ian, to_user, &to_user_pos, &tmpbuf_pos,
network, net_ip, 0))
return -1;
/* Flush remaining data to userspace */
if (tmpbuf_pos)
if (copy_to_user(to_user + to_user_pos, ipt_acc_tmpbuf, tmpbuf_pos))
if (copy_to_user(to_user + to_user_pos, ian->ipt_acc_tmpbuf, tmpbuf_pos))
return -1;
return 0;
@@ -809,13 +831,13 @@ static int ipt_acc_handle_get_data(uint32_t handle, void *to_user)
/* 16 bit network */
if (depth == 1) {
struct ipt_acc_mask_16 *network_16 =
ipt_acc_handles[handle].data;
ian->ipt_acc_handles[handle].data;
unsigned int b;
for (b = 0; b <= 255; b++) {
if (network_16->mask_24[b]) {
struct ipt_acc_mask_24 *network =
network_16->mask_24[b];
if (ipt_acc_handle_copy_data(to_user, &to_user_pos,
if (ipt_acc_handle_copy_data(ian, to_user, &to_user_pos,
&tmpbuf_pos, network, net_ip, (b << 8)))
return -1;
}
@@ -823,7 +845,7 @@ static int ipt_acc_handle_get_data(uint32_t handle, void *to_user)
/* Flush remaining data to userspace */
if (tmpbuf_pos)
if (copy_to_user(to_user + to_user_pos, ipt_acc_tmpbuf, tmpbuf_pos))
if (copy_to_user(to_user + to_user_pos, ian->ipt_acc_tmpbuf, tmpbuf_pos))
return -1;
return 0;
@@ -832,7 +854,7 @@ static int ipt_acc_handle_get_data(uint32_t handle, void *to_user)
/* 24 bit network */
if (depth == 2) {
struct ipt_acc_mask_8 *network_8 =
ipt_acc_handles[handle].data;
ian->ipt_acc_handles[handle].data;
unsigned int a, b;
for (a = 0; a <= 255; a++) {
if (network_8->mask_16[a]) {
@@ -842,7 +864,7 @@ static int ipt_acc_handle_get_data(uint32_t handle, void *to_user)
if (network_16->mask_24[b]) {
struct ipt_acc_mask_24 *network =
network_16->mask_24[b];
if (ipt_acc_handle_copy_data(to_user,
if (ipt_acc_handle_copy_data(ian, to_user,
&to_user_pos, &tmpbuf_pos,
network, net_ip, (a << 16) | (b << 8)))
return -1;
@@ -853,7 +875,7 @@ static int ipt_acc_handle_get_data(uint32_t handle, void *to_user)
/* Flush remaining data to userspace */
if (tmpbuf_pos)
if (copy_to_user(to_user + to_user_pos, ipt_acc_tmpbuf, tmpbuf_pos))
if (copy_to_user(to_user + to_user_pos, ian->ipt_acc_tmpbuf, tmpbuf_pos))
return -1;
return 0;
@@ -865,6 +887,8 @@ static int ipt_acc_handle_get_data(uint32_t handle, void *to_user)
static int ipt_acc_set_ctl(struct sock *sk, int cmd,
void *user, unsigned int len)
{
struct net *net = sock_net(sk);
struct ipt_acc_net *ian = net_generic(net, ipt_acc_net_id);
struct ipt_acc_handle_sockopt handle;
int ret = -EINVAL;
@@ -886,16 +910,16 @@ static int ipt_acc_set_ctl(struct sock *sk, int cmd,
break;
}
down(&ipt_acc_userspace_mutex);
ret = ipt_acc_handle_free(handle.handle_nr);
up(&ipt_acc_userspace_mutex);
down(&ian->ipt_acc_userspace_mutex);
ret = ipt_acc_handle_free(ian->ipt_acc_handles, handle.handle_nr);
up(&ian->ipt_acc_userspace_mutex);
break;
case IPT_SO_SET_ACCOUNT_HANDLE_FREE_ALL: {
unsigned int i;
down(&ipt_acc_userspace_mutex);
down(&ian->ipt_acc_userspace_mutex);
for (i = 0; i < ACCOUNT_MAX_HANDLES; i++)
ipt_acc_handle_free(i);
up(&ipt_acc_userspace_mutex);
ipt_acc_handle_free(ian->ipt_acc_handles, i);
up(&ian->ipt_acc_userspace_mutex);
ret = 0;
break;
}
@@ -908,6 +932,8 @@ static int ipt_acc_set_ctl(struct sock *sk, int cmd,
static int ipt_acc_get_ctl(struct sock *sk, int cmd, void *user, int *len)
{
struct net *net = sock_net(sk);
struct ipt_acc_net *ian = net_generic(net, ipt_acc_net_id);
struct ipt_acc_handle_sockopt handle;
int ret = -EINVAL;
@@ -932,28 +958,28 @@ static int ipt_acc_get_ctl(struct sock *sk, int cmd, void *user, int *len)
break;
}
spin_lock_bh(&ipt_acc_lock);
spin_lock_bh(&ian->ipt_acc_lock);
if (cmd == IPT_SO_GET_ACCOUNT_PREPARE_READ_FLUSH)
ret = ipt_acc_handle_prepare_read_flush(
handle.name, &dest, &handle.itemcount);
ian->ipt_acc_tables, handle.name, &dest, &handle.itemcount);
else
ret = ipt_acc_handle_prepare_read(
handle.name, &dest, &handle.itemcount);
spin_unlock_bh(&ipt_acc_lock);
ian->ipt_acc_tables, handle.name, &dest, &handle.itemcount);
spin_unlock_bh(&ian->ipt_acc_lock);
// Error occured during prepare_read?
if (ret == -1)
return -EINVAL;
/* Allocate a userspace handle */
down(&ipt_acc_userspace_mutex);
if ((handle.handle_nr = ipt_acc_handle_find_slot()) == -1) {
down(&ian->ipt_acc_userspace_mutex);
if ((handle.handle_nr = ipt_acc_handle_find_slot(ian->ipt_acc_handles)) == -1) {
ipt_acc_data_free(dest.data, dest.depth);
up(&ipt_acc_userspace_mutex);
up(&ian->ipt_acc_userspace_mutex);
return -EINVAL;
}
memcpy(&ipt_acc_handles[handle.handle_nr], &dest,
memcpy(&ian->ipt_acc_handles[handle.handle_nr], &dest,
sizeof(struct ipt_acc_handle));
up(&ipt_acc_userspace_mutex);
up(&ian->ipt_acc_userspace_mutex);
if (copy_to_user(user, &handle,
sizeof(struct ipt_acc_handle_sockopt))) {
@@ -982,19 +1008,19 @@ static int ipt_acc_get_ctl(struct sock *sk, int cmd, void *user, int *len)
break;
}
if (*len < ipt_acc_handles[handle.handle_nr].itemcount
if (*len < ian->ipt_acc_handles[handle.handle_nr].itemcount
* sizeof(struct ipt_acc_handle_ip)) {
printk("ACCOUNT: ipt_acc_get_ctl: not enough space (%u < %zu)"
" to store data from IPT_SO_GET_ACCOUNT_GET_DATA\n",
*len, ipt_acc_handles[handle.handle_nr].itemcount
*len, ian->ipt_acc_handles[handle.handle_nr].itemcount
* sizeof(struct ipt_acc_handle_ip));
ret = -ENOMEM;
break;
}
down(&ipt_acc_userspace_mutex);
ret = ipt_acc_handle_get_data(handle.handle_nr, user);
up(&ipt_acc_userspace_mutex);
down(&ian->ipt_acc_userspace_mutex);
ret = ipt_acc_handle_get_data(ian, handle.handle_nr, user);
up(&ian->ipt_acc_userspace_mutex);
if (ret) {
printk("ACCOUNT: ipt_acc_get_ctl: ipt_acc_handle_get_data"
" failed for handle %u\n", handle.handle_nr);
@@ -1014,11 +1040,11 @@ static int ipt_acc_get_ctl(struct sock *sk, int cmd, void *user, int *len)
/* Find out how many handles are in use */
handle.itemcount = 0;
down(&ipt_acc_userspace_mutex);
down(&ian->ipt_acc_userspace_mutex);
for (i = 0; i < ACCOUNT_MAX_HANDLES; i++)
if (ipt_acc_handles[i].data)
if (ian->ipt_acc_handles[i].data)
handle.itemcount++;
up(&ipt_acc_userspace_mutex);
up(&ian->ipt_acc_userspace_mutex);
if (copy_to_user(user, &handle,
sizeof(struct ipt_acc_handle_sockopt))) {
@@ -1032,38 +1058,38 @@ static int ipt_acc_get_ctl(struct sock *sk, int cmd, void *user, int *len)
uint32_t size = 0, i, name_len;
char *tnames;
spin_lock_bh(&ipt_acc_lock);
spin_lock_bh(&ian->ipt_acc_lock);
/* Determine size of table names */
for (i = 0; i < ACCOUNT_MAX_TABLES; i++) {
if (ipt_acc_tables[i].name[0] != 0)
size += strlen(ipt_acc_tables[i].name) + 1;
if (ian->ipt_acc_tables[i].name[0] != 0)
size += strlen(ian->ipt_acc_tables[i].name) + 1;
}
size += 1; /* Terminating NULL character */
if (*len < size || size > PAGE_SIZE) {
spin_unlock_bh(&ipt_acc_lock);
spin_unlock_bh(&ian->ipt_acc_lock);
printk("ACCOUNT: ipt_acc_get_ctl: not enough space (%u < %u < %lu)"
" to store table names\n", *len, size, PAGE_SIZE);
ret = -ENOMEM;
break;
}
/* Copy table names to userspace */
tnames = ipt_acc_tmpbuf;
tnames = ian->ipt_acc_tmpbuf;
for (i = 0; i < ACCOUNT_MAX_TABLES; i++) {
if (ipt_acc_tables[i].name[0] != 0) {
name_len = strlen(ipt_acc_tables[i].name) + 1;
memcpy(tnames, ipt_acc_tables[i].name, name_len);
if (ian->ipt_acc_tables[i].name[0] != 0) {
name_len = strlen(ian->ipt_acc_tables[i].name) + 1;
memcpy(tnames, ian->ipt_acc_tables[i].name, name_len);
tnames += name_len;
}
}
spin_unlock_bh(&ipt_acc_lock);
spin_unlock_bh(&ian->ipt_acc_lock);
/* Terminating NULL character */
*tnames = 0;
/* Transfer to userspace */
if (copy_to_user(user, ipt_acc_tmpbuf, size))
if (copy_to_user(user, ian->ipt_acc_tmpbuf, size))
return -EFAULT;
ret = 0;
@@ -1076,6 +1102,59 @@ static int ipt_acc_get_ctl(struct sock *sk, int cmd, void *user, int *len)
return ret;
}
static int __net_init ipt_acc_net_init(struct net *net)
{
struct ipt_acc_net *ian = net_generic(net, ipt_acc_net_id);
memset(ian, 0, sizeof(*ian));
sema_init(&ian->ipt_acc_userspace_mutex, 1);
ian->ipt_acc_tables = kcalloc(ACCOUNT_MAX_TABLES,
sizeof(struct ipt_acc_table), GFP_KERNEL);
if (ian->ipt_acc_tables == NULL) {
printk("ACCOUNT: Out of memory allocating account_tables structure");
goto error_cleanup;
}
ian->ipt_acc_handles = kcalloc(ACCOUNT_MAX_HANDLES,
sizeof(struct ipt_acc_handle), GFP_KERNEL);
if (ian->ipt_acc_handles == NULL) {
printk("ACCOUNT: Out of memory allocating account_handles structure");
goto error_cleanup;
}
/* Allocate one page as temporary storage */
ian->ipt_acc_tmpbuf = (void *)__get_free_pages(GFP_KERNEL, 2);
if (ian->ipt_acc_tmpbuf == NULL) {
printk("ACCOUNT: Out of memory for temporary buffer page\n");
goto error_cleanup;
}
return 0;
error_cleanup:
kfree(ian->ipt_acc_tables);
kfree(ian->ipt_acc_handles);
free_pages((unsigned long)ian->ipt_acc_tmpbuf, 2);
return -ENOMEM;
}
static void __net_exit ipt_acc_net_exit(struct net *net)
{
struct ipt_acc_net *ian = net_generic(net, ipt_acc_net_id);
kfree(ian->ipt_acc_tables);
kfree(ian->ipt_acc_handles);
free_pages((unsigned long)ian->ipt_acc_tmpbuf, 2);
}
static struct pernet_operations ipt_acc_net_ops = {
.init = ipt_acc_net_init,
.exit = ipt_acc_net_exit,
.id = &ipt_acc_net_id,
.size = sizeof(struct ipt_acc_net),
};
static struct xt_target xt_acc_reg __read_mostly = {
.name = "ACCOUNT",
.revision = 1,
@@ -1099,63 +1178,41 @@ static struct nf_sockopt_ops ipt_acc_sockopts = {
static int __init account_tg_init(void)
{
sema_init(&ipt_acc_userspace_mutex, 1);
int ret;
if ((ipt_acc_tables =
kmalloc(ACCOUNT_MAX_TABLES *
sizeof(struct ipt_acc_table), GFP_KERNEL)) == NULL) {
printk("ACCOUNT: Out of memory allocating account_tables structure");
goto error_cleanup;
}
memset(ipt_acc_tables, 0,
ACCOUNT_MAX_TABLES * sizeof(struct ipt_acc_table));
if ((ipt_acc_handles =
kmalloc(ACCOUNT_MAX_HANDLES *
sizeof(struct ipt_acc_handle), GFP_KERNEL)) == NULL) {
printk("ACCOUNT: Out of memory allocating account_handles structure");
goto error_cleanup;
}
memset(ipt_acc_handles, 0,
ACCOUNT_MAX_HANDLES * sizeof(struct ipt_acc_handle));
/* Allocate one page as temporary storage */
if ((ipt_acc_tmpbuf = (void *)__get_free_pages(GFP_KERNEL, 2)) == NULL) {
printk("ACCOUNT: Out of memory for temporary buffer page\n");
goto error_cleanup;
ret = register_pernet_subsys(&ipt_acc_net_ops);
if (ret < 0) {
pr_err("ACCOUNT: cannot register per net operations.\n");
goto error_out;
}
/* Register setsockopt */
if (nf_register_sockopt(&ipt_acc_sockopts) < 0) {
printk("ACCOUNT: Can't register sockopts. Aborting\n");
goto error_cleanup;
ret = nf_register_sockopt(&ipt_acc_sockopts);
if (ret < 0) {
pr_err("ACCOUNT: cannot register sockopts.\n");
goto unreg_pernet;
}
if (xt_register_target(&xt_acc_reg))
goto error_cleanup;
ret = xt_register_target(&xt_acc_reg);
if (ret < 0) {
pr_err("ACCOUNT: cannot register sockopts.\n");
goto unreg_sockopt;
}
return 0;
error_cleanup:
if (ipt_acc_tables)
kfree(ipt_acc_tables);
if (ipt_acc_handles)
kfree(ipt_acc_handles);
if (ipt_acc_tmpbuf)
free_pages((unsigned long)ipt_acc_tmpbuf, 2);
return -EINVAL;
unreg_sockopt:
nf_unregister_sockopt(&ipt_acc_sockopts);
unreg_pernet:
unregister_pernet_subsys(&ipt_acc_net_ops);
error_out:
return ret;
}
static void __exit account_tg_exit(void)
{
xt_unregister_target(&xt_acc_reg);
nf_unregister_sockopt(&ipt_acc_sockopts);
kfree(ipt_acc_tables);
kfree(ipt_acc_handles);
free_pages((unsigned long)ipt_acc_tmpbuf, 2);
unregister_pernet_subsys(&ipt_acc_net_ops);
}
module_init(account_tg_init);
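The namespace conversion above follows the kernel's standard pernet pattern: the .size field of the pernet_operations makes the core allocate one struct ipt_acc_net per network namespace, .init/.exit take over the allocations that previously happened at module init/exit, and every entry point first resolves its private area via net_generic(net, ipt_acc_net_id), so tables, handles and the temporary buffer are isolated per namespace.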

View File

@@ -73,4 +73,24 @@ static inline void proc_remove(struct proc_dir_entry *de)
}
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)
# define ip6_local_out(xnet, xsk, xskb) ip6_local_out(xskb)
# define ip6_route_me_harder(xnet, xskb) ip6_route_me_harder(xskb)
# define ip_local_out(xnet, xsk, xskb) ip_local_out(xskb)
# define ip_route_me_harder(xnet, xskb, xaddrtype) ip_route_me_harder((xskb), (xaddrtype))
#endif
static inline struct net *par_net(const struct xt_action_param *par)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
return par->state->net;
#else
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
return par->net;
#else
return dev_net((par->in != NULL) ? par->in : par->out);
#endif
#endif
}
#endif /* _XTABLES_COMPAT_H */
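For illustration, a hypothetical target function using the par_net() helper above instead of open-coding the version checks at every call site (the xt_ECHO and xt_DELUDE hunks below go through this helper); example_tg is not part of the tree:

static unsigned int
example_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
	/* resolves correctly on pre-4.4, 4.4..4.9 and 4.10+ kernels */
	struct net *net = par_net(par);

	if (ip_route_me_harder(net, skb, RTN_UNSPEC) != 0)
		return NF_DROP;
	return XT_CONTINUE;
}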

View File

@@ -0,0 +1,17 @@
from Crypto.Hash import SHA256
from Crypto.Hash import MD5
import sys
import hmac
import struct
import socket
from time import time
def gen_hmac(secret, ip):
epoch_mins = (long)(time()/60)
s = hmac.HMAC(secret, digestmod = SHA256)
s.update(socket.inet_aton(socket.gethostbyname(ip)))
s.update(struct.pack("i", epoch_mins)) # "i" is for integer
print s.hexdigest()
if __name__ == '__main__':
gen_hmac(sys.argv[1], sys.argv[2])

extensions/pknock/knock.sh Executable file
View File

@@ -0,0 +1,6 @@
#!/bin/bash
if [ "$#" -ne 4 ]; then
echo "usage: $0 <IP src> <IP dst> <PORT dst> <secret>"
exit 1
fi
python gen_hmac.py "$4" "$1" | socat - "udp-sendto:$2:$3,bind=$1"
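For example, "./knock.sh 192.0.2.10 192.0.2.1 2000 mysecret" (addresses purely illustrative) computes the digest for the current minute via gen_hmac.py and sends it from 192.0.2.10 to UDP port 2000 on 192.0.2.1 using socat.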

View File

@@ -62,8 +62,6 @@ Specifying \fB--autoclose 0\fP means that no automatic close will be performed a
xt_pknock is capable of sending information about successful matches
via a netlink socket to userspace, should you need to implement your own
way of receiving and handling portknock notifications.
Be sure to read the documentation in the doc/pknock/ directory,
or visit the original site \(em http://portknocko.berlios.de/ .
.PP
\fBTCP mode\fP:
.PP

View File

@@ -19,16 +19,14 @@
#include <linux/spinlock.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/crypto.h>
#include <linux/proc_fs.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/seq_file.h>
#include <linux/connector.h>
#include <linux/netfilter/x_tables.h>
#include <crypto/hash.h>
#include "xt_pknock.h"
#include "compat_xtables.h"
@@ -111,9 +109,9 @@ static DEFINE_SPINLOCK(list_lock);
static struct {
const char *algo;
struct crypto_hash *tfm;
struct crypto_shash *tfm;
unsigned int size;
struct hash_desc desc;
struct shash_desc desc;
} crypto = {
.algo = "hmac(sha256)",
.tfm = NULL,
@@ -744,7 +742,6 @@ static bool
has_secret(const unsigned char *secret, unsigned int secret_len, uint32_t ipsrc,
const unsigned char *payload, unsigned int payload_len)
{
struct scatterlist sg[2];
char result[64]; // 64 bytes * 8 = 512 bits
char *hexresult;
unsigned int hexa_size;
@@ -775,11 +772,7 @@ has_secret(const unsigned char *secret, unsigned int secret_len, uint32_t ipsrc,
epoch_min = get_seconds() / 60;
sg_init_table(sg, ARRAY_SIZE(sg));
sg_set_buf(&sg[0], &ipsrc, sizeof(ipsrc));
sg_set_buf(&sg[1], &epoch_min, sizeof(epoch_min));
ret = crypto_hash_setkey(crypto.tfm, secret, secret_len);
ret = crypto_shash_setkey(crypto.tfm, secret, secret_len);
if (ret != 0) {
printk("crypto_hash_setkey() failed ret=%d\n", ret);
goto out;
@@ -790,10 +783,10 @@ has_secret(const unsigned char *secret, unsigned int secret_len, uint32_t ipsrc,
* 4 bytes IP (32 bits) +
* 4 bytes int epoch_min (32 bits)
*/
ret = crypto_hash_digest(&crypto.desc, sg,
sizeof(ipsrc) + sizeof(epoch_min), result);
if (ret != 0) {
printk("crypto_hash_digest() failed ret=%d\n", ret);
if ((ret = crypto_shash_update(&crypto.desc, (const void *)&ipsrc, sizeof(ipsrc))) != 0 ||
(ret = crypto_shash_update(&crypto.desc, (const void *)&epoch_min, sizeof(epoch_min))) != 0 ||
(ret = crypto_shash_final(&crypto.desc, result)) != 0) {
printk("crypto_shash_update/final() failed ret=%d\n", ret);
goto out;
}
@@ -1133,14 +1126,14 @@ static int __init xt_pknock_mt_init(void)
return -ENXIO;
}
crypto.tfm = crypto_alloc_hash(crypto.algo, 0, CRYPTO_ALG_ASYNC);
crypto.tfm = crypto_alloc_shash(crypto.algo, 0, 0);
if (IS_ERR(crypto.tfm)) {
printk(KERN_ERR PKNOCK "failed to load transform for %s\n",
crypto.algo);
return PTR_ERR(crypto.tfm);
}
crypto.size = crypto_hash_digestsize(crypto.tfm);
crypto.size = crypto_shash_digestsize(crypto.tfm);
crypto.desc.tfm = crypto.tfm;
crypto.desc.flags = 0;
@@ -1158,7 +1151,7 @@ static void __exit xt_pknock_mt_exit(void)
xt_unregister_match(&xt_pknock_mt_reg);
kfree(rule_hashtable);
if (crypto.tfm != NULL)
crypto_free_hash(crypto.tfm);
crypto_free_shash(crypto.tfm);
}
module_init(xt_pknock_mt_init);
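The change above moves xt_pknock from the removed "hash" API to the synchronous "shash" API. A minimal, self-contained sketch of the same HMAC-SHA256 pattern follows; the function name pknock_hmac_sketch is illustrative and not part of the module, and a real packet-path user would keep the tfm allocated once, as the module does:

#include <crypto/hash.h>

static int pknock_hmac_sketch(const u8 *secret, unsigned int secret_len,
                              const void *data, unsigned int len, u8 *out)
{
	struct crypto_shash *tfm;
	int ret;

	tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	ret = crypto_shash_setkey(tfm, secret, secret_len);
	if (ret == 0) {
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		desc->flags = 0; /* as the module does; field exists on the 4.x kernels targeted here */
		/* one-shot equivalent of init + update(s) + final;
		 * out must hold crypto_shash_digestsize(tfm) bytes (32 for SHA-256) */
		ret = crypto_shash_digest(desc, data, len, out);
	}
	crypto_free_shash(tfm);
	return ret;
}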

View File

@@ -58,8 +58,12 @@ xt_chaos_total(struct sk_buff *skb, const struct xt_action_param *par)
{
struct xt_action_param local_par;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)
local_par.state = par->state;
#else
local_par.in = par->in,
local_par.out = par->out,
#endif
local_par.match = xm_tcp;
local_par.matchinfo = &tcp_params;
local_par.fragoff = fragoff;
@@ -74,12 +78,16 @@ xt_chaos_total(struct sk_buff *skb, const struct xt_action_param *par)
destiny = (info->variant == XTCHAOS_TARPIT) ? xt_tarpit : xt_delude;
{
struct xt_action_param local_par;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)
local_par.state = par->state;
#else
local_par.in = par->in;
local_par.out = par->out;
local_par.hooknum = par->hooknum;
local_par.family = par->family;
#endif
local_par.target = destiny;
local_par.targinfo = par->targinfo;
local_par.family = par->family;
destiny->target(skb, &local_par);
}
}
@@ -100,9 +108,13 @@ chaos_tg(struct sk_buff *skb, const struct xt_action_param *par)
if ((unsigned int)prandom_u32() <= reject_percentage) {
struct xt_action_param local_par;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)
local_par.state = par->state;
#else
local_par.in = par->in;
local_par.out = par->out;
local_par.hooknum = par->hooknum;
#endif
local_par.target = xt_reject;
local_par.targinfo = &reject_params;
return xt_reject->target(skb, &local_par);
@@ -111,7 +123,12 @@ chaos_tg(struct sk_buff *skb, const struct xt_action_param *par)
/* TARPIT/DELUDE may not be called from the OUTPUT chain */
if (iph->protocol == IPPROTO_TCP &&
info->variant != XTCHAOS_NORMAL &&
par->hooknum != NF_INET_LOCAL_OUT)
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)
par->state->hook
#else
par->hooknum
#endif
!= NF_INET_LOCAL_OUT)
xt_chaos_total(skb, par);
return NF_DROP;

View File

@@ -25,7 +25,8 @@
#include "compat_xtables.h"
#define PFX KBUILD_MODNAME ": "
static void delude_send_reset(struct sk_buff *oldskb, unsigned int hook)
static void delude_send_reset(struct net *net, struct sk_buff *oldskb,
unsigned int hook)
{
struct tcphdr _otcph, *tcph;
const struct tcphdr *oth;
@@ -121,7 +122,7 @@ static void delude_send_reset(struct sk_buff *oldskb, unsigned int hook)
/* ip_route_me_harder expects skb->dst to be set */
skb_dst_set(nskb, dst_clone(skb_dst(oldskb)));
if (ip_route_me_harder(nskb, addr_type))
if (ip_route_me_harder(net, nskb, addr_type))
goto free_nskb;
else
niph = ip_hdr(nskb);
@@ -135,7 +136,7 @@ static void delude_send_reset(struct sk_buff *oldskb, unsigned int hook)
nf_ct_attach(nskb, oldskb);
ip_local_out(nskb);
ip_local_out(net, nskb->sk, nskb);
return;
free_nskb:
@@ -150,7 +151,13 @@ delude_tg(struct sk_buff *skb, const struct xt_action_param *par)
* a problem, as that is supported since Linux 2.6.35. But since we do not
* actually want to have a connection open, we are still going to drop it.
*/
delude_send_reset(skb, par->hooknum);
delude_send_reset(par_net(par), skb,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)
par->state->hook
#else
par->hooknum
#endif
);
return NF_DROP;
}

View File

@@ -34,9 +34,9 @@ static void ether_set(unsigned char *addr, const unsigned char *op,
unsigned int i;
for (i = 0; i < ETH_ALEN && mask > 0; ++i) {
lo_mask = mask % 8;
lo_mask = (mask >= 8) ? 8 : mask;
/* FF << 4 >> 4 = 0F */
lo_mask = ~(uint8_t)0U << lo_mask >> lo_mask;
lo_mask = (uint8_t)(~0U << lo_mask) >> lo_mask;
addr[i] &= lo_mask;
addr[i] |= op[i] & ~lo_mask;
if (mask >= 8)
@@ -55,9 +55,9 @@ static bool ether_cmp(const unsigned char *lh, const unsigned char *rh,
#define ZMACHEX(s) s[0], s[1], s[2], s[3], s[4], s[5]
for (i = 0; i < ETH_ALEN && mask > 0; ++i) {
lo_mask = mask % 8;
lo_mask = (mask >= 8) ? 8 : mask;
/* ~(0xFF << 4 >> 4) = ~0x0F = 0xF0 */
lo_mask = ~(~(uint8_t)0U << lo_mask >> lo_mask);
lo_mask = ~((uint8_t)(~0U << lo_mask) >> lo_mask);
if ((lh[i] ^ rh[i]) & lo_mask)
return false;
if (mask >= 8)
@@ -110,13 +110,12 @@ dhcpmac_tg(struct sk_buff *skb, const struct xt_action_param *par)
return NF_DROP;
for (i = 0; i < sizeof(dh->chaddr); i += 2)
csum_replace2(&udph->check, *(const __be16 *)dh->chaddr, 0);
csum_replace2(&udph->check, *(const __be16 *)(dh->chaddr + i), 0);
memset(dh->chaddr, 0, sizeof(dh->chaddr));
ether_set(dh->chaddr, info->addr, info->mask);
for (i = 0; i < sizeof(dh->chaddr); i += 2)
csum_replace2(&udph->check, 0, *(const __be16 *)dh->chaddr);
csum_replace2(&udph->check, 0, *(const __be16 *)(dh->chaddr + i));
return XT_CONTINUE;
}

View File

@@ -356,7 +356,11 @@ out:
static unsigned int
dnetmap_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)
struct net *net = dev_net(par->state->in ? par->state->in : par->state->out);
#else
struct net *net = dev_net(par->in ? par->in : par->out);
#endif
struct dnetmap_net *dnetmap_net = dnetmap_pernet(net);
struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
@@ -367,16 +371,21 @@ dnetmap_tg(struct sk_buff *skb, const struct xt_action_param *par)
struct dnetmap_entry *e;
struct dnetmap_prefix *p;
__s32 jttl;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)
unsigned int hooknum = par->state->hook;
#else
unsigned int hooknum = par->hooknum;
#endif
NF_CT_ASSERT(par->hooknum == NF_INET_POST_ROUTING ||
par->hooknum == NF_INET_LOCAL_OUT ||
par->hooknum == NF_INET_PRE_ROUTING);
NF_CT_ASSERT(hooknum == NF_INET_POST_ROUTING ||
hooknum == NF_INET_LOCAL_OUT ||
hooknum == NF_INET_PRE_ROUTING);
ct = nf_ct_get(skb, &ctinfo);
jttl = tginfo->flags & XT_DNETMAP_TTL ? tginfo->ttl * HZ : jtimeout;
/* in prerouting we try to map postnat-ip to prenat-ip */
if (par->hooknum == NF_INET_PRE_ROUTING) {
if (hooknum == NF_INET_PRE_ROUTING) {
postnat_ip = ip_hdr(skb)->daddr;
spin_lock_bh(&dnetmap_lock);
@@ -407,7 +416,7 @@ dnetmap_tg(struct sk_buff *skb, const struct xt_action_param *par)
newrange.min_proto = mr->min_proto;
newrange.max_proto = mr->max_proto;
return nf_nat_setup_info(ct, &newrange,
HOOK2MANIP(par->hooknum));
HOOK2MANIP(hooknum));
}
prenat_ip = ip_hdr(skb)->saddr;
@@ -495,7 +504,11 @@ bind_new_prefix:
newrange.max_addr.ip = postnat_ip;
newrange.min_proto = mr->min_proto;
newrange.max_proto = mr->max_proto;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)
return nf_nat_setup_info(ct, &newrange, HOOK2MANIP(par->state->hook));
#else
return nf_nat_setup_info(ct, &newrange, HOOK2MANIP(par->hooknum));
#endif
no_rev_map:
no_free_ip:

View File

@@ -35,7 +35,11 @@ echo_tg6(struct sk_buff *oldskb, const struct xt_action_param *par)
void *payload;
struct flowi6 fl;
struct dst_entry *dst = NULL;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)
struct net *net = dev_net((par->state->in != NULL) ? par->state->in : par->state->out);
#else
struct net *net = dev_net((par->in != NULL) ? par->in : par->out);
#endif
/* This allows us to do the copy operation in fewer lines of code. */
if (skb_linearize(oldskb) < 0)
@@ -76,6 +80,7 @@ echo_tg6(struct sk_buff *oldskb, const struct xt_action_param *par)
payload = skb_header_pointer(oldskb, par->thoff +
sizeof(*oldudp), data_len, NULL);
memcpy(skb_put(newskb, data_len), payload, data_len);
newip->payload_len = htons(newskb->len);
#if 0
/*
@@ -112,7 +117,7 @@ echo_tg6(struct sk_buff *oldskb, const struct xt_action_param *par)
goto free_nskb;
nf_ct_attach(newskb, oldskb);
ip6_local_out(newskb);
ip6_local_out(par_net(par), newskb->sk, newskb);
return NF_DROP;
free_nskb:
@@ -156,8 +161,8 @@ echo_tg4(struct sk_buff *oldskb, const struct xt_action_param *par)
newip->version = oldip->version;
newip->ihl = sizeof(*newip) / 4;
newip->tos = oldip->tos;
newip->id = 0;
newip->frag_off = htons(IP_DF);
newip->id = oldip->id;
newip->frag_off = 0;
newip->protocol = oldip->protocol;
newip->check = 0;
newip->saddr = oldip->daddr;
@@ -173,6 +178,7 @@ echo_tg4(struct sk_buff *oldskb, const struct xt_action_param *par)
payload = skb_header_pointer(oldskb, par->thoff +
sizeof(*oldudp), data_len, NULL);
memcpy(skb_put(newskb, data_len), payload, data_len);
newip->tot_len = htons(newskb->len);
#if 0
/*
@@ -190,7 +196,7 @@ echo_tg4(struct sk_buff *oldskb, const struct xt_action_param *par)
/* ip_route_me_harder expects the skb's dst to be set */
skb_dst_set(newskb, dst_clone(skb_dst(oldskb)));
if (ip_route_me_harder(newskb, RTN_UNSPEC) != 0)
if (ip_route_me_harder(par_net(par), newskb, RTN_UNSPEC) != 0)
goto free_nskb;
newip->ttl = ip4_dst_hoplimit(skb_dst(newskb));
@@ -201,7 +207,7 @@ echo_tg4(struct sk_buff *oldskb, const struct xt_action_param *par)
goto free_nskb;
nf_ct_attach(newskb, oldskb);
ip_local_out(newskb);
ip_local_out(par_net(par), newskb->sk, newskb);
return NF_DROP;
free_nskb:

View File

@@ -58,8 +58,12 @@ static void logmark_ct(const struct nf_conn *ct, enum ip_conntrack_info ctinfo)
printk("%s""ASSURED", prev++ ? "," : "");
if (ct->status & IPS_CONFIRMED)
printk("%s""CONFIRMED", prev++ ? "," : "");
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,9,0)
printk(" lifetime=%lus", nf_ct_expires(ct) / HZ);
#else
printk(" lifetime=%lus",
(jiffies - ct->timeout.expires) / HZ);
#endif
}
static unsigned int
@@ -72,7 +76,11 @@ logmark_tg(struct sk_buff *skb, const struct xt_action_param *par)
printk("<%u>%.*s""iif=%d hook=%s nfmark=0x%x "
"secmark=0x%x classify=0x%x",
info->level, (unsigned int)sizeof(info->prefix), info->prefix,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)
skb_ifindex(skb), hook_names[par->state->hook],
#else
skb_ifindex(skb), hook_names[par->hooknum],
#endif
skb_nfmark(skb), skb_secmark(skb), skb->priority);
ct = nf_ct_get(skb, &ctinfo);
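The lifetime hunk above is what the "support for Linux 4.9" commit refers to: the conntrack timer was replaced by a plain u32 timeout, read through nf_ct_expires(). A hypothetical wrapper mirroring the two branches (the helper name is illustrative):

static inline unsigned long ct_lifetime_secs(const struct nf_conn *ct)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
	/* ct->timeout is a u32 now; nf_ct_expires() returns the jiffies left */
	return nf_ct_expires(ct) / HZ;
#else
	return (jiffies - ct->timeout.expires) / HZ;
#endif
}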

View File

@@ -1,6 +1,6 @@
/*
* "SYSRQ" target extension for Xtables
* Copyright © Jan Engelhardt, 2008 - 2012
* Copyright Jan Engelhardt, 2016
*
* Based upon the ipt_SYSRQ idea by Marek Zalem <marek [at] terminus sk>
*
@@ -21,8 +21,7 @@
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter/x_tables.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <crypto/hash.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include "compat_xtables.h"
@@ -50,7 +49,7 @@ MODULE_PARM_DESC(seqno, "sequence number for remote sysrq");
MODULE_PARM_DESC(debug, "debugging: 0=off, 1=on");
#ifdef WITH_CRYPTO
static struct crypto_hash *sysrq_tfm;
static struct crypto_shash *sysrq_tfm;
static int sysrq_digest_size;
static unsigned char *sysrq_digest_password;
static unsigned char *sysrq_digest;
@@ -75,8 +74,7 @@ static unsigned int sysrq_tg(const void *pdata, uint16_t len)
{
const char *data = pdata;
int i, n;
struct scatterlist sg[2];
struct hash_desc desc;
struct shash_desc desc;
int ret;
long new_seqno = 0;
@@ -117,15 +115,15 @@ static unsigned int sysrq_tg(const void *pdata, uint16_t len)
desc.tfm = sysrq_tfm;
desc.flags = 0;
ret = crypto_hash_init(&desc);
ret = crypto_shash_init(&desc);
if (ret != 0)
goto hash_fail;
sg_init_table(sg, 2);
sg_set_buf(&sg[0], data, n);
i = strlen(sysrq_digest_password);
sg_set_buf(&sg[1], sysrq_digest_password, i);
ret = crypto_hash_digest(&desc, sg, n + i, sysrq_digest);
if (ret != 0)
if (crypto_shash_update(&desc, data, n) != 0)
goto hash_fail;
if (crypto_shash_update(&desc, sysrq_digest_password,
strlen(sysrq_digest_password)) != 0)
goto hash_fail;
if (crypto_shash_final(&desc, sysrq_digest) != 0)
goto hash_fail;
for (i = 0; i < sysrq_digest_size; ++i) {
@@ -303,7 +301,7 @@ static void sysrq_crypto_exit(void)
{
#ifdef WITH_CRYPTO
if (sysrq_tfm)
crypto_free_hash(sysrq_tfm);
crypto_free_shash(sysrq_tfm);
if (sysrq_digest)
kfree(sysrq_digest);
if (sysrq_hexdigest)
@@ -319,7 +317,7 @@ static int __init sysrq_crypto_init(void)
struct timeval now;
int ret;
sysrq_tfm = crypto_alloc_hash(sysrq_hash, 0, CRYPTO_ALG_ASYNC);
sysrq_tfm = crypto_alloc_shash(sysrq_hash, 0, 0);
if (IS_ERR(sysrq_tfm)) {
printk(KERN_WARNING KBUILD_MODNAME
": Error: Could not find or load %s hash\n",
@@ -328,7 +326,7 @@ static int __init sysrq_crypto_init(void)
sysrq_tfm = NULL;
goto fail;
}
sysrq_digest_size = crypto_hash_digestsize(sysrq_tfm);
sysrq_digest_size = crypto_shash_digestsize(sysrq_tfm);
sysrq_digest = kmalloc(sysrq_digest_size, GFP_KERNEL);
ret = -ENOMEM;
if (sysrq_digest == NULL)
@@ -371,7 +369,7 @@ static void __exit sysrq_tg_exit(void)
module_init(sysrq_tg_init);
module_exit(sysrq_tg_exit);
MODULE_DESCRIPTION("Xtables: triggering SYSRQ remotely");
MODULE_AUTHOR("Jan Engelhardt ");
MODULE_AUTHOR("Jan Engelhardt");
MODULE_AUTHOR("John Haxby <john.haxby@oracle.com");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_SYSRQ");

View File

@@ -170,8 +170,8 @@ static bool tarpit_generic(struct tcphdr *tcph, const struct tcphdr *oth,
return true;
}
static void tarpit_tcp4(struct sk_buff *oldskb, unsigned int hook,
unsigned int mode)
static void tarpit_tcp4(struct net *net, struct sk_buff *oldskb,
unsigned int hook, unsigned int mode)
{
struct tcphdr _otcph, *tcph;
const struct tcphdr *oth;
@@ -261,7 +261,7 @@ static void tarpit_tcp4(struct sk_buff *oldskb, unsigned int hook,
#endif
addr_type = RTN_LOCAL;
if (ip_route_me_harder(nskb, addr_type))
if (ip_route_me_harder(net, nskb, addr_type))
goto free_nskb;
else
niph = ip_hdr(nskb);
@@ -284,8 +284,11 @@ static void tarpit_tcp4(struct sk_buff *oldskb, unsigned int hook,
nf_ct_attach(nskb, oldskb);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, NULL, nskb, NULL,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, nskb->sk, nskb, NULL,
skb_dst(nskb)->dev, dst_output);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, nskb->sk, nskb, NULL,
skb_dst(nskb)->dev, dst_output_sk);
#else
NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, nskb, NULL,
@@ -298,8 +301,8 @@ static void tarpit_tcp4(struct sk_buff *oldskb, unsigned int hook,
}
#ifdef WITH_IPV6
static void tarpit_tcp6(struct sk_buff *oldskb, unsigned int hook,
unsigned int mode)
static void tarpit_tcp6(struct net *net, struct sk_buff *oldskb,
unsigned int hook, unsigned int mode)
{
struct sk_buff *nskb;
struct tcphdr *tcph, oth;
@@ -397,15 +400,18 @@ static void tarpit_tcp6(struct sk_buff *oldskb, unsigned int hook,
IPPROTO_TCP,
csum_partial(tcph, sizeof(struct tcphdr), 0));
if (ip6_route_me_harder(nskb))
if (ip6_route_me_harder(net, nskb))
goto free_nskb;
nskb->ip_summed = CHECKSUM_NONE;
nf_ct_attach(nskb, oldskb);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, NULL, nskb, NULL,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, nskb->sk, nskb, NULL,
skb_dst(nskb)->dev, dst_output);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, nskb->sk, nskb, NULL,
skb_dst(nskb)->dev, dst_output_sk);
#else
NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, nskb, NULL,
@@ -449,7 +455,11 @@ tarpit_tg4(struct sk_buff *skb, const struct xt_action_param *par)
if (iph->frag_off & htons(IP_OFFSET))
return NF_DROP;
tarpit_tcp4(skb, par->hooknum, info->variant);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)
tarpit_tcp4(par_net(par), skb, par->state->hook, info->variant);
#else
tarpit_tcp4(par_net(par), skb, par->hooknum, info->variant);
#endif
return NF_DROP;
}
@@ -491,7 +501,11 @@ tarpit_tg6(struct sk_buff *skb, const struct xt_action_param *par)
return NF_DROP;
}
tarpit_tcp6(skb, par->hooknum, info->variant);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)
tarpit_tcp6(par_net(par), skb, par->state->hook, info->variant);
#else
tarpit_tcp6(par_net(par), skb, par->hooknum, info->variant);
#endif
return NF_DROP;
}
#endif

View File

@@ -66,7 +66,8 @@ static int condition_proc_show(struct seq_file *m, void *data)
{
const struct condition_variable *var = m->private;
return seq_printf(m, var->enabled ? "1\n" : "0\n");
seq_printf(m, var->enabled ? "1\n" : "0\n");
return 0;
}
static int condition_proc_open(struct inode *inode, struct file *file)

View File

@@ -45,9 +45,17 @@ static const struct net_device *iface_get(const struct xt_iface_mtinfo *info,
const struct xt_action_param *par, struct net_device **put)
{
if (info->flags & XT_IFACE_DEV_IN)
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)
return par->state->in;
#else
return par->in;
#endif
else if (info->flags & XT_IFACE_DEV_OUT)
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)
return par->state->out;
#else
return par->out;
#endif
return *put = dev_get_by_name(&init_net, info->ifname);
}

View File

@@ -204,7 +204,11 @@ lscan_mt(const struct sk_buff *skb, struct xt_action_param *par)
unsigned int n;
n = lscan_mt_full(ctdata->mark & connmark_mask, ctstate,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)
par->state->in == init_net.loopback_dev, tcph,
#else
par->in == init_net.loopback_dev, tcph,
#endif
skb->len - par->thoff - 4 * tcph->doff);
ctdata->mark = (ctdata->mark & ~connmark_mask) | n;

View File

@@ -64,12 +64,11 @@ module_param_named(gid, quota_list_gid, uint, S_IRUGO | S_IWUSR);
static int quota_proc_show(struct seq_file *m, void *data)
{
struct xt_quota_counter *e = m->private;
int ret;
spin_lock_bh(&e->lock);
ret = seq_printf(m, "%llu\n", e->quota);
seq_printf(m, "%llu\n", e->quota);
spin_unlock_bh(&e->lock);
return ret;
return 0;
}
static int quota_proc_open(struct inode *inode, struct file *file)
@@ -82,7 +81,7 @@ quota_proc_write(struct file *file, const char __user *input,
size_t size, loff_t *loff)
{
struct xt_quota_counter *e = PDE_DATA(file_inode(file));
char buf[sizeof("18446744073709551616")];
char buf[sizeof("+-18446744073709551616")];
if (size > sizeof(buf))
size = sizeof(buf);
@@ -92,9 +91,29 @@ quota_proc_write(struct file *file, const char __user *input,
if (size < sizeof(buf))
buf[size] = '\0';
spin_lock_bh(&e->lock);
e->quota = simple_strtoull(buf, NULL, 0);
spin_unlock_bh(&e->lock);
if (*buf == '+') {
int64_t temp = simple_strtoll(buf + 1, NULL, 0);
spin_lock_bh(&e->lock);
/* Do not let quota become negative if @tmp is very negative */
if (temp > 0 || -temp < e->quota)
e->quota += temp;
else
e->quota = 0;
spin_unlock_bh(&e->lock);
} else if (*buf == '-') {
int64_t temp = simple_strtoll(buf + 1, NULL, 0);
spin_lock_bh(&e->lock);
/* Do not let quota become negative if @tmp is very big */
if (temp < 0 || temp < e->quota)
e->quota -= temp;
else
e->quota = 0;
spin_unlock_bh(&e->lock);
} else {
spin_lock_bh(&e->lock);
e->quota = simple_strtoull(buf, NULL, 0);
spin_unlock_bh(&e->lock);
}
return size;
}
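With this in place, the per-counter proc file (by default under /proc/net/xt_quota/) accepts relative updates as well as absolute ones: writing "+30" adds 30 to the remaining quota, "-5" subtracts 5 (clamping at zero), and a bare number still sets the counter outright.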

View File

@@ -1,4 +1,4 @@
.TH xtables-addons 8 "" "" "v2.8 (2015-08-19)"
.TH xtables-addons 8 "Not For Workgroups" "" "v2.12 (2017-01-11)"
.SH Name
Xtables-addons \(em additional extensions for iptables, ip6tables, etc.
.SH Targets