Compare commits


31 Commits
v1.24 ... v1.26

Author SHA1 Message Date
Jan Engelhardt
a46ca95078 Xtables-addons 1.26 2010-04-30 22:54:06 +02:00
Jan Engelhardt
8b2a266db0 compat_xtables: fix 2.6.34 compile error due to a typo 2010-04-30 22:50:52 +02:00
Jan Engelhardt
03e10ff544 Xtables-addons 1.25 2010-04-26 14:14:02 +02:00
Jan Engelhardt
37986fd785 Merge branch 'tee' 2010-04-26 14:12:03 +02:00
Jan Engelhardt
8ff64f4ef4 xt_TEE: move skb cleanup outwards 2010-04-15 23:47:07 +02:00
Jan Engelhardt
93f6c1a312 xt_TEE: remove debug printks 2010-04-15 22:54:05 +02:00
Jan Engelhardt
b535abce2e xt_TEE: use nf_conntrack_untracked
There is no reason to keep using our own fake nf_conntrack entry.
2010-04-15 21:29:37 +02:00
Jan Engelhardt
5db988626f Merge branch 'condition' 2010-04-15 21:10:36 +02:00
Jan Engelhardt
c6f8f72bf1 xt_condition: use non-interruptible check routine
Patrick McHardy lets it be known: "No need for interruptible locking,
the section is very short and usually there's only a single iptables
process running at a time."
2010-04-09 12:38:48 +02:00
Jan Engelhardt
47cbb07162 xt_condition: remove unnecessary RCU protection
The module does not use the RCU mechanism, so calling
list_add_rcu/list_del_rcu does not make much sense either.
2010-04-09 12:28:12 +02:00
Jan Engelhardt
79c55ab325 Merge branch 'api35' 2010-04-09 12:24:21 +02:00
Jan Engelhardt
11ab4d0acc compat_xtables: correct compile errors 2010-04-09 12:24:19 +02:00
Jan Engelhardt
8ae9ac5433 xt_TEE: use less expensive pskb_copy 2010-04-07 01:31:18 +02:00
Jan Engelhardt
2060a58912 build: do not print enter/exit during banner 2010-04-05 02:22:49 +02:00
Jan Engelhardt
e1eed2b05e Merge branch 'tee' 2010-04-05 02:15:26 +02:00
Jan Engelhardt
7b077c7459 Merge branch 'api35' 2010-04-05 02:15:24 +02:00
Jan Engelhardt
ad146dbeef compat_xtables: move to 2.6.35 API for targets 2010-04-05 02:15:20 +02:00
Jan Engelhardt
fb4c49d794 xt_TEE: new loop detection logic 2010-04-05 00:47:08 +02:00
Jan Engelhardt
a17203e036 xt_TEE: remove old loop detection
The loop detection does not work if the kernel is built without
conntrack. In fact, since cloned packets are sent directly and do not
pass through Xtables again, no loops can occur in the first place.
2010-04-05 00:47:08 +02:00
Jan Engelhardt
987402dc61 xt_TEE: do not retain iif and mark on cloned packet
Patrick McHardy explains in [1] that locally-generated packets (such
as the clones xt_TEE creates) usually start with no iif and no mark
value. Even though cloned packets are a little more special than
ordinary locally-generated ones, let's do it the same way.

[1] http://marc.info/?l=netfilter-devel&m=127012289008156&w=2
2010-04-05 00:47:08 +02:00
Jan Engelhardt
295b6b6d73 xt_TEE: do not limit use to mangle table 2010-04-05 00:47:08 +02:00
Jan Engelhardt
7338a2a400 xt_TEE: free skb when route lookup failed 2010-04-05 00:47:08 +02:00
Jan Engelhardt
ba35636718 xt_TEE: set dont-fragment on cloned packets 2010-04-05 00:47:08 +02:00
Jan Engelhardt
fd19a40dbe xt_TEE: avoid making original packet writable
There is no real need to make the original packet writable, as it is
not modified anyway.
2010-04-05 00:47:08 +02:00
Jan Engelhardt
937571bb9d xt_TEE: decrease TTL on cloned packet 2010-04-05 00:47:03 +02:00
Jan Engelhardt
346fc1a376 xt_TEE: do rechecksumming in PREROUTING too 2010-04-05 00:46:12 +02:00
Jan Engelhardt
56535551b3 xt_TEE: use ip_send_check instead of open-coded logic 2010-04-05 00:46:12 +02:00
Jan Engelhardt
dd8fdd09c8 xt_SYSRQ: do not print error messages on ENOMEM
Memory allocation failures are usually already reported by SLAB and
the ENOMEM error code itself.
2010-04-05 00:45:28 +02:00
Jan Engelhardt
beb3358297 compat_xtables: move to 2.6.35 API for matches 2010-04-05 00:43:47 +02:00
Jan Engelhardt
02d8bdc3d9 build: add a version banner on make modules
The build error logs from module-assistant are otherwise useless: the
tarball filename has been stripped of the version, and configure is
not run either.
2010-04-05 00:43:32 +02:00
Jan Engelhardt
42b77a386a doc: put --with-xtlibdir in the spotlight
Too many people forget to specify the proper location...
2010-04-03 22:08:42 +02:00
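(For illustration: that usually means running something like
"$ ./configure --with-xtlibdir=/usr/lib/xtables" - or /usr/lib64/xtables,
/lib/xtables, etc., depending on the distribution. The correct directory
is wherever the installed iptables actually loads its extensions from;
the paths named here are only examples, and the INSTALL hunk below makes
the same point.)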
22 changed files with 200 additions and 223 deletions

View File

@@ -4,7 +4,7 @@ Installation instructions for Xtables-addons
Xtables-addons uses the well-known configure(autotools) infrastructure
in combination with the kernel's Kbuild system.
$ ./configure
$ ./configure --with-xtlibdir=SEE_BELOW
$ make
# make install
@@ -55,7 +55,10 @@ Configuring and compiling
Specifies the path to where the newly built extensions should
be installed when `make install` is run. It uses the same
default as the Xtables/iptables package, ${libexecdir}/xtables.
default as the Xtables/iptables package, ${libexecdir}/xtables,
but you may need to specify this nevertheless, as autotools
defaults to using /usr/local as prefix, and distributions put
the files in differing locations.
If you want to enable debugging, use

View File

@@ -1,5 +1,5 @@
AC_INIT([xtables-addons], [1.24])
AC_INIT([xtables-addons], [1.26])
AC_CONFIG_HEADERS([config.h])
AC_CONFIG_MACRO_DIR([m4])
AC_PROG_INSTALL

View File

@@ -3,6 +3,24 @@ HEAD
====
Xtables-addons 1.26 (April 30 2010)
===================================
- compat_xtables: fix 2.6.34 compile error due to a typo
Xtables-addons 1.25 (April 26 2010)
===================================
- TEE: do rechecksumming in PREROUTING too
- TEE: decrease TTL on cloned packet
- TEE: set dont-fragment on cloned packets
- TEE: free skb when route lookup failed
- TEE: do not limit use to mangle table
- TEE: do not retain iif and mark on cloned packet
- TEE: new loop detection logic
- TEE: use less expensive pskb_copy
- condition: remove unnecessary RCU protection
Xtables-addons 1.24 (March 17 2010)
===================================
- build: fix build of userspace modules against old (pre-2.6.25)

View File

@@ -264,7 +264,7 @@ static int ipt_acc_table_insert(const char *name, __be32 ip, __be32 netmask)
return -1;
}
static bool ipt_acc_checkentry(const struct xt_tgchk_param *par)
static int ipt_acc_checkentry(const struct xt_tgchk_param *par)
{
struct ipt_acc_info *info = par->targinfo;
int table_nr;
@@ -276,13 +276,13 @@ static bool ipt_acc_checkentry(const struct xt_tgchk_param *par)
if (table_nr == -1) {
printk("ACCOUNT: Table insert problem. Aborting\n");
return false;
return -EINVAL;
}
/* Table nr caching so we don't have to do an extra string compare
for every packet */
info->table_nr = table_nr;
return true;
return 0;
}
static void ipt_acc_destroy(const struct xt_tgdtor_param *par)

View File

@@ -7,6 +7,8 @@
_kcall = -C ${kbuilddir} M=${abs_srcdir}
modules:
@echo -n "Xtables-addons ${PACKAGE_VERSION} - Linux "
@if [ -n "${kbuilddir}" ]; then ${MAKE} ${_kcall} --no-print-directory -s kernelrelease; fi;
${AM_V_silent}if [ -n "${kbuilddir}" ]; then ${MAKE} ${_kcall} modules; fi;
modules_install:

View File

@@ -84,6 +84,19 @@ static bool xtnu_match_check(const char *table, const void *entry,
return nm->checkentry(&local_par);
}
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28) && \
LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 34)
static bool xtnu_match_check(const struct xt_mtchk_param *par)
{
struct xtnu_match *nm = xtcompat_numatch(par->match);
if (nm == NULL)
return false;
if (nm->checkentry == NULL)
return true;
return nm->checkentry(par) == 0 ? true : false;
}
#endif
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18)
static void xtnu_match_destroy(const struct xt_match *cm, void *matchinfo,
@@ -105,7 +118,7 @@ static void xtnu_match_destroy(const struct xt_match *cm, void *matchinfo)
}
#endif
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 27)
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 34)
int xtnu_register_match(struct xtnu_match *nt)
{
struct xt_match *ct;
@@ -127,9 +140,15 @@ int xtnu_register_match(struct xtnu_match *nt)
ct->table = (char *)nt->table;
ct->hooks = nt->hooks;
ct->proto = nt->proto;
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 27)
ct->match = xtnu_match_run;
ct->checkentry = xtnu_match_check;
ct->destroy = xtnu_match_destroy;
#else
ct->match = nt->match;
ct->checkentry = xtnu_match_check;
ct->destroy = nt->destroy;
#endif
ct->matchsize = nt->matchsize;
ct->me = nt->me;
@@ -250,6 +269,20 @@ static bool xtnu_target_check(const char *table, const void *entry,
}
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28) && \
LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 34)
static bool xtnu_target_check(const struct xt_tgchk_param *par)
{
struct xtnu_target *nt = xtcompat_nutarget(par->target);
if (nt == NULL)
return false;
if (nt->checkentry == NULL)
return true;
return nt->checkentry(par) == 0 ? true : false;
}
#endif
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18)
static void xtnu_target_destroy(const struct xt_target *ct, void *targinfo,
unsigned int targinfosize)
@@ -295,6 +328,9 @@ int xtnu_register_target(struct xtnu_target *nt)
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 27)
ct->checkentry = xtnu_target_check;
ct->destroy = xtnu_target_destroy;
#elif LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 34)
ct->checkentry = xtnu_target_check;
ct->destroy = nt->destroy;
#else
ct->checkentry = nt->checkentry;
ct->destroy = nt->destroy;
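Most of this comparison is the same mechanical conversion that the compat
shims above translate back for older kernels: up to 2.6.34, Xtables
->checkentry hooks return bool (true accepts the rule), while from 2.6.35
on they return int (0 accepts, a negative errno rejects and is reported to
userspace). A minimal hedged sketch of the before/after shape - the "foo"
extension and its tginfo layout are made up purely for illustration:

#include <linux/errno.h>
#include <linux/netfilter/x_tables.h>

struct foo_tginfo {
	__u8 level;
};

/* Up to 2.6.34: bool return, true = rule accepted */
static bool foo_tg_check_old(const struct xt_tgchk_param *par)
{
	const struct foo_tginfo *info = par->targinfo;

	return info->level < 8;
}

/* From 2.6.35 on: int return, 0 = accepted, negative errno = rejected */
static int foo_tg_check_new(const struct xt_tgchk_param *par)
{
	const struct foo_tginfo *info = par->targinfo;

	return info->level < 8 ? 0 : -EINVAL;
}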

View File

@@ -60,7 +60,7 @@
# define init_net__proc_net init_net.proc_net
#endif
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 27)
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 34)
# define xt_match xtnu_match
# define xt_register_match xtnu_register_match
# define xt_unregister_match xtnu_unregister_match

View File

@@ -85,7 +85,7 @@ struct xtnu_match {
struct list_head list;
char name[XT_FUNCTION_MAXNAMELEN - 1 - sizeof(void *)];
bool (*match)(const struct sk_buff *, const struct xt_match_param *);
bool (*checkentry)(const struct xt_mtchk_param *);
int (*checkentry)(const struct xt_mtchk_param *);
void (*destroy)(const struct xt_mtdtor_param *);
struct module *me;
const char *table;
@@ -101,7 +101,7 @@ struct xtnu_target {
char name[XT_FUNCTION_MAXNAMELEN - 1 - sizeof(void *)];
unsigned int (*target)(struct sk_buff **,
const struct xt_target_param *);
bool (*checkentry)(const struct xt_tgchk_param *);
int (*checkentry)(const struct xt_tgchk_param *);
void (*destroy)(const struct xt_tgdtor_param *);
struct module *me;
const char *table;

View File

@@ -45,7 +45,7 @@ target(struct sk_buff **pskb, const struct xt_target_param *par)
return XT_CONTINUE;
}
static bool
static int
checkentry(const struct xt_tgchk_param *par)
{
struct ipt_set_info_target *info = par->targinfo;
@@ -54,7 +54,7 @@ checkentry(const struct xt_tgchk_param *par)
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
if (targinfosize != IPT_ALIGN(sizeof(*info))) {
DP("bad target info size %u", targinfosize);
return 0;
return -EINVAL;
}
#endif
@@ -63,7 +63,7 @@ checkentry(const struct xt_tgchk_param *par)
if (index == IP_SET_INVALID_ID) {
ip_set_printk("cannot find add_set index %u as target",
info->add_set.index);
return 0; /* error */
return -EINVAL;
}
}
@@ -72,16 +72,16 @@ checkentry(const struct xt_tgchk_param *par)
if (index == IP_SET_INVALID_ID) {
ip_set_printk("cannot find del_set index %u as target",
info->del_set.index);
return 0; /* error */
return -EINVAL;
}
}
if (info->add_set.flags[IP_SET_MAX_BINDINGS] != 0
|| info->del_set.flags[IP_SET_MAX_BINDINGS] != 0) {
ip_set_printk("That's nasty!");
return 0; /* error */
return -EINVAL;
}
return 1;
return 0;
}
static void destroy(const struct xt_tgdtor_param *par)

View File

@@ -47,7 +47,7 @@ match(const struct sk_buff *skb, const struct xt_match_param *par)
info->match_set.flags[0] & IPSET_MATCH_INV);
}
static bool
static int
checkentry(const struct xt_mtchk_param *par)
{
struct ipt_set_info_match *info = par->matchinfo;
@@ -56,7 +56,7 @@ checkentry(const struct xt_mtchk_param *par)
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
if (matchsize != IPT_ALIGN(sizeof(struct ipt_set_info_match))) {
ip_set_printk("invalid matchsize %d", matchsize);
return 0;
return -EINVAL;
}
#endif
@@ -65,14 +65,14 @@ checkentry(const struct xt_mtchk_param *par)
if (index == IP_SET_INVALID_ID) {
ip_set_printk("Cannot find set indentified by id %u to match",
info->match_set.index);
return 0; /* error */
return -ENOENT;
}
if (info->match_set.flags[IP_SET_MAX_BINDINGS] != 0) {
ip_set_printk("That's nasty!");
return 0; /* error */
return -EINVAL;
}
return 1;
return 0;
}
static void destroy(const struct xt_mtdtor_param *par)

View File

@@ -1064,9 +1064,9 @@ out:
return ret;
}
#define RETURN_ERR(err) do { printk(KERN_ERR PKNOCK err); return false; } while (false)
#define RETURN_ERR(err) do { printk(KERN_ERR PKNOCK err); return -EINVAL; } while (false)
static bool pknock_mt_check(const struct xt_mtchk_param *par)
static int pknock_mt_check(const struct xt_mtchk_param *par)
{
struct xt_pknock_mtinfo *info = par->matchinfo;
@@ -1124,9 +1124,10 @@ static bool pknock_mt_check(const struct xt_mtchk_param *par)
}
if (!add_rule(info))
/* should ENOMEM here */
RETURN_ERR("add_rule() error in checkentry() function.\n");
return true;
return 0;
}
static void pknock_mt_destroy(const struct xt_mtdtor_param *par)

View File

@@ -141,22 +141,22 @@ chaos_tg(struct sk_buff **pskb, const struct xt_target_param *par)
return NF_DROP;
}
static bool chaos_tg_check(const struct xt_tgchk_param *par)
static int chaos_tg_check(const struct xt_tgchk_param *par)
{
const struct xt_chaos_tginfo *info = par->targinfo;
if (info->variant == XTCHAOS_DELUDE && !have_delude) {
printk(KERN_WARNING PFX "Error: Cannot use --delude when "
"DELUDE module not available\n");
return false;
return -EINVAL;
}
if (info->variant == XTCHAOS_TARPIT && !have_tarpit) {
printk(KERN_WARNING PFX "Error: Cannot use --tarpit when "
"TARPIT module not available\n");
return false;
return -EINVAL;
}
return true;
return 0;
}
static struct xt_target chaos_tg_reg = {

View File

@@ -81,17 +81,17 @@ logmark_tg(struct sk_buff **pskb, const struct xt_target_param *par)
return XT_CONTINUE;
}
static bool
static int
logmark_tg_check(const struct xt_tgchk_param *par)
{
const struct xt_logmark_tginfo *info = par->targinfo;
if (info->level >= 8) {
pr_debug("LOGMARK: level %u >= 8\n", info->level);
return false;
return -EINVAL;
}
return true;
return 0;
}
static struct xt_target logmark_tg_reg[] __read_mostly = {

View File

@@ -283,15 +283,15 @@ rawdnat_tg6(struct sk_buff **pskb, const struct xt_target_param *par)
}
#endif
static bool rawnat_tg_check(const struct xt_tgchk_param *par)
static int rawnat_tg_check(const struct xt_tgchk_param *par)
{
if (strcmp(par->table, "raw") == 0 ||
strcmp(par->table, "rawpost") == 0)
return true;
return 0;
printk(KERN_ERR KBUILD_MODNAME " may only be used in the \"raw\" or "
"\"rawpost\" table.\n");
return false;
return -EINVAL;
}
static struct xt_target rawnat_tg_reg[] __read_mostly = {

View File

@@ -253,9 +253,8 @@ sysrq_tg6(struct sk_buff **pskb, const struct xt_target_param *par)
}
#endif
static bool sysrq_tg_check(const struct xt_tgchk_param *par)
static int sysrq_tg_check(const struct xt_tgchk_param *par)
{
if (par->target->family == NFPROTO_IPV4) {
const struct ipt_entry *entry = par->entryinfo;
@@ -272,11 +271,11 @@ static bool sysrq_tg_check(const struct xt_tgchk_param *par)
goto out;
}
return true;
return 0;
out:
printk(KERN_ERR KBUILD_MODNAME ": only available for UDP and UDP-Lite");
return false;
return -EINVAL;
}
static struct xt_target sysrq_tg_reg[] __read_mostly = {
@@ -332,23 +331,14 @@ static int __init sysrq_crypto_init(void)
sysrq_digest_size = crypto_hash_digestsize(sysrq_tfm);
sysrq_digest = kmalloc(sysrq_digest_size, GFP_KERNEL);
ret = -ENOMEM;
if (sysrq_digest == NULL) {
printk(KERN_WARNING KBUILD_MODNAME
": Cannot allocate digest\n");
if (sysrq_digest == NULL)
goto fail;
}
sysrq_hexdigest = kmalloc(2 * sysrq_digest_size + 1, GFP_KERNEL);
if (sysrq_hexdigest == NULL) {
printk(KERN_WARNING KBUILD_MODNAME
": Cannot allocate hexdigest\n");
if (sysrq_hexdigest == NULL)
goto fail;
}
sysrq_digest_password = kmalloc(sizeof(sysrq_password), GFP_KERNEL);
if (sysrq_digest_password == NULL) {
printk(KERN_WARNING KBUILD_MODNAME
": Cannot allocate password digest space\n");
if (sysrq_digest_password == NULL)
goto fail;
}
do_gettimeofday(&now);
sysrq_seqno = now.tv_sec;
ret = xt_register_targets(sysrq_tg_reg, ARRAY_SIZE(sysrq_tg_reg));

View File

@@ -24,7 +24,6 @@
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
# define WITH_CONNTRACK 1
# include <net/netfilter/nf_conntrack.h>
static struct nf_conn tee_track;
#endif
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
# define WITH_IPV6 1
@@ -33,51 +32,23 @@ static struct nf_conn tee_track;
#include "compat_xtables.h"
#include "xt_TEE.h"
static bool tee_active[NR_CPUS];
static const union nf_inet_addr tee_zero_address;
/*
* Try to route the packet according to the routing keys specified in
* route_info. Keys are :
* - ifindex :
* 0 if no oif preferred,
* otherwise set to the index of the desired oif
* - route_info->gateway :
* 0 if no gateway specified,
* otherwise set to the next host to which the pkt must be routed
* If success, skb->dev is the output device to which the packet must
* be sent and skb->dst is not NULL
*
* RETURN: false - if an error occured
* true - if the packet was succesfully routed to the
* destination desired
*/
static bool
tee_tg_route4(struct sk_buff *skb, const struct xt_tee_tginfo *info)
{
const struct iphdr *iph = ip_hdr(skb);
int err;
struct rtable *rt;
struct flowi fl;
memset(&fl, 0, sizeof(fl));
fl.iif = skb_ifindex(skb);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 19)
fl.nl_u.ip4_u.fwmark = skb_nfmark(skb);
#else
fl.mark = skb_nfmark(skb);
#endif
fl.nl_u.ip4_u.daddr = info->gw.ip;
fl.nl_u.ip4_u.tos = RT_TOS(iph->tos);
fl.nl_u.ip4_u.scope = RT_SCOPE_UNIVERSE;
/* Trying to route the packet using the standard routing table. */
err = ip_route_output_key(&init_net, &rt, &fl);
if (err != 0) {
if (net_ratelimit())
pr_debug(KBUILD_MODNAME
": could not route packet (%d)", err);
if (ip_route_output_key(&init_net, &rt, &fl) != 0)
return false;
}
dst_release(skb_dst(skb));
skb_dst_set(skb, &rt->u.dst);
@@ -123,79 +94,58 @@ static void tee_tg_send(struct sk_buff *skb)
skb = skb2;
}
if (dst->hh != NULL) {
if (dst->hh != NULL)
neigh_hh_output(dst->hh, skb);
} else if (dst->neighbour != NULL) {
else if (dst->neighbour != NULL)
dst->neighbour->output(skb);
} else {
if (net_ratelimit())
pr_debug(KBUILD_MODNAME "no hdr & no neighbour cache!\n");
else
kfree_skb(skb);
}
}
/*
* To detect and deter routed packet loopback when using the --tee option, we
* take a page out of the raw.patch book: on the copied skb, we set up a fake
* ->nfct entry, pointing to the local &route_tee_track. We skip routing
* packets when we see they already have that ->nfct.
*/
static unsigned int
tee_tg4(struct sk_buff **pskb, const struct xt_target_param *par)
{
const struct xt_tee_tginfo *info = par->targinfo;
struct sk_buff *skb = *pskb;
struct iphdr *iph;
unsigned int cpu = smp_processor_id();
#ifdef WITH_CONNTRACK
if (skb->nfct == &tee_track.ct_general) {
/*
* Loopback - a packet we already routed, is to be
* routed another time. Avoid that, now.
*/
if (net_ratelimit())
pr_debug(KBUILD_MODNAME "loopback - DROP!\n");
return NF_DROP;
}
#endif
if (!skb_make_writable(pskb, sizeof(struct iphdr)))
return NF_DROP;
skb = *pskb;
/*
* If we are in INPUT, the checksum must be recalculated since
* the length could have changed as a result of defragmentation.
*/
if (par->hooknum == NF_INET_LOCAL_IN) {
struct iphdr *iph = ip_hdr(skb);
iph->check = 0;
iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
if (tee_active[cpu])
return XT_CONTINUE;
/*
* Copy the skb, and route the copy. Will later return %XT_CONTINUE for
* the original skb, which should continue on its way as if nothing has
* happened. The copy should be independently delivered to the TEE
* --gateway.
*/
skb = skb_copy(skb, GFP_ATOMIC);
if (skb == NULL) {
if (net_ratelimit())
pr_debug(KBUILD_MODNAME "copy failed!\n");
skb = pskb_copy(skb, GFP_ATOMIC);
if (skb == NULL)
return XT_CONTINUE;
}
/*
* If we are in PREROUTING/INPUT, the checksum must be recalculated
* since the length could have changed as a result of defragmentation.
*
* We also decrease the TTL to mitigate potential TEE loops
* between two hosts.
*
* Set %IP_DF so that the original source is notified of a potentially
* decreased MTU on the clone route. IPv6 does this too.
*/
iph = ip_hdr(skb);
iph->frag_off |= htons(IP_DF);
if (par->hooknum == NF_INET_PRE_ROUTING ||
par->hooknum == NF_INET_LOCAL_IN)
--iph->ttl;
ip_send_check(iph);
#ifdef WITH_CONNTRACK
/*
* Tell conntrack to forget this packet since it may get confused
* when a packet is leaving with dst address == our address.
* Good idea? Dunno. Need advice.
*
* NEW: mark the skb with our &tee_track, so we avoid looping
* on any already routed packet.
* Tell conntrack to forget this packet. It may have side effects to
* see the same packet twice, as for example, accounting the original
* connection for the cloned packet.
*/
nf_conntrack_put(skb->nfct);
skb->nfct = &tee_track.ct_general;
skb->nfct = &nf_conntrack_untracked.ct_general;
skb->nfctinfo = IP_CT_NEW;
nf_conntrack_get(skb->nfct);
#endif
@@ -216,9 +166,13 @@ tee_tg4(struct sk_buff **pskb, const struct xt_target_param *par)
* Also on purpose, no fragmentation is done, to preserve the
* packet as best as possible.
*/
if (tee_tg_route4(skb, info))
if (tee_tg_route4(skb, info)) {
tee_active[cpu] = true;
tee_tg_send(skb);
tee_active[cpu] = false;
} else {
kfree_skb(skb);
}
return XT_CONTINUE;
}
@@ -231,13 +185,6 @@ tee_tg_route6(struct sk_buff *skb, const struct xt_tee_tginfo *info)
struct flowi fl;
memset(&fl, 0, sizeof(fl));
fl.iif = skb_ifindex(skb);
/* No mark in flowi before 2.6.19 */
#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 19)
fl.nl_u.ip6_u.fwmark = skb_nfmark(skb);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
fl.mark = skb_nfmark(skb);
#endif
fl.nl_u.ip6_u.daddr = info->gw.in6;
fl.nl_u.ip6_u.flowlabel = ((iph->flow_lbl[0] & 0xF) << 16) |
(iph->flow_lbl[1] << 8) | iph->flow_lbl[2];
@@ -247,11 +194,8 @@ tee_tg_route6(struct sk_buff *skb, const struct xt_tee_tginfo *info)
#else
dst = ip6_route_output(dev_net(skb->dev), NULL, &fl);
#endif
if (dst == NULL) {
if (net_ratelimit())
printk(KERN_ERR "ip6_route_output failed for tee\n");
if (dst == NULL)
return false;
}
dst_release(skb_dst(skb));
skb_dst_set(skb, dst);
@@ -265,36 +209,43 @@ tee_tg6(struct sk_buff **pskb, const struct xt_target_param *par)
{
const struct xt_tee_tginfo *info = par->targinfo;
struct sk_buff *skb = *pskb;
unsigned int cpu = smp_processor_id();
/* Try silence. */
#ifdef WITH_CONNTRACK
if (skb->nfct == &tee_track.ct_general)
return NF_DROP;
#endif
if ((skb = skb_copy(skb, GFP_ATOMIC)) == NULL)
if (tee_active[cpu])
return XT_CONTINUE;
skb = pskb_copy(skb, GFP_ATOMIC);
if (skb == NULL)
return XT_CONTINUE;
#ifdef WITH_CONNTRACK
nf_conntrack_put(skb->nfct);
skb->nfct = &tee_track.ct_general;
skb->nfct = &nf_conntrack_untracked.ct_general;
skb->nfctinfo = IP_CT_NEW;
nf_conntrack_get(skb->nfct);
#endif
if (tee_tg_route6(skb, info))
if (par->hooknum == NF_INET_PRE_ROUTING ||
par->hooknum == NF_INET_LOCAL_IN) {
struct ipv6hdr *iph = ipv6_hdr(skb);
--iph->hop_limit;
}
if (tee_tg_route6(skb, info)) {
tee_active[cpu] = true;
tee_tg_send(skb);
tee_active[cpu] = false;
} else {
kfree_skb(skb);
}
return XT_CONTINUE;
}
#endif /* WITH_IPV6 */
static bool tee_tg_check(const struct xt_tgchk_param *par)
static int tee_tg_check(const struct xt_tgchk_param *par)
{
const struct xt_tee_tginfo *info = par->targinfo;
/* 0.0.0.0 and :: not allowed */
return memcmp(&info->gw, &tee_zero_address,
sizeof(tee_zero_address)) != 0;
return (memcmp(&info->gw, &tee_zero_address,
sizeof(tee_zero_address)) == 0) ? -EINVAL : 0;
}
static struct xt_target tee_tg_reg[] __read_mostly = {
@@ -302,7 +253,6 @@ static struct xt_target tee_tg_reg[] __read_mostly = {
.name = "TEE",
.revision = 0,
.family = NFPROTO_IPV4,
.table = "mangle",
.target = tee_tg4,
.targetsize = sizeof(struct xt_tee_tginfo),
.checkentry = tee_tg_check,
@@ -313,7 +263,6 @@ static struct xt_target tee_tg_reg[] __read_mostly = {
.name = "TEE",
.revision = 0,
.family = NFPROTO_IPV6,
.table = "mangle",
.target = tee_tg6,
.targetsize = sizeof(struct xt_tee_tginfo),
.checkentry = tee_tg_check,
@@ -324,27 +273,12 @@ static struct xt_target tee_tg_reg[] __read_mostly = {
static int __init tee_tg_init(void)
{
#ifdef WITH_CONNTRACK
/*
* Set up fake conntrack (stolen from raw.patch):
* - to never be deleted, not in any hashes
*/
atomic_set(&tee_track.ct_general.use, 1);
/* - and look it like as a confirmed connection */
set_bit(IPS_CONFIRMED_BIT, &tee_track.status);
/* Initialize fake conntrack so that NAT will skip it */
tee_track.status |= IPS_NAT_DONE_MASK;
#endif
return xt_register_targets(tee_tg_reg, ARRAY_SIZE(tee_tg_reg));
}
static void __exit tee_tg_exit(void)
{
xt_unregister_targets(tee_tg_reg, ARRAY_SIZE(tee_tg_reg));
/* [SC]: shoud not we cleanup tee_track here? */
}
module_init(tee_tg_init);
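Pulled together from the scattered TEE hunks above: the fake tee_track
conntrack entry is gone, and loop avoidance now relies on a per-CPU flag
that is set only while the clone is being sent, so a packet that re-enters
the rule on the same CPU is passed through instead of being cloned again.
A condensed sketch of that pattern (checksum, TTL, DF and conntrack
handling from the real hunks omitted):

static bool tee_active[NR_CPUS];

static unsigned int
tee_tg4(struct sk_buff **pskb, const struct xt_target_param *par)
{
	unsigned int cpu = smp_processor_id();
	struct sk_buff *skb;

	if (tee_active[cpu])
		/* already sending a clone on this CPU - do not clone again */
		return XT_CONTINUE;

	skb = pskb_copy(*pskb, GFP_ATOMIC);
	if (skb == NULL)
		return XT_CONTINUE;

	if (tee_tg_route4(skb, par->targinfo)) {
		tee_active[cpu] = true;
		tee_tg_send(skb);
		tee_active[cpu] = false;
	} else {
		kfree_skb(skb);	/* route lookup failed: free the clone */
	}
	return XT_CONTINUE;
}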

View File

@@ -56,7 +56,7 @@ struct condition_variable {
/* proc_lock is a user context only semaphore used for write access */
/* to the conditions' list. */
static struct mutex proc_lock;
static DEFINE_MUTEX(proc_lock);
static LIST_HEAD(conditions_list);
static struct proc_dir_entry *proc_net_condition;
@@ -100,16 +100,11 @@ condition_mt(const struct sk_buff *skb, const struct xt_match_param *par)
{
const struct xt_condition_mtinfo *info = par->matchinfo;
const struct condition_variable *var = info->condvar;
bool x;
rcu_read_lock();
x = rcu_dereference(var->enabled);
rcu_read_unlock();
return x ^ info->invert;
return var->enabled ^ info->invert;
}
static bool condition_mt_check(const struct xt_mtchk_param *par)
static int condition_mt_check(const struct xt_mtchk_param *par)
{
struct xt_condition_mtinfo *info = par->matchinfo;
struct condition_variable *var;
@@ -121,21 +116,19 @@ static bool condition_mt_check(const struct xt_mtchk_param *par)
printk(KERN_INFO KBUILD_MODNAME ": name not allowed or too "
"long: \"%.*s\"\n", (unsigned int)sizeof(info->name),
info->name);
return false;
return -EINVAL;
}
/*
* Let's acquire the lock, check for the condition and add it
* or increase the reference counter.
*/
if (mutex_lock_interruptible(&proc_lock) != 0)
return false;
mutex_lock(&proc_lock);
list_for_each_entry(var, &conditions_list, list) {
if (strcmp(info->name, var->status_proc->name) == 0) {
var->refcount++;
mutex_unlock(&proc_lock);
info->condvar = var;
return true;
return 0;
}
}
@@ -143,7 +136,7 @@ static bool condition_mt_check(const struct xt_mtchk_param *par)
var = kmalloc(sizeof(struct condition_variable), GFP_KERNEL);
if (var == NULL) {
mutex_unlock(&proc_lock);
return false;
return -ENOMEM;
}
/* Create the condition variable's proc file entry. */
@@ -152,7 +145,7 @@ static bool condition_mt_check(const struct xt_mtchk_param *par)
if (var->status_proc == NULL) {
kfree(var);
mutex_unlock(&proc_lock);
return false;
return -ENOMEM;
}
var->refcount = 1;
@@ -164,12 +157,12 @@ static bool condition_mt_check(const struct xt_mtchk_param *par)
wmb();
var->status_proc->read_proc = condition_proc_read;
var->status_proc->write_proc = condition_proc_write;
list_add_rcu(&var->list, &conditions_list);
list_add(&var->list, &conditions_list);
var->status_proc->uid = condition_uid_perms;
var->status_proc->gid = condition_gid_perms;
mutex_unlock(&proc_lock);
info->condvar = var;
return true;
return 0;
}
static void condition_mt_destroy(const struct xt_mtdtor_param *par)
@@ -179,16 +172,9 @@ static void condition_mt_destroy(const struct xt_mtdtor_param *par)
mutex_lock(&proc_lock);
if (--var->refcount == 0) {
list_del_rcu(&var->list);
list_del(&var->list);
remove_proc_entry(var->status_proc->name, proc_net_condition);
mutex_unlock(&proc_lock);
/*
* synchronize_rcu() would be good enough, but
* synchronize_net() guarantees that no packet
* will go out with the old rule after
* succesful removal.
*/
synchronize_net();
kfree(var);
return;
}

View File

@@ -125,7 +125,7 @@ fuzzy_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return false;
}
static bool fuzzy_mt_check(const struct xt_mtchk_param *par)
static int fuzzy_mt_check(const struct xt_mtchk_param *par)
{
const struct xt_fuzzy_mtinfo *info = par->matchinfo;
@@ -133,10 +133,10 @@ static bool fuzzy_mt_check(const struct xt_mtchk_param *par)
info->maximum_rate > FUZZY_MAX_RATE ||
info->minimum_rate >= info->maximum_rate) {
printk(KERN_INFO KBUILD_MODNAME ": bad values, please check.\n");
return false;
return -EDOM;
}
return true;
return 0;
}
static struct xt_match fuzzy_mt_reg[] __read_mostly = {

View File

@@ -46,23 +46,28 @@ geoip_add_node(const struct geoip_country_user __user *umem_ptr)
struct geoip_country_user umem;
struct geoip_country_kernel *p;
struct geoip_subnet *s;
int ret;
if (copy_from_user(&umem, umem_ptr, sizeof(umem)) != 0)
return NULL;
return ERR_PTR(-EFAULT);
p = kmalloc(sizeof(struct geoip_country_kernel), GFP_KERNEL);
if (p == NULL)
return NULL;
return ERR_PTR(-ENOMEM);
p->count = umem.count;
p->cc = umem.cc;
s = vmalloc(p->count * sizeof(struct geoip_subnet));
if (s == NULL)
if (s == NULL) {
ret = -ENOMEM;
goto free_p;
}
if (copy_from_user(s, (const void __user *)(unsigned long)umem.subnets,
p->count * sizeof(struct geoip_subnet)) != 0)
p->count * sizeof(struct geoip_subnet)) != 0) {
ret = -EFAULT;
goto free_s;
}
p->subnets = s;
atomic_set(&p->ref, 1);
@@ -78,7 +83,7 @@ geoip_add_node(const struct geoip_country_user __user *umem_ptr)
vfree(s);
free_p:
kfree(p);
return NULL;
return ERR_PTR(ret);
}
static void geoip_try_remove_node(struct geoip_country_kernel *p)
@@ -168,7 +173,7 @@ xt_geoip_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return info->flags & XT_GEOIP_INV;
}
static bool xt_geoip_mt_checkentry(const struct xt_mtchk_param *par)
static int xt_geoip_mt_checkentry(const struct xt_mtchk_param *par)
{
struct xt_geoip_match_info *info = par->matchinfo;
struct geoip_country_kernel *node;
@@ -176,13 +181,15 @@ static bool xt_geoip_mt_checkentry(const struct xt_mtchk_param *par)
for (i = 0; i < info->count; i++) {
node = find_node(info->cc[i]);
if (node == NULL)
if ((node = geoip_add_node((const void __user *)(unsigned long)info->mem[i].user)) == NULL) {
if (node == NULL) {
node = geoip_add_node((const void __user *)(unsigned long)info->mem[i].user);
if (IS_ERR(node)) {
printk(KERN_ERR
"xt_geoip: unable to load '%c%c' into memory\n",
COUNTRY(info->cc[i]));
return false;
"xt_geoip: unable to load '%c%c' into memory: %ld\n",
COUNTRY(info->cc[i]), PTR_ERR(node));
return PTR_ERR(node);
}
}
/* Overwrite the now-useless pointer info->mem[i] with
* a pointer to the node's kernelspace structure.
@@ -192,7 +199,7 @@ static bool xt_geoip_mt_checkentry(const struct xt_mtchk_param *par)
info->mem[i].kernel = node;
}
return true;
return 0;
}
static void xt_geoip_mt_destroy(const struct xt_mtdtor_param *par)
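The geoip hunks switch geoip_add_node() from returning NULL on every
failure to the kernel's error-pointer idiom, so checkentry can propagate
-EFAULT vs. -ENOMEM instead of a generic failure. For reference, a minimal
self-contained sketch of that idiom - the blob_* names are invented for the
example:

#include <linux/err.h>	/* ERR_PTR, IS_ERR, PTR_ERR */
#include <linux/errno.h>
#include <linux/slab.h>

struct blob { int len; };

/* On failure, encode the errno in the pointer instead of returning NULL. */
static struct blob *blob_alloc(int len)
{
	struct blob *b;

	if (len < 0)
		return ERR_PTR(-EINVAL);
	b = kmalloc(sizeof(*b), GFP_KERNEL);
	if (b == NULL)
		return ERR_PTR(-ENOMEM);
	b->len = len;
	return b;
}

/* The caller distinguishes success from failure with IS_ERR/PTR_ERR. */
static int blob_use(int len)
{
	struct blob *b = blob_alloc(len);

	if (IS_ERR(b))
		return PTR_ERR(b);	/* pass -EINVAL/-ENOMEM upwards */
	kfree(b);
	return 0;
}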

View File

@@ -216,16 +216,16 @@ lscan_mt(const struct sk_buff *skb, const struct xt_match_param *par)
(info->match_gr && ctdata->mark == mark_grscan);
}
static bool lscan_mt_check(const struct xt_mtchk_param *par)
static int lscan_mt_check(const struct xt_mtchk_param *par)
{
const struct xt_lscan_mtinfo *info = par->matchinfo;
if ((info->match_stealth & ~1) || (info->match_syn & ~1) ||
(info->match_cn & ~1) || (info->match_gr & ~1)) {
printk(KERN_WARNING PFX "Invalid flags\n");
return false;
return -EINVAL;
}
return true;
return 0;
}
static struct xt_match lscan_mt_reg[] __read_mostly = {

View File

@@ -144,28 +144,28 @@ q2_get_counter(const struct xt_quota_mtinfo2 *q)
return NULL;
}
static bool quota_mt2_check(const struct xt_mtchk_param *par)
static int quota_mt2_check(const struct xt_mtchk_param *par)
{
struct xt_quota_mtinfo2 *q = par->matchinfo;
if (q->flags & ~XT_QUOTA_MASK)
return false;
return -EINVAL;
q->name[sizeof(q->name)-1] = '\0';
if (*q->name == '.' || strchr(q->name, '/') != NULL) {
printk(KERN_ERR "xt_quota<%u>: illegal name\n",
par->match->revision);
return false;
return -EINVAL;
}
q->master = q2_get_counter(q);
if (q->master == NULL) {
printk(KERN_ERR "xt_quota<%u>: memory alloc failure\n",
par->match->revision);
return false;
return -ENOMEM;
}
return true;
return 0;
}
static void quota_mt2_destroy(const struct xt_mtdtor_param *par)

View File

@@ -1,4 +1,4 @@
.TH xtables-addons 8 "v1.24 (2010-03-17)" "" "v1.24 (2010-03-17)"
.TH xtables-addons 8 "v1.26 (2010-04-30)" "" "v1.26 (2010-04-30)"
.SH Name
Xtables-addons \(em additional extensions for iptables, ip6tables, etc.
.SH Targets