Mirror of git://git.code.sf.net/p/xtables-addons/xtables-addons
Compare commits (9 commits):

- ebcd176822
- 6b47d09a36
- 1849c47ae8
- 68d895f75c
- 53b6b862cc
- ed10cb9c17
- 5903f4bcfc
- 5e19871613
- 425a035959
README (54 lines changed)
@@ -1,57 +1,15 @@
 Xtables-addons
 ==============
 
-Xtables-addons is the proclaimed successor to patch-o-matic(-ng). It
-contains extensions that were not accepted in the main Xtables
-package.
+Xtables-addons is a set of extensions that were not accepted in the
+Linux kernel and/or main Xtables/iptables package.
 
-Xtables-addons is different from patch-o-matic in that you do not
-have to patch or recompile either kernel or Xtables(iptables). But
-please see the INSTALL file for the minimum requirements of this
-package.
-
-All code imported from patch-o-matic has been reviewed and all
-apparent bugs like binary stability across multiarches, missing
-sanity checks and incorrect endianess handling have been fixed,
-simplified, and sped up.
+It superseded the earlier patch-o-matic(-ng) package in that no
+patching and/or recompilation of either the kernel or
+Xtables/iptables is required. However, do see the INSTALL file for
+the minimum requirements of Xtables-addons.
 
 
 Included in this package
 ========================
 - xt_ACCOUNT 1.16, libxt_ACCOUNT 1.3
-
-
-Inclusion into a kernel tree
-============================
-
-
-
-
-External extensions
-===================
-
-The program "xa-download-more" can be used to download more
-extensions from 3rd parties into the source tree. The URLs are listed
-in the "sources" file. If the "sources" file contains an entry like
-
-    http://foobar.org/xa/
-
-xa-download-more will inspect http://foobar.org/xa/xa-index.txt for
-files to download. That file may contain
-
-    foobar.tar.bz2
-
-and xa-download-more will then retrieve and unpack
-http://foobar.org/xa/foobar.tar.bz2.
-
-Files that should be contained in the tarball are an mconfig and
-Kbuild files to control building the extension, libxt_foobar.c for
-the userspace extension and xt_foobar.c for the kernel extension.
-
-    mconfig.foobar
-    extensions/Kbuild.foobar
-    extensions/Mbuild.foobar
-    extensions/libxt_foobar.c
-    extensions/libxt_foobar.man
-    extensions/xt_foobar.c
-    extensions/xt_foobar.h

@@ -1,4 +1,4 @@
-AC_INIT([xtables-addons], [3.1])
+AC_INIT([xtables-addons], [3.3])
 AC_CONFIG_AUX_DIR([build-aux])
 AC_CONFIG_HEADERS([config.h])
 AC_CONFIG_MACRO_DIR([m4])
@@ -57,7 +57,7 @@ if test -n "$kbuilddir"; then
     echo "WARNING: Version detection did not succeed. Continue at own luck.";
 else
     echo "$kmajor.$kminor.$kmicro.$kstable in $kbuilddir";
-    if test "$kmajor" -gt 4 -o "$kmajor" -eq 4 -a "$kminor" -gt 18; then
+    if test "$kmajor" -gt 5 -o "$kmajor" -eq 5 -a "$kminor" -gt 0; then
         echo "WARNING: That kernel version is not officially supported yet. Continue at own luck.";
     elif test "$kmajor" -eq 4 -a "$kminor" -ge 18; then
         :

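This check runs against whatever kernel build directory configure was pointed at; a minimal out-of-tree build against the running kernel might look like the sketch below. Note the `--with-kbuild` option name and `./autogen.sh` step are assumptions here, not shown by this diff; check `./configure --help` and the INSTALL file.

```sh
# Hypothetical build against the running kernel's build directory;
# "--with-kbuild" is an assumed name for the $kbuilddir configure switch.
./autogen.sh
./configure --with-kbuild="/lib/modules/$(uname -r)/build"
make
make install
```
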
@@ -3,6 +3,19 @@ HEAD
 ====
 
 
+v3.3 (2019-03-07)
+=================
+Enhancements:
+- support for Linux 5.0
+
+
+v3.2 (2018-09-07)
+=================
+Changes:
+- rework xt_geoip_build to scan the immediate directory for .csv,
+  not to scan for GeoLite2-Country-CSV_\d+.
+
+
 v3.1 (2018-08-14)
 =================
 Enhancements:

@@ -40,6 +40,9 @@
 #error "ipt_ACCOUNT needs at least a PAGE_SIZE of 4096"
 #endif
 
+static unsigned int max_tables_limit = 128;
+module_param(max_tables_limit, uint, 0);
+
 /**
  * Internal table structure, generated by check_entry()
  * @name: name of the table
@@ -185,7 +188,7 @@ static int ipt_acc_table_insert(struct ipt_acc_table *ipt_acc_tables,
         name, NIPQUAD(ip), NIPQUAD(netmask));
 
     /* Look for existing table */
-    for (i = 0; i < ACCOUNT_MAX_TABLES; i++) {
+    for (i = 0; i < max_tables_limit; i++) {
         if (strncmp(ipt_acc_tables[i].name, name,
             ACCOUNT_TABLE_NAME_LEN) == 0) {
             pr_debug("ACCOUNT: Found existing slot: %d - "
@@ -209,7 +212,7 @@ static int ipt_acc_table_insert(struct ipt_acc_table *ipt_acc_tables,
     }
 
     /* Insert new table */
-    for (i = 0; i < ACCOUNT_MAX_TABLES; i++) {
+    for (i = 0; i < max_tables_limit; i++) {
         /* Found free slot */
         if (ipt_acc_tables[i].name[0] == 0) {
             unsigned int netsize = 0;
@@ -258,7 +261,7 @@ static int ipt_acc_table_insert(struct ipt_acc_table *ipt_acc_tables,
 
     /* No free slot found */
     printk("ACCOUNT: No free table slot found (max: %d). "
-        "Please increase ACCOUNT_MAX_TABLES.\n", ACCOUNT_MAX_TABLES);
+        "Please increase the \"max_tables_limit\" module parameter.\n", max_tables_limit);
     return -1;
 }
 
@@ -299,7 +302,7 @@ static void ipt_acc_destroy(const struct xt_tgdtor_param *par)
     info->table_nr = -1; /* Set back to original state */
 
     /* Look for table */
-    for (i = 0; i < ACCOUNT_MAX_TABLES; i++) {
+    for (i = 0; i < max_tables_limit; i++) {
         if (strncmp(ian->ipt_acc_tables[i].name, info->table_name,
             ACCOUNT_TABLE_NAME_LEN) == 0) {
             pr_debug("ACCOUNT: Found table at slot: %d\n", i);
@@ -604,12 +607,12 @@ static int ipt_acc_handle_prepare_read(struct ipt_acc_table *ipt_acc_tables,
     int table_nr = -1;
     uint8_t depth;
 
-    for (table_nr = 0; table_nr < ACCOUNT_MAX_TABLES; table_nr++)
+    for (table_nr = 0; table_nr < max_tables_limit; table_nr++)
         if (strncmp(ipt_acc_tables[table_nr].name, tablename,
             ACCOUNT_TABLE_NAME_LEN) == 0)
             break;
 
-    if (table_nr == ACCOUNT_MAX_TABLES) {
+    if (table_nr == max_tables_limit) {
         printk("ACCOUNT: ipt_acc_handle_prepare_read(): "
             "Table %s not found\n", tablename);
         return -1;
@@ -707,12 +710,12 @@ static int ipt_acc_handle_prepare_read_flush(struct ipt_acc_table *ipt_acc_table
     int table_nr;
     void *new_data_page;
 
-    for (table_nr = 0; table_nr < ACCOUNT_MAX_TABLES; table_nr++)
+    for (table_nr = 0; table_nr < max_tables_limit; table_nr++)
         if (strncmp(ipt_acc_tables[table_nr].name, tablename,
             ACCOUNT_TABLE_NAME_LEN) == 0)
             break;
 
-    if (table_nr == ACCOUNT_MAX_TABLES) {
+    if (table_nr == max_tables_limit) {
         printk("ACCOUNT: ipt_acc_handle_prepare_read_flush(): "
             "Table %s not found\n", tablename);
         return -1;
@@ -1052,7 +1055,7 @@ static int ipt_acc_get_ctl(struct sock *sk, int cmd, void *user, int *len)
     spin_lock_bh(&ian->ipt_acc_lock);
 
     /* Determine size of table names */
-    for (i = 0; i < ACCOUNT_MAX_TABLES; i++) {
+    for (i = 0; i < max_tables_limit; i++) {
         if (ian->ipt_acc_tables[i].name[0] != 0)
             size += strlen(ian->ipt_acc_tables[i].name) + 1;
     }
@@ -1067,7 +1070,7 @@ static int ipt_acc_get_ctl(struct sock *sk, int cmd, void *user, int *len)
     }
     /* Copy table names to userspace */
     tnames = ian->ipt_acc_tmpbuf;
-    for (i = 0; i < ACCOUNT_MAX_TABLES; i++) {
+    for (i = 0; i < max_tables_limit; i++) {
         if (ian->ipt_acc_tables[i].name[0] != 0) {
             name_len = strlen(ian->ipt_acc_tables[i].name) + 1;
             memcpy(tnames, ian->ipt_acc_tables[i].name, name_len);
@@ -1100,7 +1103,7 @@ static int __net_init ipt_acc_net_init(struct net *net)
     memset(ian, 0, sizeof(*ian));
     sema_init(&ian->ipt_acc_userspace_mutex, 1);
 
-    ian->ipt_acc_tables = kcalloc(ACCOUNT_MAX_TABLES,
+    ian->ipt_acc_tables = kcalloc(max_tables_limit,
         sizeof(struct ipt_acc_table), GFP_KERNEL);
     if (ian->ipt_acc_tables == NULL) {
         printk("ACCOUNT: Out of memory allocating account_tables structure");

@@ -34,7 +34,6 @@
 #define IPT_SO_GET_ACCOUNT_GET_TABLE_NAMES (SO_ACCOUNT_BASE_CTL + 8)
 #define IPT_SO_GET_ACCOUNT_MAX IPT_SO_GET_ACCOUNT_GET_TABLE_NAMES
 
-#define ACCOUNT_MAX_TABLES 128
 #define ACCOUNT_TABLE_NAME_LEN 32
 #define ACCOUNT_MAX_HANDLES 10
 

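With ACCOUNT_MAX_TABLES gone from the header, the table limit is now the max_tables_limit module parameter added above, so it can be raised at module load time instead of by recompiling. A minimal sketch (the value 256 is only an example):

```sh
# Load xt_ACCOUNT with room for 256 accounting tables instead of the
# built-in default of 128; the parameter is read once at load time.
modprobe xt_ACCOUNT max_tables_limit=256
```
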
@@ -314,7 +314,7 @@ static void sysrq_crypto_exit(void)
 static int __init sysrq_crypto_init(void)
 {
 #if defined(WITH_CRYPTO)
-    struct timeval now;
+    struct timespec64 now;
     int ret;
 
     sysrq_tfm = crypto_alloc_shash(sysrq_hash, 0, 0);
@@ -339,7 +339,7 @@ static int __init sysrq_crypto_init(void)
         sizeof(sysrq_password), GFP_KERNEL);
     if (sysrq_digest_password == NULL)
         goto fail;
-    do_gettimeofday(&now);
+    ktime_get_real_ts64(&now);
     sysrq_seqno = now.tv_sec;
     return 0;
 

@@ -1,6 +1,6 @@
 #!/usr/bin/perl
 #
-# Converter for MaxMind CSV database to binary, for xt_geoip
+# Converter for MaxMind (GeoLite2) CSV database to binary, for xt_geoip
 # Copyright Jan Engelhardt, 2008-2011
 # Copyright Philip Prindeville, 2018
 #
@@ -16,53 +16,37 @@ my $csv = Text::CSV_XS->new({
     binary => 1,
     eol => $/,
 }); # or Text::CSV
+my $source_dir = ".";
 my $target_dir = ".";
 
 &Getopt::Long::Configure(qw(bundling));
 &GetOptions(
     "D=s" => \$target_dir,
+    "S=s" => \$source_dir,
 );
 
+if (!-d $source_dir) {
+    print STDERR "Source directory \"$source_dir\" does not exist.\n";
+    exit 1;
+}
 if (!-d $target_dir) {
-    print STDERR "Target directory $target_dir does not exist.\n";
+    print STDERR "Target directory \"$target_dir\" does not exist.\n";
     exit 1;
 }
 
 my %countryId;
 my %countryName;
 
-my $dir = findVersion();
-
 &loadCountries();
 
 &dump(&collect());
 
-sub findVersion
-{
-    my @dirs = ();
-
-    opendir(my $dh, '.') || die "Can't open .: $!\n";
-
-    while (readdir $dh) {
-        if ($_ =~ m/^GeoLite2-Country-CSV_\d{8}$/) {
-            push(@dirs, $_);
-        }
-    }
-    closedir $dh;
-
-    @dirs = sort @dirs;
-    return pop(@dirs);
-}
-
 sub loadCountries
 {
-    my $file = "$dir/GeoLite2-Country-Locations-en.csv";
 
     sub id; sub cc; sub long; sub ct; sub cn;
 
     %countryId = ();
     %countryName = ();
 
+    my $file = "$source_dir/GeoLite2-Country-Locations-en.csv";
     open(my $fh, '<', $file) || die "Couldn't open list country names\n";
 
     # first line is headers
@@ -152,8 +136,7 @@ sub collect
     };
 }
 
-    $file = "$dir/GeoLite2-Country-Blocks-IPv4.csv";
-
+    $file = "$source_dir/GeoLite2-Country-Blocks-IPv4.csv";
     open($fh, '<', $file) || die "Can't open IPv4 database\n";
 
     # first line is headers
@@ -194,8 +177,7 @@ sub collect
     # clean up the namespace
     undef &net; undef &id; undef &rid; undef &proxy; undef &sat;
 
-    $file = "$dir/GeoLite2-Country-Blocks-IPv6.csv";
-
+    $file = "$source_dir/GeoLite2-Country-Blocks-IPv6.csv";
     open($fh, '<', $file) || die "Can't open IPv6 database\n";
 
     # first line is headers
@@ -281,4 +263,3 @@ sub writeCountry
     }
     close $fh;
 }
-

@@ -5,7 +5,7 @@ xt_geoip_build \(em convert GeoIP.csv to packed format for xt_geoip
 .SH Syntax
 .PP
 \fI/usr/libexec/xt_geoip/\fP\fBxt_geoip_build\fP [\fB\-D\fP
-\fItarget_dir\fP]
+\fItarget_dir\fP] [\fB\-S\fP \fIsource_dir\fP]
 .SH Description
 .PP
 xt_geoip_build is used to build packed raw representations of the range
@@ -16,20 +16,19 @@ required to be loaded into memory. The ranges in the packed database files are
 also ordered, as xt_geoip relies on this property for its bisection approach to
 work.
 .PP
-It expects to find a directory named
-.IR GeoLite2-Country-CSV_YYYYMMDD
-in the current directory, and will select the most recent if multiple
-instances are found. The
-.IR xt_geoip_dl
-script can be used to populate this directory.
-.PP
 Since the script is usually installed to the libexec directory of the
 xtables-addons package and this is outside $PATH (on purpose), invoking the
 script requires it to be called with a path.
 .PP Options
 .TP
 \fB\-D\fP \fItarget_dir\fP
-Specify a target directory into which the files are to be put.
+Specifies the target directory into which the files are to be put. Defaults to ".".
+.TP
+\fB\-S\fP \fIsource_dir\fP
+Specifies the source directory from which to read the three files by the name
+of \fBGeoLite2\-Country\-Blocks\-IPv4.csv\fP,
+\fBGeoLite2\-Country\-Blocks\-IPv6.csv\fP and
+\fBGeoLite2\-Country\-Locations\-en.csv\fP. Defaults to ".".
 .SH Application
 .PP
 Shell commands to build the databases and put them to where they are expected:

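With the new \-S option, a build step might look like the sketch below; the snapshot directory name and the /usr/share/xt_geoip target are only illustrative (the install path of the script is the one the man page quotes).

```sh
# Build packed xt_geoip databases from a downloaded GeoLite2 CSV snapshot;
# directory names here are examples, not mandated by this change.
mkdir -p /usr/share/xt_geoip
/usr/libexec/xt_geoip/xt_geoip_build \
    -S ./GeoLite2-Country-CSV_20190305 -D /usr/share/xt_geoip
```
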
@@ -1,83 +0,0 @@
-#!/usr/bin/perl -w
-
-use HTTP::Request;
-use LWP::UserAgent;
-use strict;
-
-&main(\@ARGV);
-
-sub main
-{
-    local *FH;
-
-    if (!-d "downloads") {
-        if (!mkdir("downloads")) {
-            die "Could not create downloads/ directory";
-        }
-    }
-
-    open(FH, "<sources");
-    while (defined($_ = <FH>)) {
-        chomp $_;
-        $_ =~ s/#.*//gs;
-        $_ =~ s/^\s+|\s+$//gs;
-        if (length($_) == 0) {
-            next;
-        }
-        &process_index($_);
-    }
-
-    close FH;
-}
-
-sub process_index
-{
-    my $top = shift @_;
-    my($agent, $res, $url);
-    local *FH;
-
-    $agent = LWP::UserAgent->new();
-    $agent->env_proxy();
-
-    $url = &slash_remove("$top/xa-index.txt");
-    print " GET $url\n";
-    $res = $agent->get($url);
-    if (!$res->is_success()) {
-        print STDERR " `-> ", $res->status_line(), "\n";
-        return;
-    }
-
-    foreach my $ext (split(/\s+/, $res->content())) {
-        my($ex_url, $ex_res);
-
-        $ex_url = &slash_remove("$top/$ext");
-        print " GET $ex_url\n";
-
-        $ex_res = $agent->mirror($ex_url, "downloads/$ext");
-        if ($ex_res->code() == 304) {
-            # "Not modified" = up to date
-            next;
-        }
-        if (!$ex_res->is_success()) {
-            print STDERR " `-> ", $ex_res->status_line(), "\n";
-            next;
-        }
-
-        print " UNPACK downloads/$ext\n";
-        system "tar", "-xjf", "downloads/$ext";
-    }
-}
-
-sub slash_remove
-{
-    my $s = shift @_;
-    $s =~ s{(\w+://)(.*)}{$1.&slash_remove2($2)}eg;
-    return $s;
-}
-
-sub slash_remove2
-{
-    my $s = shift @_;
-    $s =~ s{/+}{/}g;
-    return $s;
-}

@@ -1,4 +1,4 @@
-.TH xtables-addons 8 "Windows" "" "v3.1 (2018-08-14)"
+.TH xtables-addons 8 "" "" "v3.3 (2019-03-07)"
 .SH Name
 Xtables-addons \(em additional extensions for iptables, ip6tables, etc.
 .SH Targets