upgrade to 1.10.1

This commit is contained in:
zhaoyuxing 2021-09-16 14:28:52 +08:00
parent 359da94256
commit 951f73e2cc
9 changed files with 11 additions and 3565 deletions

View File

@ -9,11 +9,11 @@ Subject: [PATCH 3/4] pcap-linux: apparently ctc interfaces on s390 has
1 file changed, 4 insertions(+)
diff --git a/pcap-linux.c b/pcap-linux.c
index 900ebbc..58292c3 100644
index 878f27f..6917203 100644
--- a/pcap-linux.c
+++ b/pcap-linux.c
@@ -3197,6 +3197,10 @@ activate_new(pcap_t *handle)
handle->linktype = DLT_LINUX_SLL;
@@ -2474,6 +2474,10 @@ activate_pf_packet(pcap_t *handle, int is_any_device)
}
}
+ /* Hack to make things work on s390 ctc interfaces */
@ -24,6 +24,5 @@ index 900ebbc..58292c3 100644
handle->errbuf);
if (handlep->ifindex == -1) {
--
1.8.3.1
2.23.0

View File

@ -1,31 +0,0 @@
From 3b9b6100912f7bb1ee43f9cfb51e804765a37bd4 Mon Sep 17 00:00:00 2001
From: Guy Harris <guy@alum.mit.edu>
Date: Thu, 3 Oct 2019 09:27:36 -0700
Subject: [PATCH 611/977] With MSVC, abort if _BitScanForward() returns 0.
It should never return zero, as we never pass a value of 0 to
lowest_set_bit(), but this should keep Coverity from getting upset (as a
result of not understanding _BitScanForward() well enough to realize
that if it's passed a non-zero value it will never return 0).
Reported by Charles Smith at Tangible Security.
---
optimize.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/optimize.c b/optimize.c
index 283a6de..e38d2d7 100644
--- a/optimize.c
+++ b/optimize.c
@@ -137,7 +137,7 @@ lowest_set_bit(int mask)
* (It's currently not, in MSVC, even on 64-bit platforms, but....)
*/
if (_BitScanForward(&bit, (unsigned int)mask) == 0)
- return -1; /* mask is zero */
+ abort(); /* mask is zero */
return (int)bit;
}
#elif defined(MSDOS) && defined(__DJGPP__)
--
1.8.3.1

View File

@ -1,280 +0,0 @@
From d71913d38475f5b4f0dc753074337ba29d5c0789 Mon Sep 17 00:00:00 2001
From: Guy Harris <gharris@sonic.net>
Date: Thu, 21 May 2020 22:37:19 -0700
Subject: [PATCH 875/977] optimize: make some variables unsigned.
That should avoid some overflow failure modes, and allow saner overflow
checking.
---
optimize.c | 92 +++++++++++++++++++++++++++++++++---------------------
1 file changed, 57 insertions(+), 35 deletions(-)
diff --git a/optimize.c b/optimize.c
index a70dfc5..d90b463 100644
--- a/optimize.c
+++ b/optimize.c
@@ -115,7 +115,7 @@ pcap_set_print_dot_graph(int value)
/*
* GCC 3.4 and later; we have __builtin_ctz().
*/
- #define lowest_set_bit(mask) __builtin_ctz(mask)
+ #define lowest_set_bit(mask) ((u_int)__builtin_ctz(mask))
#elif defined(_MSC_VER)
/*
* Visual Studio; we support only 2005 and later, so use
@@ -127,7 +127,7 @@ pcap_set_print_dot_graph(int value)
#pragma intrinsic(_BitScanForward)
#endif
-static __forceinline int
+static __forceinline u_int
lowest_set_bit(int mask)
{
unsigned long bit;
@@ -138,14 +138,14 @@ lowest_set_bit(int mask)
*/
if (_BitScanForward(&bit, (unsigned int)mask) == 0)
abort(); /* mask is zero */
- return (int)bit;
+ return (u_int)bit;
}
#elif defined(MSDOS) && defined(__DJGPP__)
/*
* MS-DOS with DJGPP, which declares ffs() in <string.h>, which
* we've already included.
*/
- #define lowest_set_bit(mask) (ffs((mask)) - 1)
+ #define lowest_set_bit(mask) ((u_int)(ffs((mask)) - 1))
#elif (defined(MSDOS) && defined(__WATCOMC__)) || defined(STRINGS_H_DECLARES_FFS)
/*
* MS-DOS with Watcom C, which has <strings.h> and declares ffs() there,
@@ -153,18 +153,18 @@ lowest_set_bit(int mask)
* of the Single UNIX Specification).
*/
#include <strings.h>
- #define lowest_set_bit(mask) (ffs((mask)) - 1)
+ #define lowest_set_bit(mask) (u_int)((ffs((mask)) - 1))
#else
/*
* None of the above.
* Use a perfect-hash-function-based function.
*/
-static int
+static u_int
lowest_set_bit(int mask)
{
unsigned int v = (unsigned int)mask;
- static const int MultiplyDeBruijnBitPosition[32] = {
+ static const u_int MultiplyDeBruijnBitPosition[32] = {
0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
};
@@ -244,17 +244,17 @@ typedef struct {
*/
int done;
- int n_blocks;
+ u_int n_blocks; /* number of blocks in the CFG; guaranteed to be > 0, as it's a RET instruction at a minimum */
struct block **blocks;
- int n_edges;
+ u_int n_edges; /* twice n_blocks, so guaranteed to be > 0 */
struct edge **edges;
/*
* A bit vector set representation of the dominators.
* We round up the set size to the next power of two.
*/
- int nodewords;
- int edgewords;
+ u_int nodewords; /* number of 32-bit words for a bit vector of "number of nodes" bits; guaranteed to be > 0 */
+ u_int edgewords; /* number of 32-bit words for a bit vector of "number of edges" bits; guaranteed to be > 0 */
struct block **levels;
bpf_u_int32 *space;
@@ -279,32 +279,35 @@ typedef struct {
/*
* a := a intersect b
+ * n must be guaranteed to be > 0
*/
#define SET_INTERSECT(a, b, n)\
{\
register bpf_u_int32 *_x = a, *_y = b;\
- register int _n = n;\
- while (--_n >= 0) *_x++ &= *_y++;\
+ register u_int _n = n;\
+ do *_x++ &= *_y++; while (--_n != 0);\
}
/*
* a := a - b
+ * n must be guaranteed to be > 0
*/
#define SET_SUBTRACT(a, b, n)\
{\
register bpf_u_int32 *_x = a, *_y = b;\
- register int _n = n;\
- while (--_n >= 0) *_x++ &=~ *_y++;\
+ register u_int _n = n;\
+ do *_x++ &=~ *_y++; while (--_n != 0);\
}
/*
* a := a union b
+ * n must be guaranteed to be > 0
*/
#define SET_UNION(a, b, n)\
{\
register bpf_u_int32 *_x = a, *_y = b;\
- register int _n = n;\
- while (--_n >= 0) *_x++ |= *_y++;\
+ register u_int _n = n;\
+ do *_x++ |= *_y++; while (--_n != 0);\
}
uset all_dom_sets;
@@ -401,7 +404,8 @@ find_levels(opt_state_t *opt_state, struct icode *ic)
static void
find_dom(opt_state_t *opt_state, struct block *root)
{
- int i;
+ u_int i;
+ int level;
struct block *b;
bpf_u_int32 *x;
@@ -409,16 +413,25 @@ find_dom(opt_state_t *opt_state, struct block *root)
* Initialize sets to contain all nodes.
*/
x = opt_state->all_dom_sets;
+ /*
+ * These are both guaranteed to be > 0, so the product is
+ * guaranteed to be > 0.
+ *
+ * XXX - but what if it overflows?
+ */
i = opt_state->n_blocks * opt_state->nodewords;
- while (--i >= 0)
+ do
*x++ = 0xFFFFFFFFU;
+ while (--i != 0);
/* Root starts off empty. */
- for (i = opt_state->nodewords; --i >= 0;)
+ i = opt_state->nodewords;
+ do
root->dom[i] = 0;
+ while (--i != 0);
/* root->level is the highest level no found. */
- for (i = root->level; i >= 0; --i) {
- for (b = opt_state->levels[i]; b; b = b->link) {
+ for (level = root->level; level >= 0; --level) {
+ for (b = opt_state->levels[level]; b; b = b->link) {
SET_INSERT(b->dom, b->id);
if (JT(b) == 0)
continue;
@@ -445,19 +458,28 @@ propedom(opt_state_t *opt_state, struct edge *ep)
static void
find_edom(opt_state_t *opt_state, struct block *root)
{
- int i;
+ u_int i;
uset x;
+ int level;
struct block *b;
x = opt_state->all_edge_sets;
- for (i = opt_state->n_edges * opt_state->edgewords; --i >= 0; )
+ /*
+ * These are both guaranteed to be > 0, so the product is
+ * guaranteed to be > 0.
+ *
+ * XXX - but what if it overflows?
+ */
+ i = opt_state->n_edges * opt_state->edgewords;
+ do
x[i] = 0xFFFFFFFFU;
+ while (--i != 0);
/* root->level is the highest level no found. */
memset(root->et.edom, 0, opt_state->edgewords * sizeof(*(uset)0));
memset(root->ef.edom, 0, opt_state->edgewords * sizeof(*(uset)0));
- for (i = root->level; i >= 0; --i) {
- for (b = opt_state->levels[i]; b != 0; b = b->link) {
+ for (level = root->level; level >= 0; --level) {
+ for (b = opt_state->levels[level]; b != 0; b = b->link) {
propedom(opt_state, &b->et);
propedom(opt_state, &b->ef);
}
@@ -474,7 +496,7 @@ find_edom(opt_state_t *opt_state, struct block *root)
static void
find_closure(opt_state_t *opt_state, struct block *root)
{
- int i;
+ int level;
struct block *b;
/*
@@ -484,8 +506,8 @@ find_closure(opt_state_t *opt_state, struct block *root)
opt_state->n_blocks * opt_state->nodewords * sizeof(*opt_state->all_closure_sets));
/* root->level is the highest level no found. */
- for (i = root->level; i >= 0; --i) {
- for (b = opt_state->levels[i]; b; b = b->link) {
+ for (level = root->level; level >= 0; --level) {
+ for (b = opt_state->levels[level]; b; b = b->link) {
SET_INSERT(b->closure, b->id);
if (JT(b) == 0)
continue;
@@ -1555,7 +1577,7 @@ fold_edge(struct block *child, struct edge *ep)
static void
opt_j(opt_state_t *opt_state, struct edge *ep)
{
- register int i, k;
+ register u_int i, k;
register struct block *target;
if (JT(ep->succ) == 0)
@@ -1839,17 +1861,16 @@ link_inedge(struct edge *parent, struct block *child)
static void
find_inedges(opt_state_t *opt_state, struct block *root)
{
- int i;
struct block *b;
- for (i = 0; i < opt_state->n_blocks; ++i)
+ for (u_int i = 0; i < opt_state->n_blocks; ++i)
opt_state->blocks[i]->in_edges = 0;
/*
* Traverse the graph, adding each edge to the predecessor
* list of its successors. Skip the leaves (i.e. level 0).
*/
- for (i = root->level; i > 0; --i) {
+ for (int i = root->level; i > 0; --i) {
for (b = opt_state->levels[i]; b != 0; b = b->link) {
link_inedge(&b->et, JT(b));
link_inedge(&b->ef, JF(b));
@@ -2005,7 +2026,7 @@ static void
intern_blocks(opt_state_t *opt_state, struct icode *ic)
{
struct block *p;
- int i, j;
+ u_int i, j;
int done1; /* don't shadow global */
top:
done1 = 1;
@@ -2014,7 +2035,8 @@ intern_blocks(opt_state_t *opt_state, struct icode *ic)
mark_code(ic);
- for (i = opt_state->n_blocks - 1; --i >= 0; ) {
+ for (i = opt_state->n_blocks - 1; i != 0; ) {
+ --i;
if (!isMarked(ic, opt_state->blocks[i]))
continue;
for (j = i + 1; j < opt_state->n_blocks; ++j) {
--
2.27.0

View File

@ -1,96 +0,0 @@
From 2aa7d268e2ce37025d68a3f428001f6f779f56e8 Mon Sep 17 00:00:00 2001
From: Guy Harris <gharris@sonic.net>
Date: Thu, 21 May 2020 23:36:09 -0700
Subject: [PATCH 876/977] optimize: fix some of those changes.
For loops with predecrements that we eliminated because we don't want to
rely on the loop index going negative if it's zero before the
predecrement - the index is now unsigned, so it'll *never* go negative -
we revert to a similar loop, but with the test checking whether the
index is 0 and decrementing it as the first action in the loop body.
(Yeah, it means that, on machines with condition codes, you don't get to
use the condition code setting of the decrement "for free"; we'll let
the compiler and the processor figure out how to do that efficiently.)
---
optimize.c | 35 ++++++++++++++++-------------------
1 file changed, 16 insertions(+), 19 deletions(-)
diff --git a/optimize.c b/optimize.c
index bea8cdc..07fc0f3 100644
--- a/optimize.c
+++ b/optimize.c
@@ -427,20 +427,18 @@ find_dom(opt_state_t *opt_state, struct block *root)
*/
x = opt_state->all_dom_sets;
/*
- * These are both guaranteed to be > 0, so the product is
- * guaranteed to be > 0.
- *
- * XXX - but what if it overflows?
+ * XXX - what if the multiplication overflows?
*/
i = opt_state->n_blocks * opt_state->nodewords;
- do
+ while (i != 0) {
+ --i;
*x++ = 0xFFFFFFFFU;
- while (--i != 0);
+ }
/* Root starts off empty. */
- i = opt_state->nodewords;
- do
+ for (i = opt_state->nodewords; i != 0;) {
+ --i;
root->dom[i] = 0;
- while (--i != 0);
+ }
/* root->level is the highest level no found. */
for (level = root->level; level >= 0; --level) {
@@ -478,15 +476,12 @@ find_edom(opt_state_t *opt_state, struct block *root)
x = opt_state->all_edge_sets;
/*
- * These are both guaranteed to be > 0, so the product is
- * guaranteed to be > 0.
- *
- * XXX - but what if it overflows?
+ * XXX - what if the multiplication overflows?
*/
- i = opt_state->n_edges * opt_state->edgewords;
- do
+ for (i = opt_state->n_edges * opt_state->edgewords; i != 0; ) {
+ --i;
x[i] = 0xFFFFFFFFU;
- while (--i != 0);
+ }
/* root->level is the highest level no found. */
memset(root->et.edom, 0, opt_state->edgewords * sizeof(*(uset)0));
@@ -2138,17 +2133,19 @@ link_inedge(struct edge *parent, struct block *child)
static void
find_inedges(opt_state_t *opt_state, struct block *root)
{
+ u_int i;
+ int level;
struct block *b;
- for (u_int i = 0; i < opt_state->n_blocks; ++i)
+ for (i = 0; i < opt_state->n_blocks; ++i)
opt_state->blocks[i]->in_edges = 0;
/*
* Traverse the graph, adding each edge to the predecessor
* list of its successors. Skip the leaves (i.e. level 0).
*/
- for (int i = root->level; i > 0; --i) {
- for (b = opt_state->levels[i]; b != 0; b = b->link) {
+ for (level = root->level; level > 0; --level) {
+ for (b = opt_state->levels[level]; b != 0; b = b->link) {
link_inedge(&b->et, JT(b));
link_inedge(&b->ef, JF(b));
}
--
1.8.3.1

View File

@ -1,169 +0,0 @@
From 119b90af867f3073c571ee333fd47dcd0dbccd3a Mon Sep 17 00:00:00 2001
From: Guy Harris <gharris@sonic.net>
Date: Fri, 22 May 2020 01:20:45 -0700
Subject: [PATCH] optimize: add a bunch of overflow checks.
This should address GitHub issue #929; it picks up checks from GitHub
pull request #930, and adds some more.
Also, make some more values unsigned.
---
gencode.h | 4 +--
optimize.c | 82 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++------
2 files changed, 76 insertions(+), 10 deletions(-)
diff --git a/gencode.h b/gencode.h
index dc099f5..053e85f 100644
--- a/gencode.h
+++ b/gencode.h
@@ -240,7 +240,7 @@ typedef bpf_u_int32 *uset;
#define N_ATOMS (BPF_MEMWORDS+2)
struct edge {
- int id;
+ u_int id;
int code;
uset edom;
struct block *succ;
@@ -254,7 +254,7 @@ struct edge {
};
struct block {
- int id;
+ u_int id;
struct slist *stmts; /* side effect stmts */
struct stmt s; /* branch stmt */
int mark;
diff --git a/optimize.c b/optimize.c
index 07fc0f3..18fbe70 100644
--- a/optimize.c
+++ b/optimize.c
@@ -2457,13 +2459,19 @@ count_blocks(struct icode *ic, struct block *p)
static void
number_blks_r(opt_state_t *opt_state, struct icode *ic, struct block *p)
{
- int n;
+ u_int n;
if (p == 0 || isMarked(ic, p))
return;
Mark(ic, p);
n = opt_state->n_blocks++;
+ if (opt_state->n_blocks == 0) {
+ /*
+ * Overflow.
+ */
+ opt_error(opt_state, "filter is too complex to optimize");
+ }
p->id = n;
opt_state->blocks[n] = p;
@@ -2511,6 +2519,8 @@ opt_init(opt_state_t *opt_state, struct icode *ic)
{
bpf_u_int32 *p;
int i, n, max_stmts;
+ u_int product;
+ size_t block_memsize, edge_memsize;
/*
* First, count the blocks, so we can malloc an array to map
@@ -2526,6 +2536,12 @@ opt_init(opt_state_t *opt_state, struct icode *ic)
number_blks_r(opt_state, ic, ic->root);
opt_state->n_edges = 2 * opt_state->n_blocks;
+ if ((opt_state->n_edges / 2) != opt_state->n_blocks) {
+ /*
+ * Overflow.
+ */
+ opt_error(opt_state, "filter is too complex to optimize");
+ }
opt_state->edges = (struct edge **)calloc(opt_state->n_edges, sizeof(*opt_state->edges));
if (opt_state->edges == NULL) {
opt_error(opt_state, "malloc");
@@ -2542,9 +2558,59 @@ opt_init(opt_state_t *opt_state, struct icode *ic)
opt_state->edgewords = opt_state->n_edges / (8 * sizeof(bpf_u_int32)) + 1;
opt_state->nodewords = opt_state->n_blocks / (8 * sizeof(bpf_u_int32)) + 1;
+ /*
+ * Make sure opt_state->n_blocks * opt_state->nodewords fits
+ * in a u_int; we use it as a u_int number-of-iterations
+ * value.
+ */
+ product = opt_state->n_blocks * opt_state->nodewords;
+ if ((product / opt_state->n_blocks) != opt_state->nodewords) {
+ /*
+ * XXX - just punt and don't try to optimize?
+ * In practice, this is unlikely to happen with
+ * a normal filter.
+ */
+ opt_error(opt_state, "filter is too complex to optimize");
+ }
+
+ /*
+ * Make sure the total memory required for that doesn't
+ * overflow.
+ */
+ block_memsize = (size_t)2 * product * sizeof(*opt_state->space);
+ if ((block_memsize / product) != 2 * sizeof(*opt_state->space)) {
+ opt_error(opt_state, "filter is too complex to optimize");
+ }
+
+ /*
+ * Make sure opt_state->n_edges * opt_state->edgewords fits
+ * in a u_int; we use it as a u_int number-of-iterations
+ * value.
+ */
+ product = opt_state->n_edges * opt_state->edgewords;
+ if ((product / opt_state->n_edges) != opt_state->edgewords) {
+ opt_error(opt_state, "filter is too complex to optimize");
+ }
+
+ /*
+ * Make sure the total memory required for that doesn't
+ * overflow.
+ */
+ edge_memsize = (size_t)product * sizeof(*opt_state->space);
+ if (edge_memsize / product != sizeof(*opt_state->space)) {
+ opt_error(opt_state, "filter is too complex to optimize");
+ }
+
+ /*
+ * Make sure the total memory required for both of them dosn't
+ * overflow.
+ */
+ if (block_memsize > SIZE_MAX - edge_memsize) {
+ opt_error(opt_state, "filter is too complex to optimize");
+ }
+
/* XXX */
- opt_state->space = (bpf_u_int32 *)malloc(2 * opt_state->n_blocks * opt_state->nodewords * sizeof(*opt_state->space)
- + opt_state->n_edges * opt_state->edgewords * sizeof(*opt_state->space));
+ opt_state->space = (bpf_u_int32 *)malloc(block_memsize + edge_memsize);
if (opt_state->space == NULL) {
opt_error(opt_state, "malloc");
}
@@ -2920,7 +2986,7 @@ dot_dump_node(struct icode *ic, struct block *block, struct bpf_program *prog,
icount = slength(block->stmts) + 1 + block->longjt + block->longjf;
noffset = min(block->offset + icount, (int)prog->bf_len);
- fprintf(out, "\tblock%d [shape=ellipse, id=\"block-%d\" label=\"BLOCK%d\\n", block->id, block->id, block->id);
+ fprintf(out, "\tblock%u [shape=ellipse, id=\"block-%u\" label=\"BLOCK%u\\n", block->id, block->id, block->id);
for (i = block->offset; i < noffset; i++) {
fprintf(out, "\\n%s", bpf_image(prog->bf_insns + i, i));
}
@@ -2947,9 +3013,9 @@ dot_dump_edge(struct icode *ic, struct block *block, FILE *out)
Mark(ic, block);
if (JT(block)) {
- fprintf(out, "\t\"block%d\":se -> \"block%d\":n [label=\"T\"]; \n",
+ fprintf(out, "\t\"block%u\":se -> \"block%u\":n [label=\"T\"]; \n",
block->id, JT(block)->id);
- fprintf(out, "\t\"block%d\":sw -> \"block%d\":n [label=\"F\"]; \n",
+ fprintf(out, "\t\"block%u\":sw -> \"block%u\":n [label=\"F\"]; \n",
block->id, JF(block)->id);
}
dot_dump_edge(ic, JT(block), out);
--
1.8.3.1

BIN
libpcap-1.10.1.tar.gz Normal file

Binary file not shown.

Binary file not shown.

View File

@ -1,19 +1,14 @@
Name: libpcap
Epoch: 14
Version: 1.9.1
Release: 8
Version: 1.10.1
Release: 1
Summary: A system-independent interface for user-level packet capture
License: BSD with advertising
License: BSD
URL: http://www.tcpdump.org
Source0: http://www.tcpdump.org/release/%{name}-%{version}.tar.gz
Patch0: 0003-pcap-linux-apparently-ctc-interfaces-on-s390-has-eth.patch
Patch1: clean-up-signed-vs-unsigned-do-more-error-checking-in-the-parser.patch
Patch2: fix-optimize-add-a-bunch-of-overflow-checks.patch
Patch3: 0611-With-MSVC-abort-if-_BitScanForward-returns-0.patch
Patch4: 0875-optimize-make-some-variables-unsigned.patch
Patch5: 0876-optimize-fix-some-of-those-changes.patch
Patch6: pcap-config-mitigate-multilib-conflict.patch
Patch1: pcap-config-mitigate-multilib-conflict.patch
BuildRequires: bison bluez-libs-devel flex gcc glibc-kernheaders >= 2.2.0
@ -64,6 +59,9 @@ export CFLAGS="$RPM_OPT_FLAGS -fno-strict-aliasing"
%{_mandir}/man*
%changelog
* Thu Sep 16 2021 xinghe <xinghe2@huawei.com> - 14:1.10.1-1
- DESC:upgrade to 1.10.1
* Mon Aug 09 2021 yanglu <yanglu72@huawei.com> - 14:1.9.1-8
- DESC:delete -S git from %autosetup,and delete Buildrequires git