From ceda772a2670cc68564ce1efaee28390bb2d942a Mon Sep 17 00:00:00 2001
From: Kajetan Staszkiewicz
Date: Mon, 13 Aug 2018 17:22:40 +0200
Subject: [PATCH 1/5] Convert table counters to counter(9) facility
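
Convert both the per-table and the per-entry statistics from plain
u_int64_t fields, which the packet path incremented in place without
synchronisation, to the per-CPU counter(9) facility, whose increments
are race-free without additional locking.  Per-entry counters used to
be allocated lazily from a dedicated UMA zone on the first hit with
M_NOWAIT, so updates could be dropped silently under memory pressure;
they are now embedded in struct pfr_kentry and always allocated.  The
userland-visible struct pfr_tstats is left unchanged: a new kernel-only
struct pfr_ktstats carries the counters, and their values are gathered
with counter_u64_fetch() on export in pfr_get_tstats() and
pfr_walktree().

For reference, a minimal sketch of the counter(9) lifecycle this patch
follows (the example_* names are illustrative only, not pf(4) symbols):

	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/counter.h>
	#include <sys/malloc.h>

	static counter_u64_t example_pkts;

	static void
	example_init(void)
	{
		/* Allocate the per-CPU storage; M_WAITOK cannot fail. */
		example_pkts = counter_u64_alloc(M_WAITOK);
	}

	static void
	example_hot_path(void)
	{
		/* Lock-free per-CPU increment, cheap enough per packet. */
		counter_u64_add(example_pkts, 1);
	}

	static uint64_t
	example_export(void)
	{
		/* Sum all per-CPU slots into a single 64-bit value. */
		return (counter_u64_fetch(example_pkts));
	}

	static void
	example_fini(void)
	{
		counter_u64_zero(example_pkts);	/* reset, cf. pfr_clstats_*() */
		counter_u64_free(example_pkts);
	}

counter_u64_fetch() is comparatively expensive, which is why fetching
is confined to the ioctl export paths and never done per packet.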
---
 sys/net/pfvar.h           |  37 +++++++----
 sys/netpfil/pf/pf_table.c | 154 ++++++++++++++++++++++++++++++++++------------
 2 files changed, 138 insertions(+), 53 deletions(-)

diff --git a/sys/net/pfvar.h b/sys/net/pfvar.h
index ea0d55f9010..4076780caeb 100644
--- a/sys/net/pfvar.h
+++ b/sys/net/pfvar.h
@@ -994,76 +994,87 @@ enum { PFR_REFCNT_RULE, PFR_REFCNT_ANCHOR, PFR_REFCNT_MAX };
 struct pfr_tstats {
 	struct pfr_table pfrts_t;
 	u_int64_t	 pfrts_packets[PFR_DIR_MAX][PFR_OP_TABLE_MAX];
 	u_int64_t	 pfrts_bytes[PFR_DIR_MAX][PFR_OP_TABLE_MAX];
 	u_int64_t	 pfrts_match;
 	u_int64_t	 pfrts_nomatch;
 	long		 pfrts_tzero;
 	int		 pfrts_cnt;
 	int		 pfrts_refcnt[PFR_REFCNT_MAX];
 };
+
+struct pfr_ktstats {
+	struct pfr_table pfrts_t;
+	counter_u64_t	 pfrkts_packets[PFR_DIR_MAX][PFR_OP_TABLE_MAX];
+	counter_u64_t	 pfrkts_bytes[PFR_DIR_MAX][PFR_OP_TABLE_MAX];
+	counter_u64_t	 pfrkts_match;
+	counter_u64_t	 pfrkts_nomatch;
+	long		 pfrkts_tzero;
+	int		 pfrkts_cnt;
+	int		 pfrkts_refcnt[PFR_REFCNT_MAX];
+};
 #define	pfrts_name	pfrts_t.pfrt_name
 #define	pfrts_flags	pfrts_t.pfrt_flags
 
 #ifndef _SOCKADDR_UNION_DEFINED
 #define	_SOCKADDR_UNION_DEFINED
 union sockaddr_union {
 	struct sockaddr		sa;
 	struct sockaddr_in	sin;
 	struct sockaddr_in6	sin6;
 };
 #endif /* _SOCKADDR_UNION_DEFINED */
 
 struct pfr_kcounters {
-	u_int64_t		 pfrkc_packets[PFR_DIR_MAX][PFR_OP_ADDR_MAX];
-	u_int64_t		 pfrkc_bytes[PFR_DIR_MAX][PFR_OP_ADDR_MAX];
+	counter_u64_t		 pfrkc_packets[PFR_DIR_MAX][PFR_OP_ADDR_MAX];
+	counter_u64_t		 pfrkc_bytes[PFR_DIR_MAX][PFR_OP_ADDR_MAX];
+	long			 pfrkc_tzero;
 };
 
 SLIST_HEAD(pfr_kentryworkq, pfr_kentry);
 struct pfr_kentry {
 	struct radix_node	 pfrke_node[2];
 	union sockaddr_union	 pfrke_sa;
 	SLIST_ENTRY(pfr_kentry)	 pfrke_workq;
-	struct pfr_kcounters	*pfrke_counters;
-	long			 pfrke_tzero;
+	struct pfr_kcounters	 pfrke_counters;
 	u_int8_t		 pfrke_af;
 	u_int8_t		 pfrke_net;
 	u_int8_t		 pfrke_not;
 	u_int8_t		 pfrke_mark;
 };
 
 SLIST_HEAD(pfr_ktableworkq, pfr_ktable);
 RB_HEAD(pfr_ktablehead, pfr_ktable);
 struct pfr_ktable {
-	struct pfr_tstats	 pfrkt_ts;
+	struct pfr_ktstats	 pfrkt_kts;
 	RB_ENTRY(pfr_ktable)	 pfrkt_tree;
 	SLIST_ENTRY(pfr_ktable)	 pfrkt_workq;
 	struct radix_node_head	*pfrkt_ip4;
 	struct radix_node_head	*pfrkt_ip6;
 	struct pfr_ktable	*pfrkt_shadow;
 	struct pfr_ktable	*pfrkt_root;
 	struct pf_ruleset	*pfrkt_rs;
 	long			 pfrkt_larg;
 	int			 pfrkt_nflags;
 };
-#define	pfrkt_t		pfrkt_ts.pfrts_t
+#define	pfrkt_t		pfrkt_kts.pfrts_t
 #define	pfrkt_name	pfrkt_t.pfrt_name
 #define	pfrkt_anchor	pfrkt_t.pfrt_anchor
 #define	pfrkt_ruleset	pfrkt_t.pfrt_ruleset
 #define	pfrkt_flags	pfrkt_t.pfrt_flags
-#define	pfrkt_cnt	pfrkt_ts.pfrts_cnt
-#define	pfrkt_refcnt	pfrkt_ts.pfrts_refcnt
-#define	pfrkt_packets	pfrkt_ts.pfrts_packets
-#define	pfrkt_bytes	pfrkt_ts.pfrts_bytes
-#define	pfrkt_match	pfrkt_ts.pfrts_match
-#define	pfrkt_nomatch	pfrkt_ts.pfrts_nomatch
-#define	pfrkt_tzero	pfrkt_ts.pfrts_tzero
+#define	pfrkt_cnt	pfrkt_kts.pfrkts_cnt
+#define	pfrkt_refcnt	pfrkt_kts.pfrkts_refcnt
+#define	pfrkt_packets	pfrkt_kts.pfrkts_packets
+#define	pfrkt_bytes	pfrkt_kts.pfrkts_bytes
+#define	pfrkt_match	pfrkt_kts.pfrkts_match
+#define	pfrkt_nomatch	pfrkt_kts.pfrkts_nomatch
+#define	pfrkt_tzero	pfrkt_kts.pfrkts_tzero
 
 /* keep synced with pfi_kif, used in RB_FIND */
 struct pfi_kif_cmp {
 	char			 pfik_name[IFNAMSIZ];
 };
 
 struct pfi_kif {
 	char			 pfik_name[IFNAMSIZ];
 	union {
 		RB_ENTRY(pfi_kif)	 _pfik_tree;
diff --git a/sys/netpfil/pf/pf_table.c b/sys/netpfil/pf/pf_table.c
index d6b29b31e24..02a0913fac3 100644
--- a/sys/netpfil/pf/pf_table.c
+++ b/sys/netpfil/pf/pf_table.c
@@ -103,35 +103,34 @@ struct pfr_walktree {
 		PFRW_DYNADDR_UPDATE
 	} pfrw_op;
 	union {
 		struct pfr_addr		*pfrw1_addr;
 		struct pfr_astats	*pfrw1_astats;
 		struct pfr_kentryworkq	*pfrw1_workq;
 		struct pfr_kentry	*pfrw1_kentry;
 		struct pfi_dynaddr	*pfrw1_dyn;
 	} pfrw_1;
 	int	 pfrw_free;
+	int	 pfrw_flags;
 };
 #define	pfrw_addr	pfrw_1.pfrw1_addr
 #define	pfrw_astats	pfrw_1.pfrw1_astats
 #define	pfrw_workq	pfrw_1.pfrw1_workq
 #define	pfrw_kentry	pfrw_1.pfrw1_kentry
 #define	pfrw_dyn	pfrw_1.pfrw1_dyn
 #define	pfrw_cnt	pfrw_free
 
 #define	senderr(e)	do { rv = (e); goto _bad; } while (0)
 
 static MALLOC_DEFINE(M_PFTABLE, "pf_table", "pf(4) tables structures");
 static VNET_DEFINE(uma_zone_t, pfr_kentry_z);
 #define	V_pfr_kentry_z		VNET(pfr_kentry_z)
-static VNET_DEFINE(uma_zone_t, pfr_kcounters_z);
-#define	V_pfr_kcounters_z	VNET(pfr_kcounters_z)
 
 static struct pf_addr	 pfr_ffaddr = {
 	.addr32 = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff }
 };
 
 static void		 pfr_copyout_addr(struct pfr_addr *,
 			    struct pfr_kentry *ke);
 static int		 pfr_validate_addr(struct pfr_addr *);
 static void		 pfr_enqueue_addrs(struct pfr_ktable *,
 			    struct pfr_kentryworkq *, int *, int);
@@ -187,33 +186,29 @@ struct pfr_ktablehead pfr_ktables;
 struct pfr_table	 pfr_nulltable;
 int			 pfr_ktable_cnt;
 
 void
 pfr_initialize(void)
 {
 
 	V_pfr_kentry_z = uma_zcreate("pf table entries",
 	    sizeof(struct pfr_kentry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
 	    0);
-	V_pfr_kcounters_z = uma_zcreate("pf table counters",
-	    sizeof(struct pfr_kcounters), NULL, NULL, NULL, NULL,
-	    UMA_ALIGN_PTR, 0);
 	V_pf_limits[PF_LIMIT_TABLE_ENTRIES].zone = V_pfr_kentry_z;
 	V_pf_limits[PF_LIMIT_TABLE_ENTRIES].limit = PFR_KENTRY_HIWAT;
 }
 
 void
 pfr_cleanup(void)
 {
 
 	uma_zdestroy(V_pfr_kentry_z);
-	uma_zdestroy(V_pfr_kcounters_z);
 }
 
 int
 pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
 {
 	struct pfr_ktable	*kt;
 	struct pfr_kentryworkq	 workq;
 
 	PF_RULES_WASSERT();
 
@@ -593,20 +588,27 @@ pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
 		return (ESRCH);
 
 	if (kt->pfrkt_cnt > *size) {
 		*size = kt->pfrkt_cnt;
 		return (0);
 	}
 
 	bzero(&w, sizeof(w));
 	w.pfrw_op = PFRW_GET_ASTATS;
 	w.pfrw_astats = addr;
 	w.pfrw_free = kt->pfrkt_cnt;
+	/*
+	 * The flags below are for backward compatibility. It used to be
+	 * possible to have a table without per-entry counters. Now they are
+	 * always allocated, and we simply discard the data on read if the
+	 * table is not configured to keep counters.
+	 */
+	w.pfrw_flags = kt->pfrkt_flags;
 	rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
 	if (!rv)
 		rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
 		    pfr_walktree, &w);
 	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
 		pfr_enqueue_addrs(kt, &workq, NULL, 0);
 		pfr_clstats_kentries(&workq, tzero, 0);
 	}
 	if (rv)
 		return (rv);
@@ -763,69 +765,85 @@ pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
 		if (exact && ke && KENTRY_NETWORK(ke))
 			ke = NULL;
 	}
 	return (ke);
 }
 
 static struct pfr_kentry *
 pfr_create_kentry(struct pfr_addr *ad)
 {
 	struct pfr_kentry	*ke;
+	int pfr_dir, pfr_op;
 
 	ke = uma_zalloc(V_pfr_kentry_z, M_NOWAIT | M_ZERO);
 	if (ke == NULL)
 		return (NULL);
 
 	if (ad->pfra_af == AF_INET)
 		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
 	else if (ad->pfra_af == AF_INET6)
 		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
 	ke->pfrke_af = ad->pfra_af;
 	ke->pfrke_net = ad->pfra_net;
 	ke->pfrke_not = ad->pfra_not;
+	for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++)
+		for (pfr_op = 0; pfr_op < PFR_OP_ADDR_MAX; pfr_op ++) {
+			ke->pfrke_counters.pfrkc_packets[pfr_dir][pfr_op] =
+			    counter_u64_alloc(M_WAITOK);
+			ke->pfrke_counters.pfrkc_bytes[pfr_dir][pfr_op] =
+			    counter_u64_alloc(M_WAITOK);
+		}
 	return (ke);
 }
 
 static void
 pfr_destroy_kentries(struct pfr_kentryworkq *workq)
 {
 	struct pfr_kentry	*p, *q;
 
 	for (p = SLIST_FIRST(workq); p != NULL; p = q) {
 		q = SLIST_NEXT(p, pfrke_workq);
 		pfr_destroy_kentry(p);
 	}
 }
 
 static void
 pfr_destroy_kentry(struct pfr_kentry *ke)
 {
-	if (ke->pfrke_counters)
-		uma_zfree(V_pfr_kcounters_z, ke->pfrke_counters);
+	int pfr_dir, pfr_op;
+
+	for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++)
+		for (pfr_op = 0; pfr_op < PFR_OP_ADDR_MAX; pfr_op ++) {
+			counter_u64_free(
+			    ke->pfrke_counters.pfrkc_packets[pfr_dir][pfr_op]);
+			counter_u64_free(
+			    ke->pfrke_counters.pfrkc_bytes[pfr_dir][pfr_op]);
+		}
+
 	uma_zfree(V_pfr_kentry_z, ke);
 }
 
 static void
 pfr_insert_kentries(struct pfr_ktable *kt,
     struct pfr_kentryworkq *workq, long tzero)
 {
 	struct pfr_kentry	*p;
 	int			 rv, n = 0;
 
 	SLIST_FOREACH(p, workq, pfrke_workq) {
 		rv = pfr_route_kentry(kt, p);
 		if (rv) {
 			printf("pfr_insert_kentries: cannot route entry "
 			    "(code=%d).\n", rv);
 			break;
 		}
-		p->pfrke_tzero = tzero;
+		p->pfrke_counters.pfrkc_tzero = tzero;
 		n++;
 	}
 	kt->pfrkt_cnt += n;
 }
 
 int
 pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero)
 {
 	struct pfr_kentry	*p;
 	int			 rv;
 
@@ -834,21 +852,21 @@ pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero)
 	if (p != NULL)
 		return (0);
 	p = pfr_create_kentry(ad);
 	if (p == NULL)
 		return (ENOMEM);
 
 	rv = pfr_route_kentry(kt, p);
 	if (rv)
 		return (rv);
 
-	p->pfrke_tzero = tzero;
+	p->pfrke_counters.pfrkc_tzero = tzero;
 	kt->pfrkt_cnt++;
 
 	return (0);
 }
 
 static void
 pfr_remove_kentries(struct pfr_ktable *kt,
     struct pfr_kentryworkq *workq)
 {
 	struct pfr_kentry	*p;
 
@@ -869,29 +887,34 @@ pfr_clean_node_mask(struct pfr_ktable *kt,
 	struct pfr_kentry	*p;
 
 	SLIST_FOREACH(p, workq, pfrke_workq)
 		pfr_unroute_kentry(kt, p);
 }
 
 static void
 pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
 {
 	struct pfr_kentry	*p;
+	int			 pfr_dir, pfr_op;
 
 	SLIST_FOREACH(p, workq, pfrke_workq) {
 		if (negchange)
 			p->pfrke_not = !p->pfrke_not;
-		if (p->pfrke_counters) {
-			uma_zfree(V_pfr_kcounters_z, p->pfrke_counters);
-			p->pfrke_counters = NULL;
+		for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) {
+			for (pfr_op = 0; pfr_op < PFR_OP_ADDR_MAX; pfr_op ++) {
+				counter_u64_zero(p->pfrke_counters.
+				    pfrkc_packets[pfr_dir][pfr_op]);
+				counter_u64_zero(p->pfrke_counters.
+				    pfrkc_bytes[pfr_dir][pfr_op]);
+			}
 		}
-		p->pfrke_tzero = tzero;
+		p->pfrke_counters.pfrkc_tzero = tzero;
 	}
 }
 
 static void
 pfr_reset_feedback(struct pfr_addr *addr, int size)
 {
 	struct pfr_addr	*ad;
 	int		i;
 
 	for (i = 0, ad = addr; i < size; i++, ad++)
 
@@ -985,20 +1008,21 @@ pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
 		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
 	else if (ad->pfra_af == AF_INET6)
 		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
 }
 
 static int
 pfr_walktree(struct radix_node *rn, void *arg)
 {
 	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
 	struct pfr_walktree	*w = arg;
+	int			 pfr_dir, pfr_op;
 
 	switch (w->pfrw_op) {
 	case PFRW_MARK:
 		ke->pfrke_mark = 0;
 		break;
 	case PFRW_SWEEP:
 		if (ke->pfrke_mark)
 			break;
 		/* FALLTHROUGH */
 	case PFRW_ENQUEUE:
 
@@ -1009,32 +1033,42 @@ pfr_walktree(struct radix_node *rn, void *arg)
 		if (w->pfrw_free-- > 0) {
 			pfr_copyout_addr(w->pfrw_addr, ke);
 			w->pfrw_addr++;
 		}
 		break;
 	case PFRW_GET_ASTATS:
 		if (w->pfrw_free-- > 0) {
 			struct pfr_astats as;
 
 			pfr_copyout_addr(&as.pfras_a, ke);
-
-			if (ke->pfrke_counters) {
-				bcopy(ke->pfrke_counters->pfrkc_packets,
-				    as.pfras_packets, sizeof(as.pfras_packets));
-				bcopy(ke->pfrke_counters->pfrkc_bytes,
-				    as.pfras_bytes, sizeof(as.pfras_bytes));
+			if (w->pfrw_flags & PFR_TFLAG_COUNTERS) {
+				for (pfr_dir = 0;
+				    pfr_dir < PFR_DIR_MAX;
+				    pfr_dir ++)
+					for (pfr_op = 0;
+					    pfr_op < PFR_OP_ADDR_MAX;
+					    pfr_op ++) {
+						as.pfras_packets[pfr_dir][pfr_op] =
+						    counter_u64_fetch(
+						    ke->pfrke_counters.
+						    pfrkc_packets[pfr_dir][pfr_op]);
+						as.pfras_bytes[pfr_dir][pfr_op] =
+						    counter_u64_fetch(
+						    ke->pfrke_counters.
+						    pfrkc_bytes[pfr_dir][pfr_op]);
+					}
 			} else {
 				bzero(as.pfras_packets, sizeof(as.pfras_packets));
 				bzero(as.pfras_bytes, sizeof(as.pfras_bytes));
 				as.pfras_a.pfra_fback = PFR_FB_NOCOUNT;
 			}
-			as.pfras_tzero = ke->pfrke_tzero;
+			as.pfras_tzero = ke->pfrke_counters.pfrkc_tzero;
 			bcopy(&as, w->pfrw_astats, sizeof(as));
 			w->pfrw_astats++;
 		}
 		break;
 	case PFRW_POOL_GET:
 		if (ke->pfrke_not)
 			break; /* negative entries are ignored */
 		if (!w->pfrw_cnt--) {
 			w->pfrw_kentry = ke;
 
@@ -1245,39 +1279,57 @@ pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
 }
 
 int
 pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
     int flags)
 {
 	struct pfr_ktable	*p;
 	struct pfr_ktableworkq	 workq;
 	int			 n, nn;
 	long			 tzero = time_second;
+	int			 pfr_dir, pfr_op;
 
 	/* XXX PFR_FLAG_CLSTATS disabled */
 	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
 	if (pfr_fix_anchor(filter->pfrt_anchor))
 		return (EINVAL);
 	n = nn = pfr_table_count(filter, flags);
 	if (n < 0)
 		return (ENOENT);
 	if (n > *size) {
 		*size = n;
 		return (0);
 	}
 	SLIST_INIT(&workq);
 	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
 		if (pfr_skip_table(filter, p, flags))
 			continue;
 		if (n-- <= 0)
 			continue;
-		bcopy(&p->pfrkt_ts, tbl++, sizeof(*tbl));
+		bcopy(&p->pfrkt_kts.pfrts_t, &tbl->pfrts_t,
+		    sizeof(struct pfr_table));
+		for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++)
+			for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) {
+				tbl->pfrts_packets[pfr_dir][pfr_op] =
+				    counter_u64_fetch(
+				    p->pfrkt_packets[pfr_dir][pfr_op]);
+				tbl->pfrts_bytes[pfr_dir][pfr_op] =
+				    counter_u64_fetch(
+				    p->pfrkt_bytes[pfr_dir][pfr_op]);
+			}
+		tbl->pfrts_match = counter_u64_fetch(p->pfrkt_match);
+		tbl->pfrts_nomatch = counter_u64_fetch(p->pfrkt_nomatch);
+		tbl->pfrts_tzero = p->pfrkt_tzero;
+		tbl->pfrts_cnt = p->pfrkt_cnt;
+		for (pfr_op = 0; pfr_op < PFR_REFCNT_MAX; pfr_op++)
+			tbl->pfrts_refcnt[pfr_op] = p->pfrkt_refcnt[pfr_op];
+		tbl++;
 		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
 	}
 	if (flags & PFR_FLAG_CLSTATS)
 		pfr_clstats_ktables(&workq, tzero,
 		    flags & PFR_FLAG_ADDRSTOO);
 	KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n));
 	*size = nn;
 	return (0);
 
@@ -1597,21 +1649,21 @@ pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
 			next = SLIST_NEXT(p, pfrke_workq);	/* XXX */
 			pfr_copyout_addr(&ad, p);
 			q = pfr_lookup_addr(kt, &ad, 1);
 			if (q != NULL) {
 				if (q->pfrke_not != p->pfrke_not)
 					SLIST_INSERT_HEAD(&changeq, q,
 					    pfrke_workq);
 				q->pfrke_mark = 1;
 				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
 			} else {
-				p->pfrke_tzero = tzero;
+				p->pfrke_counters.pfrkc_tzero = tzero;
 				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
 			}
 		}
 		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
 		pfr_insert_kentries(kt, &addq, tzero);
 		pfr_remove_kentries(kt, &delq);
 		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
 		pfr_destroy_kentries(&garbageq);
 	} else {
 		/* kt cannot contain addresses */
 
@@ -1780,54 +1832,70 @@ pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
 	struct pfr_ktable	*p;
 
 	SLIST_FOREACH(p, workq, pfrkt_workq)
 		pfr_clstats_ktable(p, tzero, recurse);
 }
 
 static void
 pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
 {
 	struct pfr_kentryworkq	 addrq;
+	int			 pfr_dir, pfr_op;
 
 	if (recurse) {
 		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
 		pfr_clstats_kentries(&addrq, tzero, 0);
 	}
-	bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
-	bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
-	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
+	for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++)
+		for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) {
+			counter_u64_zero(kt->pfrkt_packets[pfr_dir][pfr_op]);
+			counter_u64_zero(kt->pfrkt_bytes[pfr_dir][pfr_op]);
+		}
+	counter_u64_zero(kt->pfrkt_match);
+	counter_u64_zero(kt->pfrkt_nomatch);
 	kt->pfrkt_tzero = tzero;
 }
 
 static struct pfr_ktable *
 pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
 {
 	struct pfr_ktable	*kt;
 	struct pf_ruleset	*rs;
+	int			 pfr_dir, pfr_op;
 
 	PF_RULES_WASSERT();
 
 	kt = malloc(sizeof(*kt), M_PFTABLE, M_NOWAIT|M_ZERO);
 	if (kt == NULL)
 		return (NULL);
 	kt->pfrkt_t = *tbl;
 
 	if (attachruleset) {
 		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
 		if (!rs) {
 			pfr_destroy_ktable(kt, 0);
 			return (NULL);
 		}
 		kt->pfrkt_rs = rs;
 		rs->tables++;
 	}
 
+	for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++)
+		for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) {
+			kt->pfrkt_packets[pfr_dir][pfr_op] =
+			    counter_u64_alloc(M_WAITOK);
+			kt->pfrkt_bytes[pfr_dir][pfr_op] =
+			    counter_u64_alloc(M_WAITOK);
+		}
+	kt->pfrkt_match = counter_u64_alloc(M_WAITOK);
+	kt->pfrkt_nomatch = counter_u64_alloc(M_WAITOK);
+
 	if (!rn_inithead((void **)&kt->pfrkt_ip4,
 	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
 	    !rn_inithead((void **)&kt->pfrkt_ip6,
 	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
 		pfr_destroy_ktable(kt, 0);
 		return (NULL);
 	}
 	kt->pfrkt_tzero = tzero;
 
 	return (kt);
 
@@ -1841,36 +1909,45 @@ pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
 
 	for (p = SLIST_FIRST(workq); p; p = q) {
 		q = SLIST_NEXT(p, pfrkt_workq);
 		pfr_destroy_ktable(p, flushaddr);
 	}
 }
 
 static void
 pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
 {
 	struct pfr_kentryworkq	 addrq;
+	int			 pfr_dir, pfr_op;
 
 	if (flushaddr) {
 		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
 		pfr_clean_node_mask(kt, &addrq);
 		pfr_destroy_kentries(&addrq);
 	}
 	if (kt->pfrkt_ip4 != NULL)
 		rn_detachhead((void **)&kt->pfrkt_ip4);
 	if (kt->pfrkt_ip6 != NULL)
 		rn_detachhead((void **)&kt->pfrkt_ip6);
 	if (kt->pfrkt_shadow != NULL)
 		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
 	if (kt->pfrkt_rs != NULL) {
 		kt->pfrkt_rs->tables--;
 		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
 	}
+	for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++)
+		for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) {
+			counter_u64_free(kt->pfrkt_packets[pfr_dir][pfr_op]);
+			counter_u64_free(kt->pfrkt_bytes[pfr_dir][pfr_op]);
+		}
+	counter_u64_free(kt->pfrkt_match);
+	counter_u64_free(kt->pfrkt_nomatch);
+
 	free(kt, M_PFTABLE);
 }
 
 static int
 pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
 {
 	int d;
 
 	if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
 		return (d);
 
@@ -1925,23 +2002,23 @@ pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
 		bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr));
 		ke = (struct pfr_kentry *)rn_match(&sin6, &kt->pfrkt_ip6->rh);
 		if (ke && KENTRY_RNF_ROOT(ke))
 			ke = NULL;
 		break;
 	    }
 #endif /* INET6 */
 	}
 	match = (ke && !ke->pfrke_not);
 	if (match)
-		kt->pfrkt_match++;
+		counter_u64_add(kt->pfrkt_match, 1);
 	else
-		kt->pfrkt_nomatch++;
+		counter_u64_add(kt->pfrkt_nomatch, 1);
 	return (match);
 }
 
 void
 pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
     u_int64_t len, int dir_out, int op_pass, int notrule)
 {
 	struct pfr_kentry	*ke = NULL;
 
 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
 
@@ -1981,31 +2058,28 @@ pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
 	    }
 #endif /* INET6 */
 	default:
 		panic("%s: unknown address family %u", __func__, af);
 	}
 	if ((ke == NULL || ke->pfrke_not) != notrule) {
 		if (op_pass != PFR_OP_PASS)
 			printf("pfr_update_stats: assertion failed.\n");
 		op_pass = PFR_OP_XPASS;
 	}
-	kt->pfrkt_packets[dir_out][op_pass]++;
-	kt->pfrkt_bytes[dir_out][op_pass] += len;
+	counter_u64_add(kt->pfrkt_packets[dir_out][op_pass], 1);
+	counter_u64_add(kt->pfrkt_bytes[dir_out][op_pass], len);
 	if (ke != NULL && op_pass != PFR_OP_XPASS &&
 	    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
-		if (ke->pfrke_counters == NULL)
-			ke->pfrke_counters = uma_zalloc(V_pfr_kcounters_z,
-			    M_NOWAIT | M_ZERO);
-		if (ke->pfrke_counters != NULL) {
-			ke->pfrke_counters->pfrkc_packets[dir_out][op_pass]++;
-			ke->pfrke_counters->pfrkc_bytes[dir_out][op_pass] += len;
-		}
+		counter_u64_add(ke->pfrke_counters.
+		    pfrkc_packets[dir_out][op_pass], 1);
+		counter_u64_add(ke->pfrke_counters.
+		    pfrkc_bytes[dir_out][op_pass], len);
 	}
 }
 
 struct pfr_ktable *
 pfr_attach_table(struct pf_ruleset *rs, char *name)
 {
 	struct pfr_ktable	*kt, *rt;
 	struct pfr_table	 tbl;
 	struct pf_anchor	*ac = rs->anchor;
 
@@ -2081,21 +2155,21 @@ pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
 	if (pidx != NULL)
 		idx = *pidx;
 	if (counter != NULL && idx >= 0)
 		use_counter = 1;
 	if (idx < 0)
 		idx = 0;
 
 _next_block:
 	ke = pfr_kentry_byidx(kt, idx, af);
 	if (ke == NULL) {
-		kt->pfrkt_nomatch++;
+		counter_u64_add(kt->pfrkt_nomatch, 1);
 		return (1);
 	}
 	pfr_prepare_network(&umask, af, ke->pfrke_net);
 	cur = SUNION2PF(&ke->pfrke_sa, af);
 	mask = SUNION2PF(&umask, af);
 
 	if (use_counter) {
 		/* is supplied address within block? */
 		if (!PF_MATCHA(0, cur, mask, counter, af)) {
 			/* no, go to next block in table */
 
@@ -2106,41 +2180,41 @@ pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
 		PF_ACPY(addr, counter, af);
 	} else {
 		/* use first address of block */
 		PF_ACPY(addr, cur, af);
 	}
 
 	if (!KENTRY_NETWORK(ke)) {
 		/* this is a single IP address - no possible nested block */
 		PF_ACPY(counter, addr, af);
 		*pidx = idx;
-		kt->pfrkt_match++;
+		counter_u64_add(kt->pfrkt_match, 1);
 		return (0);
 	}
 	for (;;) {
 		/* we don't want to use a nested block */
 		switch (af) {
 		case AF_INET:
 			ke2 = (struct pfr_kentry *)rn_match(&uaddr,
 			    &kt->pfrkt_ip4->rh);
 			break;
 		case AF_INET6:
 			ke2 = (struct pfr_kentry *)rn_match(&uaddr,
 			    &kt->pfrkt_ip6->rh);
 			break;
 		}
 		/* no need to check KENTRY_RNF_ROOT() here */
 		if (ke2 == ke) {
 			/* lookup return the same block - perfect */
 			PF_ACPY(counter, addr, af);
 			*pidx = idx;
-			kt->pfrkt_match++;
+			counter_u64_add(kt->pfrkt_match, 1);
 			return (0);
 		}
 
 		/* we need to increase the counter past the nested block */
 		pfr_prepare_network(&umask, AF_INET, ke2->pfrke_net);
 		PF_POOLMASK(addr, addr, SUNION2PF(&umask, af),
 		    &pfr_ffaddr, af);
 		PF_AINC(addr, af);
 		if (!PF_MATCHA(0, cur, mask, addr, af)) {
 			/* ok, we reached the end of our main block */
 			/* go to next block in table */
-- 
2.11.0