@@ -103,35 +103,34 @@ struct pfr_walktree {
 		PFRW_DYNADDR_UPDATE
 	} pfrw_op;
 	union {
 		struct pfr_addr		*pfrw1_addr;
 		struct pfr_astats	*pfrw1_astats;
 		struct pfr_kentryworkq	*pfrw1_workq;
 		struct pfr_kentry	*pfrw1_kentry;
 		struct pfi_dynaddr	*pfrw1_dyn;
 	} pfrw_1;
 	int	pfrw_free;
+	int	pfrw_flags;
 };
 #define	pfrw_addr	pfrw_1.pfrw1_addr
 #define	pfrw_astats	pfrw_1.pfrw1_astats
 #define	pfrw_workq	pfrw_1.pfrw1_workq
 #define	pfrw_kentry	pfrw_1.pfrw1_kentry
 #define	pfrw_dyn	pfrw_1.pfrw1_dyn
 #define	pfrw_cnt	pfrw_free
 
 #define	senderr(e)	do { rv = (e); goto _bad; } while (0)
 
 static MALLOC_DEFINE(M_PFTABLE, "pf_table", "pf(4) tables structures");
 static VNET_DEFINE(uma_zone_t, pfr_kentry_z);
 #define	V_pfr_kentry_z		VNET(pfr_kentry_z)
-static VNET_DEFINE(uma_zone_t, pfr_kcounters_z);
-#define	V_pfr_kcounters_z	VNET(pfr_kcounters_z)
 
 static struct pf_addr	 pfr_ffaddr = {
 	.addr32 = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff }
 };
 
 static void		 pfr_copyout_addr(struct pfr_addr *,
 			    struct pfr_kentry *ke);
 static int		 pfr_validate_addr(struct pfr_addr *);
 static void		 pfr_enqueue_addrs(struct pfr_ktable *,
 			    struct pfr_kentryworkq *, int *, int);
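Background for the hunk above: the separate pfr_kcounters UMA zone disappears because statistics move from plain integers into per-CPU counter(9) handles embedded in the structures themselves. A minimal sketch of the counter(9) lifecycle the rest of the diff relies on; the example_* names are illustrative and not part of the patch:

    /* Sketch only: the counter(9) KPI as used throughout this change. */
    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/counter.h>
    #include <sys/malloc.h>

    static counter_u64_t example_pkts;

    static void
    example_init(void)
    {
        /* One 64-bit counter, striped per CPU; M_WAITOK may sleep. */
        example_pkts = counter_u64_alloc(M_WAITOK);
    }

    static void
    example_touch(void)
    {
        counter_u64_add(example_pkts, 1);       /* lockless per-CPU add */
        (void)counter_u64_fetch(example_pkts);  /* sums all CPU slices */
        counter_u64_zero(example_pkts);         /* clears all CPU slices */
    }

    static void
    example_fini(void)
    {
        counter_u64_free(example_pkts);
    }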
@@ -187,33 +186,29 @@ struct pfr_ktablehead pfr_ktables;
 struct pfr_table	 pfr_nulltable;
 int			 pfr_ktable_cnt;
 
 void
 pfr_initialize(void)
 {
 
 	V_pfr_kentry_z = uma_zcreate("pf table entries",
 	    sizeof(struct pfr_kentry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
 	    0);
-	V_pfr_kcounters_z = uma_zcreate("pf table counters",
-	    sizeof(struct pfr_kcounters), NULL, NULL, NULL, NULL,
-	    UMA_ALIGN_PTR, 0);
 	V_pf_limits[PF_LIMIT_TABLE_ENTRIES].zone = V_pfr_kentry_z;
 	V_pf_limits[PF_LIMIT_TABLE_ENTRIES].limit = PFR_KENTRY_HIWAT;
 }
 
 void
 pfr_cleanup(void)
 {
 
 	uma_zdestroy(V_pfr_kentry_z);
-	uma_zdestroy(V_pfr_kcounters_z);
 }
 
 int
 pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
 {
 	struct pfr_ktable	*kt;
 	struct pfr_kentryworkq	 workq;
 
 	PF_RULES_WASSERT();
 
@@ -593,20 +588,27 @@ pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
 		return (ESRCH);
 	if (kt->pfrkt_cnt > *size) {
 		*size = kt->pfrkt_cnt;
 		return (0);
 	}
 
 	bzero(&w, sizeof(w));
 	w.pfrw_op = PFRW_GET_ASTATS;
 	w.pfrw_astats = addr;
 	w.pfrw_free = kt->pfrkt_cnt;
+	/*
+	 * Flags below are for backward compatibility. It was possible to have
+	 * a table without per-entry counters. Now they are always allocated,
+	 * we just discard data when reading it if table is not configured to
+	 * have counters.
+	 */
+	w.pfrw_flags = kt->pfrkt_flags;
 	rv = kt->pfrkt_ip4->rnh_walktree(&kt->pfrkt_ip4->rh, pfr_walktree, &w);
 	if (!rv)
 		rv = kt->pfrkt_ip6->rnh_walktree(&kt->pfrkt_ip6->rh,
 		    pfr_walktree, &w);
 	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
 		pfr_enqueue_addrs(kt, &workq, NULL, 0);
 		pfr_clstats_kentries(&workq, tzero, 0);
 	}
 	if (rv)
 		return (rv);
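The new pfrw_flags member exists because the walker context is the only channel from pfr_get_astats() into the per-node callback: rnh_walktree() hands the callback nothing but the radix node and one opaque pointer. A sketch of that contract, with hypothetical example_* names:

    #include <net/radix.h>

    /* Mirrors the shape of struct pfr_walktree / pfr_walktree() above. */
    struct example_walkctx {
        int flags;  /* copy of table flags, e.g. PFR_TFLAG_COUNTERS */
        int free;   /* room left in the userland output buffer */
    };

    static int
    example_walk_cb(struct radix_node *rn, void *arg)
    {
        struct example_walkctx *ctx = arg;

        if (ctx->free-- > 0) {
            /* copy one entry out; ctx->flags picks the counter path */
        }
        return (0);     /* nonzero would abort the walk */
    }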
@@ -763,69 +765,85 @@ pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
 		if (exact && ke && KENTRY_NETWORK(ke))
 			ke = NULL;
 	}
 	return (ke);
 }
 
 static struct pfr_kentry *
 pfr_create_kentry(struct pfr_addr *ad)
 {
 	struct pfr_kentry	*ke;
+	int pfr_dir, pfr_op;
 
 	ke = uma_zalloc(V_pfr_kentry_z, M_NOWAIT | M_ZERO);
 	if (ke == NULL)
 		return (NULL);
 
 	if (ad->pfra_af == AF_INET)
 		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
 	else if (ad->pfra_af == AF_INET6)
 		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
 	ke->pfrke_af = ad->pfra_af;
 	ke->pfrke_net = ad->pfra_net;
 	ke->pfrke_not = ad->pfra_not;
+	for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++)
+		for (pfr_op = 0; pfr_op < PFR_OP_ADDR_MAX; pfr_op ++) {
+			ke->pfrke_counters.pfrkc_packets[pfr_dir][pfr_op] =
+			    counter_u64_alloc(M_WAITOK);
+			ke->pfrke_counters.pfrkc_bytes[pfr_dir][pfr_op] =
+			    counter_u64_alloc(M_WAITOK);
+		}
 	return (ke);
 }
 
 static void
 pfr_destroy_kentries(struct pfr_kentryworkq *workq)
 {
 	struct pfr_kentry	*p, *q;
 
 	for (p = SLIST_FIRST(workq); p != NULL; p = q) {
 		q = SLIST_NEXT(p, pfrke_workq);
 		pfr_destroy_kentry(p);
 	}
 }
 
 static void
 pfr_destroy_kentry(struct pfr_kentry *ke)
 {
-	if (ke->pfrke_counters)
-		uma_zfree(V_pfr_kcounters_z, ke->pfrke_counters);
+	int pfr_dir, pfr_op;
+
+	for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++)
+		for (pfr_op = 0; pfr_op < PFR_OP_ADDR_MAX; pfr_op ++) {
+			counter_u64_free(
+			    ke->pfrke_counters.pfrkc_packets[pfr_dir][pfr_op]);
+			counter_u64_free(
+			    ke->pfrke_counters.pfrkc_bytes[pfr_dir][pfr_op]);
+		}
+
 	uma_zfree(V_pfr_kentry_z, ke);
 }
 
 static void
 pfr_insert_kentries(struct pfr_ktable *kt,
     struct pfr_kentryworkq *workq, long tzero)
 {
 	struct pfr_kentry	*p;
 	int			 rv, n = 0;
 
 	SLIST_FOREACH(p, workq, pfrke_workq) {
 		rv = pfr_route_kentry(kt, p);
 		if (rv) {
 			printf("pfr_insert_kentries: cannot route entry "
 			    "(code=%d).\n", rv);
 			break;
 		}
-		p->pfrke_tzero = tzero;
+		p->pfrke_counters.pfrkc_tzero = tzero;
 		n++;
 	}
 	kt->pfrkt_cnt += n;
 }
 
 int
 pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero)
 {
 	struct pfr_kentry	*p;
 	int			 rv;
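pfr_create_kentry() now allocates the whole direction-by-operation counter matrix up front with M_WAITOK, and pfr_destroy_kentry() frees it symmetrically; the lazy M_NOWAIT allocation in the packet path (removed further down in pfr_update_stats()) is gone. A sketch of that pairing, with illustrative dimensions standing in for pf's real PFR_DIR_MAX/PFR_OP_ADDR_MAX enums:

    #include <sys/param.h>
    #include <sys/counter.h>
    #include <sys/malloc.h>

    #define EX_DIR_MAX 2    /* stands in for PFR_DIR_MAX (in/out) */
    #define EX_OP_MAX  2    /* stands in for PFR_OP_ADDR_MAX */

    struct example_kcounters {
        counter_u64_t pkts[EX_DIR_MAX][EX_OP_MAX];
        counter_u64_t bytes[EX_DIR_MAX][EX_OP_MAX];
    };

    static void
    example_alloc(struct example_kcounters *kc)
    {
        int d, o;

        for (d = 0; d < EX_DIR_MAX; d++)
            for (o = 0; o < EX_OP_MAX; o++) {
                kc->pkts[d][o] = counter_u64_alloc(M_WAITOK);
                kc->bytes[d][o] = counter_u64_alloc(M_WAITOK);
            }
    }

    static void
    example_free(struct example_kcounters *kc)
    {
        int d, o;

        for (d = 0; d < EX_DIR_MAX; d++)
            for (o = 0; o < EX_OP_MAX; o++) {
                counter_u64_free(kc->pkts[d][o]);
                counter_u64_free(kc->bytes[d][o]);
            }
    }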
@@ -834,21 +852,21 @@ pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero)
 	if (p != NULL)
 		return (0);
 	p = pfr_create_kentry(ad);
 	if (p == NULL)
 		return (ENOMEM);
 
 	rv = pfr_route_kentry(kt, p);
 	if (rv)
 		return (rv);
 
-	p->pfrke_tzero = tzero;
+	p->pfrke_counters.pfrkc_tzero = tzero;
 	kt->pfrkt_cnt++;
 
 	return (0);
 }
 
 static void
 pfr_remove_kentries(struct pfr_ktable *kt,
     struct pfr_kentryworkq *workq)
 {
 	struct pfr_kentry	*p;
@@ -869,29 +887,34 @@ pfr_clean_node_mask(struct pfr_ktable *kt,
 	struct pfr_kentry	*p;
 
 	SLIST_FOREACH(p, workq, pfrke_workq)
 		pfr_unroute_kentry(kt, p);
 }
 
 static void
 pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
 {
 	struct pfr_kentry	*p;
+	int			 pfr_dir, pfr_op;
 
 	SLIST_FOREACH(p, workq, pfrke_workq) {
 		if (negchange)
 			p->pfrke_not = !p->pfrke_not;
-		if (p->pfrke_counters) {
-			uma_zfree(V_pfr_kcounters_z, p->pfrke_counters);
-			p->pfrke_counters = NULL;
+		for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) {
+			for (pfr_op = 0; pfr_op < PFR_OP_ADDR_MAX; pfr_op ++) {
+				counter_u64_zero(p->pfrke_counters.
+				    pfrkc_packets[pfr_dir][pfr_op]);
+				counter_u64_zero(p->pfrke_counters.
+				    pfrkc_bytes[pfr_dir][pfr_op]);
+			}
 		}
-		p->pfrke_tzero = tzero;
+		p->pfrke_counters.pfrkc_tzero = tzero;
 	}
 }
 
 static void
 pfr_reset_feedback(struct pfr_addr *addr, int size)
 {
 	struct pfr_addr	*ad;
 	int		i;
 
 	for (i = 0, ad = addr; i < size; i++, ad++)
@@ -985,20 +1008,21 @@ pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
 		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
 	else if (ad->pfra_af == AF_INET6)
 		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
 }
 
 static int
 pfr_walktree(struct radix_node *rn, void *arg)
 {
 	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
 	struct pfr_walktree	*w = arg;
+	int			 pfr_dir, pfr_op;
 
 	switch (w->pfrw_op) {
 	case PFRW_MARK:
 		ke->pfrke_mark = 0;
 		break;
 	case PFRW_SWEEP:
 		if (ke->pfrke_mark)
 			break;
 		/* FALLTHROUGH */
 	case PFRW_ENQUEUE:
@@ -1009,32 +1033,42 @@ pfr_walktree(struct radix_node *rn, void *arg)
 		if (w->pfrw_free-- > 0) {
 			pfr_copyout_addr(w->pfrw_addr, ke);
 			w->pfrw_addr++;
 		}
 		break;
 	case PFRW_GET_ASTATS:
 		if (w->pfrw_free-- > 0) {
 			struct pfr_astats as;
 
 			pfr_copyout_addr(&as.pfras_a, ke);
-
-			if (ke->pfrke_counters) {
-				bcopy(ke->pfrke_counters->pfrkc_packets,
-				    as.pfras_packets, sizeof(as.pfras_packets));
-				bcopy(ke->pfrke_counters->pfrkc_bytes,
-				    as.pfras_bytes, sizeof(as.pfras_bytes));
+			if (w->pfrw_flags & PFR_TFLAG_COUNTERS) {
+				for (pfr_dir = 0;
+				    pfr_dir < PFR_DIR_MAX;
+				    pfr_dir ++)
+					for (pfr_op = 0;
+					    pfr_op < PFR_OP_ADDR_MAX;
+					    pfr_op ++) {
+						as.pfras_packets[pfr_dir][pfr_op] =
+						    counter_u64_fetch(
+						    ke->pfrke_counters.
+						    pfrkc_packets[pfr_dir][pfr_op]);
+						as.pfras_bytes[pfr_dir][pfr_op] =
+						    counter_u64_fetch(
+						    ke->pfrke_counters.
+						    pfrkc_bytes[pfr_dir][pfr_op]);
+					}
 			} else {
 				bzero(as.pfras_packets, sizeof(as.pfras_packets));
 				bzero(as.pfras_bytes, sizeof(as.pfras_bytes));
 				as.pfras_a.pfra_fback = PFR_FB_NOCOUNT;
 			}
-			as.pfras_tzero = ke->pfrke_tzero;
+			as.pfras_tzero = ke->pfrke_counters.pfrkc_tzero;
 
 			bcopy(&as, w->pfrw_astats, sizeof(as));
 			w->pfrw_astats++;
 		}
 		break;
 	case PFRW_POOL_GET:
 		if (ke->pfrke_not)
 			break; /* negative entries are ignored */
 		if (!w->pfrw_cnt--) {
 			w->pfrw_kentry = ke;
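In the PFRW_GET_ASTATS case above, counter_u64_fetch() folds the per-CPU slices into one value at read time; each counter is snapshotted independently, which is fine for statistics but means packet and byte totals are not captured atomically together. Tables created without PFR_TFLAG_COUNTERS keep reporting PFR_FB_NOCOUNT, so existing consumers see the old behaviour. A sketch of the export step, with hypothetical example_* names and illustrative dimensions:

    #include <sys/counter.h>

    /* Flatten a per-CPU counter matrix into the plain uint64_t array
     * that the userland pfr_astats layout expects. */
    static void
    example_export(counter_u64_t src[2][2], uint64_t dst[2][2])
    {
        int d, o;

        for (d = 0; d < 2; d++)
            for (o = 0; o < 2; o++)
                dst[d][o] = counter_u64_fetch(src[d][o]);
    }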
@@ -1245,39 +1279,57 @@ pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
 }
 
 int
 pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
     int flags)
 {
 	struct pfr_ktable	*p;
 	struct pfr_ktableworkq	 workq;
 	int			 n, nn;
 	long			 tzero = time_second;
+	int			 pfr_dir, pfr_op;
 
 	/* XXX PFR_FLAG_CLSTATS disabled */
 	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
 	if (pfr_fix_anchor(filter->pfrt_anchor))
 		return (EINVAL);
 	n = nn = pfr_table_count(filter, flags);
 	if (n < 0)
 		return (ENOENT);
 	if (n > *size) {
 		*size = n;
 		return (0);
 	}
 	SLIST_INIT(&workq);
 	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
 		if (pfr_skip_table(filter, p, flags))
 			continue;
 		if (n-- <= 0)
 			continue;
-		bcopy(&p->pfrkt_ts, tbl++, sizeof(*tbl));
+		bcopy(&p->pfrkt_kts.pfrts_t, &tbl->pfrts_t,
+		    sizeof(struct pfr_table));
+		for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++)
+			for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) {
+				tbl->pfrts_packets[pfr_dir][pfr_op] =
+				    counter_u64_fetch(
+				    p->pfrkt_packets[pfr_dir][pfr_op]);
+				tbl->pfrts_bytes[pfr_dir][pfr_op] =
+				    counter_u64_fetch(
+				    p->pfrkt_bytes[pfr_dir][pfr_op]);
+			}
+		tbl->pfrts_match = counter_u64_fetch(p->pfrkt_match);
+		tbl->pfrts_nomatch = counter_u64_fetch(p->pfrkt_nomatch);
+		tbl->pfrts_tzero = p->pfrkt_tzero;
+		tbl->pfrts_cnt = p->pfrkt_cnt;
+		for (pfr_op = 0; pfr_op < PFR_REFCNT_MAX; pfr_op++)
+			tbl->pfrts_refcnt[pfr_op] = p->pfrkt_refcnt[pfr_op];
+		tbl++;
 		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
 	}
 	if (flags & PFR_FLAG_CLSTATS)
 		pfr_clstats_ktables(&workq, tzero,
 		    flags & PFR_FLAG_ADDRSTOO);
 
 	KASSERT(n == 0, ("%s: corruption detected (%d)", __func__, n));
 
 	*size = nn;
 	return (0);
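The wholesale bcopy() of pfrkt_ts could not survive the conversion: the kernel-side stats now hold pointer-sized counter_u64_t handles rather than the plain values userland expects, so copying the struct verbatim would export kernel pointers instead of numbers. Each field is therefore fetched individually. A sketch of the distinction, with hypothetical shapes:

    #include <sys/counter.h>

    struct example_kernel_stats {   /* kernel side */
        counter_u64_t match;        /* a handle, not a value */
    };

    struct example_user_stats {     /* what userland expects */
        uint64_t match;             /* a plain value */
    };

    static void
    example_export_tstats(const struct example_kernel_stats *k,
        struct example_user_stats *u)
    {
        /* bcopy(k, u, sizeof(*u)) would leak a pointer; fetch instead. */
        u->match = counter_u64_fetch(k->match);
    }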
@@ -1597,21 +1649,21 @@ pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
 			next = SLIST_NEXT(p, pfrke_workq);	/* XXX */
 			pfr_copyout_addr(&ad, p);
 			q = pfr_lookup_addr(kt, &ad, 1);
 			if (q != NULL) {
 				if (q->pfrke_not != p->pfrke_not)
 					SLIST_INSERT_HEAD(&changeq, q,
 					    pfrke_workq);
 				q->pfrke_mark = 1;
 				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
 			} else {
-				p->pfrke_tzero = tzero;
+				p->pfrke_counters.pfrkc_tzero = tzero;
 				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
 			}
 		}
 		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
 		pfr_insert_kentries(kt, &addq, tzero);
 		pfr_remove_kentries(kt, &delq);
 		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
 		pfr_destroy_kentries(&garbageq);
 	} else {
 		/* kt cannot contain addresses */
@@ -1780,54 +1832,70 @@ pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
 	struct pfr_ktable *p;
 
 	SLIST_FOREACH(p, workq, pfrkt_workq)
 		pfr_clstats_ktable(p, tzero, recurse);
 }
 
 static void
 pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
 {
 	struct pfr_kentryworkq	 addrq;
+	int			 pfr_dir, pfr_op;
 
 	if (recurse) {
 		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
 		pfr_clstats_kentries(&addrq, tzero, 0);
 	}
-	bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
-	bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
-	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
+	for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++)
+		for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) {
+			counter_u64_zero(kt->pfrkt_packets[pfr_dir][pfr_op]);
+			counter_u64_zero(kt->pfrkt_bytes[pfr_dir][pfr_op]);
+		}
+	counter_u64_zero(kt->pfrkt_match);
+	counter_u64_zero(kt->pfrkt_nomatch);
 	kt->pfrkt_tzero = tzero;
 }
 
 static struct pfr_ktable *
 pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
 {
 	struct pfr_ktable	*kt;
 	struct pf_ruleset	*rs;
+	int			 pfr_dir, pfr_op;
 
 	PF_RULES_WASSERT();
 
 	kt = malloc(sizeof(*kt), M_PFTABLE, M_NOWAIT|M_ZERO);
 	if (kt == NULL)
 		return (NULL);
 	kt->pfrkt_t = *tbl;
 
 	if (attachruleset) {
 		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
 		if (!rs) {
 			pfr_destroy_ktable(kt, 0);
 			return (NULL);
 		}
 		kt->pfrkt_rs = rs;
 		rs->tables++;
 	}
 
+	for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++)
+		for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) {
+			kt->pfrkt_packets[pfr_dir][pfr_op] =
+			    counter_u64_alloc(M_WAITOK);
+			kt->pfrkt_bytes[pfr_dir][pfr_op] =
+			    counter_u64_alloc(M_WAITOK);
+		}
+	kt->pfrkt_match = counter_u64_alloc(M_WAITOK);
+	kt->pfrkt_nomatch = counter_u64_alloc(M_WAITOK);
+
 	if (!rn_inithead((void **)&kt->pfrkt_ip4,
 	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
 	    !rn_inithead((void **)&kt->pfrkt_ip6,
 	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
 		pfr_destroy_ktable(kt, 0);
 		return (NULL);
 	}
 	kt->pfrkt_tzero = tzero;
 
 	return (kt);
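pfr_clstats_ktable() can no longer bzero() the statistics: the arrays now contain counter_u64_t handles, and wiping them would clobber the handles and leak the allocations. Each slot is instead cleared through the KPI, which zeroes every CPU's slice of that counter. A sketch with illustrative dimensions:

    #include <sys/counter.h>

    static void
    example_clear(counter_u64_t c[2][3])
    {
        int d, o;

        /* counter_u64_zero() resets every per-CPU slice of one counter;
         * bzero() on the array would destroy the handles themselves. */
        for (d = 0; d < 2; d++)
            for (o = 0; o < 3; o++)
                counter_u64_zero(c[d][o]);
    }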
@@ -1841,36 +1909,45 @@ pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
 	for (p = SLIST_FIRST(workq); p; p = q) {
 		q = SLIST_NEXT(p, pfrkt_workq);
 		pfr_destroy_ktable(p, flushaddr);
 	}
 }
 
 static void
 pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
 {
 	struct pfr_kentryworkq	 addrq;
+	int			 pfr_dir, pfr_op;
 
 	if (flushaddr) {
 		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
 		pfr_clean_node_mask(kt, &addrq);
 		pfr_destroy_kentries(&addrq);
 	}
 	if (kt->pfrkt_ip4 != NULL)
 		rn_detachhead((void **)&kt->pfrkt_ip4);
 	if (kt->pfrkt_ip6 != NULL)
 		rn_detachhead((void **)&kt->pfrkt_ip6);
 	if (kt->pfrkt_shadow != NULL)
 		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
 	if (kt->pfrkt_rs != NULL) {
 		kt->pfrkt_rs->tables--;
 		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
 	}
+	for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++)
+		for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) {
+			counter_u64_free(kt->pfrkt_packets[pfr_dir][pfr_op]);
+			counter_u64_free(kt->pfrkt_bytes[pfr_dir][pfr_op]);
+		}
+	counter_u64_free(kt->pfrkt_match);
+	counter_u64_free(kt->pfrkt_nomatch);
+
 	free(kt, M_PFTABLE);
 }
 
 static int
 pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
 {
 	int d;
 
 	if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
 		return (d);
@@ -1925,23 +2002,23 @@ pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
 		bcopy(a, &sin6.sin6_addr, sizeof(sin6.sin6_addr));
 		ke = (struct pfr_kentry *)rn_match(&sin6, &kt->pfrkt_ip6->rh);
 		if (ke && KENTRY_RNF_ROOT(ke))
 			ke = NULL;
 		break;
 	    }
 #endif /* INET6 */
 	}
 	match = (ke && !ke->pfrke_not);
 	if (match)
-		kt->pfrkt_match++;
+		counter_u64_add(kt->pfrkt_match, 1);
 	else
-		kt->pfrkt_nomatch++;
+		counter_u64_add(kt->pfrkt_nomatch, 1);
 	return (match);
 }
 
 void
 pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
     u_int64_t len, int dir_out, int op_pass, int notrule)
 {
 	struct pfr_kentry	*ke = NULL;
 
 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
@@ -1981,31 +2058,28 @@ pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
 	    }
 #endif /* INET6 */
 	default:
 		panic("%s: unknown address family %u", __func__, af);
 	}
 	if ((ke == NULL || ke->pfrke_not) != notrule) {
 		if (op_pass != PFR_OP_PASS)
 			printf("pfr_update_stats: assertion failed.\n");
 		op_pass = PFR_OP_XPASS;
 	}
-	kt->pfrkt_packets[dir_out][op_pass]++;
-	kt->pfrkt_bytes[dir_out][op_pass] += len;
+	counter_u64_add(kt->pfrkt_packets[dir_out][op_pass], 1);
+	counter_u64_add(kt->pfrkt_bytes[dir_out][op_pass], len);
 	if (ke != NULL && op_pass != PFR_OP_XPASS &&
 	    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
-		if (ke->pfrke_counters == NULL)
-			ke->pfrke_counters = uma_zalloc(V_pfr_kcounters_z,
-			    M_NOWAIT | M_ZERO);
-		if (ke->pfrke_counters != NULL) {
-			ke->pfrke_counters->pfrkc_packets[dir_out][op_pass]++;
-			ke->pfrke_counters->pfrkc_bytes[dir_out][op_pass] += len;
-		}
+		counter_u64_add(ke->pfrke_counters.
+		    pfrkc_packets[dir_out][op_pass], 1);
+		counter_u64_add(ke->pfrke_counters.
+		    pfrkc_bytes[dir_out][op_pass], len);
 	}
 }
 
 struct pfr_ktable *
 pfr_attach_table(struct pf_ruleset *rs, char *name)
 {
 	struct pfr_ktable	*kt, *rt;
 	struct pfr_table	 tbl;
 	struct pf_anchor	*ac = rs->anchor;
 
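The packet-path accounting in pfr_update_stats() shrinks to unconditional counter_u64_add() calls: with the counters preallocated at entry creation, the old lazy uma_zalloc(M_NOWAIT) and its silent loss of statistics on allocation failure disappear from the hot path. A minimal sketch of the resulting fast path, names illustrative:

    #include <sys/counter.h>

    static inline void
    example_account(counter_u64_t pkts, counter_u64_t bytes, uint64_t len)
    {
        /* Two lockless per-CPU adds; no branches, no allocation. */
        counter_u64_add(pkts, 1);
        counter_u64_add(bytes, len);
    }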
@@ -2081,21 +2155,21 @@ pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
 	if (pidx != NULL)
 		idx = *pidx;
 	if (counter != NULL && idx >= 0)
 		use_counter = 1;
 	if (idx < 0)
 		idx = 0;
 
 _next_block:
 	ke = pfr_kentry_byidx(kt, idx, af);
 	if (ke == NULL) {
-		kt->pfrkt_nomatch++;
+		counter_u64_add(kt->pfrkt_nomatch, 1);
 		return (1);
 	}
 	pfr_prepare_network(&umask, af, ke->pfrke_net);
 	cur = SUNION2PF(&ke->pfrke_sa, af);
 	mask = SUNION2PF(&umask, af);
 
 	if (use_counter) {
 		/* is supplied address within block? */
 		if (!PF_MATCHA(0, cur, mask, counter, af)) {
 			/* no, go to next block in table */
@@ -2106,41 +2180,41 @@ pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
 			PF_ACPY(addr, counter, af);
 		} else {
 			/* use first address of block */
 			PF_ACPY(addr, cur, af);
 		}
 
 	if (!KENTRY_NETWORK(ke)) {
 		/* this is a single IP address - no possible nested block */
 		PF_ACPY(counter, addr, af);
 		*pidx = idx;
-		kt->pfrkt_match++;
+		counter_u64_add(kt->pfrkt_match, 1);
 		return (0);
 	}
 	for (;;) {
 		/* we don't want to use a nested block */
 		switch (af) {
 		case AF_INET:
 			ke2 = (struct pfr_kentry *)rn_match(&uaddr,
 			    &kt->pfrkt_ip4->rh);
 			break;
 		case AF_INET6:
 			ke2 = (struct pfr_kentry *)rn_match(&uaddr,
 			    &kt->pfrkt_ip6->rh);
 			break;
 		}
 		/* no need to check KENTRY_RNF_ROOT() here */
 		if (ke2 == ke) {
 			/* lookup return the same block - perfect */
 			PF_ACPY(counter, addr, af);
 			*pidx = idx;
-			kt->pfrkt_match++;
+			counter_u64_add(kt->pfrkt_match, 1);
 			return (0);
 		}
 
 		/* we need to increase the counter past the nested block */
 		pfr_prepare_network(&umask, AF_INET, ke2->pfrke_net);
 		PF_POOLMASK(addr, addr, SUNION2PF(&umask, af), &pfr_ffaddr, af);
 		PF_AINC(addr, af);
 		if (!PF_MATCHA(0, cur, mask, addr, af)) {
 			/* ok, we reached the end of our main block */
 			/* go to next block in table */