Patch for bug 257965: sys/netinet/in_fib_dxr.c

--- a/sys/netinet/in_fib_dxr.c
+++ b/sys/netinet/in_fib_dxr.c
@@ -203,6 +203,12 @@ struct dxr_aux {
 	uint32_t		rtbl_top;
 	uint32_t		rtbl_work_frags;
 	uint32_t		work_chunk;
+
+	/* Rebuild stats, debugging */
+	uint32_t		range_updates;
+	uint32_t		range_refs;
+	uint32_t		range_unrefs;
+	uint32_t		trie_updates;
 };
 
 /* Main lookup structure container */
@@ -234,13 +240,13 @@ SYSCTL_DECL(_net_route_algo);
 SYSCTL_NODE(_net_route_algo, OID_AUTO, dxr, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
     "DXR tunables");
 
-VNET_DEFINE_STATIC(int, max_trie_holes) = 8;
+VNET_DEFINE_STATIC(int, max_trie_holes) = 10;
 #define	V_max_trie_holes	VNET(max_trie_holes)
 SYSCTL_INT(_net_route_algo_dxr, OID_AUTO, max_trie_holes,
     CTLFLAG_RW | CTLFLAG_VNET, &VNET_NAME(max_trie_holes), 0,
     "Trie fragmentation threshold before triggering a full rebuild");
 
-VNET_DEFINE_STATIC(int, max_range_holes) = 16;
+VNET_DEFINE_STATIC(int, max_range_holes) = 100;
 #define	V_max_range_holes	VNET(max_range_holes)
 SYSCTL_INT(_net_route_algo_dxr, OID_AUTO, max_range_holes,
     CTLFLAG_RW | CTLFLAG_VNET, &VNET_NAME(max_range_holes), 0,
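
The two defaults above loosen the fragmentation thresholds: a full trie rebuild now triggers after 10 holes rather than 8, and a full range-table rebuild after 100 rather than 16. A minimal userland sketch for reading the resulting knob, assuming the node above is exported as net.route.algo.dxr (inferred from _net_route_algo_dxr, not stated in the patch):

/*
 * Hypothetical userland reader for the tunables above.  The OID path
 * net.route.algo.dxr.* is inferred from the _net_route_algo_dxr node,
 * not spelled out in this patch.
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int holes;
	size_t len = sizeof(holes);

	if (sysctlbyname("net.route.algo.dxr.max_range_holes",
	    &holes, &len, NULL, 0) == 0)
		printf("max_range_holes: %d\n", holes);
	return (0);
}
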
@@ -388,6 +394,8 @@ chunk_ref(struct dxr_aux *da, uint32_t chunk)
 	uint32_t size = chunk_size(da, fdesc);
 	uint32_t hash = chunk_hash(da, fdesc);
 
+	da->range_refs++;
+
 	/* Find an existing descriptor */
 	LIST_FOREACH(cdp, &da->chunk_hashtbl[hash & CHUNK_HASH_MASK],
 	    cd_hash_le) {
@@ -478,6 +486,8 @@ chunk_unref(struct dxr_aux *da, uint32_t chunk)
 	uint32_t size = chunk_size(da, fdesc);
 	uint32_t hash = chunk_hash(da, fdesc);
 
+	da->range_unrefs++;
+
 	/* Find an existing descriptor */
 	LIST_FOREACH(cdp, &da->chunk_hashtbl[hash & CHUNK_HASH_MASK],
 	    cd_hash_le)
@@ -707,6 +717,8 @@ update_chunk(struct dxr_aux *da, uint32_t chunk)
 	uint32_t first = chunk << DXR_RANGE_SHIFT;
 	uint32_t last = first | DXR_RANGE_MASK;
 
+	da->range_updates++;
+
 	if (da->direct_tbl[chunk].fragments != FRAGS_MARK_HIT)
 		chunk_unref(da, chunk);
 
817
		da->refcnt = 1;
829
		da->refcnt = 1;
818
		LIST_INIT(&da->all_chunks);
830
		LIST_INIT(&da->all_chunks);
819
		LIST_INIT(&da->all_trie);
831
		LIST_INIT(&da->all_trie);
832
		da->unused_chunks_cnt = 0;
820
		da->rtbl_size = RTBL_SIZE_INCR;
833
		da->rtbl_size = RTBL_SIZE_INCR;
821
		da->range_tbl = NULL;
834
		da->range_tbl = NULL;
822
		da->xtbl_size = XTBL_SIZE_INCR;
835
		da->xtbl_size = XTBL_SIZE_INCR;
Lines 845-850 dxr_build(struct dxr *dxr) Link Here
845
	}
858
	}
846
#endif
859
#endif
847
	da->fd = dxr->fd;
860
	da->fd = dxr->fd;
861
	da->range_updates = 0;
862
	da->range_refs = 0;
863
	da->range_unrefs = 0;
864
	da->trie_updates = 0;
848
865
849
	microuptime(&t0);
866
	microuptime(&t0);
850
867
Lines 855-860 dxr_build(struct dxr *dxr) Link Here
855
	    da->unused_chunks_cnt > V_max_range_holes)
872
	    da->unused_chunks_cnt > V_max_range_holes)
856
		range_rebuild = 1;
873
		range_rebuild = 1;
857
	if (range_rebuild) {
874
	if (range_rebuild) {
875
		/* Dump fragmentation stats */
876
		if (da->unused_chunks_cnt != 0) {
877
#define	NSIZES 32
878
			uint32_t size_cnt[NSIZES];
879
			char out_buf[NSIZES * 10];
880
			char *p = out_buf;
881
882
			bzero(size_cnt, sizeof(size_cnt));
883
			LIST_FOREACH(cdp, &da->unused_chunks, cd_hash_le)
884
				if (cdp->cd_max_size < NSIZES)
885
					size_cnt[cdp->cd_max_size]++;
886
				else
887
					size_cnt[0]++;
888
			
889
			for (i = 0; i < NSIZES; i++)
890
				if (size_cnt[i])
891
					p += sprintf(p, "%d:%d ", i,
892
					    size_cnt[i]);
893
			FIB_PRINTF(LOG_INFO, da->fd, "frags %s", out_buf);
894
#undef	NSIZES
895
		}
896
858
		/* Bulk cleanup */
897
		/* Bulk cleanup */
859
		bzero(da->chunk_hashtbl, sizeof(da->chunk_hashtbl));
898
		bzero(da->chunk_hashtbl, sizeof(da->chunk_hashtbl));
860
		while ((cdp = LIST_FIRST(&da->all_chunks)) != NULL) {
899
		while ((cdp = LIST_FIRST(&da->all_chunks)) != NULL) {
Lines 915-921 dxr_build(struct dxr *dxr) Link Here
915
954
916
	for (i = da->updates_low >> dxr_x; i <= da->updates_high >> dxr_x;
955
	for (i = da->updates_low >> dxr_x; i <= da->updates_high >> dxr_x;
917
	    i++) {
956
	    i++) {
918
		trie_unref(da, i);
957
		if (!trie_rebuild) {
958
			for (int j = 0, m = 0; j < (1 << dxr_x); j += 32)
959
				m |= da->updates_mask[((i << dxr_x) + j) >> 5];
960
			if (m == 0)
961
				continue;
962
			trie_unref(da, i);
963
		}
964
		da->trie_updates++;
919
		ti = trie_ref(da, i);
965
		ti = trie_ref(da, i);
920
		if (ti < 0)
966
		if (ti < 0)
921
			return;
967
			return;
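
The loop above now skips trie chunks whose address slice saw no changes: assuming updates_mask keeps one dirty bit per range-table chunk, the inner loop ORs together the 32-bit mask words covering one trie chunk, and the chunk is only unrefed and re-refed when some bit is set. (The accumulator is declared before the inner for loop; declaring it in the for-init, as an earlier draft did, would put it out of scope at the test.) The same test in isolation, with illustrative names:

/*
 * Isolated sketch of the dirty-chunk test introduced above.  It assumes
 * updates_mask holds one bit per range-table chunk; a trie chunk covers
 * 1 << dxr_x of those, i.e. (1 << dxr_x) / 32 mask words.  Names are
 * illustrative, not from the kernel.
 */
#include <stdbool.h>
#include <stdint.h>

static bool
trie_chunk_dirty(const uint32_t *updates_mask, uint32_t i, int dxr_x)
{
	uint32_t m = 0;

	for (int j = 0; j < (1 << dxr_x); j += 32)
		m |= updates_mask[((i << dxr_x) + j) >> 5];
	return (m != 0);
}
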
@@ -979,10 +1025,13 @@ dxr_build(struct dxr *dxr)
 	FIB_PRINTF(LOG_INFO, da->fd, "%d.%02d KBytes, %d.%02d Bytes/prefix",
 	    dxr_tot_size / 1024, dxr_tot_size * 100 / 1024 % 100,
 	    i / 100, i % 100);
+	FIB_PRINTF(LOG_INFO, da->fd, "%d range updates, %d refs, %d unrefs",
+	    da->range_updates, da->range_refs, da->range_unrefs);
 	i = (t1.tv_sec - t0.tv_sec) * 1000000 + t1.tv_usec - t0.tv_usec;
 	FIB_PRINTF(LOG_INFO, da->fd, "range table %s in %u.%03u ms",
 	    range_rebuild ? "rebuilt" : "updated", i / 1000, i % 1000);
 #ifdef DXR2
+	FIB_PRINTF(LOG_INFO, da->fd, "%d trie updates", da->trie_updates);
 	i = (t2.tv_sec - t1.tv_sec) * 1000000 + t2.tv_usec - t1.tv_usec;
 	FIB_PRINTF(LOG_INFO, da->fd, "trie %s in %u.%03u ms",
 	    trie_rebuild ? "rebuilt" : "updated", i / 1000, i % 1000);
1131
	struct rib_rtable_info rinfo;
1180
	struct rib_rtable_info rinfo;
1132
	int update_delta = 0;
1181
	int update_delta = 0;
1133
#endif
1182
#endif
1183
	int plen_min = 32;
1184
	int plen_max = 0;
1185
	int plen_acc = 0;
1134
1186
1135
	KASSERT(data != NULL, ("%s: NULL data", __FUNCTION__));
1187
	KASSERT(data != NULL, ("%s: NULL data", __FUNCTION__));
1136
	KASSERT(q != NULL, ("%s: NULL q", __FUNCTION__));
1188
	KASSERT(q != NULL, ("%s: NULL q", __FUNCTION__));
Lines 1164-1170 dxr_change_rib_batch(struct rib_head *rnh, struct fib_change_queue *q, Link Here
1164
			da->updates_low = start;
1216
			da->updates_low = start;
1165
		if (end > da->updates_high)
1217
		if (end > da->updates_high)
1166
			da->updates_high = end;
1218
			da->updates_high = end;
1219
1220
		plen_acc += plen;
1221
		if (plen < plen_min)
1222
			plen_min = plen;
1223
		if (plen > plen_max)
1224
			plen_max = plen;
1167
	}
1225
	}
1226
	FIB_PRINTF(LOG_INFO, da->fd, "plen min %d max %d avg %d", plen_min,
1227
	    plen_max, plen_acc / q->count);
1168
1228
1169
#ifdef INVARIANTS
1229
#ifdef INVARIANTS
1170
	fib_get_rtable_info(fib_get_rh(da->fd), &rinfo);
1230
	fib_get_rtable_info(fib_get_rh(da->fd), &rinfo);
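
The batch handler now records the minimum, maximum, and (truncating integer) average prefix length per update batch. A userland equivalent of the bookkeeping, with an explicit empty-batch guard that the kernel path presumably does not need, since the division above assumes q->count is nonzero (names illustrative):

/*
 * Userland equivalent of the per-batch prefix-length stats above.  The
 * average uses truncating integer division, as in the patch; the guard
 * for count == 0 is added here only for standalone safety.
 */
#include <stdint.h>

struct plen_stats {
	int min, max, avg;
};

static struct plen_stats
plen_stats_collect(const uint8_t *plens, int count)
{
	struct plen_stats st = { .min = 32, .max = 0, .avg = 0 };
	int acc = 0;

	for (int k = 0; k < count; k++) {
		acc += plens[k];
		if (plens[k] < st.min)
			st.min = plens[k];
		if (plens[k] > st.max)
			st.max = plens[k];
	}
	if (count > 0)
		st.avg = acc / count;
	return (st);
}
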
