View | Details | Raw Unified | Return to bug 210106
Collapse All | Expand All

(-)amd64/include/counter.h (-1 / +1 lines)
Lines 51-57 Link Here
51
	int i;
51
	int i;
52
52
53
	r = 0;
53
	r = 0;
54
	for (i = 0; i < mp_ncpus; i++)
54
	CPU_FOREACH(i)
55
		r += counter_u64_read_one((uint64_t *)p, i);
55
		r += counter_u64_read_one((uint64_t *)p, i);
56
56
57
	return (r);
57
	return (r);
(-)cddl/compat/opensolaris/sys/proc.h (-2 / +2 lines)
Lines 45-52 Link Here
45
#define	CPU		curcpu
45
#define	CPU		curcpu
46
#define	minclsyspri	PRIBIO
46
#define	minclsyspri	PRIBIO
47
#define	maxclsyspri	PVM
47
#define	maxclsyspri	PVM
48
#define	max_ncpus	mp_ncpus
48
#define	max_ncpus	(mp_maxid+1)
49
#define	boot_max_ncpus	mp_ncpus
49
#define	boot_max_ncpus	(mp_maxid+1)
50
50
51
#define	TS_RUN	0
51
#define	TS_RUN	0
52
52
(-)dev/cpuctl/cpuctl.c (-10 / +10 lines)
Lines 120-126 Link Here
120
set_cpu(int cpu, struct thread *td)
120
set_cpu(int cpu, struct thread *td)
121
{
121
{
122
122
123
	KASSERT(cpu >= 0 && cpu < mp_ncpus && cpu_enabled(cpu),
123
	KASSERT(cpu >= 0 && cpu <= mp_maxid && cpu_enabled(cpu),
124
	    ("[cpuctl,%d]: bad cpu number %d", __LINE__, cpu));
124
	    ("[cpuctl,%d]: bad cpu number %d", __LINE__, cpu));
125
	thread_lock(td);
125
	thread_lock(td);
126
	sched_bind(td, cpu);
126
	sched_bind(td, cpu);
Lines 133-139 Link Here
133
restore_cpu(int oldcpu, int is_bound, struct thread *td)
133
restore_cpu(int oldcpu, int is_bound, struct thread *td)
134
{
134
{
135
135
136
	KASSERT(oldcpu >= 0 && oldcpu < mp_ncpus && cpu_enabled(oldcpu),
136
	KASSERT(oldcpu >= 0 && oldcpu <= mp_maxid && cpu_enabled(oldcpu),
137
	    ("[cpuctl,%d]: bad cpu number %d", __LINE__, oldcpu));
137
	    ("[cpuctl,%d]: bad cpu number %d", __LINE__, oldcpu));
138
	thread_lock(td);
138
	thread_lock(td);
139
	if (is_bound == 0)
139
	if (is_bound == 0)
Lines 150-156 Link Here
150
	int ret;
150
	int ret;
151
	int cpu = dev2unit(dev);
151
	int cpu = dev2unit(dev);
152
152
153
	if (cpu >= mp_ncpus || !cpu_enabled(cpu)) {
153
	if (cpu > mp_maxid || !cpu_enabled(cpu)) {
154
		DPRINTF("[cpuctl,%d]: bad cpu number %d\n", __LINE__, cpu);
154
		DPRINTF("[cpuctl,%d]: bad cpu number %d\n", __LINE__, cpu);
155
		return (ENXIO);
155
		return (ENXIO);
156
	}
156
	}
Lines 201-207 Link Here
201
	int is_bound = 0;
201
	int is_bound = 0;
202
	int oldcpu;
202
	int oldcpu;
203
203
204
	KASSERT(cpu >= 0 && cpu < mp_ncpus,
204
	KASSERT(cpu >= 0 && cpu <= mp_maxid,
205
	    ("[cpuctl,%d]: bad cpu number %d", __LINE__, cpu));
205
	    ("[cpuctl,%d]: bad cpu number %d", __LINE__, cpu));
206
206
207
	/* Explicitly clear cpuid data to avoid returning stale info. */
207
	/* Explicitly clear cpuid data to avoid returning stale info. */
Lines 245-251 Link Here
245
	int oldcpu;
245
	int oldcpu;
246
	int ret;
246
	int ret;
247
247
248
	KASSERT(cpu >= 0 && cpu < mp_ncpus,
248
	KASSERT(cpu >= 0 && cpu <= mp_maxid,
249
	    ("[cpuctl,%d]: bad cpu number %d", __LINE__, cpu));
249
	    ("[cpuctl,%d]: bad cpu number %d", __LINE__, cpu));
250
250
251
	/*
251
	/*
Lines 296-302 Link Here
296
	char vendor[13];
296
	char vendor[13];
297
	int ret;
297
	int ret;
298
298
299
	KASSERT(cpu >= 0 && cpu < mp_ncpus,
299
	KASSERT(cpu >= 0 && cpu <= mp_maxid,
300
	    ("[cpuctl,%d]: bad cpu number %d", __LINE__, cpu));
300
	    ("[cpuctl,%d]: bad cpu number %d", __LINE__, cpu));
301
	DPRINTF("[cpuctl,%d]: XXX %d", __LINE__, cpu);
301
	DPRINTF("[cpuctl,%d]: XXX %d", __LINE__, cpu);
302
302
Lines 512-518 Link Here
512
	int cpu;
512
	int cpu;
513
513
514
	cpu = dev2unit(dev);
514
	cpu = dev2unit(dev);
515
	if (cpu >= mp_ncpus || !cpu_enabled(cpu)) {
515
	if (cpu > mp_maxid || !cpu_enabled(cpu)) {
516
		DPRINTF("[cpuctl,%d]: incorrect cpu number %d\n", __LINE__,
516
		DPRINTF("[cpuctl,%d]: incorrect cpu number %d\n", __LINE__,
517
		    cpu);
517
		    cpu);
518
		return (ENXIO);
518
		return (ENXIO);
Lines 531-545 Link Here
531
	case MOD_LOAD:
531
	case MOD_LOAD:
532
		if (bootverbose)
532
		if (bootverbose)
533
			printf("cpuctl: access to MSR registers/cpuid info.\n");
533
			printf("cpuctl: access to MSR registers/cpuid info.\n");
534
		cpuctl_devs = malloc(sizeof(*cpuctl_devs) * mp_ncpus, M_CPUCTL,
534
		cpuctl_devs = malloc(sizeof(*cpuctl_devs) * (mp_maxid+1), M_CPUCTL,
535
		    M_WAITOK | M_ZERO);
535
		    M_WAITOK | M_ZERO);
536
		for (cpu = 0; cpu < mp_ncpus; cpu++)
536
		CPU_FOREACH(cpu)
537
			if (cpu_enabled(cpu))
537
			if (cpu_enabled(cpu))
538
				cpuctl_devs[cpu] = make_dev(&cpuctl_cdevsw, cpu,
538
				cpuctl_devs[cpu] = make_dev(&cpuctl_cdevsw, cpu,
539
				    UID_ROOT, GID_KMEM, 0640, "cpuctl%d", cpu);
539
				    UID_ROOT, GID_KMEM, 0640, "cpuctl%d", cpu);
540
		break;
540
		break;
541
	case MOD_UNLOAD:
541
	case MOD_UNLOAD:
542
		for (cpu = 0; cpu < mp_ncpus; cpu++) {
542
		CPU_FOREACH(cpu) {
543
			if (cpuctl_devs[cpu] != NULL)
543
			if (cpuctl_devs[cpu] != NULL)
544
				destroy_dev(cpuctl_devs[cpu]);
544
				destroy_dev(cpuctl_devs[cpu]);
545
		}
545
		}
(-)i386/include/counter.h (-3 / +3 lines)
Lines 98-110 Link Here
98
		 * critical section as well.
98
		 * critical section as well.
99
		 */
99
		 */
100
		critical_enter();
100
		critical_enter();
101
		for (i = 0; i < mp_ncpus; i++) {
101
		CPU_FOREACH(i) {
102
			res += *(uint64_t *)((char *)p +
102
			res += *(uint64_t *)((char *)p +
103
			    sizeof(struct pcpu) * i);
103
			    sizeof(struct pcpu) * i);
104
		}
104
		}
105
		critical_exit();
105
		critical_exit();
106
	} else {
106
	} else {
107
		for (i = 0; i < mp_ncpus; i++)
107
		CPU_FOREACH(i)
108
			res += counter_u64_read_one_8b((uint64_t *)((char *)p +
108
			res += counter_u64_read_one_8b((uint64_t *)((char *)p +
109
			    sizeof(struct pcpu) * i));
109
			    sizeof(struct pcpu) * i));
110
	}
110
	}
Lines 144-150 Link Here
144
144
145
	if ((cpu_feature & CPUID_CX8) == 0) {
145
	if ((cpu_feature & CPUID_CX8) == 0) {
146
		critical_enter();
146
		critical_enter();
147
		for (i = 0; i < mp_ncpus; i++)
147
		CPU_FOREACH(i)
148
			*(uint64_t *)((char *)c + sizeof(struct pcpu) * i) = 0;
148
			*(uint64_t *)((char *)c + sizeof(struct pcpu) * i) = 0;
149
		critical_exit();
149
		critical_exit();
150
	} else {
150
	} else {
(-)kern/subr_pcpu.c (-4 / +4 lines)
Lines 248-254 Link Here
248
	uintptr_t dpcpu;
248
	uintptr_t dpcpu;
249
	int i;
249
	int i;
250
250
251
	for (i = 0; i < mp_ncpus; ++i) {
251
	CPU_FOREACH(i) {
252
		dpcpu = dpcpu_off[i];
252
		dpcpu = dpcpu_off[i];
253
		if (dpcpu == 0)
253
		if (dpcpu == 0)
254
			continue;
254
			continue;
Lines 289-295 Link Here
289
	int i;
289
	int i;
290
290
291
	count = 0;
291
	count = 0;
292
	for (i = 0; i < mp_ncpus; ++i) {
292
	CPU_FOREACH(i) {
293
		dpcpu = dpcpu_off[i];
293
		dpcpu = dpcpu_off[i];
294
		if (dpcpu == 0)
294
		if (dpcpu == 0)
295
			continue;
295
			continue;
Lines 306-312 Link Here
306
	int i;
306
	int i;
307
307
308
	count = 0;
308
	count = 0;
309
	for (i = 0; i < mp_ncpus; ++i) {
309
	CPU_FOREACH(i) {
310
		dpcpu = dpcpu_off[i];
310
		dpcpu = dpcpu_off[i];
311
		if (dpcpu == 0)
311
		if (dpcpu == 0)
312
			continue;
312
			continue;
Lines 323-329 Link Here
323
	int i;
323
	int i;
324
324
325
	count = 0;
325
	count = 0;
326
	for (i = 0; i < mp_ncpus; ++i) {
326
	CPU_FOREACH(i) {
327
		dpcpu = dpcpu_off[i];
327
		dpcpu = dpcpu_off[i];
328
		if (dpcpu == 0)
328
		if (dpcpu == 0)
329
			continue;
329
			continue;
(-)kern/subr_taskqueue.c (-5 / +18 lines)
Lines 832-837 Link Here
832
taskqgroup_cpu_create(struct taskqgroup *qgroup, int idx)
832
taskqgroup_cpu_create(struct taskqgroup *qgroup, int idx)
833
{
833
{
834
	struct taskqgroup_cpu *qcpu;
834
	struct taskqgroup_cpu *qcpu;
835
	int i, j;
835
836
836
	qcpu = &qgroup->tqg_queue[idx];
837
	qcpu = &qgroup->tqg_queue[idx];
837
	LIST_INIT(&qcpu->tgc_tasks);
838
	LIST_INIT(&qcpu->tgc_tasks);
Lines 839-845 Link Here
839
	    taskqueue_thread_enqueue, &qcpu->tgc_taskq);
840
	    taskqueue_thread_enqueue, &qcpu->tgc_taskq);
840
	taskqueue_start_threads(&qcpu->tgc_taskq, 1, PI_SOFT,
841
	taskqueue_start_threads(&qcpu->tgc_taskq, 1, PI_SOFT,
841
	    "%s_%d", qgroup->tqg_name, idx);
842
	    "%s_%d", qgroup->tqg_name, idx);
842
	qcpu->tgc_cpu = idx * qgroup->tqg_stride;
843
844
	for (i = CPU_FIRST(), j = 0; j < idx * qgroup->tqg_stride;
845
	    j++, i = CPU_NEXT(i)) {
846
		/*
847
		 * Empty loop: step i through the online CPUs until it is the
848
		 * (idx * qgroup->tqg_stride)'th one, wrapping around past the
849
		 * highest CPU id if necessary.
		 */
850
	}
851
	qcpu->tgc_cpu = i;
843
}
852
}
844
853
845
static void
854
static void
Lines 1017-1029 Link Here
1017
	LIST_HEAD(, grouptask) gtask_head = LIST_HEAD_INITIALIZER(NULL);
1026
	LIST_HEAD(, grouptask) gtask_head = LIST_HEAD_INITIALIZER(NULL);
1018
	cpuset_t mask;
1027
	cpuset_t mask;
1019
	struct grouptask *gtask;
1028
	struct grouptask *gtask;
1020
	int i, old_cnt, qid;
1029
	int i, k, old_cnt, qid, cpu;
1021
1030
1022
	mtx_assert(&qgroup->tqg_lock, MA_OWNED);
1031
	mtx_assert(&qgroup->tqg_lock, MA_OWNED);
1023
1032
1024
	if (cnt < 1 || cnt * stride > mp_ncpus || !smp_started) {
1033
	if (cnt < 1 || cnt * stride > mp_ncpus || !smp_started) {
1025
		printf("taskqgroup_adjust failed cnt: %d stride: %d mp_ncpus: %d smp_started: %d\n",
1034
		printf("taskqgroup_adjust failed cnt: %d stride: %d "
1026
			   cnt, stride, mp_ncpus, smp_started);
1035
		    "mp_ncpus: %d smp_started: %d\n", cnt, stride, mp_ncpus,
1036
		    smp_started);
1027
		return (EINVAL);
1037
		return (EINVAL);
1028
	}
1038
	}
1029
	if (qgroup->tqg_adjusting) {
1039
	if (qgroup->tqg_adjusting) {
Lines 1081-1088 Link Here
1081
	/*
1091
	/*
1082
	 * Set new CPU and IRQ affinity
1092
	 * Set new CPU and IRQ affinity
1083
	 */
1093
	 */
1094
	cpu = CPU_FIRST();
1084
	for (i = 0; i < cnt; i++) {
1095
	for (i = 0; i < cnt; i++) {
1085
		qgroup->tqg_queue[i].tgc_cpu = i * qgroup->tqg_stride;
1096
		qgroup->tqg_queue[i].tgc_cpu = cpu;
1097
		for (k = 0; k < qgroup->tqg_stride; k++)
1098
			cpu = CPU_NEXT(cpu);
1086
		CPU_ZERO(&mask);
1099
		CPU_ZERO(&mask);
1087
		CPU_SET(qgroup->tqg_queue[i].tgc_cpu, &mask);
1100
		CPU_SET(qgroup->tqg_queue[i].tgc_cpu, &mask);
1088
		LIST_FOREACH(gtask, &qgroup->tqg_queue[i].tgc_tasks, gt_list) {
1101
		LIST_FOREACH(gtask, &qgroup->tqg_queue[i].tgc_tasks, gt_list) {
(-)net/flowtable.c (-1 / +1 lines)
Lines 746-752 Link Here
746
		ft->ft_table[i] = uma_zalloc(pcpu_zone_ptr, M_WAITOK | M_ZERO);
746
		ft->ft_table[i] = uma_zalloc(pcpu_zone_ptr, M_WAITOK | M_ZERO);
747
747
748
	ft->ft_masks = uma_zalloc(pcpu_zone_ptr, M_WAITOK);
748
	ft->ft_masks = uma_zalloc(pcpu_zone_ptr, M_WAITOK);
749
	for (int i = 0; i < mp_ncpus; i++) {
749
	CPU_FOREACH(i) {
750
		bitstr_t **b;
750
		bitstr_t **b;
751
751
752
		b = zpcpu_get_cpu(ft->ft_masks, i);
752
		b = zpcpu_get_cpu(ft->ft_masks, i);
(-)net/iflib.c (-4 / +4 lines)
Lines 3848-3854 Link Here
3848
	iflib_txq_t txq;
3848
	iflib_txq_t txq;
3849
	iflib_rxq_t rxq;
3849
	iflib_rxq_t rxq;
3850
	iflib_fl_t fl = NULL;
3850
	iflib_fl_t fl = NULL;
3851
	int i, j, err, txconf, rxconf, fl_ifdi_offset;
3851
	int i, j, cpu, err, txconf, rxconf, fl_ifdi_offset;
3852
	iflib_dma_info_t ifdip;
3852
	iflib_dma_info_t ifdip;
3853
	uint32_t *rxqsizes = sctx->isc_rxqsizes;
3853
	uint32_t *rxqsizes = sctx->isc_rxqsizes;
3854
	uint32_t *txqsizes = sctx->isc_txqsizes;
3854
	uint32_t *txqsizes = sctx->isc_txqsizes;
Lines 3897-3903 Link Here
3897
	/*
3897
	/*
3898
	 * XXX handle allocation failure
3898
	 * XXX handle allocation failure
3899
	 */
3899
	 */
3900
	for (txconf = i = 0; i < ntxqsets; i++, txconf++, txq++) {
3900
	for (txconf = i = 0, cpu = CPU_FIRST(); i < ntxqsets; i++, txconf++, txq++, cpu = CPU_NEXT(cpu)) {
3901
		/* Set up some basics */
3901
		/* Set up some basics */
3902
3902
3903
		if ((ifdip = malloc(sizeof(struct iflib_dma_info) * ntxqs, M_IFLIB, M_WAITOK|M_ZERO)) == NULL) {
3903
		if ((ifdip = malloc(sizeof(struct iflib_dma_info) * ntxqs, M_IFLIB, M_WAITOK|M_ZERO)) == NULL) {
Lines 3917-3924 Link Here
3917
		txq->ift_ctx = ctx;
3917
		txq->ift_ctx = ctx;
3918
		txq->ift_id = i;
3918
		txq->ift_id = i;
3919
		/* XXX fix this */
3919
		/* XXX fix this */
3920
		txq->ift_timer.c_cpu = i % mp_ncpus;
3920
		txq->ift_timer.c_cpu = cpu;
3921
		txq->ift_db_check.c_cpu = i % mp_ncpus;
3921
		txq->ift_db_check.c_cpu = cpu;
3922
		txq->ift_nbr = nbuf_rings;
3922
		txq->ift_nbr = nbuf_rings;
3923
3923
3924
		if (iflib_txsd_alloc(txq)) {
3924
		if (iflib_txsd_alloc(txq)) {
(-)netinet/ip_id.c (-1 / +3 lines)
Lines 275-284 Link Here
275
static void
275
static void
276
ipid_sysinit(void)
276
ipid_sysinit(void)
277
{
277
{
278
	int i;
278
279
279
	mtx_init(&V_ip_id_mtx, "ip_id_mtx", NULL, MTX_DEF);
280
	mtx_init(&V_ip_id_mtx, "ip_id_mtx", NULL, MTX_DEF);
280
	V_ip_id = counter_u64_alloc(M_WAITOK);
281
	V_ip_id = counter_u64_alloc(M_WAITOK);
281
	for (int i = 0; i < mp_ncpus; i++)
282
	
283
	CPU_FOREACH(i)
282
		arc4rand(zpcpu_get_cpu(V_ip_id, i), sizeof(uint64_t), 0);
284
		arc4rand(zpcpu_get_cpu(V_ip_id, i), sizeof(uint64_t), 0);
283
}
285
}
284
VNET_SYSINIT(ip_id, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY, ipid_sysinit, NULL);
286
VNET_SYSINIT(ip_id, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY, ipid_sysinit, NULL);
(-)powerpc/include/counter.h (-1 / +1 lines)
Lines 54-60 Link Here
54
	int i;
54
	int i;
55
55
56
	r = 0;
56
	r = 0;
57
	for (i = 0; i < mp_ncpus; i++)
57
	CPU_FOREACH(i)
58
		r += counter_u64_read_one((uint64_t *)p, i);
58
		r += counter_u64_read_one((uint64_t *)p, i);
59
59
60
	return (r);
60
	return (r);
(-)powerpc/powerpc/mp_machdep.c (-6 / +2 lines)
Lines 113-132 Link Here
113
	int error;
113
	int error;
114
114
115
	mp_ncpus = 0;
115
	mp_ncpus = 0;
116
	mp_maxid = 0;
116
	error = platform_smp_first_cpu(&cpuref);
117
	error = platform_smp_first_cpu(&cpuref);
117
	while (!error) {
118
	while (!error) {
118
		mp_ncpus++;
119
		mp_ncpus++;
120
		mp_maxid = max(cpuref.cr_cpuid, mp_maxid);
119
		error = platform_smp_next_cpu(&cpuref);
121
		error = platform_smp_next_cpu(&cpuref);
120
	}
122
	}
121
	/* Sanity. */
123
	/* Sanity. */
122
	if (mp_ncpus == 0)
124
	if (mp_ncpus == 0)
123
		mp_ncpus = 1;
125
		mp_ncpus = 1;
124
125
	/*
126
	 * Set the largest cpuid we're going to use. This is necessary
127
	 * for VM initialization.
128
	 */
129
	mp_maxid = min(mp_ncpus, MAXCPU) - 1;
130
}
126
}
131
127
132
int
128
int
(-)vm/uma.h (-1 / +1 lines)
Lines 276-282 Link Here
276
					 * mini-dumps.
276
					 * mini-dumps.
277
					 */
277
					 */
278
#define	UMA_ZONE_PCPU		0x8000	/*
278
#define	UMA_ZONE_PCPU		0x8000	/*
279
					 * Allocates mp_ncpus slabs sized to
279
					 * Allocates mp_maxid+1 slabs sized to
280
					 * sizeof(struct pcpu).
280
					 * sizeof(struct pcpu).
281
					 */
281
					 */
282
282
(-)vm/uma_core.c (-2 / +3 lines)
Lines 1227-1233 Link Here
1227
	u_int shsize;
1227
	u_int shsize;
1228
1228
1229
	if (keg->uk_flags & UMA_ZONE_PCPU) {
1229
	if (keg->uk_flags & UMA_ZONE_PCPU) {
1230
		u_int ncpus = mp_ncpus ? mp_ncpus : MAXCPU;
1230
		u_int ncpus = (mp_maxid+1) ? (mp_maxid+1) : MAXCPU;
1231
1231
1232
		keg->uk_slabsize = sizeof(struct pcpu);
1232
		keg->uk_slabsize = sizeof(struct pcpu);
1233
		keg->uk_ppera = howmany(ncpus * sizeof(struct pcpu),
1233
		keg->uk_ppera = howmany(ncpus * sizeof(struct pcpu),
Lines 3265-3273 Link Here
3265
static void
3265
static void
3266
uma_zero_item(void *item, uma_zone_t zone)
3266
uma_zero_item(void *item, uma_zone_t zone)
3267
{
3267
{
3268
	int i;
3268
3269
3269
	if (zone->uz_flags & UMA_ZONE_PCPU) {
3270
	if (zone->uz_flags & UMA_ZONE_PCPU) {
3270
		for (int i = 0; i < mp_ncpus; i++)
3271
		CPU_FOREACH(i)
3271
			bzero(zpcpu_get_cpu(item, i), zone->uz_size);
3272
			bzero(zpcpu_get_cpu(item, i), zone->uz_size);
3272
	} else
3273
	} else
3273
		bzero(item, zone->uz_size);
3274
		bzero(item, zone->uz_size);

Return to bug 210106