Patch diff for bug 197922

sys/kern/sched_ule.c (-13 / +6 lines)

Lines 277-283
 static struct tdq	tdq_cpu[MAXCPU];
 static struct tdq	*balance_tdq;
 static int balance_ticks;
-static DPCPU_DEFINE(uint32_t, randomval);
 
 #define	TDQ_SELF()	(&tdq_cpu[PCPU_GET(cpuid)])
 #define	TDQ_CPU(x)	(&tdq_cpu[(x)])

Lines 651-657
 	cpuset_t cpumask;
 	struct cpu_group *child;
 	struct tdq *tdq;
-	int cpu, i, hload, lload, load, total, rnd, *rndptr;
+	int cpu, i, hload, lload, load, total;
 
 	total = 0;
 	cpumask = cg->cg_mask;

Lines 700-708
 			CPU_CLR(cpu, &cpumask);
 			tdq = TDQ_CPU(cpu);
 			load = tdq->tdq_load * 256;
-			rndptr = DPCPU_PTR(randomval);
-			rnd = (*rndptr = *rndptr * 69069 + 5) >> 26;
-			if (match & CPU_SEARCH_LOWEST) {
+                        if (match & CPU_SEARCH_LOWEST) {
 				if (cpu == low->cs_prefer)
 					load -= 64;
 				/* If that CPU is allowed and get data. */

Lines 710-716
 				    tdq->tdq_load <= lgroup.cs_limit &&
 				    CPU_ISSET(cpu, &lgroup.cs_mask)) {
 					lgroup.cs_cpu = cpu;
-					lgroup.cs_load = load - rnd;
+                                       lgroup.cs_load = load;
 				}
 			}
 			if (match & CPU_SEARCH_HIGHEST)

Lines 718-724
 				    tdq->tdq_transferable &&
 				    CPU_ISSET(cpu, &hgroup.cs_mask)) {
 					hgroup.cs_cpu = cpu;
-					hgroup.cs_load = load - rnd;
+					hgroup.cs_load = load;
 				}
 		}
 		total += load;

Lines 861-874
 {
 	struct tdq *tdq;
 
-	/*
-	 * Select a random time between .5 * balance_interval and
-	 * 1.5 * balance_interval.
-	 */
-	balance_ticks = max(balance_interval / 2, 1);
-	balance_ticks += random() % balance_interval;
 	if (smp_started == 0 || rebalance == 0)
 		return;
+
+	balance_ticks = balance_interval;
 	tdq = TDQ_SELF();
 	TDQ_UNLOCK(tdq);
 	sched_balance_group(cpu_top);
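
The sched_balance() hunk makes two changes: the rebalance period is no longer a random value between 0.5 and 1.5 times balance_interval, and balance_ticks is only armed after the smp_started/rebalance early return, being set to exactly balance_interval. Below is a small userland sketch of the period computation before and after; balance_interval = 128 is an illustrative value and libc random() stands in for the kernel's random().

/*
 * Sketch of the number of ticks until the next rebalance, as chosen by
 * sched_balance() before and after the patch.
 */
#include <stdio.h>
#include <stdlib.h>

#define	MAX(a, b)	((a) > (b) ? (a) : (b))

static int balance_interval = 128;	/* illustrative value, in ticks */

/* Old behaviour: roughly 0.5 * balance_interval .. 1.5 * balance_interval. */
static int
balance_ticks_old(void)
{
	int ticks;

	ticks = MAX(balance_interval / 2, 1);
	ticks += random() % balance_interval;
	return (ticks);
}

/* New behaviour: a fixed period of exactly balance_interval ticks. */
static int
balance_ticks_new(void)
{

	return (balance_interval);
}

int
main(void)
{
	int i;

	for (i = 0; i < 4; i++)
		printf("old: %d ticks   new: %d ticks\n",
		    balance_ticks_old(), balance_ticks_new());
	return (0);
}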
