Index: sys/kern/sched_ule.c
===================================================================
--- sys/kern/sched_ule.c	(revision 278968)
+++ sys/kern/sched_ule.c	(working copy)
@@ -277,7 +277,6 @@
 static struct tdq	tdq_cpu[MAXCPU];
 static struct tdq	*balance_tdq;
 static int balance_ticks;
-static DPCPU_DEFINE(uint32_t, randomval);
 
 #define	TDQ_SELF()	(&tdq_cpu[PCPU_GET(cpuid)])
 #define	TDQ_CPU(x)	(&tdq_cpu[(x)])
@@ -651,7 +650,7 @@
 	cpuset_t cpumask;
 	struct cpu_group *child;
 	struct tdq *tdq;
-	int cpu, i, hload, lload, load, total, rnd, *rndptr;
+	int cpu, i, hload, lload, load, total;
 
 	total = 0;
 	cpumask = cg->cg_mask;
@@ -700,9 +699,7 @@
 		CPU_CLR(cpu, &cpumask);
 		tdq = TDQ_CPU(cpu);
 		load = tdq->tdq_load * 256;
-		rndptr = DPCPU_PTR(randomval);
-		rnd = (*rndptr = *rndptr * 69069 + 5) >> 26;
-		if (match & CPU_SEARCH_LOWEST) {
+		if (match & CPU_SEARCH_LOWEST) {
 			if (cpu == low->cs_prefer)
 				load -= 64;
 			/* If that CPU is allowed and get data. */
@@ -710,7 +707,7 @@
 			    tdq->tdq_load <= lgroup.cs_limit &&
 			    CPU_ISSET(cpu, &lgroup.cs_mask)) {
 				lgroup.cs_cpu = cpu;
-				lgroup.cs_load = load - rnd;
+				lgroup.cs_load = load;
 			}
 		}
 		if (match & CPU_SEARCH_HIGHEST)
@@ -718,7 +715,7 @@
 			    tdq->tdq_transferable &&
 			    CPU_ISSET(cpu, &hgroup.cs_mask)) {
 				hgroup.cs_cpu = cpu;
-				hgroup.cs_load = load - rnd;
+				hgroup.cs_load = load;
 			}
 		}
 		total += load;
@@ -861,14 +858,10 @@
 {
 	struct tdq *tdq;
 
-	/*
-	 * Select a random time between .5 * balance_interval and
-	 * 1.5 * balance_interval.
-	 */
-	balance_ticks = max(balance_interval / 2, 1);
-	balance_ticks += random() % balance_interval;
 	if (smp_started == 0 || rebalance == 0)
 		return;
+
+	balance_ticks = balance_interval;
 	tdq = TDQ_SELF();
 	TDQ_UNLOCK(tdq);
 	sched_balance_group(cpu_top);