Patch for bug 197923

(-)sys/kern/sched_ule.c (-5 / +11 lines)
@@ -908,15 +909,21 @@ sched_balance_pair
 {
 	int moved;
 	int cpu;
 
+	/*
+	 * Don't bother to lock if balancing doesn't make sense.
+	 */
+
+	if (high->tdq_load <= low->tdq_load)
+		return (0);
+
 	tdq_lock_pair(high, low);
 	moved = 0;
 	/*
-	 * Determine what the imbalance is and then adjust that to how many
-	 * threads we actually have to give up (transferable).
+	 * Move one thread from high to low if high has at least 1
+	 * transferable thread.
 	 */
-	if (high->tdq_transferable != 0 && high->tdq_load > low->tdq_load &&
-	    (moved = tdq_move(high, low)) > 0) {
+	if (high->tdq_transferable != 0 && (moved = tdq_move(high, low)) > 0) {
 		/*
 		 * In case the target isn't the current cpu IPI it to force a
 		 * reschedule with the new workload.
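What the patch buys: sched_balance_pair() previously always called tdq_lock_pair() before discovering that the two queues were already balanced; the new unlocked tdq_load comparison lets that common case return without touching either lock. Below is a minimal, self-contained user-space sketch of the same check-then-lock pattern. It is an illustration only: struct queue, queue_lock_pair(), and balance_pair() are hypothetical stand-ins, not sched_ule.c code, and the locked re-check of the loads is this sketch's own conservatism, whereas the patched kernel code instead relies on the tdq_transferable test it performs under the lock.

/*
 * Minimal user-space sketch of the patch's "check before locking"
 * pattern.  All names here are hypothetical, not sched_ule.c's.
 * Build with: cc -o balance balance.c -lpthread
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct queue {
	pthread_mutex_t	lock;
	int		load;		/* queued thread count */
};

/*
 * Lock both queues in a fixed (address) order so two racing callers
 * cannot deadlock; tdq_lock_pair() serves the same purpose for tdqs.
 */
static void
queue_lock_pair(struct queue *a, struct queue *b)
{

	if ((uintptr_t)a < (uintptr_t)b) {
		pthread_mutex_lock(&a->lock);
		pthread_mutex_lock(&b->lock);
	} else {
		pthread_mutex_lock(&b->lock);
		pthread_mutex_lock(&a->lock);
	}
}

static void
queue_unlock_pair(struct queue *a, struct queue *b)
{

	pthread_mutex_unlock(&a->lock);
	pthread_mutex_unlock(&b->lock);
}

/*
 * Move one unit of load from high to low; return how much moved.
 */
static int
balance_pair(struct queue *high, struct queue *low)
{
	int moved;

	/*
	 * Unlocked early check, as in the patch: if balancing cannot
	 * help, don't pay for the locks.  The unlocked reads may be
	 * stale, but the worst case is skipping one balancing pass.
	 */
	if (high->load <= low->load)
		return (0);

	queue_lock_pair(high, low);
	moved = 0;
	if (high->load > low->load) {	/* re-check under the locks */
		high->load--;
		low->load++;
		moved = 1;
	}
	queue_unlock_pair(high, low);
	return (moved);
}

int
main(void)
{
	struct queue a = { PTHREAD_MUTEX_INITIALIZER, 5 };
	struct queue b = { PTHREAD_MUTEX_INITIALIZER, 2 };

	printf("moved %d thread(s)\n", balance_pair(&a, &b));
	return (0);
}

The racy unlocked comparison is presumably acceptable here because a stale read only causes one balancing pass to be skipped or entered needlessly; a periodic balancer tolerates either outcome, and the work that actually migrates a thread still happens with both queues locked.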
