doug@s15:~/linux-3.0.0/kernel$ diff -u sched.c.original sched.c.doug05
--- sched.c.original	2012-02-09 11:38:59.376250335 -0800
+++ sched.c.doug05	2012-02-28 08:42:56.805450230 -0800
@@ -3258,6 +3258,7 @@
 /* Variables and functions for calc_load */
 static atomic_long_t calc_load_tasks;
 static unsigned long calc_load_update;
+static unsigned calc_load_update_done_already=0;

 unsigned long avenrun[3];
 EXPORT_SYMBOL(avenrun);
@@ -3403,14 +3404,19 @@
 	 * If we crossed a calc_load_update boundary, make sure to fold
 	 * any pending idle changes, the respective CPUs might have
 	 * missed the tick driven calc_load_account_active() update
-	 * due to NO_HZ.
+	 * due to NO_HZ. Handshake ensures it is only done once.
 	 */
-	delta = calc_load_fold_idle();
-	if (delta)
-		atomic_long_add(delta, &calc_load_tasks);
-
+	if(calc_load_update_done_already == 0){
+		delta = calc_load_fold_idle();
+		if (delta)
+			atomic_long_add(delta, &calc_load_tasks);
+		calc_load_update_done_already = 1;
+	}
 	/*
 	 * If we were idle for multiple load cycles, apply them.
+	 * (delete this note in final patch) Note: this code is never executed
+	 * on my server systems. The point is that somehow this path needs
+	 * to be tested with the proposed patch, but I don't know how.
 	 */
 	if (ticks >= LOAD_FREQ) {
 		n = ticks / LOAD_FREQ;
@@ -3487,6 +3493,7 @@
 	avenrun[2] = calc_load(avenrun[2], EXP_15, active);

 	calc_load_update += LOAD_FREQ;
+	calc_load_update_done_already = 0;
 }

 /*