/*
 * task_dirty_limit - scale down dirty throttling threshold for one task
 *
 * task specific dirty limit:
 *
 *	dirty -= (dirty/8) * p_{t}
 *
 * To protect light/slow dirtying tasks from heavier/fast ones, we start
 * throttling individual tasks before reaching the bdi dirty limit.
 * Relatively low thresholds will be allocated to heavy dirtiers. So when
 * dirty pages grow large, heavy dirtiers will be throttled first, which will
 * effectively curb the growth of dirty pages. Light dirtiers with high enough
 * dirty threshold may never get throttled.
 */
static unsigned long task_dirty_limit(struct task_struct *tsk,
                                      unsigned long bdi_dirty)
{
        long numerator, denominator;
        unsigned long dirty = bdi_dirty;
        u64 inv = dirty >> 3;

        task_dirties_fraction(tsk, &numerator, &denominator);
        inv *= numerator;
        do_div(inv, denominator);

        dirty -= inv;

        return max(dirty, bdi_dirty/2);
}
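
To make the scaling concrete, here is a small user-space model of the same formula. This is a sketch only: model_task_dirty_limit, the example fractions, and the 80000-page limit are made up for illustration, and the kernel's task_dirties_fraction() and do_div() are replaced by an explicit numerator/denominator pair and plain 64-bit arithmetic.

#include <stdio.h>
#include <stdint.h>

/*
 * Model of: dirty -= (dirty/8) * p_{t}, with the result clamped to no
 * less than half of the original limit. numerator/denominator plays the
 * role of the per-task dirty fraction p_{t}.
 */
static unsigned long model_task_dirty_limit(unsigned long bdi_dirty,
                                            long numerator, long denominator)
{
        unsigned long dirty = bdi_dirty;
        uint64_t inv = dirty >> 3;              /* dirty/8 */

        inv *= numerator;                       /* (dirty/8) * p_t ... */
        inv /= denominator;                     /* ...in 64-bit arithmetic */

        dirty -= inv;
        return dirty > bdi_dirty / 2 ? dirty : bdi_dirty / 2;
}

int main(void)
{
        unsigned long limit = 80000;            /* pages, made-up value */

        /* Heavy dirtier: responsible for ~90% of recent dirtying. */
        printf("heavy: %lu\n", model_task_dirty_limit(limit, 9, 10));
        /* Light dirtier: responsible for ~1% of recent dirtying. */
        printf("light: %lu\n", model_task_dirty_limit(limit, 1, 100));
        return 0;
}

With these numbers, the task doing ~90% of the dirtying has its threshold cut by about 11% (to 71000 pages), while the ~1% dirtier keeps nearly the full limit (79900 pages); as dirty pages grow, the heavy dirtier crosses its lower threshold and is throttled first. The older variant below computes the same threshold but adjusts the caller's value in place through a pointer.
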
/*
 * scale the dirty limit
 *
 * task specific dirty limit:
 *
 *	dirty -= (dirty/8) * p_{t}
 */
static void task_dirty_limit(struct task_struct *tsk, long *pdirty)
{
        long numerator, denominator;
        long dirty = *pdirty;
        u64 inv = dirty >> 3;

        task_dirties_fraction(tsk, &numerator, &denominator);
        inv *= numerator;
        do_div(inv, denominator);

        dirty -= inv;
        if (dirty < *pdirty/2)
                dirty = *pdirty/2;

        *pdirty = dirty;
}
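
The two versions differ mainly in interface: the newer one takes the per-bdi threshold by value and returns the scaled copy, while the older one rewrites the caller's value through *pdirty and clamps with an explicit if instead of max(). The sketch below checks that the two formulations compute the same result over a sweep of inputs; it is user-space code with invented names (limit_by_value, limit_in_place) and an explicit num/den fraction standing in for task_dirties_fraction().

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Return-value formulation (mirrors the newer kernel function). */
static unsigned long limit_by_value(unsigned long bdi_dirty, long num, long den)
{
        unsigned long dirty = bdi_dirty;
        uint64_t inv = dirty >> 3;

        inv *= num;
        inv /= den;
        dirty -= inv;
        return dirty > bdi_dirty / 2 ? dirty : bdi_dirty / 2;
}

/* Pointer formulation (mirrors the older kernel function). */
static void limit_in_place(long *pdirty, long num, long den)
{
        long dirty = *pdirty;
        uint64_t inv = dirty >> 3;

        inv *= num;
        inv /= den;
        dirty -= inv;
        if (dirty < *pdirty / 2)
                dirty = *pdirty / 2;
        *pdirty = dirty;
}

int main(void)
{
        /* Sweep thresholds and task fractions; both styles must agree. */
        for (long thresh = 1000; thresh <= 100000; thresh += 1000) {
                for (long num = 0; num <= 100; num += 10) {
                        long d = thresh;

                        limit_in_place(&d, num, 100);
                        assert((unsigned long)d ==
                               limit_by_value(thresh, num, 100));
                }
        }
        printf("both formulations agree\n");
        return 0;
}

Because the subtraction is bounded by dirty/8, the half-limit clamp in either form is only a safety net; assuming the reported per-task fraction never exceeds one, it cannot actually fire.
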