sched: group scheduling code cleanup

Minor cleanups:

- Fix coding style
- Remove obsolete comment

Signed-off-by: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Commit: 93f992ccc0 (parent 86faf39d0f)
Author: Srivatsa Vaddagiri, 2008-01-25 21:07:59 +01:00; committed by Ingo Molnar
1 file changed, 3 insertions(+), 18 deletions(-)

@@ -191,12 +191,12 @@ struct task_group init_task_group = {
 };
 
 #ifdef CONFIG_FAIR_USER_SCHED
-# define INIT_TASK_GRP_LOAD	2*NICE_0_LOAD
+# define INIT_TASK_GROUP_LOAD	2*NICE_0_LOAD
 #else
-# define INIT_TASK_GRP_LOAD	NICE_0_LOAD
+# define INIT_TASK_GROUP_LOAD	NICE_0_LOAD
 #endif
 
-static int init_task_group_load = INIT_TASK_GRP_LOAD;
+static int init_task_group_load = INIT_TASK_GROUP_LOAD;
 
 /* return group to which a task belongs */
 static inline struct task_group *task_group(struct task_struct *p)
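
For scale, a minimal illustration of what the renamed macro amounts to; NICE_0_LOAD is the load weight of a single nice-0 task, and the 1024 value below is an assumption taken from SCHED_LOAD_SCALE in this era's scheduler, not from this patch:

/* Illustration only; 1024 is assumed from SCHED_LOAD_SCALE (1 << 10). */
#define NICE_0_LOAD		1024		/* load weight of one nice-0 task */
#define INIT_TASK_GROUP_LOAD	(2*NICE_0_LOAD)	/* CONFIG_FAIR_USER_SCHED: each
						 * user's group starts with the
						 * weight of two nice-0 tasks */

Without fair user scheduling, the initial group load is simply NICE_0_LOAD, i.e. that of one nice-0 task.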
@@ -881,21 +881,6 @@ static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
 #define sched_class_highest (&rt_sched_class)
 
-/*
- * Update delta_exec, delta_fair fields for rq.
- *
- * delta_fair clock advances at a rate inversely proportional to
- * total load (rq->load.weight) on the runqueue, while
- * delta_exec advances at the same rate as wall-clock (provided
- * cpu is not idle).
- *
- * delta_exec / delta_fair is a measure of the (smoothened) load on this
- * runqueue over any given interval. This (smoothened) load is used
- * during load balance.
- *
- * This function is called /before/ updating rq->load
- * and when switching tasks.
- */
 static inline void inc_load(struct rq *rq, const struct task_struct *p)
 {
 	update_load_add(&rq->load, p->se.load.weight);
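
The comment removed above described delta_exec/delta_fair as a smoothed load measure used during load balancing; what remains is the plain rq->load bookkeeping that inc_load() performs. A minimal sketch of the helper it calls, with the struct layout and body assumed from 2.6.24-era kernel/sched.c rather than shown in this patch:

/* Illustration only: simplified view of the bookkeeping behind inc_load();
 * field names and body are assumptions based on 2.6.24-era kernel/sched.c. */
struct load_weight {
	unsigned long weight;		/* sum of the load weights of queued tasks */
	unsigned long inv_weight;	/* cached 2^32/weight; 0 means recompute */
};

static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
	lw->weight += inc;		/* a task of weight 'inc' was enqueued */
	lw->inv_weight = 0;		/* invalidate the cached inverse */
}

So inc_load(rq, p) simply folds p's load weight into rq->load; the delta_exec/delta_fair accounting the old comment described had already been reworked away in CFS, which is presumably why the commit message calls the comment obsolete.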