Braindead safety checks for cgroups stubs.

-ck

---
 kernel/sched/bfs.c | 21 +--------------------
 1 file changed, 1 insertion(+), 20 deletions(-)

Index: linux-4.7-ck4/kernel/sched/bfs.c
===================================================================
--- linux-4.7-ck4.orig/kernel/sched/bfs.c	2016-09-13 17:21:53.284225917 +1000
+++ linux-4.7-ck4/kernel/sched/bfs.c	2016-09-13 17:21:53.282225963 +1000
@@ -7339,8 +7339,6 @@ LIST_HEAD(task_groups);
 /* Cacheline aligned slab cache for task_group */
 static struct kmem_cache *task_group_cache __read_mostly;
 
-/* task_group_lock serializes the addition/removal of task groups */
-static DEFINE_SPINLOCK(task_group_lock);
 #endif /* CONFIG_CGROUP_SCHED */
 
 void __init sched_init(void)
@@ -7827,17 +7825,6 @@ struct task_group *sched_create_group(st
 
 void sched_online_group(struct task_group *tg, struct task_group *parent)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&task_group_lock, flags);
-	list_add_rcu(&tg->list, &task_groups);
-
-	WARN_ON(!parent); /* root should already exist */
-
-	tg->parent = parent;
-	INIT_LIST_HEAD(&tg->children);
-	list_add_rcu(&tg->siblings, &parent->children);
-	spin_unlock_irqrestore(&task_group_lock, flags);
 }
 
 /* rcu callback to free various structures associated with a task group */
@@ -7855,12 +7842,6 @@ void sched_destroy_group(struct task_gro
 
 void sched_offline_group(struct task_group *tg)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&task_group_lock, flags);
-	list_del_rcu(&tg->list);
-	list_del_rcu(&tg->siblings);
-	spin_unlock_irqrestore(&task_group_lock, flags);
 }
 
 static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
@@ -7908,7 +7889,7 @@ static void cpu_cgroup_fork(struct task_
 
 static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
 {
-        return 0;
+	return 0;
 }
 
 static void cpu_cgroup_attach(struct cgroup_taskset *tset)
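
For reference, this is roughly how the affected cgroup stubs in kernel/sched/bfs.c read once the hunks above are applied. It is a sketch reconstructed only from the hunk context shown here, not an excerpt of the full file, and the comments are explanatory additions, not part of the source:

void sched_online_group(struct task_group *tg, struct task_group *parent)
{
	/* Empty stub: the task_groups list and parent/children bookkeeping
	 * removed above is no longer maintained, so there is nothing to do
	 * and no task_group_lock to take. */
}

void sched_offline_group(struct task_group *tg)
{
	/* Empty stub: nothing was registered in sched_online_group(),
	 * so there is nothing to unlink here either. */
}

static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
{
	/* Stub: always allow the attach. */
	return 0;
}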