ARM10C Week 69 Review
Date: 2014.09.13 (week 69)
Meeting: NAVER개발자커뮤니티지원_IAMROOT.ORG_10차ARM-C
Venue: TOZ Tower branch
Venue support: NAVER Developer Community Support Program
Attendees: 3
Study progress:
- sched_init()
main.c::start_kernel()
We continue analyzing sched_init(). Before going in, here is a brief recap, starting from start_kernel(), of what has been done before sched_init().
asmlinkage void __init start_kernel(void)
{
char * command_line;
extern const struct kernel_param __start___param[], __stop___param[];
...
local_irq_disable();
// disable IRQs
early_boot_irqs_disabled = true;
boot_cpu_init();
// get the current cpu (core id) and set that cpu's bit in cpu_XXX_bits[]
page_address_init();
// initialize the 128-entry page_address_htable array
pr_notice("%s", linux_banner);
// banner:
// Linux version 2.6.37_DM385_IPNC_3.50.00
// (a0875405@bangvideoapps01) (gcc version 4.5.3 20110311
// (prerelease) (GCC) ) #1 Fri Dec 21 17:27:08 IST 2012
setup_arch(&command_line);
// perform ARM architecture-specific initialization
setup_nr_cpu_ids();
setup_per_cpu_areas();
// build the pcpu structures (mm/percpu.c)
smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */
// set the base address of boot cpu 0's pcpu area in a core register
...
pidhash_init();
// allocate 16 kB for the pid hash and create 4096 hash lists
vfs_caches_init_early();
// allocate 512 kB and 256 kB for the dentry-cache and inode-cache hash tables,
// creating 131072 and 65536 hash buckets respectively
...
mm_init();
// enable buddy and slab; hand the early bootmem allocations over to buddy,
// and the pcpu memory and vmlist over to slab
sched_init();
With buddy and slab enabled by mm_init(), sched_init() is called next.
core.c::sched_init()
// ARM10C 20140830
void __init sched_init(void)
{
int i, j;
unsigned long alloc_size = 0, ptr;
// alloc_size: 0
#ifdef CONFIG_FAIR_GROUP_SCHED // CONFIG_FAIR_GROUP_SCHED=n
alloc_size += 2 * nr_cpu_ids * sizeof(void **);
#endif
#ifdef CONFIG_RT_GROUP_SCHED // CONFIG_RT_GROUP_SCHED=n
alloc_size += 2 * nr_cpu_ids * sizeof(void **);
#endif
#ifdef CONFIG_CPUMASK_OFFSTACK // CONFIG_CPUMASK_OFFSTACK=n
alloc_size += num_possible_cpus() * cpumask_size();
#endif
// alloc_size: 0
if (alloc_size) {
ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
#ifdef CONFIG_FAIR_GROUP_SCHED // CONFIG_FAIR_GROUP_SCHED=n
root_task_group.se = (struct sched_entity **)ptr;
ptr += nr_cpu_ids * sizeof(void **);
root_task_group.cfs_rq = (struct cfs_rq **)ptr;
ptr += nr_cpu_ids * sizeof(void **);
#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED // CONFIG_RT_GROUP_SCHED=n
root_task_group.rt_se = (struct sched_rt_entity **)ptr;
ptr += nr_cpu_ids * sizeof(void **);
root_task_group.rt_rq = (struct rt_rq **)ptr;
ptr += nr_cpu_ids * sizeof(void **);
#endif /* CONFIG_RT_GROUP_SCHED */
#ifdef CONFIG_CPUMASK_OFFSTACK // CONFIG_CPUMASK_OFFSTACK=n
for_each_possible_cpu(i) {
per_cpu(load_balance_mask, i) = (void *)ptr;
ptr += cpumask_size();
}
#endif /* CONFIG_CPUMASK_OFFSTACK */
}
#ifdef CONFIG_SMP // CONFIG_SMP=y
init_defrootdomain();
// initialize the members of def_root_domain
// (&(&(&def_root_domain)->cpupri)->pri_to_cpu[0 ... 101])->count: 0
// (&(&(&def_root_domain)->cpupri)->pri_to_cpu[0 ... 101])->mask.bit[0]: 0
// (&(&def_root_domain)->cpupri)->cpu_to_pri[0 ... 3]: -1
// &def_root_domain.refcount: 1
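// Note: the 102 entries come from CPUPRI_NR_PRIORITIES = MAX_RT_PRIO(100) + 2;
// indexes 0 and 1 are the special CPUPRI_IDLE and CPUPRI_NORMAL classes and
// the remaining 100 map the RT priorities.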
#endif
// global_rt_period(): 1000000000, global_rt_runtime(): 950000000
init_rt_bandwidth(&def_rt_bandwidth,
global_rt_period(), global_rt_runtime());
// what init_rt_bandwidth did:
// (&def_rt_bandwidth)->rt_period: 1000000000
// (&def_rt_bandwidth)->rt_runtime: 950000000
// spinlock initialized using &(&def_rt_bandwidth)->rt_runtime_lock
// (&def_rt_bandwidth)->rt_period_timer zero-initialized
// (&(&def_rt_bandwidth)->rt_period_timer)->base: &hrtimer_bases->clock_base[0]
// RB tree node (&(&(&def_rt_bandwidth)->rt_period_timer)->node)->node initialized
// (&def_rt_bandwidth)->rt_period_timer.function: sched_rt_period_timer
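// Note: the two values above come from the sysctl defaults
// sysctl_sched_rt_period = 1000000 us and sysctl_sched_rt_runtime = 950000 us:
// global_rt_period()  = (u64)sysctl_sched_rt_period  * NSEC_PER_USEC = 1000000000 ns (1 s)
// global_rt_runtime() = (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC =  950000000 ns
// i.e. by default RT tasks may consume at most 95% of every 1 s period.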
#ifdef CONFIG_RT_GROUP_SCHED // CONFIG_RT_GROUP_SCHED=n
init_rt_bandwidth(&root_task_group.rt_bandwidth,
global_rt_period(), global_rt_runtime());
#endif /* CONFIG_RT_GROUP_SCHED */
#ifdef CONFIG_CGROUP_SCHED // CONFIG_CGROUP_SCHED=n
list_add(&root_task_group.list, &task_groups);
INIT_LIST_HEAD(&root_task_group.children);
INIT_LIST_HEAD(&root_task_group.siblings);
autogroup_init(&init_task);
#endif /* CONFIG_CGROUP_SCHED */
for_each_possible_cpu(i) {
// for ((i) = -1; (i) = cpumask_next((i), (cpu_possible_mask)), (i) < nr_cpu_ids; )
struct rq *rq;
// i: 0
// cpu_rq(0):
// &({
// do {
// const void __percpu *__vpp_verify = (typeof(&(runqueues)))NULL;
// (void)__vpp_verify;
// } while (0)
// (&(runqueues) + (pcpu_unit_offsets[0] + offset of pcpu_base_addr from __per_cpu_start));
// })
rq = cpu_rq(i);
// rq: (&(runqueues) + (pcpu_unit_offsets[0] + offset of pcpu_base_addr from __per_cpu_start))
// NOTE:
// to keep the annotations short, instead of writing out
// "rq: (&(runqueues) + (pcpu_unit_offsets[0] + offset of pcpu_base_addr from __per_cpu_start))"
// each time, variables get a [pcp0] prefix and rq is written as (&runqueues)
// [pcp0] &rq->lock: &(&runqueues)->lock
raw_spin_lock_init(&rq->lock);
// spinlock initialized using [pcp0] &rq->lock: &(&runqueues)->lock
// [pcp0] rq->nr_running: (&runqueues)->nr_running
rq->nr_running = 0;
// [pcp0] rq->nr_running: (&runqueues)->nr_running: 0
// [pcp0] rq->calc_load_active: (&runqueues)->calc_load_active
rq->calc_load_active = 0;
// [pcp0] rq->calc_load_active: (&runqueues)->calc_load_active: 0
// [pcp0] rq->calc_load_update: (&runqueues)->calc_load_update,
// jiffies: -30000 (0xFFFFFFFFFFFF8AD0): defined in vmlinux.lds.S, LOAD_FREQ: 501
rq->calc_load_update = jiffies + LOAD_FREQ;
// [pcp0] rq->calc_load_update: (&runqueues)->calc_load_update: -29499 (0xFFFFFFFFFFFF8CC5)
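// Note: jiffies starts at INITIAL_JIFFIES = (unsigned long)(unsigned int)(-300 * HZ),
// i.e. -30000 with HZ=100, so the jiffies wrap-around is exercised within the
// first 5 minutes of uptime; LOAD_FREQ is (5*HZ+1) = 501 ticks, hence
// calc_load_update = -30000 + 501 = -29499.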
// [pcp0] &rq->cfs: &(&runqueues)->cfs
init_cfs_rq(&rq->cfs);
// what init_cfs_rq did:
// (&(&runqueues)->cfs)->tasks_timeline: (struct rb_root) { NULL, }
// (&(&runqueues)->cfs)->min_vruntime: 0xFFFFFFFFFFF00000
// (&(&runqueues)->cfs)->min_vruntime_copy: 0xFFFFFFFFFFF00000
// (&(&runqueues)->cfs)->decay_counter: 1
// (&(&runqueues)->cfs)->removed_load: 0
// [pcp0] &rq->rt: &(&runqueues)->rt, rq: (&runqueues)
init_rt_rq(&rq->rt, rq);
// what init_rt_rq did:
// (&(&(&runqueues)->rt)->active)->bitmap bits 0 ... 99 cleared
// (&(&(&runqueues)->rt)->active)->queue[0 ... 99] lists initialized
// (&(&(&runqueues)->rt)->active)->bitmap bit 100 set to 1
// spinlock initialized using (&(&runqueues)->rt)->rt_runtime_lock
// (&(&runqueues)->rt)->rt_runtime: 0
// (&(&runqueues)->rt)->rt_throttled: 0
// (&(&runqueues)->rt)->rt_time: 0
// (&(&(&runqueues)->rt)->pushable_tasks)->node_list list initialized
// (&(&runqueues)->rt)->overloaded: 0
// (&(&runqueues)->rt)->rt_nr_migratory: 0
// (&(&runqueues)->rt)->highest_prio.next: 100
// (&(&runqueues)->rt)->highest_prio.curr: 100
// end of the 2014/08/30 session
// start of the 2014/09/13 session
#ifdef CONFIG_FAIR_GROUP_SCHED // CONFIG_FAIR_GROUP_SCHED=n
root_task_group.shares = ROOT_TASK_GROUP_LOAD;
INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
/*
* How much cpu bandwidth does root_task_group get?
*
* In case of task-groups formed thr' the cgroup filesystem, it
* gets 100% of the cpu resources in the system. This overall
* system cpu resource is divided among the tasks of
* root_task_group and its child task-groups in a fair manner,
* based on each entity's (task or task-group's) weight
* (se->load.weight).
*
* In other words, if root_task_group has 10 tasks of weight
* 1024) and two child groups A0 and A1 (of weight 1024 each),
* then A0's share of the cpu resource is:
*
* A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
*
* We achieve this by letting root_task_group's tasks sit
* directly in rq->cfs (i.e root_task_group->se[] = NULL).
*/
init_cfs_bandwidth(&root_task_group.cfs_bandwidth);
init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
#endif /* CONFIG_FAIR_GROUP_SCHED */
// [pcp0] rq->rt.rt_runtime: (&runqueues)->rt.rt_runtime,
// def_rt_bandwidth.rt_runtime: 950000000
rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
// [pcp0] rq->rt.rt_runtime: (&runqueues)->rt.rt_runtime: 950000000
#ifdef CONFIG_RT_GROUP_SCHED // CONFIG_RT_GROUP_SCHED=n
INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
#endif
// CPU_LOAD_IDX_MAX: 5
for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
// [pcp0] j: 0, rq->cpu_load[0]: (&runqueues)->cpu_load[0]
rq->cpu_load[j] = 0;
// [pcp0] j: 0, rq->cpu_load[0]: (&runqueues)->cpu_load[0]: 0
// repeated for j: 1 .. 4
// [pcp0] rq->last_load_update_tick: (&runqueues)->last_load_update_tick
// jiffies: -30000 (0xFFFFFFFFFFFF8AD0): defined in vmlinux.lds.S
rq->last_load_update_tick = jiffies;
// [pcp0] rq->last_load_update_tick: (&runqueues)->last_load_update_tick: -30000 (0xFFFFFFFFFFFF8AD0)
#ifdef CONFIG_SMP // CONFIG_SMP=y
// [pcp0] rq->sd: (&runqueues)->sd
rq->sd = NULL;
// [pcp0] rq->sd: (&runqueues)->sd: NULL
// [pcp0] rq->rd: (&runqueues)->rd
rq->rd = NULL;
// [pcp0] rq->rd: (&runqueues)->rd: NULL
// [pcp0] rq->cpu_power: (&runqueues)->cpu_power, SCHED_POWER_SCALE: 0x400
rq->cpu_power = SCHED_POWER_SCALE;
// [pcp0] rq->cpu_power: (&runqueues)->cpu_power: 0x400
// [pcp0] rq->post_schedule: (&runqueues)->post_schedule
rq->post_schedule = 0;
// [pcp0] rq->post_schedule: (&runqueues)->post_schedule: 0
// [pcp0] rq->active_balance: (&runqueues)->active_balance
rq->active_balance = 0;
// [pcp0] rq->active_balance: (&runqueues)->active_balance: 0
// [pcp0] rq->next_balance: (&runqueues)->next_balance,
// jiffies: -30000 (0xFFFFFFFFFFFF8AD0): defined in vmlinux.lds.S
rq->next_balance = jiffies;
// [pcp0] rq->next_balance: (&runqueues)->next_balance: -30000 (0xFFFFFFFFFFFF8AD0)
// [pcp0] rq->push_cpu: (&runqueues)->push_cpu
rq->push_cpu = 0;
// [pcp0] rq->push_cpu: (&runqueues)->push_cpu: 0
// [pcp0] rq->cpu: (&runqueues)->cpu, i: 0
rq->cpu = i;
// [pcp0] rq->cpu: (&runqueues)->cpu: 0
// [pcp0] rq->online: (&runqueues)->online
rq->online = 0;
// [pcp0] rq->online: (&runqueues)->online: 0
// [pcp0] rq->idle_stamp: (&runqueues)->idle_stamp
rq->idle_stamp = 0;
// [pcp0] rq->idle_stamp: (&runqueues)->idle_stamp: 0
// [pcp0] rq->avg_idle: (&runqueues)->avg_idle,
// sysctl_sched_migration_cost: 500000UL
rq->avg_idle = 2*sysctl_sched_migration_cost;
// [pcp0] rq->avg_idle: (&runqueues)->avg_idle: 1000000UL
// [pcp0] rq->max_idle_balance_cost: (&runqueues)->max_idle_balance_cost,
// sysctl_sched_migration_cost: 500000UL
rq->max_idle_balance_cost = sysctl_sched_migration_cost;
// [pcp0] rq->max_idle_balance_cost: (&runqueues)->max_idle_balance_cost: 500000UL
// [pcp0] &rq->cfs_tasks: &(&runqueues)->cfs_tasks
INIT_LIST_HEAD(&rq->cfs_tasks);
// [pcp0] &rq->cfs_tasks: &(&runqueues)->cfs_tasks list initialized
// [pcp0] rq: &runqueues
rq_attach_root(rq, &def_root_domain);
// what rq_attach_root did:
// (&def_root_domain)->span: 1
// (&runqueues)->rd: &def_root_domain
// (&def_root_domain)->refcount: incremented (1 -> 2 for cpu 0)
#ifdef CONFIG_NO_HZ_COMMON // CONFIG_NO_HZ_COMMON=y
// [pcp0] rq->nohz_flags: (&runqueues)->nohz_flags
rq->nohz_flags = 0;
// [pcp0] rq->nohz_flags: (&runqueues)->nohz_flags: 0
#endif
#ifdef CONFIG_NO_HZ_FULL // CONFIG_NO_HZ_FULL=n
rq->last_sched_tick = 0;
#endif
#endif
// [pcp0] rq: &runqueues
init_rq_hrtick(rq);
// what init_rq_hrtick did:
// (&runqueues)->hrtick_csd_pending: 0
// (&runqueues)->hrtick_csd.flags: 0
// (&runqueues)->hrtick_csd.func: __hrtick_start
// (&runqueues)->hrtick_csd.info: &runqueues
// (&runqueues)->hrtick_timer zero-initialized
// (&(&runqueues)->hrtick_timer)->base: &hrtimer_bases->clock_base[0]
// RB tree node (&(&(&runqueues)->hrtick_timer)->node)->node initialized
// &rq->hrtick_timer.function: (&runqueues)->hrtick_timer.function: hrtick
// [pcp0] &rq->nr_iowait: &(&runqueues)->nr_iowait
atomic_set(&rq->nr_iowait, 0);
// [pcp0] &rq->nr_iowait: &(&runqueues)->nr_iowait: 0
// loop repeated for i: 1 .. 3
}
set_load_weight(&init_task);
// what set_load_weight did:
// (&(&init_task)->se.load)->weight: 1024
// (&(&init_task)->se.load)->inv_weight: 4194304
#ifdef CONFIG_PREEMPT_NOTIFIERS // CONFIG_PREEMPT_NOTIFIERS=n
INIT_HLIST_HEAD(&init_task.preempt_notifiers);
#endif
#ifdef CONFIG_RT_MUTEXES // CONFIG_RT_MUTEXES=y
plist_head_init(&init_task.pi_waiters);
// what plist_head_init did:
// (&init_task.pi_waiters)->node_list list initialized
#endif
/*
* The boot idle thread does lazy MMU switching as well:
*/
// init_mm.mm_count: 1
atomic_inc(&init_mm.mm_count);
// init_mm.mm_count: 2
// current: init_task
enter_lazy_tlb(&init_mm, current); // null function
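// Note: mm_users counts user-space references to an mm while mm_count counts
// kernel references (including lazy-TLB borrowing as active_mm); the extra
// reference taken here keeps init_mm alive while the idle thread borrows it.
// On ARM, enter_lazy_tlb() is an empty inline, hence the "null function" note.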
/*
* Make us the idle thread. Technically, schedule() should not be
* called from this thread, however somewhere below it might be,
* but because we are the idle thread, we just pick up running again
* when this runqueue becomes "idle".
*/
// current: &init_task, smp_processor_id(): 0
init_idle(current, smp_processor_id());
// what init_idle did:
// (&init_task)->on_rq: 0
// (&init_task)->se.on_rq: 0
// (&init_task)->se.exec_start: 0
// (&init_task)->se.sum_exec_runtime: 0
// (&init_task)->se.prev_sum_exec_runtime: 0
// (&init_task)->se.nr_migrations: 0
// (&init_task)->se.vruntime: 0
// &(&init_task)->se.group_node list initialized
// &(&init_task)->rt.run_list list initialized
// (&init_task)->state: TASK_RUNNING: 0
// (&init_task)->se.exec_start: 0
// (&init_task)->cpus_allowed->bits[0]: 1
// (&init_task)->nr_cpus_allowed: 1
// ((struct thread_info *)(&init_task)->stack)->cpu: 0
// (&init_task)->wake_cpu: 0
// [pcpu0] (&runqueues)->curr: &init_task
// [pcpu0] (&runqueues)->idle: &init_task
// (&init_task)->on_cpu: 1
// ((struct thread_info *)(&init_task)->stack)->preempt_count: PREEMPT_ENABLED: 0
// (&init_task)->sched_class: &idle_sched_class
// (&init_task)->comm: "swapper/0"
// jiffies: -30000 (0xFFFFFFFFFFFF8AD0): defined in vmlinux.lds.S, LOAD_FREQ: 501
calc_load_update = jiffies + LOAD_FREQ;
// calc_load_update: -29499 (0xFFFFFFFFFFFF8CC5)
/*
* During early bootup we pretend to be a normal task:
*/
// current->sched_class: (&init_task)->sched_class
current->sched_class = &fair_sched_class;
// current->sched_class: (&init_task)->sched_class: &fair_sched_class
#ifdef CONFIG_SMP // CONFIG_SMP=y
// GFP_NOWAIT: 0
zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
// sched_domains_tmpmask.bits[0]: 0
// end of the 2014/09/13 session
/* May be allocated at isolcpus cmdline parse time */
if (cpu_isolated_map == NULL)
zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
idle_thread_set_boot_cpu();
#endif
init_sched_fair_class();
scheduler_running = 1;
}
Key sub-functions called by sched_init()
- sched_init()->init_defrootdomain()
- // initialize the members of def_root_domain
- // (&(&(&def_root_domain)->cpupri)->pri_to_cpu[0 ... 101])->count: 0
- // (&(&(&def_root_domain)->cpupri)->pri_to_cpu[0 ... 101])->mask.bit[0]: 0
- // (&(&def_root_domain)->cpupri)->cpu_to_pri[0 ... 3]: -1
- // &def_root_domain.refcount: 1
- sched_init()->init_rt_bandwidth(&def_rt_bandwidth, global_rt_period(), global_rt_runtime());
- // what init_rt_bandwidth did:
- // (&def_rt_bandwidth)->rt_period: 1000000000
- // (&def_rt_bandwidth)->rt_runtime: 950000000
- // spinlock initialized using &(&def_rt_bandwidth)->rt_runtime_lock
- // (&def_rt_bandwidth)->rt_period_timer zero-initialized
- // (&(&def_rt_bandwidth)->rt_period_timer)->base: &hrtimer_bases->clock_base[0]
- // RB tree node (&(&(&def_rt_bandwidth)->rt_period_timer)->node)->node initialized
- // (&def_rt_bandwidth)->rt_period_timer.function: sched_rt_period_timer
- sched_init()::for_each_possible_cpu(i) { ... }
- sched_init()->set_load_weight()
- // (&(&init_task)->se.load)->weight: 1024
- // (&(&init_task)->se.load)->inv_weight: 4194304
- sched_init()->plist_head_init()
- // (&init_task.pi_waiters)->node_list list initialized
- sched_init()->init_idle()
- // (&init_task)->on_rq: 0
- // (&init_task)->se.on_rq: 0
- // (&init_task)->se.exec_start: 0
- // (&init_task)->se.sum_exec_runtime: 0
- // (&init_task)->se.prev_sum_exec_runtime: 0
- // (&init_task)->se.nr_migrations: 0
- // (&init_task)->se.vruntime: 0
- // &(&init_task)->se.group_node list initialized
- // &(&init_task)->rt.run_list list initialized
- // (&init_task)->state: TASK_RUNNING: 0
- // (&init_task)->se.exec_start: 0
- // (&init_task)->cpus_allowed->bits[0]: 1
- // (&init_task)->nr_cpus_allowed: 1
- // ((struct thread_info *)(&init_task)->stack)->cpu: 0
- // (&init_task)->wake_cpu: 0
- // pcpu0->curr: &init_task
- // pcpu0->idle: &init_task
- // (&init_task)->on_cpu: 1
- // ((struct thread_info *)(&init_task)->stack)->preempt_count: PREEMPT_ENABLED: 0
- // (&init_task)->sched_class: &idle_sched_class
- // (&init_task)->comm: "swapper/0"
- sched_init()->zalloc_cpumask_var()
- // sched_domains_tmpmask.bits[0]: 0
- sched_init()->idle_thread_set_boot_cpu();
- to be analyzed next time
- sched_init()->init_sched_fair_class();
- to be analyzed next time
core.c::init_rq_hrtick()
// ARM10C 20140913
// [pcp0] rq: &runqueues
static void init_rq_hrtick(struct rq *rq)
{
#ifdef CONFIG_SMP // CONFIG_SMP=y
// rq->hrtick_csd_pending: (&runqueues)->hrtick_csd_pending
rq->hrtick_csd_pending = 0;
// rq->hrtick_csd_pending: (&runqueues)->hrtick_csd_pending: 0
// rq->hrtick_csd.flags: (&runqueues)->hrtick_csd.flags
rq->hrtick_csd.flags = 0;
// rq->hrtick_csd.flags: (&runqueues)->hrtick_csd.flags: 0
// rq->hrtick_csd.func: (&runqueues)->hrtick_csd.func
rq->hrtick_csd.func = __hrtick_start;
// rq->hrtick_csd.func: (&runqueues)->hrtick_csd.func: __hrtick_start
// rq->hrtick_csd.info: (&runqueues)->hrtick_csd.info, rq: &runqueues
rq->hrtick_csd.info = rq;
// rq->hrtick_csd.info: (&runqueues)->hrtick_csd.info: &runqueues
#endif
// &rq->hrtick_timer: &(&runqueues)->hrtick_timer, CLOCK_MONOTONIC: 1, HRTIMER_MODE_REL: 1
hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
// what hrtimer_init(hrtick_timer) did:
// (&runqueues)->hrtick_timer zero-initialized
// (&(&runqueues)->hrtick_timer)->base: &hrtimer_bases->clock_base[0]
// RB tree node (&(&(&runqueues)->hrtick_timer)->node)->node initialized
// &rq->hrtick_timer.function: (&runqueues)->hrtick_timer.function
rq->hrtick_timer.function = hrtick;
// &rq->hrtick_timer.function: (&runqueues)->hrtick_timer.function: hrtick
}
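For context, the fields initialized above are consumed by hrtick_start(). Below is a simplified sketch of that consumer based on this kernel generation; the exact body varies between versions, so treat it as an approximation rather than the verbatim source:
static void hrtick_start_sketch(struct rq *rq, u64 delay)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	ktime_t time = ktime_add_ns(timer->base->get_time(), delay);

	hrtimer_set_expires(timer, time);

	if (rq == this_rq()) {
		// local runqueue: arm the hrtimer directly
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
	} else if (!rq->hrtick_csd_pending) {
		// remote runqueue: hand hrtick_csd to that cpu; __hrtick_start
		// (hrtick_csd.func) then arms the timer there in IPI context
		__smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
		rq->hrtick_csd_pending = 1;
	}
}
On the target cpu, __hrtick_start() re-arms the timer under rq->lock and clears hrtick_csd_pending again.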
hrtimer.c::hrtimer_init()
void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
enum hrtimer_mode mode)
{
// timer: &(&def_rt_bandwidth)->rt_period_timer, clock_id: 1, mode: 1
// timer: &(&runqueues)->hrtick_timer, clock_id: 1, mode: 1
debug_init(timer, clock_id, mode);
// timer: &(&def_rt_bandwidth)->rt_period_timer, clock_id: 1, mode: 1
// timer: &(&runqueues)->hrtick_timer, clock_id: 1, mode: 1
__hrtimer_init(timer, clock_id, mode);
// what __hrtimer_init(rt_period_timer) did:
// (&def_rt_bandwidth)->rt_period_timer zero-initialized
// (&(&def_rt_bandwidth)->rt_period_timer)->base: &hrtimer_bases->clock_base[0]
// RB tree node (&(&(&def_rt_bandwidth)->rt_period_timer)->node)->node initialized
//
// what __hrtimer_init(hrtick_timer) did:
// (&runqueues)->hrtick_timer zero-initialized
// (&(&runqueues)->hrtick_timer)->base: &hrtimer_bases->clock_base[0]
// RB tree node (&(&(&runqueues)->hrtick_timer)->node)->node initialized
}
EXPORT_SYMBOL_GPL(hrtimer_init);
hrtimer.c::__hrtimer_init()
// ARM10C 20140913
// timer: &(&runqueues)->hrtick_timer, clock_id: 1, mode: 1
static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
enum hrtimer_mode mode)
{
struct hrtimer_cpu_base *cpu_base;
int base;
// timer: &(&runqueues)->hrtick_timer, sizeof(struct hrtimer): 40 bytes
memset(timer, 0, sizeof(struct hrtimer));
// zero-initialize (&runqueues)->hrtick_timer
// __raw_get_cpu_var(hrtimer_bases):
// *({
// do {
// const void __percpu *__vpp_verify = (typeof((&(hrtimer_bases))))NULL;
// (void)__vpp_verify;
// } while (0)
// &(hrtimer_bases) + __my_cpu_offset;
// })
cpu_base = &__raw_get_cpu_var(hrtimer_bases);
// cpu_base:
// ({
// do {
// const void __percpu *__vpp_verify = (typeof((&(hrtimer_bases))))NULL;
// (void)__vpp_verify;
// } while (0)
// &(hrtimer_bases) + __my_cpu_offset;
// })
// clock_id: 1, CLOCK_REALTIME: 0, mode: 1, HRTIMER_MODE_ABS: 0
if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
clock_id = CLOCK_MONOTONIC;
// clock_id: 1, hrtimer_clockid_to_base(1): 0
base = hrtimer_clockid_to_base(clock_id);
// base: 0
// timer->base: (&(&runqueues)->hrtick_timer)->base,
// base: 0, &cpu_base->clock_base: &hrtimer_bases->clock_base
timer->base = &cpu_base->clock_base[base];
// timer->base: (&(&runqueues)->hrtick_timer)->base: &hrtimer_bases->clock_base[0]
// &timer->node: &(&(&runqueues)->hrtick_timer)->node
timerqueue_init(&timer->node);
// initialize the RB tree node &(&(&runqueues)->hrtick_timer)->node
#ifdef CONFIG_TIMER_STATS // CONFIG_TIMER_STATS=n
timer->start_site = NULL;
timer->start_pid = -1;
memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
}
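For reference, hrtimer_clockid_to_base() is a straight table lookup; in this kernel generation the mapping looks like the following (note HRTIMER_BASE_MONOTONIC is 0, which is why clock_id 1 yields base 0 above):
static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
	[CLOCK_REALTIME]	= HRTIMER_BASE_REALTIME,
	[CLOCK_MONOTONIC]	= HRTIMER_BASE_MONOTONIC,
	[CLOCK_BOOTTIME]	= HRTIMER_BASE_BOOTTIME,
	[CLOCK_TAI]		= HRTIMER_BASE_TAI,
};

static inline int hrtimer_clockid_to_base(clockid_t clock_id)
{
	return hrtimer_clock_to_base_table[clock_id];
}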
core.c::set_load_weight()
// ARM10C 20140913
// &init_task
static void set_load_weight(struct task_struct *p)
{
// p->static_prio: (&init_task)->static_prio: 120, MAX_RT_PRIO: 100
int prio = p->static_prio - MAX_RT_PRIO;
// prio: 20
// &p->se.load: &(&init_task)->se.load
struct load_weight *load = &p->se.load;
// load: &(&init_task)->se.load
/*
* SCHED_IDLE tasks get minimal weight:
*/
// p->policy: (&init_task)->policy: SCHED_NORMAL: 0, SCHED_IDLE: 5
if (p->policy == SCHED_IDLE) {
load->weight = scale_load(WEIGHT_IDLEPRIO);
load->inv_weight = WMULT_IDLEPRIO;
return;
}
// load->weight: (&(&init_task)->se.load)->weight,
// prio: 20, prio_to_weight[20]: 1024, scale_load(1024): 1024
load->weight = scale_load(prio_to_weight[prio]);
// load->weight: (&(&init_task)->se.load)->weight: 1024
// load->inv_weight: (&(&init_task)->se.load)->inv_weight,
// prio: 20, prio_to_wmult[20]: 4194304
load->inv_weight = prio_to_wmult[prio];
// load->inv_weight: (&(&init_task)->se.load)->inv_weight: 4194304
}
- what set_load_weight did:
- // (&(&init_task)->se.load)->weight: 1024
- // (&(&init_task)->se.load)->inv_weight: 4194304
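The 1024/4194304 pair is not arbitrary: inv_weight is precomputed as 2^32 / weight so the scheduler can replace divisions by a task's load weight with a multiply and shift. A quick stand-alone check of the nice-0 entry (prio_to_weight[20] = 1024):
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t weight = 1024;	/* prio_to_weight[20], i.e. nice 0 */
	uint32_t inv_weight = (uint32_t)((1ULL << 32) / weight);

	printf("inv_weight = %u\n", inv_weight);	/* prints 4194304 */
	return 0;
}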
plist.h::plist_head_init()
// ARM10C 20140913
// &init_task.pi_waiters
static inline void
plist_head_init(struct plist_head *head)
{
// &head->node_list: (&init_task.pi_waiters)->node_list
INIT_LIST_HEAD(&head->node_list);
// (&init_task.pi_waiters)->node_list list initialized
}
- what plist_head_init did:
- // (&init_task.pi_waiters)->node_list list initialized
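For reference, the "list initialized" lines in these summaries all boil down to the self-linking in include/linux/list.h:
static inline void INIT_LIST_HEAD(struct list_head *list)
{
	list->next = list;
	list->prev = list;
}
An empty list is thus a head whose next and prev both point at itself, which is exactly what list_empty() tests.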
core.c::init_idle()
// ARM10C 20140913
// current: init_task, smp_processor_id(): 0
void init_idle(struct task_struct *idle, int cpu)
{
// cpu: 0, cpu_rq(0): [pcpu0] &runqueues
struct rq *rq = cpu_rq(cpu);
// rq: [pcpu0] &runqueues
unsigned long flags;
// &rq->lock: [pcpu0] &(&runqueues)->lock
raw_spin_lock_irqsave(&rq->lock, flags);
// take the lock using &rq->lock: [pcpu0] &(&runqueues)->lock and save the cpsr into flags
// idle: init_task
__sched_fork(0, idle);
// what __sched_fork did:
// (&init_task)->on_rq: 0
// (&init_task)->se.on_rq: 0
// (&init_task)->se.exec_start: 0
// (&init_task)->se.sum_exec_runtime: 0
// (&init_task)->se.prev_sum_exec_runtime: 0
// (&init_task)->se.nr_migrations: 0
// (&init_task)->se.vruntime: 0
// &(&init_task)->se.group_node list initialized
// &(&init_task)->rt.run_list list initialized
// idle->state: (&init_task)->state, TASK_RUNNING: 0
idle->state = TASK_RUNNING;
// idle->state: (&init_task)->state: TASK_RUNNING: 0
// idle->se.exec_start: (&init_task)->se.exec_start, sched_clock(): 0
idle->se.exec_start = sched_clock();
// idle->se.exec_start: (&init_task)->se.exec_start: 0
// idle: &init_task, cpu: 0, cpumask_of(0): &cpu_bit_bitmap[1][0]
do_set_cpus_allowed(idle, cpumask_of(cpu));
// what do_set_cpus_allowed did:
// (&init_task)->cpus_allowed->bits[0]: 1
// (&init_task)->nr_cpus_allowed: 1
/*
* We're having a chicken and egg problem, even though we are
* holding rq->lock, the cpu isn't yet set to this cpu so the
* lockdep check in task_group() will fail.
*
* Similar case to sched_fork(). / Alternatively we could
* use task_rq_lock() here and obtain the other rq->lock.
*
* Silence PROVE_RCU
*/
rcu_read_lock();
// what rcu_read_lock did:
// (&init_task)->rcu_read_lock_nesting: 1
// idle: &init_task, cpu: 0
__set_task_cpu(idle, cpu);
// what __set_task_cpu did:
// ((struct thread_info *)(&init_task)->stack)->cpu: 0
// (&init_task)->wake_cpu: 0
rcu_read_unlock();
// what rcu_read_unlock did:
// (&init_task)->rcu_read_lock_nesting: 0
// rq->curr: [pcpu0] (&runqueues)->curr, rq->idle: [pcpu0] (&runqueues)->idle,
// idle: &init_task
rq->curr = rq->idle = idle;
// rq->curr: [pcpu0] (&runqueues)->curr: &init_task
// rq->idle: [pcpu0] (&runqueues)->idle: &init_task
#if defined(CONFIG_SMP) // CONFIG_SMP=y
// idle->on_cpu: (&init_task)->on_cpu
idle->on_cpu = 1;
// idle->on_cpu: (&init_task)->on_cpu: 1
#endif
// &rq->lock: [pcpu0] &(&runqueues)->lock
raw_spin_unlock_irqrestore(&rq->lock, flags);
// release the lock using &rq->lock: [pcpu0] &(&runqueues)->lock and restore the cpsr saved in flags
/* Set the preempt count _outside_ the spinlocks! */
// idle: &init_task, cpu: 0
init_idle_preempt_count(idle, cpu);
// ((struct thread_info *)(&init_task)->stack)->preempt_count: PREEMPT_ENABLED: 0
/*
* The idle tasks have their own, simple scheduling class:
*/
// idle->sched_class: (&init_task)->sched_class
idle->sched_class = &idle_sched_class;
// idle->sched_class: (&init_task)->sched_class: &idle_sched_class
// idle: &init_task, cpu: 0
ftrace_graph_init_idle_task(idle, cpu); // null function
// idle: &init_task, cpu: 0
vtime_init_idle(idle, cpu); // null function
#if defined(CONFIG_SMP) // CONFIG_SMP=y
// idle->comm: (&init_task)->comm, INIT_TASK_COMM: "swapper", cpu: 0
sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
// idle->comm: (&init_task)->comm: "swapper/0"
#endif
}
- what init_idle did:
- // (&init_task)->on_rq: 0
- // (&init_task)->se.on_rq: 0
- // (&init_task)->se.exec_start: 0
- // (&init_task)->se.sum_exec_runtime: 0
- // (&init_task)->se.prev_sum_exec_runtime: 0
- // (&init_task)->se.nr_migrations: 0
- // (&init_task)->se.vruntime: 0
- // &(&init_task)->se.group_node list initialized
- // &(&init_task)->rt.run_list list initialized
- // (&init_task)->state: TASK_RUNNING: 0
- // (&init_task)->se.exec_start: 0
- // (&init_task)->cpus_allowed->bits[0]: 1
- // (&init_task)->nr_cpus_allowed: 1
- // ((struct thread_info *)(&init_task)->stack)->cpu: 0
- // (&init_task)->wake_cpu: 0
- // pcpu0->curr: &init_task
- // pcpu0->idle: &init_task
- // (&init_task)->on_cpu: 1
- // ((struct thread_info *)(&init_task)->stack)->preempt_count: PREEMPT_ENABLED: 0
- // (&init_task)->sched_class: &idle_sched_class
- // (&init_task)->comm: "swapper/0"
core.c::__sched_fork()
// ARM10C 20140913
// 0, idle: &init_task
static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
{
// p->on_rq: (&init_task)->on_rq
p->on_rq = 0;
// p->on_rq: (&init_task)->on_rq: 0
// p->se.on_rq: (&init_task)->se.on_rq
p->se.on_rq = 0;
// p->se.on_rq: (&init_task)->se.on_rq: 0
// p->se.exec_start: (&init_task)->se.exec_start
p->se.exec_start = 0;
// p->se.exec_start: (&init_task)->se.exec_start: 0
// p->se.sum_exec_runtime: (&init_task)->se.sum_exec_runtime
p->se.sum_exec_runtime = 0;
// p->se.sum_exec_runtime: (&init_task)->se.sum_exec_runtime: 0
// p->se.prev_sum_exec_runtime: (&init_task)->se.prev_sum_exec_runtime
p->se.prev_sum_exec_runtime = 0;
// p->se.prev_sum_exec_runtime: (&init_task)->se.prev_sum_exec_runtime: 0
// p->se.nr_migrations: (&init_task)->se.nr_migrations
p->se.nr_migrations = 0;
// p->se.nr_migrations: (&init_task)->se.nr_migrations: 0
// p->se.vruntime: (&init_task)->se.vruntime
p->se.vruntime = 0;
// p->se.vruntime: (&init_task)->se.vruntime: 0
// &p->se.group_node: &(&init_task)->se.group_node
INIT_LIST_HEAD(&p->se.group_node);
// &p->se.group_node: &(&init_task)->se.group_node list initialized
#ifdef CONFIG_SCHEDSTATS // CONFIG_SCHEDSTATS=n
memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
// &p->rt.run_list: &(&init_task)->rt.run_list
INIT_LIST_HEAD(&p->rt.run_list);
// &p->rt.run_list: &(&init_task)->rt.run_list list initialized
#ifdef CONFIG_PREEMPT_NOTIFIERS // CONFIG_PREEMPT_NOTIFIERS=n
INIT_HLIST_HEAD(&p->preempt_notifiers);
#endif
#ifdef CONFIG_NUMA_BALANCING // CONFIG_NUMA_BALANCING=n
if (p->mm && atomic_read(&p->mm->mm_users) == 1) {
p->mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
p->mm->numa_scan_seq = 0;
}
if (clone_flags & CLONE_VM)
p->numa_preferred_nid = current->numa_preferred_nid;
else
p->numa_preferred_nid = -1;
p->node_stamp = 0ULL;
p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0;
p->numa_scan_period = sysctl_numa_balancing_scan_delay;
p->numa_work.next = &p->numa_work;
p->numa_faults = NULL;
p->numa_faults_buffer = NULL;
INIT_LIST_HEAD(&p->numa_entry);
p->numa_group = NULL;
#endif /* CONFIG_NUMA_BALANCING */
}
- what __sched_fork did:
- // (&init_task)->on_rq: 0
- // (&init_task)->se.on_rq: 0
- // (&init_task)->se.exec_start: 0
- // (&init_task)->se.sum_exec_runtime: 0
- // (&init_task)->se.prev_sum_exec_runtime: 0
- // (&init_task)->se.nr_migrations: 0
- // (&init_task)->se.vruntime: 0
- // &(&init_task)->se.group_node list initialized
- // &(&init_task)->rt.run_list list initialized
core.c::do_set_cpus_allowed()
// ARM10C 20140913
// idle: &init_task, cpu: 0, cpumask_of(0): &cpu_bit_bitmap[1][0]
void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
// p->sched_class: (&init_task)->sched_class: NULL,
// p->sched_class->set_cpus_allowed: (&init_task)->sched_class->set_cpus_allowed: NULL
if (p->sched_class && p->sched_class->set_cpus_allowed)
p->sched_class->set_cpus_allowed(p, new_mask);
// p->cpus_allowed: (&init_task)->cpus_allowed, new_mask: &cpu_bit_bitmap[1][0]
cpumask_copy(&p->cpus_allowed, new_mask);
// (&init_task)->cpus_allowed->bits[0]: 1
// p->nr_cpus_allowed: (&init_task)->nr_cpus_allowed,
// new_mask: &cpu_bit_bitmap[1][0]
// cpumask_weight(&cpu_bit_bitmap[1][0]): 1
p->nr_cpus_allowed = cpumask_weight(new_mask);
// p->nr_cpus_allowed: (&init_task)->nr_cpus_allowed: 1
}
- what do_set_cpus_allowed did:
- // (&init_task)->cpus_allowed->bits[0]: 1
- // (&init_task)->nr_cpus_allowed: 1
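For reference, cpumask_of(0) resolves to &cpu_bit_bitmap[1][0] via get_cpu_mask(), which indexes a prebuilt table of single-bit masks (include/linux/cpumask.h, shown here slightly abridged):
static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
{
	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
	p -= cpu / BITS_PER_LONG;
	return to_cpumask(p);
}
Row 1 + (0 % 32) holds the mask with bit 0 set, so bits[0] becomes 1, and cpumask_weight() (a popcount over the mask) returns 1.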
rcupdate.h::rcu_read_lock()
// ARM10C 20140913
static inline void rcu_read_lock(void)
{
__rcu_read_lock();
// what __rcu_read_lock did:
// (&init_task)->rcu_read_lock_nesting: 1
__acquire(RCU); // null function
rcu_lock_acquire(&rcu_lock_map); // null function
// rcu_is_watching(): true
rcu_lockdep_assert(rcu_is_watching(),
"rcu_read_lock() used illegally while idle"); // null function
}
update.c::__rcu_read_lock()
// ARM10C 20140913
void __rcu_read_lock(void)
{
// current->rcu_read_lock_nesting: (&init_task)->rcu_read_lock_nesting: 0
current->rcu_read_lock_nesting++;
// current->rcu_read_lock_nesting: (&init_task)->rcu_read_lock_nesting: 1
barrier(); /* critical section after entry code. */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);
- what __rcu_read_lock did:
- // (&init_task)->rcu_read_lock_nesting: 1
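A hypothetical illustration of the nesting counter in this preemptible-RCU configuration (on the fast path, rcu_read_lock() is nothing more than this bump):
rcu_read_lock();	// (&init_task)->rcu_read_lock_nesting: 0 -> 1
rcu_read_lock();	// nested sections just increment: 1 -> 2
// ... dereference RCU-protected pointers here ...
rcu_read_unlock();	// inner unlock is the cheap path: 2 -> 1
rcu_read_unlock();	// outermost unlock takes the INT_MIN path shown in
			// __rcu_read_unlock() below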
sched.h::__set_task_cpu()
// ARM10C 20140913
// idle: &init_task, cpu: 0
static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
{
// p: &init_task, cpu: 0
set_task_rq(p, cpu); // null function
#ifdef CONFIG_SMP // CONFIG_SMP=y
/*
* After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
* successfuly executed on another CPU. We must ensure that updates of
* per-task data have been completed by this moment.
*/
smp_wmb();
// memory barrier performed
// p: &init_task, cpu: 0
// (&init_task)->stack: &init_thread_info
// task_thread_info(&init_task)->cpu: ((struct thread_info *)(&init_task)->stack)->cpu
task_thread_info(p)->cpu = cpu;
// task_thread_info(&init_task)->cpu: ((struct thread_info *)(&init_task)->stack)->cpu: 0
// p->wake_cpu: (&init_task)->wake_cpu, cpu: 0
p->wake_cpu = cpu;
// p->wake_cpu: (&init_task)->wake_cpu: 0
#endif
}
- what __set_task_cpu did:
- // ((struct thread_info *)(&init_task)->stack)->cpu: 0
- // (&init_task)->wake_cpu: 0
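A hedged illustration of what the smp_wmb() above orders (the CPUs and the field name are hypothetical):
// CPU A: __set_task_cpu(p, 1)		CPU B: task_rq_lock(p, ...)
//   p->some_field = new_value;		  cpu = task_thread_info(p)->cpu;
//   smp_wmb();				  rq  = cpu_rq(cpu);
//   task_thread_info(p)->cpu = 1;	  // p's earlier stores must be visible
//
// Once CPU B observes the new ->cpu value, every per-task store made before
// the barrier on CPU A is guaranteed to be visible to it as well.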
rcupdate.h::rcu_read_unlock()
// ARM10C 20140913
static inline void rcu_read_unlock(void)
{
// rcu_is_watching(): true
rcu_lockdep_assert(rcu_is_watching(),
"rcu_read_unlock() used illegally while idle"); // null function
rcu_lock_release(&rcu_lock_map); // null function
__release(RCU); // null function
__rcu_read_unlock();
// what __rcu_read_unlock did:
// (&init_task)->rcu_read_lock_nesting: 0
}
- what rcu_read_unlock did:
- // (&init_task)->rcu_read_lock_nesting: 0
update.c::__rcu_read_unlock()
// ARM10C 20140913
void __rcu_read_unlock(void)
{
// current: &init_task
struct task_struct *t = current;
// t: &init_task
// t->rcu_read_lock_nesting: (&init_task)->rcu_read_lock_nesting: 1
if (t->rcu_read_lock_nesting != 1) {
--t->rcu_read_lock_nesting;
} else {
barrier(); /* critical section before exit code. */
// barrier performed
// t->rcu_read_lock_nesting: (&init_task)->rcu_read_lock_nesting, INT_MIN: 0x80000000
t->rcu_read_lock_nesting = INT_MIN;
// t->rcu_read_lock_nesting: (&init_task)->rcu_read_lock_nesting: 0x80000000
#ifdef CONFIG_PROVE_RCU_DELAY // CONFIG_PROVE_RCU_DELAY=n
udelay(10); /* Make preemption more probable. */
#endif /* #ifdef CONFIG_PROVE_RCU_DELAY */
barrier(); /* assign before ->rcu_read_unlock_special load */
// barrier performed
// t->rcu_read_unlock_special: (&init_task)->rcu_read_unlock_special: 0
if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
rcu_read_unlock_special(t);
barrier(); /* ->rcu_read_unlock_special load before assign */
// barrier performed
// t->rcu_read_lock_nesting: (&init_task)->rcu_read_lock_nesting: 0x80000000
t->rcu_read_lock_nesting = 0;
// t->rcu_read_lock_nesting: (&init_task)->rcu_read_lock_nesting: 0
}
#ifdef CONFIG_PROVE_LOCKING // CONFIG_PROVE_LOCKING=n
{
int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
}
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
- what __rcu_read_unlock did:
- // (&init_task)->rcu_read_lock_nesting: 0
- // (the transient INT_MIN value marks the outermost unlock as in progress,
- //  so RCU code running from an interrupt in that window does not mistake
- //  the task for one still inside a read-side critical section)
preempt.h::init_idle_preempt_count()
// ARM10C 20140913
// idle: &init_task, cpu: 0
// PREEMPT_ENABLED: 0
// task_thread_info(&init_task): ((struct thread_info *)(&init_task)->stack)
//
// #define init_idle_preempt_count(&init_task, cpu) do {
// ((struct thread_info *)(&init_task)->stack)->preempt_count = 0;
// } while (0)
#define init_idle_preempt_count(p, cpu) do { \
task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
} while (0)
cpumask.h::zalloc_cpumask_var()
// ARM10C 20140913
// &sched_domains_tmpmask, GFP_NOWAIT: 0
static inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
// *mask: sched_domains_tmpmask
cpumask_clear(*mask);
// sched_domains_tmpmask.bits[0]: 0
return true;
}
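Note that with CONFIG_CPUMASK_OFFSTACK=n, cpumask_var_t is a fixed one-element array:
typedef struct cpumask cpumask_var_t[1];
so no memory is allocated here; the GFP_NOWAIT argument is ignored and the call reduces to zeroing static storage, which is why it unconditionally returns true.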
study log
e1812e6..bb4db6a master -> origin/master
Updating e1812e6..bb4db6a
Fast-forward
arch/arm/include/asm/barrier.h | 2 +
arch/arm/include/asm/mmu_context.h | 1 +
arch/arm/include/asm/thread_info.h | 1 +
include/asm-generic/current.h | 1 +
include/asm-generic/param.h | 1 +
include/asm-generic/preempt.h | 10 ++
include/linux/bitmap.h | 20 ++-
include/linux/bitops.h | 7 +-
include/linux/compiler-gcc.h | 1 +
include/linux/compiler.h | 3 +
include/linux/cpumask.h | 42 +++++-
include/linux/ftrace.h | 2 +
include/linux/gfp.h | 1 +
include/linux/hrtimer.h | 1 +
include/linux/init_task.h | 5 +-
include/linux/jiffies.h | 1 +
include/linux/kernel.h | 2 +
include/linux/mm_types.h | 2 +
include/linux/plist.h | 4 +
include/linux/rcupdate.h | 33 +++--
include/linux/rcutiny.h | 3 +-
include/linux/sched.h | 19 ++-
include/linux/sched/rt.h | 1 +
include/linux/sched/sysctl.h | 3 +-
include/linux/seqlock.h | 30 ++++-
include/linux/smp.h | 2 +
include/linux/time.h | 2 +
include/linux/timerqueue.h | 6 +
include/linux/types.h | 2 +
include/linux/vtime.h | 4 +-
include/uapi/linux/sched.h | 4 +
include/uapi/linux/time.h | 1 +
init/init_task.c | 1 +
kernel/cpu.c | 8 ++
kernel/hrtimer.c | 33 ++++-
kernel/rcu/update.c | 25 +++-
kernel/sched/core.c | 269 ++++++++++++++++++++++++++++++++++---
kernel/sched/fair.c | 6 +-
kernel/sched/idle_task.c | 3 +-
kernel/sched/proc.c | 1 +
kernel/sched/sched.h | 30 ++++-
kernel/time/sched_clock.c | 33 +++++
mm/init-mm.c | 1 +
43 files changed, 569 insertions(+), 58 deletions(-)