ARM10C 59주차 후기
일시 : 2014.06.21 (59주차)
모임명 : NAVER개발자커뮤니티지원_IAMROOT.ORG_10차ARM-C
장소 : 토즈 타워점
장소지원 : NAVER 개발자 커뮤니티 지원 프로그램
참여인원 : 4명
스터디 진도 :
- mem_init()을 계속 분석합니다.
- start_kernel()-> mm_init()->kmem_cache_init()->create_boot_cache() 분석중
list_for_each_entry_safe를 사용하는 이유
list_for_each_entry()만을 사용하면 각 list에서 page 주소를 순차적으로 가리키는데,
만약 이 루프에서 list를 변경할 경우 …safe()를 사용한다.
…safe()는 list_for_each_entry_safe()를 사용하면
page, page2를 사용하면 page는 지금 순차적으로 검색하는 리스트를
page2는 다음 리스트를 가리키므로
page를 삭제하여도 page2가 다음 리스트를 가리키기 때문에 삭제나 변경 후
page = page2로 사용해서 안전하게 리스트를 검색하게 할 수 있다.
main.c:: mm_init()
// ARM10C 20140329
static void __init mm_init(void)
{
/*
* page_cgroup requires contiguous pages,
* bigger than MAX_ORDER unless SPARSEMEM.
*/
page_cgroup_init_flatmem(); // null function
mem_init();
// bootmem으로 관리하던 메모리를 buddy로 이관.
// 각 section 메모리 크기를 출력.
// mm/Makefile 에서 CONFIG_SLUB 설정으로 slub.c 로 jump
kmem_cache_init();
// mm/Makefile 에서 CONFIG_SLUB 설정으로 slub.c 로 jump
kmem_cache_init()으로 이동.
slub.c::kmem_cache_init()
create_boot_cache()를 2번째 실행한다.
이 함수를 1번째 실행할때는 slab_state: DOWN에서 실행을 했었고.
2번째 실행할 때는 slab_state: PARTIAL로 바꾸어서 실행하는 것이다.
- slab_state 의미:
slab을 초기화한 단계를 나타냄, PARTIAL은 kmem_cache_node 만 사용이 가능함
void __init kmem_cache_init(void)
{
...
// slab_state: DOWN
slab_state = PARTIAL;
// slab_state: PARTIAL
...
// 2014/06/14 시작
// kmem_cache: &boot_kmem_cache,
// offsetof(struct kmem_cache, node): 128, nr_node_ids: 1
// sizeof(struct kmem_cache_node *): 4 SLAB_HWCACHE_ALIGN: 0x00002000UL
// create_boot_cache(&boot_kmem_cache, "kmem_cache", 132, 0x00002000UL)
create_boot_cache(kmem_cache, "kmem_cache",
offsetof(struct kmem_cache, node) +
nr_node_ids * sizeof(struct kmem_cache_node *),
SLAB_HWCACHE_ALIGN);
create_boot_cache(kmem_cache, "kmem_cache",
offsetof(struct kmem_cache, node) +
nr_node_ids * sizeof(struct kmem_cache_node *),
SLAB_HWCACHE_ALIGN);
slab_common.c::create_boot_cache()
// ARM10C 20140614
// &boot_kmem_cache, "kmem_cache", 132, SLAB_HWCACHE_ALIGN: 0x00002000UL
void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t size,
unsigned long flags)
{
int err;
// s->name: boot_kmem_cache.name: NULL
s->name = name;
// s->name: boot_kmem_cache.name: "kmem_cache"
// s->size: boot_kmem_cache.size: 0
// s->object_size: boot_kmem_cache.object_size: 0
s->size = s->object_size = size;
// s->size: boot_kmem_cache.size: 132
// s->object_size: boot_kmem_cache.object_size: 132
// flags: SLAB_HWCACHE_ALIGN: 0x00002000UL, ARCH_KMALLOC_MINALIGN: 64, size: 132
// s->align: boot_kmem_cache.align: 0
s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size);
// s->align: boot_kmem_cache.align: 64
// s: &boot_kmem_cache, flags: SLAB_HWCACHE_ALIGN: 0x00002000UL
// __kmem_cache_create(&boot_kmem_cache, 0x00002000UL): 0
err = __kmem_cache_create(s, flags);
// s: &boot_kmem_cache, flags: SLAB_HWCACHE_ALIGN: 0x00002000UL
// __kmem_cache_create(&boot_kmem_cache, 0x00002000UL): 0
// __kmem_cache_create(&boot_kmem_cache, 0x00002000UL): 0
err = __kmem_cache_create(s, flags);
slub.c::__kmem_cache_create()
// ARM10C 20140614
// s: &boot_kmem_cache, flags: SLAB_HWCACHE_ALIGN: 0x00002000UL
// s: &boot_kmem_cache, flags: SLAB_HWCACHE_ALIGN: 0x00002000UL
int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
{
int err;
// s: &boot_kmem_cache, flags: SLAB_HWCACHE_ALIGN: 0x00002000UL
// kmem_cache_open(&boot_kmem_cache, 0x00002000UL): 0
err = kmem_cache_open(s, flags);
// kmem_cache_open(&boot_kmem_cache, 0x00002000UL): 0
err = kmem_cache_open(s, flags);
slub.c::kmem_cache_open()
// ARM10C 20140614
// s: &boot_kmem_cache, flags: SLAB_HWCACHE_ALIGN: 0x00002000UL
// s: &boot_kmem_cache, flags: SLAB_HWCACHE_ALIGN: 0x00002000UL
static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
{
// s->size: boot_kmem_cache.size: 132, flags: SLAB_HWCACHE_ALIGN: 0x00002000UL,
// s->name: boot_kmem_cache.name: "kmem_cache", s->ctor: boot_kmem_cache.ctor: NULL
s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor);
// s->flags: boot_kmem_cache.flags: SLAB_HWCACHE_ALIGN: 0x00002000UL
// s->reserved: boot_kmem_cache.reserved: 0
s->reserved = 0;
// s->reserved: boot_kmem_cache.reserved: 0
// need_reserve_slab_rcu: 0, s->flags: boot_kmem_cache.flags: SLAB_HWCACHE_ALIGN
if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU))
s->reserved = sizeof(struct rcu_head);
// s: &boot_kmem_cache, -1, calculate_sizes(&boot_kmem_cache, -1): 1
if (!calculate_sizes(s, -1))
goto error;
// calculate_sizes가 하는일?:
// object size 값에 맞게 내부 단편화 문제를 고려하여 최적의 order를 계산함
// kmem_cache의 맴버 inuse, size, allocflags, min, oo, max 값을 초기화해줌
// object size 값에 맞게 내부 단편화 문제를 고려하여 최적의 order를 계산함
// kmem_cache의 맴버 inuse, size, allocflags, min, oo, max 값을 초기화해줌
// disable_higher_order_debug: 0
if (disable_higher_order_debug) {
if (get_order(s->size) > get_order(s->object_size)) {
s->flags &= ~DEBUG_METADATA_FLAGS;
s->offset = 0;
if (!calculate_sizes(s, -1))
goto error;
}
}
// s->size: boot_kmem_cache.size: 192, ilog2(192): 7
// s: &boot_kmem_cache, 3
set_min_partial(s, ilog2(s->size) / 2);
// boot_kmem_cache.min_partial: 5
// s: &boot_kmem_cache, kmem_cache_has_cpu_partial(&boot_kmem_cache): 1
// s->size: boot_kmem_cache.size: 192, PAGE_SIZE: 0x1000
if (!kmem_cache_has_cpu_partial(s))
s->cpu_partial = 0;
else if (s->size >= PAGE_SIZE)
s->cpu_partial = 2;
else if (s->size >= 1024)
s->cpu_partial = 6;
else if (s->size >= 256)
s->cpu_partial = 13;
else
// s->cpu_partial: boot_kmem_cache.cpu_partial: 0
s->cpu_partial = 30;
// boot_kmem_cache.cpu_partial: 30
// s: &boot_kmem_cache, init_kmem_cache_nodes(&boot_kmem_cache): 1
if (!init_kmem_cache_nodes(s))
goto error;
// s: &boot_kmem_cache, init_kmem_cache_nodes(&boot_kmem_cache): 1
if (!init_kmem_cache_nodes(s))
slub.c::init_kmem_cache_nodes()
// ARM10C 20140614
// s: &boot_kmem_cache
// s: &boot_kmem_cache
static int init_kmem_cache_nodes(struct kmem_cache *s)
{
int node;
// N_NORMAL_MEMORY: 2
for_each_node_state(node, N_NORMAL_MEMORY) {
// for ( (node) = 0; (node) == 0; (node) = 1)
struct kmem_cache_node *n;
// slab_state: DOWN: 0
// slab_state: PARTIAL: 1, DOWN: 0
if (slab_state == DOWN) {
// node: 0
early_kmem_cache_node_alloc(node);
continue;
}
// kmem_cache_node: &boot_kmem_cache_node, GFP_KERNEL: 0xD0, node: 0
// kmem_cache_alloc_node(&boot_kmem_cache_node, GFP_KERNEL: 0xD0, 0):
// UNMOVABLE인 page 의 object의 시작 virtual address + 64
n = kmem_cache_alloc_node(kmem_cache_node,
GFP_KERNEL, node);
// kmem_cache_node: &boot_kmem_cache_node, GFP_KERNEL: 0xD0, node: 0
// kmem_cache_alloc_node(&boot_kmem_cache_node, GFP_KERNEL: 0xD0, 0):
// UNMOVABLE인 page 의 object의 시작 virtual address + 64
// kmem_cache_alloc_node(&boot_kmem_cache_node, GFP_KERNEL: 0xD0, 0):
// UNMOVABLE인 page 의 object의 시작 virtual address + 64
n = kmem_cache_alloc_node(kmem_cache_node, GFP_KERNEL, node);
slab.h::kmem_cache_alloc_node()
// ARM10C 20140614
// kmem_cache_node: &boot_kmem_cache_node, GFP_KERNEL: 0xD0, node: 0
// kmem_cache_node: &boot_kmem_cache_node, GFP_KERNEL: 0xD0, node: 0
// Per-node allocation entry point. In this (!NUMA) build the node hint is
// ignored — the body visibly never reads 'node' — and the request is
// forwarded straight to kmem_cache_alloc().
static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
{
// s: &boot_kmem_cache_node, flags: GFP_KERNEL: 0xD0
// kmem_cache_alloc(&boot_kmem_cache_node, GFP_KERNEL: 0xD0):
// start virtual address of the object in the UNMOVABLE page + 64
return kmem_cache_alloc(s, flags);
}
// s: &boot_kmem_cache_node, flags: GFP_KERNEL: 0xD0
return kmem_cache_alloc(s, flags);
slub.c::kmem_cache_alloc()
// ARM10C 20140614
// s: &boot_kmem_cache_node, flags: GFP_KERNEL: 0xD0
// s: &boot_kmem_cache_node, flags: GFP_KERNEL: 0xD0
void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
{
// s: &boot_kmem_cache_node, gfpflags: GFP_KERNEL: 0xD0
// slab_alloc(&boot_kmem_cache_node, GFP_KERNEL: 0xD0): UNMOVABLE인 page 의 object의 시작 virtual address + 64
void *ret = slab_alloc(s, gfpflags, _RET_IP_);
slub.c::slab_alloc()
// ARM10C 20140614
// s: &boot_kmem_cache_node, gfpflags: GFP_KERNEL: 0xD0, _RET_IP_
// Convenience wrapper: allocate from cache 's' with no node preference
// (NUMA_NO_NODE) by delegating to slab_alloc_node(). 'addr' is the caller's
// return address (_RET_IP_), passed through for tracing.
static __always_inline void *slab_alloc(struct kmem_cache *s,
gfp_t gfpflags, unsigned long addr)
{
// s: &boot_kmem_cache_node, gfpflags: GFP_KERNEL: 0xD0, NUMA_NO_NODE: -1, _RET_IP_
// slab_alloc_node(&boot_kmem_cache_node, GFP_KERNEL: 0xD0, -1, _RET_IP_):
// start virtual address of the object in the UNMOVABLE page + 64
return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr);
}
// s: &boot_kmem_cache_node, gfpflags: GFP_KERNEL: 0xD0, NUMA_NO_NODE: -1, _RET_IP_
return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr);
slub.c::slab_alloc_node()
// ARM10C 20140614
// s: &boot_kmem_cache_node, gfpflags: GFP_KERNEL: 0xD0, _RET_IP_
// (Re-pasted quote of the same wrapper.) Allocate from cache 's' with no
// node preference (NUMA_NO_NODE) via slab_alloc_node(); 'addr' is the
// caller's return address (_RET_IP_) for tracing.
static __always_inline void *slab_alloc(struct kmem_cache *s,
gfp_t gfpflags, unsigned long addr)
{
// s: &boot_kmem_cache_node, gfpflags: GFP_KERNEL: 0xD0, NUMA_NO_NODE: -1, _RET_IP_
// slab_alloc_node(&boot_kmem_cache_node, GFP_KERNEL: 0xD0, -1, _RET_IP_):
// start virtual address of the object in the UNMOVABLE page + 64
return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr);
}
// s: &boot_kmem_cache_node, gfpflags: GFP_KERNEL: 0xD0, NUMA_NO_NODE: -1, _RET_IP_
// slab_alloc_node(&boot_kmem_cache_node, GFP_KERNEL: 0xD0, -1, _RET_IP_):
// UNMOVABLE인 page 의 object의 시작 virtual address + 64
return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr);
slub.c::slab_alloc_node()
// ARM10C 20140614
// s: &boot_kmem_cache_node, gfpflags: GFP_KERNEL: 0xD0, NUMA_NO_NODE: -1, _RET_IP_
static __always_inline void *slab_alloc_node(struct kmem_cache *s,
gfp_t gfpflags, int node, unsigned long addr)
{
void **object;
struct kmem_cache_cpu *c;
struct page *page;
unsigned long tid;
// s: &boot_kmem_cache_node, gfpflags: GFP_KERNEL: 0xD0
// slab_pre_alloc_hook(&boot_kmem_cache_node, 0xD0): 0
if (slab_pre_alloc_hook(s, gfpflags))
return NULL;
// s: &boot_kmem_cache_node, gfpflags: GFP_KERNEL: 0xD0
// memcg_kmem_get_cache(&boot_kmem_cache_node, 0xD0): &boot_kmem_cache_node
s = memcg_kmem_get_cache(s, gfpflags);
// s: &boot_kmem_cache_node
redo:
preempt_disable();
// 선점 카운트 증가, barrier 적용
// s->cpu_slab: (&boot_kmem_cache_node)->cpu_slab: 0xc0502d00
// __this_cpu_ptr((&boot_kmem_cache_node)->cpu_slab: 0xc0502d00):
// (&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋)
c = __this_cpu_ptr(s->cpu_slab);
// c: (&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋)
// c->tid: ((&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋))->tid: 0
tid = c->tid;
// tid: 0
preempt_enable();
// barrier 적용, 선점 카운트 감소, should_resched 수행
// c->freelist: ((&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋))->freelist: 0
object = c->freelist;
// object: 0
// c->page: ((&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋))->page: 0
page = c->page;
// page: 0
// c->freelist, c->page 의 값을 초기화?:
// pcpu_populate_chunk에서 초기화 하고 왔음
// object: 0, page: 0, node: -1, node_match(0, -1): 1
if (unlikely(!object || !node_match(page, node)))
// s: &boot_kmem_cache_node, gfpflags: GFP_KERNEL: 0xD0, node: -1, addr: _RET_IP_,
// c: (&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋)
// __slab_alloc(&boot_kmem_cache_node, GFP_KERNEL: 0xD0, -1, _RET_IP_,
// (&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋)):
// UNMOVABLE인 page 의 object의 시작 virtual address + 64
object = __slab_alloc(s, gfpflags, node, addr, c);
// s: &bootkmem_cache_node, gfpflags: GFP_KERNEL: 0xD0, node: -1, addr: _RET_IP,
// c: (&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋)
// c: (&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋)
object = __slab_alloc(s, gfpflags, node, addr, c);
slub.c::__slab_alloc()
// ARM10C 20140614
// s: &bootkmem_cache_node, gfpflags: GFP_KERNEL: 0xD0, node: -1, addr: _RET_IP,
// c: (&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋)
// s: &bootkmem_cache_node, gfpflags: GFP_KERNEL: 0xD0, node: -1, addr: _RET_IP,
// c: (&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋)
static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
unsigned long addr, struct kmem_cache_cpu *c)
{
void *freelist;
struct page *page;
unsigned long flags;
local_irq_save(flags);
// cpsr을 flags에 저장
#ifdef CONFIG_PREEMPT // CONFIG_PREEMPT=y
/*
* We may have been preempted and rescheduled on a different
* cpu before disabling interrupts. Need to reload cpu area
* pointer.
*/
// s->cpu_slab: (&boot_kmem_cache_node)->cpu_slab: 0xc0502d00
// __this_cpu_ptr((&boot_kmem_cache_node)->cpu_slab: 0xc0502d00):
// (&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋)
c = this_cpu_ptr(s->cpu_slab);
// c: (&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋)
#endif
// c->page: ((&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋))->page: 0
page = c->page;
// page: 0
// page: 0
if (!page)
goto new_slab;
// new_slab 심볼로 이동
redo:
...
new_slab:
// c->partial: (&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋)->partial: 0
if (c->partial) {
page = c->page = c->partial;
c->partial = page->next;
stat(s, CPU_PARTIAL_ALLOC);
c->freelist = NULL;
goto redo;
}
// s: &boot_kmem_cache_node, gfpflags: GFP_KERNEL: 0xD0, node: -1,
// c: (&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋)
// new_slab_objects(&boot_kmem_cache_node, GFP_KERNEL: 0xD0, -1,
// (&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋)):
// UNMOVABLE인 page 의 object의 시작 virtual address + 64
freelist = new_slab_objects(s, gfpflags, node, &c);
// s: &boot_kmem_cache_node, gfpflags: GFP_KERNEL: 0xD0, node: -1,
// c: (&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋)
// c: (&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋)
freelist = new_slab_objects(s, gfpflags, node, &c);
slub.c::new_slab_objects()
// ARM10C 20140614
// s: &boot_kmem_cache_node, gfpflags: GFP_KERNEL: 0xD0, node: -1,
// &c: &(&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋)
// s: &boot_kmem_cache_node, gfpflags: GFP_KERNEL: 0xD0, node: -1,
// &c: &(&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋)
static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
int node, struct kmem_cache_cpu **pc)
{
void *freelist;
// *pc: (&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋)
struct kmem_cache_cpu *c = *pc;
// c: (&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋)
struct page *page;
// s: &boot_kmem_cache_node, flags: GFP_KERNEL: 0xD0, node: -1,
// c: (&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋)
// get_partial(&boot_kmem_cache_node, GFP_KERNEL: 0xD0, -1,
// (&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋)):
// UNMOVABLE인 page 의 object의 시작 virtual address + 64
freelist = get_partial(s, flags, node, c);
// s: &boot_kmem_cache_node, flags: GFP_KERNEL: 0xD0, node: -1,
// c: (&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + per_cpu_start에서의pcpu_base_addr의 옵셋)
// get_partial(&boot_kmem_cache_node, GFP_KERNEL: 0xD0, -1,
// (&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + per_cpu_start에서의pcpu_base_addr의 옵셋)):
// UNMOVABLE인 page 의 object의 시작 virtual address + 64
// c: (&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + per_cpu_start에서의pcpu_base_addr의 옵셋)
// get_partial(&boot_kmem_cache_node, GFP_KERNEL: 0xD0, -1,
// (&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + per_cpu_start에서의pcpu_base_addr의 옵셋)):
// UNMOVABLE인 page 의 object의 시작 virtual address + 64
freelist = get_partial(s, flags, node, c);
slub.c::get_partial()
// ARM10C 20140614
// s: &boot_kmem_cache_node, flags: GFP_KERNEL: 0xD0, node: -1,
// c: (&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋)
// s: &boot_kmem_cache_node, flags: GFP_KERNEL: 0xD0, node: -1,
// c: (&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋)
static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
struct kmem_cache_cpu *c)
{
void *object;
// node: -1, NUMA_NO_NODE: -1, numa_node_id(): 0
int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node;
// searchnode: 0
// s: &boot_kmem_cache_node, searchnode: 0
// get_node(&boot_kmem_cache_node, 0): (&boot_kmem_cache_node)->node[0],
// c: (&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋),
// flags: GFP_KERNEL: 0xD0
// get_partial_node(&boot_kmem_cache_node, (&boot_kmem_cache_node)->node[0],
// (&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋), GFP_KERNEL: 0xD0):
// UNMOVABLE인 page 의 object의 시작 virtual address + 64
object = get_partial_node(s, get_node(s, searchnode), c, flags);
// s: &boot_kmem_cache_node, searchnode: 0
// get_node(&boot_kmem_cache_node, 0): (&boot_kmem_cache_node)->node[0],
// c: (&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + per_cpu_start에서의pcpu_base_addr의 옵셋),
// flags: GFP_KERNEL: 0xD0
// get_partial_node(&boot_kmem_cache_node, (&boot_kmem_cache_node)->node[0],
// (&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + per_cpu_start에서의pcpu_base_addr의 옵셋), GFP_KERNEL: 0xD0):
// UNMOVABLE인 page 의 object의 시작 virtual address + 64
// get_node(&boot_kmem_cache_node, 0): (&boot_kmem_cache_node)->node[0],
// c: (&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + per_cpu_start에서의pcpu_base_addr의 옵셋),
// flags: GFP_KERNEL: 0xD0
// get_partial_node(&boot_kmem_cache_node, (&boot_kmem_cache_node)->node[0],
// (&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + per_cpu_start에서의pcpu_base_addr의 옵셋), GFP_KERNEL: 0xD0):
// UNMOVABLE인 page 의 object의 시작 virtual address + 64
object = get_partial_node(s, get_node(s, searchnode), c, flags);
slub.c::get_partial_node()
// ARM10C 20140614
// s: &boot_kmem_cache_node, (&boot_kmem_cache_node)->node[0],
// c: (&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋),
// flags: GFP_KERNEL: 0xD0
// s: &boot_kmem_cache_node, (&boot_kmem_cache_node)->node[0],
// c: (&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋),
// flags: GFP_KERNEL: 0xD0
static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
struct kmem_cache_cpu *c, gfp_t flags)
{
struct page *page, *page2;
void *object = NULL;
int available = 0;
int objects;
/*
* Racy check. If we mistakenly see no partial slabs then we
* just allocate an empty slab. If we mistakenly try to get a
* partial slab and there is none available then get_partials()
* will return NULL.
*/
// n: (&boot_kmem_cache_node)->node[0]: UNMOVABLE인 page 의 object의 시작 virtual address,
// n->nr_partial: ((&boot_kmem_cache_node)->node[0])->nr_partial: 1
if (!n || !n->nr_partial)
return NULL;
// early_kmem_cache_node_alloc에서 (&boot_kmem_cache_node)->node[0] 값을 설정함
// n->list_lock: ((&boot_kmem_cache_node)->node[0])->list_lock
spin_lock(&n->list_lock);
// ((&boot_kmem_cache_node)->node[0])->list_lock의 spinlock 획득
// 2014/06/14 종료
// 2014/06/21 시작
list_for_each_entry_safe(page, page2, &n->partial, lru) {
// for (page = list_first_entry(&n->partial, typeof(*page), lru),
// page2 = list_next_entry(page, lru);
// &page->lru != (&n->partial);
// page = page2, page2 = list_next_entry(page2, lru))
// page: MIGRATE_UNMOVABLE인 page, page2: page lru의 offset 만큼 계산된 주소
void *t;
// page: MIGRATE_UNMOVABLE인 page, flags: GFP_KERNEL: 0xD0
// pfmemalloc_match(MIGRATE_UNMOVABLE인 page, GFP_KERNEL: 0xD0): 1
if (!pfmemalloc_match(page, flags))
continue;
// page: MIGRATE_UNMOVABLE인 page, flags: GFP_KERNEL: 0xD0
// pfmemalloc_match(MIGRATE_UNMOVABLE인 page, GFP_KERNEL: 0xD0): 1
// pfmemalloc_match(MIGRATE_UNMOVABLE인 page, GFP_KERNEL: 0xD0): 1
if (!pfmemalloc_match(page, flags))
slub.c::pfmemalloc_match()
// ARM10C 20140621
// page: MIGRATE_UNMOVABLE인 page, flags: GFP_KERNEL: 0xD0
// page: MIGRATE_UNMOVABLE인 page, flags: GFP_KERNEL: 0xD0
// Decide whether a slab page may satisfy this allocation: if the page is
// marked as a pfmemalloc (memory-reserve) slab, the request is only allowed
// when gfp_pfmemalloc_allowed(gfpflags) says so; ordinary pages always match.
static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
{
// page: the MIGRATE_UNMOVABLE page,
// PageSlabPfmemalloc(the MIGRATE_UNMOVABLE page): 0
if (unlikely(PageSlabPfmemalloc(page)))
return gfp_pfmemalloc_allowed(gfpflags);
return true;
// traced path: page is not pfmemalloc-marked -> return true
}
page-flags.h::PageSlabPfmemalloc()
// ARM10C 20140621
// page: MIGRATE_UNMOVABLE인 page
// page: MIGRATE_UNMOVABLE인 page
// Query the pfmemalloc marker on a slab page. SLUB reuses the PG_active bit
// (PageActive) for this purpose on slab pages, so the page must actually be
// a slab page — asserted via VM_BUG_ON(!PageSlab(page)).
static inline int PageSlabPfmemalloc(struct page *page)
{
// page: the MIGRATE_UNMOVABLE page,
// PageSlab(the MIGRATE_UNMOVABLE page): 1
VM_BUG_ON(!PageSlab(page));
// page: the MIGRATE_UNMOVABLE page,
// PageActive(the MIGRATE_UNMOVABLE page): 0
return PageActive(page);
// traced result: 0 (not a pfmemalloc slab)
}
// page: MIGRATE_UNMOVABLE인 page,
// PageActive(MIGRATE_UNMOVABLE인 page): 0
// PageActive(MIGRATE_UNMOVABLE인 page): 0
return PageActive(page);
page-flags.h::PageActive()
// ARM10C 20140621
PAGEFLAG(Active, active) __CLEARPAGEFLAG(Active, active)
// ARM10C 20140621
// __CLEARPAGEFLAG(Active, active)
// static inline void __ClearPageActive(struct page *page)
// { __clear_bit(PG_active, &page->flags); }
#define __CLEARPAGEFLAG(uname, lname) \
// ARM10C 20140621
// TESTCLEARFLAG(Active, active):
// static inline int TestClearPageActive(struct page *page)
// { return test_and_clear_bit(PG_active, &page->flags); }
// Generates TestClearPage<uname>(): atomically test-and-clear bit
// PG_<lname> in page->flags, returning the previous bit value.
#define TESTCLEARFLAG(uname, lname) \
static inline int TestClearPage##uname(struct page *page) \
{ return test_and_clear_bit(PG_##lname, &page->flags); }
slub.c::get_partial_node()
static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
struct kmem_cache_cpu *c, gfp_t flags)
{
...
list_for_each_entry_safe(page, page2, &n->partial, lru) {
void *t;
if (!pfmemalloc_match(page, flags))
continue;
// s: &boot_kmem_cache_node, n: (&boot_kmem_cache_node)->node[0],
// page: MIGRATE_UNMOVABLE인 page, object: NULL
// acquire_slab(&boot_kmem_cache_node, (&boot_kmem_cache_node)->node[0], MIGRATE_UNMOVABLE인 page, 1, objects):
// UNMOVABLE인 page 의 object의 시작 virtual address + 64
t = acquire_slab(s, n, page, object == NULL, &objects);
// s: &boot_kmem_cache_node, n: (&boot_kmem_cache_node)->node[0],
// page: MIGRATE_UNMOVABLE인 page, object: NULL
// acquire_slab(&boot_kmem_cache_node, (&boot_kmem_cache_node)->node[0], MIGRATE_UNMOVABLE인 page, 1, objects):
// UNMOVABLE인 page 의 object의 시작 virtual address + 64
// page: MIGRATE_UNMOVABLE인 page, object: NULL
// acquire_slab(&boot_kmem_cache_node, (&boot_kmem_cache_node)->node[0], MIGRATE_UNMOVABLE인 page, 1, objects):
// UNMOVABLE인 page 의 object의 시작 virtual address + 64
t = acquire_slab(s, n, page, object == NULL, &objects);
slub.c::acquire_slab()
// ARM10C 20140621
// s: &boot_kmem_cache_node, n: (&boot_kmem_cache_node)->node[0],
// page: MIGRATE_UNMOVABLE인 page, 1, objects
// s: &boot_kmem_cache_node, n: (&boot_kmem_cache_node)->node[0],
// page: MIGRATE_UNMOVABLE인 page, 1, objects
static inline void *acquire_slab(struct kmem_cache *s,
struct kmem_cache_node *n, struct page *page,
int mode, int *objects)
{
void *freelist;
unsigned long counters;
struct page new;
/*
* Zap the freelist and set the frozen bit.
* The old freelist is the list of objects for the
* per cpu allocation list.
*/
// page->freelist: UNMOVABLE인 page 의 object의 시작 virtual address + 64
freelist = page->freelist;
// freelist: UNMOVABLE인 page 의 object의 시작 virtual address + 64
// page->objects: 64, page->inuse: 1, page->frozen: 0 값에 의해
// page->counters: 0x400001 값으로 해석됨
counters = page->counters;
// counters: 0x400001
new.counters = counters;
// new.counters: 0x400001
// new.objects: 64, new.inuse: 1
*objects = new.objects - new.inuse;
// *objects: 63
// mode: 1
if (mode) {
// new.inuse: 1, page->objects: 64
new.inuse = page->objects;
// new.inuse: 64
// new.counters: 0x400040
new.freelist = NULL;
// new.freelist: NULL
} else {
new.freelist = freelist;
}
// new.frozen: 0
VM_BUG_ON(new.frozen);
// new.frozen: 0
new.frozen = 1;
// new.frozen: 1
// new.counters: 0x80400040
// s: &boot_kmem_cache_node, page: MIGRATE_UNMOVABLE인 page,
// freelist: UNMOVABLE인 page 의 object의 시작 virtual address + 64
// counters: 0x400001, new.freelist: NULL, new.counters: 0x80400040, "acquire_slab"
// __cmpxchg_double_slab(&boot_kmem_cache_node, MIGRATE_UNMOVABLE인 page,
// UNMOVABLE인 page 의 object의 시작 virtual address + 64, 0x400001, NULL, 0x80400040
// "acquire_slab"): 1
if (!__cmpxchg_double_slab(s, page,
freelist, counters,
new.freelist, new.counters,
"acquire_slab"))
return NULL;
// s: &boot_kmem_cache_node, page: MIGRATE_UNMOVABLE인 page,
// freelist: UNMOVABLE인 page 의 object의 시작 virtual address + 64
// counters: 0x400001, new.freelist: NULL, new.counters: 0x80400040, “acquire_slab”
// __cmpxchg_double_slab(&boot_kmem_cache_node, MIGRATE_UNMOVABLE인 page,
// UNMOVABLE인 page 의 object의 시작 virtual address + 64, 0x400001, NULL, 0x80400040
// “acquire_slab”): 1
// freelist: UNMOVABLE인 page 의 object의 시작 virtual address + 64
// counters: 0x400001, new.freelist: NULL, new.counters: 0x80400040, “acquire_slab”
// __cmpxchg_double_slab(&boot_kmem_cache_node, MIGRATE_UNMOVABLE인 page,
// UNMOVABLE인 page 의 object의 시작 virtual address + 64, 0x400001, NULL, 0x80400040
// “acquire_slab”): 1
if (!__cmpxchg_double_slab(s, page,
freelist, counters,
new.freelist, new.counters,
"acquire_slab"))
slub.c::__cmpxchg_double_slab()
// ARM10C 20140621
// s: &boot_kmem_cache_node, page: MIGRATE_UNMOVABLE인 page,
// freelist: UNMOVABLE인 page 의 object의 시작 virtual address + 64
// counters: 0x400001, new.freelist: NULL, new.counters: 0x80400040, “acquire_slab”
// s: &boot_kmem_cache_node, page: MIGRATE_UNMOVABLE인 page,
// freelist: UNMOVABLE인 page 의 object의 시작 virtual address + 64
// counters: 0x400001, new.freelist: NULL, new.counters: 0x80400040, “acquire_slab”
static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
void *freelist_old, unsigned long counters_old,
void *freelist_new, unsigned long counters_new,
const char *n)
{
// irqs_disabled(): 1
VM_BUG_ON(!irqs_disabled());
// CONFIG_HAVE_CMPXCHG_DOUBLE=n, CONFIG_HAVE_ALIGNED_STRUCT_PAGE=n
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
if (s->flags & __CMPXCHG_DOUBLE) {
if (cmpxchg_double(&page->freelist, &page->counters,
freelist_old, counters_old,
freelist_new, counters_new))
return 1;
} else
#endif
{
// page: MIGRATE_UNMOVABLE인 page
slab_lock(page);
// page: MIGRATE_UNMOVABLE인 page
slab_lock(page);
slub.c::slab_lock()
// ARM10C 20140621
// page: MIGRATE_UNMOVABLE인 page
// page: MIGRATE_UNMOVABLE인 page
// Take the per-page slab lock: a bit spinlock on the PG_locked bit of
// page->flags (no separate lock word is needed per slab page).
static __always_inline void slab_lock(struct page *page)
{
// PG_locked: 0, page->flags: (the MIGRATE_UNMOVABLE page)->flags
bit_spin_lock(PG_locked, &page->flags);
}
// PG_locked: 0, page->flags: (MIGRATE_UNMOVABLE인 page)->flags
bit_spin_lock(PG_locked, &page->flags);
bit_spinlock.h::bit_spin_lock()
// ARM10C 20140621
// PG_locked: 0, page->flags: (MIGRATE_UNMOVABLE인 page)->flags
// PG_locked: 0, page->flags: (MIGRATE_UNMOVABLE인 page)->flags
// Acquire a spinlock implemented as bit 'bitnum' of *addr. Preemption stays
// disabled while the lock is held.
static inline void bit_spin_lock(int bitnum, unsigned long *addr)
{
/*
* Assuming the lock is uncontended, this never enters
* the body of the outer loop. If it is contended, then
* within the inner loop a non-atomic test is used to
* busywait with less bus contention for a good time to
* attempt to acquire the lock bit.
*/
preempt_disable();
// preempt count incremented, then memory barrier applied
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) // CONFIG_SMP=y, CONFIG_DEBUG_SPINLOCK=y
// bitnum: 0, addr: &(the MIGRATE_UNMOVABLE page)->flags
// test_and_set_bit_lock(0, &(the MIGRATE_UNMOVABLE page)->flags): 0 -> lock acquired, loop not entered
while (unlikely(test_and_set_bit_lock(bitnum, addr))) {
// contended: allow preemption and busy-wait with a cheap
// non-atomic test before retrying the atomic acquire
preempt_enable();
do {
cpu_relax();
} while (test_bit(bitnum, addr));
preempt_disable();
}
#endif
// __acquire(bitlock): 0 (static-analysis annotation, no-op at runtime)
__acquire(bitlock);
}
preempt.h::preempt_disable()
// ARM10C 20140621
// Disable kernel preemption: increment the preempt count, then a compiler
// barrier so later memory accesses are not reordered before the increment.
#define preempt_disable()/*ARM10C this*/ \
do { \
preempt_count_inc(); \
barrier(); \
} while (0)
bit_spinlock.h::bit_spin_lock()
static inline void bit_spin_lock(int bitnum, unsigned long *addr)
{
preempt_disable();
// preempt count 증가 후 memory barrier 적용
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) // CONFIG_SMP=y, CONFIG_DEBUG_SPINLOCK=y
// bitnum: 0, addr: &(MIGRATE_UNMOVABLE인 page)->flags
// test_and_set_bit_lock(0, &(MIGRATE_UNMOVABLE인 page)->flags): 0
while (unlikely(test_and_set_bit_lock(bitnum, addr))) {
// bitnum: 0, addr: &(MIGRATE_UNMOVABLE인 page)->flags
// test_and_set_bit_lock(0, &(MIGRATE_UNMOVABLE인 page)->flags): 0
// test_and_set_bit_lock(0, &(MIGRATE_UNMOVABLE인 page)->flags): 0
while (unlikely(test_and_set_bit_lock(bitnum, addr))) {
lock.h::test_and_set_bit_lock()
// ARM10C 20140621
// bitnum: 0, addr: &(MIGRATE_UNMOVABLE인 page)->flags
#define test_and_set_bit_lock(nr, addr) test_and_set_bit(nr, addr)
bitops.h:: test_and_set_bit()
// ARM10C 20140621
// bitnum: 0, addr: &(MIGRATE_UNMOVABLE인 page)->flags
// bitnum: 0, addr: &(MIGRATE_UNMOVABLE인 page)->flags
#define test_and_set_bit(nr,p) ATOMIC_BITOP(test_and_set_bit,nr,p)
bit_spinlock.h::bit_spin_lock()
// ARM10C 20140621
// PG_locked: 0, page->flags: (MIGRATE_UNMOVABLE인 page)->flags
// PG_locked: 0, page->flags: (MIGRATE_UNMOVABLE인 page)->flags
// (Re-pasted quote.) Acquire a spinlock implemented as bit 'bitnum' of
// *addr; preemption stays disabled while the lock is held.
static inline void bit_spin_lock(int bitnum, unsigned long *addr)
{
/*
* Assuming the lock is uncontended, this never enters
* the body of the outer loop. If it is contended, then
* within the inner loop a non-atomic test is used to
* busywait with less bus contention for a good time to
* attempt to acquire the lock bit.
*/
preempt_disable();
// preempt count incremented, then memory barrier applied
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) // CONFIG_SMP=y, CONFIG_DEBUG_SPINLOCK=y
// bitnum: 0, addr: &(the MIGRATE_UNMOVABLE page)->flags
// test_and_set_bit_lock(0, &(the MIGRATE_UNMOVABLE page)->flags): 0 -> lock acquired, loop not entered
while (unlikely(test_and_set_bit_lock(bitnum, addr))) {
// contended: allow preemption and busy-wait with a cheap
// non-atomic test before retrying the atomic acquire
preempt_enable();
do {
cpu_relax();
} while (test_bit(bitnum, addr));
preempt_disable();
}
#endif
// __acquire(bitlock): 0 (static-analysis annotation, no-op at runtime)
__acquire(bitlock);
}
// __acquire(bitlock): 0
__acquire(bitlock);
compiler.h::__acquire()
// ARM10C 20140621
# define __acquire(x) (void)0
slub.c::__cmpxchg_double_slab()
static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
void *freelist_old, unsigned long counters_old,
void *freelist_new, unsigned long counters_new,
const char *n)
{
// irqs_disabled(): 1
VM_BUG_ON(!irqs_disabled());
// CONFIG_HAVE_CMPXCHG_DOUBLE=n, CONFIG_HAVE_ALIGNED_STRUCT_PAGE=n
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
if (s->flags & __CMPXCHG_DOUBLE) {
if (cmpxchg_double(&page->freelist, &page->counters,
freelist_old, counters_old,
freelist_new, counters_new))
return 1;
} else
#endif
{
// page: MIGRATE_UNMOVABLE인 page
slab_lock(page);
// preempt count 증가 후 memory barrier 적용
// page->freelist: UNMOVABLE인 page 의 object의 시작 virtual address + 64,
// freelist_old: UNMOVABLE인 page 의 object의 시작 virtual address + 64
// page->counters: 0x400001, counters_old: 0x400001
if (page->freelist == freelist_old &&
page->counters == counters_old) {
// freelist_new: NULL
page->freelist = freelist_new;
// page->freelist: NULL
// page->counters: 0x400001, counters_new: 0x80400040
page->counters = counters_new;
// page->counters: 0x80400040
// page: MIGRATE_UNMOVABLE인 page
slab_unlock(page);
// page: MIGRATE_UNMOVABLE인 page
slab_unlock(page);
slub.c::slab_unlock()
// ARM10C 20140621
// page: the MIGRATE_UNMOVABLE page
// Releases the per-slab lock taken by slab_lock(): clears PG_locked
// (bit 0) in page->flags and re-enables preemption.
static __always_inline void slab_unlock(struct page *page)
{
	// PG_locked: 0, page->flags: (the MIGRATE_UNMOVABLE page)->flags
	__bit_spin_unlock(PG_locked, &page->flags);
}
bit_spinlock.h::__bit_spin_unlock()
// ARM10C 20140621
// PG_locked: 0, page->flags: &(MIGRATE_UNMOVABLE인 page)->flags
// PG_locked: 0, page->flags: &(MIGRATE_UNMOVABLE인 page)->flags
static inline void __bit_spin_unlock(int bitnum, unsigned long *addr)
{
#ifdef CONFIG_DEBUG_SPINLOCK // CONFIG_DEBUG_SPINLOCK=y
// bitnum: 0, addr: &(MIGRATE_UNMOVABLE인 page)->flags
// test_bit(0, &(MIGRATE_UNMOVABLE인 page)->flags): 1
BUG_ON(!test_bit(bitnum, addr));
#endif
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) // CONFIG_SMP=y, CONFIG_DEBUG_SPINLOCK=y
// bitnum: 0, addr: &(MIGRATE_UNMOVABLE인 page)->flags
__clear_bit_unlock(bitnum, addr);
// bitnum: 0, addr: &(MIGRATE_UNMOVABLE인 page)->flags
__clear_bit_unlock(bitnum, addr);
lock.h::__clear_bit_unlock()
// ARM10C 20140621
// nr: 0, addr: &(the MIGRATE_UNMOVABLE인 page)->flags
// Unlock-variant of __clear_bit: the smp_mb() orders all writes made
// under the lock before the bit is cleared, so another cpu that then
// takes the lock observes those updates.
#define __clear_bit_unlock(nr, addr) \
do { \
smp_mb(); \
__clear_bit(nr, addr); \
} while (0)
smp_mb(); \ __clear_bit(nr, addr); \
barrier.h::smp_mb()
// ARM10C 20140621
// A.R.M: A8.8.43 DMB
// ISH option:
// ISH Inner Shareable is the required shareability domain, reads and writes are the required
// access types. Encoded as option = 0b1011.
// The ISH option publishes the shared data so the other cpu cores in
// the inner-shareable domain can observe it.
#define smp_mb() dmb(ish)
barrier.h::dmb()
// ARM10C 20140621
// A.R.M: A8.8.43 DMB
// ISH option:
// ISH Inner Shareable is the required shareability domain, reads and writes are the required
// access types. Encoded as option = 0b1011.
// Emits an ARM DMB (data memory barrier) with the given option; the
// "memory" clobber also stops the compiler reordering across it.
#define dmb(option) __asm__ __volatile__ ("dmb " #option : : : "memory")
non-atomic.h::__clear_bit()
// ARM10C 20140621
// Non-atomic clear of bit nr in the bitmap at addr; callers must
// provide their own locking/barriers.
static inline void __clear_bit(int nr, volatile unsigned long *addr)
{
	// Locate the word that holds bit nr, then drop just that bit.
	unsigned long *word = ((unsigned long *)addr) + BIT_WORD(nr);

	*word &= ~BIT_MASK(nr);
}
bit_spinlock.h::__bit_spin_unlock()
// Releases a bit spinlock using a non-atomic clear (callers guarantee
// no concurrent writers touch the other bits of the word).
static inline void __bit_spin_unlock(int bitnum, unsigned long *addr)
{
	// bitnum: 0, addr: &(the MIGRATE_UNMOVABLE page)->flags
	// test_bit(0, &(the MIGRATE_UNMOVABLE page)->flags): 1 -- must be held
	BUG_ON(!test_bit(bitnum, addr));
	// bitnum: 0, addr: &(the MIGRATE_UNMOVABLE page)->flags
	__clear_bit_unlock(bitnum, addr);
	// clears bit 0 of (the MIGRATE_UNMOVABLE page)->flags;
	// dmb(ish) publishes the shared flags value to the other cpus
	preempt_enable();
	// applies the memory barrier, then decrements the preempt count
	// __release(bitlock): 0 (sparse annotation only)
	__release(bitlock);
}
compiler.h::__release()
// ARM10C 20140621
// Sparse static-analysis annotation; expands to nothing at runtime.
# define __release(x) (void)0
slub.c::__cmpxchg_double_slab()
// Atomically (w.r.t. this slab page) swap page->freelist/page->counters
// from (freelist_old, counters_old) to (freelist_new, counters_new).
// Returns 1 on success, 0 if another path changed the page first.
// n is only a diagnostic name used when the cmpxchg must be redone.
static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new,
		const char *n)
{
	// irqs_disabled(): 1 -- caller runs with interrupts off
	VM_BUG_ON(!irqs_disabled());
	// CONFIG_HAVE_CMPXCHG_DOUBLE=n, CONFIG_HAVE_ALIGNED_STRUCT_PAGE=n
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
	if (s->flags & __CMPXCHG_DOUBLE) {
		if (cmpxchg_double(&page->freelist, &page->counters,
			freelist_old, counters_old,
			freelist_new, counters_new))
			return 1;
	} else
#endif
	{
		// page: the MIGRATE_UNMOVABLE page
		slab_lock(page);
		// slab_lock raised the preempt count and applied a memory barrier
		// page->freelist: start virtual address of the UNMOVABLE page's objects + 64,
		// freelist_old:   start virtual address of the UNMOVABLE page's objects + 64
		// page->counters: 0x400001, counters_old: 0x400001
		if (page->freelist == freelist_old &&
					page->counters == counters_old) {
			// freelist_new: NULL
			page->freelist = freelist_new;
			// page->freelist: NULL
			// page->counters: 0x400001, counters_new: 0x80400040
			page->counters = counters_new;
			// page->counters: 0x80400040
			// page: the MIGRATE_UNMOVABLE page
			slab_unlock(page);
			// slab_unlock cleared bit 0 of page->flags, published it
			// with dmb(ish), and dropped the preempt count
			return 1;
			// return 1
		}
		slab_unlock(page);
	}
	// lost the race: let the bus settle, count the failure, retry in caller
	cpu_relax();
	stat(s, CMPXCHG_DOUBLE_FAIL);
#ifdef SLUB_DEBUG_CMPXCHG
	printk(KERN_INFO "%s %s: cmpxchg double redo ", n, s->name);
#endif
	return 0;
}
// return 1;
slub.c::acquire_slab()
static inline void *acquire_slab(struct kmem_cache *s,
struct kmem_cache_node *n, struct page *page,
int mode, int *objects)
{
void *freelist;
unsigned long counters;
struct page new;
// page->freelist: UNMOVABLE인 page 의 object의 시작 virtual address + 64
freelist = page->freelist;
// freelist: UNMOVABLE인 page 의 object의 시작 virtual address + 64
// page->objects: 64, page->inuse: 1, page->frozen: 0 값에 의해
// page->counters: 0x400001 값으로 해석됨
counters = page->counters;
// counters: 0x400001
new.counters = counters;
// new.counters: 0x400001
// new.objects: 64, new.inuse: 1
*objects = new.objects - new.inuse;
// *objects: 63
// mode: 1
if (mode) {
// new.inuse: 1, page->objects: 64
new.inuse = page->objects;
// new.inuse: 64
// new.counters: 0x400040
new.freelist = NULL;
// new.freelist: NULL
} else {
new.freelist = freelist;
}
// new.frozen: 0
VM_BUG_ON(new.frozen);
// new.frozen: 0
new.frozen = 1;
// new.frozen: 1
// new.counters: 0x80400040
// s: &boot_kmem_cache_node, page: MIGRATE_UNMOVABLE인 page,
// freelist: UNMOVABLE인 page 의 object의 시작 virtual address + 64
// counters: 0x400001, new.freelist: NULL, new.counters: 0x80400040, "acquire_slab"
// __cmpxchg_double_slab(&boot_kmem_cache_node, MIGRATE_UNMOVABLE인 page,
// UNMOVABLE인 page 의 object의 시작 virtual address + 64, 0x400001, NULL, 0x80400040
// "acquire_slab"): 1
if (!__cmpxchg_double_slab(s, page,
freelist, counters,
new.freelist, new.counters,
"acquire_slab"))
return NULL;
// __cmpxchg_double_slab에서 한일:
// MIGRATE_UNMOVABLE인 page의 멤버 필드 값
// page->freelist: NULL
// page->counters: 0x80400040
// 로 변경함
// n: (&boot_kmem_cache_node)->node[0], page: MIGRATE_UNMOVABLE인 page
remove_partial(n, page);
slub.c::remove_partial()
// ARM10C 20140621
// n: (&boot_kmem_cache_node)->node[0], page: the MIGRATE_UNMOVABLE page
// Unhook a slab page from its node's partial list. The caller holds
// n->list_lock.
static inline void remove_partial(struct kmem_cache_node *n,
		struct page *page)
{
	list_del(&page->lru);	/* unlink page->lru from n->partial */
	--n->nr_partial;	/* one fewer partial slab on this node */
}
list.h::list_del()
// ARM10C 20140517
// Removes entry from its list and poisons the link pointers so any
// later use of the stale entry faults on a recognizable address.
static inline void list_del(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	// LIST_POISON1: ((void *) 0x00100100)
	entry->next = LIST_POISON1;
	// LIST_POISON2: ((void *) 0x00200200)
	entry->prev = LIST_POISON2;
}
slub.c::acquire_slab()
// Freeze a slab page taken from node n's partial list and return its
// freelist; *objects is set to the number of free objects on the page.
// Returns NULL when the cmpxchg race is lost. mode != 0 means the page
// becomes the cpu slab (its whole freelist is taken at once).
static inline void *acquire_slab(struct kmem_cache *s,
		struct kmem_cache_node *n, struct page *page,
		int mode, int *objects)
{
	void *freelist;
	unsigned long counters;
	struct page new;

	// page->freelist: start virtual address of the UNMOVABLE page's objects + 64
	freelist = page->freelist;
	// freelist: start virtual address of the UNMOVABLE page's objects + 64
	// page->objects: 64, page->inuse: 1, page->frozen: 0, so
	// page->counters reads back as 0x400001 (the fields share one word)
	counters = page->counters;
	// counters: 0x400001
	new.counters = counters;
	// new.counters: 0x400001
	// new.objects: 64, new.inuse: 1
	*objects = new.objects - new.inuse;
	// *objects: 63
	// mode: 1
	if (mode) {
		// new.inuse: 1, page->objects: 64
		new.inuse = page->objects;
		// new.inuse: 64
		// new.counters: 0x400040
		new.freelist = NULL;
		// new.freelist: NULL
	} else {
		new.freelist = freelist;
	}
	// new.frozen: 0 -- a page on the partial list must not be frozen
	VM_BUG_ON(new.frozen);
	// new.frozen: 0
	new.frozen = 1;
	// new.frozen: 1
	// new.counters: 0x80400040
	// s: &boot_kmem_cache_node, page: the MIGRATE_UNMOVABLE page,
	// freelist: start virtual address of the UNMOVABLE page's objects + 64
	// counters: 0x400001, new.freelist: NULL, new.counters: 0x80400040
	// __cmpxchg_double_slab(&boot_kmem_cache_node, the MIGRATE_UNMOVABLE page,
	//   that freelist, 0x400001, NULL, 0x80400040, "acquire_slab"): 1
	if (!__cmpxchg_double_slab(s, page,
			freelist, counters,
			new.freelist, new.counters,
			"acquire_slab"))
		return NULL;
	// __cmpxchg_double_slab changed the MIGRATE_UNMOVABLE page's fields to
	// page->freelist: NULL
	// page->counters: 0x80400040
	// n: (&boot_kmem_cache_node)->node[0], page: the MIGRATE_UNMOVABLE page
	remove_partial(n, page);
	// removed (the MIGRATE_UNMOVABLE page)->lru from n->partial
	// n->nr_partial: 0
	// freelist: start virtual address of the UNMOVABLE page's objects + 64
	WARN_ON(!freelist);
	// freelist: start virtual address of the UNMOVABLE page's objects + 64
	return freelist;
	// return: start virtual address of the UNMOVABLE page's objects + 64
}
// return freelist;
// return UNMOVABLE인 page 의 object의 시작 virtual address + 64
// return UNMOVABLE인 page 의 object의 시작 virtual address + 64
slub.c::get_partial_node()
static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
struct kmem_cache_cpu *c, gfp_t flags)
{
struct page *page, *page2;
void *object = NULL;
int available = 0;
int objects;
// n: (&boot_kmem_cache_node)->node[0]: UNMOVABLE인 page 의 object의 시작 virtual address,
// n->nr_partial: ((&boot_kmem_cache_node)->node[0])->nr_partial: 1
if (!n || !n->nr_partial)
return NULL;
// early_kmem_cache_node_alloc에서 (&boot_kmem_cache_node)->node[0] 값을 설정함
// n->list_lock: ((&boot_kmem_cache_node)->node[0])->list_lock
spin_lock(&n->list_lock);
// ((&boot_kmem_cache_node)->node[0])->list_lock의 spinlock 획득
// 2014/06/14 종료
// 2014/06/21 시작
list_for_each_entry_safe(page, page2, &n->partial, lru) {
// for (page = list_first_entry(&n->partial, typeof(*page), lru),
// page2 = list_next_entry(page, lru);
// &page->lru != (&n->partial);
// page = page2, page2 = list_next_entry(page2, lru))
// page: MIGRATE_UNMOVABLE인 page, page2: page lru의 offset 만큼 계산된 주소
void *t;
// page: MIGRATE_UNMOVABLE인 page, flags: GFP_KERNEL: 0xD0
// pfmemalloc_match(MIGRATE_UNMOVABLE인 page, GFP_KERNEL: 0xD0): 1
if (!pfmemalloc_match(page, flags))
continue;
// s: &boot_kmem_cache_node, n: (&boot_kmem_cache_node)->node[0],
// page: MIGRATE_UNMOVABLE인 page, object: NULL
// acquire_slab(&boot_kmem_cache_node, (&boot_kmem_cache_node)->node[0], MIGRATE_UNMOVABLE인 page, 1, objects):
// UNMOVABLE인 page 의 object의 시작 virtual address + 64
t = acquire_slab(s, n, page, object == NULL, &objects);
// t: UNMOVABLE인 page 의 object의 시작 virtual address + 64, objects: 63
// t: UNMOVABLE인 page 의 object의 시작 virtual address + 64
if (!t)
break;
// available: 0, objects: 63
available += objects;
// available: 63
// object: NULL
if (!object) {
// c->page: ((&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋))->page,
// page: MIGRATE_UNMOVABLE인 page
c->page = page;
// c->page: ((&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋))->page:
// MIGRATE_UNMOVABLE인 page
// s: &boot_kmem_cache_node, ALLOC_FROM_PARTIAL: 7
stat(s, ALLOC_FROM_PARTIAL); // null function
// object: NULL, t: UNMOVABLE인 page 의 object의 시작 virtual address + 64
object = t;
// object: UNMOVABLE인 page 의 object의 시작 virtual address + 64
} else {
put_cpu_partial(s, page, 0);
stat(s, CPU_PARTIAL_NODE);
}
// s: &boot_kmem_cache_node,
// kmem_cache_has_cpu_partial(&boot_kmem_cache_node): 1,
// available: 63, s->cpu_partial: boot_kmem_cache_node.cpu_partial: 30
if (!kmem_cache_has_cpu_partial(s)
|| available > s->cpu_partial / 2)
break;
// break 로 loop 탈출
}
// acquire_slab 이 한일?:
// 다음 object의 주소를 받아옴
// UNMOVABLE인 page 의 object의 시작 virtual address + 64
// MIGRATE_UNMOVABLE인 page의 멤버 필드 값 변경
// page->freelist: NULL
// page->counters: 0x80400040
// n->partial에 연결된 (MIGRATE_UNMOVABLE인 page)->lru 를 삭제
// n->nr_partial: 0
// 다음 object의 주소를 받아옴
// UNMOVABLE인 page 의 object의 시작 virtual address + 64
// MIGRATE_UNMOVABLE인 page의 멤버 필드 값 변경
// page->freelist: NULL
// page->counters: 0x80400040
// n->partial에 연결된 (MIGRATE_UNMOVABLE인 page)->lru 를 삭제
// n->nr_partial: 0
// s: &boot_kmem_cache_node,
// kmem_cache_has_cpu_partial(&boot_kmem_cache_node): 1,
// available: 63, s->cpu_partial: boot_kmem_cache_node.cpu_partial: 30
// kmem_cache_has_cpu_partial(&boot_kmem_cache_node): 1,
// available: 63, s->cpu_partial: boot_kmem_cache_node.cpu_partial: 30
if (!kmem_cache_has_cpu_partial(s)
slub.c::kmem_cache_has_cpu_partial()
// ARM10C 20140621
// s: &boot_kmem_cache
// True when per-cpu partial slab lists may be used for this cache,
// i.e. the cache carries no debug flags that disable them.
static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_CPU_PARTIAL // CONFIG_SLUB_CPU_PARTIAL=y
	// s: &boot_kmem_cache_node, kmem_cache_debug(&boot_kmem_cache_node): 0
	// s: &boot_kmem_cache, kmem_cache_debug(&boot_kmem_cache): 0
	return !kmem_cache_debug(s);
	// return 1 in both traced calls
#else
	return false;
#endif
}
slub.c::get_partial_node()
// Try to refill the cpu slab from node n's partial list. The first
// slab acquired becomes c->page and its freelist is returned; further
// slabs are parked on the per-cpu partial list until roughly
// s->cpu_partial / 2 free objects are cached. Returns NULL when n has
// no partial slabs.
static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
		struct kmem_cache_cpu *c, gfp_t flags)
{
	struct page *page, *page2;
	void *object = NULL;
	int available = 0;
	int objects;

	// n: (&boot_kmem_cache_node)->node[0]: start virtual address of the UNMOVABLE page's objects,
	// n->nr_partial: ((&boot_kmem_cache_node)->node[0])->nr_partial: 1
	if (!n || !n->nr_partial)
		return NULL;

	// (&boot_kmem_cache_node)->node[0] was set up in early_kmem_cache_node_alloc
	// n->list_lock: ((&boot_kmem_cache_node)->node[0])->list_lock
	spin_lock(&n->list_lock);
	// ((&boot_kmem_cache_node)->node[0])->list_lock spinlock acquired

	// 2014/06/14 session ended here
	// 2014/06/21 session resumed here

	// the _safe variant is required: acquire_slab() unlinks page from
	// n->partial inside the body, so page2 caches the next entry first
	// for (page = list_first_entry(&n->partial, typeof(*page), lru),
	//	page2 = list_next_entry(page, lru);
	//	&page->lru != (&n->partial);
	//	page = page2, page2 = list_next_entry(page2, lru))
	// page: the MIGRATE_UNMOVABLE page, page2: address computed from the lru offset
	list_for_each_entry_safe(page, page2, &n->partial, lru) {
		void *t;

		// page: the MIGRATE_UNMOVABLE page, flags: GFP_KERNEL: 0xD0
		// pfmemalloc_match(the MIGRATE_UNMOVABLE page, GFP_KERNEL: 0xD0): 1
		if (!pfmemalloc_match(page, flags))
			continue;

		// s: &boot_kmem_cache_node, n: (&boot_kmem_cache_node)->node[0],
		// page: the MIGRATE_UNMOVABLE page, object: NULL (so mode == 1)
		// acquire_slab(&boot_kmem_cache_node, (&boot_kmem_cache_node)->node[0],
		//   the MIGRATE_UNMOVABLE page, 1, &objects):
		//   start virtual address of the UNMOVABLE page's objects + 64
		t = acquire_slab(s, n, page, object == NULL, &objects);
		// t: start virtual address of the UNMOVABLE page's objects + 64, objects: 63
		if (!t)
			break;

		// available: 0, objects: 63
		available += objects;
		// available: 63
		// object: NULL on the first acquired slab
		if (!object) {
			// page: the MIGRATE_UNMOVABLE page
			c->page = page;
			// c->page: ((&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] +
			//   offset of pcpu_base_addr from __per_cpu_start))->page:
			//   the MIGRATE_UNMOVABLE page
			// s: &boot_kmem_cache_node, ALLOC_FROM_PARTIAL: 7
			stat(s, ALLOC_FROM_PARTIAL); // null function
			// object: NULL, t: start virtual address of the UNMOVABLE page's objects + 64
			object = t;
			// object: start virtual address of the UNMOVABLE page's objects + 64
		} else {
			put_cpu_partial(s, page, 0);
			stat(s, CPU_PARTIAL_NODE);
		}
		// s: &boot_kmem_cache_node,
		// kmem_cache_has_cpu_partial(&boot_kmem_cache_node): 1,
		// available: 63, s->cpu_partial: boot_kmem_cache_node.cpu_partial: 30
		if (!kmem_cache_has_cpu_partial(s)
			|| available > s->cpu_partial / 2)
			break;
		// loop exits via break (63 > 15)
	}
	// n->list_lock: ((&boot_kmem_cache_node)->node[0])->list_lock
	spin_unlock(&n->list_lock);
	// ((&boot_kmem_cache_node)->node[0])->list_lock spinlock released

	// object: start virtual address of the UNMOVABLE page's objects + 64
	return object;
	// return: start virtual address of the UNMOVABLE page's objects + 64
}
slub.c::get_partial()
// Pick a partial slab, preferring the requested (or current) node;
// falls back to scanning other nodes only when the caller did not pin
// the allocation to a specific node.
static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
		struct kmem_cache_cpu *c)
{
	void *object;
	// node: -1, NUMA_NO_NODE: -1, numa_node_id(): 0
	int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node;
	// searchnode: 0

	// s: &boot_kmem_cache_node, searchnode: 0
	// get_node(&boot_kmem_cache_node, 0): (&boot_kmem_cache_node)->node[0],
	// c: (&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] +
	//   offset of pcpu_base_addr from __per_cpu_start),
	// flags: GFP_KERNEL: 0xD0
	// get_partial_node(...): start virtual address of the UNMOVABLE page's objects + 64
	object = get_partial_node(s, get_node(s, searchnode), c, flags);
	// object: start virtual address of the UNMOVABLE page's objects + 64
	// node: -1, NUMA_NO_NODE: -1
	if (object || node != NUMA_NO_NODE)
		// object: start virtual address of the UNMOVABLE page's objects + 64
		return object;
		// return: start virtual address of the UNMOVABLE page's objects + 64

	return get_any_partial(s, flags, c);
}
// return UNMOVABLE인 page 의 object의 시작 virtual address + 64
get_partial()이 한일
- object를 위한 page 의 사용 하지 않은 다음 object의 시작 virtual address 를 가져옴
- page->counters: 0x80400040
- page->inuse: 64
- page->objects: 64
- page->frozen: 1
- page->freelist: NULL
- n->partial에 연결된 (MIGRATE_UNMOVABLE인 page)->lru 를 삭제
- n->nr_partial: 0
slub.c::new_slab_objects()
// ARM10C 20140614
// s: &boot_kmem_cache_node, gfpflags: GFP_KERNEL: 0xD0, node: -1,
// &c: &(&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] +
//   offset of pcpu_base_addr from __per_cpu_start)
// Obtain a freelist for the cpu slab: first try the partial lists,
// otherwise allocate and install a brand-new slab page.
static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
		int node, struct kmem_cache_cpu **pc)
{
	void *freelist;
	// *pc: (&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] +
	//   offset of pcpu_base_addr from __per_cpu_start)
	struct kmem_cache_cpu *c = *pc;
	struct page *page;

	// s: &boot_kmem_cache_node, flags: GFP_KERNEL: 0xD0, node: -1
	// get_partial(&boot_kmem_cache_node, GFP_KERNEL: 0xD0, -1, c):
	//   start virtual address of the UNMOVABLE page's objects + 64
	freelist = get_partial(s, flags, node, c);
	// freelist: start virtual address of the UNMOVABLE page's objects + 64
	if (freelist)
		// freelist: start virtual address of the UNMOVABLE page's objects + 64
		return freelist;
		// return: start virtual address of the UNMOVABLE page's objects + 64

	// slow path (not taken in this trace): build a new slab page
	page = new_slab(s, flags, node);
	if (page) {
		c = __this_cpu_ptr(s->cpu_slab);
		if (c->page)
			flush_slab(s, c);

		// take the whole freelist of the fresh slab for this cpu
		freelist = page->freelist;
		page->freelist = NULL;

		stat(s, ALLOC_SLAB);
		c->page = page;
		*pc = c;
	} else
		freelist = NULL;

	return freelist;
}
// return freelist
slub.c::__slab_alloc()
// ARM10C 20140614
// s: &bootkmem_cache_node, gfpflags: GFP_KERNEL: 0xD0, node: -1, addr: _RET_IP,
// c: (&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋)
// s: &bootkmem_cache_node, gfpflags: GFP_KERNEL: 0xD0, node: -1, addr: _RET_IP,
// c: (&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋)
static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
unsigned long addr, struct kmem_cache_cpu *c)
{
...
goto new_slab;
// new_slab 심볼로 이동
...
new_slab:
// c->partial: (&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋)->partial: 0
if (c->partial) {
page = c->page = c->partial;
c->partial = page->next;
stat(s, CPU_PARTIAL_ALLOC);
c->freelist = NULL;
goto redo;
}
// s: &boot_kmem_cache_node, gfpflags: GFP_KERNEL: 0xD0, node: -1,
// c: (&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋)
// new_slab_objects(&boot_kmem_cache_node, GFP_KERNEL: 0xD0, -1,
// (&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋)):
// UNMOVABLE인 page 의 object의 시작 virtual address + 64
freelist = new_slab_objects(s, gfpflags, node, &c);
// freelist: UNMOVABLE인 page 의 object의 시작 virtual address + 64
if (unlikely(!freelist)) {
if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
slab_out_of_memory(s, gfpflags, node);
local_irq_restore(flags);
return NULL;
}
// c->page: ((&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋))->page:
// MIGRATE_UNMOVABLE인 page
page = c->page;
// page: MIGRATE_UNMOVABLE인 page
// s: &boot_kmem_cache_node, kmem_cache_debug(&boot_kmem_cache_node): 0
// page: MIGRATE_UNMOVABLE인 page, gfpflags: GFP_KERNEL: 0xD0
// pfmemalloc_match(MIGRATE_UNMOVABLE인 page, GFP_KERNEL: 0xD0): 1
if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags)))
goto load_freelist;
// load_freelist 심볼로 점프
// load_freelist 심볼로 점프
goto load_freelist;
load_freelist:
/*
* freelist is pointing to the list of objects to be used.
* page is pointing to the page from which the objects are obtained.
* That page must be frozen for per cpu allocations to work.
*/
// c->page: ((&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋))->page:
// MIGRATE_UNMOVABLE인 page
// (MIGRATE_UNMOVABLE인 page)->frozen: 1
VM_BUG_ON(!c->page->frozen);
// s: &boot_kmem_cache_node, freelist: UNMOVABLE인 page 의 object의 시작 virtual address + 64
// c->freelist: ((&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋))->freelist
// get_freepointer(&boot_kmem_cache_node, UNMOVABLE인 page 의 object의 시작 virtual address + 64):
// UNMOVABLE인 page 의 object의 시작 virtual address + 128
c->freelist = get_freepointer(s, freelist);
slub.c::get_freepointer()
// ARM10C 20140621
// s: &boot_kmem_cache_node, object: start virtual address of the
// UNMOVABLE page's objects + 64
// Read the free-list link stored inside a free object; the link lives
// s->offset bytes into the object (0 for boot_kmem_cache_node).
static inline void *get_freepointer(struct kmem_cache *s, void *object)
{
	void **link = (void **)((char *)object + s->offset);

	return *link;
}
slub.c::__slab_alloc()
// ARM10C 20140614
// s: &bootkmem_cache_node, gfpflags: GFP_KERNEL: 0xD0, node: -1, addr: _RET_IP,
// c: (&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋)
// s: &bootkmem_cache_node, gfpflags: GFP_KERNEL: 0xD0, node: -1, addr: _RET_IP,
// c: (&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋)
static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
unsigned long addr, struct kmem_cache_cpu *c)
{
...
load_freelist:
// c->page: ((&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋))->page:
// MIGRATE_UNMOVABLE인 page
// (MIGRATE_UNMOVABLE인 page)->frozen: 1
VM_BUG_ON(!c->page->frozen);
// s: &boot_kmem_cache_node, freelist: UNMOVABLE인 page 의 object의 시작 virtual address + 64
// c->freelist: ((&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋))->freelist
// get_freepointer(&boot_kmem_cache_node, UNMOVABLE인 page 의 object의 시작 virtual address + 64):
// UNMOVABLE인 page 의 object의 시작 virtual address + 128
c->freelist = get_freepointer(s, freelist);
// c->freelist: ((&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋))->freelist:
// UNMOVABLE인 page 의 object의 시작 virtual address + 128
// c->tid: ((&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋))->tid: 0
// next_tid(0): 4
c->tid = next_tid(c->tid);
// c->tid: ((&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋))->tid: 0
// next_tid(0): 4
// next_tid(0): 4
c->tid = next_tid(c->tid);
slub.c::next_tid()
// ARM10C 20140621
// tid: 0, TID_STEP: 4
// Advance a per-cpu transaction id; ids move in TID_STEP increments
// so the low bits keep identifying the owning cpu.
static inline unsigned long next_tid(unsigned long tid)
{
	unsigned long advanced = tid + TID_STEP;

	return advanced;	// next_tid(0): 4
}
slub.c::__slab_alloc()
// ARM10C 20140614
// s: &bootkmem_cache_node, gfpflags: GFP_KERNEL: 0xD0, node: -1, addr: _RET_IP,
// c: (&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋)
// s: &bootkmem_cache_node, gfpflags: GFP_KERNEL: 0xD0, node: -1, addr: _RET_IP,
// c: (&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋)
static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
unsigned long addr, struct kmem_cache_cpu *c)
{
...
load_freelist:
// c->page: ((&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋))->page:
// MIGRATE_UNMOVABLE인 page
// (MIGRATE_UNMOVABLE인 page)->frozen: 1
VM_BUG_ON(!c->page->frozen);
// s: &boot_kmem_cache_node, freelist: UNMOVABLE인 page 의 object의 시작 virtual address + 64
// c->freelist: ((&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋))->freelist
// get_freepointer(&boot_kmem_cache_node, UNMOVABLE인 page 의 object의 시작 virtual address + 64):
// UNMOVABLE인 page 의 object의 시작 virtual address + 128
c->freelist = get_freepointer(s, freelist);
// c->freelist: ((&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋))->freelist:
// UNMOVABLE인 page 의 object의 시작 virtual address + 128
// c->tid: ((&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋))->tid: 0
// next_tid(0): 4
c->tid = next_tid(c->tid);
// c->tid: ((&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋))->tid: 4
local_irq_restore(flags);
// flags에 저장된 cpsr 을 복원
// freelist: UNMOVABLE인 page 의 object의 시작 virtual address + 64
return freelist;
// return UNMOVABLE인 page 의 object의 시작 virtual address + 64
new_slab_objects이 한일:
- object를 위한 page 의 사용 하지 않은 다음 object의 시작 virtual address 를 가져옴
- page->counters: 0x80400040
- page->inuse: 64
- page->objects: 64
- page->frozen: 1
- page->freelist: NULL
- n->partial에 연결된 (MIGRATE_UNMOVABLE인 page)->lru 를 삭제
- n->nr_partial: 0
slub.c::slab_alloc_node()
static __always_inline void *slab_alloc_node(struct kmem_cache *s,
gfp_t gfpflags, int node, unsigned long addr)
{
...
redo:
...
// object: 0, page: 0, node: -1, node_match(0, -1): 1
if (unlikely(!object || !node_match(page, node)))
// s: &boot_kmem_cache_node, gfpflags: GFP_KERNEL: 0xD0, node: -1, addr: _RET_IP_,
// c: (&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋)
// __slab_alloc(&boot_kmem_cache_node, GFP_KERNEL: 0xD0, -1, _RET_IP_,
// (&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋)):
// UNMOVABLE인 page 의 object의 시작 virtual address + 64
object = __slab_alloc(s, gfpflags, node, addr, c);
// object: UNMOVABLE인 page 의 object의 시작 virtual address + 64
else {
...
}
// gfpflags: GFP_KERNEL: 0xD0, __GFP_ZERO: 0x8000u
// object: UNMOVABLE인 page 의 object의 시작 virtual address + 64
if (unlikely(gfpflags & __GFP_ZERO) && object)
memset(object, 0, s->object_size);
// s: &boot_kmem_cache_node, gfpflags: GFP_KERNEL: 0xD0,
// object: UNMOVABLE인 page 의 object의 시작 virtual address + 64
slab_post_alloc_hook(s, gfpflags, object);
}
// s: &boot_kmem_cache_node, gfpflags: GFP_KERNEL: 0xD0,
// object: UNMOVABLE인 page 의 object의 시작 virtual address + 64
// object: UNMOVABLE인 page 의 object의 시작 virtual address + 64
slab_post_alloc_hook(s, gfpflags, object);
slub.c::slab_post_alloc_hook()
// ARM10C 20140621
// s: &boot_kmem_cache_node, gfpflags: GFP_KERNEL: 0xD0,
// object: start virtual address of the UNMOVABLE page's objects + 64
// Debug/tracing hook run after a slab object is handed out; with
// kmemcheck/kmemleak disabled both callees compile to nothing.
static inline void slab_post_alloc_hook(struct kmem_cache *s,
		gfp_t flags, void *object)
{
	// flags: GFP_KERNEL: 0xD0, gfp_allowed_mask: 0x1ffff2f
	flags &= gfp_allowed_mask;
	// flags: 0
	// s: &boot_kmem_cache_node, flags: 0,
	// object: start virtual address of the UNMOVABLE page's objects + 64
	// slab_ksize(&boot_kmem_cache_node): 64
	kmemcheck_slab_alloc(s, flags, object, slab_ksize(s)); // null function
	// object: start virtual address of the UNMOVABLE page's objects + 64,
	// s->object_size: boot_kmem_cache_node.object_size: 64,
	// s->flags: boot_kmem_cache_node.flags: SLAB_HWCACHE_ALIGN: 0x00002000UL, flags: 0
	kmemleak_alloc_recursive(object, s->object_size, 1, s->flags, flags); // null function
}
slub.c::slab_alloc_node()
static __always_inline void *slab_alloc_node(struct kmem_cache *s,
gfp_t gfpflags, int node, unsigned long addr)
{
...
redo:
...
// s: &boot_kmem_cache_node, gfpflags: GFP_KERNEL: 0xD0,
// object: UNMOVABLE인 page 의 object의 시작 virtual address + 64
slab_post_alloc_hook(s, gfpflags, object);
// object: UNMOVABLE인 page 의 object의 시작 virtual address + 64
return object;
// return UNMOVABLE인 page 의 object의 시작 virtual address + 64
}
// return UNMOVABLE인 page 의 object의 시작 virtual address + 64
return object;
slub.c::slab_alloc()
// ARM10C 20140614
// s: &boot_kmem_cache_node, gfpflags: GFP_KERNEL: 0xD0, addr: _RET_IP
// Convenience wrapper: allocate from whichever node is current.
static __always_inline void *slab_alloc(struct kmem_cache *s,
		gfp_t gfpflags, unsigned long addr)
{
	// s: &boot_kmem_cache_node, gfpflags: GFP_KERNEL: 0xD0, NUMA_NO_NODE: -1, _RET_IP_
	// slab_alloc_node(&boot_kmem_cache_node, GFP_KERNEL: 0xD0, -1, _RET_IP_):
	//   start virtual address of the UNMOVABLE page's objects + 64
	return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr);
}
// return UNMOVABLE인 page 의 object의 시작 virtual address + 64
return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr);
slub.c::kmem_cache_alloc()
// ARM10C 20140614
// s: &boot_kmem_cache_node, gfpflags: GFP_KERNEL: 0xD0
// Public entry point: allocate one object from cache s and emit the
// allocation tracepoint.
void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
{
	// s: &boot_kmem_cache_node, gfpflags: GFP_KERNEL: 0xD0
	// slab_alloc(&boot_kmem_cache_node, GFP_KERNEL: 0xD0):
	//   start virtual address of the UNMOVABLE page's objects + 64
	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
	// ret: start virtual address of the UNMOVABLE page's objects + 64

	// s->object_size: boot_kmem_cache_node.object_size: 64,
	// s->size: boot_kmem_cache_node.size: 64,
	// gfpflags: GFP_KERNEL: 0xD0
	trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
				s->size, gfpflags);

	// ret: start virtual address of the UNMOVABLE page's objects + 64
	return ret;
	// return: start virtual address of the UNMOVABLE page's objects + 64
}
// return UNMOVABLE인 page 의 object의 시작 virtual address + 64
return ret;
slab.h::kmem_cache_alloc_node()
// ARM10C 20140614
// s (kmem_cache_node): &boot_kmem_cache_node, flags: GFP_KERNEL: 0xD0, node: 0
// Non-NUMA build: the node argument is ignored and the allocation is
// forwarded to kmem_cache_alloc().
static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
{
	// s: &boot_kmem_cache_node, flags: GFP_KERNEL: 0xD0
	// kmem_cache_alloc(&boot_kmem_cache_node, GFP_KERNEL: 0xD0):
	//   start virtual address of the UNMOVABLE page's objects + 64
	return kmem_cache_alloc(s, flags);
	// return: start virtual address of the UNMOVABLE page's objects + 64
}
// return UNMOVABLE인 page 의 object의 시작 virtual address + 64
return kmem_cache_alloc(s, flags);
return kmem_cache_alloc(s, flags);
kmem_cache_alloc_node 가 한일:
- MIGRATE_UNMOVABLE인 page 할당 받아 쪼개놓은 object들에서 object를 1개 할당받음
- page->counters: 0x80400040
- page->inuse: 64
- page->objects: 64
- page->frozen: 1
- page->freelist: NULL
- c->freelist: ((&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋))->freelist:
- UNMOVABLE인 page 의 object의 시작 virtual address + 128
- c->tid: ((&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋))->tid: 4
- object를 위한 page 의 사용 하지 않은 다음 object의 시작 virtual address 를 가져옴
- n->partial에 연결된 (MIGRATE_UNMOVABLE인 page)->lru 를 삭제
- n->nr_partial: 0
slub.c::init_kmem_cache_nodes()
// ARM10C 20140614
// s: &boot_kmem_cache
// s: &boot_kmem_cache
static int init_kmem_cache_nodes(struct kmem_cache *s)
{
int node;
// N_NORMAL_MEMORY: 2
for_each_node_state(node, N_NORMAL_MEMORY) {
// for ( (node) = 0; (node) == 0; (node) = 1)
struct kmem_cache_node *n;
// slab_state: DOWN: 0
// slab_state: PARTIAL: 1, DOWN: 0
if (slab_state == DOWN) {
...
continue;
}
// kmem_cache_node: &boot_kmem_cache_node, GFP_KERNEL: 0xD0, node: 0
// kmem_cache_alloc_node(&boot_kmem_cache_node, GFP_KERNEL: 0xD0, 0):
// UNMOVABLE인 page 의 object의 시작 virtual address + 64
n = kmem_cache_alloc_node(kmem_cache_node,
GFP_KERNEL, node);
// n: UNMOVABLE인 page 의 object의 시작 virtual address + 64
if (!n) {
free_kmem_cache_nodes(s);
return 0;
}
// node: 0, s->node[0]: boot_kmem_cache.node[0]
// n: UNMOVABLE인 page 의 object의 시작 virtual address + 64
s->node[node] = n;
// s->node[0]: boot_kmem_cache.node[0]: UNMOVABLE인 page 의 object의 시작 virtual address + 64
// n: UNMOVABLE인 page 의 object의 시작 virtual address + 64
init_kmem_cache_node(n);
// n: UNMOVABLE인 page 의 object의 시작 virtual address + 64
init_kmem_cache_node(n);
slub.c::init_kmem_cache_node()
// ARM10C 20140621
// n: UNMOVABLE인 page 의 object의 시작 virtual address + 64
// n: UNMOVABLE인 page 의 object의 시작 virtual address + 64
static void
init_kmem_cache_node(struct kmem_cache_node *n)
{
// n: UNMOVABLE인 page 의 object의 시작 virtual address
// n: UNMOVABLE인 page 의 object의 시작 virtual address + 64
n->nr_partial = 0;
// n->nr_partial: 0
spin_lock_init(&n->list_lock);
// n->list_lock: spinlock 초기화 수행
INIT_LIST_HEAD(&n->partial);
// n->partial: 리스트 초기화
#ifdef CONFIG_SLUB_DEBUG // CONFIG_SLUB_DEBUG=y
atomic_long_set(&n->nr_slabs, 0);
// n->nr_slabs: 0
atomic_long_set(&n->total_objects, 0);
// n->total_objects: 0
INIT_LIST_HEAD(&n->full);
// n->full: 리스트 초기화
#endif
}
- n->nr_partial: 0
- n->list_lock: spinlock 초기화 수행
- n->partial: 리스트 초기화
- n->nr_slabs: 0
- n->total_objects: 0
- n->full: 리스트 초기화
- 할당받은 slab object를 kmem_cache_node 로 사용하고
- kmem_cache_node의 멤버 필드를 초기화함
slub.c::init_kmem_cache_nodes()
// ARM10C 20140614
// s: &boot_kmem_cache
// s: &boot_kmem_cache
static int init_kmem_cache_nodes(struct kmem_cache *s)
{
int node;
// N_NORMAL_MEMORY: 2
for_each_node_state(node, N_NORMAL_MEMORY) {
// for ( (node) = 0; (node) == 0; (node) = 1)
struct kmem_cache_node *n;
// slab_state: DOWN: 0
// slab_state: PARTIAL: 1, DOWN: 0
if (slab_state == DOWN) {
...
continue;
}
// kmem_cache_node: &boot_kmem_cache_node, GFP_KERNEL: 0xD0, node: 0
// kmem_cache_alloc_node(&boot_kmem_cache_node, GFP_KERNEL: 0xD0, 0):
// UNMOVABLE인 page 의 object의 시작 virtual address + 64
n = kmem_cache_alloc_node(kmem_cache_node,
GFP_KERNEL, node);
// n: UNMOVABLE인 page 의 object의 시작 virtual address + 64
if (!n) {
free_kmem_cache_nodes(s);
return 0;
}
// node: 0, s->node[0]: boot_kmem_cache.node[0]
// n: UNMOVABLE인 page 의 object의 시작 virtual address + 64
s->node[node] = n;
// s->node[0]: boot_kmem_cache.node[0]: UNMOVABLE인 page 의 object의 시작 virtual address + 64
// n: UNMOVABLE인 page 의 object의 시작 virtual address + 64
init_kmem_cache_node(n);
}
return 1;
// return 1 수행
init_kmem_cache_nodes(&boot_kmem_cache_node) 가 한일:
- migratetype이 MIGRATE_UNMOVABLE인 page 할당 받음
- page 멤버를 셋팅함
- page->slab_cache: &boot_kmem_cache_node 주소를 set
- page->flags에 7 (PG_slab) bit를 set
- page->freelist: UNMOVABLE인 page 의 object의 시작 virtual address + 64
- page->inuse: 1, page->frozen: 0 page 멤버를 셋팅함
- slab 의 objects 들의 freepointer를 맵핑함
- 할당받은 slab object를 kmem_cache_node 로 사용하고 kmem_cache_node의 멤버 필드를 초기화함
- kmem_cache_node->nr_partial: 1
- kmem_cache_node->list_lock: spinlock 초기화 수행
- kmem_cache_node->slabs: 1, kmem_cache_node->total_objects: 64 로 세팅함
- kmem_cache_node->full: 리스트 초기화
- kmem_cache_node의 partial 멤버에 현재 page의 lru 리스트를 추가함
init_kmem_cache_nodes(&boot_kmem_cache) 가 한일:
MIGRATE_UNMOVABLE인 page 할당 받아 쪼개놓은 object들에서 object를 1개 할당받음
page->counters: 0x80400040
page->inuse: 64
page->objects: 64
page->frozen: 1
page->freelist: NULL
c->freelist: ((&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋))->freelist:
UNMOVABLE인 page 의 object의 시작 virtual address + 128
c->tid: ((&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋))->tid: 4
첫번째 object:
kmem_cache_node->partial에 연결된 (MIGRATE_UNMOVABLE인 page)->lru 를 삭제
kmem_cache_node->nr_partial: 0
두번째 object:
kmem_cache_node->nr_partial: 0
kmem_cache_node->list_lock: spinlock 초기화 수행
kmem_cache_node->slabs: 0, kmem_cache_node->total_objects: 0 로 세팅함
kmem_cache_node->full: 리스트 초기화
page->counters: 0x80400040
page->inuse: 64
page->objects: 64
page->frozen: 1
page->freelist: NULL
c->freelist: ((&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋))->freelist:
UNMOVABLE인 page 의 object의 시작 virtual address + 128
c->tid: ((&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋))->tid: 4
첫번째 object:
kmem_cache_node->partial에 연결된 (MIGRATE_UNMOVABLE인 page)->lru 를 삭제
kmem_cache_node->nr_partial: 0
두번째 object:
kmem_cache_node->nr_partial: 0
kmem_cache_node->list_lock: spinlock 초기화 수행
kmem_cache_node->slabs: 0, kmem_cache_node->total_objects: 0 로 세팅함
kmem_cache_node->full: 리스트 초기화
slub.c::kmem_cache_open()
// ARM10C 20140614
// s: &boot_kmem_cache, flags: SLAB_HWCACHE_ALIGN: 0x00002000UL
// s: &boot_kmem_cache, flags: SLAB_HWCACHE_ALIGN: 0x00002000UL
static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
{
...
// s: &boot_kmem_cache_node, init_kmem_cache_nodes(&boot_kmem_cache_node): 1
// s: &boot_kmem_cache, init_kmem_cache_nodes(&boot_kmem_cache): 1
if (!init_kmem_cache_nodes(s))
goto error;
// s: &boot_kmem_cache_node, alloc_kmem_cache_cpus(&boot_kmem_cache_node): 1
// s: &boot_kmem_cache, alloc_kmem_cache_cpus(&boot_kmem_cache): 1
if (alloc_kmem_cache_cpus(s))
// s: &boot_kmem_cache, alloc_kmem_cache_cpus(&boot_kmem_cache): 1
if (alloc_kmem_cache_cpus(s))
slub.c::alloc_kmem_cache_cpus()
// ARM10C 20140621
// s: &boot_kmem_cache
// s: &boot_kmem_cache
static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
{
// PERCPU_DYNAMIC_EARLY_SIZE: 0x3000, KMALLOC_SHIFT_HIGH: 13
// sizeof(struct kmem_cache_cpu): 16 bytes
BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
KMALLOC_SHIFT_HIGH * sizeof(struct kmem_cache_cpu));
/*
* Must align to double word boundary for the double cmpxchg
* instructions to work; see __pcpu_double_call_return_bool().
*/
// s->cpu_slab: (&boot_kmem_cache_node)->cpu_slab
// sizeof(struct kmem_cache_cpu): 16 bytes, sizeof(void *): 8 bytes
// __alloc_percpu(16, 8): 0xc0502d00
// s->cpu_slab: (&boot_kmem_cache)->cpu_slab
// sizeof(struct kmem_cache_cpu): 16 bytes, sizeof(void *): 8 bytes
// __alloc_percpu(16, 8): 0xc0502d10
s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
2 * sizeof(void *));
// __alloc_percpu(16, 8): 0xc0502d10
s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
2 * sizeof(void *));
percpu.c::__alloc_percpu()
// ARM10C 20140621
// __alloc_percpu(16, 8)
// __alloc_percpu(16, 8)
void __percpu *__alloc_percpu(size_t size, size_t align)
{
// size: 16, align: 8
return pcpu_alloc(size, align, false);
percpu.c::pcpu_alloc()
// ARM10C 20140621
// size: 16, align: 8, false
// size: 16, align: 8, false
static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
{
static int warn_limit = 10;
// warn_limit: 10
struct pcpu_chunk *chunk;
const char *err;
int slot, off, new_alloc;
unsigned long flags;
void __percpu *ptr;
// size: 16, align: 8, PCPU_MIN_UNIT_SIZE: 0x8000, PAGE_SIZE: 0x1000
// size: 16, align: 8, PCPU_MIN_UNIT_SIZE: 0x8000, PAGE_SIZE: 0x1000
if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
WARN(true, "illegal size (%zu) or align (%zu) for "
"percpu allocation\n", size, align);
return NULL;
}
mutex_lock(&pcpu_alloc_mutex);
// pcpu_alloc_mutex의 mutex lock을 수행
// pcpu_alloc_mutex의 mutex lock을 수행
spin_lock_irqsave(&pcpu_lock, flags);
// pcpu_lock의 spin lock 을 수행하고 cpsr을 flags에 저장
// pcpu_lock의 spin lock 을 수행하고 cpsr을 flags에 저장
/* serve reserved allocations from the reserved chunk if available */
// reserved: false, pcpu_reserved_chunk: pcpu_setup_first_chunk()함수에서 할당한 schunk
// reserved: false, pcpu_reserved_chunk: pcpu_setup_first_chunk()함수에서 할당한 schunk
if (reserved && pcpu_reserved_chunk) {
chunk = pcpu_reserved_chunk;
if (size > chunk->contig_hint) {
err = "alloc from reserved chunk failed";
goto fail_unlock;
}
while ((new_alloc = pcpu_need_to_extend(chunk))) {
spin_unlock_irqrestore(&pcpu_lock, flags);
if (pcpu_extend_area_map(chunk, new_alloc) < 0) {
err = "failed to extend area map of reserved chunk";
goto fail_unlock_mutex;
}
spin_lock_irqsave(&pcpu_lock, flags);
}
off = pcpu_alloc_area(chunk, size, align);
if (off >= 0)
goto area_found;
err = "alloc from reserved chunk failed";
goto fail_unlock;
}
restart:
/* search through normal chunks */
// size: 16, pcpu_size_to_slot(16): 1, pcpu_nr_slots: 15
// size: 16, pcpu_size_to_slot(16): 1, pcpu_nr_slots: 15
for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
// slot: 1~10
// list_for_each_entry 의 &chunk->list != (&pcpu_slot[slot]) 조건에 의해
// 수행 되지 않음
list_for_each_entry(chunk, &pcpu_slot[slot], list) {
// for (chunk = list_first_entry(&pcpu_slot[slot], typeof(*chunk), list);
// &chunk->list != (&pcpu_slot[slot]); chunk = list_next_entry(chunk, list))
// chunk: &pcpu_slot[11]
// size: 16, chunk->contig_hint: (&pcpu_slot[11])->contig_hint: 0x3000
// size: 16, chunk->contig_hint: (&pcpu_slot[11])->contig_hint: 0x3000
if (size > chunk->contig_hint)
continue;
// chunk: &pcpu_slot[11]: dchunk: 4K만큼 할당 받은 주소
// chunk: &pcpu_slot[11]: dchunk: 4K만큼 할당 받은 주소
new_alloc = pcpu_need_to_extend(chunk);
// new_alloc: 0
// new_alloc: 0
if (new_alloc) {
spin_unlock_irqrestore(&pcpu_lock, flags);
if (pcpu_extend_area_map(chunk,
new_alloc) < 0) {
err = "failed to extend area map";
goto fail_unlock_mutex;
}
spin_lock_irqsave(&pcpu_lock, flags);
/*
* pcpu_lock has been dropped, need to
* restart cpu_slot list walking.
*/
goto restart;
}
// chunk: &pcpu_slot[11]: dchunk: 4K만큼 할당 받은 주소, size: 16, align: 8
// pcpu_alloc_area(&pcpu_slot[11], 16, 8): 0x1d00 + 0x2000
// chunk: &pcpu_slot[11]: dchunk: 4K만큼 할당 받은 주소, size: 16, align: 8
// pcpu_alloc_area(&pcpu_slot[11], 16, 8): 0x1d00 + 0x2000 + 16
off = pcpu_alloc_area(chunk, size, align);
// off: 0x1d00 + 0x2000
// off: 0x1d00 + 0x2000 + 16
// off: 0x1d00 + 0x2000
// off: 0x1d00 + 0x2000 + 16
if (off >= 0)
goto area_found;
// area_found 위치로 이동
// area_found 위치로 이동
}
}
/* hmmm... no space left, create a new chunk */
spin_unlock_irqrestore(&pcpu_lock, flags);
chunk = pcpu_create_chunk();
if (!chunk) {
err = "failed to allocate new chunk";
goto fail_unlock_mutex;
}
spin_lock_irqsave(&pcpu_lock, flags);
pcpu_chunk_relocate(chunk, -1);
goto restart;
area_found:
spin_unlock_irqrestore(&pcpu_lock, flags);
// pcpu_lock의 spin unlock 을 수행하고 flags에 저장되어있던 cpsr 복원
// pcpu_lock의 spin unlock 을 수행하고 flags에 저장되어있던 cpsr 복원
/* populate, map and clear the area */
// chunk: &pcpu_slot[11]: dchunk: 4K만큼 할당 받은 주소,
// off: 0x1d00 + 0x2000, size: 16
// pcpu_populate_chunk(&pcpu_slot[11], 0x3d00, 16): 0
// chunk: &pcpu_slot[11]: dchunk: 4K만큼 할당 받은 주소,
// off: 0x1d00 + 0x2000 + 16, size: 16
// pcpu_populate_chunk(&pcpu_slot[11], 0x3d10, 16): 0
if (pcpu_populate_chunk(chunk, off, size)) {
spin_lock_irqsave(&pcpu_lock, flags);
pcpu_free_area(chunk, off);
err = "failed to populate";
goto fail_unlock;
}
mutex_unlock(&pcpu_alloc_mutex);
// pcpu_alloc_mutex의 mutex unlock을 수행
// pcpu_alloc_mutex의 mutex unlock을 수행
/* return address relative to base address */
// chunk->base_addr: dchunk->base_addr: 128K 만큼 물리주소 0x5FFFFFFF 근처에 할당받은 주소
// off: 0x3d00
// __addr_to_pcpu_ptr(128K 만큼 물리주소 0x5FFFFFFF 근처에 할당받은 주소+0x3d00): 0xc0502d00
// chunk->base_addr: dchunk->base_addr: 128K 만큼 물리주소 0x5FFFFFFF 근처에 할당받은 주소
// off: 0x3d10
// __addr_to_pcpu_ptr(128K 만큼 물리주소 0x5FFFFFFF 근처에 할당받은 주소+0x3d10): 0xc0502d10
ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
// ptr: 0xc0502d00
// ptr: 0xc0502d10
// ptr: 0xc0502d00, size: 16
// ptr: 0xc0502d10, size: 16
kmemleak_alloc_percpu(ptr, size); // null function
// ptr: 0xc0502d00
// ptr: 0xc0502d10
return ptr;
// return 0xc0502d00
// return 0xc0502d10
fail_unlock:
spin_unlock_irqrestore(&pcpu_lock, flags);
fail_unlock_mutex:
mutex_unlock(&pcpu_alloc_mutex);
if (warn_limit) {
pr_warning("PERCPU: allocation failed, size=%zu align=%zu, "
"%s\n", size, align, err);
dump_stack();
if (!--warn_limit)
pr_info("PERCPU: limit reached, disable warning\n");
}
return NULL;
}
percpu.c::__alloc_percpu()
// ARM10C 20140621
// __alloc_percpu(16, 8)
// __alloc_percpu(16, 8)
void __percpu *__alloc_percpu(size_t size, size_t align)
{
// size: 16, align: 8
return pcpu_alloc(size, align, false);
// return 0xc0502d10
slub.c::alloc_kmem_cache_cpus()
static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
{
BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
KMALLOC_SHIFT_HIGH * sizeof(struct kmem_cache_cpu));
// s->cpu_slab: (&boot_kmem_cache_node)->cpu_slab
// sizeof(struct kmem_cache_cpu): 16 bytes, sizeof(void *): 8 bytes
// __alloc_percpu(16, 8): 0xc0502d00
// s->cpu_slab: (&boot_kmem_cache)->cpu_slab
// sizeof(struct kmem_cache_cpu): 16 bytes, sizeof(void *): 8 bytes
// __alloc_percpu(16, 8): 0xc0502d10
s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
2 * sizeof(void *));
// s->cpu_slab: (&boot_kmem_cache_node)->cpu_slab: 0xc0502d00
// s->cpu_slab: (&boot_kmem_cache)->cpu_slab: 0xc0502d10
// s->cpu_slab: (&boot_kmem_cache_node)->cpu_slab: 0xc0502d00
// s->cpu_slab: (&boot_kmem_cache)->cpu_slab: 0xc0502d10
if (!s->cpu_slab)
return 0;
// s: &boot_kmem_cache_node
// s: &boot_kmem_cache
init_kmem_cache_cpus(s);
slub.c::init_kmem_cache_cpus()
// ARM10C 20140621
// s: &boot_kmem_cache
// s: &boot_kmem_cache
static void init_kmem_cache_cpus(struct kmem_cache *s)
{
int cpu;
// nr_cpu_ids: 4, cpu_possible_mask: cpu_possible_bits[1]
// cpumask_next((-1), cpu_possible_bits[1]): 0
for_each_possible_cpu(cpu)
// for ((cpu) = -1; (cpu) = cpumask_next((cpu), (cpu_possible_mask)), (cpu) < nr_cpu_ids; )
// s->cpu_slab: (&boot_kmem_cache_node)->cpu_slab: 0xc0502d00, cpu: 0
// per_cpu_ptr((&boot_kmem_cache_node)->cpu_slab, 0):
// (&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의 pcpu_base_addr의 옵셋)
// init_tid(0): 0
// s->cpu_slab: (&boot_kmem_cache)->cpu_slab: 0xc0502d10, cpu: 0
// per_cpu_ptr((&boot_kmem_cache)->cpu_slab, 0):
// (&boot_kmem_cache)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의 pcpu_base_addr의 옵셋)
// init_tid(0): 0
per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
// ((&boot_kmem_cache_node)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋))->tid: 0
// ((&boot_kmem_cache)->cpu_slab + (pcpu_unit_offsets[0] + __per_cpu_start에서의pcpu_base_addr의 옵셋))->tid: 0
// 할당받은 pcpu 들의 16 byte 공간에 각 cpu에 사용하는 kmem_cache_cpu의 tid 멤버를 설정
}
slub.c::alloc_kmem_cache_cpus()
static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
{
BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
KMALLOC_SHIFT_HIGH * sizeof(struct kmem_cache_cpu));
// s->cpu_slab: (&boot_kmem_cache_node)->cpu_slab
// sizeof(struct kmem_cache_cpu): 16 bytes, sizeof(void *): 8 bytes
// __alloc_percpu(16, 8): 0xc0502d00
// s->cpu_slab: (&boot_kmem_cache)->cpu_slab
// sizeof(struct kmem_cache_cpu): 16 bytes, sizeof(void *): 8 bytes
// __alloc_percpu(16, 8): 0xc0502d10
s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
2 * sizeof(void *));
// s->cpu_slab: (&boot_kmem_cache_node)->cpu_slab: 0xc0502d00
// s->cpu_slab: (&boot_kmem_cache)->cpu_slab: 0xc0502d10
// s->cpu_slab: (&boot_kmem_cache_node)->cpu_slab: 0xc0502d00
// s->cpu_slab: (&boot_kmem_cache)->cpu_slab: 0xc0502d10
if (!s->cpu_slab)
return 0;
// s: &boot_kmem_cache_node
// s: &boot_kmem_cache
init_kmem_cache_cpus(s);
// 할당받은 pcpu 들의 16 byte 공간에 각 cpu에 사용하는 kmem_cache_cpu의 tid 멤버를 설정
// 할당받은 pcpu 들의 16 byte 공간에 각 cpu에 사용하는 kmem_cache_cpu의 tid 멤버를 설정
return 1;
// return 1
// return 1
}
kmem_cache_open 가 한일:
- boot_kmem_cache_node.flags: SLAB_HWCACHE_ALIGN: 0x00002000UL
- boot_kmem_cache_node.reserved: 0
- boot_kmem_cache_node.min_partial: 5
- boot_kmem_cache_node.cpu_partial: 30
* - migratetype이 MIGRATE_UNMOVABLE인 page 할당 받음
- page 멤버를 셋팅함
- page->slab_cache: &boot_kmem_cache_node 주소를 set
- page->flags에 7 (PG_slab) bit를 set
- page->freelist: UNMOVABLE인 page 의 object의 시작 virtual address + 64
- page->inuse: 1, page->frozen: 0 page 멤버를 셋팅함
- slab 의 objects 들의 freepointer를 맵핑함
- 할당받은 slab object를 kmem_cache_node 로 사용하고 kmem_cache_node의 멤버 필드를 초기화함
- kmem_cache_node->nr_partial: 1
- kmem_cache_node->list_lock: spinlock 초기화 수행
- kmem_cache_node->slabs: 1, kmem_cache_node->total_objects: 64 로 세팅함
- kmem_cache_node->full: 리스트 초기화
- kmem_cache_node의 partial 멤버에 현재 page의 lru 리스트를 추가함
* - kmem_cache_node 가 boot_kmem_cache_node.node[0]에 할당됨
* - 할당받은 pcpu 들의 16 byte 공간 (&boot_kmem_cache_node)->cpu_slab 에
- 각 cpu에 사용하는 kmem_cache_cpu의 tid 멤버를 설정
slub.c::__kmem_cache_create()
// ARM10C 20140614
// s: &boot_kmem_cache, flags: SLAB_HWCACHE_ALIGN: 0x00002000UL
// s: &boot_kmem_cache, flags: SLAB_HWCACHE_ALIGN: 0x00002000UL
int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
{
int err;
// s: &boot_kmem_cache_node, flags: SLAB_HWCACHE_ALIGN: 0x00002000UL
// kmem_cache_open(&boot_kmem_cache_node, 0x00002000UL): 0
// s: &boot_kmem_cache, flags: SLAB_HWCACHE_ALIGN: 0x00002000UL
// kmem_cache_open(&boot_kmem_cache, 0x00002000UL): 0
err = kmem_cache_open(s, flags);
// err: 0
// err: 0
// 2014/06/21 종료
git log
From github.com:arm10c/linux-stable
5ec2e72..e2b578f master -> origin/master
Updating 5ec2e72..e2b578f
Fast-forward
arch/arm/include/asm/barrier.h | 13 ++++-
arch/arm/include/asm/bitops.h | 9 ++--
arch/arm/lib/bitops.h | 1 +
arch/arm/lib/testsetbit.S | 1 +
include/asm-generic/bitops/lock.h | 4 ++
include/asm-generic/bitops/non-atomic.h | 1 +
include/linux/bit_spinlock.h | 23 +++++++--
include/linux/compiler.h | 2 +
include/linux/gfp.h | 1 +
include/linux/irqflags.h | 1 +
include/linux/kmemcheck.h | 3 ++
include/linux/kmemleak.h | 4 ++
include/linux/log2.h | 2 +
include/linux/mm_types.h | 14 ++++--
include/linux/page-flags-layout.h | 2 +-
include/linux/page-flags.h | 47 ++++++++++++++++++
include/linux/preempt.h | 2 +
include/linux/slab.h | 5 ++
include/linux/slub_def.h | 2 +
include/trace/events/kmem.h | 7 +++
lib/string.c | 1 +
mm/page_alloc.c | 1 +
mm/percpu-vm.c | 22 +++++++++
mm/percpu.c | 209 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-----------------
mm/slub.c | 341 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-----
e2b578f..78add0d master -> origin/master
Updating e2b578f..78add0d
Fast-forward
include/linux/mm_types.h | 16 +++++++++++++++-
include/linux/page-flags.h | 6 +++++-
include/linux/slab.h | 2 ++
include/trace/events/kmem.h | 3 +--
mm/percpu-vm.c | 4 ++--
mm/percpu.c | 51 +++++++++++++++++++++++++++++----------------------
mm/slub.c | 61 +++++++++++++++++++++++++++++++++++++++++++++++--------------
댓글 없음:
댓글 쓰기