Commit cddcb503 authored by Benoît du Garreau
Browse files

Add diffs

parent 1c543501
diff --git a/kernel/exec.c b/kernel/exec.c
index 69f8b40..3499901 100644
--- a/kernel/exec.c
+++ b/kernel/exec.c
@@ -22,6 +22,12 @@ exec(char *path, char **argv)
struct proghdr ph;
pagetable_t pagetable = 0, oldpagetable;
struct proc *p = myproc();
+ struct vma *stack_vma = p->stack_vma, *heap_vma = p->heap_vma, *memory_areas = p->memory_areas;
+ struct vma *page;
+
+ p->memory_areas = 0;
+ p->stack_vma = 0;
+ p->heap_vma = 0;
begin_op(ROOTDEV);
@@ -73,7 +79,15 @@ exec(char *path, char **argv)
printf("exec: loadseg failed\n");
goto bad;
}
+ page = add_memory_area(p, ph.vaddr, ph.vaddr + ph.memsz);
+ if ((ph.flags & ELF_PROG_FLAG_READ) != 0)
+ page->vma_flags |= VMA_R;
+ if ((ph.flags & ELF_PROG_FLAG_WRITE) != 0)
+ page->vma_flags |= VMA_W;
+ if ((ph.flags & ELF_PROG_FLAG_EXEC) != 0)
+ page->vma_flags |= VMA_X;
}
+
iunlockput(ip);
end_op(ROOTDEV);
ip = 0;
@@ -84,6 +98,9 @@ exec(char *path, char **argv)
// Allocate two pages at the next page boundary.
// Use the second as the user stack.
sz = PGROUNDUP(sz);
+ add_memory_area(p, sz, sz + PGSIZE);
+ p->stack_vma = add_memory_area(p, sz + PGSIZE, sz + 2*PGSIZE);
+ p->stack_vma->vma_flags = VMA_R | VMA_W;
if((sz = uvmalloc(pagetable, sz, sz + 2*PGSIZE)) == 0){
printf("exec: uvmalloc failed for the stack\n");
goto bad;
@@ -91,6 +108,8 @@ exec(char *path, char **argv)
uvmclear(pagetable, sz-2*PGSIZE);
sp = sz;
stackbase = sp - PGSIZE;
+ p->heap_vma = add_memory_area(p, sz, sz);
+ p->heap_vma->vma_flags = VMA_R | VMA_W;
// Push argument strings, prepare rest of stack in ustack.
for(argc = 0; argv[argc]; argc++) {
@@ -146,9 +165,14 @@ exec(char *path, char **argv)
p->tf->sp = sp; // initial stack pointer
proc_freepagetable(oldpagetable, oldsz);
+ free_vma(memory_areas);
+
return argc; // this ends up in a0, the first argument to main(argc, argv)
bad:
+ p->memory_areas = memory_areas;
+ p->stack_vma = stack_vma;
+ p->heap_vma = heap_vma;
if(pagetable)
proc_freepagetable(pagetable, sz);
if(ip){
diff --git a/kernel/proc.c b/kernel/proc.c
index 31a6798..e245045 100644
--- a/kernel/proc.c
+++ b/kernel/proc.c
@@ -344,6 +344,8 @@ void userinit(void) {
uvminit(p->pagetable, initcode, sizeof(initcode));
p->sz = PGSIZE;
+ p->stack_vma = add_memory_area(p, 0, PGSIZE);
+
// prepare for the very first "return" from kernel to user.
p->tf->epc = 0; // user program counter
p->tf->sp = PGSIZE; // user stack pointer
@@ -394,6 +396,8 @@ int fork(void) {
}
np->sz = p->sz;
+ vma_copy(np, p);
+
np->parent = p;
// copy saved user registers.
diff --git a/kernel/vm.c b/kernel/vm.c
index c72eafc..8c252ce 100644
--- a/kernel/vm.c
+++ b/kernel/vm.c
@@ -420,6 +420,19 @@ allocate:;
}
int do_allocate_range(pagetable_t pagetable, struct proc* p, uint64 addr, uint64 len){
+ if (len==0) return 0;
+
+ uint64 begin = PGROUNDDOWN(addr);
+ uint64 end = PGROUNDUP(addr+len -1);
+ acquire(&p->vma_lock);
+ for (uint64 current_addr = begin; current_addr<end; current_addr+=PGSIZE) {
+ int flags = do_allocate(pagetable,p,current_addr);
+ if (flags<0) {
+ release(&p->vma_lock);
+ return flags;
+ }
+ }
+ release(&p->vma_lock);
return 0;
}
diff --git a/kernel/exec.c b/kernel/exec.c
index 3499901..579d784 100644
--- a/kernel/exec.c
+++ b/kernel/exec.c
@@ -24,6 +24,8 @@ exec(char *path, char **argv)
struct proc *p = myproc();
struct vma *stack_vma = p->stack_vma, *heap_vma = p->heap_vma, *memory_areas = p->memory_areas;
struct vma *page;
+ uint64 oldsz = max_addr_in_memory_areas(p);
+
p->memory_areas = 0;
p->stack_vma = 0;
@@ -92,9 +94,6 @@ exec(char *path, char **argv)
end_op(ROOTDEV);
ip = 0;
- p = myproc();
- uint64 oldsz = p->sz;
-
// Allocate two pages at the next page boundary.
// Use the second as the user stack.
sz = PGROUNDUP(sz);
@@ -160,7 +159,6 @@ exec(char *path, char **argv)
// Commit to the user image.
oldpagetable = p->pagetable;
p->pagetable = pagetable;
- p->sz = sz;
p->tf->epc = elf.entry; // initial program counter = main
p->tf->sp = sp; // initial stack pointer
proc_freepagetable(oldpagetable, oldsz);
diff --git a/kernel/proc.c b/kernel/proc.c
index c21d71b..1cde1e4 100644
--- a/kernel/proc.c
+++ b/kernel/proc.c
@@ -272,7 +272,7 @@ static void freeproc(struct proc *p) {
kfree((void *)p->tf);
p->tf = 0;
if (p->pagetable)
- proc_freepagetable(p->pagetable, p->sz);
+ proc_freepagetable(p->pagetable, max_addr_in_memory_areas(p));
if (p->cmd)
bd_free(p->cmd);
p->cmd = 0;
@@ -284,7 +284,6 @@ static void freeproc(struct proc *p) {
p->memory_areas = 0;
p->stack_vma = 0;
p->heap_vma = 0;
- p->sz = 0;
p->pid = 0;
p->parent = 0;
p->name[0] = 0;
@@ -342,7 +341,6 @@ void userinit(void) {
// allocate one user page and copy init's instructions
// and data into it.
uvminit(p->pagetable, initcode, sizeof(initcode));
- p->sz = PGSIZE;
p->stack_vma = add_memory_area(p, 0, PGSIZE);
@@ -361,7 +359,6 @@ void userinit(void) {
// Grow or shrink user memory by n bytes.
// Return 0 on success, -1 on failure.
int growproc(long n) {
- uint64 sz;
struct proc *p = myproc();
uint64 heap_addr = p->heap_vma->va_end;
uint64 va_end = heap_addr + n;
@@ -370,18 +367,12 @@ int growproc(long n) {
if (va_begin > va_end || (va_end - va_begin >= HEAP_THRESHOLD)) {
return -1;
}
- sz = p->sz;
p->heap_vma->va_end = va_end;
if(n > 0){
- sz=PGROUNDUP(va_end);
- p->sz = sz;
- // if((sz = uvmalloc(p->pagetable, sz, sz + n)) == 0) {
return 0;
- // }
} else if(n < 0){
- sz = uvmdealloc(p->pagetable, sz, sz + n);
+ uvmdealloc(p->pagetable, heap_addr, va_end);
}
- p->sz = sz;
return 0;
}
@@ -398,12 +389,11 @@ int fork(void) {
}
// Copy user memory from parent to child.
- if (uvmcopy(p->pagetable, np->pagetable, p->sz) < 0) {
+ if (uvmcopy(p->pagetable, np->pagetable, max_addr_in_memory_areas(p)) < 0) {
freeproc(np);
release(&np->lock);
return -1;
}
- np->sz = p->sz;
vma_copy(np, p);
diff --git a/kernel/proc.h b/kernel/proc.h
index 30ef880..f47c1da 100644
--- a/kernel/proc.h
+++ b/kernel/proc.h
@@ -144,7 +144,6 @@ struct proc {
// these are private to the process, so p->lock need not be held.
uint64 kstack; // Virtual address of kernel stack
- uint64 sz; // Size of process memory (bytes)
struct spinlock vma_lock;
struct vma * memory_areas; // VMAs du processus
struct vma * stack_vma; // Une VMA particulière pour la pile
diff --git a/kernel/syscall.c b/kernel/syscall.c
index d03d675..44a7e37 100644
--- a/kernel/syscall.c
+++ b/kernel/syscall.c
@@ -12,7 +12,8 @@ int
fetchaddr(uint64 addr, uint64 *ip)
{
struct proc *p = myproc();
- if(addr >= p->sz || addr+sizeof(uint64) > p->sz)
+ uint64 sz = max_addr_in_memory_areas(p);
+ if(addr >= sz || addr+sizeof(uint64) > sz)
return -1;
if(copyin(p->pagetable, (char *)ip, addr, sizeof(*ip)) != 0)
return -1;
diff --git a/kernel/sysproc.c b/kernel/sysproc.c
index 2b7454c..1f7d9c5 100644
--- a/kernel/sysproc.c
+++ b/kernel/sysproc.c
@@ -46,7 +46,7 @@ sys_sbrk(void)
if(argaddr(0, &n) < 0)
return -1;
- addr = myproc()->sz;
+ addr = myproc()->heap_vma->va_end;
if(growproc((long)n) < 0)
return -1;
return addr;
diff --git a/kernel/proc.c b/kernel/proc.c
index e245045..c21d71b 100644
--- a/kernel/proc.c
+++ b/kernel/proc.c
@@ -363,12 +363,21 @@ void userinit(void) {
int growproc(long n) {
uint64 sz;
struct proc *p = myproc();
+ uint64 heap_addr = p->heap_vma->va_end;
+ uint64 va_end = heap_addr + n;
+ uint64 va_begin = p->heap_vma->va_begin;
+ if (va_begin > va_end || (va_end - va_begin >= HEAP_THRESHOLD)) {
+ return -1;
+ }
sz = p->sz;
+ p->heap_vma->va_end = va_end;
if(n > 0){
- if((sz = uvmalloc(p->pagetable, sz, sz + n)) == 0) {
- return -1;
- }
+ sz=PGROUNDUP(va_end);
+ p->sz = sz;
+ // if((sz = uvmalloc(p->pagetable, sz, sz + n)) == 0) {
+ return 0;
+ // }
} else if(n < 0){
sz = uvmdealloc(p->pagetable, sz, sz + n);
}
diff --git a/kernel/vm.c b/kernel/vm.c
index ee6c99d..465d029 100644
--- a/kernel/vm.c
+++ b/kernel/vm.c
@@ -376,7 +376,19 @@ int load_from_file(char* file,
return 0;
}
-int do_allocate(pagetable_t pagetable, struct proc* p, uint64 addr){
+int do_allocate(pagetable_t pagetable, struct proc* p, uint64 addr) {
+ pte_t* pte = walk(pagetable, addr, 0);
+ if (pte == 0) {
+ return ENOMEM;
+ }
+
+ uint64 flags = PTE_FLAGS(*pte);
+ if ((flags & PTE_V) == 0) {
+ return ENOMEM;
+ }
+ if ((flags & PTE_U) == 0) {
+ return EBADPERM;
+ }
return 0;
}
diff --git a/kernel/vm.c b/kernel/vm.c
index 465d029..c72eafc 100644
--- a/kernel/vm.c
+++ b/kernel/vm.c
@@ -377,19 +377,46 @@ int load_from_file(char* file,
}
int do_allocate(pagetable_t pagetable, struct proc* p, uint64 addr) {
- pte_t* pte = walk(pagetable, addr, 0);
+ pte_t* pte = walk(pagetable, addr, 1);
if (pte == 0) {
return ENOMEM;
}
- uint64 flags = PTE_FLAGS(*pte);
- if ((flags & PTE_V) == 0) {
- return ENOMEM;
+ uint64 pte_flags = PTE_FLAGS(*pte);
+ if ((pte_flags & PTE_V) == 0) {
+ goto allocate;
}
- if ((flags & PTE_U) == 0) {
+ if ((pte_flags & PTE_U) == 0) {
return EBADPERM;
}
return 0;
+
+allocate:;
+ struct vma* vmap = get_memory_area(p, addr);
+ if(vmap == 0) {
+ return ENOVMA;
+ }
+
+ void* new_page;
+ if ((new_page = kalloc()) == 0) {
+ return ENOMEM;
+ }
+ memset(new_page, 0, PGSIZE);
+
+ uint64 flags = PTE_U;
+ unsigned char vma_flags = vmap->vma_flags;
+ if((vma_flags & VMA_R) != 0)
+ flags |= PTE_R;
+ if((vma_flags & VMA_W) != 0)
+ flags |= PTE_W;
+ if((vma_flags & VMA_X) != 0)
+ flags |= PTE_X;
+
+ if(mappages(pagetable, PGROUNDDOWN(addr), PGSIZE, (uint64)new_page, flags) == -1) {
+ kfree(new_page);
+ return EMAPFAILED;
+ }
+ return 0;
}
int do_allocate_range(pagetable_t pagetable, struct proc* p, uint64 addr, uint64 len){
diff --git a/kernel/exec.c b/kernel/exec.c
index 579d784..2ae4cbf 100644
--- a/kernel/exec.c
+++ b/kernel/exec.c
@@ -96,17 +96,12 @@ exec(char *path, char **argv)
// Allocate two pages at the next page boundary.
// Use the second as the user stack.
- sz = PGROUNDUP(sz);
- add_memory_area(p, sz, sz + PGSIZE);
- p->stack_vma = add_memory_area(p, sz + PGSIZE, sz + 2*PGSIZE);
+ p->stack_vma = add_memory_area(p, USTACK_BOTTOM, USTACK_TOP);
p->stack_vma->vma_flags = VMA_R | VMA_W;
- if((sz = uvmalloc(pagetable, sz, sz + 2*PGSIZE)) == 0){
- printf("exec: uvmalloc failed for the stack\n");
- goto bad;
- }
- uvmclear(pagetable, sz-2*PGSIZE);
- sp = sz;
- stackbase = sp - PGSIZE;
+ sp = USTACK_TOP;
+ stackbase = USTACK_BOTTOM;
+
+ sz = PGROUNDUP(sz);
p->heap_vma = add_memory_area(p, sz, sz);
p->heap_vma->vma_flags = VMA_R | VMA_W;
@@ -172,7 +167,7 @@ exec(char *path, char **argv)
p->stack_vma = stack_vma;
p->heap_vma = heap_vma;
if(pagetable)
- proc_freepagetable(pagetable, sz);
+ proc_freepagetable(pagetable, USTACK_TOP);
if(ip){
iunlockput(ip);
end_op(ROOTDEV);
diff --git a/kernel/memlayout.h b/kernel/memlayout.h
index d5a2f9a..4d68cb2 100644
--- a/kernel/memlayout.h
+++ b/kernel/memlayout.h
@@ -73,7 +73,7 @@
#define HEAP_THRESHOLD (8*1024*1024)
#define USTACK_BOTTOM (256*1024*1024)
-#define USTACK_LIMIT (4 * 1024)
+#define USTACK_LIMIT (64 * 4*1024)
#define USTACK_TOP (USTACK_BOTTOM + USTACK_LIMIT)
#endif
diff --git a/kernel/defs.h b/kernel/defs.h
index 0ae9b48..4ce9043 100644
--- a/kernel/defs.h
+++ b/kernel/defs.h
@@ -191,7 +191,7 @@ void plicinithart(void);
int plic_claim(void);
void plic_complete(int);
-int do_allocate(pagetable_t pagetable, struct proc*, uint64 addr);
+int do_allocate(pagetable_t pagetable, struct proc*, uint64 addr, uint64 scause);
// virtio_disk.c
void virtio_disk_init(int);
diff --git a/kernel/trap.c b/kernel/trap.c
index 1537634..17aeef7 100644
--- a/kernel/trap.c
+++ b/kernel/trap.c
@@ -38,7 +38,7 @@ int handle_page_fault(struct proc* p, uint64 scause, uint64 stval, uint64 sepc){
acquire(&p->vma_lock);
printf("handle_page_fault pid=%d (%s), scause=%p, stval=%p, sepc=%p\n", p->pid, p->name, scause, stval, sepc);
// proc_vmprint(p);
- int flags = do_allocate(p->pagetable, p, addr);
+ int flags = do_allocate(p->pagetable, p, addr, scause);
release(&p->vma_lock);
if(flags < 0){
if(flags == ENOVMA){
diff --git a/kernel/vm.c b/kernel/vm.c
index 8c252ce..fa11730 100644
--- a/kernel/vm.c
+++ b/kernel/vm.c
@@ -376,7 +376,7 @@ int load_from_file(char* file,
return 0;
}
-int do_allocate(pagetable_t pagetable, struct proc* p, uint64 addr) {
+int do_allocate(pagetable_t pagetable, struct proc* p, uint64 addr, uint64 scause) {
pte_t* pte = walk(pagetable, addr, 1);
if (pte == 0) {
return ENOMEM;
@@ -419,14 +419,14 @@ allocate:;
return 0;
}
-int do_allocate_range(pagetable_t pagetable, struct proc* p, uint64 addr, uint64 len){
+int do_allocate_range(pagetable_t pagetable, struct proc* p, uint64 addr, uint64 len, uint64 scause){
if (len==0) return 0;
uint64 begin = PGROUNDDOWN(addr);
uint64 end = PGROUNDUP(addr+len -1);
acquire(&p->vma_lock);
for (uint64 current_addr = begin; current_addr<end; current_addr+=PGSIZE) {
- int flags = do_allocate(pagetable,p,current_addr);
+ int flags = do_allocate(pagetable,p,current_addr, scause);
if (flags<0) {
release(&p->vma_lock);
return flags;
@@ -444,7 +444,7 @@ copyout(pagetable_t pagetable, uint64 dstva, char *src, uint64 len)
{
uint64 n, va0, pa0;
- int f = do_allocate_range(pagetable, myproc(), dstva, len);
+ int f = do_allocate_range(pagetable, myproc(), dstva, len, CAUSE_W);
if(f < 0) return -1;
while(len > 0){
@@ -472,7 +472,7 @@ copyin(pagetable_t pagetable, char *dst, uint64 srcva, uint64 len)
{
uint64 n, va0, pa0;
- int f = do_allocate_range(pagetable, myproc(), srcva, len);
+ int f = do_allocate_range(pagetable, myproc(), srcva, len, CAUSE_R);
if(f < 0) return -1;
while(len > 0){
@@ -505,7 +505,7 @@ copyinstr(pagetable_t pagetable, char *dst, uint64 srcva, uint64 max)
acquire(&myproc()->vma_lock);
while(got_null == 0 && max > 0){
va0 = PGROUNDDOWN(srcva);
- int f = do_allocate(pagetable, myproc(), srcva);
+ int f = do_allocate(pagetable, myproc(), srcva, CAUSE_R);
if(f < 0) {
release(&myproc()->vma_lock);
return -1;
diff --git a/kernel/proc.c b/kernel/proc.c
index 1cde1e4..897ae4e 100644
--- a/kernel/proc.c
+++ b/kernel/proc.c
@@ -343,6 +343,7 @@ void userinit(void) {
uvminit(p->pagetable, initcode, sizeof(initcode));
p->stack_vma = add_memory_area(p, 0, PGSIZE);
+ p->stack_vma->vma_flags |= VMA_R | VMA_W;
// prepare for the very first "return" from kernel to user.
p->tf->epc = 0; // user program counter
diff --git a/kernel/vm.c b/kernel/vm.c
index fa11730..0956130 100644
--- a/kernel/vm.c
+++ b/kernel/vm.c
@@ -377,6 +377,8 @@ int load_from_file(char* file,
}
int do_allocate(pagetable_t pagetable, struct proc* p, uint64 addr, uint64 scause) {
+ int allocate = 0;
+
pte_t* pte = walk(pagetable, addr, 1);
if (pte == 0) {
return ENOMEM;
@@ -384,19 +386,36 @@ int do_allocate(pagetable_t pagetable, struct proc* p, uint64 addr, uint64 scaus
uint64 pte_flags = PTE_FLAGS(*pte);
if ((pte_flags & PTE_V) == 0) {
- goto allocate;
- }
- if ((pte_flags & PTE_U) == 0) {
+ allocate = 1;
+ } else if ((pte_flags & PTE_U) == 0) {
return EBADPERM;
}
- return 0;
-allocate:;
struct vma* vmap = get_memory_area(p, addr);
if(vmap == 0) {
return ENOVMA;
}
+ unsigned char vma_flags = vmap->vma_flags;
+
+ switch (scause) {
+ case CAUSE_R:
+ if ((vma_flags & VMA_R) == 0)
+ return EBADPERM;
+ break;
+ case CAUSE_W:
+ if ((vma_flags & VMA_W) == 0)
+ return EBADPERM;
+ break;
+ case CAUSE_X:
+ if ((vma_flags & VMA_X) == 0)
+ return EBADPERM;
+ break;
+ }
+
+ if (!allocate)
+ return 0;
+
void* new_page;
if ((new_page = kalloc()) == 0) {
return ENOMEM;
@@ -404,7 +423,6 @@ allocate:;
memset(new_page, 0, PGSIZE);
uint64 flags = PTE_U;
- unsigned char vma_flags = vmap->vma_flags;
if((vma_flags & VMA_R) != 0)
flags |= PTE_R;
if((vma_flags & VMA_W) != 0)
diff --git a/kernel/exec.c b/kernel/exec.c
index 2ae4cbf..1c1349a 100644
--- a/kernel/exec.c
+++ b/kernel/exec.c
@@ -7,10 +7,6 @@
#include "defs.h"
#include "elf.h"
-static int loadseg(pde_t *pgdir, uint64 addr, struct inode *ip, uint offset, uint sz);
-
-
-
int
exec(char *path, char **argv)
{
@@ -69,19 +65,16 @@ exec(char *path, char **argv)
printf("exec: program header vaddr + memsz < vaddr\n");
goto bad;
}
- if((sz = uvmalloc(pagetable, sz, ph.vaddr + ph.memsz)) == 0){
- printf("exec: uvmalloc failed\n");
- goto bad;
- }
if(ph.vaddr % PGSIZE != 0){
printf("exec: vaddr not page aligned\n");
goto bad;
}
- if(loadseg(pagetable, ph.vaddr, ip, ph.off, ph.filesz) < 0){
- printf("exec: loadseg failed\n");
- goto bad;
- }
+
page = add_memory_area(p, ph.vaddr, ph.vaddr + ph.memsz);
+ sz = ph.vaddr + ph.memsz;
+ page->file = strdup(path);
+ page->file_offset = ph.off;
+ page->file_nbytes = ph.filesz;
if ((ph.flags & ELF_PROG_FLAG_READ) != 0)
page->vma_flags |= VMA_R;
if ((ph.flags & ELF_PROG_FLAG_WRITE) != 0)
@@ -174,31 +167,3 @@ exec(char *path, char **argv)
}
return -1;
}
-
-// Load a program segment into pagetable at virtual address va.
-// va must be page-aligned
-// and the pages from va to va+sz must already be mapped.
-// Returns 0 on success, -1 on failure.
-static int
-loadseg(pagetable_t pagetable, uint64 va, struct inode *ip, uint offset, uint sz)
-{
- uint i, n;
- uint64 pa;
-
- if((va % PGSIZE) != 0)
- panic("loadseg: va must be page aligned");
-
- for(i = 0; i < sz; i += PGSIZE){
- pa = walkaddr(pagetable, va + i);
- if(pa == 0)
- panic("loadseg: address should exist");
- if(sz - i < PGSIZE)
- n = sz - i;
- else
- n = PGSIZE;
- if(readi(ip, 0, (uint64)pa, offset+i, n) != n)
- return -1;