Commit f5fe475c authored by Léo Grange's avatar Léo Grange

closed #5 (Merge branch 'memory_areas')

parents 84461507 d1dfd46a
......@@ -13,6 +13,7 @@
#include <sys/memory.h>
#include <utils/log.h>
#include <sys/kdebug.h>
#include <sys/mem_area.h>
//void exception_handler() __attribute__ ((interrupt_handler, section(".handler.exception")));
......@@ -83,7 +84,7 @@ void exception_handler()
printk(LOG_ERR, "> TEA value = %p\n", (void*)tea);
printk(LOG_ERR, "> *TEA = (%p)\n", (void*)(*(int*)(tea-(tea%4))));
printk(LOG_ERR, "> SPC Value = %p\n", spcval);
if(EXP_CODE_BAD_SLOTINSTR)
if(evt == EXP_CODE_BAD_SLOTINSTR)
kdebug_oops("Illegal slot instruction");
else
kdebug_oops("Illegal instruction");
......@@ -134,6 +135,9 @@ void exception_handler()
}
// used to avoid infinite TLB fault loops when exceptions are allowed inside
// a page-fault resolving process...
static int _recurcive_tlbfault = 0;
/**
* This handler is very important for Virtual Memory, it has to check
......@@ -147,15 +151,61 @@ void tlbmiss_handler()
union pm_page *page;
uint32 vpn;
struct process *curpr;
// the process which cause the TLB miss should be the current one
curpr = _proc_current;
if(_recurcive_tlbfault) {
void *spcval;
asm volatile("stc spc, %0":"=r"(spcval));
printk(LOG_EMERG, "> [%d] Page fault %p, PC=%p\n", MMU.PTEH.BIT.ASID,
PM_PHYSICAL_ADDR(MMU.PTEH.BIT.VPN), spcval);
kdebug_oops("Recurcive page fault");
}
// find the corresponding page, if exists
vpn = MMU.PTEH.BIT.VPN;
page = mem_find_page(curpr->dir_list,
(void*)(vpn << PM_PAGE_ORDER) );
// if page is not in dir list (or is invalid), maybe it exists in memory
// area (allocate it)
// FIXME not working for shared pages
if(page == NULL || !(page->private.flags & MEM_PAGE_VALID)) {
struct mem_area *area;
void *virtaddr;
// allow exceptions to occur (really helpful for debugging...)
sched_preempt_block();
_recurcive_tlbfault = 1;
arch_int_weak_atomic_block(1);
interrupt_inhibit_all(0);
virtaddr = PM_PHYSICAL_ADDR(vpn);
area = mem_area_find(curpr, virtaddr);
if(area != NULL) {
union pm_page pmpage;
// 'major' page fault, create and fill it
pmpage = mem_area_pagefault(area, virtaddr);
if(pmpage.private.ppn != 0) {
mem_insert_page(& curpr->dir_list, &pmpage, virtaddr);
// not optimized, but ensure page points to a valid page struct
page = mem_find_page(curpr->dir_list, virtaddr);
printk(LOG_DEBUG, "tlb major fault: page inserted (virt %p -> phy %p)\n",
virtaddr, PM_PHYSICAL_ADDR(page->private.ppn));
}
}
interrupt_inhibit_all(1);
_recurcive_tlbfault = 0;
sched_preempt_unblock();
}
if(page != NULL) {
unsigned int flags;
uint32 ppn = 0;
......
......@@ -4,15 +4,24 @@
#include <fs/inode.h>
#include <interface/fixos/errno.h>
#include "smemfs_primitives_ng.h"
// used for memory areas...
#include <fs/vfs_file.h>
const struct file_operations smemfs_file_operations = {
.release = smemfs_release,
.read = smemfs_read,
.lseek = smemfs_lseek
.lseek = smemfs_lseek,
.map_area = smemfs_map_area
};
const struct mem_area_ops smemfs_mem_ops = {
.area_pagefault = smemfs_area_pagefault,
.area_release = smemfs_area_release,
.area_duplicate = smemfs_area_duplicate
};
int smemfs_release (struct file *filep) {
// nothing special to do for now
......@@ -21,6 +30,63 @@ int smemfs_release (struct file *filep) {
}
/**
* Internal helper for reading data from a file
*/
static ssize_t smemfs_read_data (struct smemfs_file_preheader *header,
void *dest, size_t len, size_t atpos)
{
int j, n;
ssize_t max_read;
struct smemfs_frag_header *frag;
size_t pos_frag;
size_t pos_tmp;
size_t pos_buf;
size_t file_size;
frag = (void*)(header+1);
file_size = smemfs_prim_get_file_size(header);
max_read = file_size - atpos;
max_read = max_read < len ? max_read : len;
n = atpos + max_read;
j = atpos;
// TODO check everything
// look for fragment containing the first byte
pos_tmp = 0;
while(pos_tmp + frag->data_size+1 <= atpos) {
pos_tmp += frag->data_size + 1;
frag++;
}
// compute offset inside the fragment
pos_frag = atpos - pos_tmp;
// read data fragment after fragment
pos_buf = 0;
while(j<n) {
// chunk_size is the number of bytes to read in the current fragment
size_t chunk_size = frag->data_size + 1 - pos_frag;
size_t toread = (j+chunk_size) < n ? chunk_size : n-j;
memcpy((char*)dest + pos_buf, (char*)(smemfs_prim_get_frag_data(frag)) + pos_frag, toread);
j += toread;
pos_buf += toread;
if(toread == chunk_size) {
// full fragment read, go to next
pos_frag = 0;
frag++;
}
else pos_frag += toread;
}
return max_read;
}
ssize_t smemfs_read (struct file *filep, void *dest, size_t len) {
......@@ -35,60 +101,13 @@ ssize_t smemfs_read (struct file *filep, void *dest, size_t len) {
return 0;
}
else {
int j, n;
size_t max_read;
struct smemfs_file_preheader *header;
struct smemfs_frag_header *frag;
size_t pos_frag;
size_t pos_tmp;
size_t pos_buf;
size_t file_size;
header = filep->inode->abstract;
frag = (void*)(header+1);
file_size = smemfs_prim_get_file_size(header);
max_read = file_size - filep->pos;
max_read = max_read < len ? max_read : len;
n = filep->pos + max_read;
j = filep->pos;
// TODO check everything
// look for fragment containing the first byte
pos_tmp = 0;
while(pos_tmp + frag->data_size+1 <= filep->pos) {
pos_tmp += frag->data_size + 1;
frag++;
}
ssize_t ret;
// compute offset inside the fragment
pos_frag = filep->pos - pos_tmp;
// read data fragment after fragment
pos_buf = 0;
while(j<n) {
// chunk_size is the number of bytes to read in the current fragment
size_t chunk_size = frag->data_size + 1 - pos_frag;
size_t toread = (j+chunk_size) < n ? chunk_size : n-j;
memcpy((char*)dest + pos_buf, (char*)(smemfs_prim_get_frag_data(frag)) + pos_frag, toread);
j += toread;
pos_buf += toread;
if(toread == chunk_size) {
// full fragment read, go to next
pos_frag = 0;
frag++;
}
else pos_frag += toread;
}
ret = smemfs_read_data(filep->inode->abstract, dest, len, filep->pos);
if(ret > 0)
filep->pos += ret;
filep->pos += max_read;
//if(filep->pos >= file_size) filep->flags |= _FILE_EOF_REATCHED;
return max_read;
return ret;
}
}
......@@ -124,3 +143,72 @@ off_t smemfs_lseek (struct file *filep, off_t offset, int whence) {
return filep->pos;
}
/**
 * file_operations.map_area implementation for SMEM FS.
 * Accepts the mapping, installs the SMEM area operations, and takes a
 * reference on the opened file (dropped in smemfs_area_release()).
 * Always returns 0 (mapping accepted).
 */
int smemfs_map_area(struct file *filep, struct mem_area *area) {
	// take a reference on the opened file; mirrored by vfs_close() in
	// smemfs_area_release()
	filep->count++;

	area->file.filep = filep;
	area->ops = &smemfs_mem_ops;
	return 0;
}
union pm_page smemfs_area_pagefault(struct mem_area *area, void *addr_fault) {
size_t readsize;
void *pmaddr;
union pm_page pmpage;
size_t offset = addr_fault - area->address;
// allocate a physical memory page
// FIXME UNCACHED due to temporary hack to be sure nothing is retained in cache
pmaddr = arch_pm_get_free_page(MEM_PM_UNCACHED);
if(pmaddr != NULL) {
pmpage.private.ppn = PM_PHYSICAL_PAGE(pmaddr);
pmpage.private.flags = MEM_PAGE_PRIVATE | MEM_PAGE_VALID; // | MEM_PAGE_CACHED;
}
// FIXME what to do if out of memory?
// fill with zeroes if needed
readsize = mem_area_fill_partial_page(area, offset, pmaddr);
if(readsize > 0) {
struct inode *inode = area->file.filep->inode;
size_t absoffset = area->file.base_offset + offset;
ssize_t nbread;
nbread = smemfs_read_data(inode->abstract, pmaddr, readsize, absoffset);
if(nbread != readsize) {
printk(LOG_ERR, "smemfs_area: failed loading %d bytes from offset 0x%x"
" [absolute 0x%x] (read returns %d)\n",
readsize, offset, absoffset, nbread);
}
else {
printk(LOG_DEBUG, "smemfs_area: loaded %d bytes @%p from file\n",
readsize, pmaddr);
}
}
return pmpage;
}
/*
int smemfs_area_resize(struct mem_area *area, size_t new_size) {
return -1;
}
*/
/**
 * Release callback for SMEM-backed areas: drop the reference on the opened
 * file that was taken when the area was mapped (see smemfs_map_area()).
 */
void smemfs_area_release(struct mem_area *area) {
	struct file *mapped = area->file.filep;

	// release the file, by closing it at VFS level
	vfs_close(mapped);
}
int smemfs_area_duplicate(struct mem_area *orig, struct mem_area *copy) {
copy->file.filep->count++;
return 0;
}
......@@ -4,6 +4,7 @@
#include <fs/file_operations.h>
#include <fs/file.h>
#include <interface/fixos/stat.h>
#include <sys/mem_area.h>
/**
* Implementation of file operations for the Casio SMEM FS.
......@@ -11,6 +12,9 @@
extern const struct file_operations smemfs_file_operations;
// for memory-mapped files
extern const struct mem_area_ops smemfs_mem_ops;
int smemfs_release (struct file *filep);
......@@ -19,4 +23,15 @@ ssize_t smemfs_read (struct file *filep, void *dest, size_t len);
off_t smemfs_lseek (struct file *filep, off_t offset, int whence);
// memory-mapped operations
int smemfs_map_area(struct file *filep, struct mem_area *area);
union pm_page smemfs_area_pagefault(struct mem_area *area, void *addr_fault);
int smemfs_area_resize(struct mem_area *area, const struct mem_area *new_area);
void smemfs_area_release(struct mem_area *area);
int smemfs_area_duplicate(struct mem_area *orig, struct mem_area *copy);
#endif //_FS_SMEMFS_FILE_H
......@@ -15,6 +15,8 @@
struct file;
struct mem_area;
struct file_operations {
/**
* Release the file opened instance ("close" it).
......@@ -47,6 +49,24 @@ struct file_operations {
* data is specific to command and device, may be not used.
*/
int (*ioctl) (struct file *filep, int cmd, void *data);
/**
* Create a memory map of this object in memory.
* Devices may use it as they want to, for example to provide big buffers
* shared with userland.
* area should be set with all non-private fields having a valid value,
* which is not very well defined...
* At least field ops is not expected to be set, but the interface is not
* well designed for now.
* TODO either use a 'hints' argument with mem_area-like type, or define
* exactly what should be set and what is set by this function itself
*
* Set to NULL if the device or filesystem does not implement memory-mapped
* areas.
*
* Return 0 if mapping is accepted, negative value else.
*/
int (*map_area) (struct file *filep, struct mem_area *area);
};
#endif //_FS_FILE_OPERATIONS_H
......
......@@ -7,6 +7,7 @@
#include "file_system.h"
#include "file_operations.h"
#include "vfs_directory.h"
#include <sys/mem_area.h>
// pool allocation for file struct
......@@ -157,3 +158,38 @@ int vfs_fstat(struct file *filep, struct stat *buf) {
return -1;
}
}
/**
 * Map size bytes of the opened file at the given address of proc's address
 * space, starting at the given offset inside the file.
 *
 * flags carries the area permissions and behaviour (MEM_AREA_* constants);
 * infile_size is used only when MEM_AREA_PARTIAL is set, and gives the number
 * of bytes really backed by the file (the area is zero-filled past it).
 *
 * Returns 0 on success, -EINVAL if the file does not support memory mapping,
 * -ENOMEM if no area structure can be allocated, or the error returned by the
 * file-specific map_area()/mem_area_insert().
 */
int vfs_map_area(struct file *filep, size_t size, size_t offset, void *address,
		int flags, size_t infile_size, struct process *proc)
{
	int ret = -EINVAL;

	if(filep->op->map_area != NULL) {
		struct mem_area *area;

		area = mem_area_alloc();
		if(area == NULL) {
			ret = -ENOMEM;
		}
		else {
			// prepare area struct from arguments
			area->address = address;
			area->max_size = size;
			area->flags = flags | MEM_AREA_TYPE_FILE;
			area->file.base_offset = offset;
			area->file.infile_size = (flags & MEM_AREA_PARTIAL) ? infile_size : size;
			area->file.filep = filep;

			ret = filep->op->map_area(filep, area);
			if(ret == 0) {
				ret = mem_area_insert(proc, area);
				if(ret != 0) {
					// insertion failed (e.g. overlapping area): undo the
					// file-specific mapping (which may hold a file
					// reference) before freeing, to avoid leaking the area
					if(area->ops != NULL && area->ops->area_release != NULL)
						area->ops->area_release(area);
					mem_area_free(area);
				}
			}
			else {
				// failed, free area (do not *release* it, free directly)
				mem_area_free(area);
			}
		}
	}
	return ret;
}
......@@ -90,4 +90,18 @@ int vfs_ioctl(struct file *filep, int cmd, void *data);
int vfs_fstat(struct file *filep, struct stat *buf);
struct process;
/**
* Map size bytes of the object in memory, from given offset, to given address
* in the address space of a given process.
* Area permissions, and additional flags, may be provided using constants
* from sys/mem_area.h
* infile_size is meaningful only if flags contains MEM_AREA_PARTIAL
*
* TODO make a 'hints' structure to reduce number of arguments?
*/
int vfs_map_area(struct file *filep, size_t size, size_t offset, void *address,
int flags, size_t infile_size, struct process *proc);
#endif //_FS_VFS_FILE_H
......@@ -38,6 +38,7 @@
#include "sys/console.h"
#include "sys/sysctl.h"
#include "sys/mem_area.h"
extern char cmdargs_begin;
extern char cmdargs_end;
......@@ -131,6 +132,7 @@ void init() {
//DBG_WAIT;
// Initializing VFS and device sub-sytems, mount platform filesystems,
// register platform devices...
......@@ -215,6 +217,10 @@ void init() {
test_vfs();
*/
// memory area subsystem
mem_area_init();
process_init();
sched_init();
test_process();
......
......@@ -24,6 +24,7 @@
#define ESRCH 16
#define EACCES 17
#define EPERM 18
#define ENOMEM 19
#endif //_FIXOS_INTERFACE_ERRNO_H
......@@ -8,6 +8,7 @@
#include <utils/log.h>
#include <utils/strutils.h>
#include <sys/user.h>
#include <sys/mem_area.h>
// check static expected informations of the given header, returns non-zero
......@@ -41,21 +42,14 @@ int elfloader_load(struct file *filep, struct process *dest) {
struct elf_header header;
if(elfloader_load_all(filep, NULL, dest, &header, ELF_LOAD_SET_BRK) == 0) {
union pm_page page;
void *vmstack;
void *pageaddr;
// alloc physical page and set it as the VM process stack
vmstack = arch_pm_get_free_page(MEM_PM_CACHED);
if(vmstack == NULL) {
printk(LOG_ERR, "elfloader: no physical page\n");
return -1;
}
page.private.ppn = PM_PHYSICAL_PAGE(vmstack);
page.private.flags = MEM_PAGE_PRIVATE | MEM_PAGE_VALID | MEM_PAGE_CACHED;
mem_insert_page(& dest->dir_list , &page,
(void*)(ARCH_UNEWPROC_DEFAULT_STACK - PM_PAGE_BYTES));
// set user stack (the size used *is* a maximum, not the allocated one)
struct mem_area *user_stack;
user_stack = mem_area_make_anon((void*)(ARCH_UNEWPROC_DEFAULT_STACK
- PROCESS_DEFAULT_STACK_SIZE), PROCESS_DEFAULT_STACK_SIZE);
mem_area_insert(dest, user_stack);
// set kernel stack address, for now any physical memory
pageaddr = arch_pm_get_free_page(MEM_PM_CACHED);
......@@ -150,7 +144,6 @@ int elfloader_load_all(struct file *filep, void *offset, struct process *dest,
if(flags & ELF_LOAD_SET_BRK) {
dest->initial_brk = cur_brk;
dest->current_brk = cur_brk;
}
}
}
......@@ -192,47 +185,18 @@ int check_elf_header(struct elf_header *h) {
}
#include <fs/casio_smemfs/file.h>
int elfloader_load_segment(struct file *filep, void *offset,
const struct elf_prog_header *ph, struct process *dest)
{
if(ph->vaddr % PM_PAGE_BYTES == 0) {
int i;
void *vm_segaddr;
vfs_lseek(filep, ph->offset, SEEK_SET);
vm_segaddr = offset + ph->vaddr;
for(i=0; i<ph->memsz; i += PM_PAGE_BYTES, vm_segaddr += PM_PAGE_BYTES) {
ssize_t nbread;
ssize_t toread;
union pm_page page;
void *pageaddr;
int prot;
pageaddr = arch_pm_get_free_page(MEM_PM_CACHED);
if(pageaddr == NULL) {
printk(LOG_ERR, "elfloader: no physical page\n");
// TODO really dirty way to exit, need to clean all done job!
return -1;
}
page.private.ppn = PM_PHYSICAL_PAGE(pageaddr);
page.private.flags = MEM_PAGE_PRIVATE | MEM_PAGE_VALID | MEM_PAGE_CACHED;
// if we have a page, copy data from file
toread = ph->filesz - i;
toread = toread > PM_PAGE_BYTES ? PM_PAGE_BYTES : toread;
if(toread > 0) {
nbread = vfs_read(filep, pageaddr, PM_PAGE_BYTES);
printk(LOG_DEBUG, "[I] %d bytes read from ELF.\n", nbread);
}
mem_insert_page(& dest->dir_list , &page, vm_segaddr);
printk(LOG_DEBUG, "[I] ELF load VM (%p -> %p)\n", pageaddr, vm_segaddr);
}
return 0;
// TODO use real permissions from ELF
prot = MEM_AREA_PROT_R | MEM_AREA_PROT_W | MEM_AREA_PROT_X;
return vfs_map_area(filep, ph->memsz, ph->offset, offset + ph->vaddr,
MEM_AREA_PARTIAL | prot, ph->filesz, dest);
}
else {
printk(LOG_ERR, "elfloader: segment begin not page-aligned.\n");
......@@ -261,6 +225,8 @@ int elfloader_load_dynlib(const char *soname, struct process *dest) {
struct elf_section_header symtab;
printk(LOG_DEBUG, "elfloader: library '%s' loaded!\n", absname);
// FIXME don't work anymore with the new memory area system, should
// be re-written!
if(elf_get_symtab(lib, &header, &symtab) == 0) {
struct elf_symbol sym;
uint32 *reloc_got_b = NULL;
......
#include "mem_area.h"
#include <sys/process.h>
#include <utils/pool_alloc.h>
#include <fs/vfs_file.h>
#include <utils/log.h>
#include <utils/strutils.h>
// pool allocation data
static struct pool_alloc _mem_area_pool = POOL_INIT(struct mem_area);
// anonymous area operations
static union pm_page anon_area_pagefault(struct mem_area *area, void *addr_fault);
static int anon_area_resize(struct mem_area *area, size_t new_size);
//static void anon_area_release(struct mem_area *area);
static const struct mem_area_ops _anon_area_ops = {
.area_pagefault = anon_area_pagefault,
.area_resize = anon_area_resize
};
/**
 * Initialize the memory area subsystem (currently only reports the pool
 * allocator capacity).
 */
void mem_area_init() {
	const int areas_per_page = _mem_area_pool.perpage;

	printk(LOG_DEBUG, "mem_area: area/page=%d\n", areas_per_page);
}
/**
 * Allocate a new memory area from the pool and reset its generic fields.
 * Returns NULL if the pool is exhausted.
 */
struct mem_area *mem_area_alloc() {
	struct mem_area *ret;

	ret = pool_alloc(&_mem_area_pool);
	if(ret != NULL) {
		ret->flags = 0;
		ret->address = NULL;
		ret->max_size = 0;
		// reset ops too, so no stale operations pointer survives from a
		// previous use of this pool entry
		ret->ops = NULL;
	}
	return ret;
}
/**
 * Return an area structure to the pool allocator.
 * The caller is responsible for releasing any area-specific resources
 * (e.g. through ops->area_release) before freeing it.
 */
void mem_area_free(struct mem_area *area) {
	pool_free(&_mem_area_pool, area);
}
/**
 * Build an anonymous (non file-backed) area of size bytes at virtual address
 * vmaddr, with read/write/execute protection.
 * Returns NULL if no area structure can be allocated.
 */
struct mem_area *mem_area_make_anon(void *vmaddr, size_t size) {
	struct mem_area *area = mem_area_alloc();

	if(area == NULL)
		return NULL;

	// nothing more, for now...
	area->flags = MEM_AREA_TYPE_ANON | MEM_AREA_PROT_R | MEM_AREA_PROT_W
		| MEM_AREA_PROT_X;
	area->address = vmaddr;
	area->max_size = size;
	area->ops = &_anon_area_ops;
	return area;
}
/**
 * Find the area of proc that contains the given virtual address.
 * Returns NULL if the address does not belong to any area.
 */
struct mem_area *mem_area_find(struct process *proc, void *address) {
	struct list_head *cur;

	// the area list is not sorted: linear scan over every area
	list_for_each(cur, &(proc->mem_areas)) {
		struct mem_area *candidate = container_of(cur, struct mem_area, list);

		if(address >= candidate->address
				&& address < (candidate->address + candidate->max_size))
			return candidate;
	}
	return NULL;
}
/**
* internal function, check if an area is defined in the given address range
* Return 0 if this range is empty, 1 if an area exists.
*/
/**
 * Internal helper: tell whether an area intersects the address range
 * [addr_begin, addr_begin + size).
 * Returns 1 if some area intersects the range, 0 if the range is free.
 */
static int mem_area_check_range(struct process *proc, void *addr_begin, size_t size) {
	struct list_head *cur;
	void *addr_end = addr_begin + size;

	list_for_each(cur, &(proc->mem_areas)) {
		struct mem_area *area = container_of(cur, struct mem_area, list);

		// interval intersection test against [addr_begin, addr_end)
		if(area->address < addr_end
				&& addr_begin < (area->address + area->max_size))
			return 1;
	}
	return 0;
}
/**
 * Add an area to the given process address space.
 * Returns 0 on success, -1 if the area overlaps an already-inserted one.
 */
int mem_area_insert(struct process *proc, struct mem_area *area) {
	// refuse any area overlapping an existing one
	if(mem_area_check_range(proc, area->address, area->max_size)) {
		printk(LOG_ERR, "mem_area: unable to insert area (overlay)\n");
		return -1;
	}

	// the list is not sorted, insert in front
	list_push_front(& proc->mem_areas, & area->list);
	return 0;
}
/**
 * Resize the given area to new_size bytes, using the area-specific resize
 * callback when one is provided (the callback may adjust or refuse the
 * requested size).
 * When the area shrinks, physical pages no longer covered by it are released
 * from proc's page directory.
 * Returns 0 on success, or the callback's error code.
 */
int mem_area_resize(struct mem_area *area, size_t new_size, struct process *proc) {
	int ret = 0;
	size_t old_size = area->max_size;

	// first, call area-specific resize callback if any
	if(area->ops != NULL && area->ops->area_resize != NULL)
		ret = area->ops->area_resize(area, new_size);
	else
		area->max_size = new_size;

	// the callback may have clamped the requested size
	new_size = area->max_size;

	// if no error occurs and the size decreased, release unneeded pages
	if(ret == 0 && area->max_size < old_size) {
		void *old_last_page = MEM_PAGE_BEGINING(area->address + old_size);
		void *new_last_page = MEM_PAGE_BEGINING(area->address + new_size);
		union pm_page *page;

		// NOTE(review): when new_size is not page-aligned, the first page
		// released below still contains live bytes of the area — confirm
		// this is intended before relying on shrink
		// (was hard-coded `+= 1024`; use the page-size constant instead)
		for( ; new_last_page < old_last_page; new_last_page += PM_PAGE_BYTES) {
			printk(LOG_DEBUG, "mem_area: release page @%p\n", new_last_page);
			page = mem_find_page(proc->dir_list, new_last_page);
			if(page != NULL) {
				mem_release_page(page);
			}
		}
	}
	return ret;
}
size_t mem_area_fill_partial_page(struct mem_area *area, size_t offset, void *dest) {
size_t readsize = PM_PAGE_BYTES;
if(area->flags & MEM_AREA_TYPE_FILE && area->flags & MEM_AREA_PARTIAL) {
// fill with 0 if needed
if(area->file.infile_size < offset + readsize) {
size_t zeroed_size;