#include <kern/memory_manager.h>
#include <kern/file_manager.h>
#include <kern/user_environment.h>
#include <kern/file_manager.h>
extern uint32 number_of_frames;
extern uint32 size_of_base_mem;
extern uint32 size_of_extended_mem;
inline uint32 env_table_ws_get_size(struct Env *e);
inline void env_table_ws_invalidate(struct Env* e, uint32 virtual_address);
inline void env_table_ws_set_entry(struct Env* e, uint32 entry_index, uint32 virtual_address);
inline void env_table_ws_clear_entry(struct Env* e, uint32 entry_index);
inline uint32 env_table_ws_get_virtual_address(struct Env* e, uint32 entry_index);
inline uint32 env_table_ws_is_entry_empty(struct Env* e, uint32 entry_index);
void env_table_ws_print(struct Env *curenv);
inline uint32 pd_is_table_used(struct Env *e, uint32 virtual_address);
inline void pd_set_table_unused(struct Env *e, uint32 virtual_address);
inline void pd_clear_page_dir_entry(struct Env *e, uint32 virtual_address);
uint32* ptr_page_directory;
uint32 phys_page_directory;
struct Frame_Info* frames_info;
struct Frame_Info* disk_frames_info;
struct Linked_List free_frame_list;
struct Linked_List modified_frame_list;
void initialize_kernel_VM()
ptr_page_directory = boot_allocate_space(PAGE_SIZE, PAGE_SIZE);
memset(ptr_page_directory, 0, PAGE_SIZE);
phys_page_directory = STATIC_KERNEL_PHYSICAL_ADDRESS(ptr_page_directory);
boot_map_range(ptr_page_directory, KERNEL_STACK_TOP - KERNEL_STACK_SIZE, KERNEL_STACK_SIZE, STATIC_KERNEL_PHYSICAL_ADDRESS(ptr_stack_bottom), PERM_WRITEABLE) ;
unsigned long long sva = KERNEL_BASE;
for (;sva < 0xFFFFFFFF; sva += PTSIZE)
boot_get_page_table(ptr_page_directory, (uint32)sva, 1);
array_size = number_of_frames * sizeof(struct Frame_Info) ;
frames_info = boot_allocate_space(array_size, PAGE_SIZE);
memset(frames_info, 0, array_size);
uint32 disk_array_size = PAGES_PER_FILE * sizeof(struct Frame_Info);
disk_frames_info = boot_allocate_space(disk_array_size , PAGE_SIZE);
memset(disk_frames_info , 0, disk_array_size);
setup_listing_to_all_page_tables_entries();
cprintf("Max Envs = %d\n",NENV);
int envs_size = NENV * sizeof(struct Env) ;
envs = boot_allocate_space(envs_size, PAGE_SIZE);
memset(envs , 0, envs_size);
boot_map_range(ptr_page_directory, UENVS, envs_size, STATIC_KERNEL_PHYSICAL_ADDRESS(envs), PERM_USER) ;
ptr_page_directory[PDX(UENVS)] = ptr_page_directory[PDX(UENVS)]|(PERM_USER|(PERM_PRESENT & (~PERM_WRITEABLE)));
boot_map_range(ptr_page_directory, KERNEL_BASE, (uint32)ptr_free_mem - KERNEL_BASE, 0, PERM_WRITEABLE) ;
boot_map_range(ptr_page_directory, KERNEL_BASE, 0xFFFFFFFF - KERNEL_BASE, 0, PERM_WRITEABLE) ;
memory_scarce_threshold_percentage = DEFAULT_MEM_SCARCE_PERCENTAGE;
//==============================================================
// Boot-time bump allocator: return 'size' bytes aligned to 'align'.
// Memory handed out here is never freed; used only before the real
// allocators exist.
//==============================================================
void* boot_allocate_space(uint32 size, uint32 align)
{
	extern char end_of_kernel[];

	// First call only: start allocating right after the kernel image.
	// (fix) The original reset the cursor unconditionally and never
	// advanced it, so every call returned the exact same region.
	if (ptr_free_mem == 0)
		ptr_free_mem = end_of_kernel;

	ptr_free_mem = ROUNDUP(ptr_free_mem, align);
	ptr_allocated_mem = ptr_free_mem;
	ptr_free_mem += size;	// reserve the block for this caller
	return ptr_allocated_mem;
}
//==============================================================
// Map [virtual_address, virtual_address+size) to physical memory
// starting at 'physical_address' with permissions 'perm', creating
// boot page tables as needed. Size is taken in PAGE_SIZE steps.
//==============================================================
void boot_map_range(uint32 *ptr_page_directory, uint32 virtual_address, uint32 size, uint32 physical_address, int perm)
{
	uint32 i;	// (fix) loop counter was undeclared
	for (i = 0; i < size; i += PAGE_SIZE)
	{
		uint32 *ptr_page_table = boot_get_page_table(ptr_page_directory, virtual_address, 1);
		uint32 index_page_table = PTX(virtual_address);
		ptr_page_table[index_page_table] = CONSTRUCT_ENTRY(physical_address, perm | PERM_PRESENT);
		physical_address += PAGE_SIZE;
		virtual_address += PAGE_SIZE;
	}
}
//==============================================================
// Boot-time page-directory walk: return the kernel-virtual address
// of the page table covering 'virtual_address'. If no table exists
// and 'create' is set, allocate one from boot memory and install it;
// otherwise return 0.
//==============================================================
uint32* boot_get_page_table(uint32 *ptr_page_directory, uint32 virtual_address, int create)
{
	uint32 index_page_directory = PDX(virtual_address);
	uint32 page_directory_entry = ptr_page_directory[index_page_directory];
	uint32 phys_page_table = EXTRACT_ADDRESS(page_directory_entry);
	uint32 *ptr_page_table = STATIC_KERNEL_VIRTUAL_ADDRESS(phys_page_table);

	if (phys_page_table == 0)
	{
		if (create == 0)
			return 0;	// (fix) caller only probing: report "no table"
		ptr_page_table = boot_allocate_space(PAGE_SIZE, PAGE_SIZE);
		phys_page_table = STATIC_KERNEL_PHYSICAL_ADDRESS(ptr_page_table);
		ptr_page_directory[index_page_directory] = CONSTRUCT_ENTRY(phys_page_table, PERM_PRESENT | PERM_WRITEABLE);
		// NOTE(review): the new table is not zeroed here — confirm boot
		// memory is zero-filled, or add a memset.
	}
	return ptr_page_table;	// (fix) function had no return statement at all
}
extern void initialize_disk_page_file();
LIST_INIT(&free_frame_list);
LIST_INIT(&modified_frame_list);
frames_info[0].references = 1;
frames_info[1].references = 1;
frames_info[2].references = 1;
ptr_zero_page = (uint8*) KERNEL_BASE+PAGE_SIZE;
ptr_temp_page = (uint8*) KERNEL_BASE+2*PAGE_SIZE;
int range_end = ROUNDUP(PHYS_IO_MEM,PAGE_SIZE);
for (i = 3; i < range_end/PAGE_SIZE; i++)
initialize_frame_info(&(frames_info[i]));
LIST_INSERT_HEAD(&free_frame_list, &frames_info[i]);
for (i = PHYS_IO_MEM/PAGE_SIZE ; i < PHYS_EXTENDED_MEM/PAGE_SIZE; i++)
frames_info[i].references = 1;
range_end = ROUNDUP(STATIC_KERNEL_PHYSICAL_ADDRESS(ptr_free_mem), PAGE_SIZE);
for (i = PHYS_EXTENDED_MEM/PAGE_SIZE ; i < range_end/PAGE_SIZE; i++)
frames_info[i].references = 1;
for (i = range_end/PAGE_SIZE ; i < number_of_frames; i++)
initialize_frame_info(&(frames_info[i]));
LIST_INSERT_HEAD(&free_frame_list, &frames_info[i]);
initialize_disk_page_file();
void initialize_frame_info(struct Frame_Info *ptr_frame_info)
memset(ptr_frame_info, 0, sizeof(*ptr_frame_info));
extern void env_free(struct Env *e);
int allocate_frame(struct Frame_Info **ptr_frame_info)
*ptr_frame_info = LIST_FIRST(&free_frame_list);
if (*ptr_frame_info == NULL)
panic("ERROR: Kernel run out of memory... allocate_frame cannot find a free frame.\n");
LIST_REMOVE(&free_frame_list,*ptr_frame_info);
if((*ptr_frame_info)->isBuffered)
pt_clear_page_table_entry((*ptr_frame_info)->environment,(*ptr_frame_info)->va);
initialize_frame_info(*ptr_frame_info);
void free_frame(struct Frame_Info *ptr_frame_info)
initialize_frame_info(ptr_frame_info);
LIST_INSERT_HEAD(&free_frame_list, ptr_frame_info);
void decrement_references(struct Frame_Info* ptr_frame_info)
if (--(ptr_frame_info->references) == 0)
free_frame(ptr_frame_info);
//==============================================================
// Locate the page table mapping 'virtual_address'. On return
// *ptr_page_table holds its kernel-virtual address (or NULL).
// Returns TABLE_IN_MEMORY or TABLE_NOT_EXIST. If the table exists
// only in the page file, a table fault is triggered to load it.
//==============================================================
int get_page_table(uint32 *ptr_page_directory, const void *virtual_address, uint32 **ptr_page_table)
{
	uint32 page_directory_entry = ptr_page_directory[PDX(virtual_address)];

	if ((page_directory_entry & PERM_PRESENT) == PERM_PRESENT)
	{
		// Table is resident: translate its frame to a kernel VA (kheap
		// tables for user addresses, static mapping for kernel ones).
		if (USE_KHEAP && !CHECK_IF_KERNEL_ADDRESS(virtual_address))
			*ptr_page_table = (void *)kheap_virtual_address(EXTRACT_ADDRESS(page_directory_entry));
		else
			*ptr_page_table = STATIC_KERNEL_VIRTUAL_ADDRESS(EXTRACT_ADDRESS(page_directory_entry));
		return TABLE_IN_MEMORY;
	}
	else if (page_directory_entry != 0)
	{
		// Table exists but lives in the page file: fake a fault on the
		// address so the fault handler loads it, then re-read the entry.
		lcr2((uint32)virtual_address);
		fault_handler(NULL);	// NOTE(review): this call was lost in the flattened text — confirm
		page_directory_entry = ptr_page_directory[PDX(virtual_address)];
		if (USE_KHEAP && !CHECK_IF_KERNEL_ADDRESS(virtual_address))
			*ptr_page_table = (void *)kheap_virtual_address(EXTRACT_ADDRESS(page_directory_entry));
		else
			*ptr_page_table = STATIC_KERNEL_VIRTUAL_ADDRESS(EXTRACT_ADDRESS(page_directory_entry));
		return TABLE_IN_MEMORY;
	}

	// No table anywhere for this address.
	*ptr_page_table = 0;
	return TABLE_NOT_EXIST;	// (fix) both returns were missing
}
//==============================================================
// Allocate a page table from the kernel heap, install it in the
// directory (present | user | writeable), zero it, and return its
// kernel-virtual address. Panics if the kernel heap is exhausted.
//==============================================================
void * create_page_table(uint32 *ptr_page_directory, const uint32 virtual_address)
{
	uint32 *ptr_page_table = kmalloc(PAGE_SIZE);
	if (ptr_page_table == NULL)
		panic("NOT ENOUGH KERNEL HEAP SPACE");

	ptr_page_directory[PDX(virtual_address)] = CONSTRUCT_ENTRY(
			kheap_physical_address((unsigned int)ptr_page_table),
			PERM_PRESENT | PERM_USER | PERM_WRITEABLE);
	// Clear all entries of the fresh table.
	memset(ptr_page_table, 0, PAGE_SIZE);
	return ptr_page_table;	// (fix) callers use the returned table; return was missing
}
// Create a page table from a physical frame (static, non-kheap path):
// install it in the directory and return its kernel VA via out-param.
void __static_cpt(uint32 *ptr_page_directory, const uint32 virtual_address, uint32 **ptr_page_table)
{
	// allocate_frame panics on failure, so its result needs no check
	struct Frame_Info *ptr_new_frame_info;
	allocate_frame(&ptr_new_frame_info);

	uint32 phys_page_table = to_physical_address(ptr_new_frame_info);
	*ptr_page_table = STATIC_KERNEL_VIRTUAL_ADDRESS(phys_page_table);
	ptr_new_frame_info->references = 1;
	ptr_page_directory[PDX(virtual_address)] = CONSTRUCT_ENTRY(phys_page_table, PERM_PRESENT | PERM_USER | PERM_WRITEABLE);
	memset(*ptr_page_table, 0, PAGE_SIZE);
}
//==============================================================
// Map 'ptr_frame_info' at 'virtual_address' with permissions 'perm',
// creating the page table if needed. Remapping the same frame at the
// same address is a no-op; a different frame replaces the old
// mapping. Always returns 0.
//==============================================================
int map_frame(uint32 *ptr_page_directory, struct Frame_Info *ptr_frame_info, void *virtual_address, int perm)
{
	uint32 physical_address = to_physical_address(ptr_frame_info);
	uint32 *ptr_page_table = NULL;	// (fix) was undeclared; unused ptr_page_table2 removed

	if (get_page_table(ptr_page_directory, virtual_address, &ptr_page_table) == TABLE_NOT_EXIST)
	{
		// (fix) choose ONE creation path; the flattened original ran both.
		if (USE_KHEAP)
			ptr_page_table = create_page_table(ptr_page_directory, (uint32)virtual_address);
		else
			__static_cpt(ptr_page_directory, (uint32)virtual_address, &ptr_page_table);
	}

	uint32 page_table_entry = ptr_page_table[PTX(virtual_address)];
	if ((page_table_entry & PERM_PRESENT) == PERM_PRESENT)
	{
		if (EXTRACT_ADDRESS(page_table_entry) == physical_address)
			return 0;	// same frame already mapped here: nothing to do
		unmap_frame(ptr_page_directory, virtual_address);
	}

	ptr_frame_info->references++;
	ptr_page_table[PTX(virtual_address)] = CONSTRUCT_ENTRY(physical_address, perm | PERM_PRESENT);
	return 0;
}
//==============================================================
// Return the Frame_Info of the frame mapped at 'virtual_address', or
// 0 when no table or no mapping exists. *ptr_page_table is set to
// the table's kernel VA (or NULL) as a side effect.
//==============================================================
struct Frame_Info * get_frame_info(uint32 *ptr_page_directory, void *virtual_address, uint32 **ptr_page_table)
{
	get_page_table(ptr_page_directory, virtual_address, ptr_page_table);
	if ((*ptr_page_table) != 0)
	{
		uint32 page_table_entry = (*ptr_page_table)[PTX(virtual_address)];
		if (page_table_entry != 0)
			return to_frame_info(EXTRACT_ADDRESS(page_table_entry));
	}
	return 0;	// (fix) the fall-through return was missing
}
//==============================================================
// Remove the mapping at 'virtual_address': drop the frame reference
// (freeing it at zero), clear the PT entry, and invalidate the TLB.
// No-op when nothing is mapped there.
//==============================================================
void unmap_frame(uint32 *ptr_page_directory, void *virtual_address)
{
	uint32 *ptr_page_table;	// (fix) was undeclared
	struct Frame_Info *ptr_frame_info = get_frame_info(ptr_page_directory, virtual_address, &ptr_page_table);
	if (ptr_frame_info != 0)
	{
		// Diagnostic only: a buffered user frame should normally be
		// released through the buffering path, not here.
		if (ptr_frame_info->isBuffered && !CHECK_IF_KERNEL_ADDRESS((uint32)virtual_address))
			cprintf("Freeing BUFFERED frame at va %x!!!\n", virtual_address);
		decrement_references(ptr_frame_info);
		ptr_page_table[PTX(virtual_address)] = 0;
		tlb_invalidate(ptr_page_directory, virtual_address);
	}
}
//==============================================================
// Program-load-time variant of map_frame: maps 'ptr_frame_info' at
// 'virtual_address', creating the page table when the directory
// entry is still empty. Always returns 0.
//==============================================================
int loadtime_map_frame(uint32 *ptr_page_directory, struct Frame_Info *ptr_frame_info, void *virtual_address, int perm)
{
	uint32 physical_address = to_physical_address(ptr_frame_info);
	uint32 *ptr_page_table = NULL;	// (fix) was undeclared
	uint32 page_directory_entry = ptr_page_directory[PDX(virtual_address)];

	// Translate the (possibly empty) directory entry first ...
	if (USE_KHEAP && !CHECK_IF_KERNEL_ADDRESS(virtual_address))
		ptr_page_table = (uint32*)kheap_virtual_address(EXTRACT_ADDRESS(page_directory_entry));
	else
		ptr_page_table = STATIC_KERNEL_VIRTUAL_ADDRESS(EXTRACT_ADDRESS(page_directory_entry));

	// ... then create a fresh table if none exists yet.
	if (page_directory_entry == 0)
	{
		// (fix) one creation path only; flattened original ran both.
		if (USE_KHEAP)
			ptr_page_table = create_page_table(ptr_page_directory, (uint32)virtual_address);
		else
			__static_cpt(ptr_page_directory, (uint32)virtual_address, &ptr_page_table);
	}

	ptr_frame_info->references++;
	ptr_page_table[PTX(virtual_address)] = CONSTRUCT_ENTRY(physical_address, perm | PERM_PRESENT);
	return 0;	// (fix) non-void function was missing its return
}
//==============================================================
// Lazily reserve 'size' bytes at 'virtual_address' for env 'e' by
// adding empty placeholder pages to its page file; no frames are
// mapped here — the fault handler materializes them on first touch.
//==============================================================
void allocateMem(struct Env* e, uint32 virtual_address, uint32 size)
{
	uint32 va = ROUNDDOWN(virtual_address, PAGE_SIZE);
	uint32 num_bytes = ROUNDUP(size, PAGE_SIZE);
	uint32 offset;
	for (offset = 0; offset < num_bytes; offset += PAGE_SIZE)
	{
		pf_add_empty_env_page(e, va, 0);
		va += PAGE_SIZE;
	}
}
//==============================================================
// Release [virtual_address, virtual_address+size) of env 'e': drop
// each page from the working set and unmap its frame, then free any
// page table in the range that has no present entries left.
// NOTE(review): page-file copies are NOT removed here — confirm
// whether pf_remove_env_page() should also be called per page.
//==============================================================
void freeMem(struct Env* e, uint32 virtual_address, uint32 size)
{
	uint32 start = ROUNDDOWN(virtual_address, PAGE_SIZE);
	uint32 end = start + ROUNDUP(size, PAGE_SIZE);
	uint32 va;

	// 1) Remove every page in range from the WS and unmap it.
	for (va = start; va < end; va += PAGE_SIZE)
	{
		uint32 i;
		for (i = 0; i < e->page_WS_max_size; i++)
		{
			// (fix) scan WS *indices* for a NON-empty entry matching this
			// page; the original indexed the WS with byte addresses and
			// its empty-check was inverted (cleared only empty slots).
			if (env_page_ws_is_entry_empty(e, i) == 0 &&
				ROUNDDOWN(e->ptr_pageWorkingSet[i].virtual_address, PAGE_SIZE) == va)
			{
				env_page_ws_clear_entry(e, i);
				break;
			}
		}
		unmap_frame(e->env_page_directory, (void*)va);
	}

	// 2) Free every page table in range that is now completely empty.
	//    (fix) the original inspected only the first table and freed it
	//    unconditionally, discarding the presence bit it had computed.
	for (va = ROUNDDOWN(start, PAGE_SIZE*1024); va < end; va += PAGE_SIZE*1024)
	{
		uint32 *ptr_page_table = NULL;
		get_page_table(e->env_page_directory, (void*)va, &ptr_page_table);
		if (ptr_page_table == NULL)
			continue;

		int any_present = 0;
		int idx;
		for (idx = 0; idx < 1024; idx++)
		{
			if (ptr_page_table[idx] & PERM_PRESENT)
			{
				any_present = 1;
				break;
			}
		}
		if (!any_present)
		{
			// (fix) use the existing VA->PA macro instead of the ad-hoc
			// 'ptr - KERNEL_BASE' arithmetic.
			struct Frame_Info *table_frame_info = to_frame_info(STATIC_KERNEL_PHYSICAL_ADDRESS(ptr_page_table));
			table_frame_info->references = 0;
			free_frame(table_frame_info);
			e->env_page_directory[PDX(va)] = 0;
			tlb_invalidate(e->env_page_directory, (void*)va);
		}
	}
}
// Buffered variant of freeMem: intentionally left unimplemented.
void __freeMem_with_buffering(struct Env* e, uint32 virtual_address, uint32 size)
{
	panic("this function is not required...!!");
}
// Relocate a memory range of env 'e': not implemented yet.
void moveMem(struct Env* e, uint32 src_virtual_address, uint32 dst_virtual_address, uint32 size)
{
	panic("moveMem() is not implemented yet...!!");
}
//==============================================================
// Count how many NEW frames mapping [start, start+size) would need:
// one per missing page table plus one per not-yet-mapped page.
//==============================================================
uint32 calculate_required_frames(uint32* ptr_page_directory, uint32 start_virtual_address, uint32 size)
{
	LOG_STATMENT(cprintf("calculate_required_frames: Starting at address %x",start_virtual_address));

	// Tables that would have to be created for the range.
	uint32 number_of_tables = 0;
	uint32 current_virtual_address = ROUNDDOWN(start_virtual_address, PAGE_SIZE*1024);
	for (; current_virtual_address < (start_virtual_address + size); current_virtual_address += PAGE_SIZE*1024)
	{
		uint32 *ptr_page_table = NULL;
		get_page_table(ptr_page_directory, (void*) current_virtual_address, &ptr_page_table);
		if (ptr_page_table == NULL)
			number_of_tables++;	// (fix) counter was never incremented
	}

	// Pages in the range that are not yet backed by a frame.
	uint32 number_of_pages = 0;
	current_virtual_address = ROUNDDOWN(start_virtual_address, PAGE_SIZE);
	for (; current_virtual_address < (start_virtual_address + size); current_virtual_address += PAGE_SIZE)
	{
		uint32 *ptr_page_table = NULL;
		if (get_frame_info(ptr_page_directory, (void*) current_virtual_address, &ptr_page_table) == 0)
			number_of_pages++;	// (fix) counter was never incremented
	}

	LOG_STATMENT(cprintf("calculate_required_frames: Done!"));
	return number_of_tables + number_of_pages;
}
struct freeFramesCounters calculate_available_frames()
uint32 totalFreeUnBuffered = 0 ;
uint32 totalFreeBuffered = 0 ;
uint32 totalModified = 0 ;
LIST_FOREACH(ptr, &free_frame_list)
LIST_FOREACH(ptr, &modified_frame_list)
struct freeFramesCounters counters ;
counters.freeBuffered = totalFreeBuffered ;
counters.freeNotBuffered = totalFreeUnBuffered ;
counters.modified = totalModified;
uint32 calculate_free_frames()
return LIST_SIZE(&free_frame_list);
inline uint32 env_page_ws_get_size(struct Env *e)
for(;i<e->page_WS_max_size; i++) if(e->ptr_pageWorkingSet[i].empty == 0) counter++;
// Remove the working-set entry (if any) holding the page that
// contains 'virtual_address'.
inline void env_page_ws_invalidate(struct Env* e, uint32 virtual_address)
{
	uint32 i;	// (fix) was undeclared
	for (i = 0; i < e->page_WS_max_size; i++)
	{
		// (fix) skip empty slots: their cleared virtual_address of 0
		// must not match a lookup for page 0.
		if (e->ptr_pageWorkingSet[i].empty == 0 &&
			ROUNDDOWN(e->ptr_pageWorkingSet[i].virtual_address, PAGE_SIZE) == ROUNDDOWN(virtual_address, PAGE_SIZE))
		{
			env_page_ws_clear_entry(e, i);
			break;	// each page appears at most once in the WS
		}
	}
}
// Fill WS slot 'entry_index' with the page containing
// 'virtual_address', stamping it as newest (MSB-set time stamp).
inline void env_page_ws_set_entry(struct Env* e, uint32 entry_index, uint32 virtual_address)
{
	assert(entry_index >= 0 && entry_index < e->page_WS_max_size);
	assert(virtual_address >= 0 && virtual_address < USER_TOP);
	e->ptr_pageWorkingSet[entry_index].empty = 0;
	e->ptr_pageWorkingSet[entry_index].time_stamp = 0x80000000;
	e->ptr_pageWorkingSet[entry_index].virtual_address = ROUNDDOWN(virtual_address, PAGE_SIZE);
}
// Mark WS slot 'entry_index' empty and wipe its address/time stamp.
inline void env_page_ws_clear_entry(struct Env* e, uint32 entry_index)
{
	assert(entry_index >= 0 && entry_index < (e->page_WS_max_size));
	e->ptr_pageWorkingSet[entry_index].empty = 1;
	e->ptr_pageWorkingSet[entry_index].virtual_address = 0;
	e->ptr_pageWorkingSet[entry_index].time_stamp = 0;
}
// Time stamp of a page-WS slot (consumed by replacement policies).
inline uint32 env_page_ws_get_time_stamp(struct Env* e, uint32 entry_index)
{
	assert(entry_index >= 0 && entry_index < (e->page_WS_max_size));
	return e->ptr_pageWorkingSet[entry_index].time_stamp;
}
// Page-aligned virtual address stored in a page-WS slot.
inline uint32 env_page_ws_get_virtual_address(struct Env* e, uint32 entry_index)
{
	assert(entry_index >= 0 && entry_index < (e->page_WS_max_size));
	uint32 stored = e->ptr_pageWorkingSet[entry_index].virtual_address;
	return ROUNDDOWN(stored, PAGE_SIZE);
}
// Whether the page-WS slot is vacant (non-zero when empty).
inline uint32 env_page_ws_is_entry_empty(struct Env* e, uint32 entry_index)
{
	return e->ptr_pageWorkingSet[entry_index].empty;
}
void env_page_ws_print(struct Env *curenv)
for(i=0; i< (curenv->page_WS_max_size); i++ )
if (curenv->ptr_pageWorkingSet[i].empty)
cprintf("EMPTY LOCATION");
if(i==curenv->page_last_WS_index )
uint32 virtual_address = curenv->ptr_pageWorkingSet[i].virtual_address;
uint32 time_stamp = curenv->ptr_pageWorkingSet[i].time_stamp;
uint32 perm = pt_get_page_permissions(curenv, virtual_address) ;
char isModified = ((perm&PERM_MODIFIED) ? 1 : 0);
char isUsed= ((perm&PERM_USED) ? 1 : 0);
char isBuffered= ((perm&PERM_BUFFERED) ? 1 : 0);
cprintf("address @ %d = %x",i, curenv->ptr_pageWorkingSet[i].virtual_address);
cprintf(", used= %d, modified= %d, buffered= %d, time stamp= %x", isUsed, isModified, isBuffered, time_stamp) ;
if(i==curenv->page_last_WS_index )
void env_table_ws_print(struct Env *curenv)
cprintf("---------------------------------------------------\n");
for(i=0; i< __TWS_MAX_SIZE; i++ )
if (curenv->__ptr_tws[i].empty)
cprintf("EMPTY LOCATION");
if(i==curenv->table_last_WS_index )
uint32 virtual_address = curenv->__ptr_tws[i].virtual_address;
cprintf("env address at %d = %x",i, curenv->__ptr_tws[i].virtual_address);
cprintf(", used bit = %d", pd_is_table_used(curenv, virtual_address));
if(i==curenv->table_last_WS_index )
inline uint32 env_table_ws_get_size(struct Env *e)
for(;i<__TWS_MAX_SIZE; i++) if(e->__ptr_tws[i].empty == 0) counter++;
// Remove the table-WS entry (if any) covering the 4MB region that
// contains 'virtual_address'.
inline void env_table_ws_invalidate(struct Env* e, uint32 virtual_address)
{
	uint32 i;	// (fix) was undeclared
	for (i = 0; i < __TWS_MAX_SIZE; i++)
	{
		// (fix) skip empty slots so their cleared address 0 can't match.
		if (e->__ptr_tws[i].empty == 0 &&
			ROUNDDOWN(e->__ptr_tws[i].virtual_address, PAGE_SIZE*1024) == ROUNDDOWN(virtual_address, PAGE_SIZE*1024))
		{
			env_table_ws_clear_entry(e, i);
			break;	// each table appears at most once in the WS
		}
	}
}
// Fill table-WS slot 'entry_index' with the 4MB-aligned region that
// contains 'virtual_address', stamping it newest (MSB-set).
inline void env_table_ws_set_entry(struct Env* e, uint32 entry_index, uint32 virtual_address)
{
	assert(entry_index >= 0 && entry_index < __TWS_MAX_SIZE);
	assert(virtual_address >= 0 && virtual_address < USER_TOP);
	e->__ptr_tws[entry_index].empty = 0;
	e->__ptr_tws[entry_index].time_stamp = 0x80000000;
	e->__ptr_tws[entry_index].virtual_address = ROUNDDOWN(virtual_address, PAGE_SIZE*1024);
}
// Time stamp of a table-WS slot.
inline uint32 env_table_ws_get_time_stamp(struct Env* e, uint32 entry_index)
{
	assert(entry_index >= 0 && entry_index < __TWS_MAX_SIZE);
	return e->__ptr_tws[entry_index].time_stamp;
}
// Mark table-WS slot 'entry_index' empty and wipe its address.
// NOTE: time_stamp is deliberately left untouched (matches original).
inline void env_table_ws_clear_entry(struct Env* e, uint32 entry_index)
{
	assert(entry_index >= 0 && entry_index < __TWS_MAX_SIZE);
	e->__ptr_tws[entry_index].empty = 1;
	e->__ptr_tws[entry_index].virtual_address = 0;
}
// 4MB-aligned virtual address stored in a table-WS slot.
inline uint32 env_table_ws_get_virtual_address(struct Env* e, uint32 entry_index)
{
	assert(entry_index >= 0 && entry_index < __TWS_MAX_SIZE);
	uint32 stored = e->__ptr_tws[entry_index].virtual_address;
	return ROUNDDOWN(stored, PAGE_SIZE*1024);
}
// Whether the table-WS slot is vacant (non-zero when empty).
inline uint32 env_table_ws_is_entry_empty(struct Env* e, uint32 entry_index)
{
	return e->__ptr_tws[entry_index].empty;
}
// Insert 'tableAddress' (rounded down to its 4MB table boundary) at
// the next slot of e's table working set, advancing the circular
// last-WS index.
void addTableToTableWorkingSet(struct Env *e, uint32 tableAddress)
{
	uint32 slot = e->table_last_WS_index;
	tableAddress = ROUNDDOWN(tableAddress, PAGE_SIZE*1024);
	e->__ptr_tws[slot].virtual_address = tableAddress;
	e->__ptr_tws[slot].empty = 0;
	e->__ptr_tws[slot].time_stamp = 0;
	e->table_last_WS_index = (slot + 1) % __TWS_MAX_SIZE;
}
// Append the frame to the tail of the given buffer list.
void bufferList_add_page(struct Linked_List* bufferList, struct Frame_Info *ptr_frame_info)
{
	LIST_INSERT_TAIL(bufferList, ptr_frame_info);
}
// Detach the frame from the given buffer list.
void bufferlist_remove_page(struct Linked_List* bufferList, struct Frame_Info *ptr_frame_info)
{
	LIST_REMOVE(bufferList, ptr_frame_info);
}
// 1 when the directory entry covering 'virtual_address' has its
// USED (accessed) bit set, else 0.
inline uint32 pd_is_table_used(struct Env* ptr_env, uint32 virtual_address)
{
	uint32 entry = ptr_env->env_page_directory[PDX(virtual_address)];
	return ((entry & PERM_USED) == PERM_USED) ? 1 : 0;
}
// Clear the USED bit of the directory entry covering
// 'virtual_address' and flush the TLB entry for that address.
inline void pd_set_table_unused(struct Env* ptr_env, uint32 virtual_address)
{
	uint32 index = PDX(virtual_address);
	ptr_env->env_page_directory[index] &= ~PERM_USED;
	tlb_invalidate((void *)NULL, (void *)virtual_address);
}
// Zero the directory entry covering 'virtual_address' (no TLB flush).
inline void pd_clear_page_dir_entry(struct Env* ptr_env, uint32 virtual_address)
{
	ptr_env->env_page_directory[PDX(virtual_address)] = 0;
}
extern int __pf_write_env_table( struct Env* ptr_env, uint32 virtual_address, uint32* tableKVirtualAddress);
extern int __pf_read_env_table(struct Env* ptr_env, uint32 virtual_address, uint32* tableKVirtualAddress);

//==============================================================
// Set/clear permission bits of the PT entry mapping
// 'virtual_address', whether the table is resident or lives in the
// env's page file. Panics when no table exists at all.
//==============================================================
inline void pt_set_page_permissions(struct Env* ptr_env, uint32 virtual_address, uint32 permissions_to_set, uint32 permissions_to_clear)
{
	uint32 *ptr_page_table;	// (fix) was undeclared
	uint32 *ptr_pgdir = ptr_env->env_page_directory;
	uint32 page_directory_entry = ptr_pgdir[PDX(virtual_address)];

	if ((page_directory_entry & PERM_PRESENT) == PERM_PRESENT)
	{
		// Table resident in memory: edit the entry in place.
		if (USE_KHEAP && !CHECK_IF_KERNEL_ADDRESS(virtual_address))
			ptr_page_table = (uint32*)kheap_virtual_address(EXTRACT_ADDRESS(page_directory_entry));
		else
			ptr_page_table = STATIC_KERNEL_VIRTUAL_ADDRESS(EXTRACT_ADDRESS(page_directory_entry));
		ptr_page_table[PTX(virtual_address)] |= (permissions_to_set);
		ptr_page_table[PTX(virtual_address)] &= (~permissions_to_clear);
	}
	else if (page_directory_entry != 0)
	{
		// Table swapped out: edit it in the page file via the temp page.
		int success = __pf_read_env_table(ptr_env, virtual_address, (void*) ptr_temp_page);
		ptr_page_table = (uint32*) ptr_temp_page;
		if (success == E_TABLE_NOT_EXIST_IN_PF)
			// NOTE(review): message tail was cut at a line continuation in
			// the flattened source; completed sensibly — confirm wording.
			panic("pt_set_page_permissions: table not found in PF when expected to find one !. please revise your table fault handling");
		ptr_page_table[PTX(virtual_address)] |= (permissions_to_set);
		ptr_page_table[PTX(virtual_address)] &= (~permissions_to_clear);
		__pf_write_env_table(ptr_env, virtual_address, (void*) ptr_temp_page);
	}
	else
	{
		panic("function pt_set_page_permissions() called with invalid virtual address. The corresponding page table doesn't exist\n");
	}
	tlb_invalidate((void *)NULL, (void *)virtual_address);
}
//==============================================================
// Zero the PT entry mapping 'virtual_address', whether the table is
// resident or in the env's page file, then flush the TLB entry.
// Panics when no table exists at all.
//==============================================================
inline void pt_clear_page_table_entry(struct Env* ptr_env, uint32 virtual_address)
{
	uint32 *ptr_page_table;	// (fix) was undeclared
	uint32 *ptr_pgdir = ptr_env->env_page_directory;
	uint32 page_directory_entry = ptr_pgdir[PDX(virtual_address)];

	if ((page_directory_entry & PERM_PRESENT) == PERM_PRESENT)
	{
		// Table resident in memory: clear the entry in place.
		if (USE_KHEAP && !CHECK_IF_KERNEL_ADDRESS(virtual_address))
			ptr_page_table = (uint32*)kheap_virtual_address(EXTRACT_ADDRESS(page_directory_entry));
		else
			ptr_page_table = STATIC_KERNEL_VIRTUAL_ADDRESS(EXTRACT_ADDRESS(page_directory_entry));
		ptr_page_table[PTX(virtual_address)] = 0;
	}
	else if (page_directory_entry != 0)
	{
		// Table swapped out: clear it in the page file via the temp page.
		int success = __pf_read_env_table(ptr_env, virtual_address, (void*) ptr_temp_page);
		ptr_page_table = (uint32*) ptr_temp_page;
		if (success == E_TABLE_NOT_EXIST_IN_PF)
			// NOTE(review): message tail was cut at a line continuation in
			// the flattened source; completed sensibly — confirm wording.
			panic("pt_clear_page_table_entry: table not found in PF when expected to find one !. please revise your table fault handling");
		ptr_page_table[PTX(virtual_address)] = 0;
		__pf_write_env_table(ptr_env, virtual_address, (void*) ptr_temp_page);
	}
	else
	{
		panic("function pt_clear_page_table_entry() called with invalid virtual address. The corresponding page table doesn't exist\n");
	}
	tlb_invalidate((void *)NULL, (void *)virtual_address);
}
//==============================================================
// Return the low 12 permission bits of the PT entry mapping
// 'virtual_address' (reading a swapped-out table from the page file
// when needed). Returns 0 when no table exists anywhere.
//==============================================================
inline uint32 pt_get_page_permissions(struct Env* ptr_env, uint32 virtual_address )
{
	uint32 *ptr_page_table;	// (fix) was undeclared
	uint32 *ptr_pgdir = ptr_env->env_page_directory;
	uint32 page_directory_entry = ptr_pgdir[PDX(virtual_address)];

	if ((page_directory_entry & PERM_PRESENT) == PERM_PRESENT)
	{
		if (USE_KHEAP && !CHECK_IF_KERNEL_ADDRESS(virtual_address))
			ptr_page_table = (uint32*)kheap_virtual_address(EXTRACT_ADDRESS(page_directory_entry));
		else
			ptr_page_table = STATIC_KERNEL_VIRTUAL_ADDRESS(EXTRACT_ADDRESS(page_directory_entry));
	}
	else if (page_directory_entry != 0)
	{
		int success = __pf_read_env_table(ptr_env, virtual_address, (void*) ptr_temp_page);
		ptr_page_table = (uint32*) ptr_temp_page;
		if (success == E_TABLE_NOT_EXIST_IN_PF)
			// NOTE(review): message tail was cut at a line continuation in
			// the flattened source; completed sensibly — confirm wording.
			panic("pt_get_page_permissions: table not found in PF when expected to find one !. please revise your table fault handling");
	}
	else
	{
		return 0;	// (fix) no table anywhere -> no permissions (branch was missing)
	}
	return (ptr_page_table[PTX(virtual_address)] & 0x00000FFF);
}
//==============================================================
// Allocate a zeroed, directory-like page used to remember frames
// (see add/get/clear_frames_storage). Panics when the kernel heap
// is exhausted.
//==============================================================
inline uint32* create_frames_storage()
{
	uint32* frames_storage = (void *)kmalloc(PAGE_SIZE);
	if (frames_storage == NULL)
		panic("NOT ENOUGH KERNEL HEAP SPACE");
	// Zero so clear_frames_storage() can rely on "0 == unused slot".
	memset(frames_storage, 0, PAGE_SIZE);
	return frames_storage;	// (fix) return statement was missing
}
//==============================================================
// Record 'ptr_frame_info' at position 'index' of the storage. The
// storage acts as a page directory; index maps to fake va
// index*PAGE_SIZE, creating an inner table on demand.
//==============================================================
inline void add_frame_to_storage(uint32* frames_storage, struct Frame_Info* ptr_frame_info, uint32 index)
{
	uint32 va = index * PAGE_SIZE;
	uint32 *ptr_page_table;	// (fix) was undeclared
	int r = get_page_table(frames_storage, (void*) va, &ptr_page_table);
	if (r == TABLE_NOT_EXIST)	// (fix) result was computed but never checked
	{
		// (fix) one creation path only; flattened original ran both.
		if (USE_KHEAP)
			ptr_page_table = create_page_table(frames_storage, (uint32)va);
		else
			__static_cpt(frames_storage, (uint32)va, &ptr_page_table);
	}
	ptr_page_table[PTX(va)] = CONSTRUCT_ENTRY(to_physical_address(ptr_frame_info), 0 | PERM_PRESENT);
}
//==============================================================
// Fetch the frame recorded at position 'index' of the storage
// (or NULL when that slot was never filled).
//==============================================================
inline struct Frame_Info* get_frame_from_storage(uint32* frames_storage, uint32 index)
{
	uint32 *ptr_page_table;	// (fix) was undeclared
	uint32 va = index * PAGE_SIZE;
	struct Frame_Info* ptr_frame_info = get_frame_info(frames_storage, (void*) va, &ptr_page_table);
	return ptr_frame_info;	// (fix) return statement was missing
}
//==============================================================
// Release every inner table referenced by the storage directory and
// reset the directory entries to zero.
//==============================================================
inline void clear_frames_storage(uint32* frames_storage)
{
	int i;	// (fix) was undeclared; unused 'fourMega' removed
	for (i = 0; i < 1024; i++)
	{
		if (frames_storage[i] != 0)
		{
			// (fix) release exactly once: via the kernel heap when tables
			// come from kheap, else back to the free frame list. The
			// flattened original executed BOTH, double-releasing the frame.
			if (USE_KHEAP)
				kfree((void*)kheap_virtual_address(EXTRACT_ADDRESS(frames_storage[i])));
			else
				free_frame(to_frame_info(EXTRACT_ADDRESS(frames_storage[i])));
			frames_storage[i] = 0;	// (fix) slot must be marked unused again
		}
	}
}
//========================================
// User-heap placement strategy: setters and predicates
// (predicates return 1 when the strategy matches, else 0).
//========================================
void setUHeapPlacementStrategyFIRSTFIT() { _UHeapPlacementStrategy = UHP_PLACE_FIRSTFIT; }
void setUHeapPlacementStrategyBESTFIT()  { _UHeapPlacementStrategy = UHP_PLACE_BESTFIT;  }
void setUHeapPlacementStrategyNEXTFIT()  { _UHeapPlacementStrategy = UHP_PLACE_NEXTFIT;  }
void setUHeapPlacementStrategyWORSTFIT() { _UHeapPlacementStrategy = UHP_PLACE_WORSTFIT; }

uint32 isUHeapPlacementStrategyFIRSTFIT() { return (_UHeapPlacementStrategy == UHP_PLACE_FIRSTFIT) ? 1 : 0; }
uint32 isUHeapPlacementStrategyBESTFIT()  { return (_UHeapPlacementStrategy == UHP_PLACE_BESTFIT)  ? 1 : 0; }
uint32 isUHeapPlacementStrategyNEXTFIT()  { return (_UHeapPlacementStrategy == UHP_PLACE_NEXTFIT)  ? 1 : 0; }
uint32 isUHeapPlacementStrategyWORSTFIT() { return (_UHeapPlacementStrategy == UHP_PLACE_WORSTFIT) ? 1 : 0; }
//========================================
// Kernel-heap placement strategy: setters and predicates
// (predicates return 1 when the strategy matches, else 0).
//========================================
void setKHeapPlacementStrategyCONTALLOC() { _KHeapPlacementStrategy = KHP_PLACE_CONTALLOC; }
void setKHeapPlacementStrategyFIRSTFIT()  { _KHeapPlacementStrategy = KHP_PLACE_FIRSTFIT;  }
void setKHeapPlacementStrategyBESTFIT()   { _KHeapPlacementStrategy = KHP_PLACE_BESTFIT;   }
void setKHeapPlacementStrategyNEXTFIT()   { _KHeapPlacementStrategy = KHP_PLACE_NEXTFIT;   }
void setKHeapPlacementStrategyWORSTFIT()  { _KHeapPlacementStrategy = KHP_PLACE_WORSTFIT;  }

uint32 isKHeapPlacementStrategyCONTALLOC() { return (_KHeapPlacementStrategy == KHP_PLACE_CONTALLOC) ? 1 : 0; }
uint32 isKHeapPlacementStrategyFIRSTFIT()  { return (_KHeapPlacementStrategy == KHP_PLACE_FIRSTFIT)  ? 1 : 0; }
uint32 isKHeapPlacementStrategyBESTFIT()   { return (_KHeapPlacementStrategy == KHP_PLACE_BESTFIT)   ? 1 : 0; }
uint32 isKHeapPlacementStrategyNEXTFIT()   { return (_KHeapPlacementStrategy == KHP_PLACE_NEXTFIT)   ? 1 : 0; }
uint32 isKHeapPlacementStrategyWORSTFIT()  { return (_KHeapPlacementStrategy == KHP_PLACE_WORSTFIT)  ? 1 : 0; }