forked from xuos/xiuos

commit a2e5b49b19 (parent f5ad8437b5)

    fix sys_poll_session performance bug.
@@ -46,16 +46,14 @@ static void _sys_clock_init()
         gpt_counter_enable(kGPTOutputCompare1);
         break;
     case 1:
-        gpt_set_compare_event(kGPTOutputCompare2, OUTPUT_CMP_DISABLE, 1000);
+        gpt_set_compare_event(kGPTOutputCompare2, OUTPUT_CMP_DISABLE, 5000);
         gpt_counter_enable(kGPTOutputCompare2);
         break;
     case 2:
-        gpt_set_compare_event(kGPTOutputCompare3, OUTPUT_CMP_DISABLE, 1000);
+        gpt_set_compare_event(kGPTOutputCompare3, OUTPUT_CMP_DISABLE, 10000);
         gpt_counter_enable(kGPTOutputCompare3);
         break;
-    case 3:
-        gpt_set_compare_event(kGPTOutputCompare1, OUTPUT_CMP_DISABLE, 1000);
-        gpt_counter_enable(kGPTOutputCompare1);
+    default:
         break;
     }
 }
@@ -77,7 +75,20 @@ static uint64_t _get_second()
 
 static void _clear_clock_intr()
 {
+    switch (cur_cpuid()) {
+    case 0:
         gpt_get_compare_event(kGPTOutputCompare1);
+        break;
+    case 1:
+        gpt_get_compare_event(kGPTOutputCompare2);
+        break;
+    case 2:
+        gpt_get_compare_event(kGPTOutputCompare3);
+        break;
+    case 3:
+        gpt_get_compare_event(kGPTOutputCompare1);
+        break;
+    }
 }
 
 static bool _is_timer_expired()
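Note on the _clear_clock_intr() hunk above: after this change each core acknowledges only the GPT output-compare channel that drives its own tick (compare 1 for cores 0 and 3, compare 2 for core 1, compare 3 for core 2), instead of every core touching compare 1. Below is a minimal hedged sketch of the same per-CPU dispatch written table-driven; the enum values and the gpt_get_compare_event()/cur_cpuid() stubs are stand-ins for the board-support code, not the real driver:

    #include <stdio.h>

    /* Stand-ins for the real GPT driver interface (assumed, not the actual headers). */
    enum GPTCompareChannel { kGPTOutputCompare1 = 1, kGPTOutputCompare2, kGPTOutputCompare3 };

    static void gpt_get_compare_event(enum GPTCompareChannel ch)
    {
        /* The real driver reads/clears the compare status; here we only trace it. */
        printf("ack GPT compare channel %d\n", ch);
    }

    static int cur_cpuid(void) { return 1; /* pretend we are core 1 */ }

    /* Same per-core mapping as the switch in _clear_clock_intr(), expressed as a table. */
    static void clear_clock_intr_sketch(void)
    {
        static const enum GPTCompareChannel chan_of_cpu[4] = {
            kGPTOutputCompare1, kGPTOutputCompare2, kGPTOutputCompare3, kGPTOutputCompare1
        };
        gpt_get_compare_event(chan_of_cpu[cur_cpuid() & 3]);
    }

    int main(void)
    {
        clear_clock_intr_sketch();
        return 0;
    }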
@@ -116,6 +116,9 @@ void dabort_handler(struct trapframe* r)
         LOG("data abort at 0x%x, status 0x%x\n", dfa, dfs);
         _abort_reason(dfs);
         dump_tf(r);
+        if (is_spinlock_locked(&whole_kernel_lock) && whole_kernel_lock.owner_cpu == cur_cpuid()) {
+            spinlock_unlock(&whole_kernel_lock);
+        }
         panic("data abort exception\n");
     }
 }
@@ -143,6 +146,9 @@ void iabort_handler(struct trapframe* r)
         LOG("prefetch abort at 0x%x, status 0x%x\n", ifa, ifs);
         _abort_reason(ifs);
         dump_tf(r);
+        if (is_spinlock_locked(&whole_kernel_lock) && whole_kernel_lock.owner_cpu == cur_cpuid()) {
+            spinlock_unlock(&whole_kernel_lock);
+        }
         panic("prefetch abort exception\n");
     }
 }
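The two abort-handler hunks above add the same guard: if the faulting core currently owns whole_kernel_lock, it releases the lock before calling panic(), which would otherwise leave the other cores spinning on a lock whose holder is about to halt. A hedged, self-contained sketch of that unlock-before-panic pattern; the spinlock struct and helpers below are simplified stand-ins that only mirror the names used in the diff:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified stand-ins for the kernel's spinlock and CPU-id helpers. */
    struct spinlock { volatile int locked; int owner_cpu; };

    static bool is_spinlock_locked(struct spinlock* lk) { return lk->locked != 0; }
    static void spinlock_unlock(struct spinlock* lk) { lk->owner_cpu = -1; lk->locked = 0; }
    static int cur_cpuid(void) { return 0; }

    static struct spinlock whole_kernel_lock = { .locked = 1, .owner_cpu = 0 };

    static void panic(const char* msg)
    {
        fprintf(stderr, "panic: %s", msg);
        exit(1);
    }

    /* Pattern used by dabort_handler()/iabort_handler(): release the big lock
     * only if this core owns it, then panic. */
    static void fault_path(void)
    {
        if (is_spinlock_locked(&whole_kernel_lock) && whole_kernel_lock.owner_cpu == cur_cpuid()) {
            spinlock_unlock(&whole_kernel_lock);
        }
        panic("data abort exception\n");
    }

    int main(void) { fault_path(); return 0; }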
@@ -94,7 +94,7 @@ static void _sys_irq_init(int cpu_id)
         // Set Interrupt handler start address
         vector_base[1] = (uint32_t)trap_undefined_instruction; // Undefined Instruction
         vector_base[2] = (uint32_t)user_trap_swi_enter; // Software Interrupt
-        vector_base[3] = (uint32_t)trap_iabort; // Prefetch Abort
+        // vector_base[3] = (uint32_t)trap_iabort; // Prefetch Abort
         vector_base[4] = (uint32_t)trap_dabort; // Data Abort
         vector_base[5] = (uint32_t)handle_reserved; // Reserved
         vector_base[6] = (uint32_t)trap_irq_enter; // IRQ
@@ -34,6 +34,7 @@ Modification:
 #define OUTPUT_LEVLE_ERROR 2
 
 #define OUTPUT_LEVLE OUTPUT_LEVLE_DEBUG
+// #define OUTPUT_LEVLE OUTPUT_LEVLE_LOG
 
 extern void KPrintf(char* fmt, ...);
 
@@ -59,33 +59,35 @@ struct Thread {
 
 /* Process Control Block */
 struct TaskMicroDescriptor {
-    struct double_list_node node;
-
-    struct spinlock lock;
-    /* task->lock needed */
+    /* task debug resources */
     int pid;
+    char name[TASK_NAME_MAX_LEN];
+
+    /// @todo support return value
+    int ret; // state val that be returned to parent
     /// @todo support parent
     struct TaskMicroDescriptor* parent;
-    enum ProcState state;
-    /// @todo support ret value
-    int ret; // state val that be returned to parent
+
+    /* task context resources */
+    struct Thread main_thread; // will only access by task itself
 
+    /* task memory resources */
     struct TopLevelPageDirectory pgdir; // [phy] vm pgtbl base address
+    uintptr_t heap_base; // mem size of proc used(allocated by kernel)
+    /// @todo support heap_base
+    uintptr_t mem_size;
+
+    /* task communication resources */
     struct double_list_node cli_sess_listhead;
     struct double_list_node svr_sess_listhead;
     struct TraceTag server_identifier;
-    /* task->lock not necessary */
-    struct Thread main_thread; // will only access by task itself
+
+    /* task schedule attributes */
+    struct double_list_node node;
+    enum ProcState state;
+    int priority; // priority
     int remain_tick;
     int maxium_tick;
-
-    struct TraceTag cwd; // current directory
-
-    int priority; // priority
-
-    /// @todo support mem_size
-    uintptr_t mem_size; // mem size of proc used(allocated by kernel)
-    char name[TASK_NAME_MAX_LEN];
 };
 
 struct SchedulerRightGroup {
@@ -94,17 +96,10 @@ struct SchedulerRightGroup {
 };
 
 struct XiziTaskManager {
-    struct spinlock lock; // lock to organize free and used task list
     struct double_list_node task_list_head[TASK_MAX_PRIORITY]; /* list of task control blocks that are allocated */
-    int nr_pcb_used; // for debug
     struct slab_allocator task_allocator;
-
-    /// @todo Add pid to task
     uint32_t next_pid;
 
-    /* number of tcbs in which one page contains */
-    int nr_tcb_per_page;
-
     /* init task manager */
     void (*init)();
     /* new a task control block, checkout #sys_spawn for usage */
@@ -112,7 +107,7 @@ struct XiziTaskManager {
     /* free a task control block, this calls #free_user_pgdir to free all vitual spaces */
     void (*free_pcb)(struct TaskMicroDescriptor*);
     /* init a task control block, set name, remain_tick, state, cwd, priority, etc. */
-    void (*task_set_default_schedule_attr)(struct TaskMicroDescriptor*, struct TraceTag* cwd);
+    void (*task_set_default_schedule_attr)(struct TaskMicroDescriptor*);
 
     /* use by task_scheduler, find next READY task, should be in locked */
     struct TaskMicroDescriptor* (*next_runnable_task)(void);
@@ -36,8 +36,9 @@ Modification:
 #include "task.h"
 
 extern uint32_t _binary_init_start[], _binary_default_fs_start[];
-static struct TraceTag hardkernel_tag, softkernel_tag;
+extern int sys_spawn(char* img_start, char* name, char** argv);
 
+static struct TraceTag hardkernel_tag, softkernel_tag;
 static int core_init_done = 0;
 int main(void)
 {
@@ -80,9 +81,9 @@ int main(void)
 
         /* start first task */
         char* init_task_param[2] = { "/app/init", 0 };
-        spawn_embedded_task((char*)_binary_init_start, "init", init_task_param);
+        sys_spawn((char*)_binary_init_start, "init", init_task_param);
         char* fs_server_task_param[2] = { "/app/fs_server", 0 };
-        spawn_embedded_task((char*)_binary_default_fs_start, "memfs", fs_server_task_param);
+        sys_spawn((char*)_binary_default_fs_start, "memfs", fs_server_task_param);
     }
 
     /* start scheduler */
@@ -178,12 +178,14 @@ struct session_backend* create_share_pages(struct TaskMicroDescriptor* client, s
     uintptr_t client_vaddr = map_task_share_page(client, V2P_WO(kern_vaddr), nr_pages);
     if (UNLIKELY(client_vaddr == 0)) {
         kfree((char*)kern_vaddr);
+        slab_free(SessionAllocator(), session_backend);
         return NULL;
     }
     uintptr_t server_vaddr = map_task_share_page(server, V2P_WO(kern_vaddr), nr_pages);
     if (UNLIKELY(server_vaddr == 0)) {
         unmap_task_share_pages(client, client_vaddr, nr_pages);
         kfree((char*)kern_vaddr);
+        slab_free(SessionAllocator(), session_backend);
         return NULL;
     }
 
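The slab_free() calls added above close a leak in create_share_pages(): previously, when map_task_share_page() failed for either endpoint, the function freed the kernel buffer and returned NULL but left the session_backend object allocated from its slab. A hedged sketch of the same acquire-in-order, release-in-reverse error handling; alloc_backend(), alloc_kern_buf() and map_share() are placeholders invented for this example, not the kernel's real API:

    #include <stdio.h>
    #include <stdlib.h>

    /* Placeholder types and helpers standing in for the real slab/pager interfaces. */
    struct session_backend { void* kern_buf; };

    static struct session_backend* alloc_backend(void) { return malloc(sizeof(struct session_backend)); }
    static void free_backend(struct session_backend* b) { free(b); }
    static void* alloc_kern_buf(size_t n) { return malloc(n); }
    static int map_share(const char* who) { (void)who; return -1; /* pretend mapping fails */ }

    static struct session_backend* create_share_pages_sketch(size_t nbytes)
    {
        struct session_backend* backend = alloc_backend();
        if (backend == NULL)
            return NULL;

        backend->kern_buf = alloc_kern_buf(nbytes);
        if (backend->kern_buf == NULL) {
            free_backend(backend);
            return NULL;
        }

        if (map_share("client") != 0) {
            /* Release everything acquired so far, newest first. */
            free(backend->kern_buf);
            free_backend(backend);   /* <- the step the patch adds via slab_free() */
            return NULL;
        }
        return backend;
    }

    int main(void)
    {
        if (create_share_pages_sketch(4096) == NULL)
            printf("mapping failed, nothing leaked\n");
        return 0;
    }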
@@ -208,6 +210,9 @@ struct session_backend* create_share_pages(struct TaskMicroDescriptor* client, s
     doubleListNodeInit(&session_backend->server_side.node);
     doubleListAddOnBack(&session_backend->server_side.node, &server->svr_sess_listhead);
 
+    server->mem_size += true_capacity;
+    client->mem_size += true_capacity;
+
     return session_backend;
 }
 
@@ -232,6 +237,9 @@ int delete_share_pages(struct session_backend* session_backend)
         doubleListDel(&session_backend->server_side.node);
     }
 
+    session_backend->server->mem_size -= session_backend->nr_pages * PAGE_SIZE;
+    session_backend->client->mem_size -= session_backend->nr_pages * PAGE_SIZE;
+
     /* free seesion backend */
     kfree((void*)session_backend->buf_kernel_addr);
     slab_free(SessionAllocator(), (void*)session_backend);
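Taken together, the last two hunks keep the per-task memory accounting symmetric for shared pages: create_share_pages() charges true_capacity to both client->mem_size and server->mem_size, and delete_share_pages() later subtracts nr_pages * PAGE_SIZE from both sides. Assuming true_capacity is the page-rounded buffer size (i.e. nr_pages * PAGE_SIZE, which this excerpt does not show), the counters balance: with 4 KiB pages, a 3-page session adds 12288 bytes to each task's mem_size and removes the same 12288 bytes when the session is deleted.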
@@ -169,13 +169,11 @@ int task_exec(struct TaskMicroDescriptor* task, char* img_start, char* name, cha
     }
     strncpy(task->name, last, sizeof(task->name));
 
-    struct TopLevelPageDirectory old_pgdir = task->pgdir;
+    xizi_pager.free_user_pgdir(&task->pgdir);
     task->pgdir = pgdir;
 
-    /// @todo record mem size used b task
-    task->mem_size = ALIGNUP(load_size, PAGE_SIZE);
-
-    xizi_pager.free_user_pgdir(&old_pgdir);
+    task->heap_base = ALIGNUP(load_size, PAGE_SIZE);
+    task->mem_size = task->heap_base + USER_STACK_SIZE;
     return 0;
 
 error_exec:
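In the task_exec() hunk above, heap_base now records where the loaded image ends (rounded up to a page boundary) and mem_size additionally counts the user stack. For example, with 4 KiB pages a load_size of 0x12345 bytes yields heap_base = ALIGNUP(0x12345, 0x1000) = 0x13000, and with a hypothetical USER_STACK_SIZE of 0x4000 the task's mem_size becomes 0x17000. A tiny hedged sketch of that arithmetic; PAGE_SIZE, USER_STACK_SIZE and ALIGNUP are defined locally here with illustrative values, not the kernel's real configuration:

    #include <stdint.h>
    #include <stdio.h>

    /* Local stand-ins; the kernel defines its own ALIGNUP/PAGE_SIZE/USER_STACK_SIZE. */
    #define PAGE_SIZE 0x1000u
    #define USER_STACK_SIZE 0x4000u
    #define ALIGNUP(sz, align) (((sz) + (align) - 1) & ~((align) - 1))

    int main(void)
    {
        uintptr_t load_size = 0x12345;
        uintptr_t heap_base = ALIGNUP(load_size, PAGE_SIZE); /* 0x13000 */
        uintptr_t mem_size = heap_base + USER_STACK_SIZE;    /* 0x17000 */
        printf("heap_base=0x%lx mem_size=0x%lx\n", (unsigned long)heap_base, (unsigned long)mem_size);
        return 0;
    }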
@@ -63,5 +63,7 @@ int sys_mmap(uintptr_t vaddr, uintptr_t paddr, int len, int is_dev)
             load_len += PAGE_SIZE;
         }
     }
+
+    cur_task->mem_size += true_len;
     return vaddr + true_len;
 }
@@ -61,13 +61,14 @@ int sys_poll_session(struct Session* userland_session_arr, int arr_capacity)
         doubleListAddOnBack(cur_node, &cur_task->svr_sess_listhead);
     }
 
-    /* handle sessions for condition 2, ref. delete_share_pages() */
-    bool has_delete = true;
-    while (has_delete) {
-        has_delete = false;
-
+    /* poll with new sessions */
+    int i = 0;
     DOUBLE_LIST_FOR_EACH_ENTRY(server_session, &cur_task->svr_sess_listhead, node)
     {
+        if (i >= arr_capacity) {
+            break;
+        }
+
         if (SERVER_SESSION_BACKEND(server_session)->client_side.closed) {
             // client had closed it, then server will close it too
             struct session_backend* session_backend = SERVER_SESSION_BACKEND(server_session);
@@ -77,19 +78,9 @@ int sys_poll_session(struct Session* userland_session_arr, int arr_capacity)
                 xizi_share_page_manager.unmap_task_share_pages(cur_task, session_backend->server_side.buf_addr, session_backend->nr_pages);
             }
             xizi_share_page_manager.delete_share_pages(session_backend);
-                has_delete = true;
             break;
         }
-        }
-    }
 
-    /* poll with new sessions */
-    int i = 0;
-    DOUBLE_LIST_FOR_EACH_ENTRY(server_session, &cur_task->svr_sess_listhead, node)
-    {
-        if (i >= arr_capacity) {
-            break;
-        }
         userland_session_arr[i++] = (struct Session) {
             .buf = (void*)server_session->buf_addr,
             .capacity = server_session->capacity,
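These two sys_poll_session() hunks are the performance fix named in the commit message. The old code ran a separate cleanup phase that, through the has_delete/while retry, restarted the whole svr_sess_listhead walk from the beginning after every closed session it deleted, and only afterwards made a second full pass to fill userland_session_arr; with many closed sessions that is a quadratic number of list traversals per poll. The new code does one bounded pass: it stops once arr_capacity entries are filled, copies live sessions out as it goes, and when it meets a closed session it deletes it and breaks out (presumably because delete_share_pages() invalidates the iterator, leaving any remaining closed sessions to the next poll). Below is a hedged, self-contained sketch of the single-pass delete-or-collect idea on a plain singly linked list; unlike the kernel code it keeps walking after a deletion, which the pointer-to-pointer unlink makes safe here. The types and helpers are stand-ins, not the kernel's double_list API:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in session node; 'closed' plays the role of client_side.closed. */
    struct sess {
        int id;
        bool closed;
        struct sess* next;
    };

    /* One pass: closed sessions are unlinked and freed, live ones are collected
     * into out[] until it is full. Returns the number of sessions reported. */
    static int poll_sessions_once(struct sess** head, int* out, int capacity)
    {
        int n = 0;
        struct sess** link = head;
        while (*link != NULL && n < capacity) {
            struct sess* cur = *link;
            if (cur->closed) {
                *link = cur->next; /* unlink in place, no restart of the walk */
                free(cur);
            } else {
                out[n++] = cur->id;
                link = &cur->next;
            }
        }
        return n;
    }

    int main(void)
    {
        /* Build a tiny list: 1(open) -> 2(closed) -> 3(open). */
        struct sess* head = NULL;
        for (int id = 3; id >= 1; id--) {
            struct sess* s = malloc(sizeof(*s));
            s->id = id;
            s->closed = (id == 2);
            s->next = head;
            head = s;
        }

        int out[8];
        int n = poll_sessions_once(&head, out, 8);
        for (int i = 0; i < n; i++)
            printf("live session %d\n", out[i]); /* prints 1 and 3 */

        while (head) { struct sess* next = head->next; free(head); head = next; }
        return 0;
    }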
@@ -49,7 +49,7 @@ int sys_spawn(char* img_start, char* name, char** argv)
         return -1;
     }
     // init pcb
-    xizi_task_manager.task_set_default_schedule_attr(new_task_cb, RequireRootTag());
+    xizi_task_manager.task_set_default_schedule_attr(new_task_cb);
 
     return 0;
 }
@@ -65,7 +65,7 @@ void show_tasks(void)
         }
     }
     LOG_PRINTF("******************************************************\n");
-    LOG_PRINTF("STAT     ID   TASK            PRI   LEFT_TICKS\n");
+    LOG_PRINTF("STAT     ID   TASK            PRI   MEM(KB)\n");
     for (int i = 0; i < TASK_MAX_PRIORITY; i++) {
         if (IS_DOUBLE_LIST_EMPTY(&xizi_task_manager.task_list_head[i])) {
             continue;
@@ -82,7 +82,7 @@ void show_tasks(void)
                 LOG_PRINTF("   DEAD ");
 
             _padding(task->name);
-            LOG_PRINTF("  %d   %s  %d       %d\n", task->pid, task->name, task->priority, task->remain_tick);
+            LOG_PRINTF("  %d   %s  %d       %d\n", task->pid, task->name, task->priority, task->mem_size >> 10);
         }
     }
     LOG_PRINTF("******************************************************\n");
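The two show_tasks() hunks above replace the LEFT_TICKS column with memory usage: task->mem_size is kept in bytes, and the expression task->mem_size >> 10 divides by 1024, so a task whose mem_size is 0x17000 (94208 bytes) is listed as 92 in the MEM(KB) column.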
@@ -138,7 +138,7 @@ int sys_state(sys_state_option option, sys_state_info* info)
         info->memblock_info.memblock_start = (uintptr_t)V2P(_binary_fs_img_start);
         info->memblock_info.memblock_end = (uintptr_t)V2P(_binary_fs_img_end);
     } else if (option == SYS_STATE_GET_HEAP_BASE) {
-        return cur_cpu()->task->mem_size;
+        return cur_cpu()->task->heap_base;
     } else if (option == SYS_STATE_SET_TASK_PRIORITY) {
         xizi_task_manager.set_cur_task_priority(info->priority);
     } else if (option == SYS_STATE_SHOW_TASKS) {
@@ -1,3 +1,3 @@
-SRC_FILES := task.c scheduler.c spawn_default_task.c
+SRC_FILES := task.c scheduler.c
 
 include $(KERNEL_ROOT)/compiler.mk
@@ -1,151 +0,0 @@
-/*
- * Copyright (c) 2020 AIIT XUOS Lab
- * XiUOS is licensed under Mulan PSL v2.
- * You can use this software according to the terms and conditions of the Mulan PSL v2.
- * You may obtain a copy of Mulan PSL v2 at:
- *        http://license.coscl.org.cn/MulanPSL2
- * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
- * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
- * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
- * See the Mulan PSL v2 for more details.
- */
-/**
- * @file spawn_default_task.c
- * @brief spawn task that embeded in kernel image
- * @version 3.0
- * @author AIIT XUOS Lab
- * @date 2023.08.25
- */
-
-/*************************************************
-File name: spawn_default_task.c
-Description: spawn task that embeded in kernel image
-Others:
-History:
-1. Date: 2023-08-28
-Author: AIIT XUOS Lab
-Modification:
-1. first version
-*************************************************/
-#include "actracer.h"
-#include "assert.h"
-#include "kalloc.h"
-#include "task.h"
-
-#include "execelf.h"
-
-int spawn_embedded_task(char* img_start, char* name, char** argv)
-{
-    struct TaskMicroDescriptor* new_task_cb = xizi_task_manager.new_task_cb();
-    if (UNLIKELY(!new_task_cb)) {
-        ERROR("Unable to new task control block.\n");
-        return -1;
-    }
-    // init trapframe
-    arch_init_trapframe(new_task_cb->main_thread.trapframe, 0, 0);
-
-    /* load img to task */
-    /* 1. load elf header */
-    struct elfhdr* elf = (struct elfhdr*)img_start;
-    // pgdir for new task
-    struct TopLevelPageDirectory pgdir;
-    if (UNLIKELY(!xizi_pager.new_pgdir(&pgdir))) {
-        ERROR("create new pgdir failed.\n");
-        goto error_exec;
-    }
-    memcpy(pgdir.pd_addr, kern_pgdir.pd_addr, TOPLEVLE_PAGEDIR_SIZE);
-
-    /* 2. load elf content */
-    uint32_t load_size = 0;
-    struct proghdr ph;
-    for (int sec_idx = 0, off = elf->phoff; sec_idx < elf->phnum; sec_idx++, off += sizeof(ph)) {
-        // load proghdr
-        memcpy((char*)&ph, img_start + off, sizeof(ph));
-
-        if (ph.type != ELF_PROG_LOAD)
-            continue;
-        if (ph.memsz < ph.filesz) {
-            ERROR("elf header mem size less than file size\n");
-            goto error_exec;
-        }
-
-        // read section
-        // 1. alloc space
-        if ((load_size = xizi_pager.resize_user_pgdir(&pgdir, load_size, ph.vaddr + ph.memsz))
-            != ph.vaddr + ph.memsz) {
-            goto error_exec;
-        }
-        // 2. copy inode to space
-        assert(ph.vaddr % PAGE_SIZE == 0);
-        for (int addr_offset = 0; addr_offset < ph.filesz; addr_offset += PAGE_SIZE) {
-            uintptr_t page_paddr = xizi_pager.address_translate(&pgdir, ph.vaddr + addr_offset);
-            if (page_paddr == 0) {
-                panic("copy elf file to unmapped addr");
-            }
-            uintptr_t read_size = (ph.filesz - addr_offset < PAGE_SIZE ? ph.filesz - addr_offset : PAGE_SIZE);
-            memcpy(P2V(page_paddr), img_start + (ph.off + addr_offset), read_size);
-        }
-    }
-
-    /// elf file content now in memory
-    // alloc stack page and map to TOP of user vspace
-    uintptr_t* stack_bottom = (uintptr_t*)kalloc(USER_STACK_SIZE);
-    if (UNLIKELY(stack_bottom == NULL)) {
-        ERROR("No memory.\n");
-        goto error_exec;
-    }
-    xizi_pager.map_pages(pgdir.pd_addr, USER_MEM_TOP - USER_STACK_SIZE, V2P(stack_bottom), USER_STACK_SIZE, false);
-
-    uintptr_t user_vspace_sp = USER_MEM_TOP;
-    /// @todo change 32 to some macro
-    uintptr_t user_stack_init[32];
-    uintptr_t argc = 0;
-    uintptr_t copy_len = 0;
-    for (argc = 0; argv != NULL && argv[argc] != NULL; argc++) {
-        /// @todo handle with large number of parameters
-
-        // copy param to user stack
-        copy_len = strlen(argv[argc]) + 1;
-        user_vspace_sp = (user_vspace_sp - copy_len) & ~3;
-        uintptr_t copied_len = xizi_pager.cross_vspace_data_copy(&pgdir, user_vspace_sp, (uintptr_t)argv[argc], copy_len);
-        if (UNLIKELY(copied_len != copy_len)) {
-            ERROR("Something went wrong when copying params.\n");
-            goto error_exec;
-        }
-        user_stack_init[argc] = user_vspace_sp;
-    }
-    user_stack_init[argc] = 0;
-    copy_len = (argc + 1) * sizeof(uintptr_t);
-    user_vspace_sp -= copy_len;
-    uintptr_t copied_len = xizi_pager.cross_vspace_data_copy(&pgdir, user_vspace_sp, (uintptr_t)user_stack_init, copy_len);
-    if (UNLIKELY(copied_len != copy_len)) {
-        ERROR("Something went wrong when copying params.\n");
-        goto error_exec;
-    }
-
-    // init task trapframe, which stores in svc stack
-    // do not go tp error_exec once we change trapframe!
-    assert(copied_len == (argc + 1) * sizeof(uintptr_t));
-    arch_trapframe_set_sp_pc(new_task_cb->main_thread.trapframe, user_vspace_sp, elf->entry);
-    arch_set_main_params(new_task_cb->main_thread.trapframe, argc, user_vspace_sp);
-
-    // save program name
-    strncpy(new_task_cb->name, name, sizeof(new_task_cb->name));
-
-    struct TopLevelPageDirectory old_pgdir = new_task_cb->pgdir;
-    new_task_cb->pgdir = pgdir;
-
-    /// @todo record mem size used b task
-    new_task_cb->mem_size = ALIGNUP(load_size, PAGE_SIZE);
-
-    xizi_pager.free_user_pgdir(&old_pgdir);
-
-    xizi_task_manager.task_set_default_schedule_attr(new_task_cb, RequireRootTag());
-    return 0;
-
-error_exec:
-    if (pgdir.pd_addr != NULL) {
-        xizi_pager.free_user_pgdir(&pgdir);
-    }
-    return -1;
-}
@@ -69,8 +69,6 @@ static struct TaskMicroDescriptor* _alloc_task_cb()
     // set pid once task is allocated
     memset(task, 0, sizeof(*task));
     task->pid = xizi_task_manager.next_pid++;
-    // update pcb used
-    xizi_task_manager.nr_pcb_used += 1;
 
     return task;
 }
@@ -98,7 +96,6 @@ static void _dealloc_task_cb(struct TaskMicroDescriptor* task)
 
     // free task back to allocator
     slab_free(&xizi_task_manager.task_allocator, (void*)task);
-    xizi_task_manager.nr_pcb_used -= 1;
 
     // remove priority
     if (IS_DOUBLE_LIST_EMPTY(&xizi_task_manager.task_list_head[task->priority])) {
@@ -155,12 +152,11 @@ static struct TaskMicroDescriptor* _new_task_cb()
     return task;
 }
 
-static void _task_set_default_schedule_attr(struct TaskMicroDescriptor* task, struct TraceTag* cwd)
+static void _task_set_default_schedule_attr(struct TaskMicroDescriptor* task)
 {
     task->remain_tick = TASK_CLOCK_TICK;
     task->maxium_tick = TASK_CLOCK_TICK * 10;
     task->state = READY;
-    task->cwd = *cwd;
     task->priority = TASK_DEFAULT_PRIORITY;
     doubleListAddOnHead(&task->node, &xizi_task_manager.task_list_head[task->priority]);
     ready_task_priority |= (1 << task->priority);