forked from xuos/xiuos

commit e5df6012af
parent a24d73f710

Support blocking task.
@@ -85,6 +85,7 @@ struct TaskMicroDescriptor {
     /* task communication resources */
     struct double_list_node cli_sess_listhead;
     struct double_list_node svr_sess_listhead;
+    bool current_ipc_handled;
     struct KBuddy* massive_ipc_allocator;
     struct TraceTag server_identifier;

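The new `current_ipc_handled` flag closes a race between the server finishing a request and the client deciding to sleep: if the server completes the request while the client is still runnable, blocking the client afterwards would strand it. Below is a minimal, self-contained sketch of the two interleavings this flag has to handle; `MockTask` and the helper names are hypothetical stand-ins for the kernel structures, not the kernel's API.

```c
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

enum mock_state { READY, RUNNING, BLOCKED };

struct MockTask {
    enum mock_state state;
    bool current_ipc_handled; /* set by the server if the reply lands before the client sleeps */
};

/* Server side: mirrors the sys_poll_session hunk below. */
static void server_finished_request(struct MockTask* client)
{
    if (client->state == BLOCKED)
        client->state = READY;              /* task_unblock() */
    else
        client->current_ipc_handled = true; /* client will see the reply on its next yield */
}

/* Client side: mirrors the sys_yield(SYS_TASK_YIELD_BLOCK_IPC) hunk below. */
static void client_yield_block_ipc(struct MockTask* client)
{
    if (client->current_ipc_handled)
        client->current_ipc_handled = false; /* reply already there, keep running */
    else
        client->state = BLOCKED;             /* task_block() until the server polls */
}

int main(void)
{
    /* Interleaving 1: client yields first, server unblocks it. */
    struct MockTask t1 = { RUNNING, false };
    client_yield_block_ipc(&t1);
    assert(t1.state == BLOCKED);
    server_finished_request(&t1);
    assert(t1.state == READY);

    /* Interleaving 2: server finishes first, the flag keeps the client runnable. */
    struct MockTask t2 = { RUNNING, false };
    server_finished_request(&t2);
    client_yield_block_ipc(&t2);
    assert(t2.state == RUNNING && !t2.current_ipc_handled);

    puts("both interleavings leave the client runnable or wakeable");
    return 0;
}
```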
@@ -103,6 +104,7 @@ struct SchedulerRightGroup {

 struct XiziTaskManager {
     struct double_list_node task_list_head[TASK_MAX_PRIORITY]; /* list of task control blocks that are allocated */
+    struct double_list_node task_blocked_list_head;
     struct slab_allocator task_allocator;
     struct slab_allocator task_buddy_allocator;
     uint32_t next_pid;
@@ -120,8 +122,13 @@ struct XiziTaskManager {
     struct TaskMicroDescriptor* (*next_runnable_task)(void);
     /* function that runs in kernel thread context and schedules user tasks */
     void (*task_scheduler)(struct SchedulerRightGroup);
+
+    /* handle task state */
     /* call to yield the current user task */
     void (*task_yield_noschedule)(struct TaskMicroDescriptor* task, bool is_blocking);
+    /* block and unblock a task */
+    void (*task_block)(struct TaskMicroDescriptor* task);
+    void (*task_unblock)(struct TaskMicroDescriptor* task);
     /* set task priority */
     void (*set_cur_task_priority)(int priority);
 };

@@ -55,6 +55,15 @@ int sys_poll_session(struct Session* userland_session_arr, int arr_capacity)
             return -1;
         }
         // update session_backend
+        // the server has consumed messages from this session, so its pending request is handled
+        if (server_session->head != userland_session_arr[i].head) {
+            struct TaskMicroDescriptor* client = SERVER_SESSION_BACKEND(server_session)->client;
+            if (client->state == BLOCKED) {
+                xizi_task_manager.task_unblock(client);
+            } else {
+                client->current_ipc_handled = true;
+            }
+        }
         server_session->head = userland_session_arr[i].head;
         server_session->tail = userland_session_arr[i].tail;
         doubleListDel(cur_node);

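The handled-or-not test relies only on the session's head cursor: if the head the server reports back differs from the one stored in the kernel's `server_session`, the server must have consumed at least one message since the last poll. A small sketch of that cursor convention, with a hypothetical fixed-size ring (the real session buffer layout is not shown in this diff):

```c
#include <stdbool.h>
#include <stdio.h>

#define RING_CAPACITY 8 /* hypothetical; the real session size is configurable */

struct RingCursor {
    unsigned head; /* consumer position, advanced by the server */
    unsigned tail; /* producer position, advanced by the client */
};

/* Client enqueues a request (if there is room). */
static bool ring_push(struct RingCursor* rc)
{
    if ((rc->tail + 1) % RING_CAPACITY == rc->head % RING_CAPACITY)
        return false; /* full */
    rc->tail = (rc->tail + 1) % RING_CAPACITY;
    return true;
}

/* Server consumes a request. */
static bool ring_pop(struct RingCursor* rc)
{
    if (rc->head == rc->tail)
        return false; /* empty */
    rc->head = (rc->head + 1) % RING_CAPACITY;
    return true;
}

int main(void)
{
    struct RingCursor kernel_view = { 0, 0 }; /* what the kernel last recorded */
    struct RingCursor server_view = { 0, 0 }; /* what the server hands back on poll */

    ring_push(&server_view); /* client queued one request */
    kernel_view.tail = server_view.tail;

    ring_pop(&server_view);  /* server handled it before polling again */

    /* The same comparison sys_poll_session makes before waking the client. */
    printf("request handled: %s\n",
        kernel_view.head != server_view.head ? "yes" : "no");
    return 0;
}
```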
@@ -85,6 +85,14 @@ void show_tasks(void)
             LOG_PRINTF("  %d   %s  %d       %d\n", task->pid, task->name, task->priority, task->mem_size >> 10);
         }
     }
+
+    DOUBLE_LIST_FOR_EACH_ENTRY(task, &xizi_task_manager.task_blocked_list_head, node)
+    {
+        LOG_PRINTF("  BLOCK ");
+        _padding(task->name);
+        LOG_PRINTF("  %d   %s  %d       %d\n", task->pid, task->name, task->priority, task->mem_size >> 10);
+    }
+
     LOG_PRINTF("******************************************************\n");
     return;
 }

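`DOUBLE_LIST_FOR_EACH_ENTRY` walks an intrusive doubly linked list in which the link node is embedded in the task descriptor itself. For readers unfamiliar with the pattern, here is a minimal container_of-style reimplementation under assumed names, using the GCC-style `__typeof__` extension; the actual XiUOS macro definitions are not part of this diff:

```c
#include <stddef.h>
#include <stdio.h>

struct double_list_node {
    struct double_list_node *prev, *next;
};

struct Item {
    int pid;
    struct double_list_node node; /* embedded link, as in TaskMicroDescriptor */
};

/* Recover the containing struct from a pointer to its embedded node. */
#define CONTAINER_OF(ptr, type, member) \
    ((type*)((char*)(ptr) - offsetof(type, member)))

/* Walk every entry on a circular list headed by a sentinel node. */
#define LIST_FOR_EACH_ENTRY(pos, head, member)                       \
    for (pos = CONTAINER_OF((head)->next, __typeof__(*pos), member); \
         &pos->member != (head);                                     \
         pos = CONTAINER_OF(pos->member.next, __typeof__(*pos), member))

static void list_init(struct double_list_node* head) { head->prev = head->next = head; }

static void list_add_head(struct double_list_node* n, struct double_list_node* head)
{
    n->next = head->next;
    n->prev = head;
    head->next->prev = n;
    head->next = n;
}

int main(void)
{
    struct double_list_node blocked;
    list_init(&blocked);

    struct Item a = { .pid = 1 }, b = { .pid = 2 };
    list_add_head(&a.node, &blocked);
    list_add_head(&b.node, &blocked);

    struct Item* it;
    LIST_FOR_EACH_ENTRY(it, &blocked, node)
        printf("blocked pid %d\n", it->pid); /* prints 2 then 1 */
    return 0;
}
```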
@@ -35,6 +35,16 @@ Modification:

 int sys_yield(task_yield_reason reason)
 {
-    xizi_task_manager.task_yield_noschedule(cur_cpu()->task, false);
+    struct TaskMicroDescriptor* cur_task = cur_cpu()->task;
+    xizi_task_manager.task_yield_noschedule(cur_task, false);
+
+    // handle ipc block
+    if ((reason & SYS_TASK_YIELD_BLOCK_IPC) != 0) {
+        if (cur_task->current_ipc_handled) {
+            cur_task->current_ipc_handled = false;
+        } else {
+            xizi_task_manager.task_block(cur_task);
+        }
+    }
     return 0;
 }
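Note the ordering: `task_yield_noschedule` runs first, so the task has already left the RUNNING state before `task_block` is called; `_task_block` below asserts exactly that precondition. A compact sketch of why the order matters, using hypothetical mirror functions of the two hooks:

```c
#include <assert.h>
#include <stdio.h>

enum state { READY, RUNNING, BLOCKED };

static enum state yield_noschedule(enum state s)
{
    return s == RUNNING ? READY : s; /* demote the running task to READY */
}

static enum state block(enum state s)
{
    assert(s != RUNNING); /* same precondition _task_block asserts */
    return BLOCKED;
}

int main(void)
{
    enum state s = RUNNING;
    s = yield_noschedule(s); /* must come first: RUNNING -> READY */
    s = block(s);            /* READY -> BLOCKED, assertion holds */
    printf("final state: %s\n", s == BLOCKED ? "BLOCKED" : "?");
    return 0;
}
```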
@@ -48,6 +48,7 @@ static void _task_manager_init()
     for (int i = 0; i < TASK_MAX_PRIORITY; i++) {
         doubleListNodeInit(&xizi_task_manager.task_list_head[i]);
     }
+    doubleListNodeInit(&xizi_task_manager.task_blocked_list_head);
     // init task (slab) allocator
     slab_init(&xizi_task_manager.task_allocator, sizeof(struct TaskMicroDescriptor));
     slab_init(&xizi_task_manager.task_buddy_allocator, sizeof(struct KBuddy));
@@ -271,11 +272,7 @@ static void _task_yield_noschedule(struct TaskMicroDescriptor* task, bool blocking)
     // rearrange current task position
     doubleListDel(&task->node);
     if (task->state == RUNNING) {
-        if (!blocking) {
-            task->state = READY;
-        } else {
-            task->state = BLOCKED;
-        }
+        task->state = READY;
     }
     task->remain_tick = TASK_CLOCK_TICK;
     if (task == cur_cpu()->task) {
@@ -284,6 +281,28 @@ static void _task_yield_noschedule(struct TaskMicroDescriptor* task, bool blocking)
     doubleListAddOnBack(&task->node, &xizi_task_manager.task_list_head[task->priority]);
 }

+static void _task_block(struct TaskMicroDescriptor* task)
+{
+    assert(task != NULL);
+    assert(task->state != RUNNING);
+    doubleListDel(&task->node);
+    if (xizi_task_manager.task_list_head[task->priority].next == &xizi_task_manager.task_list_head[task->priority]) {
+        ready_task_priority &= ~(1 << task->priority);
+    }
+    task->state = BLOCKED;
+    doubleListAddOnHead(&task->node, &xizi_task_manager.task_blocked_list_head);
+}
+
+static void _task_unblock(struct TaskMicroDescriptor* task)
+{
+    assert(task != NULL);
+    assert(task->state == BLOCKED);
+    doubleListDel(&task->node);
+    task->state = READY;
+    doubleListAddOnHead(&task->node, &xizi_task_manager.task_list_head[task->priority]);
+    ready_task_priority |= (1 << task->priority);
+}
+
 static void _set_cur_task_priority(int priority)
 {
     if (priority < 0 || priority >= TASK_MAX_PRIORITY) {
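`_task_block` keeps the `ready_task_priority` bitmap consistent: the bit for a priority level is cleared only when removing this task empties that level's ready list, and set again unconditionally on unblock. A standalone sketch of the bitmap bookkeeping plus the highest-priority lookup the scheduler presumably performs; the names are illustrative, `ffs()` is POSIX, and lower numbers are assumed to mean higher priority:

```c
#include <stdint.h>
#include <stdio.h>
#include <strings.h> /* ffs() */

#define MAX_PRIO 32

static uint32_t ready_bitmap;
static int ready_count[MAX_PRIO]; /* stand-in for the per-priority ready lists */

static void mark_ready(int prio)
{
    ready_count[prio]++;
    ready_bitmap |= 1u << prio;
}

static void mark_blocked(int prio)
{
    if (--ready_count[prio] == 0)      /* the level's ready list became empty */
        ready_bitmap &= ~(1u << prio); /* same clear as _task_block */
}

/* Highest-priority nonempty level; -1 when everything is blocked. */
static int top_ready_priority(void)
{
    return ready_bitmap ? ffs((int)ready_bitmap) - 1 : -1;
}

int main(void)
{
    mark_ready(3);
    mark_ready(3);
    mark_ready(5);
    mark_blocked(3);                          /* one task at level 3 still ready: bit stays */
    printf("top=%d\n", top_ready_priority()); /* 3 */
    mark_blocked(3);                          /* level 3 now empty: bit cleared */
    printf("top=%d\n", top_ready_priority()); /* 5 */
    return 0;
}
```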
@@ -312,6 +331,9 @@ struct XiziTaskManager xizi_task_manager = {

     .next_runnable_task = max_priority_runnable_task,
     .task_scheduler = _scheduler,
+
+    .task_block = _task_block,
+    .task_unblock = _task_unblock,
     .task_yield_noschedule = _task_yield_noschedule,
     .set_cur_task_priority = _set_cur_task_priority
 };

@@ -65,6 +65,7 @@ void dabort_handler(struct trapframe* r)
     sys_exit(cur_task);
     assert(cur_cpu()->task == NULL);
     context_switch(&cur_task->main_thread.context, cur_cpu()->scheduler);
+    panic("dabort end should never be reached.\n");
 }

 void iabort_handler(struct trapframe* r)
@@ -83,4 +84,5 @@ void iabort_handler(struct trapframe* r)
     sys_exit(cur_task);
     assert(cur_cpu()->task == NULL);
     context_switch(&cur_task->main_thread.context, cur_cpu()->scheduler);
+    panic("iabort end should never be reached.\n");
 }
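The trailing `panic` calls document an invariant rather than handle a case: after `sys_exit`, `context_switch` hands the CPU to the scheduler and never returns to an exited task, so reaching the line means a kernel bug. A generic sketch of the guard pattern, deliberately violating the assumption so the guard fires; the `_Noreturn` specifier is standard C11, not taken from this codebase:

```c
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the kernel's panic(): report, then stop. */
_Noreturn static void panic(const char* msg)
{
    fprintf(stderr, "panic: %s", msg);
    abort();
}

/* Models a call that must not return, like context_switch to the scheduler.
 * In this sketch it does return, so the guard below triggers. */
static void must_not_return(void)
{
    /* imagine a one-way transfer of control here */
}

int main(void)
{
    must_not_return();
    /* Reaching this point means the "never returns" assumption was violated. */
    panic("end should never be reached.\n");
}
```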