fix scheduler bug: replace the single server_lock with per-resource mutexes (scheduler/queue/worker/task locks), add a dedicated workers_queue with LIGHT/HEAVY task types, and factor queue cleanup into destroy_queue()

This commit is contained in:
Xuan Sang LE 2018-09-27 15:00:19 +02:00
parent d50b1352f6
commit cbd574cbc0
3 changed files with 143 additions and 71 deletions

View File

@ -4,18 +4,15 @@
private data & methods
*/
static antd_scheduler_t scheduler;
/* enqueue: append `task` to a task queue unless a task with the same id is
 * already present (duplicates are logged and ignored).
 * NOTE(review): this is a diff view with +/- markers stripped — removed and
 * added lines are interleaved. The first signature line is the pre-commit
 * version; the second (taking the queue explicitly) replaces it so the same
 * helper can serve both the priority queues and the new workers_queue. */
static void enqueue(antd_task_t* task)
static void enqueue(antd_task_queue_t* q, antd_task_t* task)
{
// check whether a task with this id is already queued
// NOTE(review): the next three lines are the removed pre-commit version
// (queue selected here by priority); the `*q` line is their replacement —
// priority clamping moved to the caller (antd_add_task).
int prio = task->priority>N_PRIORITY-1?N_PRIORITY-1:task->priority;
antd_task_queue_t q = scheduler.task_queue[prio];
antd_task_item_t it = q;
antd_task_item_t it = *q;
// walk until we hit a matching id or the tail of the list
while(it && it->task->id != task->id && it->next != NULL)
it = it->next;
if(it && it->task->id == task->id)
{
// duplicate: leave the existing entry in place and bail out
LOG("Task %d exists, ignore it\n", task->id);
return;
}
// allocate a fresh list node for the task
antd_task_item_t taski = (antd_task_item_t)malloc(sizeof *taski);
@ -23,7 +20,7 @@ static void enqueue(antd_task_t* task)
taski->next = NULL;
if(!it) // empty queue: new node becomes the head
{
// pre-commit line (replaced by the `*q = taski;` form below)
scheduler.task_queue[prio] = taski;
*q = taski;
}
else
{
@ -33,26 +30,35 @@ static void enqueue(antd_task_t* task)
/* working: report whether the scheduler is running (status != 0).
 * NOTE(review): diff view — the bare `return scheduler.status;` is the
 * removed unlocked read; the commit replaces it with a read guarded by
 * scheduler_lock so worker threads see a consistent status flag. */
static int working()
{
return scheduler.status;
int stat;
pthread_mutex_lock(&scheduler.scheduler_lock);
stat = scheduler.status;
pthread_mutex_unlock(&scheduler.scheduler_lock);
return stat;
}
/* stop: clear the running flag, join all worker threads, then release
 * worker storage and destroy every scheduler mutex.
 * NOTE(review): diff view — the server_lock lock/unlock pair is the removed
 * pre-commit version; the scheduler_lock pair replaces it. */
static void stop()
{
pthread_mutex_lock(&scheduler.server_lock);
pthread_mutex_lock(&scheduler.scheduler_lock);
scheduler.status = 0;
pthread_mutex_unlock(&scheduler.server_lock);
pthread_mutex_unlock(&scheduler.scheduler_lock);
// wait for every worker thread to observe status==0 and exit
for (int i = 0; i < scheduler.n_workers; i++)
pthread_join(scheduler.workers[i].pid, NULL);
// NOTE(review): the NULL guard is redundant — free(NULL) is a no-op
if(scheduler.workers) free(scheduler.workers);
// destroy all the mutexes (scheduler is being torn down; no contention)
pthread_mutex_destroy(&scheduler.scheduler_lock);
pthread_mutex_destroy(&scheduler.task_lock);
pthread_mutex_destroy(&scheduler.queue_lock);
pthread_mutex_destroy(&scheduler.worker_lock);
}
/* dequeue: pop and return the head of a task queue, or NULL if empty.
 * The popped node is detached (next = NULL). Caller must hold the lock
 * protecting the queue — this helper does no locking of its own.
 * NOTE(review): diff view — the `int priority` signature and the two
 * scheduler.task_queue[prio] lines are the removed pre-commit version;
 * the `antd_task_queue_t* q` form replaces them. */
static antd_task_item_t dequeue(int priority)
static antd_task_item_t dequeue(antd_task_queue_t* q)
{
int prio = priority>N_PRIORITY-1?N_PRIORITY-1:priority;
antd_task_item_t it = scheduler.task_queue[prio];
antd_task_item_t it = *q;
if(it)
{
// advance the head and detach the popped node
scheduler.task_queue[prio] = it->next;
*q = it->next;
it->next = NULL;
}
return it;
}
@ -60,26 +66,13 @@ static antd_task_item_t dequeue(int priority)
/* next_task: fetch the next task item for a worker, or NULL if none.
 * NOTE(review): diff view — the removed pre-commit version scanned the
 * priority queues under server_lock; the new version pops only from the
 * dedicated workers_queue under queue_lock (priority queues are now
 * drained by antd_task_schedule on the leader thread). */
static antd_task_item_t next_task()
{
antd_task_item_t it = NULL;
pthread_mutex_lock(&scheduler.server_lock);
for(int i = 0; i< N_PRIORITY; i++)
{
it = dequeue(i);
if(it) break;
}
pthread_mutex_unlock(&scheduler.server_lock);
pthread_mutex_lock(&scheduler.queue_lock);
it = dequeue(&scheduler.workers_queue);
pthread_mutex_unlock(&scheduler.queue_lock);
return it;
}
/* available_workers: count workers whose status is 0 (idle).
 * NOTE(review): this whole static helper is REMOVED by the commit — it is
 * superseded by the public antd_available_workers(), which takes
 * worker_lock instead of the retired server_lock. */
static int available_workers()
{
int n = 0;
pthread_mutex_lock(&scheduler.server_lock);
for(int i=0; i < scheduler.n_workers; i++)
if(scheduler.workers[i].status == 0) n++;
pthread_mutex_unlock(&scheduler.server_lock);
return n;
}
static antd_callback_t* callback_of( void* (*callback)(void*) )
{
antd_callback_t* cb = NULL;
@ -138,28 +131,48 @@ static void work(void* data)
}
}
/* destroy_queue: free an entire task queue — for each node, release the
 * task's callback chain, the task itself, then the list node. New helper
 * added by this commit; used at shutdown for both the priority queues and
 * the workers_queue. Caller must ensure no other thread touches `q`. */
static void destroy_queue(antd_task_queue_t q)
{
antd_task_item_t it, curr;
it = q;
while(it)
{
// first free the task payload (callback chain, then the task)
if(it->task && it->task->callback) free_callback(it->task->callback);
if(it->task) free(it->task);
// then free the list node, advancing first so we don't use freed memory
curr = it;
it = it->next;
free(curr);
}
}
/*
Main API methods
init the main scheduler
*/
/* antd_available_workers: public replacement for the removed static
 * available_workers(). Counts idle workers (status == 0) under
 * worker_lock, the new dedicated mutex for worker state. */
int antd_available_workers()
{
int n = 0;
pthread_mutex_lock(&scheduler.worker_lock);
for(int i=0; i < scheduler.n_workers; i++)
if(scheduler.workers[i].status == 0) n++;
pthread_mutex_unlock(&scheduler.worker_lock);
return n;
}
/*
* assign task to a worker
*/
/*
 * assign a task to a worker: pop the next item from the workers queue,
 * mark the worker busy (status=1) or idle (status=0), then execute.
 * NOTE(review): diff view — the server_lock sections are the removed
 * pre-commit version; the worker_lock lines are their replacement.
 * NOTE(review): POSSIBLE LOCK LEAK — if the `if(!it) return;` line survives
 * in the post-commit version, it returns while worker_lock (taken just
 * above) is still held, deadlocking every later caller. Verify against the
 * full post-commit file; the early return should happen after the unlock.
 */
void antd_attach_task(antd_worker_t* worker)
{
antd_task_item_t it;
// removed pre-commit block: worker marked idle under server_lock
pthread_mutex_lock(&scheduler.server_lock);
worker->status = 0;
pthread_mutex_unlock(&scheduler.server_lock);
// fetch the next item in the workers queue
pthread_mutex_lock(&scheduler.worker_lock);
it = next_task();
if(!it) return;
pthread_mutex_lock(&scheduler.server_lock);
worker->status = 0;
if(it)
worker->status = 1;
pthread_mutex_unlock(&scheduler.server_lock);
pthread_mutex_unlock(&scheduler.worker_lock);
// execute the task outside any lock
antd_execute_task(it);
}
@ -169,6 +182,12 @@ void antd_scheduler_init(int n)
srand((unsigned) time(&t));
scheduler.n_workers = n;
scheduler.status = 1;
scheduler.workers_queue = NULL;
// init lock
pthread_mutex_init(&scheduler.scheduler_lock,NULL);
pthread_mutex_init(&scheduler.task_lock,NULL);
pthread_mutex_init(&scheduler.worker_lock,NULL);
pthread_mutex_init(&scheduler.queue_lock,NULL);
for(int i = 0; i < N_PRIORITY; i++) scheduler.task_queue[i] = NULL;
// create scheduler.workers
if(n > 0)
@ -201,26 +220,17 @@ void antd_task_unlock()
}
/*
destroy all pending tasks and shut the scheduler down
NOTE(review): the pthread_mutex_lock(&scheduler.queue_lock); line that sat
inside this comment in the commit looks like an accidental paste — it is
dead text, not code, and should be removed.
*/
void antd_scheduler_destroy()
{
// stop workers first, then free every remaining queue
// NOTE(review): diff view — the declaration and the inline while-loop are
// the removed pre-commit version; the destroy_queue() calls replace them.
antd_task_item_t it, curr;
stop();
for(int i=0; i < N_PRIORITY; i++)
{
it = scheduler.task_queue[i];
while(it)
{
// first free the task
if(it->task && it->task->callback) free_callback(it->task->callback);
if(it->task) free(it->task);
// then free the placeholder
curr = it;
it = it->next;
free(curr);
}
destroy_queue(scheduler.task_queue[i]);
}
// the workers_queue is new in this commit and must be drained too
destroy_queue(scheduler.workers_queue);
}
/*
@ -235,6 +245,7 @@ antd_task_t* antd_create_task(void* (*handle)(void*), void *data, void* (*callba
task->handle = handle;
task->callback = callback_of(callback);
task->priority = NORMAL_PRIORITY;
task->type = LIGHT;
return task;
}
@ -243,14 +254,17 @@ antd_task_t* antd_create_task(void* (*handle)(void*), void *data, void* (*callba
*/
/* antd_add_task: clamp the task's priority into [0, N_PRIORITY-1] and
 * enqueue it on the matching priority queue under scheduler_lock.
 * Duplicate-id detection happens inside enqueue(), not here.
 * NOTE(review): diff view — the server_lock/enqueue(task) trio is the
 * removed pre-commit version; the prio + scheduler_lock lines replace it. */
void antd_add_task(antd_task_t* task)
{
pthread_mutex_lock(&scheduler.server_lock);
enqueue(task);
pthread_mutex_unlock(&scheduler.server_lock);
// clamp priority into range; the duplicate check lives in enqueue()
int prio = task->priority>N_PRIORITY-1?N_PRIORITY-1:task->priority;
pthread_mutex_lock(&scheduler.scheduler_lock);
enqueue(&scheduler.task_queue[prio], task);
pthread_mutex_unlock(&scheduler.scheduler_lock);
}
/* antd_execute_task: run a task item's handle with its user data; the
 * commit adds a NULL guard on the item. The rest of the body (handling a
 * returned follow-up task) is elided by the diff hunk below — the trailing
 * braces close code not visible in this view. */
void antd_execute_task(antd_task_item_t taski)
{
if(!taski) return;
// execute the task's handler with its user data
void *ret = (*(taski->task->handle))(taski->task->data);
// check the return data if it is a new task
@ -289,22 +303,69 @@ void antd_execute_task(antd_task_item_t taski)
}
}
}
/* antd_has_pending_task: return 1 if any priority queue or the workers
 * queue is non-empty, else 0.
 * NOTE(review): diff view — the old name antd_scheduler_busy() on the first
 * line, the available_workers() early-return, and the server_lock pair are
 * all removed; busy-ness is now computed separately in the new
 * antd_scheduler_busy() below. */
int antd_scheduler_busy()
int antd_has_pending_task()
{
int ret = 0;
if(available_workers() != scheduler.n_workers) return 1;
pthread_mutex_lock(&scheduler.server_lock);
pthread_mutex_lock(&scheduler.scheduler_lock);
// any non-empty priority queue means pending work
for(int i = 0; i < N_PRIORITY; i++)
if(scheduler.task_queue[i] != NULL)
{
ret = 1;
break;
}
pthread_mutex_unlock(&scheduler.server_lock);
pthread_mutex_unlock(&scheduler.scheduler_lock);
if(!ret)
{
// also check the delegated workers queue, under its own lock
pthread_mutex_lock(&scheduler.queue_lock);
ret = (scheduler.workers_queue != NULL);
pthread_mutex_unlock(&scheduler.queue_lock);
}
return ret;
}
/* antd_scheduler_busy: the scheduler is busy when any worker is not idle
 * or any queue still holds tasks. n_workers is read unlocked — set once
 * at init, presumably never changed afterwards (TODO confirm). */
int antd_scheduler_busy()
{
if(antd_available_workers() != scheduler.n_workers) return 1;
return antd_has_pending_task();
}
/* antd_scheduler_status: raw read of the running flag.
 * NOTE(review): unlike working(), this reads scheduler.status without
 * taking scheduler_lock — inconsistent with the locking discipline this
 * very commit introduces; consider routing through working(). */
int antd_scheduler_status()
{
return scheduler.status;
}
/* antd_task_schedule: leader-thread scheduling step, new in this commit.
 * Pops the highest-priority pending task; LIGHT tasks (or any task when
 * there are no workers) run inline, HEAVY tasks are pushed onto the
 * workers_queue for a worker thread to pick up via antd_attach_task(). */
void antd_task_schedule()
{
// fetch next task from the priority queues, highest priority first
antd_task_item_t it = NULL;
pthread_mutex_lock(&scheduler.scheduler_lock);
for(int i = 0; i< N_PRIORITY; i++)
{
it = dequeue(&scheduler.task_queue[i]);
if(it)
break;
}
pthread_mutex_unlock(&scheduler.scheduler_lock);
if(!it)
{
return;
}
// has the task now
// check the task type
if(it->task->type == LIGHT || scheduler.n_workers <= 0)
{
// LIGHT (or no workers at all): execute directly on this thread
antd_execute_task(it);
}
else
{
// HEAVY: delegate by re-enqueueing the task on the workers queue;
// only the list node is freed here — enqueue() wraps the task anew.
// NOTE(review): if enqueue() rejects the task as a duplicate id, the
// task is neither queued nor freed — possible leak; verify.
LOG("delegate to workers\n");
pthread_mutex_lock(&scheduler.queue_lock);
enqueue(&scheduler.workers_queue, it->task);
free(it);
pthread_mutex_unlock(&scheduler.queue_lock);
}
}

View File

@ -9,6 +9,7 @@
#define LOW_PRIORITY (N_PRIORITY - 1)
#define HIGH_PRIORITY 0
/* Task weight, new in this commit: LIGHT tasks are executed inline by the
 * scheduling (leader) thread; HEAVY tasks are delegated to worker threads
 * via the workers_queue (see antd_task_schedule in the .c file). */
typedef enum { LIGHT, HEAVY } antd_task_type_t;
// callback definition
typedef struct __callback_t{
void* (*handle)(void*);
@ -38,7 +39,13 @@ typedef struct {
user data if any
*/
void * data;
/*
type of a task
light task is executed directly by
the leader
heavy tasks is delegated to workers
*/
antd_task_type_t type;
} antd_task_t;
typedef struct {
@ -55,9 +62,12 @@ typedef struct __task_item_t{
typedef antd_task_item_t antd_task_queue_t;
typedef struct {
pthread_mutex_t server_lock;
pthread_mutex_t queue_lock;
pthread_mutex_t scheduler_lock;
pthread_mutex_t worker_lock;
pthread_mutex_t task_lock;
antd_task_queue_t task_queue[N_PRIORITY];
antd_task_queue_t workers_queue;
uint8_t status; // 0 stop, 1 working
antd_worker_t* workers;
int n_workers;
@ -95,4 +105,7 @@ void antd_execute_task(antd_task_item_t);
/* 1 if any worker is busy or any queue holds tasks */
int antd_scheduler_busy();
/* worker-thread entry step: pop from workers_queue and execute */
void antd_attach_task(antd_worker_t* worker);
/* leader-thread step: run LIGHT tasks inline, delegate HEAVY ones */
void antd_task_schedule();
/* count idle workers (status == 0), new public API in this commit */
int antd_available_workers();
/* 1 if any priority queue or the workers queue is non-empty */
int antd_has_pending_task();
#endif

12
relay.c
View File

@ -90,8 +90,6 @@ int main(int argc, char* argv[])
timeout.tv_usec = 500;
// 0 worker
antd_scheduler_init(0);
antd_worker_t worker;
worker.status = 0;
// set server socket to non blocking
fcntl(server_sock, F_SETFL, O_NONBLOCK); /* Change the socket into non-blocking state */
LOG("relayd running on port %d\n", port);
@ -99,7 +97,7 @@ int main(int argc, char* argv[])
while (antd_scheduler_status())
{
// execute task
antd_attach_task(&worker);
antd_task_schedule();
client_sock = accept(server_sock,(struct sockaddr *)&client_name,&client_name_len);
if (client_sock == -1)
{
@ -108,11 +106,11 @@ int main(int argc, char* argv[])
antd_client_t* client = (antd_client_t*)malloc(sizeof(antd_client_t));
// set timeout to socket
if (setsockopt (client_sock, SOL_SOCKET, SO_RCVTIMEO, (char *)&timeout,sizeof(timeout)) < 0)
perror("setsockopt failed\n");
//if (setsockopt (client_sock, SOL_SOCKET, SO_RCVTIMEO, (char *)&timeout,sizeof(timeout)) < 0)
// perror("setsockopt failed\n");
if (setsockopt (client_sock, SOL_SOCKET, SO_SNDTIMEO, (char *)&timeout,sizeof(timeout)) < 0)
perror("setsockopt failed\n");
//if (setsockopt (client_sock, SOL_SOCKET, SO_SNDTIMEO, (char *)&timeout,sizeof(timeout)) < 0)
// perror("setsockopt failed\n");
/*
get the remote IP