mirror of https://github.com/lxsang/ant-http synced 2024-07-03 13:39:46 +02:00

fix scheduler bug

Xuan Sang LE 2018-09-27 15:00:19 +02:00
parent d50b1352f6
commit cbd574cbc0
3 changed files with 143 additions and 71 deletions

View File

@@ -4,18 +4,15 @@
     private data & methods
 */
 static antd_scheduler_t scheduler;
-static void enqueue(antd_task_t* task)
+static void enqueue(antd_task_queue_t* q, antd_task_t* task)
 {
-    // check if task is exist
-    int prio = task->priority>N_PRIORITY-1?N_PRIORITY-1:task->priority;
-    antd_task_queue_t q = scheduler.task_queue[prio];
-    antd_task_item_t it = q;
+    antd_task_item_t it = *q;
     while(it && it->task->id != task->id && it->next != NULL)
         it = it->next;
     if(it && it->task->id == task->id)
     {
         LOG("Task %d exists, ignore it\n", task->id);
-        //assert(it->task->id == task->id );
         return;
     }
     antd_task_item_t taski = (antd_task_item_t)malloc(sizeof *taski);
@@ -23,7 +20,7 @@ static void enqueue(antd_task_t* task)
     taski->next = NULL;
     if(!it) // first task
     {
-        scheduler.task_queue[prio] = taski;
+        *q = taski;
     }
     else
     {
@@ -33,26 +30,35 @@ static void enqueue(antd_task_t* task)
 static int working()
 {
-    return scheduler.status;
+    int stat;
+    pthread_mutex_lock(&scheduler.scheduler_lock);
+    stat = scheduler.status;
+    pthread_mutex_unlock(&scheduler.scheduler_lock);
+    return stat;
 }
 static void stop()
 {
-    pthread_mutex_lock(&scheduler.server_lock);
+    pthread_mutex_lock(&scheduler.scheduler_lock);
     scheduler.status = 0;
-    pthread_mutex_unlock(&scheduler.server_lock);
+    pthread_mutex_unlock(&scheduler.scheduler_lock);
     for (int i = 0; i < scheduler.n_workers; i++)
         pthread_join(scheduler.workers[i].pid, NULL);
     if(scheduler.workers) free(scheduler.workers);
+    // destroy all the mutexes
+    pthread_mutex_destroy(&scheduler.scheduler_lock);
+    pthread_mutex_destroy(&scheduler.task_lock);
+    pthread_mutex_destroy(&scheduler.queue_lock);
+    pthread_mutex_destroy(&scheduler.worker_lock);
 }
-static antd_task_item_t dequeue(int priority)
+static antd_task_item_t dequeue(antd_task_queue_t* q)
 {
-    int prio = priority>N_PRIORITY-1?N_PRIORITY-1:priority;
-    antd_task_item_t it = scheduler.task_queue[prio];
+    antd_task_item_t it = *q;
     if(it)
     {
-        scheduler.task_queue[prio] = it->next;
+        *q = it->next;
+        it->next = NULL;
     }
     return it;
 }
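The key change above: enqueue() and dequeue() now receive the queue head as a parameter instead of indexing the global priority array, so one implementation serves both the per-priority queues and the new workers_queue. A minimal standalone sketch of that list discipline, with simplified stand-in types, no locking, and illustrative names (push/pop/item_t are not the library's):

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct item { int id; struct item* next; } item_t;
    typedef item_t* queue_t; /* a queue is just a pointer to its head item */

    /* append at the tail, ignoring duplicate ids (same check as enqueue above) */
    static void push(queue_t* q, int id)
    {
        item_t* it = *q;
        while (it && it->id != id && it->next) it = it->next;
        if (it && it->id == id) return; /* already queued, ignore */
        item_t* n = malloc(sizeof *n);
        n->id = id;
        n->next = NULL;
        if (!it) *q = n; else it->next = n;
    }

    /* pop the head; setting it->next = NULL detaches the item from the
       chain, mirroring the line this commit adds to dequeue() */
    static item_t* pop(queue_t* q)
    {
        item_t* it = *q;
        if (it) { *q = it->next; it->next = NULL; }
        return it;
    }

    int main(void)
    {
        queue_t q = NULL;
        push(&q, 1); push(&q, 2); push(&q, 1); /* duplicate id, dropped */
        for (item_t* it; (it = pop(&q)); free(it))
            printf("task %d\n", it->id);       /* prints 1 then 2 */
        return 0;
    }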
@@ -60,26 +66,13 @@ static antd_task_item_t dequeue(int priority)
 static antd_task_item_t next_task()
 {
     antd_task_item_t it = NULL;
-    pthread_mutex_lock(&scheduler.server_lock);
-    for(int i = 0; i< N_PRIORITY; i++)
-    {
-        it = dequeue(i);
-        if(it) break;
-    }
-    pthread_mutex_unlock(&scheduler.server_lock);
+    pthread_mutex_lock(&scheduler.queue_lock);
+    it = dequeue(&scheduler.workers_queue);
+    pthread_mutex_unlock(&scheduler.queue_lock);
     return it;
 }
-static int available_workers()
-{
-    int n = 0;
-    pthread_mutex_lock(&scheduler.server_lock);
-    for(int i=0; i < scheduler.n_workers; i++)
-        if(scheduler.workers[i].status == 0) n++;
-    pthread_mutex_unlock(&scheduler.server_lock);
-    return n;
-}
 static antd_callback_t* callback_of( void* (*callback)(void*) )
 {
     antd_callback_t* cb = NULL;
@@ -138,28 +131,48 @@ static void work(void* data)
     }
 }
+static void destroy_queue(antd_task_queue_t q)
+{
+    antd_task_item_t it, curr;
+    it = q;
+    while(it)
+    {
+        // first free the task
+        if(it->task && it->task->callback) free_callback(it->task->callback);
+        if(it->task) free(it->task);
+        // then free the placeholder
+        curr = it;
+        it = it->next;
+        free(curr);
+    }
+}
 /*
     Main API methods
     init the main scheduler
 */
+int antd_available_workers()
+{
+    int n = 0;
+    pthread_mutex_lock(&scheduler.worker_lock);
+    for(int i=0; i < scheduler.n_workers; i++)
+        if(scheduler.workers[i].status == 0) n++;
+    pthread_mutex_unlock(&scheduler.worker_lock);
+    return n;
+}
 /*
 * assign task to a worker
 */
 void antd_attach_task(antd_worker_t* worker)
 {
     antd_task_item_t it;
-    pthread_mutex_lock(&scheduler.server_lock);
-    worker->status = 0;
-    pthread_mutex_unlock(&scheduler.server_lock);
-    // fetch the next in queue
+    pthread_mutex_lock(&scheduler.worker_lock);
     it = next_task();
-    if(!it) return;
-    //LOG("worker processing \n");
-    pthread_mutex_lock(&scheduler.server_lock);
-    worker->status = 1;
-    pthread_mutex_unlock(&scheduler.server_lock);
+    worker->status = 0;
+    if(it)
+        worker->status = 1;
+    pthread_mutex_unlock(&scheduler.worker_lock);
     // execute the task
-    //LOG("task executed by worker %d\n", worker->pid);
     antd_execute_task(it);
 }
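antd_attach_task() now takes and releases only worker_lock and no longer returns early on an empty queue; the NULL guard added to antd_execute_task() later in this diff makes the empty case safe. The body of work() is not part of this hunk (only its signature appears in the hunk header), so the following worker-thread loop is only a plausible sketch; worker_loop is a hypothetical name, while working() and antd_worker_t come from the surrounding code:

    /* hypothetical worker thread body; the real work() is not shown here */
    static void* worker_loop(void* data)
    {
        antd_worker_t* worker = (antd_worker_t*)data;
        while (working())              /* scheduler.status, read under its lock */
        {
            /* pop one item from workers_queue and execute it; with the new
               NULL guard this is harmless when the queue is empty */
            antd_attach_task(worker);
        }
        return NULL;
    }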
@@ -169,6 +182,12 @@ void antd_scheduler_init(int n)
     srand((unsigned) time(&t));
     scheduler.n_workers = n;
     scheduler.status = 1;
+    scheduler.workers_queue = NULL;
+    // init the locks
+    pthread_mutex_init(&scheduler.scheduler_lock, NULL);
+    pthread_mutex_init(&scheduler.task_lock, NULL);
+    pthread_mutex_init(&scheduler.worker_lock, NULL);
+    pthread_mutex_init(&scheduler.queue_lock, NULL);
     for(int i = 0; i < N_PRIORITY; i++) scheduler.task_queue[i] = NULL;
     // create scheduler.workers
     if(n > 0)
@@ -201,26 +220,17 @@ void antd_task_unlock()
 }
 /*
     destroy all pending task
+    pthread_mutex_lock(&scheduler.queue_lock);
 */
 void antd_scheduler_destroy()
 {
     // free all the chains
-    antd_task_item_t it, curr;
     stop();
     for(int i=0; i < N_PRIORITY; i++)
     {
-        it = scheduler.task_queue[i];
-        while(it)
-        {
-            // first free the task
-            if(it->task && it->task->callback) free_callback(it->task->callback);
-            if(it->task) free(it->task);
-            // then free the placeholder
-            curr = it;
-            it = it->next;
-            free(curr);
-        }
+        destroy_queue(scheduler.task_queue[i]);
     }
+    destroy_queue(scheduler.workers_queue);
 }
/* /*
@ -235,6 +245,7 @@ antd_task_t* antd_create_task(void* (*handle)(void*), void *data, void* (*callba
task->handle = handle; task->handle = handle;
task->callback = callback_of(callback); task->callback = callback_of(callback);
task->priority = NORMAL_PRIORITY; task->priority = NORMAL_PRIORITY;
task->type = LIGHT;
return task; return task;
} }
@@ -243,14 +254,17 @@ antd_task_t* antd_create_task(void* (*handle)(void*), void *data, void* (*callba
 */
 void antd_add_task(antd_task_t* task)
 {
-    pthread_mutex_lock(&scheduler.server_lock);
-    enqueue(task);
-    pthread_mutex_unlock(&scheduler.server_lock);
+    // clamp priority to the valid range; enqueue() drops duplicates
+    int prio = task->priority>N_PRIORITY-1?N_PRIORITY-1:task->priority;
+    pthread_mutex_lock(&scheduler.scheduler_lock);
+    enqueue(&scheduler.task_queue[prio], task);
+    pthread_mutex_unlock(&scheduler.scheduler_lock);
 }
 void antd_execute_task(antd_task_item_t taski)
 {
+    if(!taski) return;
     // execute the task
     void *ret = (*(taski->task->handle))(taski->task->data);
     // check the return data if it is a new task
@@ -289,22 +303,69 @@ void antd_execute_task(antd_task_item_t taski)
         }
     }
 }
-int antd_scheduler_busy()
+int antd_has_pending_task()
 {
     int ret = 0;
-    if(available_workers() != scheduler.n_workers) return 1;
-    pthread_mutex_lock(&scheduler.server_lock);
+    pthread_mutex_lock(&scheduler.scheduler_lock);
     for(int i = 0; i < N_PRIORITY; i++)
         if(scheduler.task_queue[i] != NULL)
         {
             ret = 1;
             break;
         }
-    pthread_mutex_unlock(&scheduler.server_lock);
+    pthread_mutex_unlock(&scheduler.scheduler_lock);
+    if(!ret)
+    {
+        pthread_mutex_lock(&scheduler.queue_lock);
+        ret = (scheduler.workers_queue != NULL);
+        pthread_mutex_unlock(&scheduler.queue_lock);
+    }
     return ret;
 }
+int antd_scheduler_busy()
+{
+    if(antd_available_workers() != scheduler.n_workers) return 1;
+    return antd_has_pending_task();
+}
 int antd_scheduler_status()
 {
     return scheduler.status;
 }
+void antd_task_schedule()
+{
+    // fetch the next task from the task_queue
+    antd_task_item_t it = NULL;
+    pthread_mutex_lock(&scheduler.scheduler_lock);
+    for(int i = 0; i < N_PRIORITY; i++)
+    {
+        it = dequeue(&scheduler.task_queue[i]);
+        if(it)
+            break;
+    }
+    pthread_mutex_unlock(&scheduler.scheduler_lock);
+    if(!it)
+    {
+        return;
+    }
+    // we have a task now
+    // check the type of task
+    if(it->task->type == LIGHT || scheduler.n_workers <= 0)
+    {
+        // do it by myself
+        antd_execute_task(it);
+    }
+    else
+    {
+        // delegate to other workers by
+        // pushing to the worker queue
+        LOG("delegate to workers\n");
+        pthread_mutex_lock(&scheduler.queue_lock);
+        enqueue(&scheduler.workers_queue, it->task);
+        free(it);
+        pthread_mutex_unlock(&scheduler.queue_lock);
+    }
+}
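antd_task_schedule() is the new leader-side entry point: it drains the priority queues, executes LIGHT tasks inline, and defers HEAVY tasks to workers_queue for antd_attach_task() to pick up. A minimal single-threaded driver in the spirit of relay.c below, assuming the header is named scheduler.h (the file names are not shown on this page) and with run_once as a made-up task body:

    #include <stdio.h>
    #include "scheduler.h" /* assumed header name; not shown in this diff */

    /* hypothetical task body; any void* (*)(void*) handle works here */
    static void* run_once(void* data)
    {
        printf("task says: %s\n", (const char*)data);
        return NULL; /* no follow-up task */
    }

    int main(void)
    {
        antd_scheduler_init(0);   /* zero workers: the leader runs everything */
        antd_add_task(antd_create_task(run_once, "demo", NULL));
        while (antd_scheduler_busy())
            antd_task_schedule(); /* with n_workers <= 0 even HEAVY runs inline */
        antd_scheduler_destroy();
        return 0;
    }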

View File

@@ -9,6 +9,7 @@
 #define LOW_PRIORITY (N_PRIORITY - 1)
 #define HIGH_PRIORITY 0
+typedef enum { LIGHT, HEAVY } antd_task_type_t;
 // callback definition
 typedef struct __callback_t{
     void* (*handle)(void*);
@@ -37,8 +38,14 @@ typedef struct {
     /*
         user data if any
     */
     void * data;
+    /*
+        type of a task:
+        a light task is executed directly by
+        the leader; heavy tasks are delegated
+        to the workers
+    */
+    antd_task_type_t type;
 } antd_task_t;
 typedef struct {
@@ -55,9 +62,12 @@ typedef struct __task_item_t{
 typedef antd_task_item_t antd_task_queue_t;
 typedef struct {
-    pthread_mutex_t server_lock;
+    pthread_mutex_t queue_lock;
+    pthread_mutex_t scheduler_lock;
+    pthread_mutex_t worker_lock;
     pthread_mutex_t task_lock;
     antd_task_queue_t task_queue[N_PRIORITY];
+    antd_task_queue_t workers_queue;
     uint8_t status; // 0 stop, 1 working
     antd_worker_t* workers;
     int n_workers;
@@ -95,4 +105,7 @@ void antd_execute_task(antd_task_item_t);
 int antd_scheduler_busy();
 void antd_attach_task(antd_worker_t* worker);
+void antd_task_schedule();
+int antd_available_workers();
+int antd_has_pending_task();
 #endif
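The header changes make the split visible to callers: tasks now carry a type, the scheduler owns four locks plus a workers_queue, and antd_task_schedule(), antd_available_workers(), and antd_has_pending_task() join the public API. A hedged sketch of marking an expensive job HEAVY so the leader delegates it instead of blocking on it (handle_client, finish_client, and submit_heavy are hypothetical names, not part of the library):

    #include "scheduler.h" /* assumed header name */

    /* hypothetical handle and callback, for illustration only */
    static void* handle_client(void* data) { /* expensive I/O here */ return data; }
    static void* finish_client(void* data) { (void)data; return NULL; }

    static void submit_heavy(void* client)
    {
        antd_task_t* task = antd_create_task(handle_client, client, finish_client);
        task->priority = HIGH_PRIORITY; /* queue 0, drained first by the leader */
        task->type = HEAVY;             /* pushed to workers_queue, not run inline */
        antd_add_task(task);
    }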

relay.c
View File

@@ -90,8 +90,6 @@ int main(int argc, char* argv[])
     timeout.tv_usec = 500;
     // 0 worker
     antd_scheduler_init(0);
-    antd_worker_t worker;
-    worker.status = 0;
     // set server socket to non blocking
     fcntl(server_sock, F_SETFL, O_NONBLOCK); /* Change the socket into non-blocking state */
     LOG("relayd running on port %d\n", port);
@@ -99,7 +97,7 @@ int main(int argc, char* argv[])
     while (antd_scheduler_status())
     {
         // execute task
-        antd_attach_task(&worker);
+        antd_task_schedule();
         client_sock = accept(server_sock,(struct sockaddr *)&client_name,&client_name_len);
         if (client_sock == -1)
         {
@@ -108,11 +106,11 @@ int main(int argc, char* argv[])
     antd_client_t* client = (antd_client_t*)malloc(sizeof(antd_client_t));
     // set timeout to socket
-    if (setsockopt (client_sock, SOL_SOCKET, SO_RCVTIMEO, (char *)&timeout,sizeof(timeout)) < 0)
-        perror("setsockopt failed\n");
-    if (setsockopt (client_sock, SOL_SOCKET, SO_SNDTIMEO, (char *)&timeout,sizeof(timeout)) < 0)
-        perror("setsockopt failed\n");
+    //if (setsockopt (client_sock, SOL_SOCKET, SO_RCVTIMEO, (char *)&timeout,sizeof(timeout)) < 0)
+    //    perror("setsockopt failed\n");
+    //if (setsockopt (client_sock, SOL_SOCKET, SO_SNDTIMEO, (char *)&timeout,sizeof(timeout)) < 0)
+    //    perror("setsockopt failed\n");
     /*
         get the remote IP