clamd: revert patch from bb#1028 (bb#1113)

git-svn: trunk@4029
0.95
Tomasz Kojm 17 years ago
parent 2b9e5d2971
commit 5a66732fdc
  ChangeLog         |   4
  clamd/server-th.c |  26
  clamd/thrmgr.c    | 132
  clamd/thrmgr.h    |  17

@@ -1,3 +1,7 @@
Tue Jul 29 23:18:23 CEST 2008 (tk)
----------------------------------
* clamd: revert patch from bb#1028 (bb#1113)
Tue Jul 29 13:18:24 EEST 2008 (edwin)
------------------------------------
* libclamav/regex_*.[ch]: handle multiple matches (bb #1110)

@@ -83,16 +83,6 @@ typedef struct client_conn_tag {
int nsockets;
} client_conn_t;
static void scanner_thread_cleanup(void *arg)
{
client_conn_t *conn = (client_conn_t *) arg;
shutdown(conn->sd, 2);
closesocket(conn->sd);
cl_free(conn->engine);
free(conn);
}
static void scanner_thread(void *arg)
{
client_conn_t *conn = (client_conn_t *) arg;
@@ -120,9 +110,6 @@ static void scanner_thread(void *arg)
if(!timeout)
timeout = -1;
/* register cleanup procedure in this thread */
pthread_cleanup_push(scanner_thread_cleanup, arg);
do {
ret = command(conn->sd, conn->engine, conn->limits, conn->options, conn->copt, timeout);
if (ret < 0) {
@@ -168,7 +155,10 @@ static void scanner_thread(void *arg)
}
} while (session);
pthread_cleanup_pop(1);
shutdown(conn->sd, 2);
closesocket(conn->sd);
cl_free(conn->engine);
free(conn);
return;
}
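
What the revert removes here is the pthread cleanup-handler approach: scanner_thread_cleanup was registered with pthread_cleanup_push so the client socket and engine were released even if the worker thread stopped mid-session, and the revert goes back to tearing them down inline at the end of scanner_thread. Below is a minimal standalone sketch of that push/pop idiom, not part of this commit; conn_t and release_conn are made-up stand-ins for client_conn_t and its teardown code.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { int sd; } conn_t;

static void release_conn(void *arg)
{
    conn_t *conn = (conn_t *)arg;
    printf("releasing connection on fd %d\n", conn->sd);
    free(conn);
}

static void *worker(void *arg)
{
    /* The handler runs whether the thread returns normally, is cancelled,
     * or calls pthread_exit() between push and pop. */
    pthread_cleanup_push(release_conn, arg);
    /* ... serve the connection ... */
    pthread_cleanup_pop(1);  /* 1 = also run the handler on normal exit */
    return NULL;
}

int main(void)
{
    pthread_t tid;
    conn_t *conn = malloc(sizeof *conn);
    conn->sd = 42;
    pthread_create(&tid, NULL, worker, conn);
    pthread_join(tid, NULL);
    return 0;
}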
@@ -644,10 +634,6 @@ int acceptloop_th(int *socketds, int nsockets, struct cl_engine *engine, unsigne
pthread_mutex_lock(&reload_mutex);
if(reload) {
#ifdef OPTIMIZE_MEMORY_FOOTPRINT
/* Signal all worker threads to STOP, wait till no more active threads */
thrmgr_worker_stop_wait(thr_pool);
#endif
pthread_mutex_unlock(&reload_mutex);
engine = reload_db(engine, dboptions, copt, FALSE, &ret);
if(ret) {
@@ -665,10 +651,6 @@ int acceptloop_th(int *socketds, int nsockets, struct cl_engine *engine, unsigne
reload = 0;
time(&reloaded_time);
pthread_mutex_unlock(&reload_mutex);
#ifdef OPTIMIZE_MEMORY_FOOTPRINT
/* Resume thread pool worker threads */
thrmgr_setstate(thr_pool, POOL_VALID);
#endif
#ifdef CLAMUKO
if(cfgopt(copt, "ClamukoScanOnAccess")->enabled) {
logg("Stopping and restarting Clamuko.\n");

@@ -215,98 +215,6 @@ threadpool_t *thrmgr_new(int max_threads, int idle_timeout, void (*handler)(void
return threadpool;
}
#ifdef OPTIMIZE_MEMORY_FOOTPRINT
/**
* thrmgr_worker_stop_wait : set state to POOL_STOP, wake all worker threads, wait for them
* to exit before continuing.
*/
void thrmgr_worker_stop_wait(threadpool_t * const threadpool)
{
struct timespec timeout;
int ret_cond;
int loop = 2;
if (!threadpool || (threadpool->state != POOL_VALID)) {
return;
}
if (pthread_mutex_lock(&threadpool->pool_mutex) != 0) {
logg("!Mutex lock failed\n");
exit(-1);
}
threadpool->state = POOL_STOP;
/* wait for threads to exit */
if (threadpool->thr_alive > 0) {
#ifdef CL_DEBUG
logg("*%u active threads: waking them and entering wait loop\n", threadpool->thr_alive);
#endif
if (pthread_cond_broadcast(&(threadpool->pool_cond)) != 0) {
pthread_mutex_unlock(&threadpool->pool_mutex);
logg("!Fatal: failed in cond broadcast 'pool_cond'\n");
return;
}
}
/* now, wait for the threads to exit, make 'loop' number of tries, */
while (threadpool->thr_alive > 0 && loop--) {
#ifdef CL_DEBUG
logg("*%u active threads. Waiting.\n", threadpool->thr_alive);
#endif
timeout.tv_sec = time(NULL) + (threadpool->idle_timeout/2) + 10L;
timeout.tv_nsec = 0;
ret_cond = pthread_cond_timedwait (&threadpool->pool_cond, &threadpool->pool_mutex, &timeout);
if (ret_cond == ETIMEDOUT) {
#ifdef CL_DEBUG
logg("*%u active threads. Continue to wait.\n", threadpool->thr_alive);
#endif
} else if (ret_cond == 0) {
#ifdef CL_DEBUG
logg("*Received signal. %u active threads.\n", threadpool->thr_alive);
#endif
}
}
if (pthread_mutex_unlock(&threadpool->pool_mutex) != 0) {
logg("!Mutex unlock failed\n");
exit(-1);
}
}
#endif
#ifdef OPTIMIZE_MEMORY_FOOTPRINT
void thrmgr_setstate(threadpool_t * const threadpool, pool_state_t state )
{
if (pthread_mutex_lock(&threadpool->pool_mutex) != 0) {
logg("!Mutex lock failed\n");
exit(-1);
}
threadpool->state = state;
if (pthread_mutex_unlock(&threadpool->pool_mutex) != 0) {
logg("!Mutex unlock failed\n");
exit(-1);
}
}
#endif
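
Note that pthread_cond_timedwait, used both in thrmgr_worker_stop_wait above and in the worker loop below, takes an absolute wall-clock deadline rather than a relative delay, which is why the code builds the timespec from time(NULL) plus an offset. A tiny standalone sketch of that idiom:

#include <pthread.h>
#include <stdio.h>
#include <time.h>
#include <errno.h>

int main(void)
{
    pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
    pthread_cond_t  c = PTHREAD_COND_INITIALIZER;
    struct timespec deadline;

    deadline.tv_sec  = time(NULL) + 2;  /* absolute time: two seconds from now */
    deadline.tv_nsec = 0;

    pthread_mutex_lock(&m);
    /* Nothing ever signals c here, so the wait always times out. */
    int rc = pthread_cond_timedwait(&c, &m, &deadline);
    pthread_mutex_unlock(&m);

    if (rc == ETIMEDOUT)
        puts("timed out, as expected");
    return 0;
}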
static void *thrmgr_worker_cleanup(void *arg)
{
threadpool_t *threadpool = (threadpool_t *) arg;
if (pthread_mutex_lock(&(threadpool->pool_mutex)) != 0) {
/* Fatal error */
logg("!Fatal: mutex lock failed\n");
exit(-2);
}
(threadpool->thr_alive) && threadpool->thr_alive--;
/* logg("*Thread clean up, %u active threads.", threadpool->thr_alive); */
if (threadpool->thr_alive == 0) {
/* signal that all threads are finished */
pthread_cond_broadcast(&threadpool->pool_cond);
}
if (pthread_mutex_unlock(&(threadpool->pool_mutex)) != 0) {
/* Fatal error */
logg("!Fatal: mutex unlock failed\n");
exit(-2);
}
}
static void *thrmgr_worker(void *arg)
{
threadpool_t *threadpool = (threadpool_t *) arg;
@@ -314,9 +222,6 @@ static void *thrmgr_worker(void *arg)
int retval, must_exit = FALSE;
struct timespec timeout;
/* Register cleanup procedure for worker in current thread */
pthread_cleanup_push(thrmgr_worker_cleanup, arg);
/* loop looking for work */
for (;;) {
if (pthread_mutex_lock(&(threadpool->pool_mutex)) != 0) {
@@ -326,15 +231,16 @@ static void *thrmgr_worker(void *arg)
timeout.tv_sec = time(NULL) + threadpool->idle_timeout;
timeout.tv_nsec = 0;
threadpool->thr_idle++;
while ( must_exit == FALSE
&& ((job_data = work_queue_pop(threadpool->queue)) == NULL)
while (((job_data=work_queue_pop(threadpool->queue)) == NULL)
&& (threadpool->state != POOL_EXIT)) {
/* Sleep, awaiting wakeup */
pthread_cond_signal(&threadpool->idle_cond);
retval = pthread_cond_timedwait(&(threadpool->pool_cond),
&(threadpool->pool_mutex), &timeout);
if (retval == ETIMEDOUT)
if (retval == ETIMEDOUT) {
must_exit = TRUE;
break;
}
}
threadpool->thr_idle--;
if (threadpool->state == POOL_EXIT) {
@@ -345,17 +251,27 @@ static void *thrmgr_worker(void *arg)
logg("!Fatal: mutex unlock failed\n");
exit(-2);
}
if (must_exit) break;
if (job_data) threadpool->handler(job_data);
if (threadpool->state == POOL_STOP) break;
if (job_data) {
threadpool->handler(job_data);
} else if (must_exit) {
break;
}
}
if (pthread_mutex_lock(&(threadpool->pool_mutex)) != 0) {
/* Fatal error */
logg("!Fatal: mutex lock failed\n");
exit(-2);
}
threadpool->thr_alive--;
if (threadpool->thr_alive == 0) {
/* signal that all threads are finished */
pthread_cond_broadcast(&threadpool->pool_cond);
}
if (pthread_mutex_unlock(&(threadpool->pool_mutex)) != 0) {
/* Fatal error */
logg("!Fatal: mutex unlock failed\n");
exit(-2);
}
#ifdef HAVE_PTHREAD_YIELD
pthread_yield(); /* do not remove on preemptive kernel, e.g. Linux 2.6 */
#elif HAVE_SCHED_YIELD
sched_yield();
#endif
pthread_cleanup_pop(1);
return NULL;
}
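
On the yield at the end of the worker: pthread_yield() is the older, non-portable spelling and sched_yield() the POSIX-standard one, which is why the code probes for both. A one-call standalone example of the portable form:

#include <sched.h>

int main(void)
{
    sched_yield();  /* hint the scheduler to run another ready thread */
    return 0;
}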

@@ -26,13 +26,6 @@
#include <sys/time.h>
#endif
/**
* OPTIMIZE_MEMORY_FOOTPRINT : when #defined, forces all worker threads to terminate
* before switching to a new database, thereby avoiding memory fragmentation, which
* would otherwise cause resident memory to swell at run time.
*/
#define OPTIMIZE_MEMORY_FOOTPRINT
typedef struct work_item_tag {
struct work_item_tag *next;
void *data;
@@ -48,7 +41,6 @@ typedef struct work_queue_tag {
typedef enum {
POOL_INVALID,
POOL_VALID,
POOL_STOP, /* All worker threads should exit */
POOL_EXIT
} pool_state_t;
@@ -74,13 +66,4 @@ threadpool_t *thrmgr_new(int max_threads, int idle_timeout, void (*handler)(void
void thrmgr_destroy(threadpool_t *threadpool);
int thrmgr_dispatch(threadpool_t *threadpool, void *user_data);
#ifdef OPTIMIZE_MEMORY_FOOTPRINT
/**
* thrmgr_worker_stop_wait : set state to POOL_STOP, wake all worker threads, wait for them
* to exit before continuing.
*/
void thrmgr_worker_stop_wait(threadpool_t * const threadpool);
void thrmgr_setstate(threadpool_t * const threadpool, pool_state_t state);
#endif
#endif
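
For orientation, the worker exit rule this file ends up with after the revert: a worker leaves its loop only when the pool enters POOL_EXIT or when its idle wait times out; the reverted POOL_STOP state additionally made it leave after finishing its current job. A standalone paraphrase of that decision (worker_should_exit is a made-up helper, not part of the thrmgr API):

#include <stdio.h>

/* Pre-revert states; POOL_STOP is the one this commit removes. */
typedef enum { POOL_INVALID, POOL_VALID, POOL_STOP, POOL_EXIT } pool_state_t;

/* Post-revert rule: only POOL_EXIT or an idle timeout ends a worker. */
static int worker_should_exit(pool_state_t state, int idle_timed_out)
{
    return state == POOL_EXIT || idle_timed_out;
}

int main(void)
{
    printf("POOL_EXIT,  not idle -> exit? %d\n", worker_should_exit(POOL_EXIT, 0));
    printf("POOL_VALID, idle     -> exit? %d\n", worker_should_exit(POOL_VALID, 1));
    printf("POOL_VALID, not idle -> exit? %d\n", worker_should_exit(POOL_VALID, 0));
    return 0;
}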
