clamonacc - add consumer queue; add thread pool library; add thread pool support in consumer queue; flesh out consumer queue code; refactor scan functions into thread pool worker functions; refactor scan functions to work off slimmed down params and event metadata instead of a single, giant context; sundry fixups

pull/111/head
Mickey Sola 6 years ago committed by Micah Snyder
parent 0d78af13f1
commit b365aa5884
  1. 7
      clamonacc/Makefile.am
  2. 21
      clamonacc/c-thread-pool/LICENSE
  3. 551
      clamonacc/c-thread-pool/thpool.c
  4. 187
      clamonacc/c-thread-pool/thpool.h
  5. 15
      clamonacc/clamonacc.c
  6. 7
      clamonacc/clamonacc.h
  7. 28
      clamonacc/client/onaccess_client.c
  8. 4
      clamonacc/client/onaccess_client.h
  9. 39
      clamonacc/client/onaccess_proto.c
  10. 5
      clamonacc/client/onaccess_proto.h
  11. 22
      clamonacc/curl.COPYING
  12. 72
      clamonacc/fanotif/onaccess_fan.c
  13. 290
      clamonacc/scan/onaccess_scque.c
  14. 43
      clamonacc/scan/onaccess_scque.h
  15. 242
      clamonacc/scan/onaccess_scth.c
  16. 33
      clamonacc/scan/onaccess_scth.h

@ -54,7 +54,12 @@ clamonacc_SOURCES = \
./misc/onaccess_others.h \
./misc/priv_fts.h \
./scan/onaccess_scth.c \
./scan/onaccess_scth.h
./scan/onaccess_scth.h \
./scan/onaccess_scque.c \
./scan/onaccess_scque.h \
./c-thread-pool/thpool.c \
./c-thread-pool/thpool.h
if !SYSTEM_LFS_FTS
clamonacc_SOURCES += ./misc/fts.c

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2016 Johan Hanssen Seferidis
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

@ -0,0 +1,551 @@
/* ********************************
* Author: Johan Hanssen Seferidis
* License: MIT
* Description: Library providing a threading pool where you can add
* work. For usage, check the thpool.h file or README.md
*
*//** @file thpool.h *//*
*
********************************/
#define _POSIX_C_SOURCE 200809L
#include <unistd.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <errno.h>
#include <time.h>
#if defined(__linux__)
#include <sys/prctl.h>
#endif
#include "thpool.h"
#ifdef THPOOL_DEBUG
#define THPOOL_DEBUG 1
#else
#define THPOOL_DEBUG 0
#endif
#if !defined(DISABLE_PRINT) || defined(THPOOL_DEBUG)
#define err(str) fprintf(stderr, str)
#else
#define err(str)
#endif
static volatile int threads_keepalive;
static volatile int threads_on_hold;
/* ========================== STRUCTURES ============================ */
/* Binary semaphore */
typedef struct bsem {
pthread_mutex_t mutex;
pthread_cond_t cond;
int v;
} bsem;
/* Job */
typedef struct job{
struct job* prev; /* pointer to previous job */
void (*function)(void* arg); /* function pointer */
void* arg; /* function's argument */
} job;
/* Job queue */
typedef struct jobqueue{
pthread_mutex_t rwmutex; /* used for queue r/w access */
job *front; /* pointer to front of queue */
job *rear; /* pointer to rear of queue */
bsem *has_jobs; /* flag as binary semaphore */
int len; /* number of jobs in queue */
} jobqueue;
/* Thread */
typedef struct thread{
int id; /* friendly id */
pthread_t pthread; /* pointer to actual thread */
struct thpool_* thpool_p; /* access to thpool */
} thread;
/* Threadpool */
typedef struct thpool_{
thread** threads; /* pointer to threads */
volatile int num_threads_alive; /* threads currently alive */
volatile int num_threads_working; /* threads currently working */
pthread_mutex_t thcount_lock; /* used for thread count etc */
pthread_cond_t threads_all_idle; /* signal to thpool_wait */
jobqueue jobqueue; /* job queue */
} thpool_;
/* ========================== PROTOTYPES ============================ */
static int thread_init(thpool_* thpool_p, struct thread** thread_p, int id);
static void* thread_do(struct thread* thread_p);
static void thread_hold(int sig_id);
static void thread_destroy(struct thread* thread_p);
static int jobqueue_init(jobqueue* jobqueue_p);
static void jobqueue_clear(jobqueue* jobqueue_p);
static void jobqueue_push(jobqueue* jobqueue_p, struct job* newjob_p);
static struct job* jobqueue_pull(jobqueue* jobqueue_p);
static void jobqueue_destroy(jobqueue* jobqueue_p);
static void bsem_init(struct bsem *bsem_p, int value);
static void bsem_reset(struct bsem *bsem_p);
static void bsem_post(struct bsem *bsem_p);
static void bsem_post_all(struct bsem *bsem_p);
static void bsem_wait(struct bsem *bsem_p);
/* ========================== THREADPOOL ============================ */
/* Initialise thread pool
 *
 * Creates the pool struct, its job queue, and num_threads worker threads.
 * Blocks until every successfully created worker has marked itself alive.
 *
 * @param num_threads  number of workers to spawn (negative is clamped to 0)
 * @return pointer to the new pool, or NULL on allocation failure
 */
struct thpool_* thpool_init(int num_threads){

	threads_on_hold   = 0;
	threads_keepalive = 1;

	if (num_threads < 0){
		num_threads = 0;
	}

	/* Make new thread pool */
	thpool_* thpool_p;
	thpool_p = (struct thpool_*)malloc(sizeof(struct thpool_));
	if (thpool_p == NULL){
		err("thpool_init(): Could not allocate memory for thread pool\n");
		return NULL;
	}
	thpool_p->num_threads_alive   = 0;
	thpool_p->num_threads_working = 0;

	/* Initialise the job queue */
	if (jobqueue_init(&thpool_p->jobqueue) == -1){
		err("thpool_init(): Could not allocate memory for job queue\n");
		free(thpool_p);
		return NULL;
	}

	/* Make threads in pool */
	thpool_p->threads = (struct thread**)malloc(num_threads * sizeof(struct thread *));
	if (thpool_p->threads == NULL){
		err("thpool_init(): Could not allocate memory for threads\n");
		jobqueue_destroy(&thpool_p->jobqueue);
		free(thpool_p);
		return NULL;
	}

	pthread_mutex_init(&(thpool_p->thcount_lock), NULL);
	pthread_cond_init(&thpool_p->threads_all_idle, NULL);

	/* Thread init. Count successes: the original waited for ALL
	 * num_threads to come alive, so a single thread_init failure made
	 * the wait loop below spin forever. */
	int n;
	int threads_created = 0;
	for (n=0; n<num_threads; n++){
		if (thread_init(thpool_p, &thpool_p->threads[n], n) == 0){
			threads_created++;
		} else {
			err("thpool_init(): Could not create thread in pool\n");
		}
#if THPOOL_DEBUG
		printf("THPOOL_DEBUG: Created thread %d in pool \n", n);
#endif
	}

	/* Wait for threads to initialize (busy-wait; each worker bumps
	 * num_threads_alive under thcount_lock once it is running). */
	while (thpool_p->num_threads_alive != threads_created) {}

	return thpool_p;
}
/* Add work to the thread pool
 *
 * Wraps (function_p, arg_p) in a freshly allocated job node and enqueues it.
 *
 * @param thpool_p    pool that should run the work
 * @param function_p  worker function to execute
 * @param arg_p       opaque argument handed to function_p
 * @return 0 on success, -1 if the job node could not be allocated
 */
int thpool_add_work(thpool_* thpool_p, void (*function_p)(void*), void* arg_p){

	job* newjob = (struct job*)malloc(sizeof(struct job));
	if (NULL == newjob){
		err("thpool_add_work(): Could not allocate memory for new job\n");
		return -1;
	}

	/* record what to run and with what */
	newjob->function = function_p;
	newjob->arg      = arg_p;

	/* hand the job to the queue; a waiting worker will pick it up */
	jobqueue_push(&thpool_p->jobqueue, newjob);

	return 0;
}
/* Wait until all jobs - both queued and currently running - have finished.
 * Blocks on threads_all_idle, which the last worker to go idle signals. */
void thpool_wait(thpool_* thpool_p){
	pthread_mutex_lock(&thpool_p->thcount_lock);
	for (;;) {
		if (!thpool_p->jobqueue.len && !thpool_p->num_threads_working) {
			break;
		}
		pthread_cond_wait(&thpool_p->threads_all_idle, &thpool_p->thcount_lock);
	}
	pthread_mutex_unlock(&thpool_p->thcount_lock);
}
/* Destroy the threadpool
 *
 * Signals every worker to exit, waits (1s grace period of semaphore
 * broadcasts, then 1s-interval polling) for all of them to die, then frees
 * the job queue, the per-thread structs, and the pool itself.
 *
 * @param thpool_p  pool to tear down; NULL is a no-op
 * @return nothing
 */
void thpool_destroy(thpool_* thpool_p){
/* No need to destroy if it's NULL */
if (thpool_p == NULL) return ;

/* Snapshot before workers start decrementing num_threads_alive below */
volatile int threads_total = thpool_p->num_threads_alive;

/* End each thread's infinite loop */
threads_keepalive = 0;

/* Give one second to kill idle threads */
double TIMEOUT = 1.0;
time_t start, end;
double tpassed = 0.0;
time (&start);
while (tpassed < TIMEOUT && thpool_p->num_threads_alive){
/* wake workers blocked in bsem_wait so they observe keepalive == 0 */
bsem_post_all(thpool_p->jobqueue.has_jobs);
time (&end);
tpassed = difftime(end,start);
}

/* Poll remaining threads (still busy finishing their current job) */
while (thpool_p->num_threads_alive){
bsem_post_all(thpool_p->jobqueue.has_jobs);
sleep(1);
}

/* Job queue cleanup */
jobqueue_destroy(&thpool_p->jobqueue);

/* Deallocs (threads are detached; only their structs need freeing) */
int n;
for (n=0; n < threads_total; n++){
thread_destroy(thpool_p->threads[n]);
}
free(thpool_p->threads);
free(thpool_p);
}
/* Pause all threads in the threadpool
 *
 * Delivers SIGUSR1 to every live worker; the thread_hold() handler then
 * parks each one until thpool_resume() is called.
 */
void thpool_pause(thpool_* thpool_p) {
	int i;
	for (i = 0; i < thpool_p->num_threads_alive; i++){
		pthread_kill(thpool_p->threads[i]->pthread, SIGUSR1);
	}
}
/* Resume all threads in the threadpool
 *
 * NOTE: per-pool resume is not implemented yet; clearing the global hold
 * flag releases the workers of every pool at once.
 */
void thpool_resume(thpool_* thpool_p) {
	/* parameter kept for API symmetry with thpool_pause; suppress
	 * the unused-parameter warning */
	(void)thpool_p;

	threads_on_hold = 0;
}
/* Return how many workers are currently executing a job (not idle).
 * This is a racy snapshot: the value may change before the caller uses it. */
int thpool_num_threads_working(thpool_* thpool_p){
return thpool_p->num_threads_working;
}
/* ============================ THREAD ============================== */
/* Initialize a thread in the thread pool
 *
 * @param thpool_p  pool the new thread belongs to
 * @param thread_p  address of the pointer that receives the new thread struct
 * @param id        friendly id to be given to the thread
 * @return 0 on success, -1 otherwise.
 */
static int thread_init (thpool_* thpool_p, struct thread** thread_p, int id){

	*thread_p = (struct thread*)malloc(sizeof(struct thread));
	/* was `thread_p == NULL`: that tested the out-parameter (never NULL
	 * here), not the allocation result */
	if (*thread_p == NULL){
		err("thread_init(): Could not allocate memory for thread\n");
		return -1;
	}

	(*thread_p)->thpool_p = thpool_p;
	(*thread_p)->id       = id;

	/* pthread_create can fail (e.g. EAGAIN); report it instead of
	 * detaching an uninitialized handle */
	if (pthread_create(&(*thread_p)->pthread, NULL, (void *)thread_do, (*thread_p)) != 0){
		err("thread_init(): Could not create thread\n");
		free(*thread_p);
		*thread_p = NULL;
		return -1;
	}
	pthread_detach((*thread_p)->pthread);
	return 0;
}
/* SIGUSR1 handler: parks the calling worker until thpool_resume() clears
 * the global threads_on_hold flag.
 * NOTE(review): sleep() is async-signal-safe, but the 1s granularity means
 * resume latency can be up to one second. */
static void thread_hold(int sig_id) {
(void)sig_id;
threads_on_hold = 1;
while (threads_on_hold){
sleep(1);
}
}
/* What each thread is doing
 *
 * In principle this is an endless loop: block on the queue's binary
 * semaphore, pull one job, execute it, repeat. The only time this loop gets
 * interrupted is once thpool_destroy() clears threads_keepalive or the
 * program exits.
 *
 * @param thread_p thread struct that will run this function
 * @return NULL always
 */
static void* thread_do(struct thread* thread_p){

/* Set thread name for profiling and debugging */
char thread_name[128] = {0};
sprintf(thread_name, "thread-pool-%d", thread_p->id);

#if defined(__linux__)
/* Use prctl instead to prevent using _GNU_SOURCE flag and implicit declaration */
prctl(PR_SET_NAME, thread_name);
#elif defined(__APPLE__) && defined(__MACH__)
pthread_setname_np(thread_name);
#else
err("thread_do(): pthread_setname_np is not supported on this system");
#endif

/* Assure all threads have been created before starting serving */
thpool_* thpool_p = thread_p->thpool_p;

/* Register signal handler so thpool_pause() (SIGUSR1) can hold this thread */
struct sigaction act;
sigemptyset(&act.sa_mask);
act.sa_flags = 0;
act.sa_handler = thread_hold;
if (sigaction(SIGUSR1, &act, NULL) == -1) {
err("thread_do(): cannot handle SIGUSR1");
}

/* Mark thread as alive (initialized); thpool_init spins on this counter */
pthread_mutex_lock(&thpool_p->thcount_lock);
thpool_p->num_threads_alive += 1;
pthread_mutex_unlock(&thpool_p->thcount_lock);

while(threads_keepalive){

/* Block until a job is posted (or destroy broadcasts the semaphore) */
bsem_wait(thpool_p->jobqueue.has_jobs);

/* re-check: destroy wakes us with no job just to let us exit */
if (threads_keepalive){

pthread_mutex_lock(&thpool_p->thcount_lock);
thpool_p->num_threads_working++;
pthread_mutex_unlock(&thpool_p->thcount_lock);

/* Read job from queue and execute it */
void (*func_buff)(void*);
void* arg_buff;
job* job_p = jobqueue_pull(&thpool_p->jobqueue);
/* job_p may be NULL: another worker can win the race for the job */
if (job_p) {
func_buff = job_p->function;
arg_buff = job_p->arg;
func_buff(arg_buff);
free(job_p);
}

pthread_mutex_lock(&thpool_p->thcount_lock);
thpool_p->num_threads_working--;
/* last worker to go idle wakes anyone blocked in thpool_wait() */
if (!thpool_p->num_threads_working) {
pthread_cond_signal(&thpool_p->threads_all_idle);
}
pthread_mutex_unlock(&thpool_p->thcount_lock);

}
}

/* Exiting: account for this thread's death so thpool_destroy can finish */
pthread_mutex_lock(&thpool_p->thcount_lock);
thpool_p->num_threads_alive --;
pthread_mutex_unlock(&thpool_p->thcount_lock);

return NULL;
}
/* Frees a thread struct. The pthread itself is detached and exits on its
 * own once threads_keepalive is cleared — only the bookkeeping is freed. */
static void thread_destroy (thread* thread_p){
free(thread_p);
}
/* ============================ JOB QUEUE =========================== */
/* Initialize an (empty) job queue.
 *
 * @param jobqueue_p  queue to set up
 * @return 0 on success, -1 if the has_jobs semaphore could not be allocated
 */
static int jobqueue_init(jobqueue* jobqueue_p){

	jobqueue_p->len   = 0;
	jobqueue_p->front = NULL;
	jobqueue_p->rear  = NULL;

	/* the semaphore lives on the heap so it can outlive stack frames */
	jobqueue_p->has_jobs = (struct bsem*)malloc(sizeof(struct bsem));
	if (NULL == jobqueue_p->has_jobs){
		return -1;
	}

	pthread_mutex_init(&(jobqueue_p->rwmutex), NULL);
	bsem_init(jobqueue_p->has_jobs, 0);

	return 0;
}
/* Drain and free every job still in the queue, then reset it to the empty
 * state (length 0, semaphore cleared). */
static void jobqueue_clear(jobqueue* jobqueue_p){

	for (;;) {
		job *stale = jobqueue_pull(jobqueue_p);
		if (NULL == stale) {
			break;
		}
		free(stale);
	}

	jobqueue_p->front = NULL;
	jobqueue_p->rear  = NULL;
	bsem_reset(jobqueue_p->has_jobs);
	jobqueue_p->len = 0;
}
/* Append an already-allocated job to the rear of the queue and post the
 * has_jobs semaphore so a sleeping worker wakes up. Takes rwmutex itself. */
static void jobqueue_push(jobqueue* jobqueue_p, struct job* newjob){

	pthread_mutex_lock(&jobqueue_p->rwmutex);
	newjob->prev = NULL;

	if (jobqueue_p->len == 0){
		/* empty queue: new job is both front and rear */
		jobqueue_p->front = newjob;
		jobqueue_p->rear  = newjob;
	} else {
		/* non-empty: hang it off the current rear */
		jobqueue_p->rear->prev = newjob;
		jobqueue_p->rear       = newjob;
	}
	jobqueue_p->len++;

	bsem_post(jobqueue_p->has_jobs);
	pthread_mutex_unlock(&jobqueue_p->rwmutex);
}
/* Get first job from queue (removes it from queue).
 *
 * Takes the internal rwmutex itself; callers must NOT hold it.
 * Returns NULL when the queue is empty.
 *
 * (This comment previously contained unresolved merge conflict markers —
 * <<<<<<< / ======= / >>>>>>> — which have been removed; the stale HEAD-side
 * "caller must hold a mutex" notice no longer applies.)
 */
static struct job* jobqueue_pull(jobqueue* jobqueue_p){

	pthread_mutex_lock(&jobqueue_p->rwmutex);
	job* job_p = jobqueue_p->front;

	switch(jobqueue_p->len){

		case 0:  /* if no jobs in queue */
			break;

		case 1:  /* if one job in queue */
			jobqueue_p->front = NULL;
			jobqueue_p->rear  = NULL;
			jobqueue_p->len = 0;
			break;

		default: /* if >1 jobs in queue */
			jobqueue_p->front = job_p->prev;
			jobqueue_p->len--;
			/* more than one job in queue -> post it */
			bsem_post(jobqueue_p->has_jobs);

	}

	pthread_mutex_unlock(&jobqueue_p->rwmutex);
	return job_p;
}
/* Free all queue resources back to the system.
 *
 * Also destroys the pthread mutex/cond objects owned by the queue and its
 * semaphore; the original only freed the bsem allocation, leaking them. */
static void jobqueue_destroy(jobqueue* jobqueue_p){
	jobqueue_clear(jobqueue_p);
	pthread_mutex_destroy(&(jobqueue_p->has_jobs->mutex));
	pthread_cond_destroy(&(jobqueue_p->has_jobs->cond));
	pthread_mutex_destroy(&(jobqueue_p->rwmutex));
	free(jobqueue_p->has_jobs);
}
/* ======================== SYNCHRONISATION ========================= */
/* Initialise a binary semaphore.
 *
 * @param bsem_p  semaphore to initialise
 * @param value   starting value; must be 0 or 1, anything else aborts
 */
static void bsem_init(bsem *bsem_p, int value) {
	if (value != 0 && value != 1) {
		err("bsem_init(): Binary semaphore can take only values 1 or 0");
		exit(1);
	}
	pthread_mutex_init(&(bsem_p->mutex), NULL);
	pthread_cond_init(&(bsem_p->cond), NULL);
	bsem_p->v = value;
}
/* Reset semaphore to 0.
 *
 * Only clears the flag under the lock. The original delegated to
 * bsem_init(), which re-runs pthread_mutex_init/pthread_cond_init on
 * objects that are already initialized (and possibly in use) — undefined
 * behavior per POSIX. */
static void bsem_reset(bsem *bsem_p) {
	pthread_mutex_lock(&bsem_p->mutex);
	bsem_p->v = 0;
	pthread_mutex_unlock(&bsem_p->mutex);
}
/* Post to at least one waiting thread: set the flag to 1 and signal one
 * waiter. Signalling while holding the mutex keeps the flag and the wakeup
 * atomic with respect to bsem_wait. */
static void bsem_post(bsem *bsem_p) {
pthread_mutex_lock(&bsem_p->mutex);
bsem_p->v = 1;
pthread_cond_signal(&bsem_p->cond);
pthread_mutex_unlock(&bsem_p->mutex);
}
/* Post to all waiting threads: set the flag to 1 and broadcast, so every
 * worker blocked in bsem_wait wakes (used by thpool_destroy to let idle
 * workers observe threads_keepalive == 0). */
static void bsem_post_all(bsem *bsem_p) {
pthread_mutex_lock(&bsem_p->mutex);
bsem_p->v = 1;
pthread_cond_broadcast(&bsem_p->cond);
pthread_mutex_unlock(&bsem_p->mutex);
}
/* Wait on the semaphore until it has value 1, then atomically consume it
 * (reset v to 0) before returning. The while loop guards against spurious
 * condvar wakeups. (Previous comment said "until value 0" — the code waits
 * for 1.) */
static void bsem_wait(bsem* bsem_p) {
pthread_mutex_lock(&bsem_p->mutex);
while (bsem_p->v != 1) {
pthread_cond_wait(&bsem_p->cond, &bsem_p->mutex);
}
bsem_p->v = 0;
pthread_mutex_unlock(&bsem_p->mutex);
}

@ -0,0 +1,187 @@
/**********************************
* @author Johan Hanssen Seferidis
* License: MIT
*
**********************************/
#ifndef _THPOOL_
#define _THPOOL_
#ifdef __cplusplus
extern "C" {
#endif
/* =================================== API ======================================= */
typedef struct thpool_* threadpool;
/**
* @brief Initialize threadpool
*
* Initializes a threadpool. This function will not return until all
* threads have initialized successfully.
*
* @example
*
* ..
* threadpool thpool; //First we declare a threadpool
* thpool = thpool_init(4); //then we initialize it to 4 threads
* ..
*
* @param num_threads number of threads to be created in the threadpool
* @return threadpool created threadpool on success,
* NULL on error
*/
threadpool thpool_init(int num_threads);
/**
* @brief Add work to the job queue
*
* Takes an action and its argument and adds it to the threadpool's job queue.
* If you want to add a function with more than one argument as work, then
* a way to implement this is by passing a pointer to a structure.
*
* NOTICE: You have to cast both the function and argument to not get warnings.
*
* @example
*
* void print_num(int num){
* printf("%d\n", num);
* }
*
* int main() {
* ..
* int a = 10;
* thpool_add_work(thpool, (void*)print_num, (void*)a);
* ..
* }
*
* @param threadpool threadpool to which the work will be added
* @param function_p pointer to function to add as work
* @param arg_p pointer to an argument
* @return 0 on success, -1 otherwise.
*/
int thpool_add_work(threadpool, void (*function_p)(void*), void* arg_p);
/**
* @brief Wait for all queued jobs to finish
*
* Will wait for all jobs - both queued and currently running to finish.
* Once the queue is empty and all work has completed, the calling thread
* (probably the main program) will continue.
*
* Smart polling is used in wait. The polling is initially 0 - meaning that
* there is virtually no polling at all. If after 1 second the threads
* haven't finished, the polling interval starts growing exponentially
* until it reaches max_secs seconds. Then it jumps down to a maximum polling
* interval assuming that heavy processing is being used in the threadpool.
*
* @example
*
* ..
* threadpool thpool = thpool_init(4);
* ..
* // Add a bunch of work
* ..
* thpool_wait(thpool);
* puts("All added work has finished");
* ..
*
* @param threadpool the threadpool to wait for
* @return nothing
*/
void thpool_wait(threadpool);
/**
* @brief Pauses all threads immediately
*
* The threads will be paused no matter if they are idle or working.
* The threads return to their previous states once thpool_resume
* is called.
*
* While the thread is being paused, new work can be added.
*
* @example
*
* threadpool thpool = thpool_init(4);
* thpool_pause(thpool);
* ..
* // Add a bunch of work
* ..
* thpool_resume(thpool); // Let the threads start their magic
*
* @param threadpool the threadpool where the threads should be paused
* @return nothing
*/
void thpool_pause(threadpool);
/**
* @brief Unpauses all threads if they are paused
*
* @example
* ..
* thpool_pause(thpool);
* sleep(10); // Delay execution 10 seconds
* thpool_resume(thpool);
* ..
*
* @param threadpool the threadpool where the threads should be unpaused
* @return nothing
*/
void thpool_resume(threadpool);
/**
* @brief Destroy the threadpool
*
* This will wait for the currently active threads to finish and then 'kill'
* the whole threadpool to free up memory.
*
* @example
* int main() {
* threadpool thpool1 = thpool_init(2);
* threadpool thpool2 = thpool_init(2);
* ..
* thpool_destroy(thpool1);
* ..
* return 0;
* }
*
* @param threadpool the threadpool to destroy
* @return nothing
*/
void thpool_destroy(threadpool);
/**
* @brief Show currently working threads
*
* Working threads are the threads that are performing work (not idle).
*
* @example
* int main() {
* threadpool thpool1 = thpool_init(2);
* threadpool thpool2 = thpool_init(2);
* ..
* printf("Working threads: %d\n", thpool_num_threads_working(thpool1));
* ..
* return 0;
* }
*
* @param threadpool the threadpool of interest
* @return integer number of threads working
*/
int thpool_num_threads_working(threadpool);
#ifdef __cplusplus
}
#endif
#endif

@ -47,6 +47,7 @@
#include "./client/onaccess_client.h"
#include "./fanotif/onaccess_fan.h"
#include "./inotif/onaccess_ddd.h"
#include "./scan/onaccess_scque.h"
pthread_t ddd_pid = 0;
@ -80,6 +81,20 @@ int main(int argc, char **argv)
}
ctx->clamdopts = clamdopts;
/* Setup our event queue */
switch(onas_scanque_start(&ctx)) {
case CL_SUCCESS:
break;
case CL_BREAK:
case CL_EARG:
case CL_ECREAT:
default:
ret = 2;
logg("!Clamonacc: can't setup event consumer queue\n");
goto clean_up;
break;
}
/* Setup our client */
switch(onas_setup_client(&ctx)) {
case CL_SUCCESS:

@ -45,9 +45,9 @@ struct onas_context {
int fan_fd;
uint64_t fan_mask;
int retry_on_error;
int retry_attempts;
int deny_on_error;
uint8_t retry_on_error;
uint8_t retry_attempts;
uint8_t deny_on_error;
uint64_t sizelimit;
uint64_t extinfo;
@ -55,6 +55,7 @@ struct onas_context {
int scantype;
int isremote;
int session;
int timeout;
int64_t portnum;
} __attribute__((packed));

@ -160,7 +160,7 @@ int onas_check_remote(struct onas_context **ctx, cl_error_t *err) {
}
}
CURLcode onas_curl_init(CURL **curl, char *ipaddr, int64_t port, int64_t timeout) {
CURLcode onas_curl_init(CURL **curl, const char *ipaddr, int64_t port, int64_t timeout) {
CURLcode curlcode = CURLE_OK;
@ -281,6 +281,8 @@ cl_error_t onas_setup_client (struct onas_context **ctx) {
return CL_EARG;
}
(*ctx)->timeout = optget((*ctx)->clamdopts, "OnAccessCurlTimeout")->numarg;
(*ctx)->isremote = onas_check_remote(ctx, &err);
if (err) {
return CL_EARG;
@ -406,30 +408,24 @@ int onas_get_clamd_version(struct onas_context **ctx)
return 0;
}
int onas_client_scan(struct onas_context **ctx, const char *fname, STATBUF sb, int *infected, int *err, cl_error_t *ret_code)
int onas_client_scan(const char *tcpaddr, int64_t portnum, int32_t scantype, uint64_t maxstream, const char *fname, int64_t timeout, STATBUF sb, int *infected, int *err, cl_error_t *ret_code)
{
CURL *curl = NULL;
CURLcode curlcode = CURLE_OK;
int scantype, errors = 0;
int errors = 0;
int sockd, ret;
int64_t timeout;
timeout = optget((*ctx)->clamdopts, "OnAccessCurlTimeout")->numarg;
*infected = 0;
if((sb.st_mode & S_IFMT) != S_IFREG) {
scantype = STREAM;
} else {
scantype = (*ctx)->scantype;
}
}
curlcode = onas_curl_init(&curl, optget((*ctx)->clamdopts, "TCPAddr")->strarg, (*ctx)->portnum, timeout);
curlcode = onas_curl_init(&curl, tcpaddr, portnum, timeout);
if (CURLE_OK != curlcode) {
logg("!ClamClient: could not setup curl with tcp address and port, %s\n", curl_easy_strerror(curlcode));
/* curl cleanup done in ons_curl_init on error */
return 2;
/* curl cleanup done in onas_curl_init on error */
return CL_ECREAT;
}
/* logg here is noisy even for debug, enable only for dev work if something has gone very wrong. */
@ -437,11 +433,11 @@ int onas_client_scan(struct onas_context **ctx, const char *fname, STATBUF sb, i
curlcode = curl_easy_perform(curl);
if (CURLE_OK != curlcode) {
logg("!ClamClient: could not establish connection, %s\n", curl_easy_strerror(curlcode));
return 2;
return CL_ECREAT;
}
if((ret = onas_dsresult(ctx, curl, scantype, fname, &ret, err, ret_code)) >= 0) {
if((ret = onas_dsresult(curl, scantype, maxstream, fname, timeout, &ret, err, ret_code)) >= 0) {
*infected = ret;
} else {
logg("*ClamClient: connection could not be established ... return code %d\n", *ret_code);
@ -451,6 +447,6 @@ int onas_client_scan(struct onas_context **ctx, const char *fname, STATBUF sb, i
//logg("*ClamClient: done, closing connection ...\n");
curl_easy_cleanup(curl);
return *infected ? 1 : (errors ? 2 : 0);
return *infected ? CL_VIRUS : (errors ? CL_ECREAT : CL_CLEAN);
}

@ -37,8 +37,8 @@ enum {
};
int onas_client_scan(struct onas_context **ctx, const char *fname, STATBUF sb, int *infected, int *err, cl_error_t *ret_code);
CURLcode onas_curl_init(CURL **curl, char *ipaddr, int64_t port, int64_t timeout);
int onas_client_scan(const char *tcpaddr, int64_t portnum, int32_t scantype, uint64_t maxstream, const char *fname, int64_t timeout, STATBUF sb, int *infected, int *err, cl_error_t *ret_code);
CURLcode onas_curl_init(CURL **curl, const char *ipaddr, int64_t port, int64_t timeout);
int onas_get_clamd_version(struct onas_context **ctx);
cl_error_t onas_setup_client(struct onas_context **ctx);
int onas_check_remote(struct onas_context **ctx, cl_error_t *err);

@ -73,13 +73,10 @@ static const char *scancmd[] = { "CONTSCAN", "MULTISCAN", "INSTREAM", "FILDES",
/* Issues an INSTREAM command to clamd and streams the given file
* Returns >0 on success, 0 soft fail, -1 hard fail */
static int onas_send_stream(struct onas_context **ctx, CURL *curl, const char *filename) {
static int onas_send_stream(CURL *curl, const char *filename, int64_t timeout, uint64_t maxstream) {
uint32_t buf[BUFSIZ/sizeof(uint32_t)];
int fd, len;
unsigned long int todo = (*ctx)->maxstream;
int64_t timeout;
timeout = optget((*ctx)->clamdopts, "OnAccessCurlTimeout")->numarg;
uint64_t fd, len;
uint64_t todo = maxstream;
if(filename) {
if((fd = safe_open(filename, O_RDONLY | O_BINARY))<0) {
@ -94,7 +91,7 @@ static int onas_send_stream(struct onas_context **ctx, CURL *curl, const char *f
}
while((len = read(fd, &buf[1], sizeof(buf) - sizeof(uint32_t))) > 0) {
if((unsigned int)len > todo) len = todo;
if((uint64_t)len > todo) len = todo;
buf[0] = htonl(len);
if (onas_sendln(curl, (const char *)buf, len+sizeof(uint32_t), timeout)) {
close(fd);
@ -162,39 +159,15 @@ static int onas_send_fdpass(CURL *curl, const char *filename, int64_t timeout) {
}
#endif
/* 0: scan, 1: skip */
static int chkpath(struct onas_context **ctx, const char *path)
{
const struct optstruct *opt;
if((opt = optget((*ctx)->clamdopts, "ExcludePath"))->enabled) {
while(opt) {
if(match_regex(path, opt->strarg) == 1) {
if ((*ctx)->printinfected != 1)
logg("~%s: Excluded\n", path);
return 1;
}
opt = opt->nextarg;
}
}
return 0;
}
/* Sends a proper scan request to clamd and parses its replies
* This is used only in non IDSESSION mode
* Returns the number of infected files or -1 on error
* NOTE: filename may be NULL for STREAM scantype. */
int onas_dsresult(struct onas_context **ctx, CURL *curl, int scantype, const char *filename, int *printok, int *errors, cl_error_t *ret_code) {
int onas_dsresult(CURL *curl, int scantype, uint64_t maxstream, const char *filename, int64_t timeout, int *printok, int *errors, cl_error_t *ret_code) {
int infected = 0, len = 0, beenthere = 0;
char *bol, *eol;
struct RCVLN rcv;
STATBUF sb;
int64_t timeout;
timeout = optget((*ctx)->clamdopts, "OnAccessCurlTimeout")->numarg;
if(filename && chkpath(ctx, filename))
return 0;
onas_recvlninit(&rcv, curl);
@ -234,7 +207,7 @@ int onas_dsresult(struct onas_context **ctx, CURL *curl, int scantype, const cha
case STREAM:
/* NULL filename safe in send_stream() */
len = onas_send_stream(ctx, curl, filename);
len = onas_send_stream(curl, filename, timeout, maxstream);
break;
#ifdef HAVE_FD_PASSING
case FILDES:

@ -2,7 +2,7 @@
* Copyright (C) 2015 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
* Copyright (C) 2009 Sourcefire, Inc.
*
* Authors: Tomasz Kojm, aCaB
* Authors: Tomasz Kojm, aCaB, Mickey Sola
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@ -27,6 +27,5 @@
#include "shared/misc.h"
#include "../clamonacc.h"
/*int onas_dconnect(struct onas_context **ctx);*/
int onas_dsresult(struct onas_context **ctx, CURL *curl, int scantype, const char *filename, int *printok, int *errors, cl_error_t *ret_code);
int onas_dsresult(CURL *curl, int scantype, uint64_t maxstream, const char *filename, int64_t timeout, int *printok, int *errors, cl_error_t *ret_code);
#endif

@ -0,0 +1,22 @@
COPYRIGHT AND PERMISSION NOTICE
Copyright (c) 1996 - 2019, Daniel Stenberg, <daniel@haxx.se>, and many
contributors, see the THANKS file.
All rights reserved.
Permission to use, copy, modify, and distribute this software for any purpose
with or without fee is hereby granted, provided that the above copyright
notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
OR OTHER DEALINGS IN THE SOFTWARE.
Except as contained in this notice, the name of a copyright holder shall not
be used in advertising or otherwise to promote the sale, use or other dealings
in this Software without prior written authorization of the copyright holder.

@ -54,8 +54,10 @@
#include "../client/onaccess_client.h"
#include "../scan/onaccess_scth.h"
#include "../scan/onaccess_scque.h"
extern pthread_t ddd_pid;
extern pthread_t scque_pid;
/*static void onas_fan_exit(int sig)
{
@ -68,42 +70,13 @@ extern pthread_t ddd_pid;
pthread_join(ddd_pid, NULL);
}
pthread_exit(NULL);
logg("ClamFanotif: stopped\n");
}*/
/* TODO: rework this to feed multithreading consumer queue
* static int onas_fan_scanfile(const char *fname, struct fanotify_event_metadata *fmd, STATBUF sb, int scan, struct onas_context **ctx)
{
struct fanotify_response res;
int infected = 0;
int err = 0;
int ret = 0;
int i = 0;
cl_error_t ret_code = 0;
res.fd = fmd->fd;
res.response = FAN_ALLOW;
if (scan) {
ret = onas_scan(ctx, fname, sb, &infected, &err, &ret_code);
if (err && ret_code != CL_SUCCESS) {
logg("*ClamFanotif: scan failed with error code %d\n", ret_code);
}
if ((err && ret_code && (*ctx)->deny_on_error) || infected) {
res.response = FAN_DENY;
}
if (scque_pid > 0) {
pthread_kill(ddd_pid, SIGUSR1);
pthread_join(ddd_pid, NULL);
}
if (fmd->mask & FAN_ALL_PERM_EVENTS) {
ret = write((*ctx)->fan_fd, &res, sizeof(res));
if (ret == -1)
logg("!ClamFanotif: internal error (can't write to fanotify)\n");
}
return ret;
pthread_exit(NULL);
logg("ClamFanotif: stopped\n");
}*/
cl_error_t onas_setup_fanotif(struct onas_context **ctx) {
@ -186,9 +159,6 @@ cl_error_t onas_setup_fanotif(struct onas_context **ctx) {
extinfo = optget((*ctx)->clamdopts, "ExtendedDetectionInfo")->enabled;
//(*ctx)->sizelimit = sizelimit;
//(*ctx)->extinfo = extinfo;
return CL_SUCCESS;
}
@ -239,12 +209,7 @@ int onas_fan_eloop(struct onas_context **ctx) {
if((check = onas_fan_checkowner(fmd->pid, (*ctx)->clamdopts))) {
scan = 0;
/* TODO: Re-enable OnAccessExtraScanning once the thread resource consumption issue is resolved. */
#if 0
if ((check != CHK_SELF) || !(optget(tharg->opts, "OnAccessExtraScanning")->enabled))
#else
if (check != CHK_SELF) {
#endif
logg("*ClamFanotif: %s skipped (excluded UID)\n", fname);
}
}
@ -253,23 +218,26 @@ int onas_fan_eloop(struct onas_context **ctx) {
struct onas_scan_event *event_data;
event_data = cli_calloc(1, sizeof(struct onas_scan_event));
if (NULL == event_data) {
logg("!ClamFanotif: could not allocate memory for event data struct\n");
return 2;
}
event_data->b_fanotify = 1;
/* general mapping */
onas_map_context_info_to_event_data(*ctx, &event_data);
scan ? event_data->bool_opts |= ONAS_SCTH_B_SCAN : scan;
/* fanotify specific stuffs */
event_data->bool_opts |= ONAS_SCTH_B_FANOTIFY;
event_data->fmd = fmd;
event_data->b_scan = scan;
/* TODO: rework to feed consumer queue */
if (onas_scth_handle_file(ctx, fname, event_data) == -1) {
/* feed consumer queue */
if (CL_SUCCESS != onas_queue_event(event_data)) {
close(fmd->fd);
logg("!ClamFanotif: unrecoverable fanotify error occurred :(\n");
logg("!ClamFanotif: error occurred while feeding consumer queue :(\n");
return 2;
}
}
if (close(fmd->fd) == -1) {
printf("!ClamFanotif: internal error (close(%d) failed)\n", fmd->fd);
return 2;
}
}
fmd = FAN_EVENT_NEXT(fmd, bread);
}

@ -0,0 +1,290 @@
/*
* Copyright (C) 2019 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
*
* Authors: Mickey Sola
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
*/
#if HAVE_CONFIG_H
#include "clamav-config.h"
#endif
#if defined(FANOTIFY)
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <signal.h>
#include <pthread.h>
#include <string.h>
#include <errno.h>
#include <stdbool.h>
#include <sys/fanotify.h>
#include <sys/inotify.h>
#include "../fanotif/onaccess_fan.h"
#include "onaccess_hash.h"
#include "onaccess_ddd.h"
#include "../scan/onaccess_scth.h"
#include "../misc/onaccess_others.h"
#include "libclamav/clamav.h"
#include "libclamav/scanners.h"
#include "shared/optparser.h"
#include "shared/output.h"
#include "clamd/server.h"
#include "clamd/others.h"
#include "clamd/scanner.h"
#include "../c-thread-pool/thpool.h"
static void onas_scanque_exit(int sig);
static int onas_consume_event(struct *event_data);
static pthread_mutex_t onas_queue_lock = PTHREAD_MUTEX_INITIALIZER;
static threadpool g_thpool;
static struct onas_event_queue_node *g_onas_event_queue_head = NULL;
static struct onas_event_queue_node *g_onas_event_queue_tail = NULL;
static struct onas_event_queue g_onas_event_queue = {
head = g_onas_event_queue_head,
tail = g_onas_event_queue_tail,
size = 0;
};
static void *onas_init_event_queue() {
*g_onas_event_queue_head = (struct event_queue_node) {
.next = NULL,
.prev = NULL,
.data = NULL
};
*g_onas_event_queue_tail = &(struct event_queue_node) {
.next = NULL,
.prev = NULL,
.data = NULL
};
g_onas_event_queue_tail->prev = g_onas_event_queue_head;
g_onas_event_queue_head->next = g_onas_event_queue_tail;
}
extern pthread_t scque_pid;
static cl_error_t onas_new_event_queue_node(struct event_queue_node **node) {
*node = malloc(sizeof(struct onas_event_queue));
if (NULL == *node) {
return CL_EMEM;
}
**node = (struct event_queue_node) {
.next = NULL,
.prev = NULL,
.data = NULL
};
return CL_SUCCESS;
}
static void onas_destroy_event_queue_node(struct event_queue_node *node) {
if (NULL == node) {
return;
}
node->next = NULL;
node->prev = NULL;
node->data = NULL;
free(node);
node = NULL;
return;
}
static void onas_destroy_event_queue() {
struct onas_event_queue_node *curr = g_onas_event_queue_head;
struct onas_event_queue_node *next = curr->next;
do {
onas_destroy_event_queue_node(curr);
curr = next;
if (curr) {
next = curr->next;
}
} while (curr);
return;
}
/* Consumer-queue thread entry point. Sets up signal handling, initializes
 * the event queue and worker pool, then loops forever handing queued events
 * to the thread pool. arg is the shared struct onas_context *. */
void *onas_scanque_th(void *arg) {

    /* not a ton of use for context right now, but perhaps in the future we can pass in more options */
    struct onas_context *ctx = (struct onas_context *)arg;
    sigset_t sigset;
    struct sigaction act;
    threadpool thpool;

    /* ignore all signals except SIGUSR1 */
    sigfillset(&sigset);
    sigdelset(&sigset, SIGUSR1);
    /* The behavior of a process is undefined after it ignores a
     * SIGFPE, SIGILL, SIGSEGV, or SIGBUS signal */
    sigdelset(&sigset, SIGFPE);
    sigdelset(&sigset, SIGILL);
    sigdelset(&sigset, SIGSEGV);
#ifdef SIGBUS
    sigdelset(&sigset, SIGBUS);
#endif
    pthread_sigmask(SIG_SETMASK, &sigset, NULL);
    memset(&act, 0, sizeof(struct sigaction));
    act.sa_handler = onas_scanque_exit;
    sigfillset(&(act.sa_mask));
    sigaction(SIGUSR1, &act, NULL);
    sigaction(SIGSEGV, &act, NULL);

    onas_init_event_queue();
    thpool   = thpool_init(ctx->maxthreads);
    g_thpool = thpool;

    /* loop w/ onas_consume_event until we die */
    do {
        /* onas_consume_event() returns nonzero when the queue is empty;
         * only then do we back off. (The original inverted this test and
         * slept after every consumed event while busy-spinning when idle.) */
        if (onas_consume_event(thpool)) {
            /* sleep for a bit */
            usleep(500);
        }
    } while (1);

    /* never reached; a void * function must still return a value */
    return NULL;
}
static int onas_queue_is_b_empty() {
if (g_onas_event_queue->head->next == g_onas_event_queue->tail) {
return 1;
}
return 0;
}
/* Pop the oldest event off the queue and dispatch it to the worker pool.
 * Returns 0 when an event was consumed, 1 when the queue was empty.
 * Fixes from original: the empty-queue path returned while still holding
 * onas_queue_lock (guaranteed deadlock), the head re-link referenced the
 * undefined identifier `g_onas_event_head`, and `->size` was applied to a
 * struct object. */
static int onas_consume_event(threadpool thpool) {

    int ret = 0;
    struct onas_event_queue_node *popped_node = NULL;

    pthread_mutex_lock(&onas_queue_lock);

    if (onas_queue_is_b_empty()) {
        ret = 1;
        goto done;
    }

    popped_node = g_onas_event_queue_head->next;

    /* The worker owns popped_node->data from here on and frees it. */
    thpool_add_work(thpool, (void *)onas_scan_worker, (void *)popped_node->data);

    /* Unlink the popped node from between head sentinel and its successor. */
    g_onas_event_queue_head->next = popped_node->next;
    popped_node->next->prev       = g_onas_event_queue_head;

    onas_destroy_event_queue_node(popped_node);
    g_onas_event_queue.size--;

done:
    pthread_mutex_unlock(&onas_queue_lock);
    return ret;
}
/* Producer side: append event_data to the tail of the consumer queue.
 * Takes ownership of nothing on failure; returns CL_EMEM if a node cannot
 * be allocated, CL_SUCCESS otherwise.
 * Fixes from original: the CL_EMEM path returned while holding
 * onas_queue_lock (deadlock) — allocation now happens before locking —
 * and `->size` was applied to a struct object. */
cl_error_t onas_queue_event(struct onas_scan_event *event_data) {

    struct onas_event_queue_node *node = NULL;

    /* Allocate outside the critical section so an OOM can't leave the
     * queue lock held. */
    if (CL_EMEM == onas_new_event_queue_node(&node)) {
        return CL_EMEM;
    }

    node->data = event_data;

    pthread_mutex_lock(&onas_queue_lock);

    /* Splice the new node in just before the tail sentinel; the tail
     * sentinel always has a valid ->prev. */
    node->next = g_onas_event_queue_tail;
    node->prev = g_onas_event_queue_tail->prev;
    g_onas_event_queue_tail->prev->next = node;
    g_onas_event_queue_tail->prev       = node;

    g_onas_event_queue.size++;

    pthread_mutex_unlock(&onas_queue_lock);

    return CL_SUCCESS;
}
/* Spawn the consumer-queue thread (scque_pid) running onas_scanque_th with
 * the shared context as its argument.
 * Returns CL_SUCCESS, CL_EARG on a bad context, CL_BREAK if thread
 * attributes cannot be initialized, or CL_ECREAT if the thread cannot be
 * started. */
cl_error_t onas_scanque_start(struct onas_context **ctx) {

    pthread_attr_t scque_attr;
    int32_t thread_started = 1;

    if (!ctx || !*ctx) {
        logg("*ClamQueue: unable to start clamonacc. (bad context)\n");
        return CL_EARG;
    }

    if (pthread_attr_init(&scque_attr)) {
        return CL_BREAK;
    }
    pthread_attr_setdetachstate(&scque_attr, PTHREAD_CREATE_JOINABLE);
    thread_started = pthread_create(&scque_pid, &scque_attr, onas_scanque_th, *ctx);

    /* The attributes are copied into the new thread at creation time;
     * destroy ours to avoid leaking attribute resources (the original
     * never destroyed them). */
    pthread_attr_destroy(&scque_attr);

    if (0 != thread_started) {
        /* Failed to create thread */
        logg("*ClamQueue: Unable to start event consumer queue thread ... \n");
        return CL_ECREAT;
    }

    return CL_SUCCESS;
}
/* SIGUSR1/SIGSEGV handler for the consumer-queue thread: tear down the
 * pending-event queue and the worker pool, then exit this thread.
 * Fix: the original logged "stopped" AFTER pthread_exit(), which never
 * returns, so that line was unreachable. */
static void onas_scanque_exit(int sig) {

    logg("*ClamScanque: onas_scanque_exit(), signal %d\n", sig);

    /* TODO: cleanup queue struct */
    onas_destroy_event_queue();
    thpool_destroy(g_thpool);

    logg("ClamScanque: stopped\n");
    pthread_exit(NULL);
}
#endif

@ -0,0 +1,43 @@
/*
* Copyright (C) 2019 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
*
* Authors: Mickey Sola
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
*/
#ifndef __ONAS_SCQUE_H
#define __ONAS_SCQUE_H
/* extremely simple event queue implmentation w/ obj number tracking in case we want to place limits later */
/* Queue bookkeeping: sentinel head/tail node pointers plus a running count
 * of queued events. */
struct onas_event_queue {
    struct onas_event_queue_node *head;
    struct onas_event_queue_node *tail;
    /* number of events currently queued */
    uint64_t size;
};
/* Doubly linked queue node; data points at the scan event the consumer
 * thread will hand off to a worker. */
struct onas_event_queue_node {
    struct onas_event_queue_node *next;
    struct onas_event_queue_node *prev;
    struct onas_scan_event *data;
};
/* Consumer-queue thread entry point; arg is cast to struct onas_context *. */
void *onas_scanque_th(void *arg);
/* Producer side: append event_data to the queue; returns CL_EMEM on OOM. */
cl_error_t onas_queue_event(struct onas_scan_event *event_data);
/* Spawn the consumer-queue thread; see implementation for error codes. */
cl_error_t onas_scanque_start(struct onas_context **ctx);
#endif

@ -47,10 +47,10 @@
static pthread_mutex_t onas_scan_lock = PTHREAD_MUTEX_INITIALIZER;
//static int onas_scan(struct onas_context **ctx, const char *fname, STATBUF sb, int *infected, int *err, cl_error_t *ret_code);
static int onas_scan_safe(struct onas_context **ctx, const char *fname, STATBUF sb, int *infected, int *err, cl_error_t *ret_code);
static int onas_scth_scanfile(struct onas_context **ctx, const char *fname, STATBUF sb, struct onas_scan_event *event_data, int *infected, int *err, cl_error_t *ret_code);
static int onas_scth_handle_dir(struct onas_context **ctx, const char *pathname, struct onas_scan_event *event_data);
//static int onas_scth_handle_file(struct onas_context **ctx, const char *pathname, struct onas_scan_event *event_data);
static cl_error_t onas_scan_safe(struct onas_scan_event *event_data, const char *fname, STATBUF sb, int *infected, int *err, cl_error_t *ret_code);
static cl_error_t onas_scth_scanfile(struct onas_scan_event *event_data, const char *fname, STATBUF sb, int *infected, int *err, cl_error_t *ret_code);
static cl_error_t onas_scth_handle_dir(struct onas_scan_event *event_data, const char *pathname);
static cl_error_t onas_scth_handle_file(struct onas_scan_event *event_data, const char *pathname);
static void onas_scth_exit(int sig);
@ -65,12 +65,13 @@ static void onas_scth_exit(int sig)
* Scan wrapper, used by both inotify and fanotify threads. Owned by scanthread to force multithreaded client archtiecture
* which better avoids kernel level deadlocks from fanotify blocking/prevention
*/
int onas_scan(struct onas_context **ctx, const char *fname, STATBUF sb, int *infected, int *err, cl_error_t *ret_code)
int onas_scan(struct onas_scan_event *event_data, const char *fname, STATBUF sb, int *infected, int *err, cl_error_t *ret_code)
{
int ret = 0;
int i = 0;
uint8_t retry_on_error = event_data->bool_opts & ONAS_SCTH_B_RETRY_ON_E;
ret = onas_scan_safe(ctx, fname, sb, infected, err, ret_code);
ret = onas_scan_safe(event_data, fname, sb, infected, err, ret_code);
if (*err) {
switch (*ret_code) {
@ -88,13 +89,13 @@ int onas_scan(struct onas_context **ctx, const char *fname, STATBUF sb, int *inf
default:
logg("~ClamMisc: internal issue (client failed to scan)\n");
}
if ((*ctx)->retry_on_error) {
if (retry_on_error) {
logg("*ClamMisc: reattempting scan ... \n");
while (err) {
ret = onas_scan_safe(ctx, fname, sb, infected, err, ret_code);
ret = onas_scan_safe(event_data, fname, sb, infected, err, ret_code);
i++;
if (*err && i == (*ctx)->retry_attempts) {
if (*err && i == event_data->retry_attempts) {
*err = 0;
}
}
@ -107,185 +108,232 @@ int onas_scan(struct onas_context **ctx, const char *fname, STATBUF sb, int *inf
/**
* Thread-safe scan wrapper to ensure there's no processs contention over use of the socket.
*/
static int onas_scan_safe(struct onas_context **ctx, const char *fname, STATBUF sb, int *infected, int *err, cl_error_t *ret_code)
{
static cl_error_t onas_scan_safe(struct onas_scan_event *event_data, const char *fname, STATBUF sb, int *infected, int *err, cl_error_t *ret_code) {
int ret = 0;
pthread_mutex_lock(&onas_scan_lock);
ret = onas_client_scan(ctx, fname, sb, infected, err, ret_code);
ret = onas_client_scan(event_data->tcpaddr, event_data->portnum, event_data->scantype, event_data->maxstream,
fname, event_data->timeout, sb, infected, err, ret_code);
pthread_mutex_unlock(&onas_scan_lock);
return ret;
}
int onas_scth_scanfile(struct onas_context **ctx, const char *fname, STATBUF sb, struct onas_scan_event *event_data, int *infected, int *err, cl_error_t *ret_code)
{
static cl_error_t onas_scth_scanfile(struct onas_scan_event *event_data, const char *fname, STATBUF sb, int *infected, int *err, cl_error_t *ret_code) {
struct fanotify_response res;
int ret = 0;
int i = 0;
if (event_data->b_fanotify) {
uint8_t b_scan;
uint8_t b_fanotify;
uint8_t b_deny_on_error;
if (NULL == event_data || NULL == fname || NULL == infected || NULL == err || NULL == ret_code) {
/* TODO: log */
return CL_ENULLARG;
}
b_scan = event_data->bool_opts & ONAS_SCTH_B_SCAN ? 1 : 0;
b_fanotify = event_data->bool_opts & ONAS_SCTH_B_FANOTIFY ? 1 : 0;
b_deny_on_error = event_data->bool_opts & ONAS_SCTH_B_DENY_ON_E ? 1 : 0;
if (b_fanotify) {
res.fd = event_data->fmd->fd;
res.response = FAN_ALLOW;
}
if (event_data->b_scan) {
ret = onas_scan(ctx, fname, sb, infected, err, ret_code);
if (b_scan) {
ret = onas_scan(event_data, fname, sb, infected, err, ret_code);
if (*err && *ret_code != CL_SUCCESS) {
logg("*Clamonacc: scan failed with error code %d\n", *ret_code);
logg("*ClamWorker: scan failed with error code %d\n", *ret_code);
}
if (event_data->b_fanotify) {
if ((*err && *ret_code && (*ctx)->deny_on_error) || *infected) {
if (b_fanotify) {
if ((*err && *ret_code && b_deny_on_error) || *infected) {
res.response = FAN_DENY;
}
}
}
if (event_data->b_fanotify) {
if (b_fanotify) {
if(event_data->fmd->mask & FAN_ALL_PERM_EVENTS) {
ret = write((*ctx)->fan_fd, &res, sizeof(res));
if(ret == -1)
logg("!Clamonacc: internal error (can't write to fanotify)\n");
ret = write(event_data->fan_fd, &res, sizeof(res));
if(ret == -1) {
logg("!ClamWorker: internal error (can't write to fanotify)\n");
ret = CL_EWRITE;
}
}
}
if (b_fanotify) {
if (-1 == close(event_data->fmd->fd) ) {
logg("!ClamWorker: internal error (can't close fanotify meta fd)\n");
ret = CL_EUNLINK;
}
}
return ret;
}
static int onas_scth_handle_dir(struct onas_context **ctx, const char *pathname, struct onas_scan_event *event_data) {
static cl_error_t onas_scth_handle_dir(struct onas_scan_event *event_data, const char *pathname) {
FTS *ftsp = NULL;
int32_t ftspopts = FTS_PHYSICAL | FTS_XDEV;
FTSENT *curr = NULL;
int32_t infected = 0;
int32_t err = 0;
cl_error_t ret_code = CL_SUCCESS;
int32_t ret = 0;
cl_error_t ret = CL_SUCCESS;
int32_t fres = 0;
FTSENT *curr = NULL;
STATBUF sb;
char *const pathargv[] = {(char *)pathname, NULL};
if (!(ftsp = _priv_fts_open(pathargv, ftspopts, NULL))) return CL_EOPEN;
if (!(ftsp = _priv_fts_open(pathargv, ftspopts, NULL))) {
return CL_EOPEN;
}
while ((curr = _priv_fts_read(ftsp))) {
if (curr->fts_info != FTS_D) {
fres = CLAMSTAT(curr->fts_path, &sb);
if ((*ctx)->sizelimit) {
if (fres != 0 || sb.st_size > (*ctx)->sizelimit) {
//okay to skip, directory from inotify events (probably) won't block w/ protection enabled
//log here later
if (event_data->sizelimit) {
if (fres != 0 || sb.st_size > event_data->sizelimit) {
/* okay to skip w/o allow/deny since dir comes from inotify
* events and (probably) won't block w/ protection enabled */
// TODO: log here later ??
continue;
}
}
ret = onas_scth_scanfile(ctx, curr->fts_path, sb, event_data, &infected, &err, &ret_code);
// probs need to error check here later, or at least log
ret = onas_scth_scanfile(event_data, curr->fts_path, sb, &infected, &err, &ret_code);
// TODO: probs need to error check here later, or at least log
}
}
return ret;
}
int onas_scth_handle_file(struct onas_context **ctx, const char *pathname, struct onas_scan_event *event_data) {
static cl_error_t onas_scth_handle_file(struct onas_scan_event *event_data, const char *pathname) {
STATBUF sb;
int32_t infected = 0;
int32_t err = 0;
cl_error_t ret_code = CL_SUCCESS;
int fres = 0;
int ret = 0;
cl_error_t ret = 0;
if (!pathname) return CL_ENULLARG;
if (NULL == pathname || NULL == event_data) {
return CL_ENULLARG;
}
fres = CLAMSTAT(pathname, &sb);
if ((*ctx)->sizelimit) {
if (fres != 0 || sb.st_size > (*ctx)->sizelimit) {
/* don't skip so we avoid lockups, but don't scan either */
event_data->b_scan = 0;
if (event_data->sizelimit) {
if (fres != 0 || sb.st_size > event_data->sizelimit) {
/* don't skip so we avoid lockups, but don't scan either;
* while it should be obvious, this will unconditionally set
* the bit in the map to 0 regardless of original orientation */
event_data->bool_opts &= ((uint16_t) ~ONAS_SCTH_B_SCAN);
}
}
ret = onas_scth_scanfile(ctx, pathname, sb, event_data, &infected, &err, &ret_code);
ret = onas_scth_scanfile(event_data, pathname, sb, &infected, &err, &ret_code);
// probs need to error check here later, or at least log
return ret;
}
void *onas_scan_th(void *arg) {
struct scth_thrarg *tharg = (struct scth_thrarg *)arg;
struct onas_scan_event *event_data = NULL;
struct onas_context **ctx = NULL;
sigset_t sigset;
struct sigaction act;
/* ignore all signals except SIGUSR1 */
sigfillset(&sigset);
sigdelset(&sigset, SIGUSR1);
/* The behavior of a process is undefined after it ignores a
* SIGFPE, SIGILL, SIGSEGV, or SIGBUS signal */
sigdelset(&sigset, SIGFPE);
sigdelset(&sigset, SIGILL);
//sigdelset(&sigset, SIGSEGV);
#ifdef SIGBUS
sigdelset(&sigset, SIGBUS);
#endif
pthread_sigmask(SIG_SETMASK, &sigset, NULL);
memset(&act, 0, sizeof(struct sigaction));
act.sa_handler = onas_scth_exit;
sigfillset(&(act.sa_mask));
sigaction(SIGUSR1, &act, NULL);
sigaction(SIGSEGV, &act, NULL);
if (NULL == tharg || NULL == tharg->ctx || NULL == tharg->event_data || NULL == tharg->event_data->pathname || NULL == (*(tharg->ctx))->opts) {
logg("ScanOnAccess: Invalid thread arguments for extra scanning\n");
void *onas_scan_worker(void *arg) {
struct onas_scan_event *event_data = (struct onas_scan_event *) arg;
uint8_t b_dir;
uint8_t b_file;
uint8_t b_inotify;
uint8_t b_fanotify;
if (NULL == event_data || NULL == event_data->pathname) {
logg("ClamWorker: invalid worker arguments for scanning thread\n");
goto done;
}
/* this event_data is ours and ours alone */
event_data = tharg->event_data;
/* load in boolean info from event struct; makes for easier reading--you're welcome */
b_dir = event_data->bool_opts & ONAS_SCTH_B_DIR ? 1 : 0;
b_file = event_data->bool_opts & ONAS_SCTH_B_FILE ? 1 : 0;
b_inotify = event_data->bool_opts & ONAS_SCTH_B_INOTIFY ? 1 : 0;
b_fanotify = event_data->bool_opts & ONAS_SCTH_B_FANOTIFY ? 1 : 0;
if (b_inotify) {
logg("*ClamWorker: handling inotify event ...\n");
/* we share this context globally--it's not ours to touch/edit */
ctx = tharg->ctx;
if (b_dir) {
logg("*ClamWorker: performing (extra) scanning on directory '%s'\n", event_data->pathname);
onas_scth_handle_dir(event_data, event_data->pathname);
} else if (b_file) {
logg("*ClamWorker: performing (extra) scanning on file '%s'\n", event_data->pathname);
onas_scth_handle_file(event_data, event_data->pathname);
if (event_data->b_inotify) {
if (event_data->extra_options & ONAS_SCTH_ISDIR) {
logg("*ScanOnAccess: Performing additional scanning on directory '%s'\n", event_data->pathname);
onas_scth_handle_dir(ctx, event_data->pathname, event_data);
} else if (event_data->extra_options & ONAS_SCTH_ISFILE) {
logg("*ScanOnAccess: Performing additional scanning on file '%s'\n", event_data->pathname);
onas_scth_handle_file(ctx, event_data->pathname, event_data);
}
} else if (event_data->b_fanotify) {
logg("*ScanOnAccess: Performing scanning on file '%s'\n", event_data->pathname);
onas_scth_handle_file(ctx, event_data->pathname, event_data);
} else if (b_fanotify) {
logg("*ClamWorker: performing scanning on file '%s'\n", event_data->pathname);
onas_scth_handle_file(event_data, event_data->pathname);
}
/* TODO: else something went wrong and we should error out here */
/* TODO: else something went wrong and we should probably error out here, maybe try to recover somehow */
done:
/* our job to cleanup event data: worker queue just kicks us off, drops the event object
* from the queue and forgets about us. */
/* our job to cleanup event data: worker queue just kicks us off in a thread pool, drops the event object
* from the queue and forgets about us */
if (NULL != tharg) {
if (NULL != tharg->event_data) {
if (NULL != tharg->event_data->pathname) {
free(tharg->event_data->pathname);
if (NULL != event_data) {
if (NULL != event_data->pathname) {
free(event_data->pathname);
event_data->pathname = NULL;
}
free(tharg->event_data);
tharg->event_data = NULL;
}
/* don't free context, cleanup for context is handled at the highest layer */
free(tharg);
free(event_data);
event_data = NULL;
}
return NULL;
}
/* Simple utility function for external interfaces to add relevant context information to scan_event struct;
* doing this mapping cuts down significantly on memory overhead when queueing hundreds of these scan_event structs */
cl_error_t onas_map_context_info_to_event_data(struct onas_context *ctx, struct onas_scan_event **event_data) {

    struct onas_scan_event *data = NULL;

    if (NULL == ctx || NULL == event_data || NULL == *event_data) {
        logg("*ClamScThread: context and scan event struct are null ...\n");
        return CL_ENULLARG;
    }

    data = *event_data;

    /* copy over only the fields a worker needs from the shared context */
    data->scantype       = ctx->scantype;
    data->timeout        = ctx->timeout;
    data->maxstream      = ctx->maxstream;
    data->tcpaddr        = optget(ctx->clamdopts, "TCPAddr")->strarg;
    data->portnum        = ctx->portnum;
    data->fan_fd         = ctx->fan_fd;
    data->sizelimit      = ctx->sizelimit;
    data->retry_attempts = ctx->retry_attempts;

    /* fold the context's error-policy booleans into the packed option bits */
    if (ctx->retry_on_error) {
        data->bool_opts |= ONAS_SCTH_B_RETRY_ON_E;
    }
    if (ctx->deny_on_error) {
        data->bool_opts |= ONAS_SCTH_B_DENY_ON_E;
    }

    return CL_SUCCESS;
}
#endif

@ -26,26 +26,33 @@
#include "shared/optparser.h"
#include "libclamav/clamav.h"
#define ONAS_SCTH_ISDIR 0x01
#define ONAS_SCTH_ISFILE 0x02
#define ONAS_SCTH_B_DIR 0x01
#define ONAS_SCTH_B_FILE 0x02
#define ONAS_SCTH_B_INOTIFY 0x04
#define ONAS_SCTH_B_FANOTIFY 0x08
#define ONAS_SCTH_B_SCAN 0x10
#define ONAS_SCTH_B_RETRY_ON_E 0x20
#define ONAS_SCTH_B_DENY_ON_E 0x40
struct onas_scan_event {
const char *tcpaddr;
int64_t portnum;
char *pathname;
int fan_fd;
struct fanotify_event_metadata *fmd;
int16_t b_inotify;
int16_t b_fanotify;
int16_t b_scan;
uint32_t extra_options;
};
struct scth_thrarg {
struct onas_scan_event *event_data;
struct onas_context **ctx;
uint8_t retry_attempts;
uint64_t sizelimit;
int32_t scantype;
int64_t maxstream;
int64_t timeout;
uint8_t bool_opts;
};
void *onas_scan_th(void *arg);
int onas_scan(struct onas_context **ctx, const char *fname, STATBUF sb, int *infected, int *err, cl_error_t *ret_code);
int onas_scth_handle_file(struct onas_context **ctx, const char *pathname, struct onas_scan_event *event_data);
void *onas_scan_worker(void *arg);
int onas_scan(struct onas_scan_event *event_data, const char *fname, STATBUF sb, int *infected, int *err, cl_error_t *ret_code);
cl_error_t onas_map_context_info_to_event_data(struct onas_context *ctx, struct onas_scan_event **event_data);
#endif

Loading…
Cancel
Save