clsync
Loading...
Searching...
No Matches
sync.c
Go to the documentation of this file.
1/*
2 clsync - file tree sync utility based on inotify/kqueue
3
4 Copyright (C) 2013-2014 Dmitry Yu Okunev <dyokunev@ut.mephi.ru> 0x8E30679C
5
6 This program is free software: you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation, either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include "common.h"
21
22#if KQUEUE_SUPPORT
23# include "mon_kqueue.h"
24#endif
25#if INOTIFY_SUPPORT
26# include "mon_inotify.h"
27#endif
28#if FANOTIFY_SUPPORT
29# include "mon_fanotify.h"
30#endif
31#if BSM_SUPPORT
32# include "mon_bsm.h"
33# include <bsm/audit_kevents.h>
34#endif
35#if GIO_SUPPORT
36# include <gio/gio.h>
37# include "mon_gio.h"
38#endif
39
40#include "main.h"
41#include "error.h"
42#include "fileutils.h"
43#include "malloc.h"
44#include "cluster.h"
45#include "sync.h"
46#include "glibex.h"
47#include "control.h"
48#include "indexes.h"
49#include "privileged.h"
50#include "rules.h"
51#if CGROUP_SUPPORT
52# include "cgroup.h"
53#endif
54
55#include <stdio.h>
56#include <dlfcn.h>
57
58
60
// seqid - is a counter of main loop. But it may overflow and it's required to compare
// seqid-values anyway.
// So if (a-b) is too big, let's assume, that "b<a".
// The comparison macros below work modulo 2**32: two seqids are considered
// ordered by whichever interpretation puts them less than half the counter
// range apart (classic serial-number arithmetic).
#define SEQID_WINDOW (((unsigned int)~0)>>1)
#define SEQID_EQ(a, b) ((a)==(b))
// "a is greater-or-equal to b" (unsigned wrap-around safe)
#define SEQID_GE(a, b) ((a)-(b) < SEQID_WINDOW)
// "a is less-or-equal to b" (unsigned wrap-around safe)
#define SEQID_LE(a, b) ((b)-(a) < SEQID_WINDOW)
#define SEQID_GT(a, b) ((!SEQID_EQ(a, b)) && (SEQID_GE(a, b)))
#define SEQID_LT(a, b) ((!SEQID_EQ(a, b)) && (SEQID_LE(a, b)))
// Monotonic (mod 2**32) sequence-id generator for main-loop iterations.
static unsigned int _sync_seqid_value = 0;
static inline unsigned int sync_seqid()
{
	// Returns the current value, then advances the counter.
	return _sync_seqid_value++;
}
75
// Exports the current iteration number to child processes via the
// CLSYNC_ITERATION environment variable.
static inline void setenv_iteration ( uint32_t iteration_num )
{
	// Big enough for the largest uint32_t value ("4294967295") plus NUL
	char iterations[sizeof ( "4294967295" )];
	// "%" PRIu32 instead of the old "%i": iteration_num is unsigned, so with
	// "%i" any value above INT_MAX was printed as a negative number — and
	// "-2147483648" (11 chars + NUL) overflowed the buffer. snprintf bounds
	// the write in any case.
	snprintf ( iterations, sizeof ( iterations ), "%" PRIu32, iteration_num );
	setenv ( "CLSYNC_ITERATION", iterations, 1 );
	return;
}
83
84static inline void finish_iteration ( ctx_t *ctx_p )
85{
86 if ( ctx_p->iteration_num < ( typeof ( ctx_p->iteration_num ) ) ~0 ) // ~0 is the max value for unsigned variables
88
89#ifdef THREADING_SUPPORT
90
91 if ( !ctx_p->flags[THREADING] )
92#endif
94
95 debug ( 3, "next iteration: %u/%u",
97 return;
98}
99
100gpointer eidup ( gpointer ei_gp )
101{
102 eventinfo_t *ei = ( eventinfo_t * ) ei_gp;
103 eventinfo_t *ei_dup = ( eventinfo_t * ) xmalloc ( sizeof ( *ei ) );
104 memcpy ( ei_dup, ei, sizeof ( *ei ) );
105 return ( gpointer ) ei_dup;
106}
107
// Merges the monitor event "evinfo_src" into "evinfo_dst" (used when several
// raw events for the same path are coalesced into one). The seqid window
// macros decide which event is "older"/"newer"; the oldest event supplies
// objtype_old, the newest supplies objtype_new. How evmask is combined
// depends on the active monitor backend.
static inline void evinfo_merge ( ctx_t *ctx_p, eventinfo_t *evinfo_dst, eventinfo_t *evinfo_src )
{
	debug ( 3, "evinfo_dst: seqid_min == %u; seqid_max == %u; objtype_old == %i; objtype_new == %i; \t"
	        "evinfo_src: seqid_min == %u; seqid_max == %u; objtype_old == %i; objtype_new == %i",
	        evinfo_dst->seqid_min, evinfo_dst->seqid_max, evinfo_dst->objtype_old, evinfo_dst->objtype_new,
	        evinfo_src->seqid_min, evinfo_src->seqid_max, evinfo_src->objtype_old, evinfo_src->objtype_new
	      );
#if KQUEUE_SUPPORT | INOTIFY_SUPPORT

	// kqueue/inotify masks are bitmasks, so they can simply be OR-ed together
	// NOTE(review): guarded with "#if" outside but "#ifdef" inside — relies
	// on these macros always being defined (as 0/1); confirm in common.h
	switch ( ctx_p->flags[MONITOR] ) {
#ifdef KQUEUE_SUPPORT

		case NE_KQUEUE:
#endif
#ifdef INOTIFY_SUPPORT
		case NE_INOTIFY:
#endif
			evinfo_dst->evmask |= evinfo_src->evmask;
			break;
	}

#endif
	evinfo_dst->flags |= evinfo_src->flags;

	// Take objtype_old/seqid_min from whichever event started earlier
	if ( SEQID_LE ( evinfo_src->seqid_min, evinfo_dst->seqid_min ) ) {
		evinfo_dst->objtype_old = evinfo_src->objtype_old;
		evinfo_dst->seqid_min = evinfo_src->seqid_min;
	}

	// Take objtype_new/seqid_max from whichever event ended later
	if ( SEQID_GE ( evinfo_src->seqid_max, evinfo_dst->seqid_max ) ) {
		evinfo_dst->objtype_new = evinfo_src->objtype_new;
		evinfo_dst->seqid_max = evinfo_src->seqid_max;

		// GIO and BSM event codes are enumerations, not bitmasks: the newest
		// event's code simply replaces the old one
		switch ( ctx_p->flags[MONITOR] ) {
#ifdef GIO_SUPPORT

			case NE_GIO:
				evinfo_dst->evmask = evinfo_src->evmask;
				break;
#endif
#ifdef BSM_SUPPORT

			case NE_BSM:
			case NE_BSM_PREFETCH:
				evinfo_dst->evmask = evinfo_src->evmask;
				break;
#endif

			default:
				break;
		}
	}

	return;
}
163
164static inline int _exitcode_process ( ctx_t *ctx_p, int exitcode )
165{
166 if ( ctx_p->isignoredexitcode[ ( unsigned char ) exitcode] )
167 return 0;
168
169 if ( exitcode && ! ( ( ctx_p->flags[MODE] == MODE_RSYNCDIRECT ) && ( exitcode == 24 ) ) ) {
170 error ( "Got non-zero exitcode %i from __sync_exec().", exitcode );
171 return exitcode;
172 }
173
174 return 0;
175}
176
178{
179 int err = _exitcode_process ( ctx_p, exitcode );
180
181 if ( err ) error ( "Got error-report from exitcode_process().\nExitcode is %i, strerror(%i) returns \"%s\". However strerror() is not ensures compliance "
182 "between exitcode and error description for every utility. So, e.g if you're using rsync, you should look for the error description "
183 "into rsync's manpage (\"man 1 rsync\"). Also some advices about diagnostics can be found in clsync's manpage (\"man 1 clsync\", see DIAGNOSTICS)",
184 exitcode, exitcode, strerror ( exitcode ) );
185
186 return err;
187}
188
189
190threadsinfo_t *thread_info() // TODO: optimize this
191{
192 static threadsinfo_t threadsinfo = {0};
193
194 if ( !threadsinfo.mutex_init ) {
195 int i = 0;
196
197 while ( i < PTHREAD_MUTEX_MAX ) {
198 if ( pthread_mutex_init ( &threadsinfo.mutex[i], NULL ) ) {
199 error ( "Cannot pthread_mutex_init()." );
200 return NULL;
201 }
202
203 if ( pthread_cond_init ( &threadsinfo.cond [i], NULL ) ) {
204 error ( "Cannot pthread_cond_init()." );
205 return NULL;
206 }
207
208 i++;
209 }
210
212 }
213
214 return &threadsinfo;
215}
216
217#ifdef THREADING_SUPPORT
// Takes the global threads-metadata lock and returns the metadata pointer.
// The macro injects the caller's name for debug tracing.
#define thread_info_lock() _thread_info_lock(__func__)
static inline threadsinfo_t *_thread_info_lock ( const char *const function_name )
{
	threadsinfo_t *threadsinfo_p = thread_info();
	debug ( 4, "used by %s()", function_name );
	// NOTE(review): locks even if threadsinfo_p is NULL-checked only by the
	// callers (under PARANOID) — thread_info() failure still reaches here
	pthread_mutex_lock ( &threadsinfo_p->mutex[PTHREAD_MUTEX_THREADSINFO] );
	return threadsinfo_p;
}
226
// Releases the global threads-metadata lock. Returns "rc" unchanged so
// callers can write "return thread_info_unlock(rc);" as a one-liner.
#define thread_info_unlock(...) _thread_info_unlock(__func__, __VA_ARGS__)
static inline int _thread_info_unlock ( const char *const function_name, int rc )
{
	threadsinfo_t *threadsinfo_p = thread_info();
	debug ( 4, "used by %s()", function_name );
	pthread_mutex_unlock ( &threadsinfo_p->mutex[PTHREAD_MUTEX_THREADSINFO] );
	return rc;
}
235
236int threads_foreach ( int ( *funct ) ( threadinfo_t *, void * ), state_t state, void *arg )
237{
238 int i, rc;
239 threadsinfo_t *threadsinfo_p = thread_info_lock();
240#ifdef PARANOID
241
242 if ( threadsinfo_p == NULL )
243 return thread_info_unlock ( EINVAL );
244
245#endif
246 rc = 0;
247 i = 0;
248
249 while ( i < threadsinfo_p->used ) {
250 threadinfo_t *threadinfo_p = &threadsinfo_p->threads[i++];
251
252 if ( ( state == STATE_UNKNOWN ) || ( threadinfo_p->state == state ) ) {
253 if ( ( rc = funct ( threadinfo_p, arg ) ) )
254 break;
255 }
256 }
257
258 return thread_info_unlock ( rc );
259}
260
262{
263 time_t nextexpiretime = 0;
264 threadsinfo_t *threadsinfo_p = thread_info_lock();
265#ifdef PARANOID
266
267 if ( threadsinfo_p == NULL )
268 return thread_info_unlock ( 0 );
269
270#endif
271 int thread_num = threadsinfo_p->used;
272
273 while ( thread_num-- ) {
274 threadinfo_t *threadinfo_p = &threadsinfo_p->threads[thread_num];
275 debug ( 3, "threadsinfo_p->threads[%i].state == %i;\tthreadsinfo_p->threads[%i].pthread == %p;\tthreadsinfo_p->threads[%i].expiretime == %i",
276 thread_num, threadinfo_p->state, thread_num, threadinfo_p->pthread, thread_num, threadinfo_p->expiretime );
277
278 if ( threadinfo_p->state == STATE_EXIT )
279 continue;
280
281 if ( threadinfo_p->expiretime ) {
282 if ( nextexpiretime )
283 nextexpiretime = MIN ( nextexpiretime, threadinfo_p->expiretime );
284 else
285 nextexpiretime = threadinfo_p->expiretime;
286 }
287 }
288
289 thread_info_unlock ( 0 );
290 debug ( 3, "nextexpiretime == %i", nextexpiretime );
291 return nextexpiretime;
292}
293
// Allocates a threadinfo_t slot for a new worker thread, reusing a slot from
// the free-slot stack when available, growing the arrays otherwise. The
// returned slot is reset and marked STATE_RUNNING. Returns NULL only when
// the metadata mutexes could not be initialized (PARANOID builds).
threadinfo_t *thread_new()
{
	threadsinfo_t *threadsinfo_p = thread_info_lock();
#ifdef PARANOID

	if ( threadsinfo_p == NULL ) {
		thread_info_unlock ( 0 );
		return NULL;
	}

#endif
	int thread_num;
	threadinfo_t *threadinfo_p;

	if ( threadsinfo_p->stacklen ) {
		// Reuse a slot previously released by thread_del_bynum()
		threadinfo_p = threadsinfo_p->threadsstack[--threadsinfo_p->stacklen];
		thread_num = threadinfo_p->thread_num;
	} else {
		// No free slot: grow both arrays by ALLOC_PORTION if full, then
		// append a fresh slot (the "+ 2" leaves headroom at the tail)
		if ( threadsinfo_p->used >= threadsinfo_p->allocated ) {
			threadsinfo_p->allocated += ALLOC_PORTION;
			debug ( 2, "Reallocated memory for threadsinfo -> %i.", threadsinfo_p->allocated );
			threadsinfo_p->threads = ( threadinfo_t * ) xrealloc ( ( char * ) threadsinfo_p->threads,
			                         sizeof ( *threadsinfo_p->threads ) * ( threadsinfo_p->allocated + 2 ) );
			threadsinfo_p->threadsstack = ( threadinfo_t ** ) xrealloc ( ( char * ) threadsinfo_p->threadsstack,
			                              sizeof ( *threadsinfo_p->threadsstack ) * ( threadsinfo_p->allocated + 2 ) );
		}

		thread_num = threadsinfo_p->used++;
		threadinfo_p = &threadsinfo_p->threads[thread_num];
	}

#ifdef PARANOID
	memset ( threadinfo_p, 0, sizeof ( *threadinfo_p ) );
#else
	// Minimal reset of the fields the GC and callers rely on
	threadinfo_p->expiretime = 0;
	threadinfo_p->errcode = 0;
	threadinfo_p->exitcode = 0;
#endif
	threadinfo_p->thread_num = thread_num;
	threadinfo_p->state = STATE_RUNNING;
	debug ( 2, "thread_new -> thread_num: %i; used: %i", thread_num, threadsinfo_p->used );
	thread_info_unlock ( 0 );
	return threadinfo_p;
}
338
// Releases the thread slot "thread_num": frees its argv, marks it
// STATE_EXIT, and either shrinks the "threads" array (when the slot is, or
// can be swapped with, the last one) or pushes the slot onto the free-slot
// stack for reuse. Returns 0 or an errno-style code; always releases the
// metadata lock via thread_info_unlock().
int thread_del_bynum ( int thread_num )
{
	debug ( 2, "thread_del_bynum(%i)", thread_num );
	threadsinfo_t *threadsinfo_p = thread_info_lock();
#ifdef PARANOID

	if ( threadsinfo_p == NULL )
		return thread_info_unlock ( errno );

#endif

	if ( thread_num >= threadsinfo_p->used )
		return thread_info_unlock ( EINVAL );

	threadinfo_t *threadinfo_p = &threadsinfo_p->threads[thread_num];
	threadinfo_p->state = STATE_EXIT;
	// Free the NULL-terminated argv vector owned by this thread, if any
	char **ptr = threadinfo_p->argv;

	if ( ptr != NULL ) {
		while ( *ptr )
			free ( * ( ptr++ ) );

		free ( threadinfo_p->argv );
	}

	// Case 1: last slot — just shrink the array
	if ( thread_num == ( threadsinfo_p->used - 1 ) ) {
		threadsinfo_p->used--;
		debug ( 3, "thread_del_bynum(%i): there're %i threads left (#0).", thread_num, threadsinfo_p->used - threadsinfo_p->stacklen );
		return thread_info_unlock ( 0 );
	}

	threadinfo_t *t = &threadsinfo_p->threads[threadsinfo_p->used - 1];

	// Case 2: the last slot is already finished — move it into the freed
	// slot and shrink the array
	if ( t->state == STATE_EXIT ) {
		threadsinfo_p->used--;
		debug ( 3, "%i [%p] -> %i [%p]; left: %i",
		        threadsinfo_p->used, t->pthread, thread_num, threadinfo_p->pthread, threadsinfo_p->used - threadsinfo_p->stacklen );
		memcpy ( threadinfo_p, t, sizeof ( *threadinfo_p ) );
	} else {
		// Case 3: cannot compact — park the freed slot on the reuse stack
#ifdef PARANOID

		if ( threadsinfo_p->stacklen >= threadsinfo_p->allocated ) {
			error ( "Threads metadata structures pointers stack overflowed!" );
			return thread_info_unlock ( EINVAL );
		}

#endif
		threadsinfo_p->threadsstack[threadsinfo_p->stacklen++] = threadinfo_p;
	}

	debug ( 3, "thread_del_bynum(%i): there're %i threads left (#1).", thread_num, threadsinfo_p->used - threadsinfo_p->stacklen );
	return thread_info_unlock ( 0 );
}
392
393int thread_gc ( ctx_t *ctx_p )
394{
395 int thread_num;
396 time_t tm = time ( NULL );
397 debug ( 3, "tm == %i; thread %p", tm, pthread_self() );
398
399 if ( !ctx_p->flags[THREADING] )
400 return 0;
401
402 threadsinfo_t *threadsinfo_p = thread_info_lock();
403#ifdef PARANOID
404
405 if ( threadsinfo_p == NULL )
406 return thread_info_unlock ( errno );
407
408#endif
409 debug ( 2, "There're %i threads.", threadsinfo_p->used );
410 thread_num = -1;
411
412 while ( ++thread_num < threadsinfo_p->used ) {
413 int err;
414 threadinfo_t *threadinfo_p = &threadsinfo_p->threads[thread_num];
415 debug ( 3, "Trying thread #%i (==%i) (state: %i; expire at: %i, now: %i, exitcode: %i, errcode: %i; i_p: %p; p: %p).",
416 thread_num, threadinfo_p->thread_num, threadinfo_p->state, threadinfo_p->expiretime, tm, threadinfo_p->exitcode,
417 threadinfo_p->errcode, threadinfo_p, threadinfo_p->pthread );
418
419 if ( threadinfo_p->state == STATE_EXIT )
420 continue;
421
422 if ( threadinfo_p->expiretime && ( threadinfo_p->expiretime <= tm ) ) {
423 if ( pthread_tryjoin_np ( threadinfo_p->pthread, NULL ) ) { // TODO: check this pthread_tryjoin_np() on error returnings
424 error ( "Debug3: thread_gc(): Thread #%i is alive too long: %lu <= %lu (started at %lu)", thread_num, threadinfo_p->expiretime, tm, threadinfo_p->starttime );
425 return thread_info_unlock ( ETIME );
426 }
427 }
428
429#ifndef VERYPARANOID
430
431 if ( threadinfo_p->state != STATE_TERM ) {
432 debug ( 3, "Thread #%i is busy, skipping (#0).", thread_num );
433 continue;
434 }
435
436#endif
437 debug ( 3, "Trying to join thread #%i: %p", thread_num, threadinfo_p->pthread );
438#ifndef VERYPARANOID
439
440 switch ( ( err = pthread_join ( threadinfo_p->pthread, NULL ) ) ) {
441#else
442
443 switch ( ( err = pthread_tryjoin_np ( threadinfo_p->pthread, NULL ) ) ) {
444 case EBUSY:
445 debug ( 3, "Thread #%i is busy, skipping (#1).", thread_num );
446 continue;
447#endif
448
449 case EDEADLK:
450 case EINVAL:
451 case 0:
452 debug ( 3, "Thread #%i is finished with exitcode %i (errcode %i), deleting. threadinfo_p == %p",
453 thread_num, threadinfo_p->exitcode, threadinfo_p->errcode, threadinfo_p );
454 break;
455
456 default:
457 error ( "Got error while pthread_join() or pthread_tryjoin_np().", strerror ( err ), err );
458 return thread_info_unlock ( errno );
459 }
460
461 if ( threadinfo_p->errcode ) {
462 error ( "Got error from thread #%i: errcode %i.", thread_num, threadinfo_p->errcode );
463 thread_info_unlock ( 0 );
464 thread_del_bynum ( thread_num );
465 return threadinfo_p->errcode;
466 }
467
468 thread_info_unlock ( 0 );
469
470 if ( thread_del_bynum ( thread_num ) )
471 return errno;
472
473 thread_info_lock();
474 }
475
476 debug ( 3, "There're %i threads left.", threadsinfo_p->used - threadsinfo_p->stacklen );
477 return thread_info_unlock ( 0 );
478}
479
480int thread_cleanup ( ctx_t *ctx_p )
481{
482 ( void ) ctx_p;
483 debug ( 3, "" );
484 threadsinfo_t *threadsinfo_p = thread_info_lock();
485#ifdef PARANOID
486
487 if ( threadsinfo_p == NULL )
488 return thread_info_unlock ( errno );
489
490#endif
491 // Waiting for threads:
492 debug ( 1, "There're %i opened threads. Waiting.", threadsinfo_p->used );
493
494 while ( threadsinfo_p->used ) {
495// int err;
496 threadinfo_t *threadinfo_p = &threadsinfo_p->threads[--threadsinfo_p->used];
497
498 if ( threadinfo_p->state == STATE_EXIT )
499 continue;
500
501 //pthread_kill(threadinfo_p->pthread, SIGTERM);
502 debug ( 1, "killing pid %i with SIGTERM", threadinfo_p->child_pid );
503 kill ( threadinfo_p->child_pid, SIGTERM );
504 pthread_join ( threadinfo_p->pthread, NULL );
505 debug ( 2, "thread #%i exitcode: %i", threadsinfo_p->used, threadinfo_p->exitcode );
506 /*
507 if(threadinfo_p->callback)
508 if((err=threadinfo_p->callback(ctx_p, threadinfo_p->argv)))
509 warning("Got error from callback function.", strerror(err), err);
510 */
511 char **ptr = threadinfo_p->argv;
512
513 while ( *ptr )
514 free ( * ( ptr++ ) );
515
516 free ( threadinfo_p->argv );
517 }
518
519 debug ( 3, "All threads are closed." );
520
521 // Freeing
522 if ( threadsinfo_p->allocated ) {
523 free ( threadsinfo_p->threads );
524 free ( threadsinfo_p->threadsstack );
525 }
526
527 if ( threadsinfo_p->mutex_init ) {
528 int i = 0;
529
530 while ( i < PTHREAD_MUTEX_MAX ) {
531 pthread_mutex_destroy ( &threadsinfo_p->mutex[i] );
532 pthread_cond_destroy ( &threadsinfo_p->cond [i] );
533 i++;
534 }
535 }
536
537#ifdef PARANOID
538 // Reseting
539 memset ( threadsinfo_p, 0, sizeof ( *threadsinfo_p ) ); // Just in case;
540#endif
541 debug ( 3, "done." );
542 return thread_info_unlock ( 0 );
543}
544#endif
545
// Global pointer to the main-loop state, shared with signal handlers.
volatile state_t *state_p = NULL;
// Global process exit code; worker threads may set it before raising SIGTERM.
volatile int exitcode = 0;
// True when syncs should be dispatched to worker threads: threading is not
// off, and either it is not "safe" mode or the first iteration already ran.
#define SHOULD_THREAD(ctx_p) ((ctx_p->flags[THREADING] != PM_OFF) && (ctx_p->flags[THREADING] != PM_SAFE || ctx_p->iteration_num))
549
// Forks (privileged) and execs argv[0] with the given argv, stores the child
// pid into *child_pid (if non-NULL), waits for the child and returns its
// exit status (WEXITSTATUS), or errno on wait failure.
int exec_argv ( char **argv, int *child_pid )
{
	debug ( 3, "Thread %p.", pthread_self() );
	pid_t pid;
	int status;
	// Forking
	pid = privileged_fork_execvp ( argv[0], ( char *const * ) argv );
//	debug(3, "After fork thread %p"")".", pthread_self() );
	debug ( 3, "Child pid is %u", pid );

	// Setting *child_pid value
	if ( child_pid )
		*child_pid = pid;

	// Waiting for process end
#ifdef VERYPARANOID
	// Block SIGUSR_BLOPINT so the wait below cannot be interrupted by it
	sigset_t sigset_exec, sigset_old;
	sigemptyset ( &sigset_exec );
	sigaddset ( &sigset_exec, SIGUSR_BLOPINT );
	pthread_sigmask ( SIG_BLOCK, &sigset_exec, &sigset_old );
#endif

//	debug(3, "Pre-wait thread %p"")".", pthread_self() );
	if ( privileged_waitpid ( pid, &status, 0 ) != pid ) {
		switch ( errno ) {
			case ECHILD:
				debug ( 2, "Child %u is already dead.", pid );
				break;

			default:
				// Fix: the message said "waitid()" although the code calls
				// privileged_waitpid()
				error ( "Cannot privileged_waitpid()." );
				return errno;
		}
	}

//	debug(3, "After-wait thread %p"")".", pthread_self() );
#ifdef VERYPARANOID
	pthread_sigmask ( SIG_SETMASK, &sigset_old, NULL );
#endif
	// Return
	int exitcode = WEXITSTATUS ( status );
	debug ( 3, "execution completed with exitcode %i", exitcode );
	return exitcode;
}
594
595#ifdef THREADING_SUPPORT
596static inline int thread_exit ( threadinfo_t *threadinfo_p, int exitcode )
597{
598 int err = 0;
599 threadinfo_p->exitcode = exitcode;
600#if _DEBUG_FORCE | VERYPARANOID
601
602 if ( threadinfo_p->pthread != pthread_self() ) {
603 error ( "pthread id mismatch! (i_p->p) %p != (p) %p""", threadinfo_p->pthread, pthread_self() );
604 return EINVAL;
605 }
606
607#endif
608
609 if ( threadinfo_p->callback ) {
610 if ( threadinfo_p->ctx_p->flags[DEBUG] > 2 ) {
611 debug ( 3, "thread %p, argv: ", threadinfo_p->pthread );
612 char **argv = threadinfo_p->argv;
613
614 while ( *argv ) {
615 debug ( 3, "\t%p == %s", *argv, *argv );
616 argv++;
617 }
618 }
619
620 if ( ( err = threadinfo_p->callback ( threadinfo_p->ctx_p, threadinfo_p->callback_arg ) ) ) {
621 error ( "Got error from callback function.", strerror ( err ), err );
622 threadinfo_p->errcode = err;
623 }
624 }
625
626 // Notifying the parent-thread, that it's time to collect garbage threads
627 threadinfo_p->state = STATE_TERM;
628 debug ( 3, "thread %p is sending signal to sighandler to call GC", threadinfo_p->pthread );
629 return pthread_kill ( pthread_sighandler, SIGUSR_THREAD_GC );
630}
631#endif
632
633static inline void so_call_sync_finished ( int n, api_eventinfo_t *ei )
634{
635 int i = 0;
636 api_eventinfo_t *ei_i = ei;
637
638 while ( i < n ) {
639#ifdef PARANOID
640
641 if ( ei_i->path == NULL ) {
642 warning ( "ei_i->path == NULL" );
643 i++;
644 continue;
645 }
646
647#endif
648 free ( ( char * ) ei_i->path );
649 ei_i++;
650 i++;
651 }
652
653 if ( ei != NULL )
654 free ( ei );
655
656 return;
657}
658
659#ifdef THREADING_SUPPORT
// Worker-thread entry point for shared-object sync handlers: runs the
// handler with retries (bounded by ctx_p->retries unless retries==0, which
// means "retry forever" until TERM/EXIT), records errors, frees the event
// array and finalizes the thread via thread_exit().
int so_call_sync_thread ( threadinfo_t *threadinfo_p )
{
	debug ( 3, "thread_num == %i; threadinfo_p == %p; i_p->pthread %p; thread %p",
	        threadinfo_p->thread_num, threadinfo_p, threadinfo_p->pthread, pthread_self() );
	ctx_t *ctx_p = threadinfo_p->ctx_p;
	int n = threadinfo_p->n;
	api_eventinfo_t *ei = threadinfo_p->ei;
	int err = 0, rc = 0, try_again = 0;

	do {
		try_again = 0;
		threadinfo_p->try_n++;
		rc = ctx_p->handler_funct.sync ( n, ei );

		if ( ( err = exitcode_process ( threadinfo_p->ctx_p, rc ) ) ) {
			// Retry while under the retry limit (0 == unlimited) and the
			// program isn't shutting down
			try_again = ( ( !ctx_p->retries ) || ( threadinfo_p->try_n < ctx_p->retries ) ) && ( ctx_p->state != STATE_TERM ) && ( ctx_p->state != STATE_EXIT );
			warning ( "Bad exitcode %i (errcode %i). %s.", rc, err, try_again ? "Retrying" : "Give up" );

			if ( try_again ) {
				debug ( 2, "Sleeping for %u seconds before the retry.", ctx_p->syncdelay );
				sleep ( ctx_p->syncdelay );
			}
		}
	} while ( err && ( ( !ctx_p->retries ) || ( threadinfo_p->try_n < ctx_p->retries ) ) && ( ctx_p->state != STATE_TERM ) && ( ctx_p->state != STATE_EXIT ) );

	if ( err && !ctx_p->flags[IGNOREFAILURES] ) {
		error ( "Bad exitcode %i (errcode %i)", rc, err );
		threadinfo_p->errcode = err;
	}

	// Free the event array regardless of the handler's result
	so_call_sync_finished ( n, ei );

	if ( ( err = thread_exit ( threadinfo_p, rc ) ) ) {
		exitcode = err; // This's global variable "exitcode"
		pthread_kill ( pthread_sighandler, SIGTERM );
	}

	return rc;
}
700
// Dispatches n events to the shared-object sync handler. Non-threaded path:
// run inline with a retry loop, an alarm()-based timeout and state
// save/restore. Threaded path (THREADING_SUPPORT and SHOULD_THREAD): spawn
// so_call_sync_thread() with a duplicated fpath2ei hash table.
static inline int so_call_sync ( ctx_t *ctx_p, indexes_t *indexes_p, int n, api_eventinfo_t *ei )
{
	debug ( 2, "n == %i", n );
#ifdef THREADING_SUPPORT

	if ( !SHOULD_THREAD ( ctx_p ) ) {
#endif
		int rc = 0, ret = 0, err = 0;
		int try_n = 0, try_again;
		state_t status = STATE_UNKNOWN;
//		indexes_p->nonthreaded_syncing_fpath2ei_ht = g_hash_table_dup(indexes_p->fpath2ei_ht, g_str_hash, g_str_equal, free, free, (gpointer(*)(gpointer))strdup, eidup);
		// Non-threaded: reuse the live hash table instead of duplicating it
		indexes_p->nonthreaded_syncing_fpath2ei_ht = indexes_p->fpath2ei_ht;

		do {
			try_again = 0;
			try_n++;
			// Watchdog: SIGALRM fires if the handler exceeds synctimeout
			alarm ( ctx_p->synctimeout );
			rc = ctx_p->handler_funct.sync ( n, ei );
			alarm ( 0 );

			if ( ( err = exitcode_process ( ctx_p, rc ) ) ) {
				// On first failure remember the pre-failure state so it can
				// be restored after a successful retry
				if ( ( try_n == 1 ) && ( ctx_p->state != STATE_TERM ) && ( ctx_p->state != STATE_EXIT ) ) {
					status = ctx_p->state;
					// NOTE(review): the HTML extraction dropped one or two
					// lines here (likely switching ctx_p->state while
					// retrying) — verify against upstream
				}

				try_again = ( ( !ctx_p->retries ) || ( try_n < ctx_p->retries ) ) && ( ctx_p->state != STATE_TERM ) && ( ctx_p->state != STATE_EXIT );
				warning ( "Bad exitcode %i (errcode %i). %s.", rc, err, try_again ? "Retrying" : "Give up" );

				if ( try_again ) {
					debug ( 2, "Sleeping for %u seconds before the retry.", ctx_p->syncdelay );
					sleep ( ctx_p->syncdelay );
				}
			}
		} while ( err && ( ( !ctx_p->retries ) || ( try_n < ctx_p->retries ) ) && ( ctx_p->state != STATE_TERM ) && ( ctx_p->state != STATE_EXIT ) );

		if ( err && !ctx_p->flags[IGNOREFAILURES] ) {
			error ( "Bad exitcode %i (errcode %i)", rc, err );
			ret = err;
		} else if ( status != STATE_UNKNOWN ) {
			// Restore the state saved before the first failed attempt
			ctx_p->state = status;
			// NOTE(review): one line appears to be missing here in the
			// extraction — verify against upstream
		}

//		g_hash_table_destroy(indexes_p->nonthreaded_syncing_fpath2ei_ht);
		indexes_p->nonthreaded_syncing_fpath2ei_ht = NULL;
		so_call_sync_finished ( n, ei );
		return ret;
#ifdef THREADING_SUPPORT
	}

	// Threaded path: hand the events to a dedicated worker thread
	threadinfo_t *threadinfo_p = thread_new();

	if ( threadinfo_p == NULL )
		return errno;

	threadinfo_p->try_n = 0;
	threadinfo_p->callback = NULL;
	threadinfo_p->argv = NULL;
	threadinfo_p->ctx_p = ctx_p;
	threadinfo_p->starttime = time ( NULL );
	// The worker gets its own deep copy of the path->event hash table
	threadinfo_p->fpath2ei_ht = g_hash_table_dup ( indexes_p->fpath2ei_ht, g_str_hash, g_str_equal, free, free, ( gpointer ( * ) ( gpointer ) ) strdup, eidup );
	threadinfo_p->n = n;
	threadinfo_p->ei = ei;
	threadinfo_p->iteration = ctx_p->iteration_num;

	if ( ctx_p->synctimeout )
		threadinfo_p->expiretime = threadinfo_p->starttime + ctx_p->synctimeout;

	if ( pthread_create ( &threadinfo_p->pthread, NULL, ( void * ( * ) ( void * ) ) so_call_sync_thread, threadinfo_p ) ) {
		error ( "Cannot pthread_create()." );
		return errno;
	}

	debug ( 3, "thread %p", threadinfo_p->pthread );
	return 0;
#endif
}
780
781static inline int so_call_rsync_finished ( ctx_t *ctx_p, const char *inclistfile, const char *exclistfile )
782{
783 int ret0, ret1;
784 debug ( 5, "" );
785
786 if ( ctx_p->flags[DONTUNLINK] )
787 return 0;
788
789 if ( inclistfile == NULL ) {
790 error ( "inclistfile == NULL." );
791 return EINVAL;
792 }
793
794 debug ( 3, "unlink()-ing \"%s\"", inclistfile );
795 ret0 = unlink ( inclistfile );
796
798 return ret0;
799
800 if ( exclistfile == NULL ) {
801 error ( "exclistfile == NULL." );
802 return EINVAL;
803 }
804
805 debug ( 3, "unlink()-ing \"%s\"", exclistfile );
806 ret1 = unlink ( exclistfile );
807 return ret0 == 0 ? ret1 : ret0;
808}
809
810#ifdef THREADING_SUPPORT
// Worker-thread entry point for shared-object rsync handlers: runs the
// handler on (inclistfile, exclistfile) stored in argv[0]/argv[1] with
// retries, unlinks the list files, frees argv and finalizes via
// thread_exit(). Fatal cleanup/finalization errors escalate to SIGTERM.
int so_call_rsync_thread ( threadinfo_t *threadinfo_p )
{
	debug ( 3, "thread_num == %i; threadinfo_p == %p; i_p->pthread %p; thread %p",
	        threadinfo_p->thread_num, threadinfo_p, threadinfo_p->pthread, pthread_self() );
	ctx_t *ctx_p = threadinfo_p->ctx_p;
	char **argv = threadinfo_p->argv;
	int err = 0, rc = 0, try_again;

	do {
		try_again = 0;
		threadinfo_p->try_n++;
		// argv[0] == include-list file, argv[1] == exclude-list file
		rc = ctx_p->handler_funct.rsync ( argv[0], argv[1] );

		if ( ( err = exitcode_process ( threadinfo_p->ctx_p, rc ) ) ) {
			// Retry while under the limit (retries==0 means unlimited) and
			// not shutting down
			try_again = ( ( !ctx_p->retries ) || ( threadinfo_p->try_n < ctx_p->retries ) ) && ( ctx_p->state != STATE_TERM ) && ( ctx_p->state != STATE_EXIT );
			warning ( "Bad exitcode %i (errcode %i). %s.", rc, err, try_again ? "Retrying" : "Give up" );

			if ( try_again ) {
				debug ( 2, "Sleeping for %u seconds before the retry.", ctx_p->syncdelay );
				sleep ( ctx_p->syncdelay );
			}
		}
	} while ( try_again );

	if ( err && !ctx_p->flags[IGNOREFAILURES] ) {
		error ( "Bad exitcode %i (errcode %i)", rc, err );
		threadinfo_p->errcode = err;
	}

	// Clean up the temporary list files; escalate on failure
	if ( ( err = so_call_rsync_finished ( ctx_p, argv[0], argv[1] ) ) ) {
		exitcode = err; // This's global variable "exitcode"
		pthread_kill ( pthread_sighandler, SIGTERM );
	}

	free ( argv[0] );
	free ( argv[1] );
	free ( argv );

	if ( ( err = thread_exit ( threadinfo_p, rc ) ) ) {
		exitcode = err; // This's global variable "exitcode"
		pthread_kill ( pthread_sighandler, SIGTERM );
	}

	return rc;
}
857
// Dispatches an rsync-style shared-object handler on the generated
// include/exclude list files. Non-threaded path: inline retry loop with
// alarm() timeout, then unlink of the list files. Threaded path: spawn
// so_call_rsync_thread() with duplicated list-file names and hash table.
static inline int so_call_rsync ( ctx_t *ctx_p, indexes_t *indexes_p, const char *inclistfile, const char *exclistfile )
{
	debug ( 2, "inclistfile == \"%s\"; exclistfile == \"%s\"", inclistfile, exclistfile );
#ifdef THREADING_SUPPORT

	if ( !SHOULD_THREAD ( ctx_p ) ) {
#endif
		debug ( 3, "ctx_p->handler_funct.rsync == %p", ctx_p->handler_funct.rsync );
//		indexes_p->nonthreaded_syncing_fpath2ei_ht = g_hash_table_dup(indexes_p->fpath2ei_ht, g_str_hash, g_str_equal, free, free, (gpointer(*)(gpointer))strdup, eidup);
		// Non-threaded: reuse the live hash table instead of duplicating it
		indexes_p->nonthreaded_syncing_fpath2ei_ht = indexes_p->fpath2ei_ht;
		int rc = 0, err = 0;
		int try_n = 0, try_again;
		state_t status = STATE_UNKNOWN;

		do {
			try_again = 0;
			try_n++;
			// Watchdog: SIGALRM fires if the handler exceeds synctimeout
			alarm ( ctx_p->synctimeout );
			rc = ctx_p->handler_funct.rsync ( inclistfile, exclistfile );
			alarm ( 0 );

			if ( ( err = exitcode_process ( ctx_p, rc ) ) ) {
				// On first failure remember the pre-failure state so it can
				// be restored later
				if ( ( try_n == 1 ) && ( ctx_p->state != STATE_TERM ) && ( ctx_p->state != STATE_EXIT ) ) {
					status = ctx_p->state;
					// NOTE(review): the HTML extraction dropped one or two
					// lines here — verify against upstream
				}

				try_again = ( ( !ctx_p->retries ) || ( try_n < ctx_p->retries ) ) && ( ctx_p->state != STATE_TERM ) && ( ctx_p->state != STATE_EXIT );
				warning ( "Bad exitcode %i (errcode %i). %s.", rc, err, try_again ? "Retrying" : "Give up" );

				if ( try_again ) {
					debug ( 2, "Sleeping for %u seconds before the retry.", ctx_p->syncdelay );
					sleep ( ctx_p->syncdelay );
				}
			}
		} while ( try_again );

		if ( err && !ctx_p->flags[IGNOREFAILURES] ) {
			error ( "Bad exitcode %i (errcode %i)", rc, err );
			rc = err;
		} else if ( ( status != STATE_UNKNOWN ) && ( ctx_p->state != STATE_TERM ) && ( ctx_p->state != STATE_EXIT ) ) {
			// Restore the state saved before the first failed attempt
			ctx_p->state = status;
			// NOTE(review): one line appears to be missing here in the
			// extraction — verify against upstream
		}

//		g_hash_table_destroy(indexes_p->nonthreaded_syncing_fpath2ei_ht);
		indexes_p->nonthreaded_syncing_fpath2ei_ht = NULL;
		int ret_cleanup;

		// Unlink the list files; the handler's error wins over cleanup's
		if ( ( ret_cleanup = so_call_rsync_finished ( ctx_p, inclistfile, exclistfile ) ) )
			return rc ? rc : ret_cleanup;

		return rc;
#ifdef THREADING_SUPPORT
	}

	// Threaded path: hand the list files to a dedicated worker thread
	threadinfo_t *threadinfo_p = thread_new();

	if ( threadinfo_p == NULL )
		return errno;

	threadinfo_p->try_n = 0;
	threadinfo_p->callback = NULL;
	threadinfo_p->argv = xmalloc ( sizeof ( char * ) * 3 );
	threadinfo_p->ctx_p = ctx_p;
	threadinfo_p->starttime = time ( NULL );
	// The worker gets its own deep copy of the path->event hash table
	threadinfo_p->fpath2ei_ht = g_hash_table_dup ( indexes_p->fpath2ei_ht, g_str_hash, g_str_equal, free, free, ( gpointer ( * ) ( gpointer ) ) strdup, eidup );
	threadinfo_p->iteration = ctx_p->iteration_num;
	threadinfo_p->argv[0] = strdup ( inclistfile );
	threadinfo_p->argv[1] = strdup ( exclistfile );

	if ( ctx_p->synctimeout )
		threadinfo_p->expiretime = threadinfo_p->starttime + ctx_p->synctimeout;

	if ( pthread_create ( &threadinfo_p->pthread, NULL, ( void * ( * ) ( void * ) ) so_call_rsync_thread, threadinfo_p ) ) {
		error ( "Cannot pthread_create()." );
		return errno;
	}

	debug ( 3, "thread %p", threadinfo_p->pthread );
	return 0;
#endif
}
942
// === SYNC_EXEC() === {

//#define SYNC_EXEC(...) (SHOULD_THREAD(ctx_p) ? sync_exec_thread : sync_exec )(__VA_ARGS__)
// Dispatcher: run the handler in a worker thread when SHOULD_THREAD says so,
// inline otherwise (or always inline without THREADING_SUPPORT).
#ifdef THREADING_SUPPORT
#define SYNC_EXEC_ARGV(...) (SHOULD_THREAD(ctx_p) ? sync_exec_argv_thread : sync_exec_argv)(__VA_ARGS__)
#else
#define SYNC_EXEC_ARGV(...) sync_exec_argv(__VA_ARGS__)
#endif

// Dump an argv vector to the debug log, but only when the configured debug
// level makes it visible (avoids the walk otherwise).
#define debug_argv_dump(level, argv)\
	if (unlikely(ctx_p->flags[DEBUG] >= level))\
		argv_dump(level, argv)
955
956static inline void argv_dump ( int debug_level, char **argv )
957{
958#ifdef _DEBUG_FORCE
959 debug ( 19, "(%u, %p)", debug_level, argv );
960#endif
961 char **argv_p = argv;
962
963 while ( *argv_p != NULL ) {
964 debug ( debug_level, "%p: \"%s\"", *argv_p, *argv_p );
965 argv_p++;
966 }
967
968 return;
969}
970
// Collects the variadic arguments following "firstarg" into the NULL-
// terminated array "argv", transforming each via COPYARG (e.g. strdup).
// Returns ENOMEM from the enclosing function when MAXARGUMENTS is exceeded.
// NOTE(review): the early "return ENOMEM" path skips va_end() — harmless on
// most ABIs but formally required by the C standard.
#define _sync_exec_getargv(argv, firstarg, COPYARG) {\
	va_list arglist;\
	va_start(arglist, firstarg);\
	\
	int i = 0;\
	do {\
		char *arg;\
		if(i >= MAXARGUMENTS) {\
			error("Too many arguments (%i >= %i).", i, MAXARGUMENTS);\
			return ENOMEM;\
		}\
		arg = (char *)va_arg(arglist, const char *const);\
		argv[i] = arg!=NULL ? COPYARG : NULL;\
	} while(argv[i++] != NULL);\
	va_end(arglist);\
	}
987
// Converts a watchdir-relative path to an absolute one ("watchdir/rel"),
// reusing/growing the caller's buffer (path_abs_oldptr, *path_abs_len_p) in
// realloc style. Returns the (possibly new) buffer; NULL for NULL input.
// Pass path_rel_len == -1 to have the length computed with strlen().
char *sync_path_rel2abs ( ctx_t *ctx_p, const char *path_rel, ssize_t path_rel_len, size_t *path_abs_len_p, char *path_abs_oldptr )
{
	if ( path_rel == NULL )
		return NULL;

	if ( path_rel_len == -1 )
		path_rel_len = strlen ( path_rel );

	char *path_abs;
	// NOTE(review): the initializer expression of "watchdirlen" was lost in
	// the HTML extraction (presumably the watchdir length, 0 when watchdir
	// is "/") — verify against upstream; as extracted this does not compile
	size_t watchdirlen =
	// if [watchdir == "/"] ? 0 : watchdir.length()
	size_t path_abs_len = path_rel_len + watchdirlen + 1;
	// Reallocate only when the old buffer is absent or too small
	path_abs = ( path_abs_len_p == NULL || path_abs_len >= *path_abs_len_p ) ?
	           xrealloc ( path_abs_oldptr, path_abs_len + 1 ) :
	           path_abs_oldptr;

	if ( path_abs_oldptr == NULL ) {
		// Fresh buffer: lay down "watchdir/" prefix once
		memcpy ( path_abs, ctx_p->watchdir, watchdirlen );
		path_abs[watchdirlen] = '/';
	}

	// Append the relative part (including its NUL terminator)
	memcpy ( &path_abs[watchdirlen + 1], path_rel, path_rel_len + 1 );

	if ( path_abs_len_p != NULL )
		*path_abs_len_p = path_abs_len;

	return path_abs;
}
1017
// Converts an absolute path under watchdir to a watchdir-relative one,
// reusing/growing the caller's buffer (path_rel_oldptr, *path_rel_len_p) in
// realloc style. Returns the (possibly new) buffer; NULL for NULL input.
// Pass path_abs_len == -1 to have the length computed with strlen().
char *sync_path_abs2rel ( ctx_t *ctx_p, const char *path_abs, ssize_t path_abs_len, size_t *path_rel_len_p, char *path_rel_oldptr )
{
	if ( path_abs == NULL )
		return NULL;

	if ( path_abs_len == -1 )
		path_abs_len = strlen ( path_abs );

	size_t path_rel_len;
	char *path_rel;
	// NOTE(review): the initializer expression of "watchdirlen" was lost in
	// the HTML extraction (presumably the watchdir length) — verify against
	// upstream; as extracted this does not compile
	size_t watchdirlen =
	signed long path_rel_len_signed = path_abs_len - ( watchdirlen + 1 );
	// Clamp to 0 when the absolute path is not longer than "watchdir/"
	path_rel_len = ( path_rel_len_signed > 0 ) ? path_rel_len_signed : 0;
	// Reallocate only when the old buffer is absent or too small
	path_rel = ( path_rel_len_p == NULL || path_rel_len >= *path_rel_len_p ) ?
	           xrealloc ( path_rel_oldptr, path_rel_len + 1 ) :
	           path_rel_oldptr;

	if ( !path_rel_len ) {
		path_rel[0] = 0;
		return path_rel;
	}

	// Copy everything after the "watchdir/" prefix (incl. NUL terminator)
	memcpy ( path_rel, &path_abs[watchdirlen + 1], path_rel_len + 1 );
#ifdef VERYPARANOID
	// Removing "/" on the end
	debug ( 3, "\"%s\" (len: %i) --%i--> \"%s\" (len: %i) + ",
	        path_abs, path_abs_len, path_rel[path_rel_len - 1] == '/',
	        ctx_p->watchdirwslash, watchdirlen + 1 );

	if ( path_rel[path_rel_len - 1] == '/' )
		path_rel[--path_rel_len] = 0x00;

	debug ( 3, "\"%s\" (len: %i)", path_rel, path_rel_len );
#endif

	if ( path_rel_len_p != NULL )
		*path_rel_len_p = path_rel_len;

	return path_rel;
}
1059
/*
 * NOTE(review): the signature of this function was lost during extraction.
 * Judging by the body (stale-pid GC over ctx_p->child_pid, MAXCHILDREN
 * limit, fork()), this is the child-process spawner ("sync_fork"-like) and
 * takes at least "ctx_t *ctx_p" — restore the exact signature from upstream.
 */
{
// if(ctx_p->flags[THREADING])
// return fork();
	// Cleaning stale pids. TODO: Optimize this. Remove this GC.
	int i = 0;

	while ( i < ctx_p->children ) {
		if ( privileged_waitpid ( ctx_p->child_pid[i], NULL, WNOHANG ) < 0 )
			// NOTE(review): the statement body of this "if" is missing in
			// the extracted source (a doxygen line was dropped); as written,
			// the "i++" below binds to it, which cannot be the intent —
			// upstream presumably removed the reaped pid from the list here.
			if ( errno == ECHILD )

		i++;
	}

	// Too many children
	if ( ctx_p->children >= MAXCHILDREN ) {
		errno = ECANCELED;
		return -1;
	}

	// Forking
	pid_t pid = fork();
	// NOTE(review): fork() failure (-1) is stored in child_pid unchecked.
	ctx_p->child_pid[ctx_p->children++] = pid;
	return pid;
}
1086
/**
 * Runs the sync handler "argv" synchronously (non-threaded path), with
 * retry logic governed by ctx_p->retries/syncdelay and a watchdog alarm of
 * ctx_p->synctimeout seconds.  Invokes "callback" (if any) afterwards.
 *
 * @return 0 on success; the exitcode_process() error code on persistent
 *         failure (unless IGNOREFAILURES is set), or the callback's error.
 */
int sync_exec_argv ( ctx_t *ctx_p, indexes_t *indexes_p, thread_callbackfunct_t callback, thread_callbackfunct_arg_t *callback_arg_p, char **argv )
{
	debug ( 2, "" );
	debug_argv_dump ( 2, argv );
// indexes_p->nonthreaded_syncing_fpath2ei_ht = g_hash_table_dup(indexes_p->fpath2ei_ht, g_str_hash, g_str_equal, free, free, (gpointer(*)(gpointer))strdup, eidup);
	// Non-threaded mode borrows the live hash table instead of duplicating it.
	indexes_p->nonthreaded_syncing_fpath2ei_ht = indexes_p->fpath2ei_ht;
	int exitcode = 0, ret = 0, err = 0;
	int try_n = 0, try_again;
	state_t status = STATE_UNKNOWN;

	do {
		try_again = 0;
		try_n++;
		debug ( 2, "try_n == %u (retries == %u)", try_n, ctx_p->retries );
		alarm ( ctx_p->synctimeout );
		ctx_p->children = 1;
		// NOTE(review): a line is missing here in the extracted source
		// (doxygen line 1103) — presumably the actual handler invocation,
		// e.g. "exitcode = exec_argv(argv, &ctx_p->child_pid[0]);".
		// Restore from upstream sync.c.
		ctx_p->children = 0;
		alarm ( 0 );

		if ( ( err = exitcode_process ( ctx_p, exitcode ) ) ) {
			// Remember the pre-failure state once, so it can be restored
			// after a successful retry (unless we are shutting down).
			if ( ( try_n == 1 ) && ( ctx_p->state != STATE_TERM ) && ( ctx_p->state != STATE_EXIT ) ) {
				status = ctx_p->state;
				// NOTE(review): lines missing here (doxygen 1110-1111);
				// likely switching ctx_p->state while retrying.
			}

			try_again = ( ( !ctx_p->retries ) || ( try_n < ctx_p->retries ) ) && ( ctx_p->state != STATE_TERM ) && ( ctx_p->state != STATE_EXIT );
			warning ( "Bad exitcode %i (errcode %i). %s.", exitcode, err, try_again ? "Retrying" : "Give up" );

			if ( try_again ) {
				debug ( 2, "Sleeping for %u seconds before the retry.", ctx_p->syncdelay );
				sleep ( ctx_p->syncdelay );
			}
		}
	} while ( try_again );

	if ( err && !ctx_p->flags[IGNOREFAILURES] ) {
		error ( "Bad exitcode %i (errcode %i)", exitcode, err );
		ret = err;
	} else if ( ( status != STATE_UNKNOWN ) && ( ctx_p->state != STATE_TERM ) && ( ctx_p->state != STATE_EXIT ) ) {
		// Restore the state saved before the (now recovered) failure.
		ctx_p->state = status;
		// NOTE(review): a line is missing here (doxygen 1129).
	}

	if ( callback != NULL ) {
		int nret = callback ( ctx_p, callback_arg_p );

		if ( nret ) {
			error ( "Got error while callback()." );

			// Keep the first error; don't let the callback mask a sync error.
			if ( !ret ) ret = nret;
		}
	}

// g_hash_table_destroy(indexes_p->nonthreaded_syncing_fpath2ei_ht);
	indexes_p->nonthreaded_syncing_fpath2ei_ht = NULL;
	return ret;
}
1146
1147/*
1148static inline int sync_exec(ctx_t *ctx_p, indexes_t *indexes_p, thread_callbackfunct_t callback, thread_callbackfunct_arg_t *callback_arg_p, ...) {
1149 int rc;
1150 debug(2, "");
1151
1152 char **argv = (char **)xcalloc(sizeof(char *), MAXARGUMENTS);
1153 memset(argv, 0, sizeof(char *)*MAXARGUMENTS);
1154
1155 _sync_exec_getargv(argv, callback_arg_p, arg);
1156
1157 rc = sync_exec_argv(ctx_p, indexes_p, callback, callback_arg_p, argv);
1158 free(argv);
1159 return rc;
1160}
1161*/
1162
1163#ifdef THREADING_SUPPORT
1164int __sync_exec_thread ( threadinfo_t *threadinfo_p )
1165{
1166 char **argv = threadinfo_p->argv;
1167 ctx_t *ctx_p = threadinfo_p->ctx_p;
1168 debug ( 3, "thread_num == %i; threadinfo_p == %p; i_p->pthread %p; thread %p""",
1169 threadinfo_p->thread_num, threadinfo_p, threadinfo_p->pthread, pthread_self() );
1170 int err = 0, exec_exitcode = 0, try_again;
1171
1172 do {
1173 try_again = 0;
1174 threadinfo_p->try_n++;
1175 exec_exitcode = exec_argv ( argv, &threadinfo_p->child_pid );
1176
1177 if ( ( err = exitcode_process ( threadinfo_p->ctx_p, exec_exitcode ) ) ) {
1178 try_again = ( ( !ctx_p->retries ) || ( threadinfo_p->try_n < ctx_p->retries ) ) && ( ctx_p->state != STATE_TERM ) && ( ctx_p->state != STATE_EXIT );
1179 warning ( "__sync_exec_thread(): Bad exitcode %i (errcode %i). %s.", exec_exitcode, err, try_again ? "Retrying" : "Give up" );
1180
1181 if ( try_again ) {
1182 debug ( 2, "Sleeping for %u seconds before the retry.", ctx_p->syncdelay );
1183 sleep ( ctx_p->syncdelay );
1184 }
1185 }
1186 } while ( try_again );
1187
1188 if ( err && !ctx_p->flags[IGNOREFAILURES] ) {
1189 error ( "Bad exitcode %i (errcode %i)", exec_exitcode, err );
1190 threadinfo_p->errcode = err;
1191 }
1192
1193 g_hash_table_destroy ( threadinfo_p->fpath2ei_ht );
1194
1195 if ( ( err = thread_exit ( threadinfo_p, exec_exitcode ) ) ) {
1196 exitcode = err; // This's global variable "exitcode"
1197 pthread_kill ( pthread_sighandler, SIGTERM );
1198 }
1199
1200 debug ( 3, "thread_num == %i; threadinfo_p == %p; i_p->pthread %p; thread %p""; errcode %i",
1201 threadinfo_p->thread_num, threadinfo_p, threadinfo_p->pthread, pthread_self(), threadinfo_p->errcode );
1202 return exec_exitcode;
1203}
1204
1205static inline int sync_exec_argv_thread ( ctx_t *ctx_p, indexes_t *indexes_p, thread_callbackfunct_t callback, thread_callbackfunct_arg_t *callback_arg_p, char **argv )
1206{
1207 debug ( 2, "" );
1208 debug_argv_dump ( 2, argv );
1209 threadinfo_t *threadinfo_p = thread_new();
1210
1211 if ( threadinfo_p == NULL )
1212 return errno;
1213
1214 threadinfo_p->try_n = 0;
1215 threadinfo_p->callback = callback;
1216 threadinfo_p->callback_arg = callback_arg_p;
1217 threadinfo_p->argv = argv;
1218 threadinfo_p->ctx_p = ctx_p;
1219 threadinfo_p->starttime = time ( NULL );
1220 threadinfo_p->fpath2ei_ht = g_hash_table_dup ( indexes_p->fpath2ei_ht, g_str_hash, g_str_equal, free, free, ( gpointer ( * ) ( gpointer ) ) strdup, eidup );
1221 threadinfo_p->iteration = ctx_p->iteration_num;
1222
1223 if ( ctx_p->synctimeout )
1224 threadinfo_p->expiretime = threadinfo_p->starttime + ctx_p->synctimeout;
1225
1226 if ( pthread_create ( &threadinfo_p->pthread, NULL, ( void * ( * ) ( void * ) ) __sync_exec_thread, threadinfo_p ) ) {
1227 error ( "Cannot pthread_create()." );
1228 return errno;
1229 }
1230
1231 debug ( 3, "thread %p", threadinfo_p->pthread );
1232 return 0;
1233}
1234
1235/*
1236static inline int sync_exec_thread(ctx_t *ctx_p, indexes_t *indexes_p, thread_callbackfunct_t callback, thread_callbackfunct_arg_t *callback_arg_p, ...) {
1237 debug(2, "");
1238
1239 char **argv = (char **)xcalloc(sizeof(char *), MAXARGUMENTS);
1240 memset(argv, 0, sizeof(char *)*MAXARGUMENTS);
1241
1242 _sync_exec_getargv(argv, callback_arg_p, strdup(arg));
1243
1244 return sync_exec_argv_thread(ctx_p, indexes_p, callback, callback_arg_p, argv);
1245}
1246*/
1247#endif
1248
1249// } === SYNC_EXEC() ===
1250
/**
 * Puts one filesystem event into the given sync queue, merging it with an
 * already-queued event for the same relative path if one exists.
 *
 * @return 0 on success (including the deliberate skip of "\n"-paths);
 *         the indexes_queueevent() result otherwise.
 */
static int sync_queuesync ( const char *fpath_rel, eventinfo_t *evinfo, ctx_t *ctx_p, indexes_t *indexes_p, queue_id_t queue_id )
{
	debug ( 3, "sync_queuesync(\"%s\", ...): fsize == %lu; tres == %lu, queue_id == %u", fpath_rel, evinfo->fsize, ctx_p->bfilethreshold, queue_id );

	// NOTE(review): the statement body of this "if" is missing in the
	// extracted source (doxygen line 1256) — presumably queue_id is chosen
	// here based on evinfo->fsize vs ctx_p->bfilethreshold.  Also missing
	// (line 1258): the declaration of "queueinfo" used just below, likely
	// "queueinfo_t *queueinfo = &ctx_p->queues[queue_id];".  Restore from
	// upstream sync.c.
	if ( queue_id == QUEUE_AUTO )

	if ( !queueinfo->stime )
		queueinfo->stime = time ( NULL );

// char *fpath_rel = sync_path_abs2rel(ctx_p, fpath, -1, NULL, NULL);

	// Filename can contain "" character that conflicts with event-row separator of list-files.
	if ( strchr ( fpath_rel, '\n' ) ) {
		// At the moment, we will just ignore events of such files :(
		debug ( 3, "There's \"\\n\" character in path \"%s\". Ignoring it :(. Feedback to: https://github.com/clsync/clsync/issues/12", fpath_rel );
		return 0;
	}

#ifdef CLUSTER_SUPPORT

	if ( ctx_p->cluster_iface )
		cluster_capture ( fpath_rel );

#endif
	eventinfo_t *evinfo_q = indexes_lookupinqueue ( indexes_p, fpath_rel, queue_id );

	if ( evinfo_q == NULL ) {
		// First event for this path: queue a heap copy (the queue owns it).
		eventinfo_t *evinfo_dup = ( eventinfo_t * ) xmalloc ( sizeof ( *evinfo_dup ) );
		memcpy ( evinfo_dup, evinfo, sizeof ( *evinfo_dup ) );
		return indexes_queueevent ( indexes_p, strdup ( fpath_rel ), evinfo_dup, queue_id );
	} else {
		// Already queued: fold the new event into the existing one.
		evinfo_merge ( ctx_p, evinfo_q, evinfo );
	}

	return 0;
}
1290
/**
 * Fills evinfo_p->evmask with the monitor-backend-specific event mask that
 * represents "object was just created" — used to synthesize events for the
 * initial sync walk (isdir selects the directory vs file variant).
 */
static inline void evinfo_initialevmask ( ctx_t *ctx_p, eventinfo_t *evinfo_p, int isdir )
{
	switch ( ctx_p->flags[MONITOR] ) {
#ifdef FANOTIFY_SUPPORT

	case NE_FANOTIFY:
		critical ( "fanotify is not supported" );
		break;
#endif
#if INOTIFY_SUPPORT | KQUEUE_SUPPORT
#ifdef INOTIFY_SUPPORT

	case NE_INOTIFY:
#endif
#ifdef KQUEUE_SUPPORT
	case NE_KQUEUE:
#endif
		// kqueue reuses the inotify-style mask constants in this codebase.
		evinfo_p->evmask = IN_CREATE_SELF;

		if ( isdir )
			evinfo_p->evmask |= IN_ISDIR;

		break;
#endif
#ifdef BSM_SUPPORT

	case NE_BSM:
	case NE_BSM_PREFETCH:
		// BSM audit event ids: mkdir for dirs, open-for-write/create for files.
		evinfo_p->evmask = ( isdir ? AUE_MKDIR : AUE_OPEN_RWC );
		break;
#endif
#ifdef GIO_SUPPORT

	case NE_GIO:
		evinfo_p->evmask = G_FILE_MONITOR_EVENT_CREATED;
		break;
#endif
#ifdef VERYPARANOID

	default:
		critical ( "Unknown monitor subsystem: %u", ctx_p->flags[MONITOR] );
#endif
	}

	return;
}
1337
1338static inline void api_evinfo_initialevmask ( ctx_t *ctx_p, api_eventinfo_t *evinfo_p, int isdir )
1339{
1340 eventinfo_t evinfo = {0};
1341 evinfo_initialevmask ( ctx_p, &evinfo, isdir );
1342 evinfo_p->evmask = evinfo.evmask;
1343 return;
1344}
1345
int sync_dosync ( const char *fpath, uint32_t evmask, ctx_t *ctx_p, indexes_t *indexes_p );
/*
 * NOTE(review): the signature line of the following function was lost in
 * extraction (doxygen line 1347).  From its cleanup label
 * "l_sync_initialsync_walk_end" and its locals, this is
 *   static int sync_initialsync_walk(ctx_t *ctx_p, const char *dirpath,
 *       indexes_t *indexes_p, queue_id_t queue_id, initsync_t initsync)
 * — restore from upstream sync.c.  It walks "dirpath" with fts and either
 * queues per-file sync events (non-rsync modes) or records exclude entries
 * (rsync modes preferring excludes).
 */
{
	int ret = 0;
	const char *rootpaths[] = {dirpath, NULL};
	eventinfo_t evinfo;
	FTS *tree;
	rule_t *rules_p = ctx_p->rules;
	debug ( 2, "(ctx_p, \"%s\", indexes_p, %i, %i).", dirpath, queue_id, initsync );
	char skip_rules = ( initsync == INITSYNC_FULL ) && ctx_p->flags[INITFULL];
	char rsync_and_prefer_excludes =
	    (
	        ( ctx_p->flags[MODE] == MODE_RSYNCDIRECT ) ||
	        ( ctx_p->flags[MODE] == MODE_RSYNCSHELL ) ||
	        ( ctx_p->flags[MODE] == MODE_RSYNCSO )
	    ) &&
	// NOTE(review): the right-hand operand of "&&" is missing (doxygen line
	// 1362) — presumably "(!ctx_p->flags[RSYNCPREFERINCLUDE]);".

	if ( ( !ctx_p->flags[RSYNCPREFERINCLUDE] ) && skip_rules )
		return 0;

	// With no rules configured there is nothing to filter anyway.
	skip_rules |= ( ctx_p->rules_count == 0 );
	char fts_no_stat =
	    (
	        (
	        // NOTE(review): conditions missing here (doxygen lines 1371,
	        // 1374-1375) — restore from upstream sync.c.
	        ) ||
	        (
	        ) ||
	        (
	            ctx_p->bfilethreshold == 0
	        )
	    ) && ! ( ctx_p->flags[EXCLUDEMOUNTPOINTS] );
	int fts_opts = FTS_NOCHDIR | FTS_PHYSICAL |
	               ( fts_no_stat ? FTS_NOSTAT : 0 ) |
	               ( ctx_p->flags[ONEFILESYSTEM] ? FTS_XDEV : 0 );
	debug ( 3, "fts_opts == %p", ( void * ) ( long ) fts_opts );
	tree = privileged_fts_open ( ( char *const * ) &rootpaths, fts_opts, NULL, PC_SYNC_INIIALSYNC_WALK_FTS_OPEN );

	if ( tree == NULL ) {
		error ( "Cannot privileged_fts_open() on \"%s\".", dirpath );
		return errno;
	}

	memset ( &evinfo, 0, sizeof ( evinfo ) );
	FTSENT *node;
	char *path_rel = NULL;
	size_t path_rel_len = 0;
#ifdef VERYPARANOID
	errno = 0;
#endif

	while ( ( node = privileged_fts_read ( tree, PC_SYNC_INIIALSYNC_WALK_FTS_READ ) ) ) {
		switch ( node->fts_info ) {
			// Duplicates:
			case FTS_DP:
				continue;

			// To sync:
			case FTS_DEFAULT:
			case FTS_SL:
			case FTS_SLNONE:
			case FTS_F:
			case FTS_D:
			case FTS_DOT:
			case FTS_DC: // TODO: think about case of FTS_DC
			case FTS_NSOK:
				break;

			// Error cases:
			case FTS_ERR:
			case FTS_NS:
			case FTS_DNR: {
					int fts_errno = node->fts_errno;

					// A racing delete (ENOENT) is benign; anything else aborts.
					if ( fts_errno == ENOENT ) {
						debug ( 1, "Got error while privileged_fts_read(): %s (errno: %i; fts_info: %i).", strerror ( fts_errno ), fts_errno, node->fts_info );
						continue;
					} else {
						error ( "Got error while privileged_fts_read(): %s (errno: %i; fts_info: %i).", strerror ( fts_errno ), fts_errno, node->fts_info );
						ret = node->fts_errno;
						goto l_sync_initialsync_walk_end;
					}
				}

			default:
				error ( "Got unknown fts_info vlaue while privileged_fts_read(): %i.", node->fts_info );
				ret = EINVAL;
				goto l_sync_initialsync_walk_end;
		}

		path_rel = sync_path_abs2rel ( ctx_p, node->fts_path, -1, &path_rel_len, path_rel );
		debug ( 3, "Pointing to \"%s\" (node->fts_info == %i)", path_rel, node->fts_info );

		if ( ctx_p->flags[EXCLUDEMOUNTPOINTS] && node->fts_info == FTS_D ) {
			if ( rsync_and_prefer_excludes ) {
				// Directory on a different device: exclude and don't descend.
				if ( node->fts_statp->st_dev != ctx_p->st_dev ) {
					debug ( 3, "Excluding \"%s\" due to location on other device: node->fts_statp->st_dev [0x%o] != ctx_p->st_dev [0x%o]", path_rel, node->fts_statp->st_dev, ctx_p->st_dev );

					if ( queue_id == QUEUE_AUTO ) {
						int i = 0;

						while ( i < QUEUE_MAX )
							indexes_addexclude ( indexes_p, strdup ( path_rel ), EVIF_CONTENTRECURSIVELY, i++ );
					} else
						indexes_addexclude ( indexes_p, strdup ( path_rel ), EVIF_CONTENTRECURSIVELY, queue_id );

					fts_set ( tree, node, FTS_SKIP );
				}
			} else if ( !ctx_p->flags[RSYNCPREFERINCLUDE] )
				error ( "Excluding mount points is not implentemted for non \"rsync*\" modes." );
		}

		// With FTS_NOSTAT there is no stat data; approximate the mode from
		// the node type (directory vs "regular file").
		mode_t st_mode = fts_no_stat ? ( node->fts_info == FTS_D ? S_IFDIR : S_IFREG ) : node->fts_statp->st_mode;

		if ( !skip_rules ) {
			ruleaction_t perm = rules_getperm ( path_rel, st_mode, rules_p, RA_WALK | RA_SYNC );

			if ( ! ( perm & RA_WALK ) ) {
				debug ( 3, "Rejecting to walk into \"%s\".", path_rel );
				fts_set ( tree, node, FTS_SKIP );
			}

			if ( ! ( perm & RA_SYNC ) ) {
				debug ( 3, "Excluding \"%s\".", path_rel );

				if ( rsync_and_prefer_excludes ) {
					if ( queue_id == QUEUE_AUTO ) {
						int i = 0;

						while ( i < QUEUE_MAX )
							indexes_addexclude ( indexes_p, strdup ( path_rel ), EVIF_NONE, i++ );
					} else
						indexes_addexclude ( indexes_p, strdup ( path_rel ), EVIF_NONE, queue_id );
				}

				continue;
			}
		}

		if ( !rsync_and_prefer_excludes ) {
			evinfo_initialevmask ( ctx_p, &evinfo, node->fts_info == FTS_D );

			switch ( ctx_p->flags[MODE] ) {
				case MODE_SIMPLE:
					// Simple mode syncs each object immediately, no queueing.
					SAFE ( sync_dosync ( node->fts_path, evinfo.evmask, ctx_p, indexes_p ), debug ( 1, "fpath == \"%s\"; evmask == 0x%o", node->fts_path, evinfo.evmask ); return -1; );
					continue;

				default:
					break;
			}

			evinfo.seqid_min = sync_seqid();
			evinfo.seqid_max = evinfo.seqid_min;
			// NOTE(review): a line is missing here (doxygen 1502) —
			// presumably "evinfo.objtype_old = EOT_DOESNTEXIST;".
			evinfo.objtype_new = node->fts_info == FTS_D ? EOT_DIR : EOT_FILE;
			evinfo.fsize = fts_no_stat ? 0 : node->fts_statp->st_size;
			debug ( 3, "queueing \"%s\" (depth: %i) with int-flags %p", node->fts_path, node->fts_level, ( void * ) ( unsigned long ) evinfo.flags );
			int _ret = sync_queuesync ( path_rel, &evinfo, ctx_p, indexes_p, queue_id );

			if ( _ret ) {
				error ( "Got error while queueing \"%s\".", node->fts_path );
				ret = errno;
				goto l_sync_initialsync_walk_end;
			}

			continue;
		}

		/* "FTS optimization" */
		if (
		    skip_rules &&
		    node->fts_info == FTS_D &&
		    // NOTE(review): a condition is missing here (doxygen 1521).
		) {
			debug ( 4, "\"FTS optimizator\"" );
			fts_set ( tree, node, FTS_SKIP );
		}
	}

	if ( errno ) {
		error ( "Got error while privileged_fts_read() and related routines." );
		ret = errno;
		goto l_sync_initialsync_walk_end;
	}

	if ( privileged_fts_close ( tree, PC_SYNC_INIIALSYNC_WALK_FTS_CLOSE ) ) {
		error ( "Got error while privileged_fts_close()." );
		ret = errno;
		goto l_sync_initialsync_walk_end;
	}

l_sync_initialsync_walk_end:

	if ( path_rel != NULL )
		free ( path_rel );

	return ret;
}
1547
1548const char *sync_parameter_get ( const char *variable_name, void *_dosync_arg_p )
1549{
1550 struct dosync_arg *dosync_arg_p = _dosync_arg_p;
1551 ctx_t *ctx_p = dosync_arg_p->ctx_p;
1552#ifdef _DEBUG_FORCE
1553 debug ( 15, "(\"%s\", %p): 0x%x, \"%s\"", variable_name, _dosync_arg_p, ctx_p == NULL ? 0 : ctx_p->synchandler_argf, dosync_arg_p->evmask_str );
1554#endif
1555
1556 if ( ( ctx_p == NULL || ( ctx_p->synchandler_argf & SHFL_INCLUDE_LIST_PATH ) ) && !strcmp ( variable_name, "INCLUDE-LIST-PATH" ) )
1557 return dosync_arg_p->outf_path;
1558 else if ( ( ctx_p == NULL || ( ctx_p->synchandler_argf & SHFL_EXCLUDE_LIST_PATH ) ) && !strcmp ( variable_name, "EXCLUDE-LIST-PATH" ) )
1559 return dosync_arg_p->excf_path;
1560 else if ( !strcmp ( variable_name, "TYPE" ) )
1561 return dosync_arg_p->list_type_str;
1562 else if ( !strcmp ( variable_name, "EVENT-MASK" ) )
1563 return dosync_arg_p->evmask_str;
1564
1565 errno = ENOENT;
1566 return NULL;
1567}
1568
/**
 * Builds the NULL-terminated argv for the sync handler from the configured
 * argument template "args_p": argv[0] is the handler path; each template
 * argument is either copied verbatim (already expanded), expanded via
 * parameter_expand()/sync_parameter_get(), or — for the special token
 * "%INCLUDE-LIST%" — replaced by the whole include list.
 *
 * The returned vector and every element are heap-allocated; free with
 * argv_free().
 *
 * NOTE(review): the "d >= MAXARGUMENTS" bound checks are compiled in only
 * under PARANOID; without it a long include list could overflow the
 * MAXARGUMENTS + 2 allocation — confirm against upstream.
 */
static char **sync_customargv ( ctx_t *ctx_p, struct dosync_arg *dosync_arg_p, synchandler_args_t *args_p )
{
	int d, s;	// d: destination index in argv; s: source index in args_p->v
	char **argv = ( char ** ) xcalloc ( sizeof ( char * ), MAXARGUMENTS + 2 );
	s = d = 0;
	argv[d++] = strdup ( ctx_p->handlerfpath );

	while ( s < args_p->c ) {
		char *arg = args_p->v[s];
		char isexpanded = args_p->isexpanded[s];
		s++;
#ifdef _DEBUG_FORCE
		debug ( 30, "\"%s\" [%p]", arg, arg );
#endif

		if ( isexpanded ) {
#ifdef _DEBUG_FORCE
			debug ( 19, "\"%s\" [%p] is already expanded, just strdup()-ing it", arg, arg );
#endif
			argv[d++] = strdup ( arg );
			continue;
		}

		if ( !strcmp ( arg, "%INCLUDE-LIST%" ) ) {
			// Splice every include-list entry in as its own argument.
			int i = 0, e = dosync_arg_p->include_list_count;
			const char **include_list = dosync_arg_p->include_list;
#ifdef _DEBUG_FORCE
			debug ( 19, "INCLUDE-LIST: e == %u; d,s: %u,%u", e, d, s );
#endif

			while ( i < e ) {
#ifdef PARANOID

				if ( d >= MAXARGUMENTS ) {
					errno = E2BIG;
					critical ( "Too many arguments" );
				}

#endif
				argv[d++] = parameter_expand ( ctx_p, strdup ( include_list[i++] ), PEF_NONE, NULL, NULL, sync_parameter_get, dosync_arg_p );
#ifdef _DEBUG_FORCE
				debug ( 19, "include-list: argv[%u] == %p", d - 1, argv[d - 1] );
#endif
			}

			continue;
		}

#ifdef PARANOID

		if ( d >= MAXARGUMENTS ) {
			errno = E2BIG;
			critical ( "Too many arguments" );
		}

#endif
		argv[d] = parameter_expand ( ctx_p, strdup ( arg ), PEF_NONE, NULL, NULL, sync_parameter_get, dosync_arg_p );
#ifdef _DEBUG_FORCE
		debug ( 19, "argv[%u] == %p \"%s\"", d, argv[d], argv[d] );
#endif
		d++;
	}

	argv[d] = NULL;	// argv must be NULL-terminated for exec*()
#ifdef _DEBUG_FORCE
	debug ( 18, "return %p", argv );
#endif
	return argv;
}
1638
/**
 * Frees a NULL-terminated argument vector (as produced by sync_customargv):
 * every element and then the array itself.
 */
static void argv_free ( char **argv )
{
	char **cur;
#ifdef _DEBUG_FORCE
	debug ( 18, "(%p)", argv );
#endif
#ifdef VERYPARANOID

	if ( argv == NULL )
		critical ( MSG_SECURITY_PROBLEM ( "argv_free(NULL)" ) );

#endif

	for ( cur = argv; *cur != NULL; cur++ ) {
#ifdef _DEBUG_FORCE
		debug ( 25, "free(%p)", *cur );
#endif
		free ( *cur );
	}

	free ( argv );
}
1663
/*
 * NOTE(review): the signature of this function was lost in extraction
 * (doxygen line 1664); callers below invoke it as
 * "sync_initialsync_finish(ctx_p, initsync, ret)", so it is presumably
 *   static inline int sync_initialsync_finish(ctx_t *ctx_p,
 *       initsync_t initsync, int ret)
 * and a line using ctx_p is also missing (doxygen 1667).  It funnels every
 * sync_initialsync() exit path through one place and returns "ret".
 */
{
	( void ) initsync;
	return ret;
}
1670
/**
 * Performs the initial synchronization of "path": either a single recursive
 * sync (HAVERECURSIVESYNC), an rsync-based sync driven by include/exclude
 * lists, or a per-file walk that queues events (sync_initialsync_walk).
 * Every exit path goes through sync_initialsync_finish().
 */
int sync_initialsync ( const char *path, ctx_t *ctx_p, indexes_t *indexes_p, initsync_t initsync )
{
	int ret;
	// NOTE(review): a declaration is missing here (doxygen line 1674) —
	// presumably "queue_id_t queue_id;" used throughout this function.
	debug ( 3, "(\"%s\", ctx_p, indexes_p, %i)", path, initsync );
#ifdef CLUSTER_SUPPORT

	if ( initsync == INITSYNC_FULL ) {
		if ( ctx_p->cluster_iface )
			return cluster_initialsync();
	}

#endif

	// NOTE(review): both branch bodies are missing in the extracted source
	// (doxygen lines 1686 and 1688) — presumably they assign queue_id
	// (e.g. QUEUE_INSTANT for a full initial sync, QUEUE_NORMAL otherwise).
	if ( initsync == INITSYNC_FULL )
	else

	// non-RSYNC case:
	if (
	    ! (
	        ( ctx_p->flags[MODE] == MODE_RSYNCDIRECT ) ||
	        ( ctx_p->flags[MODE] == MODE_RSYNCSHELL ) ||
	        ( ctx_p->flags[MODE] == MODE_RSYNCSO )
	    )
	) {
		debug ( 3, "syncing \"%s\"", path );

		if ( ctx_p->flags[HAVERECURSIVESYNC] ) {
			if ( ctx_p->flags[MODE] == MODE_SO ) {
				// Shared-object mode: hand one recursive "created dir" event
				// to the loaded module.
				api_eventinfo_t *ei = ( api_eventinfo_t * ) xmalloc ( sizeof ( *ei ) );
#ifdef PARANIOD
				// NOTE(review): "PARANIOD" is a typo for "PARANOID" — this
				// memset never compiles in.  Kept as-is here; fix upstream.
				memset ( ei, 0, sizeof ( *ei ) );
#endif
				// NOTE(review): a line is missing here (doxygen 1706) —
				// presumably "api_evinfo_initialevmask(ctx_p, ei, 1);".
				ei->flags = EVIF_RECURSIVELY;
				ei->path_len = strlen ( path );
				ei->path = strdup ( path );
				// NOTE(review): a line is missing here (doxygen 1710) —
				// presumably "ei->objtype_old = EOT_DOESNTEXIST;".
				ei->objtype_new = EOT_DIR;
				ret = so_call_sync ( ctx_p, indexes_p, 1, ei );
				return sync_initialsync_finish ( ctx_p, initsync, ret );
			} else {
				struct dosync_arg dosync_arg;
				synchandler_args_t *args_p;
				// NOTE(review): several initialization lines are missing
				// here (doxygen 1717-1720, 1722) — presumably dosync_arg
				// setup (ctx_p, args_p selection, include_list_count = 1).
				*dosync_arg.include_list = path;
				dosync_arg.list_type_str = "initialsync";
				char **argv = sync_customargv ( ctx_p, &dosync_arg, args_p );
				ret = SYNC_EXEC_ARGV (
				          ctx_p,
				          indexes_p,
				          NULL,
				          NULL,
				          argv );
#ifdef THREADING_SUPPORT

				if ( !SHOULD_THREAD ( ctx_p ) ) // If it's a thread then it will free the argv in GC. If not a thread then we have to free right here.
#endif
					argv_free ( argv );

				return sync_initialsync_finish ( ctx_p, initsync, ret );
			}
		}

#ifdef DOXYGEN
		sync_exec_argv ( NULL, NULL );
		sync_exec_argv_thread ( NULL, NULL );
#endif
		ret = sync_initialsync_walk ( ctx_p, path, indexes_p, queue_id, initsync );

		if ( ret )
			error ( "Cannot get synclist" );

		return sync_initialsync_finish ( ctx_p, initsync, ret );
	}

	// RSYNC case:

	if ( !ctx_p->flags[RSYNCPREFERINCLUDE] ) {
		// NOTE(review): a declaration is missing here (doxygen 1756) —
		// presumably "queueinfo_t *queueinfo = &ctx_p->queues[queue_id];".

		if ( !queueinfo->stime )
			queueinfo->stime = time ( NULL ); // Useful for debugging

		eventinfo_t *evinfo = ( eventinfo_t * ) xmalloc ( sizeof ( *evinfo ) );
		memset ( evinfo, 0, sizeof ( *evinfo ) );
		evinfo->flags |= EVIF_RECURSIVELY;
		evinfo->seqid_min = sync_seqid();
		evinfo->seqid_max = evinfo->seqid_min;
		evinfo->objtype_old = EOT_DOESNTEXIST;
		evinfo->objtype_new = EOT_DIR;
		// Searching for excludes
		ret = sync_initialsync_walk ( ctx_p, path, indexes_p, queue_id, initsync );

		if ( ret ) {
			error ( "Cannot get exclude what to exclude" );
			return sync_initialsync_finish ( ctx_p, initsync, ret );
		}

		debug ( 3, "queueing \"%s\" with int-flags %p", path, ( void * ) ( unsigned long ) evinfo->flags );
		char *path_rel = sync_path_abs2rel ( ctx_p, path, -1, NULL, NULL );
		ret = indexes_queueevent ( indexes_p, path_rel, evinfo, queue_id );
		return sync_initialsync_finish ( ctx_p, initsync, ret );
	}

	// Searching for includes
	ret = sync_initialsync_walk ( ctx_p, path, indexes_p, queue_id, initsync );
	return sync_initialsync_finish ( ctx_p, initsync, ret );
}
1786
1787int sync_notify_mark ( ctx_t *ctx_p, const char *accpath, const char *path, size_t pathlen, indexes_t *indexes_p )
1788{
1789 debug ( 3, "(..., \"%s\", %i,...)", path, pathlen );
1790 int wd = indexes_fpath2wd ( indexes_p, path );
1791
1792 if ( wd != -1 ) {
1793 debug ( 1, "\"%s\" is already marked (wd: %i). Skipping.", path, wd );
1794 return wd;
1795 }
1796
1797 debug ( 5, "ctx_p->notifyenginefunct.add_watch_dir(ctx_p, indexes_p, \"%s\")", accpath );
1798
1799 if ( ( wd = ctx_p->notifyenginefunct.add_watch_dir ( ctx_p, indexes_p, accpath ) ) == -1 ) {
1800 if ( errno == ENOENT )
1801 return -2;
1802
1803 error ( "Cannot ctx_p->notifyenginefunct.add_watch_dir() on \"%s\".",
1804 path );
1805 return -1;
1806 }
1807
1808 debug ( 6, "endof ctx_p->notifyenginefunct.add_watch_dir(ctx_p, indexes_p, \"%s\")", accpath );
1809 indexes_add_wd ( indexes_p, wd, path, pathlen );
1810 return wd;
1811}
1812
1813#ifdef CLUSTER_SUPPORT
1814static inline int sync_mark_walk_cluster_modtime_update ( ctx_t *ctx_p, const char *path, short int dirlevel, mode_t st_mode )
1815{
1816 if ( ctx_p->cluster_iface ) {
1817 int ret = cluster_modtime_update ( path, dirlevel, st_mode );
1818
1819 if ( ret ) error ( "cannot cluster_modtime_update()" );
1820
1821 return ret;
1822 }
1823
1824 return 0;
1825}
1826#endif
1827
/**
 * Recursively walks "dirpath" (fts, no stat) and puts monitor watches on
 * every directory permitted by the RA_MONITOR rules; RA_WALK rules control
 * descent.  Regular files are not marked (only their parent directories).
 *
 * During startup (STATE_STARTING) failures are reported as errors;
 * afterwards they are only debug-logged (the tree may legitimately be
 * changing under us).
 *
 * @return 0 on success, an errno-style code otherwise.
 */
int sync_mark_walk ( ctx_t *ctx_p, const char *dirpath, indexes_t *indexes_p )
{
	int ret = 0;
	const char *rootpaths[] = {dirpath, NULL};
	FTS *tree;
	rule_t *rules_p = ctx_p->rules;
	debug ( 2, "(ctx_p, \"%s\", indexes_p).", dirpath );
	// FTS_NOSTAT: only the dir/file distinction is needed, not full stat data.
	int fts_opts = FTS_NOCHDIR | FTS_PHYSICAL | FTS_NOSTAT | ( ctx_p->flags[ONEFILESYSTEM] ? FTS_XDEV : 0 );
	debug ( 3, "fts_opts == %p", ( void * ) ( long ) fts_opts );
	tree = privileged_fts_open ( ( char *const * ) rootpaths, fts_opts, NULL, PC_SYNC_MARK_WALK_FTS_OPEN );

	if ( tree == NULL ) {
		error_or_debug ( ( ctx_p->state == STATE_STARTING ) ? -1 : 2, "Cannot privileged_fts_open() on \"%s\".", dirpath );
		return errno;
	}

	FTSENT *node;
	char *path_rel = NULL;
	size_t path_rel_len = 0;
#ifdef VERYPARANOID
	errno = 0;
#endif

	while ( ( node = privileged_fts_read ( tree, PC_SYNC_MARK_WALK_FTS_READ ) ) ) {
#ifdef CLUSTER_SUPPORT
		int ret;	// shadows the outer "ret" deliberately for cluster errors
#endif
		debug ( 2, "walking: \"%s\" (depth %u): fts_info == %i", node->fts_path, node->fts_level, node->fts_info );

		switch ( node->fts_info ) {
			// Duplicates:
			case FTS_DP:
				continue;

			// Files:
			case FTS_DEFAULT:
			case FTS_SL:
			case FTS_SLNONE:
			case FTS_F:
			case FTS_NSOK:
#ifdef CLUSTER_SUPPORT
				if ( ( ret = sync_mark_walk_cluster_modtime_update ( ctx_p, node->fts_path, node->fts_level, S_IFREG ) ) )
					goto l_sync_mark_walk_end;

#endif
				continue;

			// Directories (to mark):
			case FTS_D:
			case FTS_DC: // TODO: think about case of FTS_DC
			case FTS_DOT:
#ifdef CLUSTER_SUPPORT
				if ( ( ret = sync_mark_walk_cluster_modtime_update ( ctx_p, node->fts_path, node->fts_level, S_IFDIR ) ) )
					goto l_sync_mark_walk_end;

#endif
				break;

			// Error cases:
			case FTS_ERR:
			case FTS_NS:
			case FTS_DNR:
				// A racing delete (ENOENT) is benign; anything else aborts.
				if ( errno == ENOENT ) {
					debug ( 1, "Got error while privileged_fts_read(); fts_info: %i.", node->fts_info );
					continue;
				} else {
					error_or_debug ( ( ctx_p->state == STATE_STARTING ) ? -1 : 2, "Got error while privileged_fts_read(); fts_info: %i.", node->fts_info );
					ret = errno;
					goto l_sync_mark_walk_end;
				}

			default:
				error_or_debug ( ( ctx_p->state == STATE_STARTING ) ? -1 : 2, "Got unknown fts_info vlaue while privileged_fts_read(): %i.", node->fts_info );
				ret = EINVAL;
				goto l_sync_mark_walk_end;
		}

		path_rel = sync_path_abs2rel ( ctx_p, node->fts_path, -1, &path_rel_len, path_rel );
		ruleaction_t perm = rules_search_getperm ( path_rel, S_IFDIR, rules_p, RA_WALK, NULL );
		debug ( 3, "perm == 0x%o", perm );

		if ( ! ( perm & RA_WALK ) ) {
			debug ( 2, "setting an FTS_SKIP on the directory" );

			if ( fts_set ( tree, node, FTS_SKIP ) )
				warning ( "Got error while fts_set(tree, node, FTS_SKIP): %s", path_rel );
		}

		if ( ! ( perm & RA_MONITOR ) ) {
			debug ( 2, "don't mark the directory" );
			continue;
		}

		debug ( 2, "marking \"%s\" (depth %u)", node->fts_path, node->fts_level );
		int wd = sync_notify_mark ( ctx_p, node->fts_accpath, node->fts_path, node->fts_pathlen, indexes_p );

		// Note: -2 ("directory vanished") is tolerated; only -1 is fatal.
		if ( wd == -1 ) {
			error_or_debug ( ( ctx_p->state == STATE_STARTING ) ? -1 : 2, "Got error while notify-marking \"%s\".", node->fts_path );
			ret = errno;
			goto l_sync_mark_walk_end;
		}

		debug ( 2, "watching descriptor is %i.", wd );
	}

	if ( errno ) {
		error_or_debug ( ( ctx_p->state == STATE_STARTING ) ? -1 : 2, "Got error while privileged_fts_read() and related routines." );
		ret = errno;
		goto l_sync_mark_walk_end;
	}

	if ( privileged_fts_close ( tree, PC_SYNC_MARK_WALK_FTS_CLOSE ) ) {
		error_or_debug ( ( ctx_p->state == STATE_STARTING ) ? -1 : 2, "Got error while privileged_fts_close()." );
		ret = errno;
		goto l_sync_mark_walk_end;
	}

l_sync_mark_walk_end:

	if ( path_rel != NULL )
		free ( path_rel );

	return ret;
}
1952
/*
 * NOTE(review): the signature of this function was lost in extraction
 * (doxygen line 1953).  From the body it initializes the selected
 * filesystem-monitor backend (fanotify/inotify/kqueue/BSM/GIO) and stores
 * its handle in ctx_p->fsmondata — presumably
 *   static inline int notify_init ( ctx_t *ctx_p )
 * Returns 0 on success, -1 (with errno set for the unknown-engine case)
 * on failure.
 */
{
	switch ( ctx_p->flags[MONITOR] ) {
#ifdef FANOTIFY_SUPPORT

	case NE_FANOTIFY: {
			ctx_p->fsmondata = ( long ) fanotify_init ( FANOTIFY_FLAGS, FANOTIFY_EVFLAGS );

			if ( ( long ) ctx_p->fsmondata == -1 ) {
				error ( "cannot fanotify_init(%i, %i).", FANOTIFY_FLAGS, FANOTIFY_EVFLAGS );
				return -1;
			}

			return 0;
		}

#endif
#ifdef INOTIFY_SUPPORT

	case NE_INOTIFY: {
# ifdef INOTIFY_OLD
			// Fallback for systems predating inotify_init1().
			ctx_p->fsmondata = ( void * ) ( long ) inotify_init();
#  if INOTIFY_FLAGS != 0
#   warning Do not know how to set inotify flags (too old system)
#  endif
# else
			ctx_p->fsmondata = ( void * ) ( long ) inotify_init1 ( INOTIFY_FLAGS );
# endif

			if ( ( long ) ctx_p->fsmondata == -1 ) {
				error ( "cannot inotify_init1(%i).", INOTIFY_FLAGS );
				return -1;
			}

			return 0;
		}

#endif
#ifdef KQUEUE_SUPPORT

	case NE_KQUEUE: {
			int kqueue_d = kqueue_init ( ctx_p );

			if ( kqueue_d == -1 ) {
				error ( "cannot kqueue_init(ctx_p)." );
				return -1;
			}

			return 0;
		}

#endif
#ifdef BSM_SUPPORT

	case NE_BSM:
	case NE_BSM_PREFETCH: {
			int bsm_d = bsm_init ( ctx_p );

			if ( bsm_d == -1 ) {
				error ( "cannot bsm_init(ctx_p)." );
				return -1;
			}

			return 0;
		}

#endif
#ifdef GIO_SUPPORT

	case NE_GIO: {
			critical_on ( gio_init ( ctx_p ) == -1 );
			return 0;
		}

#endif
	}

	error ( "unknown notify-engine: %i", ctx_p->flags[MONITOR] );
	errno = EINVAL;
	return -1;
}
2034
/**
 * Runs the sync handler once for a single file: builds a one-entry include
 * list ("fpath"), expands the configured argument template and executes it
 * via SYNC_EXEC_ARGV.  Returns the handler's exit status.
 */
static inline int sync_dosync_exec ( ctx_t *ctx_p, indexes_t *indexes_p, const char *evmask_str, const char *fpath )
{
	int rc;
	struct dosync_arg dosync_arg;
	debug ( 20, "(ctx_p, indexes_p, \"%s\", \"%s\")", evmask_str, fpath );
	// NOTE(review): initialization lines are missing here in the extracted
	// source (doxygen lines 2040, 2042, 2044-2045) — presumably dosync_arg
	// setup (ctx_p, evmask_str, include_list_count = 1) and the creation of
	// "argv" used below, e.g. "char **argv = sync_customargv(ctx_p,
	// &dosync_arg, args_p);".  Restore from upstream sync.c.
	*dosync_arg.include_list = fpath;
	dosync_arg.list_type_str = "sync";
	rc = SYNC_EXEC_ARGV (
	         ctx_p,
	         indexes_p,
	         NULL, NULL,
	         argv );
#ifdef THREADING_SUPPORT

	if ( !SHOULD_THREAD ( ctx_p ) ) // If it's a thread then it will free the argv in GC. If not a thread then we have to free right here.
#endif
		argv_free ( argv );

	return rc;
	// Unreachable; kept only so doxygen links these symbols from here.
#ifdef DOXYGEN
	sync_exec_argv ( NULL, NULL );
	sync_exec_argv_thread ( NULL, NULL );
#endif
}
2063
2064int sync_dosync ( const char *fpath, uint32_t evmask, ctx_t *ctx_p, indexes_t *indexes_p )
2065{
2066 int ret;
2067#ifdef CLUSTER_SUPPORT
2068 ret = cluster_lock ( fpath );
2069
2070 if ( ret ) return ret;
2071
2072#endif
2073 char *evmask_str = xmalloc ( 1 << 8 );
2074 sprintf ( evmask_str, "%u", evmask );
2075 ret = sync_dosync_exec ( ctx_p, indexes_p, evmask_str, fpath );
2076 free ( evmask_str );
2077#ifdef CLUSTER_SUPPORT
2078 ret = cluster_unlock_all();
2079#endif
2080 return ret;
2081}
2082
2083int fileischanged ( ctx_t *ctx_p, indexes_t *indexes_p, const char *path_rel, stat64_t *lst_p, int is_deleted )
2084{
2085 if ( lst_p == NULL || !ctx_p->flags[MODSIGN] )
2086 return 1;
2087
2088 debug ( 9, "Checking modification signature" );
2089 fileinfo_t *finfo = indexes_fileinfo ( indexes_p, path_rel );
2090
2091 if ( finfo != NULL ) {
2092 uint32_t diff;
2093
2094 if ( ! ( diff = stat_diff ( &finfo->lst, lst_p ) & ctx_p->flags[MODSIGN] ) ) {
2095 debug ( 8, "Modification signature: File not changed: \"%s\"", path_rel );
2096 return 0; // Skip file syncing if it's metadata not changed enough (according to "--modification-signature" setting)
2097 }
2098
2099 debug ( 8, "Modification signature: stat_diff == 0x%o; significant diff == 0x%o (ctx_p->flags[MODSIGN] == 0x%o)", diff, diff & ctx_p->flags[MODSIGN], ctx_p->flags[MODSIGN] );
2100
2101 if ( is_deleted ) {
2102 debug ( 8, "Modification signature: Deleting information about \"%s\"", path_rel );
2103 indexes_fileinfo_add ( indexes_p, path_rel, NULL );
2104 free ( finfo );
2105 } else {
2106 debug ( 8, "Modification signature: Updating information about \"%s\"", path_rel );
2107 memcpy ( &finfo->lst, lst_p, sizeof ( finfo->lst ) );
2108 }
2109 } else {
2110 debug ( 8, "There's no information about this file/dir: \"%s\". Just remembering the current state.", path_rel );
2111 // Adding file/dir information
2112 finfo = xmalloc ( sizeof ( *finfo ) );
2113 memcpy ( &finfo->lst, lst_p, sizeof ( finfo->lst ) );
2114 indexes_fileinfo_add ( indexes_p, path_rel, finfo );
2115 }
2116
2117 return 1;
2118}
2119
2120static inline int sync_indexes_fpath2ei_addfixed ( ctx_t *ctx_p, indexes_t *indexes_p, const char *fpath, eventinfo_t *evinfo )
2121{
2122 static const char fpath_dot[] = ".";
2123 const char *fpath_fixed;
2124 fpath_fixed = fpath;
2125
2126 switch ( ctx_p->flags[MODE] ) {
2127 case MODE_DIRECT:
2128
2129 // If fpath is empty (that means CWD) then assign it to "."
2130 if ( !*fpath )
2131 fpath_fixed = fpath_dot;
2132
2133 break;
2134
2135 default:
2136 break;
2137 }
2138
2139 return indexes_fpath2ei_add ( indexes_p, strdup ( fpath_fixed ), evinfo );
2140}
2141
2143(
2144 int monitored, // Is an event from event monitor handler (1 -- it is; 0 -- it doesn't)
2145
2146 ctx_t *ctx_p,
2147 indexes_t *indexes_p,
2148
2149 const char *path_full,
2150 const char *path_rel,
2151
2152 stat64_t *lst_p,
2153
2154 eventobjtype_t objtype_old,
2155 eventobjtype_t objtype_new,
2156
2157 uint32_t event_mask,
2158 int event_wd,
2159 mode_t st_mode,
2160 off_t st_size,
2161
2162 char **path_buf_p,
2163 size_t *path_buf_len_p,
2164
2165 eventinfo_t *evinfo
2166)
2167{
2168 debug ( 10, "%i %p %p %p %p %p %i %i 0x%o %i %i %i %p %p %p",
2169 monitored,
2170 ctx_p,
2171 indexes_p,
2172 path_full,
2173 path_rel,
2174 lst_p,
2175 objtype_old,
2176 objtype_new,
2177 event_mask,
2178 event_wd,
2179 st_mode,
2180 st_size,
2181 path_buf_p,
2182 path_buf_len_p,
2183 evinfo
2184 );
2185#ifdef PARANOID
2186
2187 // &path_buf and &path_buf_len are passed to do not reallocate memory for path_rel/path_full each time
2188 if ( ( path_buf_p == NULL || path_buf_len_p == NULL ) && ( path_full == NULL || path_rel == NULL ) ) {
2189 error ( "path_rel_p == NULL || path_rel_len_p == NULL" );
2190 return EINVAL;
2191 }
2192
2193#endif
2194#ifdef VERYPARANOID
2195
2196 if ( path_full == NULL && path_rel == NULL ) {
2197 error ( "path_full == NULL && path_rel == NULL" );
2198 return EINVAL;
2199 }
2200
2201#endif
2202
2203 if ( path_rel == NULL ) {
2204 *path_buf_p = sync_path_abs2rel ( ctx_p, path_full, -1, path_buf_len_p, *path_buf_p );
2205 path_rel = *path_buf_p;
2206 }
2207
2208 ruleaction_t perm = RA_ALL;
2209
2210 if ( st_mode ) {
2211 // Checking by filter rules
2212 perm = rules_getperm ( path_rel, st_mode, ctx_p->rules, RA_WALK | RA_MONITOR | RA_SYNC );
2213
2214 if ( ! ( perm & ( RA_MONITOR | RA_WALK | RA_SYNC ) ) ) {
2215 return 0;
2216 }
2217 }
2218
2219 // Handling different cases
2220 int is_dir = objtype_old == EOT_DIR || objtype_new == EOT_DIR;
2221 int is_created = objtype_old == EOT_DOESNTEXIST;
2222 int is_deleted = objtype_new == EOT_DOESNTEXIST;
2223 debug ( 4, "is_dir == %x; is_created == %x; is_deleted == %x", is_dir, is_created, is_deleted );
2224
2225 if ( is_dir ) {
2226 if ( is_created ) {
2227 int ret;
2228
2229 if ( perm & ( RA_WALK | RA_MONITOR ) ) {
2230 if ( path_full == NULL ) {
2231 *path_buf_p = sync_path_rel2abs ( ctx_p, path_rel, -1, path_buf_len_p, *path_buf_p );
2232 path_full = *path_buf_p;
2233 }
2234
2235 if ( ( perm & RA_MONITOR ) && monitored ) {
2236 ret = sync_mark_walk ( ctx_p, path_full, indexes_p );
2237
2238 if ( ret ) {
2239 debug ( 1, "Seems, that directory \"%s\" disappeared, while trying to mark it.", path_full );
2240 return 0;
2241 }
2242 }
2243
2244 if ( perm & RA_WALK ) {
2245 ret = sync_initialsync ( path_full, ctx_p, indexes_p, INITSYNC_SUBDIR );
2246
2247 if ( ret ) {
2248 errno = ret;
2249 error ( "Got error from sync_initialsync()" );
2250 return errno;
2251 }
2252 }
2253 }
2254
2255 fileischanged ( ctx_p, indexes_p, path_rel, lst_p, is_deleted ); // Just to remember it's state
2256 return 0;
2257 } else if ( is_deleted ) {
2258 debug ( 2, "Disappeared \".../%s\".", path_rel );
2259 }
2260 }
2261
2262 if ( ! ( perm & RA_SYNC ) ) {
2263 return 0;
2264 }
2265
2266 if ( !fileischanged ( ctx_p, indexes_p, path_rel, lst_p, is_deleted ) ) {
2267 debug ( 4, "The file/dir is not changed. Returning." );
2268 return 0;
2269 }
2270
2271 switch ( ctx_p->flags[MODE] ) {
2272 case MODE_SIMPLE:
2273 if ( path_full == NULL ) {
2274 *path_buf_p = sync_path_rel2abs ( ctx_p, path_rel, -1, path_buf_len_p, *path_buf_p );
2275 path_full = *path_buf_p;
2276 }
2277
2278 return SAFE ( sync_dosync ( path_full, event_mask, ctx_p, indexes_p ), debug ( 1, "fpath == \"%s\"; evmask == 0x%o", path_full, event_mask ); return -1; );
2279
2280 default:
2281 break;
2282 }
2283
2284 // Locally queueing the event
2285 int isnew = 0;
2286
2287 if ( evinfo == NULL )
2288 evinfo = indexes_fpath2ei ( indexes_p, path_rel );
2289 else
2290 isnew++; // It's new for prequeue (but old for lockwait queue)
2291
2292 if ( evinfo == NULL ) {
2293 evinfo = ( eventinfo_t * ) xmalloc ( sizeof ( *evinfo ) );
2294 memset ( evinfo, 0, sizeof ( *evinfo ) );
2295 evinfo->fsize = st_size;
2296 evinfo->wd = event_wd;
2297 evinfo->seqid_min = sync_seqid();
2298 evinfo->seqid_max = evinfo->seqid_min;
2299 evinfo->objtype_old = objtype_old;
2300 isnew++;
2301 debug ( 3, "new event: fsize == %i; wd == %i", evinfo->fsize, evinfo->wd );
2302 } else {
2303 evinfo->seqid_max = sync_seqid();
2304 }
2305
2306 switch ( ctx_p->flags[MONITOR] ) {
2307#ifdef KQUEUE_SUPPORT
2308
2309 case NE_KQUEUE:
2310#endif
2311#ifdef INOTIFY_SUPPORT
2312 case NE_INOTIFY:
2313#endif
2314#if KQUEUE_SUPPORT | INOTIFY_SUPPORT
2315 evinfo->evmask |= event_mask;
2316 break;
2317#endif
2318#ifdef BSM_SUPPORT
2319
2320 case NE_BSM:
2321 case NE_BSM_PREFETCH:
2322 evinfo->evmask = event_mask;
2323 break;
2324#endif
2325#ifdef GIO_SUPPORT
2326
2327 case NE_GIO:
2328 evinfo->evmask = event_mask;
2329 break;
2330#endif
2331 }
2332
2333 evinfo->objtype_new = objtype_new;
2334 debug ( 2, "path_rel == \"%s\"; evinfo->objtype_old == %i; evinfo->objtype_new == %i; "
2335 "evinfo->seqid_min == %u; evinfo->seqid_max == %u",
2336 path_rel, evinfo->objtype_old, evinfo->objtype_new,
2337 evinfo->seqid_min, evinfo->seqid_max
2338 );
2339
2340 if ( isnew )
2341 // Fix the path (if required) and call indexes_fpath2ei_add() to remeber the new object to be synced
2342 sync_indexes_fpath2ei_addfixed ( ctx_p, indexes_p, path_rel, evinfo );
2343
2344 return 0;
2345}
2346
2347void _sync_idle_dosync_collectedexcludes ( gpointer fpath_gp, gpointer flags_gp, gpointer arg_gp )
2348{
2349 char *fpath = ( char * ) fpath_gp;
2350 indexes_t *indexes_p = ( ( struct dosync_arg * ) arg_gp )->indexes_p;
2351 debug ( 3, "\"%s\", %u (%p).", fpath, GPOINTER_TO_INT ( flags_gp ), flags_gp );
2352 indexes_addexclude_aggr ( indexes_p, strdup ( fpath ), ( eventinfo_flags_t ) GPOINTER_TO_INT ( flags_gp ) );
2353 return;
2354}
2355
2356// Is not a thread-save function!
2357eventinfo_t *ht_fpath_isincluded ( GHashTable *ht, const char *const fpath )
2358{
2359 static char buf[PATH_MAX + 2] = {0};
2360 char *ptr, *end;
2361 eventinfo_t *evinfo = g_hash_table_lookup ( ht, fpath );
2362 debug ( 5, "looking up for \"%s\": %p", fpath, evinfo );
2363
2364 if ( evinfo != NULL )
2365 return evinfo;
2366
2367 if ( !*fpath )
2368 return NULL;
2369
2370 evinfo = g_hash_table_lookup ( ht, "" );
2371
2372 if ( evinfo != NULL ) {
2373 debug ( 5, "recursive looking up for \"\": %p (%x: recusively: %x)", evinfo, evinfo->flags, evinfo->flags & EVIF_RECURSIVELY );
2374
2375 if ( evinfo->flags & EVIF_RECURSIVELY )
2376 return evinfo;
2377 }
2378
2379 size_t fpath_len = strlen ( fpath );
2380 memcpy ( buf, fpath, fpath_len + 1 );
2381 ptr = buf;
2382 end = &buf[fpath_len];
2383
2384 while ( ptr < end ) {
2385 if ( *ptr == '/' ) {
2386 *ptr = 0;
2387 evinfo = g_hash_table_lookup ( ht, buf );
2388
2389 if ( evinfo != NULL ) {
2390 debug ( 5, "recursive looking up for \"%s\": %p (%x: recusively: %x)", buf, evinfo, evinfo->flags, evinfo->flags & EVIF_RECURSIVELY );
2391 *ptr = '/';
2392
2393 if ( evinfo->flags & EVIF_RECURSIVELY )
2394 return evinfo;
2395 }
2396 }
2397
2398 ptr++;
2399 }
2400
2401 return evinfo;
2402}
2403
2404int _sync_islocked ( threadinfo_t *threadinfo_p, void *_fpath )
2405{
2406 char *fpath = _fpath;
2407 eventinfo_t *evinfo = ht_fpath_isincluded ( threadinfo_p->fpath2ei_ht, fpath );
2408 debug ( 4, "scanning thread %p: fpath<%s> -> evinfo<%p>", threadinfo_p->pthread, fpath, evinfo );
2409
2410 if ( evinfo != NULL )
2411 return 1;
2412
2413 return 0;
2414}
2415
/**
 * Reports whether some currently running sync-thread is already working on
 * fpath.  In non-threaded builds nothing can hold a lock, so always 0.
 */
static inline int sync_islocked ( const char *const fpath )
{
#ifdef THREADING_SUPPORT
	int locked = threads_foreach ( _sync_islocked, STATE_RUNNING, ( void * ) fpath );
	debug ( 3, "<%s>: %u", fpath, locked );
	return locked;
#else
	return 0;
#endif
}
2426
2427void _sync_idle_dosync_collectedevents ( gpointer fpath_gp, gpointer evinfo_gp, gpointer arg_gp )
2428{
2429 char *fpath = ( char * ) fpath_gp;
2430 eventinfo_t *evinfo = ( eventinfo_t * ) evinfo_gp;
2431 int *evcount_p = & ( ( struct dosync_arg * ) arg_gp )->evcount;
2432 ctx_t *ctx_p = ( ( struct dosync_arg * ) arg_gp )->ctx_p;
2433 indexes_t *indexes_p = ( ( struct dosync_arg * ) arg_gp )->indexes_p;
2434 queue_id_t queue_id = ( queue_id_t ) ( ( struct dosync_arg * ) arg_gp )->data;
2435 debug ( 3, "queue_id == %i.", queue_id );
2436
2437 if ( ctx_p->flags[THREADING] == PM_SAFE )
2438 if ( sync_islocked ( fpath ) ) {
2439 debug ( 3, "\"%s\" is locked, dropping to waitlock queue", fpath );
2440 eventinfo_t *evinfo_dup = xmalloc ( sizeof ( *evinfo_dup ) );
2441 memcpy ( evinfo_dup, evinfo, sizeof ( *evinfo ) );
2442 sync_queuesync ( fpath, evinfo_dup, ctx_p, indexes_p, QUEUE_LOCKWAIT );
2443 return;
2444 }
2445
2446 if ( ( ctx_p->listoutdir == NULL ) && ( ! ( ctx_p->synchandler_argf & SHFL_INCLUDE_LIST ) ) && ( ! ( ctx_p->flags[MODE] == MODE_SO ) ) ) {
2447 debug ( 3, "calling sync_dosync()" );
2448 SAFE ( sync_dosync ( fpath, evinfo->evmask, ctx_p, indexes_p ), debug ( 1, "fpath == \"%s\"; evmask == 0x%o", fpath, evinfo->evmask ); exit ( errno ? errno : -1 ) ); // TODO: remove exit() from here
2449 return;
2450 }
2451
2452 int isnew = 0;
2453 eventinfo_t *evinfo_idx = indexes_fpath2ei ( indexes_p, fpath );
2454
2455 if ( evinfo_idx == NULL ) {
2456 evinfo_idx = ( eventinfo_t * ) xmalloc ( sizeof ( *evinfo_idx ) );
2457 memset ( evinfo_idx, 0, sizeof ( *evinfo_idx ) );
2458 isnew++;
2459 ( *evcount_p )++;
2460 evinfo_idx->evmask = evinfo->evmask;
2461 evinfo_idx->flags = evinfo->flags;
2462 evinfo_idx->objtype_old = evinfo->objtype_old;
2463 evinfo_idx->objtype_new = evinfo->objtype_new;
2464 evinfo_idx->seqid_min = evinfo->seqid_min;
2465 evinfo_idx->seqid_max = evinfo->seqid_max;
2466 } else
2467 evinfo_merge ( ctx_p, evinfo_idx, evinfo );
2468
2469 queue_id_t _queue_id = 0;
2470
2471 while ( _queue_id < QUEUE_MAX ) {
2472 if ( _queue_id == queue_id ) {
2473 _queue_id++;
2474 continue;
2475 }
2476
2477 eventinfo_t *evinfo_q = indexes_lookupinqueue ( indexes_p, fpath, _queue_id );
2478
2479 if ( evinfo_q != NULL ) {
2480 evinfo_merge ( ctx_p, evinfo_idx, evinfo_q );
2481 indexes_removefromqueue ( indexes_p, fpath, _queue_id );
2482
2483 if ( !indexes_queuelen ( indexes_p, _queue_id ) )
2484 ctx_p->_queues[_queue_id].stime = 0;
2485 }
2486
2487 _queue_id++;
2488 }
2489
2490 if ( isnew ) {
2491 debug ( 4, "Collecting \"%s\"", fpath );
2492 // Fix the path (if required) and call indexes_fpath2ei_add() to remeber the new object to be synced
2493 sync_indexes_fpath2ei_addfixed ( ctx_p, indexes_p, fpath, evinfo_idx );
2494 } else
2495 free ( fpath );
2496
2497 return;
2498}
2499
2503};
2504gboolean sync_trylocked ( gpointer fpath_gp, gpointer evinfo_gp, gpointer arg_gp )
2505{
2506 char *fpath = ( char * ) fpath_gp;
2507 eventinfo_t *evinfo = ( eventinfo_t * ) evinfo_gp;
2508 struct dosync_arg *arg_p = ( struct dosync_arg * ) arg_gp;
2509 ctx_t *ctx_p = arg_p->ctx_p;
2510 indexes_t *indexes_p = arg_p->indexes_p;
2511 struct trylocked_arg *data = arg_p->data;
2512
2513 if ( !sync_islocked ( fpath ) ) {
2514 if ( sync_prequeue_loadmark ( 0, ctx_p, indexes_p, NULL, fpath, NULL,
2515 evinfo->objtype_old,
2516 evinfo->objtype_new,
2517 evinfo->evmask,
2518 0, 0, 0, &data->path_full, &data->path_full_len, evinfo ) ) {
2519 critical ( "Cannot re-queue \"%s\" to be synced", fpath );
2520 return FALSE;
2521 }
2522
2523 return TRUE;
2524 }
2525
2526 return FALSE;
2527}
2528
2530{
2531 int ret0 = 0, ret1 = 0;
2532
2533 if ( ctx_p->flags[DONTUNLINK] )
2534 return 0;
2535
2536 debug ( 3, "(ctx_p, {inc: %p, exc: %p}) thread %p", arg_p->incfpath, arg_p->excfpath, pthread_self() );
2537
2538 if ( arg_p->excfpath != NULL ) {
2539 debug ( 3, "unlink()-ing exclude-file: \"%s\"", arg_p->excfpath );
2540 ret0 = unlink ( arg_p->excfpath );
2541 free ( arg_p->excfpath );
2542 }
2543
2544 if ( arg_p->incfpath != NULL ) {
2545 debug ( 3, "unlink()-ing include-file: \"%s\"", arg_p->incfpath );
2546 ret1 = unlink ( arg_p->incfpath );
2547 free ( arg_p->incfpath );
2548 }
2549
2550 free ( arg_p );
2551 return ret0 ? ret0 : ret1;
2552}
2553
2554void sync_queuesync_wrapper ( gpointer fpath_gp, gpointer evinfo_gp, gpointer arg_gp )
2555{
2556 char *fpath_rel = ( char * ) fpath_gp;
2557 eventinfo_t *evinfo = ( eventinfo_t * ) evinfo_gp;
2558 ctx_t *ctx_p = ( ( struct dosync_arg * ) arg_gp )->ctx_p;
2559 indexes_t *indexes_p = ( ( struct dosync_arg * ) arg_gp )->indexes_p;
2560 sync_queuesync ( fpath_rel, evinfo, ctx_p, indexes_p, QUEUE_AUTO );
2561 return;
2562}
2563
2565{
2566 struct dosync_arg dosync_arg;
2568 dosync_arg.indexes_p = indexes_p;
2569 debug ( 3, "collected %i events per this time.", g_hash_table_size ( indexes_p->fpath2ei_ht ) );
2570 g_hash_table_foreach ( indexes_p->fpath2ei_ht, sync_queuesync_wrapper, &dosync_arg );
2571 g_hash_table_remove_all ( indexes_p->fpath2ei_ht );
2572 return 0;
2573}
2574
2576{
2577 time_t tm = time ( NULL );
2579
2581 debug ( 3, "(%i, ...): too early (%i + %i > %i).", queue_id, queueinfo->stime, queueinfo->collectdelay, tm );
2582 return 0;
2583 }
2584
2585 queueinfo->stime = 0;
2586 int evcount_real = g_hash_table_size ( indexes_p->fpath2ei_coll_ht[queue_id] );
2587 debug ( 3, "(%i, ...): evcount_real == %i", queue_id, evcount_real );
2588
2589 if ( evcount_real <= 0 ) {
2590 debug ( 3, "(%i, ...): no events, return 0.", queue_id );
2591 return 0;
2592 }
2593
2594 switch ( queue_id ) {
2595 case QUEUE_LOCKWAIT: {
2596 struct trylocked_arg arg_data = {0};
2597 dosync_arg->data = &arg_data;
2598 g_hash_table_foreach_remove ( indexes_p->fpath2ei_coll_ht[queue_id], sync_trylocked, dosync_arg );
2599 // Placing to global queues recently unlocked objects
2600 sync_prequeue_unload ( ctx_p, indexes_p );
2601#ifdef PARANOID
2602
2603 if ( arg_data.path_full != NULL )
2604#endif
2605 free ( arg_data.path_full );
2606
2607 break;
2608 }
2609
2610 default: {
2611 g_hash_table_foreach ( indexes_p->fpath2ei_coll_ht[queue_id], _sync_idle_dosync_collectedevents, dosync_arg );
2612 g_hash_table_remove_all ( indexes_p->fpath2ei_coll_ht[queue_id] );
2613
2614 if ( !ctx_p->flags[RSYNCPREFERINCLUDE] ) {
2615 g_hash_table_foreach ( indexes_p->exc_fpath_coll_ht[queue_id], _sync_idle_dosync_collectedexcludes, dosync_arg );
2616 g_hash_table_remove_all ( indexes_p->exc_fpath_coll_ht[queue_id] );
2617 }
2618
2619 break;
2620 }
2621 }
2622
2623 return 0;
2624}
2625
2627{
2628 pid_t pid = getpid();
2629 time_t tm = time ( NULL );
2630 stat64_t stat64;
2631 int counter = 0;
2632
2633 do {
2634 snprintf ( fpath, PATH_MAX, "%s/.clsync-%s.%u.%lu.%lu.%u", ctx_p->listoutdir, name, pid, ( long ) pthread_self(), ( unsigned long ) tm, rand() ); // To be unique
2635 lstat64 ( fpath, &stat64 );
2636
2637 if ( counter++ > COUNTER_LIMIT ) {
2638 error ( "Cannot find unused filename for list-file. The last try was \"%s\".", fpath );
2639 return ENOENT;
2640 }
2641 } while ( errno != ENOENT ); // TODO: find another way to check if the object exists
2642
2643 errno = 0;
2644 return 0;
2645}
2646
2647int sync_idle_dosync_collectedevents_listcreate ( struct dosync_arg *dosync_arg_p, char *name )
2648{
2649 debug ( 3, "Creating %s file", name );
2650 char *fpath = dosync_arg_p->outf_path;
2651 ctx_t *ctx_p = dosync_arg_p->ctx_p;
2652 int ret;
2653
2654 if ( ( ret = sync_idle_dosync_collectedevents_uniqfname ( ctx_p, fpath, name ) ) ) {
2655 error ( "sync_idle_dosync_collectedevents_listcreate: Cannot get unique file name." );
2656 return ret;
2657 }
2658
2659 dosync_arg_p->outf = fopen ( fpath, "w" );
2660
2661 if ( dosync_arg_p->outf == NULL ) {
2662 error ( "Cannot open \"%s\" as file for writing.", fpath );
2663 return errno;
2664 }
2665
2666 setbuffer ( dosync_arg_p->outf, dosync_arg_p->buf, BUFSIZ );
2667 debug ( 3, "Created list-file \"%s\"", fpath );
2668 dosync_arg_p->linescount = 0;
2669 return 0;
2670}
2671
2674
2676{
2678 free ( rsync_escape_result );
2679}
2680
2681const char *rsync_escape ( const char *path )
2682{
2683 size_t sc_count = 0;
2684 size_t i = 0;
2685
2686 while ( 1 ) {
2687 switch ( path[i] ) {
2688 case 0:
2689 goto l_rsync_escape_loop0_end;
2690
2691 case '[':
2692 case ']':
2693 case '*':
2694 case '?':
2695 case '\\':
2696 sc_count++;
2697 }
2698
2699 i++;
2700 };
2701
2702l_rsync_escape_loop0_end:
2703 if ( !sc_count )
2704 return path;
2705
2706 size_t required_size = i + sc_count + 1;
2707
2708 if ( required_size >= rsync_escape_result_size ) {
2709 rsync_escape_result_size = required_size + ALLOC_PORTION;
2711 }
2712
2713 // TODO: Optimize this. Second "switch" is a bad way.
2714 i++;
2715
2716 while ( i-- ) {
2717 rsync_escape_result[i + sc_count] = path[i];
2718
2719 switch ( path[i] ) {
2720 case '[':
2721 case ']':
2722 case '*':
2723 case '?':
2724 case '\\':
2725 sc_count--;
2726 rsync_escape_result[i + sc_count] = '\\';
2727 break;
2728 }
2729 }
2730
2731 return rsync_escape_result;
2732}
2733
2734static inline int rsync_outline ( FILE *outf, char *outline, eventinfo_flags_t flags )
2735{
2736#ifdef VERYPARANOID
2737 critical_on ( outf == NULL );
2738#endif
2739
2740 // doxygen recognizes / * * as start of a doxy comment, so trick it with concatenated strings
2741 if ( flags & EVIF_RECURSIVELY ) {
2742 debug ( 3, "Recursively \"%s\": Writing to rsynclist: \"%s/*""**\".", outline, outline );
2743 critical_on ( fprintf ( outf, "%s/*""**\n", outline ) <= 0 );
2744 } else if ( flags & EVIF_CONTENTRECURSIVELY ) {
2745 debug ( 3, "Content-recursively \"%s\": Writing to rsynclist: \"%s/*""*\".", outline, outline );
2746 critical_on ( fprintf ( outf, "%s/*""*\n", outline ) <= 0 );
2747 } else {
2748 debug ( 3, "Non-recursively \"%s\": Writing to rsynclist: \"%s\".", outline, outline );
2749 critical_on ( fprintf ( outf, "%s\n", outline ) <= 0 );
2750 }
2751
2752 return 0;
2753}
2754
2755gboolean rsync_aggrout ( gpointer outline_gp, gpointer flags_gp, gpointer arg_gp )
2756{
2757 struct dosync_arg *dosync_arg_p = ( struct dosync_arg * ) arg_gp;
2758 char *outline = ( char * ) outline_gp;
2759 FILE *outf = dosync_arg_p->outf;
2760 eventinfo_flags_t flags = ( eventinfo_flags_t ) GPOINTER_TO_INT ( flags_gp );
2761// debug(3, "\"%s\"", outline);
2762 int ret;
2763
2764 if ( ( ret = rsync_outline ( outf, outline, flags ) ) ) {
2765 error ( "Got error from rsync_outline(). Exit." );
2766 exit ( ret ); // TODO: replace this with kill(0, ...)
2767 }
2768
2769 return TRUE;
2770}
2771
2772static inline int rsync_listpush ( indexes_t *indexes_p, const char *fpath, size_t fpath_len, eventinfo_flags_t flags, unsigned int *linescount_p )
2773{
2774 char *fpathwslash;
2775
2776 if ( fpath_len > 0 ) {
2777 // Prepending with the slash
2778 fpathwslash = alloca ( fpath_len + 2 );
2779 fpathwslash[0] = '/';
2780 memcpy ( &fpathwslash[1], fpath, fpath_len + 1 );
2781 } else {
2782 // In this case slash is not required
2783 fpathwslash = ( char * ) fpath;
2784 }
2785
2786 fpathwslash = ( char * ) rsync_escape ( fpathwslash );
2787 char *end = fpathwslash;
2788 debug ( 3, "\"%s\": Adding to rsynclist: \"%s\" with flags %p.",
2789 fpathwslash, fpathwslash, ( void * ) ( long ) flags );
2790 indexes_outaggr_add ( indexes_p, strdup ( fpathwslash ), flags );
2791
2792 if ( linescount_p != NULL )
2793 ( *linescount_p )++;
2794
2795 while ( end != NULL ) {
2796 if ( *fpathwslash == 0x00 )
2797 break;
2798
2799 debug ( 3, "Non-recursively \"%s\": Adding to rsynclist: \"%s\".", fpathwslash, fpathwslash );
2800 indexes_outaggr_add ( indexes_p, strdup ( fpathwslash ), EVIF_NONE );
2801
2802 if ( linescount_p != NULL )
2803 ( *linescount_p )++;
2804
2805 end = strrchr ( fpathwslash, '/' );
2806
2807 if ( end == NULL )
2808 break;
2809
2810 if ( end - fpathwslash <= 0 )
2811 break;
2812
2813 *end = 0x00;
2814 };
2815
2816 return 0;
2817}
2818
2819gboolean sync_idle_dosync_collectedevents_rsync_exclistpush ( gpointer fpath_gp, gpointer flags_gp, gpointer arg_gp )
2820{
2821 struct dosync_arg *dosync_arg_p = ( struct dosync_arg * ) arg_gp;
2822 char *fpath = ( char * ) fpath_gp;
2823 FILE *excf = dosync_arg_p->outf;
2824 eventinfo_flags_t flags = GPOINTER_TO_INT ( flags_gp );
2825// ctx_t *ctx_p = dosync_arg_p->ctx_p;
2826// indexes_t *indexes_p = dosync_arg_p->indexes_p;
2827 debug ( 3, "\"%s\"", fpath );
2828 size_t fpath_len = strlen ( fpath );
2829 char *fpathwslash;
2830
2831 if ( fpath_len > 0 ) {
2832 // Prepending with the slash
2833 fpathwslash = alloca ( fpath_len + 2 );
2834 fpathwslash[0] = '/';
2835 memcpy ( &fpathwslash[1], fpath, fpath_len + 1 );
2836 } else {
2837 // In this case slash is not required
2838 fpathwslash = fpath;
2839 }
2840
2841 fpathwslash = ( char * ) rsync_escape ( fpathwslash );
2842 int ret;
2843
2844 if ( ( ret = rsync_outline ( excf, fpathwslash, flags ) ) ) {
2845 error ( "Got error from rsync_outline(). Exit." );
2846 exit ( ret ); // TODO: replace this with kill(0, ...)
2847 }
2848
2849 return TRUE;
2850}
2851
2853{
2854 ctx_t *ctx_p = dosync_arg_p->ctx_p;
2855 indexes_t *indexes_p = dosync_arg_p->indexes_p;
2856 debug ( 3, "Committing the file (flags[MODE] == %i)", ctx_p->flags[MODE] );
2857
2858 if (
2859 ( ctx_p->flags[MODE] == MODE_RSYNCDIRECT ) ||
2860 ( ctx_p->flags[MODE] == MODE_RSYNCSHELL ) ||
2861 ( ctx_p->flags[MODE] == MODE_RSYNCSO )
2862 )
2863 g_hash_table_foreach_remove ( indexes_p->out_lines_aggr_ht, rsync_aggrout, dosync_arg_p );
2864
2865 if ( dosync_arg_p->outf != NULL ) {
2866 critical_on ( fclose ( dosync_arg_p->outf ) );
2867 dosync_arg_p->outf = NULL;
2868 }
2869
2870 if ( dosync_arg_p->evcount > 0 ) {
2871 thread_callbackfunct_arg_t *callback_arg_p;
2872 debug ( 3, "%s [%s] (%p) -> %s [%s]", ctx_p->watchdir, ctx_p->watchdirwslash, ctx_p->watchdirwslash,
2874
2875 if ( ctx_p->flags[MODE] == MODE_SO ) {
2876 api_eventinfo_t *ei = dosync_arg_p->api_ei;
2877 return so_call_sync ( ctx_p, indexes_p, dosync_arg_p->evcount, ei );
2878 }
2879
2880 if ( ctx_p->flags[MODE] == MODE_RSYNCSO )
2881 return so_call_rsync (
2882 ctx_p,
2883 indexes_p,
2884 dosync_arg_p->outf_path,
2885 * ( dosync_arg_p->excf_path ) ? dosync_arg_p->excf_path : NULL );
2886
2887 callback_arg_p = xcalloc ( 1, sizeof ( *callback_arg_p ) );
2888
2890 callback_arg_p->incfpath = strdup ( dosync_arg_p->outf_path );
2891
2893 callback_arg_p->excfpath = strdup ( dosync_arg_p->excf_path );
2894
2895 {
2896 int rc;
2897 dosync_arg_p->list_type_str =
2900 ? "rsynclist" : "synclist";
2901 debug ( 9, "dosync_arg_p->include_list_count == %u", dosync_arg_p->include_list_count );
2902 char **argv = sync_customargv ( ctx_p, dosync_arg_p, &ctx_p->synchandler_args[SHARGS_PRIMARY] );
2903
2904 while ( dosync_arg_p->include_list_count )
2905 free ( ( char * ) dosync_arg_p->include_list[--dosync_arg_p->include_list_count] );
2906
2907 rc = SYNC_EXEC_ARGV (
2908 ctx_p,
2909 indexes_p,
2911 callback_arg_p,
2912 argv );
2913#ifdef THREADING_SUPPORT
2914
2915 if ( !SHOULD_THREAD ( ctx_p ) ) // If it's a thread then it will free the argv in GC. If not a thread then we have to free right here.
2916#endif
2917 argv_free ( argv );
2918
2919 return rc;
2920 }
2921 }
2922
2923 return 0;
2924#ifdef DOXYGEN
2925 sync_exec_argv ( NULL, NULL );
2926 sync_exec_argv_thread ( NULL, NULL );
2927#endif
2928}
2929
2930void sync_inclist_rotate ( ctx_t *ctx_p, struct dosync_arg *dosync_arg_p )
2931{
2932 int ret;
2933 char newexc_path[PATH_MAX + 1];
2934
2936 // TODO: optimize this out {
2937 if ( ( ret = sync_idle_dosync_collectedevents_uniqfname ( ctx_p, newexc_path, "exclist" ) ) ) {
2938 error ( "Cannot get unique file name." );
2939 exit ( ret );
2940 }
2941
2942 if ( ( ret = fileutils_copy ( dosync_arg_p->excf_path, newexc_path ) ) ) {
2943 error ( "Cannot copy file \"%s\" to \"%s\".", dosync_arg_p->excf_path, newexc_path );
2944 exit ( ret );
2945 }
2946
2947 // }
2948 // That's required to copy excludes' list file for every rsync execution.
2949 // The problem appears do to unlink()-ing the excludes' list file on callback function
2950 // "sync_idle_dosync_collectedevents_cleanup()" of every execution.
2951 }
2952
2953 if ( ( ret = sync_idle_dosync_collectedevents_commitpart ( dosync_arg_p ) ) ) {
2954 error ( "Cannot commit list-file \"%s\"", dosync_arg_p->outf_path );
2955 exit ( ret ); // TODO: replace with kill(0, ...);
2956 }
2957
2959#ifdef VERYPARANOID
2960 require_strlen_le ( newexc_path, PATH_MAX );
2961#endif
2962 strcpy ( dosync_arg_p->excf_path, newexc_path ); // TODO: optimize this out
2963
2964 if ( ( ret = sync_idle_dosync_collectedevents_listcreate ( dosync_arg_p, "list" ) ) ) {
2965 error ( "Cannot create new list-file" );
2966 exit ( ret ); // TODO: replace with kill(0, ...);
2967 }
2968 }
2969
2970 return;
2971}
2972
2973void sync_idle_dosync_collectedevents_listpush ( gpointer fpath_gp, gpointer evinfo_gp, gpointer arg_gp )
2974{
2975 struct dosync_arg *dosync_arg_p = ( struct dosync_arg * ) arg_gp;
2976 char *fpath = ( char * ) fpath_gp;
2977 eventinfo_t *evinfo = ( eventinfo_t * ) evinfo_gp;
2978 //int *evcount_p =&dosync_arg_p->evcount;
2979 FILE *outf = dosync_arg_p->outf;
2980 ctx_t *ctx_p = dosync_arg_p->ctx_p;
2981 unsigned int *linescount_p = &dosync_arg_p->linescount;
2982 indexes_t *indexes_p = dosync_arg_p->indexes_p;
2983 api_eventinfo_t **api_ei_p = &dosync_arg_p->api_ei;
2984 int *api_ei_count_p = &dosync_arg_p->api_ei_count;
2985 debug ( 3, "\"%s\" with int-flags %p. "
2986 "evinfo: seqid_min == %u, seqid_max == %u type_o == %i, type_n == %i",
2987 fpath, ( void * ) ( unsigned long ) evinfo->flags,
2988 evinfo->seqid_min, evinfo->seqid_max,
2989 evinfo->objtype_old, evinfo->objtype_new
2990 );
2991
2992 // so-module case:
2993 if ( ctx_p->flags[MODE] == MODE_SO ) {
2994 api_eventinfo_t *ei = & ( *api_ei_p ) [ ( *api_ei_count_p )++];
2995 ei->evmask = evinfo->evmask;
2996 ei->flags = evinfo->flags;
2997 ei->objtype_old = evinfo->objtype_old;
2998 ei->objtype_new = evinfo->objtype_new;
2999 ei->path_len = strlen ( fpath );
3000 ei->path = strdup ( fpath );
3001 return;
3002 }
3003
3005 dosync_arg_p->include_list[dosync_arg_p->include_list_count++] = strdup ( fpath );
3006
3007 if (
3008 dosync_arg_p->include_list_count >= ( size_t )
3009 ( MAXARGUMENTS -
3010 MAX (
3013 )
3014 )
3015 )
3016 sync_inclist_rotate ( ctx_p, dosync_arg_p );
3017 }
3018
3019 // Finish if we don't use list files
3020 if ( ! ( ctx_p->synchandler_argf &
3022 return;
3023
3024 // List files cases:
3025
3026 // non-RSYNC case
3027 if ( ! (
3028 ( ctx_p->flags[MODE] == MODE_RSYNCSHELL ) ||
3029 ( ctx_p->flags[MODE] == MODE_RSYNCDIRECT ) ||
3030 ( ctx_p->flags[MODE] == MODE_RSYNCSO )
3031 ) ) {
3032 if ( ctx_p->flags[SYNCLISTSIMPLIFY] ) {
3033 critical_on ( fprintf ( outf, "%s\n", fpath ) <= 0 );
3034 } else {
3035 critical_on ( fprintf ( outf, "sync %s %i %s\n", ctx_p->label, evinfo->evmask, fpath ) <= 0 );
3036 }
3037
3038 return;
3039 }
3040
3041 // RSYNC case
3042 if ( ctx_p->rsyncinclimit && ( *linescount_p >= ctx_p->rsyncinclimit ) )
3043 sync_inclist_rotate ( ctx_p, dosync_arg_p );
3044
3045 int ret;
3046
3047 if ( ( ret = rsync_listpush ( indexes_p, fpath, strlen ( fpath ), evinfo->flags, linescount_p ) ) ) {
3048 error ( "Got error from rsync_listpush(). Exit." );
3049 exit ( ret );
3050 }
3051
3052 return;
3053}
3054
3056{
3057 debug ( 3, "" );
3058 struct dosync_arg dosync_arg = {0};
3060 dosync_arg.indexes_p = indexes_p;
3061 char isrsyncpreferexclude =
3062 (
3063 ( ctx_p->flags[MODE] == MODE_RSYNCDIRECT ) ||
3064 ( ctx_p->flags[MODE] == MODE_RSYNCSHELL ) ||
3065 ( ctx_p->flags[MODE] == MODE_RSYNCSO )
3066 ) && ( !ctx_p->flags[RSYNCPREFERINCLUDE] );
3067#ifdef PARANOID
3068
3069 if ( ctx_p->listoutdir != NULL ) {
3070 g_hash_table_remove_all ( indexes_p->fpath2ei_ht );
3071
3072 if ( isrsyncpreferexclude )
3073 g_hash_table_remove_all ( indexes_p->exc_fpath_ht );
3074 }
3075
3076#endif
3077 // Setting the time to sync not before it:
3078 ctx_p->synctime = time ( NULL ) + ctx_p->syncdelay;
3079 debug ( 3, "Next sync will be not before: %u", ctx_p->synctime );
3080 int queue_id = 0;
3081
3082 while ( queue_id < QUEUE_MAX ) {
3083 int ret;
3084
3085 if ( ( queue_id == QUEUE_LOCKWAIT ) && ( ctx_p->flags[THREADING] != PM_SAFE ) ) {
3086 queue_id++;
3087 continue;
3088 }
3089
3090 queue_id_t *queue_id_p = ( queue_id_t * ) &dosync_arg.data;
3091 *queue_id_p = queue_id;
3093
3094 if ( ret ) {
3095 error ( "Got error while processing queue #%i\n.", queue_id );
3096 g_hash_table_remove_all ( indexes_p->fpath2ei_ht );
3097
3098 if ( isrsyncpreferexclude )
3099 g_hash_table_remove_all ( indexes_p->exc_fpath_ht );
3100
3101 return ret;
3102 }
3103
3104 queue_id++;
3105 }
3106
3107 if ( !dosync_arg.evcount ) {
3108 debug ( 3, "Summary events' count is zero. Return 0." );
3109 return 0;
3110 }
3111
3112 if ( ctx_p->flags[MODE] == MODE_SO ) {
3113 //dosync_arg.evcount = g_hash_table_size(indexes_p->fpath2ei_ht);
3114 debug ( 3, "There's %i events. Processing.", dosync_arg.evcount );
3115 dosync_arg.api_ei = ( api_eventinfo_t * ) xmalloc ( dosync_arg.evcount * sizeof ( *dosync_arg.api_ei ) );
3116 }
3117
3118 {
3119 int ret;
3120
3121 if ( ( ctx_p->listoutdir != NULL ) || ( ctx_p->flags[MODE] == MODE_SO ) ) {
3122 if ( ! ( ctx_p->flags[MODE] == MODE_SO ) ) {
3123 * ( dosync_arg.excf_path ) = 0x00;
3124
3125 if ( isrsyncpreferexclude ) {
3126 if ( ( ret = sync_idle_dosync_collectedevents_listcreate ( &dosync_arg, "exclist" ) ) ) {
3127 error ( "Cannot create list-file" );
3128 return ret;
3129 }
3130
3131#ifdef PARANOID
3132 g_hash_table_remove_all ( indexes_p->out_lines_aggr_ht );
3133#endif
3134 g_hash_table_foreach_remove ( indexes_p->exc_fpath_ht, sync_idle_dosync_collectedevents_rsync_exclistpush, &dosync_arg );
3135 g_hash_table_foreach_remove ( indexes_p->out_lines_aggr_ht, rsync_aggrout, &dosync_arg );
3136 critical_on ( fclose ( dosync_arg.outf ) );
3137#ifdef VERYPARANOID
3139#endif
3140 strcpy ( dosync_arg.excf_path, dosync_arg.outf_path ); // TODO: remove this strcpy()
3141 }
3142
3143 if ( ( ret = sync_idle_dosync_collectedevents_listcreate ( &dosync_arg, "list" ) ) ) {
3144 error ( "Cannot create list-file" );
3145 return ret;
3146 }
3147 }
3148 }
3149
3150 if ( ( ctx_p->listoutdir != NULL ) || ( ctx_p->flags[MODE] == MODE_SO ) || ( ctx_p->synchandler_argf & SHFL_INCLUDE_LIST ) ) {
3151#ifdef PARANOID
3152 g_hash_table_remove_all ( indexes_p->out_lines_aggr_ht );
3153#endif
3154 g_hash_table_foreach ( indexes_p->fpath2ei_ht, sync_idle_dosync_collectedevents_listpush, &dosync_arg );
3155
3157 error ( "Cannot submit to sync the list \"%s\"", dosync_arg.outf_path );
3158 // TODO: free dosync_arg.api_ei on case of error
3159 g_hash_table_remove_all ( indexes_p->fpath2ei_ht );
3160 return ret;
3161 }
3162
3163 g_hash_table_remove_all ( indexes_p->fpath2ei_ht );
3164 }
3165 }
3166
3168 return 0;
3169}
3170
3171int apievinfo2rsynclist ( indexes_t *indexes_p, FILE *listfile, int n, api_eventinfo_t *apievinfo )
3172{
3173 int i;
3174
3175 if ( listfile == NULL ) {
3176 error ( "listfile == NULL." );
3177 return EINVAL;
3178 }
3179
3180 i = 0;
3181
3182 while ( i < n ) {
3183 rsync_listpush ( indexes_p, apievinfo[i].path, apievinfo[i].path_len, apievinfo[i].flags, NULL );
3184 i++;
3185 }
3186
3187 struct dosync_arg dosync_arg = {0};
3188
3189 dosync_arg.outf = listfile;
3190
3191 g_hash_table_foreach_remove ( indexes_p->out_lines_aggr_ht, rsync_aggrout, &dosync_arg );
3192
3193 return 0;
3194}
3195
/**
 * One pass of the idle-time sync machinery: reaps finished syncer threads,
 * honours the standby file (if configured), then processes the collected
 * events via sync_idle_dosync_collectedevents().
 *
 * @return 0 on success or when held over by the standby file;
 *         a non-zero error code from the underlying steps otherwise.
 */
int sync_idle ( ctx_t *ctx_p, indexes_t *indexes_p )
{
    int ret;
    // Collecting garbage
#ifdef THREADING_SUPPORT
    ret = thread_gc ( ctx_p );

    if ( ret ) return ret;

#endif

    // Checking if we can sync

    if ( ctx_p->flags[STANDBYFILE] ) {
        struct stat st;

        // If the standby file exists, hold off syncing for one sleep period.
        if ( !stat ( ctx_p->standbyfile, &st ) ) {
            state_t state_old;
            state_old = ctx_p->state;
            // NOTE(review): 1-2 source lines are missing from this view here
            // (presumably setting a temporary hold state) — confirm upstream.
            debug ( 1, "Found standby file. Holding over syncs. Sleeping "XTOSTR ( SLEEP_SECONDS ) " second." );
            sleep ( SLEEP_SECONDS );
            ctx_p->state = state_old;
            return 0;
        }
    }

    // Syncing
    debug ( 3, "calling sync_idle_dosync_collectedevents()" );
#ifdef CLUSTER_SUPPORT
    ret = cluster_lock_byindexes();

    if ( ret ) return ret;

#endif
    ret = sync_idle_dosync_collectedevents ( ctx_p, indexes_p );

    if ( ret ) return ret;

#ifdef CLUSTER_SUPPORT
    ret = cluster_unlock_all();

    if ( ret ) return ret;

#endif
    return 0;
}
3245
/**
 * Waits for the next filesystem-notification event or queue/sync deadline.
 *
 * Computes the smallest pending delay across all event queues, the global
 * synctime and (when threading) the nearest thread-expiration time, then
 * blocks in the monitor engine's wait() for at most that long.
 *
 * NOTE(review): several source lines are missing from this view (marked
 * below); the control flow shown here is incomplete — confirm upstream.
 *
 * @return 0 on "nothing to wait for"/timeout, the wait() result otherwise.
 */
int notify_wait ( ctx_t *ctx_p, indexes_t *indexes_p )
{
    static struct timeval tv;
    time_t tm = time ( NULL );
    long delay = ( ( unsigned long ) ~0 >> 1 );   // start with "infinite" delay
    threadsinfo_t *threadsinfo_p = thread_info();
    debug ( 4, "pthread_mutex_unlock(&threadsinfo_p->mutex[PTHREAD_MUTEX_STATE])" );
    pthread_cond_broadcast ( &threadsinfo_p->cond[PTHREAD_MUTEX_STATE] );
    pthread_mutex_unlock ( &threadsinfo_p->mutex[PTHREAD_MUTEX_STATE] );
    long queue_id = 0;

    // Scan all queues for the nearest collect-delay deadline.
    while ( queue_id < QUEUE_MAX ) {
        // NOTE(review): line missing here (presumably fetching queueinfo and
        // advancing queue_id) — confirm upstream.

        if ( !queueinfo->stime )
            continue;

        // NOTE(review): condition line missing here (instant-queue check).
            debug ( 3, "There're events in instant queue (#%i), don't waiting.", queue_id - 1 );
            return 0;
        }

        int qdelay = queueinfo->stime + queueinfo->collectdelay - tm;
        debug ( 3, "queue #%i: %i %i %i -> %i", queue_id - 1, queueinfo->stime, queueinfo->collectdelay, tm, qdelay );

        // Clamp overdue queues to at most syncdelay in the past.
        if ( qdelay < - ( long ) ctx_p->syncdelay )
            qdelay = - ( long ) ctx_p->syncdelay;

        delay = MIN ( delay, qdelay );
    }

    long synctime_delay = ( ( long ) ctx_p->synctime ) - ( ( long ) tm );
    synctime_delay = synctime_delay > 0 ? synctime_delay : 0;
    debug ( 3, "delay = MAX(%li, %li)", delay, synctime_delay );
    delay = MAX ( delay, synctime_delay );
    delay = delay > 0 ? delay : 0;
#ifdef THREADING_SUPPORT

    // Never sleep past the next syncer-thread expiration time.
    if ( ctx_p->flags[THREADING] ) {
        time_t _thread_nextexpiretime = thread_nextexpiretime();
        debug ( 3, "thread_nextexpiretime == %i", _thread_nextexpiretime );

        if ( _thread_nextexpiretime ) {
            long thread_expiredelay = ( long ) thread_nextexpiretime() - ( long ) tm + 1; // +1 is to make "tm>threadinfo_p->expiretime" after select() definitely TRUE
            debug ( 3, "thread_expiredelay == %i", thread_expiredelay );
            thread_expiredelay = thread_expiredelay > 0 ? thread_expiredelay : 0;
            debug ( 3, "delay = MIN(%li, %li)", delay, thread_expiredelay );
            delay = MIN ( delay, thread_expiredelay );
        }
    }

#endif

    if ( ( !delay ) || ( ctx_p->state != STATE_RUNNING && ctx_p->state != STATE_LASTSYNC ) ) {
        debug ( 4, "forcing: no events (delay is %li, state is %s)",
                delay, status_descr[ctx_p->state] )
        return 0;
    }

    // NOTE(review): condition line missing here (presumably testing
    // EXITONNOEVENTS / last-sync) — confirm upstream.
        // zero delay if "--exit-on-no-events" is set or if it is the last sync (see "--sync-on-quit")
        tv.tv_sec = 0;
        tv.tv_usec = 0;
    } else {
        debug ( 3, "sleeping for %li second(s).", SLEEP_SECONDS );
        sleep ( SLEEP_SECONDS );
        delay = ( ( long ) delay ) > SLEEP_SECONDS ? delay - SLEEP_SECONDS : 0;
        tv.tv_sec = delay;
        tv.tv_usec = 0;
    }

    debug ( 4, "pthread_mutex_lock(&threadsinfo_p->mutex[PTHREAD_MUTEX_STATE])" );
    pthread_mutex_lock ( &threadsinfo_p->mutex[PTHREAD_MUTEX_STATE] );

    // NOTE(review): condition line missing here before this early return.
        return 0;

    debug ( 4, "pthread_mutex_unlock(&threadsinfo_p->mutex[PTHREAD_MUTEX_STATE])" );
    pthread_cond_broadcast ( &threadsinfo_p->cond[PTHREAD_MUTEX_STATE] );
    pthread_mutex_lock ( &threadsinfo_p->mutex[PTHREAD_MUTEX_SELECT] );
    pthread_mutex_unlock ( &threadsinfo_p->mutex[PTHREAD_MUTEX_STATE] );
    debug ( 8, "ctx_p->notifyenginefunct.wait() [%p]", ctx_p->notifyenginefunct.wait );
    int ret = ctx_p->notifyenginefunct.wait ( ctx_p, indexes_p, &tv );
    pthread_mutex_unlock ( &threadsinfo_p->mutex[PTHREAD_MUTEX_SELECT] );

    // An EINTR-interrupted wait is treated as a clean timeout.
    if ( ( ret == -1 ) && ( errno == EINTR ) ) {
        errno = 0;
        ret = 0;
    }

    debug ( 4, "pthread_mutex_lock(&threadsinfo_p->mutex[PTHREAD_MUTEX_STATE])" );
    pthread_mutex_lock ( &threadsinfo_p->mutex[PTHREAD_MUTEX_STATE] );

    if ( ( ctx_p->flags[EXITONNOEVENTS] ) && ( ret == 0 ) && ( ctx_p->state != STATE_LASTSYNC ) && ( ctx_p->state != STATE_TERM ) && ( ctx_p->state != STATE_EXIT ) ) {
        // if not events and "--exit-on-no-events" is set
        // NOTE(review): state-transition lines missing from this view here.
        else
    }

    return ret;
}
3349
// Runs one sync_idle() pass from within sync_loop(); on failure logs the
// error and returns its code from the enclosing function.
#define SYNC_LOOP_IDLE {\
		int ret;\
		if((ret=sync_idle(ctx_p, indexes_p))) {\
			error("got error while sync_idle().");\
			return ret;\
		}\
	}
3357
// Wakes state-condition waiters, releases the STATE mutex and restarts the
// enclosing loop iteration (for use inside sync_loop()'s main while-loop).
#define SYNC_LOOP_CONTINUE_UNLOCK {\
		pthread_cond_broadcast(&threadsinfo_p->cond[PTHREAD_MUTEX_STATE]);\
		debug(4, "pthread_mutex_unlock()");\
		pthread_mutex_unlock(&threadsinfo_p->mutex[PTHREAD_MUTEX_STATE]);\
		continue;\
	}
3364
// Body of the pre-exit hook runner (signature line not visible in this view;
// presumably `void hook_preexit ( ctx_t *ctx_p )` — TODO confirm upstream).
// Executes the configured pre-exit hook program with the sync label as its
// single argument.
{
    debug ( 2, "\"%s\" \"%s\"", ctx_p->preexithookfile, ctx_p->label );
#ifdef VERYPARANOID

    if ( ctx_p->preexithookfile == NULL )
        critical ( "ctx_p->preexithookfile == NULL" );

#endif
    // argv = { hook-path, label, NULL } — exec_argv() runs it.
    char *argv[] = { ctx_p->preexithookfile, ctx_p->label, NULL};
    exec_argv ( argv, NULL );
    return;
}
3378
/**
 * Main event loop: dispatches on ctx_p->state (initial sync, running,
 * pre-exit, last sync, rehash, thread-GC, terminate) until STATE_EXIT,
 * waiting for and handling monitor events in between.
 *
 * NOTE(review): numerous source lines are missing from this view (marked
 * below); several case bodies are visibly truncated — confirm upstream.
 *
 * @return 0/exitcode on normal termination, an error code otherwise.
 */
int sync_loop ( ctx_t *ctx_p, indexes_t *indexes_p )
{
    int ret;
    threadsinfo_t *threadsinfo_p = thread_info();
    state_p = &ctx_p->state;   // export the state for sync_switch_state()

    while ( ctx_p->state != STATE_EXIT ) {
        int events;
        debug ( 4, "pthread_mutex_lock(): PTHREAD_MUTEX_STATE" );
        pthread_mutex_lock ( &threadsinfo_p->mutex[PTHREAD_MUTEX_STATE] );
        debug ( 3, "current state is \"%s\" (%i) (iteration: %u/%u); threadsinfo_p->used == %u",
                // NOTE(review): one argument line missing from this view.
                ctx_p->iteration_num, ctx_p->flags[MAXITERATIONS], threadsinfo_p->used );

        // In non-threading mode, wait for leftover syncer threads to finish.
        while ( ( ctx_p->flags[THREADING] == PM_OFF ) && threadsinfo_p->used ) {
            debug ( 1, "We are in non-threading mode but have %u syncer threads. Waiting for them end.", threadsinfo_p->used );
            pthread_cond_wait ( &threadsinfo_p->cond[PTHREAD_MUTEX_STATE], &threadsinfo_p->mutex[PTHREAD_MUTEX_STATE] );
            pthread_mutex_unlock ( &threadsinfo_p->mutex[PTHREAD_MUTEX_STATE] );
        }

        events = 0;

        switch ( ctx_p->state ) {
            case STATE_THREAD_GC:
                // NOTE(review): line missing from this view here.
#ifdef THREADING_SUPPORT

                if ( thread_gc ( ctx_p ) ) {
                    // NOTE(review): line missing from this view here.
                    break;
                }

#endif
                // NOTE(review): lines missing; presumably state transition
                // then fallthrough into STATE_INITSYNC — confirm upstream.

            case STATE_INITSYNC:
                if ( !ctx_p->flags[THREADING] ) {
                    ctx_p->iteration_num = 0;
                    // NOTE(review): line missing from this view here.
                }

                // NOTE(review): line missing from this view here.
                pthread_cond_broadcast ( &threadsinfo_p->cond[PTHREAD_MUTEX_STATE] );
                pthread_mutex_unlock ( &threadsinfo_p->mutex[PTHREAD_MUTEX_STATE] );
                ret = sync_initialsync ( ctx_p->watchdir, ctx_p, indexes_p, INITSYNC_FULL );

                if ( ret ) return ret;

                if ( ( ctx_p->state == STATE_TERM ) || ( ctx_p->state == STATE_EXIT ) ) {
                    continue;
                }

                if ( ctx_p->flags[ONLYINITSYNC] ) {
                    // NOTE(review): lines missing from this view here.
                    return ret;
                }

                // NOTE(review): line missing from this view here.
                continue;

            case STATE_RUNNING:
                if ( ( !ctx_p->flags[THREADING] ) && ctx_p->flags[MAXITERATIONS] ) {
                    // NOTE(review): the branch bodies below are missing from
                    // this view (presumably state transitions) — confirm.
                    if ( ( typeof ( ctx_p->iteration_num ) ) ctx_p->flags[MAXITERATIONS] == ctx_p->iteration_num - 1 )
                    else if ( ( typeof ( ctx_p->iteration_num ) ) ctx_p->flags[MAXITERATIONS] <= ctx_p->iteration_num )
                }

            case STATE_PREEXIT:
                if ( ctx_p->state == STATE_PREEXIT && ctx_p->flags[PREEXITHOOK] == 0 && ctx_p->flags[SOFTEXITSYNC] == 0 ) {
                    // NOTE(review): body lines missing from this view here.
                }

            case STATE_LASTSYNC:
                switch ( ctx_p->state ) {
                    case STATE_PREEXIT:
                        debug ( 1, "preparing to exit" );
                        // NOTE(review): line missing from this view here.

                        if ( ctx_p->flags[PREEXITHOOK] )
                            hook_preexit ( ctx_p );

                        if ( ctx_p->flags[SOFTEXITSYNC] ) {
                            // NOTE(review): body line missing from this view.
                        } else {
                            // NOTE(review): body line missing from this view.
                        }

                        break;

                    case STATE_LASTSYNC:
                        debug ( 3, "notify_wait ( ctx_p, indexes_p ) [lastsync]" );
                        events = notify_wait ( ctx_p, indexes_p );
                        // NOTE(review): line missing from this view here.
                        break;

                    case STATE_RUNNING:
                        debug ( 3, "notify_wait ( ctx_p, indexes_p )" );
                        events = notify_wait ( ctx_p, indexes_p );
                        break;

                    default:
                        // NOTE(review): default body missing from this view.
                }

                break;

            case STATE_REHASH:
                debug ( 1, "rehashing." );
                main_rehash ( ctx_p );
                // NOTE(review): lines missing; presumably state restore and
                // fallthrough handling — confirm upstream.

            case STATE_TERM:
                // NOTE(review): body lines missing from this view here.

            case STATE_EXIT:
                // NOTE(review): body lines missing from this view here.

            default:
                critical ( "internal error: ctx_p->state == %u", ctx_p->state );
                break;
        }

        pthread_cond_broadcast ( &threadsinfo_p->cond[PTHREAD_MUTEX_STATE] );
        debug ( 4, "pthread_mutex_unlock(): PTHREAD_MUTEX_STATE" );
        pthread_mutex_unlock ( &threadsinfo_p->mutex[PTHREAD_MUTEX_STATE] );

        if ( events == 0 ) {
            debug ( 2, "events == 0" );
            // NOTE(review): line missing from this view here.
            continue; // Timeout
        }

        if ( events < 0 ) {
            error ( "Got error while waiting for event from notify subsystem." );
            return errno;
        }

        debug ( 3, "ctx_p->notifyenginefunct.handle" );
        int count = ctx_p->notifyenginefunct.handle ( ctx_p, indexes_p );

        if ( count <= 0 ) {
            error ( "Cannot handle with notify events." );
            return errno;
        }

        // NOTE(review): line missing from this view here.

        if ( ctx_p->flags[EXITONNOEVENTS] ) // clsync exits on no events, so sync_idle() is never called. We have to force the calling of it.
            // NOTE(review): statement line missing from this view here.
    }

    debug ( 1, "last SYNC_LOOP_IDLE" );
    // NOTE(review): line missing from this view here (likely SYNC_LOOP_IDLE).
    debug ( 1, "end" );
    return exitcode;
#ifdef DOXYGEN
    sync_idle ( 0, NULL, NULL );
#endif
}
3547
/**
 * Signal handler used to interrupt blocking operations (installed for
 * SIGUSR_BLOPINT in sync_run()); only logs which thread got the signal.
 */
void sync_sig_int ( int signal )
{
    // Nothing to do besides logging: the interrupted syscall returns EINTR.
    debug ( 2, "%i: Thread %p", signal, pthread_self() );
}
3553
#ifdef PARANOID
// Counts consecutive sync_tryforcecycle() attempts; reset before each lock
// acquisition loop and used to detect a probable deadlock (> KILL_TIMEOUT).
int _sync_tryforcecycle_i;
#endif
3557int sync_tryforcecycle ( ctx_t *ctx_p, pthread_t pthread_parent )
3558{
3559 ( void ) pthread_parent;
3560 debug ( 3, "sending signal to interrupt blocking operations like select()-s and so on (ctx_p->blockthread_count == %i)", ctx_p->blockthread_count );
3561 //pthread_kill(pthread_parent, SIGUSR_BLOPINT);
3562 int i, count;
3563 count = ctx_p->blockthread_count;
3564 i = 0;
3565
3566 while ( i < count ) {
3567 debug ( 2, "Sending SIGUSR_BLOPINT to thread %p", ctx_p->blockthread[i] );
3568 pthread_kill ( ctx_p->blockthread[i], SIGUSR_BLOPINT );
3569 i++;
3570 }
3571
3572#ifdef PARANOID
3573
3574 if ( ++_sync_tryforcecycle_i > KILL_TIMEOUT ) {
3575 error ( "Seems we got a deadlock." );
3576 return EDEADLK;
3577 }
3578
3579#endif
3580#ifdef SYNC_SWITCHSTATE_COND_TIMEDWAIT // Hangs
3581 struct timespec time_timeout;
3582 clock_gettime ( CLOCK_REALTIME, &time_timeout );
3583 time_timeout.tv_sec++;
3584// time_timeout.tv_sec = now.tv_sec;
3585 debug ( 3, "pthread_cond_timedwait() until %li.%li", time_timeout.tv_sec, time_timeout.tv_nsec );
3586
3587 if ( pthread_cond_timedwait ( pthread_cond_state, pthread_mutex_state, &time_timeout ) != ETIMEDOUT )
3588 return 0;
3589
3590#else
3591 debug ( 9, "sleep("XTOSTR ( SLEEP_SECONDS ) ")" );
3592 sleep ( SLEEP_SECONDS ); // TODO: replace this with pthread_cond_timedwait()
3593#endif
3594 return EINPROGRESS;
3595}
3596
3597int sync_switch_state ( ctx_t *ctx_p, pthread_t pthread_parent, int newstate )
3598{
3599 if ( state_p == NULL ) {
3600 debug ( 3, "sync_switch_state(ctx_p, %p, %i), but state_p == NULL", pthread_parent, newstate );
3601 return 0;
3602 }
3603
3604 debug ( 3, "sync_switch_state(ctx_p, %p, %i)", pthread_parent, newstate );
3605 // Getting mutexes
3606 threadsinfo_t *threadsinfo_p = thread_info();
3607
3608 if ( threadsinfo_p == NULL ) {
3609 // If no mutexes, just change the state
3610 goto l_sync_parent_interrupt_end;
3611 }
3612
3613 if ( !threadsinfo_p->mutex_init ) {
3614 // If no mutexes, just change the state
3615 goto l_sync_parent_interrupt_end;
3616 }
3617
3618 pthread_mutex_t *pthread_mutex_state = &threadsinfo_p->mutex[PTHREAD_MUTEX_STATE];
3619 pthread_mutex_t *pthread_mutex_select = &threadsinfo_p->mutex[PTHREAD_MUTEX_SELECT];
3620 pthread_cond_t *pthread_cond_state = &threadsinfo_p->cond [PTHREAD_MUTEX_STATE];
3621 // Locking all necessary mutexes
3622#ifdef PARANOID
3623 _sync_tryforcecycle_i = 0;
3624#endif
3625 debug ( 4, "while(pthread_mutex_trylock( pthread_mutex_state ))" );
3626
3627 while ( pthread_mutex_trylock ( pthread_mutex_state ) == EBUSY ) {
3628 int rc = sync_tryforcecycle ( ctx_p, pthread_parent );
3629
3630 if ( rc && rc != EINPROGRESS )
3631 return rc;
3632
3633 if ( !rc )
3634 break;
3635 }
3636
3637#ifdef PARANOID
3638 _sync_tryforcecycle_i = 0;
3639#endif
3640 debug ( 4, "while(pthread_mutex_trylock( pthread_mutex_select ))" );
3641
3642 while ( pthread_mutex_trylock ( pthread_mutex_select ) == EBUSY ) {
3643 int rc = sync_tryforcecycle ( ctx_p, pthread_parent );
3644
3645 if ( rc && rc != EINPROGRESS )
3646 return rc;
3647
3648 if ( !rc )
3649 break;
3650 }
3651
3652 // Changing the state
3653 *state_p = newstate;
3654#ifdef PARANOID
3655 pthread_kill ( pthread_parent, SIGUSR_BLOPINT );
3656#endif
3657 // Unlocking mutexes
3658 debug ( 4, "pthread_cond_broadcast(). New state is %i.", *state_p );
3659 pthread_cond_broadcast ( pthread_cond_state );
3660 debug ( 4, "pthread_mutex_unlock( pthread_mutex_state )" );
3661 pthread_mutex_unlock ( pthread_mutex_state );
3662 debug ( 4, "pthread_mutex_unlock( pthread_mutex_select )" );
3663 pthread_mutex_unlock ( pthread_mutex_select );
3664#ifdef THREADING_SUPPORT
3665 return thread_info_unlock ( 0 );
3666#else
3667 return 0;
3668#endif
3669l_sync_parent_interrupt_end:
3670 *state_p = newstate;
3671 pthread_kill ( pthread_parent, SIGUSR_BLOPINT );
3672#ifdef THREADING_SUPPORT
3673 return thread_info_unlock ( 0 );
3674#else
3675 return 0;
3676#endif
3677}
3678
3679/* === DUMP === */
3680
3688
3694
3701
3702void sync_dump_liststep ( gpointer fpath_gp, gpointer evinfo_gp, gpointer arg_gp )
3703{
3704 char *fpath = ( char * ) fpath_gp;
3705 eventinfo_t *evinfo = ( eventinfo_t * ) evinfo_gp;
3706 struct sync_dump_arg *arg = arg_gp;
3707 char act, num;
3708
3709 if ( fpath == NULL || evinfo == NULL )
3710 return;
3711
3712 switch ( arg->data ) {
3713 case DUMP_LTYPE_INCLUDE:
3714 act = '+';
3715 num = '1';
3716 break;
3717
3718 case DUMP_LTYPE_EXCLUDE:
3719 act = '-';
3720 num = '1';
3721 break;
3722
3723 case DUMP_LTYPE_EVINFO:
3724 act = '+';
3725 num = evinfo->flags & EVIF_RECURSIVELY ? '*' :
3726 ( evinfo->flags & EVIF_CONTENTRECURSIVELY ? '/' : '1' );
3727 break;
3728
3729 default:
3730 act = '?';
3731 num = '?';
3732 }
3733
3734 dprintf ( arg->fd_out, "%c%c\t%s\n", act, num, fpath );
3735 return;
3736}
3737
3738int sync_dump_thread ( threadinfo_t *threadinfo_p, void *_arg )
3739{
3740 struct sync_dump_arg *arg = _arg;
3741 char buf[BUFSIZ];
3742 snprintf ( buf, BUFSIZ, "%u-%u-%lx", threadinfo_p->iteration, threadinfo_p->thread_num, ( long ) threadinfo_p->pthread );
3743 arg->fd_out = openat ( arg->dirfd[DUMP_DIRFD_THREAD], buf, O_WRONLY | O_CREAT, DUMP_FILEMODE );
3744
3745 if ( arg->fd_out == -1 )
3746 return errno;
3747
3748 {
3749 char **argv;
3750 dprintf ( arg->fd_out,
3751 "thread:\n\titeration == %u\n\tnum == %u\n\tpthread == %lx\n\tstarttime == %lu\n\texpiretime == %lu\n\tchild_pid == %u\n\ttry_n == %u\nCommand:",
3752 threadinfo_p->iteration,
3753 threadinfo_p->thread_num,
3754 ( long ) threadinfo_p->pthread,
3755 threadinfo_p->starttime,
3756 threadinfo_p->expiretime,
3757 threadinfo_p->child_pid,
3758 threadinfo_p->try_n
3759 );
3760 argv = threadinfo_p->argv;
3761
3762 while ( *argv != NULL )
3763 dprintf ( arg->fd_out, " \"%s\"", * ( argv++ ) );
3764
3765 dprintf ( arg->fd_out, "\n" );
3766 }
3767 arg->data = DUMP_LTYPE_EVINFO;
3768 g_hash_table_foreach ( threadinfo_p->fpath2ei_ht, sync_dump_liststep, arg );
3769 close ( arg->fd_out );
3770 return 0;
3771}
3772
/**
 * Dumps the current sync state into @dir_path: an "instance" file with the
 * status, the non-threaded event table, one file per queue (and its exclude
 * table), and one file per syncer thread.
 *
 * NOTE(review): two source lines are missing from this view (marked below).
 *
 * @return errno-style result; non-zero if any step set errno.
 */
int sync_dump ( ctx_t *ctx_p, const char *const dir_path )
{
    indexes_t *indexes_p = ctx_p->indexes_p;
    int rootfd, fd_out;
    struct sync_dump_arg arg = {0};
    enum dump_dirfd_obj dirfd_obj;
    arg.ctx_p = ctx_p;
    debug ( 3, "%s", dir_path );

    if ( dir_path == NULL )
        return EINVAL;

    static const char *const subdirs[] = {
        [DUMP_DIRFD_QUEUE]  = "queue",
        [DUMP_DIRFD_THREAD] = "threads"
    };
    errno = 0;
    rootfd = mkdirat_open ( dir_path, AT_FDCWD, DUMP_DIRMODE );

    if ( rootfd == -1 ) {
        error ( "Cannot open directory \"%s\"", dir_path );
        goto l_sync_dump_end;
    }

    fd_out = openat ( rootfd, "instance", O_WRONLY | O_CREAT, DUMP_FILEMODE );

    if ( fd_out == -1 ) {
        // NOTE(review): BUG — format string has "%s" but no argument is
        // passed; reads a garbage pointer. Should pass "instance" (or the
        // path). Left unchanged here because this block is incomplete.
        error ( "Cannot open file \"%s\" for writing" );
        goto l_sync_dump_end;
    }

    dprintf ( fd_out, "status == %s\n", getenv ( "CLSYNC_STATUS" ) );	// TODO: remove getenv() from here
    arg.fd_out = fd_out;
    arg.data = DUMP_LTYPE_EVINFO;

    if ( indexes_p->nonthreaded_syncing_fpath2ei_ht != NULL )
        g_hash_table_foreach ( indexes_p->nonthreaded_syncing_fpath2ei_ht, sync_dump_liststep, &arg );

    close ( fd_out );
    arg.dirfd[DUMP_DIRFD_ROOT] = rootfd;
    dirfd_obj = DUMP_DIRFD_ROOT + 1;

    // Create (and open) the "queue" and "threads" subdirectories.
    while ( dirfd_obj < DUMP_DIRFD_MAX ) {
        const char *const subdir = subdirs[dirfd_obj];
        arg.dirfd[dirfd_obj] = mkdirat_open ( subdir, rootfd, DUMP_DIRMODE );

        if ( arg.dirfd[dirfd_obj] == -1 ) {
            error ( "Cannot open directory \"%s\"", subdir );
            goto l_sync_dump_end;
        }

        dirfd_obj++;
    }

    int queue_id = 0;

    // One dump file per queue: event-info list, then its exclude list.
    while ( queue_id < QUEUE_MAX ) {
        char buf[BUFSIZ];
        snprintf ( buf, BUFSIZ, "%u", queue_id );
        arg.fd_out = openat ( arg.dirfd[DUMP_DIRFD_QUEUE], buf, O_WRONLY | O_CREAT, DUMP_FILEMODE );
        arg.data = DUMP_LTYPE_EVINFO;
        g_hash_table_foreach ( indexes_p->fpath2ei_coll_ht[queue_id], sync_dump_liststep, &arg );

        if ( indexes_p->exc_fpath_coll_ht[queue_id] != NULL ) {
            // NOTE(review): line missing from this view here (presumably
            // `arg.data = DUMP_LTYPE_EXCLUDE;`) — confirm upstream.
            g_hash_table_foreach ( indexes_p->exc_fpath_coll_ht[queue_id], sync_dump_liststep, &arg );
        }

        close ( arg.fd_out );
        queue_id++;
    }

#ifdef THREADING_SUPPORT
    // NOTE(review): line missing from this view here (presumably iterating
    // threads with sync_dump_thread()) — confirm upstream.
#endif
l_sync_dump_end:
    dirfd_obj = DUMP_DIRFD_ROOT;

    // Close every directory fd that was actually opened.
    while ( dirfd_obj < DUMP_DIRFD_MAX ) {
        if ( arg.dirfd[dirfd_obj] != -1 && arg.dirfd[dirfd_obj] != 0 )
            close ( arg.dirfd[dirfd_obj] );

        dirfd_obj++;
    }

    if ( errno )
        error ( "Cannot create the dump to \"%s\"", dir_path );

    return errno;
}
3863
3864/* === /DUMP === */
3865
// Body of a small helper whose signature line is not visible in this view —
// presumably `sync_sigchld()` (it is called from sync_sighandler() on
// SIGCHLD) — TODO confirm upstream.
{
    debug ( 9, "" );
    // NOTE(review): one source line is missing from this view here.
    return;
}
3872
/**
 * Dedicated signal-handling thread: sigwait()s on the blocked signal set and
 * translates each signal into a main-loop state switch (or child cleanup).
 *
 * Before the main loop starts (state_p == NULL) most signals cause an
 * immediate exit; afterwards they are mapped to STATE_* transitions via
 * sync_switch_state().
 *
 * @return 0 when the handler loop ends (state became TERM/EXIT).
 */
int sync_sighandler ( sighandler_arg_t *sighandler_arg_p )
{
    int signal = 0, ret;
    sigset_t sigset_full;
    ctx_t *ctx_p = sighandler_arg_p->ctx_p;
//	indexes_t *indexes_p	 = sighandler_arg_p->indexes_p;
    pthread_t pthread_parent = sighandler_arg_p->pthread_parent;
//	sigset_t *sigset_p	 = sighandler_arg_p->sigset_p;
    int *exitcode_p = sighandler_arg_p->exitcode_p;
    sync_sighandler_exitcode_p = exitcode_p;
    // NOTE(review): one source line is missing from this view here.
    sigfillset ( &sigset_full );

    while ( state_p == NULL || ( ( ctx_p->state != STATE_TERM ) && ( ctx_p->state != STATE_EXIT ) ) ) {
        debug ( 3, "waiting for signal (is sigset filled == %i)", sigismember ( &sigset_full, SIGTERM ) );
        ret = sigwait ( &sigset_full, &signal );

        // Main loop not started yet: exit-class signals terminate directly.
        if ( state_p == NULL ) {
            switch ( signal ) {
                case SIGALRM:
                    *exitcode_p = ETIME;

                /* fallthrough */
                case SIGQUIT:
                case SIGTERM:
                case SIGINT:
                case SIGCHLD:
                    // TODO: remove the exit() from here. Main thread should exit itself
                    exit ( *exitcode_p );
                    break;

                default:
                    warning ( "Got signal %i, but the main loop is not started, yet. Ignoring the signal.", signal );
                    break;
            }

            continue;
        }

        debug ( 3, "got signal %i. ctx_p->state == %i.", signal, ctx_p->state );

        if ( ret ) {
            // TODO: handle an error here
        }

        // User-configured signal: re-parse the associated config block.
        if ( ctx_p->customsignal[signal] != NULL ) {
            if ( config_block_parse ( ctx_p, ctx_p->customsignal[signal] ) ) {
                *exitcode_p = errno;
                signal = SIGTERM;
            }

            continue;
        }

        switch ( signal ) {
            case SIGALRM:
                *exitcode_p = ETIME;

            /* fallthrough */
            case SIGQUIT:
                sync_switch_state ( ctx_p, pthread_parent, STATE_PREEXIT );
                break;

            case SIGTERM:
            case SIGINT:
                sync_switch_state ( ctx_p, pthread_parent, STATE_TERM );

                // bugfix of https://github.com/clsync/clsync/issues/44
                while ( ctx_p->children ) {	// Killing children if non-pthread mode or/and (mode=="so" or mode=="rsyncso")
                    pid_t child_pid = ctx_p->child_pid[--ctx_p->children];

                    // Escalate: original signal, then SIGQUIT, SIGTERM, SIGKILL.
                    if ( privileged_kill_child ( child_pid, signal, 0 ) == ENOENT )
                        continue;

                    if ( signal != SIGQUIT )
                        if ( privileged_kill_child ( child_pid, SIGQUIT, 0 ) == ENOENT )
                            continue;

                    if ( signal != SIGTERM )
                        if ( privileged_kill_child ( child_pid, SIGTERM, 0 ) == ENOENT )
                            continue;

                    if ( privileged_kill_child ( child_pid, SIGKILL, 0 ) == ENOENT )
                        continue;
                }

                break;

            case SIGHUP:
                sync_switch_state ( ctx_p, pthread_parent, STATE_REHASH );
                break;

            case SIGCHLD:
                sync_sigchld();
                break;

            case SIGUSR_THREAD_GC:
                sync_switch_state ( ctx_p, pthread_parent, STATE_THREAD_GC );
                break;

            case SIGUSR_INITSYNC:
                sync_switch_state ( ctx_p, pthread_parent, STATE_INITSYNC );
                break;

            case SIGUSR_DUMP:
                // NOTE(review): line missing from this view here (presumably
                // the sync_dump() invocation) — confirm upstream.
                break;

            default:
                error ( "Unknown signal: %i. Exit.", signal );
                sync_switch_state ( ctx_p, pthread_parent, STATE_TERM );
                break;
        }
    }

    debug ( 3, "signal handler closed." );
    return 0;
}
3990
// Body of a termination helper whose signature line is not visible in this
// view (presumably `sync_term()` or similar — TODO confirm upstream).
// Asks the signal-handler thread to shut everything down via SIGTERM.
{
    // NOTE(review): one source line is missing from this view here.
    return pthread_kill ( pthread_sighandler, SIGTERM );
}
3996
3997
/**
 * Top-level sync driver: sets up the signal-handler thread, index hash
 * tables, optional shared-object handler (with security checks), the FS
 * monitor subsystem, cluster/control subsystems; runs sync_loop(); then
 * tears everything down in reverse order.
 *
 * NOTE(review): many source lines are missing from this view (marked below);
 * in particular the dlsym() calls and the monitor function-pointer
 * assignments are not visible — confirm against upstream before relying on
 * the exact control flow here.
 *
 * @return 0 on clean shutdown, otherwise the first error encountered.
 */
__extension__ int sync_run ( ctx_t *ctx_p )
{
    int ret;
    // NOTE(review): line missing from this view here (likely the
    // sighandler_arg declaration).
    indexes_t indexes = {NULL};
    debug ( 9, "Creating signal handler thread" );
    {
        int i;
        // NOTE(review): line missing from this view here.
        sigset_t sigset_sighandler;
        sigemptyset ( &sigset_sighandler );
        sigaddset ( &sigset_sighandler, SIGALRM );
        sigaddset ( &sigset_sighandler, SIGHUP );
        sigaddset ( &sigset_sighandler, SIGQUIT );
        sigaddset ( &sigset_sighandler, SIGTERM );
        sigaddset ( &sigset_sighandler, SIGINT );
        sigaddset ( &sigset_sighandler, SIGCHLD );
        sigaddset ( &sigset_sighandler, SIGUSR_THREAD_GC );
        sigaddset ( &sigset_sighandler, SIGUSR_INITSYNC );
        sigaddset ( &sigset_sighandler, SIGUSR_DUMP );
        i = 0;

        // Also route any user-configured custom signals to the handler.
        while ( i < MAXSIGNALNUM ) {
            if ( ctx_p->customsignal[i] != NULL )
                sigaddset ( &sigset_sighandler, i );

            i++;
        }

        // Block these signals in all threads; the handler thread sigwait()s.
        ret = pthread_sigmask ( SIG_BLOCK, &sigset_sighandler, NULL );

        if ( ret ) return ret;

        // NOTE(review): line missing from this view here (likely
        // sighandler_arg.ctx_p assignment).
        sighandler_arg.pthread_parent = pthread_self();
        // NOTE(review): line missing from this view here (likely
        // sighandler_arg.exitcode_p assignment).
        sighandler_arg.sigset_p = &sigset_sighandler;
        ret = pthread_create ( &pthread_sighandler, NULL, ( void * ( * ) ( void * ) ) sync_sighandler, &sighandler_arg );

        if ( ret ) return ret;

        // SIGUSR_BLOPINT stays deliverable to this thread to break out of
        // blocking calls.
        sigset_t sigset_parent;
        sigemptyset ( &sigset_parent );
        sigaddset ( &sigset_parent, SIGUSR_BLOPINT );
        ret = pthread_sigmask ( SIG_UNBLOCK, &sigset_parent, NULL );

        if ( ret ) return ret;

        signal ( SIGUSR_BLOPINT, sync_sig_int );
    }
    debug ( 9, "Creating hash tables" );
    {
        int i;
        ctx_p->indexes_p = &indexes;
        indexes.wd2fpath_ht = g_hash_table_new_full ( g_direct_hash, g_direct_equal, 0, 0 );
        indexes.fpath2wd_ht = g_hash_table_new_full ( g_str_hash, g_str_equal, free, 0 );
        indexes.fpath2ei_ht = g_hash_table_new_full ( g_str_hash, g_str_equal, free, free );
        indexes.exc_fpath_ht = g_hash_table_new_full ( g_str_hash, g_str_equal, free, 0 );
        indexes.out_lines_aggr_ht = g_hash_table_new_full ( g_str_hash, g_str_equal, free, 0 );
        indexes.fileinfo_ht = g_hash_table_new_full ( g_str_hash, g_str_equal, free, free );
        i = 0;

        // Per-queue collection tables (LOCKWAIT has no exclude table).
        while ( i < QUEUE_MAX ) {
            switch ( i ) {
                case QUEUE_LOCKWAIT:
                    indexes.fpath2ei_coll_ht[i] = g_hash_table_new_full ( g_str_hash, g_str_equal, free, 0 );
                    break;

                default:
                    indexes.fpath2ei_coll_ht[i] = g_hash_table_new_full ( g_str_hash, g_str_equal, free, free );
                    indexes.exc_fpath_coll_ht[i] = g_hash_table_new_full ( g_str_hash, g_str_equal, free, 0 );
            }

            i++;
        }
    }
    debug ( 9, "Loading dynamical libraries" );

    if ( ctx_p->flags[MODE] == MODE_SO || ctx_p->flags[MODE] == MODE_RSYNCSO ) {
        /* security checks before dlopen */
        struct stat so_stat;

        if ( stat ( ctx_p->handlerfpath, &so_stat ) == -1 ) {
            error ( "Can't stat shared object file \"%s\": %s", ctx_p->handlerfpath, strerror ( errno ) );
            return errno;
        }

        // allow normal files only (stat will follow symlinks)
        if ( !S_ISREG ( so_stat.st_mode ) ) {
            error ( "Shared object \"%s\" must be a regular file (or symlink to a regular file).",
                    ctx_p->handlerfpath, so_stat.st_uid );
            return EPERM;
        }

        // allowed owners are: root and real uid (who started clsync prior to setuid)
        if ( so_stat.st_uid && so_stat.st_uid != getuid() ) {
            /* check for rare case when clsync binary owner is neither root nor current uid */
            struct stat cl_stat;
            char *cl_str = alloca ( 20 ); // allocate for "/proc/PID/exe"
            int ret;
            snprintf ( cl_str, 20, "/proc/%i/exe", getpid() );

            // stat clsync binary itself to get its owner's uid
            if ( ( ret = stat ( cl_str, &cl_stat ) ) == -1 ) {
                error ( "Can't stat clsync binary file \"%s\": %s", cl_str, strerror ( errno ) );
            }

            if ( ret == -1 || so_stat.st_uid != cl_stat.st_uid ) {
                error ( "Wrong owner for shared object \"%s\": %i. "
                        "Only root, clsync file owner and user started the program are allowed.",
                        ctx_p->handlerfpath, so_stat.st_uid );
                return EPERM;
            }
        }

        // do not allow special bits and g+w,o+w
        if ( so_stat.st_mode & ( S_ISUID | S_ISGID | S_ISVTX | S_IWGRP | S_IWOTH ) ) {
            error ( "Wrong shared object \"%s\" permissions: %#lo"
                    "Special bits, group and world writable are not allowed.",
                    ctx_p->handlerfpath, so_stat.st_mode & 07777 );
            return EPERM;
        }

        // dlopen()
        void *synchandler_handle = dlopen ( ctx_p->handlerfpath, RTLD_NOW | RTLD_LOCAL );

        if ( synchandler_handle == NULL ) {
            error ( "Cannot load shared object file \"%s\": %s", ctx_p->handlerfpath, dlerror() );
            return -1;
        }

        // resolving init, sync and deinit functions' handlers
        ctx_p->handler_handle = synchandler_handle;
        // NOTE(review): dlsym() line(s) missing from this view here.

        if ( ctx_p->flags[MODE] == MODE_RSYNCSO ) {
            // NOTE(review): dlsym() line for the rsync symbol missing here.

            if ( ctx_p->handler_funct.rsync == NULL ) {
                char *dlerror_str = dlerror();
                error ( "Cannot resolve symbol "API_PREFIX"rsync in shared object \"%s\": %s",
                        ctx_p->handlerfpath, dlerror_str != NULL ? dlerror_str : "No error description returned." );
            }
        } else {
            // NOTE(review): dlsym() line for the sync symbol missing here.

            if ( ctx_p->handler_funct.sync == NULL ) {
                char *dlerror_str = dlerror();
                error ( "Cannot resolve symbol "API_PREFIX"sync in shared object \"%s\": %s",
                        ctx_p->handlerfpath, dlerror_str != NULL ? dlerror_str : "No error description returned." );
            }
        }

        // NOTE(review): dlsym() line for the deinit symbol missing here.

        // running init function
        if ( ctx_p->handler_funct.init != NULL )
            if ( ( ret = ctx_p->handler_funct.init ( ctx_p, &indexes ) ) ) {
                error ( "Cannot init sync-handler module." );
                return ret;
            }
    }

    // Initializing rand-generator if it's required

    if ( ctx_p->listoutdir )
        srand ( time ( NULL ) );

    if ( !ctx_p->flags[ONLYINITSYNC] ) {
        debug ( 9, "Initializing FS monitor kernel subsystem in this userspace application" );

        if ( sync_notify_init ( ctx_p ) )
            return errno;
    }

    if ( ( ret = privileged_init ( ctx_p ) ) )
        return ret;

    {
        // Preparing monitor subsystem context function pointers
        // NOTE(review): each case body below lost its function-pointer
        // assignment lines in this view — confirm upstream.
        switch ( ctx_p->flags[MONITOR] ) {
#ifdef INOTIFY_SUPPORT

            case NE_INOTIFY:
                break;
#endif
#ifdef KQUEUE_SUPPORT

            case NE_KQUEUE:
                break;
#endif
#ifdef BSM_SUPPORT

            case NE_BSM:
            case NE_BSM_PREFETCH:
                break;
#endif
#ifdef GIO_SUPPORT

            case NE_GIO:
                break;
#endif
#ifdef DTRACEPIPE_SUPPORT

            case NE_DTRACEPIPE:
                break;
#endif
#ifdef VERYPARANOID

            default:
                critical ( "Unknown FS monitor subsystem: %i", ctx_p->flags[MONITOR] );
#endif
        }
    }
#ifdef CLUSTER_SUPPORT
    // Initializing cluster subsystem

    if ( ctx_p->cluster_iface != NULL ) {
        ret = cluster_init ( ctx_p, &indexes );

        if ( ret ) {
            error ( "Cannot initialize cluster subsystem." );
            cluster_deinit();
            return ret;
        }
    }

#endif
#ifdef ENABLE_SOCKET

    // Creating control socket
    if ( ctx_p->socketpath != NULL )
        ret = control_run ( ctx_p );

#endif

    if ( !ctx_p->flags[ONLYINITSYNC] ) {
        // Marking file tree for FS monitor
        debug ( 30, "Running recursive notify marking function" );
        // NOTE(review): the marking call assigning `ret` is missing from
        // this view — confirm upstream.

        if ( ret ) return ret;
    }

    // "Infinite" loop of processling the events
    ret = sync_loop ( ctx_p, &indexes );

    if ( ret ) return ret;

    debug ( 1, "sync_loop() ended" );
#ifdef ENABLE_SOCKET

    // Removing control socket
    if ( ctx_p->socketpath != NULL )
        // NOTE(review): cleanup call missing from this view (likely
        // control_cleanup ( ctx_p )) — confirm upstream.

#endif
    debug ( 1, "killing sighandler" );
    // TODO: Do cleanup of watching points
    pthread_kill ( pthread_sighandler, SIGINT );
#ifdef VALGRIND
    pthread_join ( pthread_sighandler, NULL );	// TODO: fix a deadlock
#endif
    // Killing children
#ifdef THREADING_SUPPORT
    thread_cleanup ( ctx_p );
#endif
    debug ( 2, "Deinitializing the FS monitor subsystem" );

    switch ( ctx_p->flags[MONITOR] ) {
#ifdef INOTIFY_SUPPORT

        case NE_INOTIFY:
            // NOTE(review): inotify deinit call missing from this view.
            break;
#endif
#ifdef KQUEUE_SUPPORT

        case NE_KQUEUE:
            kqueue_deinit ( ctx_p );
            break;
#endif
#ifdef BSM_SUPPORT

        case NE_BSM:
        case NE_BSM_PREFETCH:
            bsm_deinit ( ctx_p );
            break;
#endif
#ifdef GIO_SUPPORT

        case NE_GIO:
            gio_deinit ( ctx_p );
            break;
#endif
#ifdef DTRACEPIPE_SUPPORT

        case NE_DTRACEPIPE:
            // NOTE(review): dtracepipe deinit call missing from this view.
            break;
#endif
    }

    // Closing shared libraries
    if ( ctx_p->flags[MODE] == MODE_SO ) {
        int _ret;

        if ( ctx_p->handler_funct.deinit != NULL )
            if ( ( _ret = ctx_p->handler_funct.deinit() ) ) {
                error ( "Cannot deinit sync-handler module." );

                if ( !ret ) ret = _ret;
            }

        if ( dlclose ( ctx_p->handler_handle ) ) {
            error ( "Cannot unload shared object file \"%s\": %s",
                    ctx_p->handlerfpath, dlerror() );

            if ( !ret ) ret = -1;
        }
    }

    // Cleaning up run-time routines
    // NOTE(review): one cleanup call is missing from this view here.
    // Removing hash-tables
    {
        int i;
        debug ( 3, "Closing hash tables" );
        g_hash_table_destroy ( indexes.wd2fpath_ht );
        g_hash_table_destroy ( indexes.fpath2wd_ht );
        g_hash_table_destroy ( indexes.fpath2ei_ht );
        g_hash_table_destroy ( indexes.exc_fpath_ht );
        g_hash_table_destroy ( indexes.out_lines_aggr_ht );
        g_hash_table_destroy ( indexes.fileinfo_ht );
        i = 0;

        while ( i < QUEUE_MAX ) {
            switch ( i ) {
                case QUEUE_LOCKWAIT:
                    g_hash_table_destroy ( indexes.fpath2ei_coll_ht[i] );
                    break;

                default:
                    g_hash_table_destroy ( indexes.fpath2ei_coll_ht[i] );
                    g_hash_table_destroy ( indexes.exc_fpath_coll_ht[i] );
            }

            i++;
        }
    }
    // Deinitializing cluster subsystem
#ifdef CLUSTER_SUPPORT
    debug ( 3, "Deinitializing cluster subsystem" );

    if ( ctx_p->cluster_iface != NULL ) {
        int _ret;
        _ret = cluster_deinit();

        if ( _ret ) {
            error ( "Cannot deinitialize cluster subsystem.", strerror ( _ret ), _ret );
            ret = _ret;
        }
    }

#endif
    // One second for another threads
#ifdef VERYPARANOID
    debug ( 9, "sleep("TOSTR ( SLEEP_SECONDS ) ")" );
    sleep ( SLEEP_SECONDS );
#endif

    if ( ctx_p->flags[EXITHOOK] ) {
        char *argv[] = { ctx_p->exithookfile, ctx_p->label, NULL};
        exec_argv ( argv, NULL );
    }

    // Cleaning up cgroups staff
#ifdef CGROUP_SUPPORT
    debug ( 3, "Cleaning up cgroups staff" );

    if ( ctx_p->flags[FORBIDDEVICES] )
        error_on ( privileged_clsync_cgroup_deinit ( ctx_p ) );

#endif
    debug ( 3, "privileged_deinit()" );
    ret |= privileged_deinit ( ctx_p );
    debug ( 3, "finish" );
    return ret;
}
4402
int(* api_funct_init)(struct ctx *, struct indexes *)
Definition clsync.h:51
@ EVIF_NONE
Definition clsync.h:57
@ EVIF_RECURSIVELY
Definition clsync.h:58
@ EVIF_CONTENTRECURSIVELY
Definition clsync.h:59
int(* api_funct_sync)(int n, api_eventinfo_t *)
Definition clsync.h:52
int(* api_funct_rsync)(const char *inclist, const char *exclist)
Definition clsync.h:53
int(* api_funct_deinit)()
Definition clsync.h:54
enum eventinfo_flags eventinfo_flags_t
Definition clsync.h:61
enum eventobjtype eventobjtype_t
Definition clsync.h:37
@ EOT_DIR
Definition clsync.h:33
@ EOT_DOESNTEXIST
Definition clsync.h:31
@ EOT_FILE
Definition clsync.h:32
@ NE_INOTIFY
Definition common.h:107
@ NE_FANOTIFY
Definition common.h:106
@ NE_KQUEUE
Definition common.h:108
@ NE_GIO
Definition common.h:112
@ NE_BSM_PREFETCH
Definition common.h:110
@ NE_DTRACEPIPE
Definition common.h:111
@ NE_BSM
Definition common.h:109
enum initsync initsync_t
Definition common.h:214
#define IN_CREATE_SELF
Definition common.h:87
@ PTHREAD_MUTEX_MAX
Definition common.h:156
@ PTHREAD_MUTEX_STATE
Definition common.h:153
@ PTHREAD_MUTEX_SELECT
Definition common.h:154
@ PTHREAD_MUTEX_THREADSINFO
Definition common.h:155
initsync
Definition common.h:209
@ INITSYNC_FULL
Definition common.h:211
@ INITSYNC_SUBDIR
Definition common.h:212
@ PM_SAFE
Definition common.h:118
@ PM_OFF
Definition common.h:117
#define KILL_TIMEOUT
#define COUNTER_LIMIT
#define MAXARGUMENTS
#define FANOTIFY_FLAGS
#define MAXSIGNALNUM
#define DUMP_DIRMODE
#define DUMP_FILEMODE
#define BUFSIZ
#define MAXCHILDREN
#define API_PREFIX
#define SLEEP_SECONDS
#define FANOTIFY_EVFLAGS
#define INOTIFY_FLAGS
#define ALLOC_PORTION
int control_run(ctx_t *ctx_p)
Definition control.c:191
int control_cleanup(ctx_t *ctx_p)
Definition control.c:243
queue_id
Definition ctx.h:172
@ QUEUE_NORMAL
Definition ctx.h:173
@ QUEUE_LOCKWAIT
Definition ctx.h:176
@ QUEUE_AUTO
Definition ctx.h:179
@ QUEUE_INSTANT
Definition ctx.h:175
@ QUEUE_BIGFILE
Definition ctx.h:174
@ QUEUE_MAX
Definition ctx.h:178
@ DEBUG
Definition ctx.h:71
@ INITFULL
Definition ctx.h:79
@ STANDBYFILE
Definition ctx.h:93
@ RSYNCPREFERINCLUDE
Definition ctx.h:86
@ HAVERECURSIVESYNC
Definition ctx.h:84
@ SYNCLISTSIMPLIFY
Definition ctx.h:87
@ DONTUNLINK
Definition ctx.h:78
@ ONEFILESYSTEM
Definition ctx.h:88
@ MAXITERATIONS
Definition ctx.h:100
@ SKIPINITSYNC
Definition ctx.h:90
@ FORBIDDEVICES
Definition ctx.h:123
@ MODE
Definition ctx.h:76
@ MONITOR
Definition ctx.h:104
@ MODSIGN
Definition ctx.h:127
@ EXITHOOK
Definition ctx.h:94
@ THREADING
Definition ctx.h:55
@ IGNOREFAILURES
Definition ctx.h:101
@ ONLYINITSYNC
Definition ctx.h:91
@ EXCLUDEMOUNTPOINTS
Definition ctx.h:58
@ EXITONNOEVENTS
Definition ctx.h:92
@ PREEXITHOOK
Definition ctx.h:96
@ SOFTEXITSYNC
Definition ctx.h:133
enum state_enum state_t
Definition ctx.h:272
@ MODE_RSYNCSHELL
Definition ctx.h:165
@ MODE_DIRECT
Definition ctx.h:163
@ MODE_RSYNCSO
Definition ctx.h:167
@ MODE_SIMPLE
Definition ctx.h:162
@ MODE_RSYNCDIRECT
Definition ctx.h:166
@ MODE_SO
Definition ctx.h:168
#define STATE_STARTING(state_p)
Definition ctx.h:257
@ SHARGS_PRIMARY
Definition ctx.h:245
@ SHARGS_INITIAL
Definition ctx.h:246
enum ruleaction_enum ruleaction_t
Definition ctx.h:196
static char *const status_descr[]
Definition ctx.h:274
@ SHFL_EXCLUDE_LIST_PATH
Definition ctx.h:240
@ SHFL_INCLUDE_LIST_PATH
Definition ctx.h:239
@ SHFL_INCLUDE_LIST
Definition ctx.h:238
@ RA_MONITOR
Definition ctx.h:191
@ RA_SYNC
Definition ctx.h:192
@ RA_ALL
Definition ctx.h:194
@ RA_WALK
Definition ctx.h:193
enum queue_id queue_id_t
Definition ctx.h:181
@ SIGUSR_BLOPINT
Definition ctx.h:202
@ SIGUSR_INITSYNC
Definition ctx.h:201
@ SIGUSR_THREAD_GC
Definition ctx.h:200
@ SIGUSR_DUMP
Definition ctx.h:203
@ STATE_EXIT
Definition ctx.h:259
@ STATE_TERM
Definition ctx.h:266
@ STATE_UNKNOWN
Definition ctx.h:270
@ STATE_SYNCHANDLER_ERR
Definition ctx.h:262
@ STATE_HOLDON
Definition ctx.h:269
@ STATE_REHASH
Definition ctx.h:263
@ STATE_THREAD_GC
Definition ctx.h:267
@ STATE_INITSYNC
Definition ctx.h:268
@ STATE_PREEXIT
Definition ctx.h:264
@ STATE_RUNNING
Definition ctx.h:261
@ STATE_LASTSYNC
Definition ctx.h:265
#define register_blockthread(thread)
Definition ctx.h:31
#define error_or_debug(debug_level,...)
Definition error.h:51
#define critical(...)
Definition error.h:32
#define error(...)
Definition error.h:36
#define debug(debug_level,...)
Definition error.h:50
#define warning(...)
Definition error.h:40
#define critical_on(cond)
Definition error.h:33
#define error_on(cond)
Definition error.h:37
int mkdirat_open(const char *const dir_path, int dirfd_parent, mode_t dir_mode)
Combination of mkdirat() and openat()
Definition fileutils.c:179
int fileutils_copy(const char *path_from, const char *path_to)
Copies file.
Definition fileutils.c:78
uint32_t stat_diff(stat64_t *a, stat64_t *b)
Definition fileutils.c:219
GHashTable * g_hash_table_dup(GHashTable *src, GHashFunc hash_funct, GEqualFunc key_equal_funct, GDestroyNotify key_destroy_funct, GDestroyNotify value_destroy_funct, GDupFunc key_dup_funct, GDupFunc value_dup_funct)
Definition glibex.c:42
#define MSG_SECURITY_PROBLEM(a)
Definition macros.h:50
#define TOSTR(a)
Definition macros.h:44
#define XTOSTR(a)
Definition macros.h:45
#define COLLECTDELAY_INSTANT
Definition macros.h:47
#define SAFE(code, onfail)
Definition macros.h:56
#define require_strlen_le(str, limit)
Definition macros.h:52
#define PEF_NONE
Definition main.c:670
int config_block_parse(ctx_t *ctx_p, const char *const config_block_name)
Definition main.c:2547
char * parameter_expand(ctx_t *ctx_p, char *arg, int exceptionflags, int *macro_count_p, int *expand_count_p, const char *(*parameter_get)(const char *variable_name, void *arg), void *parameter_get_arg)
Expands option values, e. g. "/var/log/clsync-%label%.pid" -> "/var/log/clsync-clone....
Definition main.c:693
int main_status_update(ctx_t *ctx_p)
Definition main.c:2673
int main_rehash(ctx_t *ctx_p)
Definition main.c:2653
int sethandler_sigchld(void(*handler)())
Definition main.c:440
char ** argv
Definition main.c:2725
int(* bsm_handle)(struct ctx *ctx_p, struct indexes *indexes_p)
Definition mon_bsm.c:80
int bsm_deinit(ctx_t *ctx_p)
Definition mon_bsm.c:854
int(* bsm_wait)(struct ctx *ctx_p, struct indexes *indexes_p, struct timeval *timeout_p)
Definition mon_bsm.c:79
int bsm_init(ctx_t *ctx_p)
Definition mon_bsm.c:303
int bsm_add_watch_dir(struct ctx *ctx_p, struct indexes *indexes_p, const char *const accpath)
Definition mon_bsm.c:841
int dtracepipe_deinit(ctx_t *ctx_p)
int dtracepipe_wait(struct ctx *ctx_p, struct indexes *indexes_p, struct timeval *timeout_p)
int dtracepipe_handle(struct ctx *ctx_p, struct indexes *indexes_p)
int dtracepipe_add_watch_dir(struct ctx *ctx_p, struct indexes *indexes_p, const char *const accpath)
int gio_deinit(ctx_t *ctx_p)
Definition mon_gio.c:380
int gio_init(ctx_t *ctx_p)
Definition mon_gio.c:366
int gio_add_watch_dir(ctx_t *ctx_p, indexes_t *indexes_p, const char *const accpath)
Definition mon_gio.c:180
int gio_wait(ctx_t *ctx_p, struct indexes *indexes_p, struct timeval *tv_p)
Definition mon_gio.c:297
int gio_handle(ctx_t *ctx_p, indexes_t *indexes_p)
Definition mon_gio.c:309
int inotify_handle(ctx_t *ctx_p, indexes_t *indexes_p)
Definition mon_inotify.c:76
int inotify_deinit(ctx_t *ctx_p)
int inotify_wait(ctx_t *ctx_p, struct indexes *indexes_p, struct timeval *tv_p)
Definition mon_inotify.c:59
int inotify_add_watch_dir(ctx_t *ctx_p, indexes_t *indexes_p, const char *const accpath)
Definition mon_inotify.c:52
int kqueue_wait(ctx_t *ctx_p, struct indexes *indexes_p, struct timeval *tv_p)
Definition mon_kqueue.c:532
int kqueue_handle(ctx_t *ctx_p, indexes_t *indexes_p)
Definition mon_kqueue.c:782
int kqueue_deinit(ctx_t *ctx_p)
Definition mon_kqueue.c:827
int kqueue_add_watch_dir(ctx_t *ctx_p, indexes_t *indexes_p, const char *const accpath)
Definition mon_kqueue.c:491
ctx_t * ctx_p
Definition mon_kqueue.c:85
#define IN_ISDIR
Definition mon_kqueue.h:39
int kqueue_init()
#define ETIME
Definition port-hacks.h:24
struct stat64 stat64_t
Definition port-hacks.h:65
int privileged_init(ctx_t *ctx_p)
int privileged_deinit(ctx_t *ctx_p)
#define privileged_fts_read(a, b)
Definition privileged.h:124
#define privileged_check(...)
Definition privileged.h:120
#define privileged_kill_child
Definition privileged.h:147
#define privileged_waitpid
Definition privileged.h:133
#define privileged_fts_open(a, b, c, d)
Definition privileged.h:123
#define privileged_fork_execvp
Definition privileged.h:148
#define privileged_fts_close(a, b)
Definition privileged.h:125
ruleaction_t rules_getperm(const char *fpath, mode_t st_mode, rule_t *rules_p, ruleaction_t ruleactions)
Definition rules.c:403
ruleaction_t rules_search_getperm(const char *fpath, mode_t st_mode, rule_t *rules_p, const ruleaction_t ruleaction, rule_t **rule_pp)
Checks file path by rules' expressions (parsed from file)
Definition rules.c:327
uint32_t flags
Definition clsync.h:41
eventobjtype_t objtype_new
Definition clsync.h:45
size_t path_len
Definition clsync.h:42
const char * path
Definition clsync.h:43
uint32_t evmask
Definition clsync.h:40
eventobjtype_t objtype_old
Definition clsync.h:44
api_funct_sync sync
Definition ctx.h:223
api_funct_init init
Definition ctx.h:222
api_funct_rsync rsync
Definition ctx.h:224
api_funct_deinit deinit
Definition ctx.h:225
Definition ctx.h:315
char * listoutdir
Definition ctx.h:387
unsigned int syncdelay
Definition ctx.h:391
char * handlerfpath
Definition ctx.h:382
pid_t child_pid[MAXCHILDREN]
Definition ctx.h:330
int retries
Definition ctx.h:389
char * preexithookfile
Definition ctx.h:349
void * fsmondata
Definition ctx.h:419
int children
Definition ctx.h:331
char * watchdirwslash
Definition ctx.h:352
char * destdir
Definition ctx.h:350
pthread_t blockthread[(1<< 4)]
Definition ctx.h:398
void * handler_handle
Definition ctx.h:383
shflags_t synchandler_argf
Definition ctx.h:416
size_t bfilethreshold
Definition ctx.h:390
char * exithookfile
Definition ctx.h:348
char * destdirwslash
Definition ctx.h:353
char * socketpath
Definition ctx.h:355
char * label
Definition ctx.h:344
char * watchdir
Definition ctx.h:345
api_functs_t handler_funct
Definition ctx.h:384
dev_t st_dev
Definition ctx.h:335
unsigned int synctimeout
Definition ctx.h:395
queueinfo_t _queues[QUEUE_MAX]
Definition ctx.h:392
volatile state_t state
Definition ctx.h:317
size_t blockthread_count
Definition ctx.h:399
char * dump_path
Definition ctx.h:356
char * standbyfile
Definition ctx.h:347
struct notifyenginefuncts notifyenginefunct
Definition ctx.h:388
int flags[(1<< 10)]
Definition ctx.h:338
time_t synctime
Definition ctx.h:394
size_t rules_count
Definition ctx.h:334
unsigned int rsyncinclimit
Definition ctx.h:393
size_t watchdirlen
Definition ctx.h:375
uint32_t iteration_num
Definition ctx.h:332
char * customsignal[MAXSIGNALNUM+1]
Definition ctx.h:343
synchandler_args_t synchandler_args[SHARGS_MAX]
Definition ctx.h:415
rule_t rules[MAXRULES]
Definition ctx.h:333
char isignoredexitcode[(1<< 8)]
Definition ctx.h:397
char buf[(1<< 16)+1]
Definition common.h:171
int evcount
Definition common.h:161
unsigned int linescount
Definition common.h:168
const char * evmask_str
Definition common.h:177
char excf_path[PATH_MAX+1]
Definition common.h:162
const char * list_type_str
Definition common.h:176
char outf_path[PATH_MAX+1]
Definition common.h:163
FILE * outf
Definition common.h:164
int api_ei_count
Definition common.h:170
api_eventinfo_t * api_ei
Definition common.h:169
size_t include_list_count
Definition common.h:175
ctx_t * ctx_p
Definition common.h:165
void * data
Definition common.h:167
const char * include_list[(1<< 8)+2]
Definition common.h:174
uint32_t flags
Definition common.h:148
size_t fsize
Definition common.h:147
unsigned int seqid_min
Definition common.h:142
uint32_t evmask
Definition common.h:141
eventobjtype_t objtype_new
Definition common.h:145
int wd
Definition common.h:146
eventobjtype_t objtype_old
Definition common.h:144
unsigned int seqid_max
Definition common.h:143
stat64_t lst
Definition indexes.h:30
GHashTable * fpath2ei_coll_ht[QUEUE_MAX]
Definition indexes.h:40
GHashTable * fpath2wd_ht
Definition indexes.h:36
GHashTable * exc_fpath_ht
Definition indexes.h:38
GHashTable * wd2fpath_ht
Definition indexes.h:35
GHashTable * fpath2ei_ht
Definition indexes.h:37
GHashTable * out_lines_aggr_ht
Definition indexes.h:41
GHashTable * fileinfo_ht
Definition indexes.h:43
GHashTable * nonthreaded_syncing_fpath2ei_ht
Definition indexes.h:42
GHashTable * exc_fpath_coll_ht[QUEUE_MAX]
Definition indexes.h:39
int(* add_watch_dir)(struct ctx *ctx_p, struct indexes *indexes_p, const char *const accpath)
Definition ctx.h:232
int(* handle)(struct ctx *ctx_p, struct indexes *indexes_p)
Definition ctx.h:231
int(* wait)(struct ctx *ctx_p, struct indexes *indexes_p, struct timeval *tv_p)
Definition ctx.h:230
unsigned int collectdelay
Definition ctx.h:216
time_t stime
Definition ctx.h:217
Definition ctx.h:206
int * exitcode_p
Definition common.h:220
ctx_t * ctx_p
Definition common.h:217
pthread_t pthread_parent
Definition common.h:219
sigset_t * sigset_p
Definition common.h:221
ctx_t * ctx_p
Definition sync.c:3696
int dirfd[DUMP_DIRFD_MAX]
Definition sync.c:3697
char isexpanded[MAXARGUMENTS]
Definition ctx.h:253
char * v[MAXARGUMENTS]
Definition ctx.h:251
state_t state
Definition sync.h:39
int try_n
Definition sync.h:47
GHashTable * fpath2ei_ht
Definition sync.h:45
uint32_t iteration
Definition sync.h:32
int exitcode
Definition sync.h:37
api_eventinfo_t * ei
Definition sync.h:51
int child_pid
Definition sync.h:43
int n
Definition sync.h:50
pthread_t pthread
Definition sync.h:36
int thread_num
Definition sync.h:31
time_t starttime
Definition sync.h:41
char ** argv
Definition sync.h:35
time_t expiretime
Definition sync.h:42
thread_callbackfunct_arg_t * callback_arg
Definition sync.h:34
ctx_t * ctx_p
Definition sync.h:40
thread_callbackfunct_t callback
Definition sync.h:33
int errcode
Definition sync.h:38
threadinfo_t ** threadsstack
Definition sync.h:62
int allocated
Definition sync.h:59
threadinfo_t * threads
Definition sync.h:61
pthread_mutex_t mutex[PTHREAD_MUTEX_MAX]
Definition sync.h:56
int used
Definition sync.h:60
int stacklen
Definition sync.h:63
char mutex_init
Definition sync.h:58
pthread_cond_t cond[PTHREAD_MUTEX_MAX]
Definition sync.h:57
size_t path_full_len
Definition sync.c:2502
char * path_full
Definition sync.c:2501
char * sync_path_abs2rel(ctx_t *ctx_p, const char *path_abs, ssize_t path_abs_len, size_t *path_rel_len_p, char *path_rel_oldptr)
Definition sync.c:1018
int sync_term(int exitcode)
Definition sync.c:3991
static char ** sync_customargv(ctx_t *ctx_p, struct dosync_arg *dosync_arg_p, synchandler_args_t *args_p)
Definition sync.c:1569
eventinfo_t * ht_fpath_isincluded(GHashTable *ht, const char *const fpath)
Definition sync.c:2357
static void so_call_sync_finished(int n, api_eventinfo_t *ei)
Definition sync.c:633
pthread_t pthread_sighandler
Definition sync.c:59
static int so_call_rsync_finished(ctx_t *ctx_p, const char *inclistfile, const char *exclistfile)
Definition sync.c:781
void rsync_escape_cleanup()
Definition sync.c:2675
static unsigned int sync_seqid()
Definition sync.c:71
static void argv_free(char **argv)
Definition sync.c:1639
static int sync_indexes_fpath2ei_addfixed(ctx_t *ctx_p, indexes_t *indexes_p, const char *fpath, eventinfo_t *evinfo)
Definition sync.c:2120
#define SEQID_LE(a, b)
Definition sync.c:67
static int _exitcode_process(ctx_t *ctx_p, int exitcode)
Definition sync.c:164
int sync_idle(ctx_t *ctx_p, indexes_t *indexes_p)
Definition sync.c:3196
static void setenv_iteration(uint32_t iteration_num)
Definition sync.c:76
const char * sync_parameter_get(const char *variable_name, void *_dosync_arg_p)
Definition sync.c:1548
int sync_sighandler(sighandler_arg_t *sighandler_arg_p)
Definition sync.c:3874
void sync_queuesync_wrapper(gpointer fpath_gp, gpointer evinfo_gp, gpointer arg_gp)
Definition sync.c:2554
dump_ltype
Definition sync.c:3689
@ DUMP_LTYPE_EVINFO
Definition sync.c:3692
@ DUMP_LTYPE_EXCLUDE
Definition sync.c:3691
@ DUMP_LTYPE_INCLUDE
Definition sync.c:3690
int exec_argv(char **argv, int *child_pid)
Definition sync.c:550
static int rsync_outline(FILE *outf, char *outline, eventinfo_flags_t flags)
Definition sync.c:2734
#define SEQID_GE(a, b)
Definition sync.c:66
int sync_idle_dosync_collectedevents(ctx_t *ctx_p, indexes_t *indexes_p)
Definition sync.c:3055
static void api_evinfo_initialevmask(ctx_t *ctx_p, api_eventinfo_t *evinfo_p, int isdir)
Definition sync.c:1338
void sync_sig_int(int signal)
Definition sync.c:3548
int sync_prequeue_unload(ctx_t *ctx_p, indexes_t *indexes_p)
Definition sync.c:2564
int sync_notify_mark(ctx_t *ctx_p, const char *accpath, const char *path, size_t pathlen, indexes_t *indexes_p)
Definition sync.c:1787
static int sync_initialsync_finish(ctx_t *ctx_p, initsync_t initsync, int ret)
Definition sync.c:1664
int sync_initialsync_walk(ctx_t *ctx_p, const char *dirpath, indexes_t *indexes_p, queue_id_t queue_id, initsync_t initsync)
Definition sync.c:1347
char * rsync_escape_result
Definition sync.c:2673
void sync_sigchld()
Definition sync.c:3866
gboolean sync_trylocked(gpointer fpath_gp, gpointer evinfo_gp, gpointer arg_gp)
Definition sync.c:2504
static int sync_queuesync(const char *fpath_rel, eventinfo_t *evinfo, ctx_t *ctx_p, indexes_t *indexes_p, queue_id_t queue_id)
Definition sync.c:1251
size_t rsync_escape_result_size
Definition sync.c:2672
int sync_dump_thread(threadinfo_t *threadinfo_p, void *_arg)
Definition sync.c:3738
static int sync_dosync_exec(ctx_t *ctx_p, indexes_t *indexes_p, const char *evmask_str, const char *fpath)
Definition sync.c:2035
static void argv_dump(int debug_level, char **argv)
Definition sync.c:956
void hook_preexit(ctx_t *ctx_p)
Definition sync.c:3365
threadsinfo_t * thread_info()
Definition sync.c:190
#define SYNC_EXEC_ARGV(...)
Definition sync.c:949
int sync_exec_argv(ctx_t *ctx_p, indexes_t *indexes_p, thread_callbackfunct_t callback, thread_callbackfunct_arg_t *callback_arg_p, char **argv)
Definition sync.c:1087
#define SYNC_LOOP_CONTINUE_UNLOCK
Definition sync.c:3358
int sync_idle_dosync_collectedevents_cleanup(ctx_t *ctx_p, thread_callbackfunct_arg_t *arg_p)
Definition sync.c:2529
int sync_mark_walk(ctx_t *ctx_p, const char *dirpath, indexes_t *indexes_p)
Definition sync.c:1828
char * sync_path_rel2abs(ctx_t *ctx_p, const char *path_rel, ssize_t path_rel_len, size_t *path_abs_len_p, char *path_abs_oldptr)
Definition sync.c:988
int * sync_sighandler_exitcode_p
Definition sync.c:3873
int apievinfo2rsynclist(indexes_t *indexes_p, FILE *listfile, int n, api_eventinfo_t *apievinfo)
Writes the list to list-file for "--include-from" option of rsync using array of api_eventinfo_t.
Definition sync.c:3171
int sync_idle_dosync_collectedevents_commitpart(struct dosync_arg *dosync_arg_p)
Definition sync.c:2852
static int sync_islocked(const char *const fpath)
Definition sync.c:2416
static void evinfo_merge(ctx_t *ctx_p, eventinfo_t *evinfo_dst, eventinfo_t *evinfo_src)
Definition sync.c:108
int sync_dump(ctx_t *ctx_p, const char *const dir_path)
Definition sync.c:3773
#define SYNC_LOOP_IDLE
Definition sync.c:3350
int fileischanged(ctx_t *ctx_p, indexes_t *indexes_p, const char *path_rel, stat64_t *lst_p, int is_deleted)
Definition sync.c:2083
static void finish_iteration(ctx_t *ctx_p)
Definition sync.c:84
gpointer eidup(gpointer ei_gp)
Definition sync.c:100
static unsigned int _sync_seqid_value
Definition sync.c:70
static int so_call_rsync(ctx_t *ctx_p, indexes_t *indexes_p, const char *inclistfile, const char *exclistfile)
Definition sync.c:858
static int rsync_listpush(indexes_t *indexes_p, const char *fpath, size_t fpath_len, eventinfo_flags_t flags, unsigned int *linescount_p)
Definition sync.c:2772
void sync_dump_liststep(gpointer fpath_gp, gpointer evinfo_gp, gpointer arg_gp)
Definition sync.c:3702
dump_dirfd_obj
Definition sync.c:3681
@ DUMP_DIRFD_QUEUE
Definition sync.c:3683
@ DUMP_DIRFD_MAX
Definition sync.c:3686
@ DUMP_DIRFD_ROOT
Definition sync.c:3682
@ DUMP_DIRFD_THREAD
Definition sync.c:3684
int sync_switch_state(ctx_t *ctx_p, pthread_t pthread_parent, int newstate)
Definition sync.c:3597
gboolean sync_idle_dosync_collectedevents_rsync_exclistpush(gpointer fpath_gp, gpointer flags_gp, gpointer arg_gp)
Definition sync.c:2819
int sync_notify_init(ctx_t *ctx_p)
Definition sync.c:1953
int sync_loop(ctx_t *ctx_p, indexes_t *indexes_p)
Definition sync.c:3379
static int so_call_sync(ctx_t *ctx_p, indexes_t *indexes_p, int n, api_eventinfo_t *ei)
Definition sync.c:701
int sync_prequeue_loadmark(int monitored, ctx_t *ctx_p, indexes_t *indexes_p, const char *path_full, const char *path_rel, stat64_t *lst_p, eventobjtype_t objtype_old, eventobjtype_t objtype_new, uint32_t event_mask, int event_wd, mode_t st_mode, off_t st_size, char **path_buf_p, size_t *path_buf_len_p, eventinfo_t *evinfo)
Definition sync.c:2143
void sync_inclist_rotate(ctx_t *ctx_p, struct dosync_arg *dosync_arg_p)
Definition sync.c:2930
int sync_initialsync(const char *path, ctx_t *ctx_p, indexes_t *indexes_p, initsync_t initsync)
Definition sync.c:1671
int _sync_islocked(threadinfo_t *threadinfo_p, void *_fpath)
Definition sync.c:2404
pid_t clsyncapi_fork(ctx_t *ctx_p)
clsync's wrapper for function "fork()". Should be used instead of "fork()" directly,...
Definition sync.c:1060
int sync_idle_dosync_collectedevents_aggrqueue(queue_id_t queue_id, ctx_t *ctx_p, indexes_t *indexes_p, struct dosync_arg *dosync_arg)
Definition sync.c:2575
const char * rsync_escape(const char *path)
Definition sync.c:2681
int sync_idle_dosync_collectedevents_uniqfname(ctx_t *ctx_p, char *fpath, char *name)
Definition sync.c:2626
void _sync_idle_dosync_collectedevents(gpointer fpath_gp, gpointer evinfo_gp, gpointer arg_gp)
Definition sync.c:2427
void _sync_idle_dosync_collectedexcludes(gpointer fpath_gp, gpointer flags_gp, gpointer arg_gp)
Definition sync.c:2347
#define SHOULD_THREAD(ctx_p)
Definition sync.c:548
void sync_idle_dosync_collectedevents_listpush(gpointer fpath_gp, gpointer evinfo_gp, gpointer arg_gp)
Definition sync.c:2973
gboolean rsync_aggrout(gpointer outline_gp, gpointer flags_gp, gpointer arg_gp)
Definition sync.c:2755
#define debug_argv_dump(level, argv)
Definition sync.c:952
int notify_wait(ctx_t *ctx_p, indexes_t *indexes_p)
Definition sync.c:3246
volatile int exitcode
Definition sync.c:547
int sync_idle_dosync_collectedevents_listcreate(struct dosync_arg *dosync_arg_p, char *name)
Definition sync.c:2647
__extension__ int sync_run(ctx_t *ctx_p)
Definition sync.c:3998
volatile state_t * state_p
Definition sync.c:546
int sync_tryforcecycle(ctx_t *ctx_p, pthread_t pthread_parent)
Definition sync.c:3557
static void evinfo_initialevmask(ctx_t *ctx_p, eventinfo_t *evinfo_p, int isdir)
Definition sync.c:1291
int sync_dosync(const char *fpath, uint32_t evmask, ctx_t *ctx_p, indexes_t *indexes_p)
Definition sync.c:2064
int exitcode_process(ctx_t *ctx_p, int exitcode)
Definition sync.c:177
int(* thread_callbackfunct_t)(ctx_t *ctx_p, thread_callbackfunct_arg_t *arg_p)
Definition sync.h:29
int threads_foreach(int(*funct)(threadinfo_t *, void *), state_t state, void *arg)
time_t thread_nextexpiretime()