void poll_monitor::run() { collect_initial_data(); for (;;) { #ifdef HAVE_CXX_MUTEX unique_lock<mutex> run_guard(run_mutex); if (should_stop) break; run_guard.unlock(); #endif FSW_ELOG(_("Done scanning.\n")); sleep(latency < MIN_POLL_LATENCY ? MIN_POLL_LATENCY : latency); time(&curr_time); collect_data(); if (events.size()) { notify_events(events); events.clear(); } } }
void monitor::notify_overflow(const std::string& path) const
{
  // When overflow events are not allowed, an overflow is fatal.
  if (!allow_overflow)
    throw libfsw_exception(_("Event queue overflow."));

  // Otherwise surface the overflow to the client as a regular event
  // carrying the Overflow flag, stamped with the current time.
  time_t now;
  time(&now);

  notify_events({{path, now, {fsw_event_flag::Overflow}}});
}
void kqueue_monitor::process_events(const vector<struct kevent>& changes, const vector<struct kevent>& event_list, int event_num) { time_t curr_time; time(&curr_time); vector<event> events; for (auto i = 0; i < event_num; ++i) { struct kevent e = event_list[i]; if (e.flags & EV_ERROR) { perror(_("Event with EV_ERROR")); continue; } // If a NOTE_DELETE is found or a NOTE_LINK is found on a directory, then // the descriptor should be closed and the node rescanned: removing a // subtree in *BSD usually result in NOTE_REMOVED | NOTE_LINK being logged // for each subdirectory, but sometimes NOTE_WRITE | NOTE_LINK is only // observed. For this reason we mark those descriptors as to be deleted // anyway. // // If a NOTE_RENAME or NOTE_REVOKE flag is found, the file // descriptor should probably be closed and the file should be rescanned. // If a NOTE_WRITE flag is found and the descriptor is a directory, then // the directory needs to be rescanned because at least one file has // either been created or deleted. if ((e.fflags & NOTE_DELETE)) { load->descriptors_to_remove.insert(e.ident); } else if ((e.fflags & NOTE_RENAME) || (e.fflags & NOTE_REVOKE) || ((e.fflags & NOTE_WRITE) && S_ISDIR(load->file_modes[e.ident]))) { load->descriptors_to_rescan.insert(e.ident); } // Invoke the callback passing every path for which an event has been // received with a non empty filter flag. if (e.fflags) { events.push_back({load->file_names_by_descriptor[e.ident], curr_time, decode_flags(e.fflags)}); } } if (events.size()) { notify_events(events); } }
void poll_monitor::run()
{
  collect_initial_data();

  // Poll forever: this variant has no external stop condition.
  for (;;)
  {
    FSW_ELOG(_("Done scanning.\n"));

    // Clamp the configured latency to the minimum supported poll interval.
    const auto pause = latency < MIN_POLL_LATENCY ? MIN_POLL_LATENCY : latency;
    sleep(pause);

    time(&curr_time);

    collect_data();

    // Flush whatever this scan picked up.
    if (!events.empty())
    {
      notify_events(events);
      events.clear();
    }
  }
}
void inotify_monitor::run() { char buffer[BUFFER_SIZE]; while (true) { process_pending_events(); scan_root_paths(); // If no files can be watched, sleep and repeat the loop. if (!impl->watched_descriptors.size()) { sleep(latency); continue; } // Use select to timeout on file descriptor read the amount specified by // the monitor latency. This way, the monitor has a chance to update its // watches with at least the periodicity expected by the user. fd_set set; struct timeval timeout; FD_ZERO(&set); FD_SET(impl->inotify_monitor_handle, &set); double sec; double frac = modf(this->latency, &sec); timeout.tv_sec = sec; timeout.tv_usec = 1000 * 1000 * frac; int rv = select(impl->inotify_monitor_handle + 1, &set, nullptr, nullptr, &timeout); if (rv == -1) { throw libfsw_exception(_("::select() on inotify descriptor encountered an error.")); } // In case of read timeout just repeat the loop. if (rv == 0) { continue; } ssize_t record_num = read(impl->inotify_monitor_handle, buffer, BUFFER_SIZE); { ostringstream log; log << _("Number of records: ") << record_num << "\n"; FSW_ELOG(log.str().c_str()); } if (!record_num) { throw libfsw_exception(_("read() on inotify descriptor read 0 records.")); } if (record_num == -1) { perror("read()"); throw libfsw_exception(_("read() on inotify descriptor returned -1.")); } time(&impl->curr_time); for (char *p = buffer; p < buffer + record_num;) { struct inotify_event * event = reinterpret_cast<struct inotify_event *> (p); preprocess_event(event); p += (sizeof (struct inotify_event)) + event->len; } if (impl->events.size()) { notify_events(impl->events); impl->events.clear(); } sleep(latency); } }