/* Release the writer lock on @self.
 *
 * The caller must currently hold the writer lock (asserted below).
 * free_lock is reader-locked for the whole call; given the !destroyed
 * assert, this presumably guards against a concurrent destroy that
 * takes free_lock as writer -- confirm against the destroy path. */
void
ec_lock_writer_unlock (ECLock *self)
{
  g_static_rw_lock_reader_lock (self->free_lock);
  {
    g_assert (!self->destroyed);
    g_assert (have_writer_lock_unprotected (self));

    g_mutex_lock (self->state_mutex);
    {
      // To help make sure we haven't screwed up this class, we
      // operate an actual reader/writer lock.
      g_static_rw_lock_writer_unlock (self->lock);

      // There obviously shouldn't be any readers at this point.
      g_assert (g_hash_table_size (self->readers) == 0);

      self->writer = NULL;

      maybe_trace (self, "released writer lock");

      self->state = EC_LOCK_STATE_UNLOCKED;
      // Wake every thread blocked in a lock method waiting for the
      // state to change.
      g_cond_broadcast (self->state_cv);
    }
    g_mutex_unlock (self->state_mutex);
  }
  g_static_rw_lock_reader_unlock (self->free_lock);
}
/* Release the calling thread's reader lock on @self.
 *
 * Removes the thread from the readers table; only when the last
 * reader leaves does the state go back to UNLOCKED (and waiters get
 * woken). */
void
ec_lock_reader_unlock (ECLock *self)
{
  g_static_rw_lock_reader_lock (self->free_lock);
  {
    g_assert (!self->destroyed);
    g_assert (have_reader_lock_unprotected (self));

    g_mutex_lock (self->state_mutex);
    {
      // To make sure we haven't screwed up this class, we operate an
      // actual reader/writer lock.
      g_static_rw_lock_reader_unlock (self->lock);

      g_hash_table_remove (self->readers, g_thread_self ());

      maybe_trace (self, "released reader lock");

      // Only the departure of the last reader changes the lock state.
      if ( g_hash_table_size (self->readers) == 0 ) {
        self->state = EC_LOCK_STATE_UNLOCKED;
        g_cond_broadcast (self->state_cv);
      }
    }
    g_mutex_unlock (self->state_mutex);
  }
  g_static_rw_lock_reader_unlock (self->free_lock);
}
/**
 * clientmgr_get_client_by_mac:
 * @mac: MAC address, 6 bytes
 *
 * Find the client identified by @mac, holding the global reader lock
 * for the duration of the lookup.
 *
 * Returns: the matching #Client, or %NULL when not found
 */
Client *
clientmgr_get_client_by_mac(const guchar *mac)
{
    Client *client;

    g_static_rw_lock_reader_lock(&lock);
    client = clientmgr_get_client_by_mac_internal(mac);
    g_static_rw_lock_reader_unlock(&lock);

    return client;
}
/* Acquire a reader lock on @self for the calling thread, blocking
 * while a writer holds the lock.  Recursive reader locking by the
 * same thread is asserted against. */
void
ec_lock_reader_lock (ECLock *self)
{
  g_static_rw_lock_reader_lock (self->free_lock);
  {
    g_assert (!self->destroyed);
    g_assert (!have_reader_lock_unprotected (self));

    g_mutex_lock (self->state_mutex);
    // Readers only need to wait for writers; other readers are fine.
    while ( self->state == EC_LOCK_STATE_WRITER_LOCKED ) {
      g_cond_wait (self->state_cv, self->state_mutex);
    }
    {
      // To make sure we haven't screwed up this class, we operate an
      // actual reader/writer lock.  It had better work...
      gboolean lock_result = g_static_rw_lock_reader_trylock (self->lock);
      g_assert (lock_result == TRUE);
      g_assert (self->writer == NULL);
      // Record this thread as a reader (the value is unused).
      g_hash_table_insert (self->readers, g_thread_self (), NULL);
      maybe_trace (self, "obtained reader lock");
      self->state = EC_LOCK_STATE_READER_LOCKED;
      g_cond_broadcast (self->state_cv);
    }
    g_mutex_unlock (self->state_mutex);
  }
  g_static_rw_lock_reader_unlock (self->free_lock);
}
/* Acquire @lock in the mode given by @mode: 'w'/'W' takes the writer
 * lock, any other character takes the reader lock. */
static void
_lock_rw(GStaticRWLock *lock, char mode)
{
    switch (mode) {
    case 'w':
    case 'W':
        g_static_rw_lock_writer_lock(lock);
        break;
    default:
        g_static_rw_lock_reader_lock(lock);
        break;
    }
}
/** Wrapper for e_cal_backend_cache_get_timezone().
 *
 * Performs the cache lookup under the backend's cache reader lock.
 *
 * @param cb 3E calendar backend.
 * @param cache Calendar backend cache object.
 * @param tzid TZID of timezone.
 *
 * @return icaltimezone object (owned by cache, don't free).
 */
const icaltimezone *
e_cal_backend_3e_cache_get_timezone(ECalBackend3e *cb, ECalBackendCache *cache, const char *tzid)
{
    const icaltimezone *tz;

    g_static_rw_lock_reader_lock(&cb->priv->cache_lock);
    tz = e_cal_backend_cache_get_timezone(cache, tzid);
    g_static_rw_lock_reader_unlock(&cb->priv->cache_lock);

    return tz;
}
gboolean clientmgr_get_client_is_enable_by_mac(const guchar * mac) { gboolean enable = FALSE; g_static_rw_lock_reader_lock(&lock); Client * ptr = clientmgr_get_client_by_mac_internal(mac); if(ptr) { enable = ptr->enable ; } g_static_rw_lock_reader_unlock(&lock); return enable; }
/**
 * pka_subscription_deliver_sample:
 * @subscription: A #PkaSubscription.
 * @source: A #PkaSource.
 * @manifest: A #PkaManifest; should be the current manifest for the
 *   source that has already been sent to
 *   pka_subscription_deliver_manifest().
 * @sample: A #PkaSample.
 *
 * Delivers @sample from @source to the @subscription by encoding it
 * and invoking the subscription's sample closure (if any) under the
 * reader lock.
 *
 * Returns: None.
 * Side effects: None.
 */
void
pka_subscription_deliver_sample (PkaSubscription *subscription, /* IN */
                                 PkaSource       *source,       /* IN */
                                 PkaManifest     *manifest,     /* IN */
                                 PkaSample       *sample)       /* IN */
{
	GValue params[3] = { { 0 } };
	guint8 *buffer = NULL;
	gsize buffer_len = 0;
	PkaSample *samples[1] = { sample };

	g_return_if_fail(subscription != NULL);
	g_return_if_fail(sample != NULL);
	/* Fix: @manifest is passed to the encoder below and was previously
	 * unchecked; validate it like pka_subscription_deliver_manifest()
	 * does. */
	g_return_if_fail(manifest != NULL);
	g_return_if_fail(PKA_IS_SOURCE(source));

	/*
	 * TODO: In the recent rewrite of this, we didn't implement buffering.
	 *   We need to add back support for buffering based on timeouts or
	 *   size of raw-data.
	 */

	ENTRY;
	g_static_rw_lock_reader_lock(&subscription->rw_lock);
	if (G_LIKELY(subscription->sample_closure)) {
		if (!pka_encoder_encode_samples(NULL, manifest, samples, 1,
		                                &buffer, &buffer_len)) {
			WARNING(Subscription, "Subscription %d failed to encode sample.",
			        subscription->id);
			GOTO(failed);
		}
		DUMP_BYTES(Sample, buffer, buffer_len);
		/*
		 * XXX: It should be obvious that this marshalling isn't very fast.
		 *   But I've certainly done worse.
		 */
		g_value_init(&params[0], PKA_TYPE_SUBSCRIPTION);
		g_value_init(&params[1], G_TYPE_POINTER);
		g_value_init(&params[2], G_TYPE_ULONG);
		g_value_set_boxed(&params[0], subscription);
		g_value_set_pointer(&params[1], buffer);
		g_value_set_ulong(&params[2], buffer_len);
		g_closure_invoke(subscription->sample_closure, NULL, 3, &params[0], NULL);
		g_value_unset(&params[0]);
		g_value_unset(&params[1]);
		g_value_unset(&params[2]);
		g_free(buffer);
	}
  failed:
	g_static_rw_lock_reader_unlock(&subscription->rw_lock);
	EXIT;
}
/* Enter the "running unless stopped" critical section.
 *
 * NOTE the asymmetric locking contract: if the substream has already
 * stopped, stopped_lock is released and TRUE is returned; otherwise
 * FALSE is returned with the reader lock STILL HELD -- the caller is
 * expected to release it later (presumably via a matching _exit
 * helper; confirm at the call sites). */
static gboolean
fs_rtp_sub_stream_has_stopped_enter (FsRtpSubStream *self)
{
  g_static_rw_lock_reader_lock (&self->priv->stopped_lock);

  if (self->priv->stopped)
  {
    g_static_rw_lock_reader_unlock (&self->priv->stopped_lock);
    return TRUE;
  }
  return FALSE;
}
/* Return the registered-error map with the reader lock held; the
 * caller must release it with the matching reader-unlock.
 *
 * The writer lock/unlock pair forces lazy initialization: taking the
 * writer lock presumably populates modem_registered_errors.list as a
 * side effect (writer_lock is a wrapper defined elsewhere -- confirm).
 * The unsynchronized NULL pre-check is a benign fast path: a stale
 * read merely costs a redundant lock/unlock cycle. */
static ModemErrorMapping const *
modem_registered_errors_reader_lock (void)
{
  if (modem_registered_errors.list == NULL)
    {
      modem_registered_errors_writer_lock ();
      modem_registered_errors_writer_unlock ();
    }

  g_static_rw_lock_reader_lock (&modem_registered_errors.lock);

  return modem_registered_errors.list;
}
/* Try to acquire the writer lock on @self without blocking.
 *
 * Returns TRUE and records the calling thread as writer when the lock
 * was completely free; returns FALSE otherwise. */
gboolean
ec_lock_writer_trylock (ECLock *self)
{
  gboolean result;

  g_static_rw_lock_reader_lock (self->free_lock);
  {
    g_assert (!self->destroyed);
    g_assert (!have_writer_lock_unprotected (self));

    g_mutex_lock (self->state_mutex);
    if ( self->state != EC_LOCK_STATE_UNLOCKED ) {
      // Consistency check: we better not be able to successfully
      // trylock the independent lock either.
      gboolean lock_result = g_static_rw_lock_writer_trylock (self->lock);
      g_assert (lock_result == FALSE);
      maybe_trace (self, "failed to obtain writer lock in writer_trylock method");
      result = FALSE;
    }
    else {
      // To make sure we haven't screwed up this class, we operate an
      // actual reader/writer lock.  It had better work...
      gboolean lock_result = g_static_rw_lock_writer_trylock (self->lock);
      g_assert (lock_result == TRUE);
      // Also, the table of readers had better be empty.
      g_assert (g_hash_table_size (self->readers) == 0);
      // And there better not be any other writer either.
      g_assert (self->writer == NULL);
      self->writer = g_thread_self ();
      maybe_trace (self, "obtained writer lock in writer_trylock method");
      self->state = EC_LOCK_STATE_WRITER_LOCKED;
      // No need to broadcast the new state here; other threads are
      // only interested when a writer lock is released!
      result = TRUE;
    }
    g_mutex_unlock (self->state_mutex);
  }
  g_static_rw_lock_reader_unlock (self->free_lock);

  return result;
}
/* Worker body for the GStaticRWLock stress test.
 *
 * While the run flag is set, each iteration randomly acts as a reader
 * (~80%) or a writer (~20%), and randomly blocks (~80%) or only
 * trylocks (~20%, retrying on failure).  The shared counter
 * test_g_static_rw_lock_state -- guarded by its own G_LOCK -- encodes
 * the rw-lock invariant being verified: >= 0 while readers are inside,
 * exactly -1 while the single writer is inside. */
static gpointer
test_g_static_rw_lock_thread (gpointer data)
{
  while (test_g_static_rw_lock_run)
    {
      if (g_random_double() > .2) /* I'm a reader */
	{
	  if (g_random_double() > .2) /* I'll block */
	    g_static_rw_lock_reader_lock (&test_g_static_rw_lock_lock);
	  else /* I'll only try */
	    if (!g_static_rw_lock_reader_trylock (&test_g_static_rw_lock_lock))
	      continue;
	  G_LOCK (test_g_static_rw_lock_state);
	  g_assert (test_g_static_rw_lock_state >= 0);
	  test_g_static_rw_lock_state++;
	  G_UNLOCK (test_g_static_rw_lock_state);

	  g_usleep (g_random_int_range (20,1000));

	  G_LOCK (test_g_static_rw_lock_state);
	  test_g_static_rw_lock_state--;
	  G_UNLOCK (test_g_static_rw_lock_state);

	  g_static_rw_lock_reader_unlock (&test_g_static_rw_lock_lock);
	}
      else /* I'm a writer */
	{
	  if (g_random_double() > .2) /* I'll block */
	    g_static_rw_lock_writer_lock (&test_g_static_rw_lock_lock);
	  else /* I'll only try */
	    if (!g_static_rw_lock_writer_trylock (&test_g_static_rw_lock_lock))
	      continue;
	  G_LOCK (test_g_static_rw_lock_state);
	  g_assert (test_g_static_rw_lock_state == 0);
	  test_g_static_rw_lock_state = -1;
	  G_UNLOCK (test_g_static_rw_lock_state);

	  g_usleep (g_random_int_range (20,1000));

	  G_LOCK (test_g_static_rw_lock_state);
	  test_g_static_rw_lock_state = 0;
	  G_UNLOCK (test_g_static_rw_lock_state);

	  g_static_rw_lock_writer_unlock (&test_g_static_rw_lock_lock);
	}
    }
  return NULL;
}
/* Turn trace output off for @self.  The trace flag is serialized by
 * trace_lock; free_lock (reader) keeps @self alive for the call. */
void
ec_lock_disable_tracing (ECLock *self)
{
  g_static_rw_lock_reader_lock (self->free_lock);

  g_assert (!self->destroyed);

  g_static_mutex_lock (self->trace_lock);
  self->trace_on = FALSE;
  g_static_mutex_unlock (self->trace_lock);

  g_static_rw_lock_reader_unlock (self->free_lock);
}
/* Drop the calling thread from @self's set of traced threads.
 * Serialized by trace_lock; free_lock (reader) keeps @self alive. */
void
ec_lock_remove_from_trace_list (ECLock *self)
{
  g_static_rw_lock_reader_lock (self->free_lock);

  g_assert (!self->destroyed);

  g_static_mutex_lock (self->trace_lock);
  g_hash_table_remove (self->traced_threads, g_thread_self ());
  g_static_mutex_unlock (self->trace_lock);

  g_static_rw_lock_reader_unlock (self->free_lock);
}
/* Add the calling thread to @self's set of traced threads (the table
 * value is unused).  Serialized by trace_lock; free_lock (reader)
 * keeps @self alive. */
void
ec_lock_add_to_trace_list (ECLock *self)
{
  g_static_rw_lock_reader_lock (self->free_lock);

  g_assert (!self->destroyed);

  g_static_mutex_lock (self->trace_lock);
  g_hash_table_insert (self->traced_threads, g_thread_self (), NULL);
  g_static_mutex_unlock (self->trace_lock);

  g_static_rw_lock_reader_unlock (self->free_lock);
}
/** Wrapper for e_cal_backend_cache_get_component().
 *
 * Get single component from cache. If the component exists in cache
 * but has cache state E_CAL_COMPONENT_CACHE_STATE_REMOVED, NULL is
 * returned (and the cached object is released).
 *
 * @param cb 3E calendar backend.
 * @param cache Calendar backend cache object.
 * @param uid UID of the calendar component.
 * @param rid RID of the detached instance of recurring event.
 *
 * @return ECalComponent object or NULL.
 */
ECalComponent *
e_cal_backend_3e_cache_get_component(ECalBackend3e *cb, ECalBackendCache *cache, const char *uid, const char *rid)
{
    ECalComponent *comp;

    g_static_rw_lock_reader_lock(&cb->priv->cache_lock);
    comp = e_cal_backend_cache_get_component(cache, uid, rid);
    g_static_rw_lock_reader_unlock(&cb->priv->cache_lock);

    if (comp == NULL)
        return NULL;

    if (e_cal_component_get_cache_state(comp) == E_CAL_COMPONENT_CACHE_STATE_REMOVED)
    {
        g_object_unref(comp);
        return NULL;
    }

    return comp;
}
/* Query the writer-lock state via have_writer_lock_unprotected(),
 * with free_lock (reader) held to keep @self alive for the call. */
gboolean
ec_lock_have_writer_lock (ECLock *self)
{
  gboolean held;

  g_static_rw_lock_reader_lock (self->free_lock);
  g_assert (!self->destroyed);
  held = have_writer_lock_unprotected (self);
  g_static_rw_lock_reader_unlock (self->free_lock);

  return held;
}
/**
 * pka_subscription_get_sources:
 * @subscription: A #PkaSubscription.
 *
 * Collects the subscription's sources into a list, walking the source
 * tree under the reader lock.
 *
 * Returns: A #GList of the subscription's sources.
 */
GList*
pka_subscription_get_sources (PkaSubscription *subscription) /* IN */
{
	GList *sources = NULL;

	g_return_val_if_fail(subscription != NULL, NULL);

	ENTRY;
	g_static_rw_lock_reader_lock(&subscription->rw_lock);
	g_tree_foreach(subscription->sources,
	               pka_subscription_source_accumulator,
	               &sources);
	g_static_rw_lock_reader_unlock(&subscription->rw_lock);
	RETURN(sources);
}
/** Get timestamp of last sync.
 *
 * Reads the server UTC time string from the cache (under the reader
 * lock) and converts it to a time_t; 0 when no timestamp is recorded.
 *
 * @param cb 3E calendar backend.
 *
 * @return Timestamp in local time.
 */
time_t
e_cal_backend_3e_get_sync_timestamp(ECalBackend3e *cb)
{
    const char *ts;
    time_t stamp = 0;

    g_static_rw_lock_reader_lock(&cb->priv->cache_lock);
    ts = e_cal_backend_cache_get_server_utc_time(cb->priv->cache);
    g_static_rw_lock_reader_unlock(&cb->priv->cache_lock);

    if (ts != NULL)
    {
        icaltimetype t = icaltime_from_string(ts);
        stamp = icaltime_as_timet_with_zone(t, NULL);
    }

    return stamp;
}
/**
 * pka_subscription_get_buffer:
 * @subscription: A #PkaSubscription.
 * @buffer_timeout: (out): location for the configured buffer timeout.
 * @buffer_size: (out): location for the configured buffer size.
 *
 * Copies the subscription's buffering parameters to the out
 * arguments, reading them under the reader lock.
 */
void
pka_subscription_get_buffer (PkaSubscription *subscription,   /* IN */
                             gint            *buffer_timeout, /* OUT */
                             gint            *buffer_size)    /* OUT */
{
	g_return_if_fail(subscription != NULL);
	g_return_if_fail(buffer_timeout != NULL);
	g_return_if_fail(buffer_size != NULL);

	ENTRY;
	g_static_rw_lock_reader_lock(&subscription->rw_lock);
	*buffer_timeout = subscription->buffer_timeout;
	*buffer_size = subscription->buffer_size;
	g_static_rw_lock_reader_unlock(&subscription->rw_lock);
	EXIT;
}
/* Record a human-readable @name for the calling thread, used by trace
 * output.  The table is bounded by EC_LOCK_MAX_THREAD_NAMES
 * (asserted).  Serialized by trace_lock; free_lock (reader) keeps
 * @self alive. */
void
ec_lock_name_thread (ECLock *self, const char *name)
{
  g_static_rw_lock_reader_lock (self->free_lock);

  g_assert (!self->destroyed);

  g_static_mutex_lock (self->trace_lock);
  g_assert (g_hash_table_size (self->thread_names) < EC_LOCK_MAX_THREAD_NAMES);
  g_hash_table_insert (self->thread_names, g_thread_self (), g_string_new (name));
  g_static_mutex_unlock (self->trace_lock);

  g_static_rw_lock_reader_unlock (self->free_lock);
}
/* Look up (or lazily create) the #VFSMimeType for @type and return a
 * new reference to it.
 *
 * NOTE(review): there is a check-then-act race between the reader
 * lookup and the writer insert -- two threads that both miss can each
 * create a VFSMimeType for the same type, and the second insert
 * replaces the first in the table.  Re-checking the table under the
 * writer lock before inserting would close the window; confirm the
 * hash table's destroy-notify semantics before changing this. */
VFSMimeType* vfs_mime_type_get_from_type( const char* type )
{
    VFSMimeType * mime_type;

    g_static_rw_lock_reader_lock( &mime_hash_lock );
    mime_type = g_hash_table_lookup( mime_hash, type );
    g_static_rw_lock_reader_unlock( &mime_hash_lock );

    if ( !mime_type )
    {
        mime_type = vfs_mime_type_new( type );
        g_static_rw_lock_writer_lock( &mime_hash_lock );
        g_hash_table_insert( mime_hash, mime_type->type, mime_type );
        g_static_rw_lock_writer_unlock( &mime_hash_lock );
    }

    vfs_mime_type_ref( mime_type );
    return mime_type;
}
/**
 * pka_subscription_deliver_manifest:
 * @subscription: A #PkaSubscription.
 * @source: A #PkaSource.
 * @manifest: A #PkaManifest.
 *
 * Delivers @manifest from @source to the subscription's handlers:
 * encodes the manifest and invokes the manifest closure (if set)
 * under the reader lock.
 *
 * Returns: None.
 * Side effects: None.
 */
void
pka_subscription_deliver_manifest (PkaSubscription *subscription, /* IN */
                                   PkaSource       *source,       /* IN */
                                   PkaManifest     *manifest)     /* IN */
{
	GValue params[3] = { { 0 } };
	guint8 *buffer = NULL;
	gsize buffer_len = 0;

	g_return_if_fail(subscription != NULL);
	g_return_if_fail(manifest != NULL);
	g_return_if_fail(PKA_IS_SOURCE(source));

	ENTRY;
	g_static_rw_lock_reader_lock(&subscription->rw_lock);
	if (G_LIKELY(subscription->manifest_closure)) {
		if (!pka_encoder_encode_manifest(NULL, manifest, &buffer, &buffer_len)) {
			WARNING(Subscription, "Subscription %d failed to encode manifest.",
			        subscription->id);
			GOTO(failed);
		}
		DUMP_BYTES(Manifest, buffer, buffer_len);
		/*
		 * XXX: It should be obvious that this marshalling isn't very fast.
		 *   But I've certainly done worse.  At least it handles things cleanly
		 *   with regard to using libffi.
		 */
		g_value_init(&params[0], PKA_TYPE_SUBSCRIPTION);
		g_value_init(&params[1], G_TYPE_POINTER);
		g_value_init(&params[2], G_TYPE_ULONG);
		g_value_set_boxed(&params[0], subscription);
		g_value_set_pointer(&params[1], buffer);
		g_value_set_ulong(&params[2], buffer_len);
		g_closure_invoke(subscription->manifest_closure, NULL, 3, &params[0], NULL);
		g_value_unset(&params[0]);
		g_value_unset(&params[1]);
		g_value_unset(&params[2]);
		g_free(buffer);
	}
  failed:
	g_static_rw_lock_reader_unlock(&subscription->rw_lock);
	EXIT;
}
/* Acquire the writer lock on @self, blocking until the lock is fully
 * free (no readers, no writer).  An attempt to writer-lock a lock the
 * calling context already writer-holds is a fatal error. */
void
ec_lock_writer_lock (ECLock *self)
{
  g_static_rw_lock_reader_lock (self->free_lock);
  {
    g_assert (!self->destroyed);

    // Recursive writer locking is a programming error; abort loudly.
    if ( have_writer_lock_unprotected (self) ) {
      GString *pts = pid_thread_string ();
      g_error ("%s: erroneous attempt to writer lock a lock already held",
               pts->str);
    }

    g_assert (!have_writer_lock_unprotected (self));

    g_mutex_lock (self->state_mutex);
    // A writer must wait for both readers and writers to clear out.
    while ( self->state != EC_LOCK_STATE_UNLOCKED ) {
      g_cond_wait (self->state_cv, self->state_mutex);
    }
    {
      // To make sure we haven't screwed up this class, we operate an
      // actual reader/writer lock.  It had better work...
      gboolean lock_result = g_static_rw_lock_writer_trylock (self->lock);
      g_assert (lock_result == TRUE);
      // Also, the table of readers had better be empty.
      g_assert (g_hash_table_size (self->readers) == 0);
      // And there better not be any other writer either.
      g_assert (self->writer == NULL);
      self->writer = g_thread_self ();
      maybe_trace (self, "obtained writer lock");
      self->state = EC_LOCK_STATE_WRITER_LOCKED;
      // No need to broadcast the new state here; other threads are
      // only interested when a writer lock is released!
    }
    g_mutex_unlock (self->state_mutex);
  }
  g_static_rw_lock_reader_unlock (self->free_lock);
}
/** Wrapper for e_cal_backend_cache_get_components_by_uid().
 *
 * Get master components and all detached instances for given UID.
 * Components/detached instances with cache state
 * E_CAL_COMPONENT_CACHE_STATE_REMOVED will be omitted.
 *
 * @param cb 3E calendar backend.
 * @param cache Calendar backend cache object.
 * @param uid UID of the calendar components.
 *
 * @return List of matching ECalComponent objects.
 */
GSList *e_cal_backend_3e_cache_get_components_by_uid(ECalBackend3e *cb, ECalBackendCache *cache, const char *uid)
{
    GSList *iter, *iter_next;
    GSList *list;

    g_static_rw_lock_reader_lock(&cb->priv->cache_lock);
    list = e_cal_backend_cache_get_components_by_uid(cache, uid);
    g_static_rw_lock_reader_unlock(&cb->priv->cache_lock);

    /* Filter out components marked as removed. */
    for (iter = list; iter; iter = iter_next)
    {
        ECalComponent *comp = E_CAL_COMPONENT(iter->data);
        iter_next = iter->next;

        if (e_cal_component_get_cache_state(comp) == E_CAL_COMPONENT_CACHE_STATE_REMOVED)
        {
            /* Fix: g_slist_remove_link() only detaches the link node and
             * leaked it here; g_slist_delete_link() detaches AND frees it. */
            list = g_slist_delete_link(list, iter);
            g_object_unref(comp);
        }
    }

    return list;
}
/* Run one message lookup through the pattern database.
 *
 * The ruleset lookup happens under the reader lock; the matched rule
 * is then processed AFTER the lock is dropped (presumably safe
 * because loaded rules are immutable -- confirm against the ruleset
 * reload path, which would need the writer lock).
 *
 * Returns TRUE when a rule matched. */
static gboolean
_pattern_db_process(PatternDB *self, PDBLookupParams *lookup, GArray *dbg_list)
{
  LogMessage *msg = lookup->msg;
  PDBProcessParams process_params_p = {0};
  PDBProcessParams *process_params = &process_params_p;

  g_static_rw_lock_reader_lock(&self->lock);
  if (_pattern_db_is_empty(self))
    {
      // Nothing to match against; drop the lock before bailing out.
      g_static_rw_lock_reader_unlock(&self->lock);
      return FALSE;
    }
  process_params->rule = pdb_ruleset_lookup(self->ruleset, lookup, dbg_list);
  process_params->msg = msg;
  g_static_rw_lock_reader_unlock(&self->lock);

  if (process_params->rule)
    _pattern_db_process_matching_rule(self, process_params);
  else
    _pattern_db_process_unmatching_rule(self, process_params);

  _flush_emitted_messages(self, process_params);

  return process_params->rule != NULL;
}
/** Get all timezones from the cache.
 *
 * Walks every key of the file cache, looks each up as a timezone
 * (under the reader lock, taken per iteration), and deep-copies the
 * ones that resolve.
 *
 * @param cb 3E calendar backend.
 * @param cache Calendar backend cache object.
 *
 * @return List of icaltimezone objects (free them using icaltimezone_free(x, 1);).
 */
static GSList *e_cal_backend_cache_get_timezones(ECalBackend3e *cb, ECalBackendCache *cache)
{
    GSList *keys, *iter;
    GSList *list = NULL;

    g_return_val_if_fail(E_IS_CAL_BACKEND_CACHE(cache), NULL);

    keys = e_file_cache_get_keys(E_FILE_CACHE(cache));
    for (iter = keys; iter; iter = iter->next)
    {
        char *key = iter->data;
        const icaltimezone *zone;

        g_static_rw_lock_reader_lock(&cb->priv->cache_lock);
        zone = e_cal_backend_cache_get_timezone(cache, key);
        g_static_rw_lock_reader_unlock(&cb->priv->cache_lock);

        if (zone)
        {
            icalcomponent *zone_comp = icaltimezone_get_component((icaltimezone *)zone);
            icaltimezone *new_zone = icaltimezone_new();

            /* make sure you have patched eds if you get segfaults here */
            // NOTE(review): zone_comp may still be NULL here; g_critical
            // does not necessarily abort, and the clone call below would
            // then receive NULL -- confirm behavior on unpatched e-d-s.
            if (zone_comp == NULL)
            {
                g_critical("Patch your evolution-data-server or else...");
            }
            icaltimezone_set_component(new_zone, icalcomponent_new_clone(zone_comp));
            list = g_slist_prepend(list, new_zone);
        }
    }

    /* eds patch required here! */
    g_slist_free(keys);

    return list;
}
/** Sync changes from the server to the cache.
 *
 * Queries the server for objects modified since the last sync
 * timestamp (minus one day of slack) and merges them into the local
 * cache: server-side deletions remove unmodified cached components,
 * new/changed events are downloaded (including attachments) and
 * cached, and unknown VTIMEZONEs are imported.  The sync timestamp is
 * only advanced when every update succeeded.
 *
 * @param cb 3E calendar backend.
 *
 * @return TRUE on success.
 *
 * @todo Handle UID/RID.
 * @todo Better server error handling.
 * @todo Conflict resolution.
 */
gboolean e_cal_backend_3e_sync_server_to_cache(ECalBackend3e *cb)
{
    GError *local_err = NULL;
    gboolean update_sync = TRUE;
    icalcomponent *ical;
    icalcomponent *icomp;
    char filter[128];
    struct tm tm;
    time_t stamp = MAX(e_cal_backend_3e_get_sync_timestamp(cb) - 60 * 60 * 24, 0); /*XXX: always add 1 day padding to prevent timezone problems */

    /* prepare query filter string */
    gmtime_r(&stamp, &tm);
    strftime(filter, sizeof(filter), "modified_since('%F %T')", &tm);

    ical = get_server_objects(cb, filter);
    if (ical == NULL)
    {
        return FALSE;
    }

    for (icomp = icalcomponent_get_first_component(ical, ICAL_ANY_COMPONENT);
         icomp;
         icomp = icalcomponent_get_next_component(ical, ICAL_ANY_COMPONENT))
    {
        icalcomponent_kind kind = icalcomponent_isa(icomp);

        icalcomponent_set_cache_state(icomp, E_CAL_COMPONENT_CACHE_STATE_NONE);

        if (kind == ICAL_VEVENT_COMPONENT)
        {
            ECalComponent *comp;
            const char *uid = icalcomponent_get_uid(icomp);
            gboolean server_deleted = icalcomponent_3e_status_is_deleted(icomp);
            ECalComponentCacheState comp_state = E_CAL_COMPONENT_CACHE_STATE_NONE;

            // Look up the cached counterpart of this server event.
            g_static_rw_lock_reader_lock(&cb->priv->cache_lock);
            comp = e_cal_backend_cache_get_component(cb->priv->cache, uid, NULL);
            g_static_rw_lock_reader_unlock(&cb->priv->cache_lock);

            if (comp)
            {
                comp_state = e_cal_component_get_cache_state(comp);
            }

            if (server_deleted)
            {
                /* deleted by the server */
                // Only drop it locally if we did not create/modify it
                // ourselves since the last sync.
                if (comp && e_cal_component_get_cache_state(comp) != E_CAL_COMPONENT_CACHE_STATE_CREATED
                    && e_cal_component_get_cache_state(comp) != E_CAL_COMPONENT_CACHE_STATE_MODIFIED)
                {
                    char *object = e_cal_component_get_as_string(comp);
                    ECalComponentId *id = e_cal_component_get_id(comp);

                    g_static_rw_lock_writer_lock(&cb->priv->cache_lock);
                    e_cal_backend_cache_remove_component(cb->priv->cache, uid, NULL);
                    g_static_rw_lock_writer_unlock(&cb->priv->cache_lock);

                    e_cal_backend_notify_object_removed(E_CAL_BACKEND(cb), id, object, NULL);
                    e_cal_component_free_id(id);
                    g_free(object);
                }
            }
            else
            {
                char *old_object = NULL;
                char *object;
                ECalComponent *new_comp = e_cal_component_new();

                // Build a local ECalComponent from the server copy.
                e_cal_component_set_icalcomponent(new_comp, icalcomponent_new_clone(icomp));
                e_cal_component_set_cache_state(new_comp, E_CAL_COMPONENT_CACHE_STATE_NONE);
                e_cal_backend_3e_convert_attachment_uris_to_local(cb, new_comp);
                if (comp)
                {
                    old_object = e_cal_component_get_as_string(comp);
                }
                object = e_cal_component_get_as_string(new_comp);

                if (old_object == NULL)
                {
                    if (e_cal_backend_3e_download_attachments(cb, new_comp, &local_err))
                    {
                        /* not in cache yet */
                        g_static_rw_lock_writer_lock(&cb->priv->cache_lock);
                        e_cal_backend_cache_put_component(cb->priv->cache, new_comp);
                        g_static_rw_lock_writer_unlock(&cb->priv->cache_lock);
                        e_cal_backend_notify_object_created(E_CAL_BACKEND(cb), object);
                    }
                    else
                    {
                        // Attachment download failed: report, and keep the
                        // old sync timestamp so this object is retried.
                        e_cal_backend_notify_gerror_error(E_CAL_BACKEND(cb), "Can't download attachment.", local_err);
                        g_clear_error(&local_err);
                        update_sync = FALSE;
                    }
                }
                else if (strcmp(old_object, object))
                {
                    /* what is in cache and what is on server differs */
                    if (comp_state != E_CAL_COMPONENT_CACHE_STATE_NONE)
                    {
                        /* modified in cache, don't do anything */
                    }
                    else
                    {
                        if (e_cal_backend_3e_download_attachments(cb, new_comp, &local_err))
                        {
                            /* sync with server */
                            g_static_rw_lock_writer_lock(&cb->priv->cache_lock);
                            e_cal_backend_cache_put_component(cb->priv->cache, new_comp);
                            g_static_rw_lock_writer_unlock(&cb->priv->cache_lock);
                            e_cal_backend_notify_object_modified(E_CAL_BACKEND(cb), old_object, object);
                        }
                        else
                        {
                            e_cal_backend_notify_gerror_error(E_CAL_BACKEND(cb), "Can't download attachment.", local_err);
                            g_clear_error(&local_err);
                            update_sync = FALSE;
                        }
                    }
                }

                g_free(old_object);
                g_free(object);
                g_object_unref(new_comp);
            }

            if (comp)
            {
                g_object_unref(comp);
            }
        }
        else if (kind == ICAL_VTIMEZONE_COMPONENT)
        {
            const char *tzid = icalcomponent_get_tzid(icomp);

            /* import non-existing timezones from the server */
            if (!e_cal_backend_cache_get_timezone(cb->priv->cache, tzid))
            {
                icaltimezone *zone = icaltimezone_new();
                icalcomponent *zone_comp = icalcomponent_new_clone(icomp);
                if (icaltimezone_set_component(zone, zone_comp))
                {
                    g_static_rw_lock_writer_lock(&cb->priv->cache_lock);
                    e_cal_backend_cache_put_timezone(cb->priv->cache, zone);
                    g_static_rw_lock_writer_unlock(&cb->priv->cache_lock);
                }
                else
                {
                    // set_component failed; the clone was not adopted.
                    icalcomponent_free(zone_comp);
                }
                icaltimezone_free(zone, 1);
            }
        }
        else
        {
            g_warning("Unsupported component kind (%d) found on the 3e server.", kind);
        }
    }

    // Advance the sync timestamp only if everything applied cleanly.
    if (update_sync)
    {
        e_cal_backend_3e_set_sync_timestamp(cb, time(NULL));
    }

    icalcomponent_free(ical);
    return TRUE;
}
/** Sync cache changes to the server and unmark them.
 *
 * Pushes every locally created/modified/removed component to the 3e
 * server over one connection, uploading attachments first where
 * needed, and rewrites the cache entry with its dirty state cleared
 * once the server accepted it.
 *
 * @param cb 3E calendar backend.
 *
 * @return TRUE on success.
 *
 * @todo Conflict resolution.
 */
gboolean e_cal_backend_3e_sync_cache_to_server(ECalBackend3e *cb)
{
    GError *local_err = NULL;
    GList *components, *iter;

    if (!e_cal_backend_3e_open_connection(cb, &local_err))
    {
        g_warning("Sync failed. Can't open connection to the 3e server. (%s)", local_err ? local_err->message : "Unknown error");
        g_clear_error(&local_err);
        return FALSE;
    }

    sync_timezones_to_server(cb);

    // Snapshot the component list under the reader lock.
    g_static_rw_lock_reader_lock(&cb->priv->cache_lock);
    components = e_cal_backend_cache_get_components(cb->priv->cache);
    g_static_rw_lock_reader_unlock(&cb->priv->cache_lock);

    for (iter = components; iter && !e_cal_backend_3e_sync_should_stop(cb); iter = iter->next)
    {
        ECalComponent *comp = E_CAL_COMPONENT(iter->data);
        ECalComponent *remote_comp;
        ECalComponentId *id = e_cal_component_get_id(comp);
        ECalComponentCacheState state = e_cal_component_get_cache_state(comp);

        /* remove client properties before sending component to the server */
        e_cal_component_set_x_property(comp, "X-EVOLUTION-STATUS", NULL);
        e_cal_component_set_cache_state(comp, E_CAL_COMPONENT_CACHE_STATE_NONE);
        e_cal_component_set_x_property(comp, "X-3E-DELETED", NULL);
        remote_comp = e_cal_component_clone(comp);
        gboolean attachments_converted = e_cal_backend_3e_convert_attachment_uris_to_remote(cb, remote_comp);
        char *remote_object = e_cal_component_get_as_string(remote_comp);
        char *object = e_cal_component_get_as_string(comp);

        if (!attachments_converted)
        {
            goto next;
        }

        // Attachments must reach the server before the referencing object.
        if (state == E_CAL_COMPONENT_CACHE_STATE_CREATED || state == E_CAL_COMPONENT_CACHE_STATE_MODIFIED)
        {
            if (!e_cal_backend_3e_upload_attachments(cb, remote_comp, &local_err))
            {
                e_cal_backend_notify_gerror_error(E_CAL_BACKEND(cb), "3e attachemnts sync failure", local_err);
                g_clear_error(&local_err);
                goto next;
            }
        }

        switch (state)
        {
        case E_CAL_COMPONENT_CACHE_STATE_CREATED:
        {
            ESClient_addObject(cb->priv->conn, cb->priv->calspec, remote_object, &local_err);
            if (local_err)
            {
                e_cal_backend_notify_gerror_error(E_CAL_BACKEND(cb), "3e sync failure", local_err);
                g_clear_error(&local_err);
                break;
            }

            // Re-announce the component (its cache state was cleared above).
            char *new_object = e_cal_component_get_as_string(comp);
            e_cal_backend_notify_object_modified(E_CAL_BACKEND(cb), object, new_object);
            g_free(new_object);

            // Store the now-clean component back into the cache.
            g_static_rw_lock_writer_lock(&cb->priv->cache_lock);
            e_cal_backend_cache_put_component(cb->priv->cache, comp);
            g_static_rw_lock_writer_unlock(&cb->priv->cache_lock);
            break;
        }

        case E_CAL_COMPONENT_CACHE_STATE_MODIFIED:
        {
            ESClient_updateObject(cb->priv->conn, cb->priv->calspec, remote_object, &local_err);
            if (local_err)
            {
                e_cal_backend_notify_gerror_error(E_CAL_BACKEND(cb), "3e sync failure", local_err);
                g_clear_error(&local_err);
                break;
            }

            char *new_object = e_cal_component_get_as_string(comp);
            e_cal_backend_notify_object_modified(E_CAL_BACKEND(cb), object, new_object);
            g_free(new_object);

            g_static_rw_lock_writer_lock(&cb->priv->cache_lock);
            e_cal_backend_cache_put_component(cb->priv->cache, comp);
            g_static_rw_lock_writer_unlock(&cb->priv->cache_lock);
            break;
        }

        case E_CAL_COMPONENT_CACHE_STATE_REMOVED:
        {
            // Detached instances are addressed as "uid@rid" on the server.
            char *oid = id->rid ? g_strdup_printf("%s@%s", id->uid, id->rid) : g_strdup(id->uid);
            ESClient_deleteObject(cb->priv->conn, cb->priv->calspec, oid, &local_err);
            g_free(oid);
            if (local_err)
            {
                // ignore the error if component doesn't exist anymore
                if (local_err->code == ES_XMLRPC_ERROR_UNKNOWN_COMPONENT)
                {
                    g_clear_error(&local_err);
                    local_err = NULL;
                }
                else
                {
                    e_cal_backend_notify_gerror_error(E_CAL_BACKEND(cb), "3e sync failure", local_err);
                    g_clear_error(&local_err);
                    break;
                }
            }

            g_static_rw_lock_writer_lock(&cb->priv->cache_lock);
            e_cal_backend_cache_remove_component(cb->priv->cache, id->uid, id->rid);
            g_static_rw_lock_writer_unlock(&cb->priv->cache_lock);
            break;
        }

        case E_CAL_COMPONENT_CACHE_STATE_NONE:
        default:
            break;
        }

    next:
        g_object_unref(comp);
        g_object_unref(remote_comp);
        e_cal_component_free_id(id);
        g_free(object);
        g_free(remote_object);
    }

    g_list_free(components);
    e_cal_backend_3e_close_connection(cb);

    return TRUE;
}
/* Dispatch @call to the servlet that should handle it.
 *
 * Two modes:
 *  - session mode (X-SESSION-ID + X-SESSION-USE headers): servlets
 *    live in server->sessions, shared across connections; a servlet's
 *    call_mutex serializes calls, and a busy servlet makes us back
 *    off for 10 ms and retry (the `again` loop).  Creation is raced
 *    deliberately -- if another thread registered the same session ID
 *    first, our fresh servlet is discarded and theirs is used.
 *  - persistent mode: servlets are per-connection, looked up (or
 *    created) in conn->servlets, no extra locking.
 *
 * Returns FALSE with an error set on @call when the servlet cannot be
 * resolved or initialized. */
static gboolean _xr_server_servlet_method_call(xr_server* server, xr_server_conn* conn, xr_call* call)
{
  xr_servlet* servlet = NULL;
  xr_servlet* cur_servlet;
  char *servlet_name;

  g_return_val_if_fail(server != NULL, FALSE);
  g_return_val_if_fail(conn != NULL, FALSE);
  g_return_val_if_fail(call != NULL, FALSE);

  /* session mode */
  const char* session_id = xr_http_get_header(conn->http, "X-SESSION-ID");
  if (session_id && xr_http_get_header(conn->http, "X-SESSION-USE"))
  {
    /* lookup servlet in session and try to lock it for call, if call is in progress try again later (1ms) */
again:
    g_static_rw_lock_reader_lock(&server->sessions_lock);
    servlet = g_hash_table_lookup(server->sessions, session_id);
    if (servlet)
    {
      if (!g_mutex_trylock(servlet->call_mutex))
      {
        // Servlet busy with another call: drop the lock, wait, retry.
        g_static_rw_lock_reader_unlock(&server->sessions_lock);
        g_usleep(10000);
        goto again;
      }
    }
    g_static_rw_lock_reader_unlock(&server->sessions_lock);

    /* if servlet does not exist */
    if (servlet == NULL)
    {
      xr_servlet_def* def;

      servlet_name = xr_call_get_servlet_name(call, xr_http_get_resource(conn->http) + 1);
      if (servlet_name == NULL)
      {
        xr_call_set_error(call, -1, "Undefined servlet name.");
        return FALSE;
      }

      def = _find_servlet_def(server, servlet_name);
      if (def == NULL)
      {
        xr_call_set_error(call, -1, "Unknown servlet %s.", servlet_name);
        g_free(servlet_name);
        return FALSE;
      }
      g_free(servlet_name);

      servlet = xr_servlet_new(def, conn);
      if (servlet == NULL)
      {
        xr_call_set_error(call, -1, "Servlet initialization failed.");
        return FALSE;
      }

      g_static_rw_lock_writer_lock(&server->sessions_lock);
      /* user might have used same session ID to create servlet in other thread, check for this situation */
      cur_servlet = g_hash_table_lookup(server->sessions, session_id);
      if (cur_servlet)
      {
        // Lost the creation race: free ours, adopt the winner's.
        xr_servlet_free_fini(servlet);
        servlet = cur_servlet;
      }
      else
      {
        g_hash_table_replace(server->sessions, g_strdup(session_id), servlet);
      }
      /* this will block sessions ht access until servlet call completes, if servlet was found in other thread, which should be rare occurrance */
      if (!g_mutex_trylock(servlet->call_mutex))
      {
        g_static_rw_lock_writer_unlock(&server->sessions_lock);
        g_usleep(10000);
        goto again;
      }
      g_static_rw_lock_writer_unlock(&server->sessions_lock);
    }

    // call_mutex is held here; released after the call completes.
    servlet->conn = conn;
    servlet->last_used = time(NULL);
    gboolean rs = _xr_servlet_do_call(servlet, call);
    g_mutex_unlock(servlet->call_mutex);
    return rs;
  }

  /* persistent mode */
  /* get xr_servlet object for current connection and given servlet name */
  servlet_name = xr_call_get_servlet_name(call, xr_http_get_resource(conn->http) + 1);
  if (servlet_name == NULL)
  {
    xr_call_set_error(call, -1, "Undefined servlet name.");
    return FALSE;
  }

  servlet = xr_server_conn_find_servlet(conn, servlet_name);
  if (servlet == NULL)
  {
    xr_servlet_def* def = _find_servlet_def(server, servlet_name);
    if (def == NULL)
    {
      xr_call_set_error(call, -1, "Unknown servlet %s.", servlet_name);
      g_free(servlet_name);
      return FALSE;
    }

    servlet = xr_servlet_new(def, conn);
    if (servlet == NULL)
    {
      xr_call_set_error(call, -1, "Servlet initialization failed.");
      g_free(servlet_name);
      return FALSE;
    }

    // Cache the new servlet on the connection for later calls.
    g_ptr_array_add(conn->servlets, servlet);
  }
  g_free(servlet_name);

  return _xr_servlet_do_call(servlet, call);
}