/* Register a pending callback for @job under a fresh unique id.
 * Returns FALSE (callback not registered) when the job has already
 * been cancelled. */
gboolean
_mate_vfs_async_job_add_callback (MateVFSJob *job, MateVFSNotifyResult *notify_result)
{
	gboolean job_is_dead;

	g_static_mutex_lock (&async_job_callback_map_lock);

	g_assert (!async_job_map_shutting_down);

	/* Hand every callback its own unique id; keying the map on the
	 * notify_result pointer itself could alias a recycled allocation. */
	notify_result->callback_id = ++async_job_callback_map_next_id;

	JOB_DEBUG (("adding callback %d ", notify_result->callback_id));

	/* Lazily create the callback map the first time it is needed. */
	if (async_job_callback_map == NULL) {
		async_job_callback_map = g_hash_table_new (NULL, NULL);
	}

	/* Holding async_job_callback_map_lock makes the cancelled check
	 * and the insertion one atomic step with respect to
	 * cancellation of the job and its callbacks. */
	job_is_dead = job->cancelled;
	if (!job_is_dead) {
		g_hash_table_insert (async_job_callback_map,
				     GUINT_TO_POINTER (notify_result->callback_id),
				     notify_result);
	}

	g_static_mutex_unlock (&async_job_callback_map_lock);

	return !job_is_dead;
}
/* Remove a finished job's handle from the global job map.
 * Returns TRUE if the job was still registered, FALSE if it had
 * already disappeared from the map.  Takes and releases the job map
 * lock itself, so the caller must not already hold it. */
gboolean
_mate_vfs_async_job_completed (MateVFSAsyncHandle *handle)
{
	MateVFSJob *job;

	_mate_vfs_async_job_map_lock ();

	JOB_DEBUG (("%d", GPOINTER_TO_UINT (handle)));

	/* Job done, remove it's id from the map */
	g_assert (async_job_map != NULL);

	job = _mate_vfs_async_job_map_get_job (handle);
	if (job != NULL) {
		g_hash_table_remove (async_job_map, handle);
	}

	/* During shutdown the map stays alive until every job has
	 * drained; whichever call removes the last entry tears it down. */
	if (async_job_map_shutting_down && g_hash_table_size (async_job_map) == 0) {
		/* We were the last active job, turn the lights off. */
		mate_vfs_async_job_map_destroy ();
	}

	_mate_vfs_async_job_map_unlock ();

	return job != NULL;
}
/* Mainloop-side half of the subdir check: if the current item turned
 * out to have no visible sub-directory, drop its expander place-holder
 * children.  Always ends by calling subdir_check_finish () and returns
 * its result. */
static gboolean subdir_check_remove_place_holder (FmDirTreeModel *dir_tree_model)
{
    GList *item_list = dir_tree_model->current_subdir_check;

    if (item_list != NULL
        && !g_cancellable_is_cancelled (dir_tree_model->subdir_cancellable))
    {
        FmDirTreeItem *dir_tree_item = (FmDirTreeItem*) item_list->data;

        // Only items that actually have child rows need the place holder removed.
        if (dir_tree_item->children)
        {
            // Remove the place holder...
            JOB_DEBUG ("JOB_DEBUG: subdir_check_remove_place_holder: remove place holder for %s\n\n",
                       fm_file_info_get_disp_name (dir_tree_item->file_info));

            GtkTreePath *tree_path = fm_dir_tree_model_item_to_tree_path (dir_tree_model, item_list);
            fm_dir_tree_model_remove_all_children (dir_tree_model, item_list, tree_path);
            gtk_tree_path_free (tree_path);

            // why i added this ??? doesn't seem needed...
            //~ fm_dir_tree_model_remove_item (dir_tree_model, item_list);
        }
    }

    return subdir_check_finish (dir_tree_model);
}
/* Drop a delivered (or cancelled) callback from the callback map.
 * @callback_id: id previously assigned by _mate_vfs_async_job_add_callback. */
void
_mate_vfs_async_job_remove_callback (guint callback_id)
{
	JOB_DEBUG (("removing callback %d ", callback_id));

	g_static_mutex_lock (&async_job_callback_map_lock);

	/* async_job_callback_map is shared state guarded by
	 * async_job_callback_map_lock, so only examine it while holding
	 * the lock (the previous unlocked g_assert was a racy read). */
	g_assert (async_job_callback_map != NULL);

	g_hash_table_remove (async_job_callback_map, GUINT_TO_POINTER (callback_id));

	g_static_mutex_unlock (&async_job_callback_map_lock);
}
/* Thread-pool entry point: runs one queued MateVFSJob to completion.
 * @data: the MateVFSJob to execute; @user_data: unused.
 * Lock protocol: the job map lock must be taken before job->job_lock
 * whenever both are needed — this function follows that ordering on
 * both acquisition paths below. */
static void
thread_entry_point (gpointer data, gpointer user_data)
{
	MateVFSJob *job;
	gboolean complete;

	job = (MateVFSJob *) data;
	/* job map must always be locked before the job_lock
	 * if both locks are needed */
	_mate_vfs_async_job_map_lock ();

	/* The job may have been cancelled and unregistered between being
	 * queued and this thread picking it up. */
	if (_mate_vfs_async_job_map_get_job (job->job_handle) == NULL) {
		JOB_DEBUG (("job already dead, bail %p", job->job_handle));
		_mate_vfs_async_job_map_unlock ();

		/* FIXME: doesn't that leak here? */
		return;
	}

	JOB_DEBUG (("locking job_lock %p", job->job_handle));
	g_mutex_lock (job->job_lock);
	/* Drop the map lock before the (potentially long) execute call;
	 * job_lock alone keeps the job body consistent from here on. */
	_mate_vfs_async_job_map_unlock ();

	_mate_vfs_job_execute (job);
	complete = _mate_vfs_job_complete (job);

	JOB_DEBUG (("Unlocking access lock %p", job->job_handle));
	g_mutex_unlock (job->job_lock);

	if (complete) {
		/* Re-acquire the map lock before tearing the job down so
		 * no other thread can look the handle up mid-destruction. */
		_mate_vfs_async_job_map_lock ();

		JOB_DEBUG (("job %p done, removing from map and destroying",
			    job->job_handle));

		_mate_vfs_async_job_completed (job->job_handle);
		_mate_vfs_job_destroy (job);

		_mate_vfs_async_job_map_unlock ();
	}
}
/* GHFunc for async_job_callback_map: flag as cancelled every pending
 * callback belonging to the job whose handle is passed as @user_data. */
static void
callback_map_cancel_one (gpointer key, gpointer value, gpointer user_data)
{
	MateVFSNotifyResult *result = (MateVFSNotifyResult *) value;
	MateVFSAsyncHandle *target = (MateVFSAsyncHandle *) user_data;

	if (result->job_handle != target) {
		return;
	}

	JOB_DEBUG (("cancelling callback %u - job %u cancelled",
		    GPOINTER_TO_UINT (key),
		    GPOINTER_TO_UINT (user_data)));
	result->cancelled = TRUE;
}
/* Mark a job (when still alive) and all of its queued callbacks as
 * cancelled.  @job may be NULL when only the callbacks need flagging.
 * The whole operation happens under async_job_callback_map_lock so it
 * is atomic with respect to callback registration. */
void
_mate_vfs_async_job_cancel_job_and_callbacks (MateVFSAsyncHandle *job_handle, MateVFSJob *job)
{
	g_static_mutex_lock (&async_job_callback_map_lock);

	if (job != NULL) {
		job->cancelled = TRUE;
	}

	if (async_job_callback_map != NULL) {
		g_hash_table_foreach (async_job_callback_map,
				      callback_map_cancel_one,
				      job_handle);
	} else {
		JOB_DEBUG (("job %u, no callbacks scheduled yet",
			    GPOINTER_TO_UINT (job_handle)));
	}

	g_static_mutex_unlock (&async_job_callback_map_lock);
}
/**
 * gnome_vfs_async_cancel:
 * @handle: handle of the async operation to be cancelled.
 *
 * Cancel an asynchronous operation and close all its callbacks.
 *
 * In a single-threaded application it is guaranteed that if this is
 * called before the operation-finished callback has been called, that
 * callback will never be called.
 *
 * In a multithreaded application — more precisely, whenever
 * gnome_vfs_async_cancel() runs on a thread other than the one driving
 * the glib mainloop — there is a race: if the operation-finished
 * callback was just dispatched, the operation may still be cancelled
 * while the callback runs anyway.  Callers must therefore cope with
 * the callback firing even though another thread cancelled the
 * operation.
 *
 * One way to cope is to mark the callback_data structure as destroyed
 * and queue an idle handler that performs the actual freeing.  The
 * idle handler is guaranteed to run after the callback has executed,
 * so by then it is safe to free the callback_data; the callback
 * handler itself must do nothing when it sees the destroyed mark.
 *
 * This is clearly not ideal for multithreaded applications, but it is
 * as good as the current API allows.  Eventually the API will have to
 * change to make this work better.
 */
void
gnome_vfs_async_cancel (GnomeVFSAsyncHandle *handle)
{
	GnomeVFSJob *job;

	_gnome_vfs_async_job_map_lock ();

	job = _gnome_vfs_async_job_map_get_job (handle);
	if (job != NULL) {
		/* Cancel the job in progress.  OK to do outside of
		 * job->job_lock: job lifetime is protected by
		 * _gnome_vfs_async_job_map_lock. */
		_gnome_vfs_job_module_cancel (job);
		_gnome_vfs_async_job_cancel_job_and_callbacks (handle, job);
	} else {
		JOB_DEBUG (("job %u - job no longer exists",
			    GPOINTER_TO_UINT (handle)));
		/* have to cancel the callbacks because they still can be pending */
		_gnome_vfs_async_job_cancel_job_and_callbacks (handle, NULL);
	}

	_gnome_vfs_async_job_map_unlock ();
}
/* GIOSchedulerJob worker: checks whether the directory queued at the
 * head of dir_tree_model->subdir_checks contains at least one visible
 * sub-directory.  When it does not, schedules removal of the item's
 * expander place holder on the mainloop; otherwise finishes directly.
 * Returns the GIOScheduler continuation value (result of the mainloop
 * dispatch, or of subdir_check_finish()). */
static gboolean subdir_check_job (GIOSchedulerJob *job, GCancellable *cancellable, gpointer user_data)
{
    FmDirTreeModel *dir_tree_model = FM_DIR_TREE_MODEL (user_data);

    // Lock ----------------------------------------------------------------------------------------
    g_mutex_lock (dir_tree_model->subdir_checks_mutex);

    GList *item_list = (GList*) g_queue_pop_head (&dir_tree_model->subdir_checks);

    // Guard against an empty queue: popping NULL and dereferencing it would crash
    // (and previously would have crashed with the mutex still held).
    if (item_list == NULL)
    {
        g_mutex_unlock (dir_tree_model->subdir_checks_mutex);
        return subdir_check_finish (dir_tree_model);
    }

    FmDirTreeItem *dir_tree_item = (FmDirTreeItem*) item_list->data;
    dir_tree_model->current_subdir_check = item_list;

    // If the directory is a Drive, get it's target directory...
    GFile *gfile;
    gboolean is_mountable = fm_file_info_is_mountable (dir_tree_item->file_info);
    if (is_mountable)
    {
        gfile = g_file_new_for_path (fm_file_info_get_target (dir_tree_item->file_info));
    }
    else
    {
        gfile = fm_path_to_gfile (fm_file_info_get_path (dir_tree_item->file_info));
    }

    g_mutex_unlock (dir_tree_model->subdir_checks_mutex);
    // Unlock --------------------------------------------------------------------------------------

    /**
     * Parse input directory...
     **/
    char *directory = fm_file_info_get_name (dir_tree_item->file_info);

    JOB_DEBUG ("\n----------------------------------------------------------------------------------------------\n");
    JOB_DEBUG ("JOB_DEBUG: subdir_check_job: check \"%s\"\n", directory);
    JOB_DEBUG ("----------------------------------------------------------------------------------------------\n");

    if (is_mountable)
    {
        JOB_DEBUG ("JOB_DEBUG: subdir_check_job: %s is mountable type !!!\n\n", directory);
    }

    GFileEnumerator *enumerator = g_file_enumerate_children (gfile,
                                                             G_FILE_ATTRIBUTE_STANDARD_NAME","
                                                             G_FILE_ATTRIBUTE_STANDARD_TYPE","
                                                             G_FILE_ATTRIBUTE_STANDARD_IS_HIDDEN,
                                                             0, cancellable, NULL);
    gboolean has_subdir = FALSE;
    if (enumerator)
    {
        // Scan entries until a visible (or any, when show_hidden) directory shows up.
        while (!g_cancellable_is_cancelled (cancellable))
        {
            GFileInfo *gfile_info = g_file_enumerator_next_file (enumerator, cancellable, NULL);
            if (!gfile_info)
                break;

            GFileType g_file_type = g_file_info_get_file_type (gfile_info);
            gboolean is_hidden = g_file_info_get_is_hidden (gfile_info);
            g_object_unref (gfile_info);

            if (g_file_type == G_FILE_TYPE_DIRECTORY || g_file_type == G_FILE_TYPE_MOUNTABLE)
            {
                if (dir_tree_model->show_hidden || !is_hidden)
                {
                    JOB_DEBUG ("JOB_DEBUG: subdir_check_job: A directory found in \"%s\" !!!\n\n", directory);
                    has_subdir = TRUE;
                    break;
                }
            }
        }

        GError *error = NULL;
        g_file_enumerator_close (enumerator, cancellable, &error);
        // Close failure is non-fatal here, but the GError must not leak.
        g_clear_error (&error);
        g_object_unref (enumerator);
    }
    else
    {
        JOB_DEBUG ("JOB_DEBUG: subdir_check_job: Error: can't read \"%s\"...\n", directory);
    }

    g_object_unref (gfile);

    if (!has_subdir)
    {
        JOB_DEBUG ("JOB_DEBUG: subdir_check_job: No directory found in \"%s\"\n\t\t\t > Remove place holder\n\n", directory);
        return g_io_scheduler_job_send_to_mainloop (job,
                                                    (GSourceFunc) subdir_check_remove_place_holder,
                                                    dir_tree_model, NULL);
    }

    return subdir_check_finish (dir_tree_model);
}