/**
 * ttrss_source_merge_categories:
 * @source:   the TinyTinyRSS source
 * @parent:   local parent node to attach newly found folders to
 * @parentId: remote id of the parent category
 * @items:    JSON array node holding the category/feed children
 *
 * Recursively merges a remote TinyTinyRSS category tree into the local
 * node tree. Categories become folders (tracked in categoryToNode /
 * nodeToCategory), feeds are remembered in the categories hash so that
 * the later subscription list processing can place them correctly.
 */
static void
ttrss_source_merge_categories (ttrssSourcePtr source, nodePtr parent, gint parentId, JsonNode *items)
{
	JsonArray	*array = json_node_get_array (items);
	GList		*iter, *elements;

	elements = iter = json_array_get_elements (array);
	while (iter) {
		JsonNode *node = (JsonNode *)iter->data;
		gint id = json_get_int (node, "bare_id");

		/* negative ids are virtual feeds/categories ("Special", ...) -> skip */
		if (id > 0) {
			const gchar *type = json_get_string (node, "type");
			const gchar *name = json_get_string (node, "name");

			/* ignore everything without a name */
			if (name) {
				if (type && g_str_equal (type, "category")) {
					nodePtr folder;

					/* FIX: id is a gint, so use %d (was %ld -> undefined behaviour) */
					debug2 (DEBUG_UPDATE, "TinyTinyRSS category id=%d name=%s", id, name);

					folder = ttrss_source_find_or_create_folder (name, parent);

					g_hash_table_insert (source->categoryToNode, GINT_TO_POINTER (id), folder);
					g_hash_table_insert (source->nodeToCategory, folder, GINT_TO_POINTER (id));

					/* Process child categories recursively */
					if (json_get_node (node, "items"))
						ttrss_source_merge_categories (source, folder, id, json_get_node (node, "items"));
				} else {
					/* A feed: remember which category it belongs to */
					debug3 (DEBUG_UPDATE, "TinyTinyRSS feed=%s folder=%d (%d)", name, parentId, id);
					g_hash_table_insert (source->categories, GINT_TO_POINTER (id), GINT_TO_POINTER (parentId));
				}
			}
		}

		iter = g_list_next (iter);
	}
	g_list_free (elements);
}
/**
 * reedah_item_callback:
 * @node: the JSON item node
 * @item: the item to update
 *
 * Per-item post-processing for Reedah items: extracts the item link
 * from "canonical[0]/@href" and derives the read state from the
 * Google-Reader-style category list.
 */
static void
reedah_item_callback (JsonNode *node, itemPtr item)
{
	JsonNode	*canonical, *categories;
	GList		*elements, *iter;

	/* Determine link: path is "canonical[0]/@href" */
	canonical = json_get_node (node, "canonical");
	if (canonical && JSON_NODE_TYPE (canonical) == JSON_NODE_ARRAY) {
		iter = elements = json_array_get_elements (json_node_get_array (canonical));
		while (iter) {
			const gchar *href = json_get_string ((JsonNode *)iter->data, "href");
			if (href) {
				item_set_source (item, href);
				break;
			}
			iter = g_list_next (iter);
		}
		g_list_free (elements);
	}

	/* Determine read state: check for category with ".*state/com.google/read" */
	categories = json_get_node (node, "categories");
	if (categories && JSON_NODE_TYPE (categories) == JSON_NODE_ARRAY) {
		/* FIX: iterate the "categories" array (previously iterated "canonical"
		   again, which broke read-state detection and crashed when canonical
		   was missing) */
		iter = elements = json_array_get_elements (json_node_get_array (categories));
		while (iter) {
			const gchar *category = json_node_get_string ((JsonNode *)iter->data);
			if (category) {
				item->readStatus = (strstr (category, "state\\/com.google\\/read") != NULL);
				break;
			}
			iter = g_list_next (iter);
		}
		g_list_free (elements);
	}
}
/**
 * json_api_get_node:
 * @parent:  the JSON node to start traversal at
 * @mapping: a "/"-separated path (e.g. "a/b/c"); the last path element
 *           is intentionally NOT resolved (it is consumed by the value
 *           getters operating on the returned node)
 *
 * Walks a slash-separated path through a JSON document.
 *
 * Returns: the node reached, or NULL if a path element was not found
 */
JsonNode *
json_api_get_node (JsonNode *parent, const gchar *mapping)
{
	JsonNode	*node = parent;
	gchar		**step;
	/* FIX: g_strsplit() copies its input, the former g_strdup() leaked */
	gchar		**steps = g_strsplit (mapping, "/", 0);

	step = steps;
	if (!*step) {
		/* FIX: free the (empty) split result before the early return */
		g_strfreev (steps);
		return node;
	}

	/* resolve all but the last path element */
	while (*(step + 1) && node) {
		node = json_get_node (node, *step);
		step++;
	}

	g_strfreev (steps);
	return node;
}
/**
 * google_subscription_opml_cb:
 *
 * Processes the Inoreader subscription list reply: merges all feeds
 * found in the JSON into the local tree and removes local feeds that
 * are no longer present remotely.
 */
static void
google_subscription_opml_cb (subscriptionPtr subscription, const struct updateResult * const result, updateFlags flags)
{
	InoreaderSourcePtr source = (InoreaderSourcePtr) subscription->node->data;

	subscription->updateJob = NULL;

	// FIXME: the following code is very similar to ttrss!
	if (result->data && result->httpstatus == 200) {
		JsonParser *parser = json_parser_new ();

		if (json_parser_load_from_data (parser, result->data, -1, NULL)) {
			JsonArray	*array = json_node_get_array (json_get_node (json_parser_get_root (parser), "subscriptions"));
			GList		*iter, *elements;
			GSList		*siter;

			/* We expect something like this:

			   [{"id":"feed\/http:\/\/rss.slashdot.org\/Slashdot\/slashdot",
			     "title":"Slashdot",
			     "categories":[],
			     "firstitemmsec":"1368112925514",
			     "htmlUrl":"null"},
			    ...

			   Note that the data doesn't contain an URL.
			   We recover it from the id field. */
			elements = iter = json_array_get_elements (array);
			/* Add all new nodes we find */
			while (iter) {
				JsonNode *node = (JsonNode *)iter->data;

				/* ignore everything without a feed url */
				if (json_get_string (node, "id")) {
					inoreader_source_merge_feed (source,
					                             json_get_string (node, "id") + 5,	/* strip "feed/" prefix */	// FIXME: Unescape string!
					                             json_get_string (node, "title"),
					                             json_get_string (node, "id"));
				}
				iter = g_list_next (iter);
			}
			g_list_free (elements);

			/* Remove old nodes we cannot find anymore */
			siter = source->root->children;
			while (siter) {
				nodePtr node = (nodePtr)siter->data;
				/* FIX: fetch the next element before a possible removal,
				   feedlist_node_removed() unlinks/frees the current one */
				GSList *next = g_slist_next (siter);
				gboolean found = FALSE;

				elements = iter = json_array_get_elements (array);
				while (iter) {
					JsonNode *json_node = (JsonNode *)iter->data;
					const gchar *id = json_get_string (json_node, "id");

					// FIXME: Compare with unescaped string
					/* FIX: guard against entries without an "id" (g_str_equal
					   would crash on NULL) */
					if (id && g_str_equal (node->subscription->source, id + 5)) {
						debug1 (DEBUG_UPDATE, "node: %s", node->subscription->source);
						found = TRUE;
						break;
					}
					iter = g_list_next (iter);
				}
				g_list_free (elements);

				if (!found)
					feedlist_node_removed (node);

				siter = next;
			}

			opml_source_export (subscription->node);	/* save new feeds to feed list */
			subscription->node->available = TRUE;
		} else {
			g_warning ("Invalid JSON returned on Inoreader feed list request! >>>%s<<<", result->data);
		}

		g_object_unref (parser);
	} else {
		subscription->node->available = FALSE;
		debug0 (DEBUG_UPDATE, "inoreader_subscription_cb(): ERROR: failed to get subscription list!");
	}

	if (!(flags & INOREADER_SOURCE_UPDATE_ONLY_LIST))
		node_foreach_child_data (subscription->node, node_update_subscription, GUINT_TO_POINTER (0));
}
/**
 * ttrss_subscription_process_update_result:
 *
 * Processes the TinyTinyRSS category tree (getFeedTree) reply: builds
 * the local folder structure and remembers feed->category mappings,
 * then triggers the actual subscription list fetch.
 */
static void
ttrss_subscription_process_update_result (subscriptionPtr subscription, const struct updateResult * const result, updateFlags flags)
{
	ttrssSourcePtr source = (ttrssSourcePtr) subscription->node->data;

	debug1 (DEBUG_UPDATE, "ttrss_subscription_process_update_result: %s", result->data);

	if (result->data && result->httpstatus == 200) {
		JsonParser *parser = json_parser_new ();

		if (json_parser_load_from_data (parser, result->data, -1, NULL)) {
			JsonNode *content = json_get_node (json_parser_get_root (parser), "content");
			JsonNode *categories, *items;

			/* We expect a category tree like this (abbreviated):

			   {"categories":{"identifier":"id","label":"name","items":[
			      {"id":"CAT:-1","items":[ ...virtual feeds (bare_id < 0)... ],
			       "name":"Special","type":"category","bare_id":-1},
			      {"id":"CAT:1","bare_id":1,"name":"OSS","items":[
			         {"id":"CAT:2","bare_id":2,"name":"News","items":[ ...feeds... ],"type":"category"},
			         {"id":"FEED:6","bare_id":6,"name":"Ars Technica", ...},
			         {"id":"FEED:7","bare_id":7,"name":"LZone", ...}],
			       "type":"category"},
			      {"id":"CAT:0","bare_id":0,"name":"Uncategorized",
			       "items":[ ...feeds... ],"type":"category"}]}}

			   So we need to:

			   - ignore all negative categories
			   - treat feeds in category #0 as root level feeds
			   - traverse all categories > #1
			   - remember category ids in source->categories hash

			   As we need to perform a subscription list anyway we
			   can ignore all feed infos */

			if (!content) {
				debug0 (DEBUG_UPDATE, "ttrss_subscription_process_update_result(): Failed to get subscription list!");
				subscription->node->available = FALSE;
				goto cleanup;	/* FIX: was a plain return leaking the parser */
			}

			categories = json_get_node (content, "categories");
			if (!categories) {
				debug0 (DEBUG_UPDATE, "ttrss_subscription_process_update_result(): Failed to get categories list: no 'categories' element found!");
				subscription->node->available = FALSE;
				goto cleanup;	/* FIX: parser leak */
			}

			items = json_get_node (categories, "items");
			if (!items || (JSON_NODE_TYPE (items) != JSON_NODE_ARRAY)) {
				/* FIX: message said "no 'categories' element" (copy-paste) */
				debug0 (DEBUG_UPDATE, "ttrss_subscription_process_update_result(): Failed to get categories list: no 'items' array found!");
				subscription->node->available = FALSE;
				goto cleanup;	/* FIX: parser leak */
			}

			/* Process categories tree recursively */
			g_hash_table_remove_all (source->categories);
			g_hash_table_insert (source->categoryToNode, GINT_TO_POINTER (0), source->root);
			ttrss_source_merge_categories (source, source->root, 0, items);

			/* And trigger the actual feed fetching */
			ttrss_source_update_subscription_list (source, subscription);
		} else {
			g_warning ("Invalid JSON returned on TinyTinyRSS request! >>>%s<<<", result->data);
		}
cleanup:
		g_object_unref (parser);
	} else {
		subscription->node->available = FALSE;
		debug0 (DEBUG_UPDATE, "ttrss_subscription_process_update_result(): Failed to get categories list!");
	}
}
/**
 * ttrss_source_subscription_list_cb:
 *
 * Processes the TinyTinyRSS subscription list reply: merges all feeds
 * found in the JSON array into the local tree, drops vanished feeds and
 * optionally schedules feed updates.
 */
static void
ttrss_source_subscription_list_cb (const struct updateResult * const result, gpointer user_data, guint32 flags)
{
	subscriptionPtr subscription = (subscriptionPtr) user_data;
	ttrssSourcePtr source = (ttrssSourcePtr) subscription->node->data;

	debug1 (DEBUG_UPDATE, "ttrss_subscription_cb(): %s", result->data);

	subscription->updateJob = NULL;

	if (result->data && result->httpstatus == 200) {
		JsonParser *parser = json_parser_new ();

		if (json_parser_load_from_data (parser, result->data, -1, NULL)) {
			JsonNode	*content = json_get_node (json_parser_get_root (parser), "content");
			JsonArray	*array;
			GList		*iter, *elements;

			/* We expect something like this:

			   [ {"feed_url":"http://feeds.arstechnica.com/arstechnica/everything",
			      "title":"Ars Technica",
			      "id":6,
			      "unread":20,
			      "has_icon":true,
			      "cat_id":0,
			      "last_updated":1287853210},
			     {"feed_url":"http://rss.slashdot.org/Slashdot/slashdot",
			      "title":"Slashdot",
			      "id":5,
			      "unread":33,
			      "has_icon":true,
			      "cat_id":0,
			      "last_updated":1287853206},
			     [...]

			   Or an error message that could look like this:

			      {"seq":null,"status":1,"content":{"error":"NOT_LOGGED_IN"}}
			 */
			if (!content || (JSON_NODE_TYPE (content) != JSON_NODE_ARRAY)) {
				debug0 (DEBUG_UPDATE, "ttrss_subscription_cb(): Failed to get subscription list!");
				subscription->node->available = FALSE;
				g_object_unref (parser);	/* FIX: was leaked on this early return */
				return;
			}

			array = json_node_get_array (content);
			elements = iter = json_array_get_elements (array);
			/* Add all new nodes we find */
			while (iter) {
				JsonNode *node = (JsonNode *)iter->data;

				/* ignore everything without a feed url */
				if (json_get_string (node, "feed_url")) {
					ttrss_source_merge_feed (source,
					                         json_get_string (node, "feed_url"),
					                         json_get_string (node, "title"),
					                         json_get_int (node, "id"));
				}
				iter = g_list_next (iter);
			}
			g_list_free (elements);

			/* Remove old nodes we cannot find anymore */
			node_foreach_child_data (source->root, ttrss_source_check_node_for_removal, array);

			/* Save new subscription tree to OPML cache file */
			opml_source_export (subscription->node);
			subscription->node->available = TRUE;
		} else {
			/* FIX: typo "TinyTinyRSSS" */
			g_warning ("Invalid JSON returned on TinyTinyRSS request! >>>%s<<<", result->data);
		}

		g_object_unref (parser);
	} else {
		subscription->node->available = FALSE;
		debug0 (DEBUG_UPDATE, "ttrss_subscription_cb(): ERROR: failed to get TinyTinyRSS subscription list!");
	}

	if (!(flags & TTRSS_SOURCE_UPDATE_ONLY_LIST))
		node_foreach_child_data (subscription->node, node_update_subscription, GUINT_TO_POINTER (0));
}
/**
 * json_api_get_items:
 * @json:     the JSON document text
 * @root:     path to the item array within the document
 * @mapping:  field mapping describing where item attributes are found
 * @callback: optional per-item post-processing callback (may be NULL)
 *
 * Generic JSON feed parser: converts the item array found at @root
 * into a list of items according to @mapping.
 *
 * Returns: a GList of itemPtr (owned by the caller), or NULL
 */
GList *
json_api_get_items (const gchar *json, const gchar *root, jsonApiMapping *mapping, jsonApiItemCallbackFunc callback)
{
	GList		*items = NULL;
	JsonParser	*parser = json_parser_new ();

	if (json_parser_load_from_data (parser, json, -1, NULL)) {
		JsonNode *rootNode = json_get_node (json_parser_get_root (parser), root);

		/* FIX: guard the root lookup (json_node_get_array() on a NULL /
		   non-array node crashed before) */
		if (rootNode && JSON_NODE_TYPE (rootNode) == JSON_NODE_ARRAY) {
			JsonArray	*array = json_node_get_array (rootNode);
			GList		*elements = json_array_get_elements (array);
			GList		*iter = elements;

			debug1 (DEBUG_PARSING, "JSON API: found items root node \"%s\"", root);

			while (iter) {
				JsonNode *node = (JsonNode *)iter->data;
				itemPtr item = item_new ();
				const gchar *content;
				const gchar *author;

				/* Parse default feed fields */
				item_set_id     (item, json_api_get_string (node, mapping->id));
				item_set_title  (item, json_api_get_string (node, mapping->title));
				item_set_source (item, json_api_get_string (node, mapping->link));

				item->time       = json_api_get_int (node, mapping->updated);
				item->readStatus = json_api_get_bool (node, mapping->read);
				item->flagStatus = json_api_get_bool (node, mapping->flag);

				/* some APIs provide "unread" instead of "read" semantics */
				if (mapping->negateRead)
					item->readStatus = !item->readStatus;

				/* Handle encoded content */
				content = json_api_get_string (node, mapping->description);
				if (mapping->xhtml) {
					gchar *xhtml = xhtml_extract_from_string (content, NULL);
					item_set_description (item, xhtml);
					xmlFree (xhtml);
				} else {
					item_set_description (item, content);
				}

				/* Optional meta data */
				author = json_api_get_string (node, mapping->author);
				if (author)
					item->metadata = metadata_list_append (item->metadata, "author", author);

				items = g_list_append (items, (gpointer)item);

				/* Allow optional item callback to process stuff */
				if (callback)
					(*callback) (node, item);

				iter = g_list_next (iter);
			}
			g_list_free (elements);
		} else {
			debug1 (DEBUG_PARSING, "JSON API: could not find items root node \"%s\"", root);
		}
	} else {
		debug1 (DEBUG_PARSING, "Could not parse JSON \"%s\"", json);
	}

	/* FIX: unref the parser on all paths (was leaked on parse failure) */
	g_object_unref (parser);

	return items;
}
/**
 * ttrss_feed_subscription_process_update_result:
 *
 * Processes a TinyTinyRSS getHeadlines reply for a single feed:
 * converts the JSON item array into items and merges them into the
 * feed's item cache.
 */
static void
ttrss_feed_subscription_process_update_result (subscriptionPtr subscription, const struct updateResult* const result, updateFlags flags)
{
	if (result->data && result->httpstatus == 200) {
		JsonParser *parser = json_parser_new ();
		JsonNode *content = NULL;

		if (json_parser_load_from_data (parser, result->data, -1, NULL))
			content = json_get_node (json_parser_get_root (parser), "content");

		/* FIX: guard "content" being missing or not an array (API error
		   replies carry an object here) — previously crashed in
		   json_node_get_array() */
		if (content && JSON_NODE_TYPE (content) == JSON_NODE_ARRAY) {
			JsonArray	*array = json_node_get_array (content);
			GList		*elements = json_array_get_elements (array);
			GList		*iter = elements;
			GList		*items = NULL;

			/* We expect to get something like this

			   [{"id":118,
			     "unread":true,
			     "marked":false,
			     "updated":1287927675,
			     "is_updated":false,
			     "title":"IBM Says New ...",
			     "link":"http:\/\/rss.slashdot.org\/~r\/Slashdot\/slashdot\/~3\/ALuhNKO3NV4\/story01.htm",
			     "feed_id":"5",
			     "content":"coondoggie writes ..."
			    },
			    {"id":117,
			     "unread":true,
			     "marked":false,
			     "updated":1287923814,
			     [...]
			 */
			while (iter) {
				JsonNode *node = (JsonNode *)iter->data;
				itemPtr item = item_new ();
				gchar *id;
				const gchar *content_str;
				gchar *xhtml;

				id = g_strdup_printf ("%" G_GINT64_FORMAT, json_get_int (node, "id"));
				item_set_id (item, id);
				g_free (id);

				item_set_title (item, json_get_string (node, "title"));
				item_set_source (item, json_get_string (node, "link"));

				content_str = json_get_string (node, "content");
				xhtml = xhtml_extract_from_string (content_str, NULL);
				item_set_description (item, xhtml);
				xmlFree (xhtml);

				item->time = json_get_int (node, "updated");

				/* TTRSS reports "unread", we track "read" */
				item->readStatus = !json_get_bool (node, "unread");
				if (json_get_bool (node, "marked"))
					item->flagStatus = TRUE;

				items = g_list_append (items, (gpointer)item);
				iter = g_list_next (iter);
			}
			g_list_free (elements);

			/* merge against feed cache */
			if (items) {
				itemSetPtr itemSet = node_get_itemset (subscription->node);
				gint newCount = itemset_merge_items (itemSet, items, TRUE /* feed valid */, FALSE /* markAsRead */);
				itemlist_merge_itemset (itemSet);
				itemset_free (itemSet);

				feedlist_node_was_updated (subscription->node, newCount);
			}

			subscription->node->available = TRUE;
		} else {
			subscription->node->available = FALSE;
			g_string_append (((feedPtr)subscription->node->data)->parseErrors, _("Could not parse JSON returned by TinyTinyRSS API!"));
		}

		g_object_unref (parser);
	} else {
		subscription->node->available = FALSE;
	}
}
/**
 * theoldreader_subscription_cb:
 *
 * Processes the TheOldReader subscription list reply: merges all feeds
 * found in the JSON into the local tree and removes local feeds that
 * are no longer present remotely.
 */
static void
theoldreader_subscription_cb (subscriptionPtr subscription, const struct updateResult * const result, updateFlags flags)
{
	TheOldReaderSourcePtr source = (TheOldReaderSourcePtr) subscription->node->data;

	debug1 (DEBUG_UPDATE,"theoldreader_subscription_cb(): %s", result->data);

	// FIXME: the following code is very similar to ttrss!
	if (result->data && result->httpstatus == 200) {
		JsonParser *parser = json_parser_new ();

		if (json_parser_load_from_data (parser, result->data, -1, NULL)) {
			JsonArray	*array = json_node_get_array (json_get_node (json_parser_get_root (parser), "subscriptions"));
			GList		*iter, *elements;
			GSList		*siter;

			/* We expect something like this:

			   [{"id":"feed/51d49b79d1716c7b18000025",
			     "title":"LZone",
			     "categories":[],
			     "sortid":"51d49b79d1716c7b18000025",
			     "firstitemmsec":"1371403150181",
			     "url":"http://lzone.de/rss.xml",
			     "htmlUrl":"http://lzone.de",
			     "iconUrl":"http://s.yeoldereader.com/system/uploads/feed/picture/5152/884a/4dce/57aa/7e00/icon_0a6a.ico"},
			    ...
			 */
			elements = iter = json_array_get_elements (array);
			/* Add all new nodes we find */
			while (iter) {
				JsonNode *node = (JsonNode *)iter->data;

				/* ignore everything without a feed url */
				if (json_get_string (node, "url")) {
					theoldreader_source_merge_feed (source,
					                                json_get_string (node, "url"),
					                                json_get_string (node, "title"),
					                                json_get_string (node, "id"));
				}
				iter = g_list_next (iter);
			}
			g_list_free (elements);

			/* Remove old nodes we cannot find anymore */
			siter = source->root->children;
			while (siter) {
				nodePtr node = (nodePtr)siter->data;
				/* FIX: fetch the next element before a possible removal,
				   feedlist_node_removed() unlinks/frees the current one */
				GSList *next = g_slist_next (siter);
				gboolean found = FALSE;

				elements = iter = json_array_get_elements (array);
				while (iter) {
					JsonNode *json_node = (JsonNode *)iter->data;
					const gchar *url = json_get_string (json_node, "url");

					/* FIX: guard against entries without an "url"
					   (g_str_equal would crash on NULL) */
					if (url && g_str_equal (node->subscription->source, url)) {
						debug1 (DEBUG_UPDATE, "node: %s", node->subscription->source);
						found = TRUE;
						break;
					}
					iter = g_list_next (iter);
				}
				g_list_free (elements);

				if (!found)
					feedlist_node_removed (node);

				siter = next;
			}

			opml_source_export (subscription->node);	/* save new feeds to feed list */
			subscription->node->available = TRUE;
		} else {
			g_warning ("Invalid JSON returned on TheOldReader request! >>>%s<<<", result->data);
		}

		g_object_unref (parser);
	} else {
		subscription->node->available = FALSE;
		debug0 (DEBUG_UPDATE, "theoldreader_subscription_cb(): ERROR: failed to get subscription list!");
	}

	if (!(flags & THEOLDREADER_SOURCE_UPDATE_ONLY_LIST))
		node_foreach_child_data (subscription->node, node_update_subscription, GUINT_TO_POINTER (0));
}
/**
 * theoldreader_subscription_cb:
 *
 * Processes the TheOldReader subscription list reply: merges all feeds
 * found in the JSON into the local tree (placing each feed into a
 * folder derived from its first category label), removes vanished
 * feeds and optionally schedules feed updates.
 */
static void
theoldreader_subscription_cb (subscriptionPtr subscription, const struct updateResult * const result, updateFlags flags)
{
	TheOldReaderSourcePtr source = (TheOldReaderSourcePtr) subscription->node->data;

	debug1 (DEBUG_UPDATE,"theoldreader_subscription_cb(): %s", result->data);

	subscription->updateJob = NULL;

	// FIXME: the following code is very similar to ttrss!
	if (result->data && result->httpstatus == 200) {
		JsonParser *parser = json_parser_new ();

		if (json_parser_load_from_data (parser, result->data, -1, NULL)) {
			/* FIX: dropped unused local "GSList *siter" */
			JsonArray	*array = json_node_get_array (json_get_node (json_parser_get_root (parser), "subscriptions"));
			GList		*iter, *elements, *citer, *celements;

			/* We expect something like this:

			   [{"id":"feed/51d49b79d1716c7b18000025",
			     "title":"LZone",
			     "categories":[{"id":"user/-/label/myfolder","label":"myfolder"}],
			     "sortid":"51d49b79d1716c7b18000025",
			     "firstitemmsec":"1371403150181",
			     "url":"http://lzone.de/rss.xml",
			     "htmlUrl":"http://lzone.de",
			     "iconUrl":"http://s.yeoldereader.com/system/uploads/feed/picture/5152/884a/4dce/57aa/7e00/icon_0a6a.ico"},
			    ...
			 */
			elements = iter = json_array_get_elements (array);
			/* Add all new nodes we find */
			while (iter) {
				JsonNode *categories, *node = (JsonNode *)iter->data;
				nodePtr folder = NULL;

				/* Check for categories, if there use first one as folder */
				categories = json_get_node (node, "categories");
				if (categories && JSON_NODE_TYPE (categories) == JSON_NODE_ARRAY) {
					citer = celements = json_array_get_elements (json_node_get_array (categories));
					while (citer) {
						const gchar *label = json_get_string ((JsonNode *)citer->data, "label");
						if (label) {
							folder = node_source_find_or_create_folder (source->root, label, label);

							/* Store category id also for folder (needed when subscribing new feeds) */
							g_hash_table_insert (source->folderToCategory, g_strdup (folder->id), g_strdup (label));
							break;
						}
						citer = g_list_next (citer);
					}
					g_list_free (celements);
				}

				/* ignore everything without a feed url */
				if (json_get_string (node, "url")) {
					theoldreader_source_merge_feed (source,
					                                json_get_string (node, "url"),
					                                json_get_string (node, "title"),
					                                json_get_string (node, "id"),
					                                folder);
				}
				iter = g_list_next (iter);
			}
			g_list_free (elements);

			/* Remove old nodes we cannot find anymore */
			node_foreach_child_data (source->root, theoldreader_source_check_node_for_removal, array);

			/* Save new subscription tree to OPML cache file */
			opml_source_export (subscription->node);
			subscription->node->available = TRUE;
		} else {
			g_warning ("Invalid JSON returned on TheOldReader request! >>>%s<<<", result->data);
		}

		g_object_unref (parser);
	} else {
		subscription->node->available = FALSE;
		debug0 (DEBUG_UPDATE, "theoldreader_subscription_cb(): ERROR: failed to get subscription list!");
	}

	if (!(flags & NODE_SOURCE_UPDATE_ONLY_LIST))
		node_foreach_child_data (subscription->node, node_update_subscription, GUINT_TO_POINTER (0));
}