/**
 * hif_sack_finalize:
 **/
static void
hif_sack_finalize(GObject *object)
{
    HifSack *sack = HIF_SACK(object);
    HifSackPrivate *priv = GET_PRIVATE(sack);
    Pool *pool = priv->pool;
    Repo *repo;
    int i;

    FOR_REPOS(i, repo) {
        HyRepo hrepo = repo->appdata;
        hy_repo_free(hrepo);
    }
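/* For context: a GObject finalize handler such as hif_sack_finalize() only
 * runs if it is installed from the class-init function.  The wiring below is
 * a minimal sketch of that standard GObject pattern, not a copy of libhif's
 * actual class_init, which also sets up private data and other vfuncs. */
static void
hif_sack_class_init(HifSackClass *klass)
{
    GObjectClass *object_class = G_OBJECT_CLASS(klass);

    /* invoked when the last reference to a HifSack is dropped */
    object_class->finalize = hif_sack_finalize;
}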
END_TEST

START_TEST(test_load_repo_err)
{
    HySack sack = hy_sack_create(test_globals.tmpdir, NULL, NULL, NULL,
                                 HY_MAKE_CACHE_DIR);
    HyRepo repo = hy_repo_create("crabalocker");
    hy_repo_set_string(repo, HY_REPO_MD_FN, "/non/existing");
    fail_unless(hy_sack_load_repo(sack, repo, 0) == HY_E_FAILED);
    fail_unless(hy_get_errno() == HY_E_IO);
    hy_repo_free(repo);
    hy_sack_free(sack);
}
void
setup_yum_sack(HySack sack, const char *yum_repo_name)
{
    Pool *pool = sack_pool(sack);
    const char *repo_path = pool_tmpjoin(pool, test_globals.repo_dir,
                                         YUM_DIR_SUFFIX, NULL);
    fail_if(access(repo_path, X_OK));
    HyRepo repo = glob_for_repofiles(pool, yum_repo_name, repo_path);

    fail_if(hy_sack_load_repo(sack, repo,
                              HY_BUILD_CACHE |
                              HY_LOAD_FILELISTS |
                              HY_LOAD_UPDATEINFO |
                              HY_LOAD_PRESTO));
    fail_unless(hy_sack_count(sack) == TEST_EXPECT_YUM_NSOLVABLES);
    hy_repo_free(repo);
}
END_TEST

START_TEST(test_load_repo_err)
{
    g_autoptr(GError) error = NULL;
    HifSack *sack = hif_sack_new();
    hif_sack_set_cachedir(sack, test_globals.tmpdir);
    fail_unless(hif_sack_setup(sack, HIF_SACK_SETUP_FLAG_MAKE_CACHE_DIR, &error));
    g_assert(sack != NULL);
    HyRepo repo = hy_repo_create("crabalocker");
    g_assert(repo != NULL);
    hy_repo_set_string(repo, HY_REPO_MD_FN, "/non/existing");
    fail_unless(!hif_sack_load_repo(sack, repo, 0, &error));
    fail_unless(g_error_matches(error, HIF_ERROR, HIF_ERROR_FILE_INVALID));
    hy_repo_free(repo);
    g_object_unref(sack);
}
HyRepo
glob_for_repofiles(Pool *pool, const char *repo_name, const char *path)
{
    HyRepo repo = hy_repo_create(repo_name);
    const char *tmpl;
    wordexp_t word_vector;

    tmpl = pool_tmpjoin(pool, path, "/repomd.xml", NULL);
    if (wordexp(tmpl, &word_vector, 0) || word_vector.we_wordc < 1)
        goto fail;
    hy_repo_set_string(repo, HY_REPO_MD_FN, word_vector.we_wordv[0]);

    tmpl = pool_tmpjoin(pool, path, "/*primary.xml.gz", NULL);
    if (wordexp(tmpl, &word_vector, WRDE_REUSE) || word_vector.we_wordc < 1)
        goto fail;
    hy_repo_set_string(repo, HY_REPO_PRIMARY_FN, word_vector.we_wordv[0]);

    tmpl = pool_tmpjoin(pool, path, "/*filelists.xml.gz", NULL);
    if (wordexp(tmpl, &word_vector, WRDE_REUSE) || word_vector.we_wordc < 1)
        goto fail;
    hy_repo_set_string(repo, HY_REPO_FILELISTS_FN, word_vector.we_wordv[0]);

    tmpl = pool_tmpjoin(pool, path, "/*prestodelta.xml.gz", NULL);
    if (wordexp(tmpl, &word_vector, WRDE_REUSE) || word_vector.we_wordc < 1)
        goto fail;
    hy_repo_set_string(repo, HY_REPO_PRESTO_FN, word_vector.we_wordv[0]);

    tmpl = pool_tmpjoin(pool, path, "/*updateinfo.xml.gz", NULL);
    if (wordexp(tmpl, &word_vector, WRDE_REUSE) || word_vector.we_wordc < 1)
        goto fail;
    hy_repo_set_string(repo, HY_REPO_UPDATEINFO_FN, word_vector.we_wordv[0]);

    wordfree(&word_vector);
    return repo;

fail:
    wordfree(&word_vector);
    hy_repo_free(repo);
    return NULL;
}
static void
repo_dealloc(_RepoObject *self)
{
    hy_repo_free(self->repo);
    Py_TYPE(self)->tp_free(self);
}
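/* repo_dealloc() above only takes effect once it is registered as the
 * tp_dealloc slot of the Python type that wraps HyRepo.  The sketch below
 * shows that registration in the standard CPython fashion; the _RepoObject
 * layout, the "_hawkey.Repo" type name and the header paths are assumptions
 * and may differ from the real hawkey Python bindings. */
#include <Python.h>
#include <hawkey/repo.h>        /* HyRepo, hy_repo_free(); path is assumed */

typedef struct {
    PyObject_HEAD
    HyRepo repo;                /* wrapped hawkey repo handle */
} _RepoObject;

static PyTypeObject repo_Type = {
    PyVarObject_HEAD_INIT(NULL, 0)
    .tp_name = "_hawkey.Repo",
    .tp_basicsize = sizeof(_RepoObject),
    .tp_dealloc = (destructor) repo_dealloc,  /* frees the HyRepo with the object */
    .tp_flags = Py_TPFLAGS_DEFAULT,
};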
uint32_t
TDNFRefreshCache(
    PTDNF pTdnf
    )
{
    uint32_t dwError = 0;
    HySack hSack = NULL;

    if(!pTdnf)
    {
        dwError = ERROR_TDNF_INVALID_PARAMETER;
        BAIL_ON_TDNF_ERROR(dwError);
    }

    //Creating a new sack without removing the old one
    //did not work well. Remove first, then create.
    if(pTdnf->hSack)
    {
        hy_sack_free(pTdnf->hSack);
        pTdnf->hSack = NULL;
    }

    //init with cache
    dwError = TDNFInitSack(pTdnf, &hSack, HY_LOAD_FILELISTS);
    BAIL_ON_TDNF_ERROR(dwError);

    //Do the same for all enabled repos
    if(pTdnf->pRepos)
    {
        PTDNF_REPO_DATA pRepo = pTdnf->pRepos;
        while(pRepo)
        {
            if(pRepo->nEnabled)
            {
                hy_repo_free(pRepo->hRepo);
                pRepo->hRepo = NULL;
                dwError = TDNFInitRepo(pTdnf, pRepo, &pRepo->hRepo);
                BAIL_ON_TDNF_ERROR(dwError);
            }
            pRepo = pRepo->pNext;
        }
    }

    pTdnf->hSack = hSack;

cleanup:
    return dwError;

error:
    if(hSack)
    {
        hy_sack_free(hSack);
    }
    if(pTdnf && pTdnf->hSack) /* pTdnf may be NULL on the invalid-parameter path */
    {
        hy_sack_free(pTdnf->hSack);
        pTdnf->hSack = NULL;
    }
    goto cleanup;
}
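/* The tdnf functions above and below all follow the cleanup:/error: label
 * convention driven by BAIL_ON_TDNF_ERROR().  As a reading aid, the macro is
 * sketched below under the assumption that it simply jumps to the enclosing
 * function's error: label on a non-zero status; the authoritative definition
 * lives in tdnf's own headers and may differ in detail. */
#define BAIL_ON_TDNF_ERROR(dwError)   \
    do                                \
    {                                 \
        if((dwError))                 \
        {                             \
            goto error;               \
        }                             \
    } while(0)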
//Download repo metadata and initialize
uint32_t
TDNFInitRepo(
    PTDNF pTdnf,
    PTDNF_REPO_DATA pRepoData,
    HyRepo* phRepo
    )
{
    uint32_t dwError = 0;
    gboolean bRet = 0;
    LrHandle* hLibRepo = NULL;
    LrResult* pResult = NULL;
    LrYumRepo* pRepo = NULL;
    int nLocalOnly = 0;
    char* pszRepoCacheDir = NULL;
    char* pszRepoDataDir = NULL;
    char* pszUserPass = NULL;
    char* ppszRepoUrls[] = {NULL, NULL};
    char* ppszLocalUrls[] = {NULL, NULL};
    char* ppszDownloadList[] = {"primary", "filelists", "updateinfo", NULL};
    PTDNF_CONF pConf = NULL;
    HyRepo hRepo = NULL;

    if(!pTdnf || !pTdnf->pConf || !pRepoData || !phRepo)
    {
        dwError = ERROR_TDNF_INVALID_PARAMETER;
        BAIL_ON_TDNF_ERROR(dwError);
    }

    pConf = pTdnf->pConf;

    pszRepoCacheDir = g_build_path(
                          G_DIR_SEPARATOR_S,
                          pConf->pszCacheDir,
                          pRepoData->pszId,
                          NULL);
    if(!pszRepoCacheDir)
    {
        dwError = ERROR_TDNF_INVALID_PARAMETER;
        BAIL_ON_TDNF_ERROR(dwError);
    }

    pszRepoDataDir = g_build_path(
                         G_DIR_SEPARATOR_S,
                         pszRepoCacheDir,
                         TDNF_REPODATA_DIR_NAME,
                         NULL);
    if(!pszRepoDataDir)
    {
        dwError = ERROR_TDNF_INVALID_PARAMETER;
        BAIL_ON_TDNF_ERROR(dwError);
    }

    ppszRepoUrls[0] = pRepoData->pszBaseUrl;
    ppszLocalUrls[0] = pszRepoCacheDir;

    hLibRepo = lr_handle_init();
    if(!hLibRepo)
    {
        dwError = ERROR_TDNF_INVALID_PARAMETER;
        BAIL_ON_TDNF_ERROR(dwError);
    }

    pResult = lr_result_init();
    if(!pResult)
    {
        dwError = ERROR_TDNF_INVALID_PARAMETER;
        BAIL_ON_TDNF_ERROR(dwError);
    }

    //Look for repodata dir - this is auto created
    //during last refresh so skip download if present
    if(!access(pszRepoDataDir, F_OK))
    {
        nLocalOnly = 1;
        lr_handle_setopt(hLibRepo, NULL, LRO_URLS, ppszLocalUrls);
        lr_handle_setopt(hLibRepo, NULL, LRO_IGNOREMISSING, 1);
    }
    else
    {
        //Look for the repo root cache dir. If not there,
        //try to create and download into it.
        if(access(pszRepoCacheDir, F_OK))
        {
            if(errno != ENOENT)
            {
                dwError = errno;
            }
            BAIL_ON_TDNF_SYSTEM_ERROR(dwError);
            if(mkdir(pszRepoCacheDir, 0755)) /* 0755 = rwxr-xr-x */
            {
                dwError = errno;
                BAIL_ON_TDNF_SYSTEM_ERROR(dwError);
            }
        }
        lr_handle_setopt(hLibRepo, NULL, LRO_URLS, ppszRepoUrls);
        lr_handle_setopt(hLibRepo, NULL, LRO_SSLVERIFYPEER, 1);
        lr_handle_setopt(hLibRepo, NULL, LRO_SSLVERIFYHOST, 2);
        lr_handle_setopt(hLibRepo, NULL, LRO_DESTDIR, pszRepoCacheDir);
        lr_handle_setopt(hLibRepo, NULL, LRO_YUMDLIST, ppszDownloadList);

        if(!IsNullOrEmptyString(pRepoData->pszUser) &&
           !IsNullOrEmptyString(pRepoData->pszPass))
        {
            dwError = TDNFAllocateStringPrintf(
                          &pszUserPass,
                          "%s:%s",
                          pRepoData->pszUser,
                          pRepoData->pszPass);
            BAIL_ON_TDNF_ERROR(dwError);
            lr_handle_setopt(
                hLibRepo,
                NULL,
                LRO_USERPWD,
                pszUserPass);
        }
    }

    lr_handle_setopt(hLibRepo, NULL, LRO_REPOTYPE, LR_YUMREPO);
    lr_handle_setopt(hLibRepo, NULL, LRO_LOCAL, nLocalOnly);

    bRet = lr_handle_perform(hLibRepo, pResult, NULL);
    if(!bRet)
    {
        dwError = ERROR_TDNF_REPO_PERFORM;
        BAIL_ON_TDNF_ERROR(dwError);
    }

    bRet = lr_result_getinfo(pResult, NULL, LRR_YUM_REPO, &pRepo);
    if(!bRet)
    {
        dwError = ERROR_TDNF_REPO_GETINFO;
        BAIL_ON_TDNF_ERROR(dwError);
    }

    //Create and set repo properties
    hRepo = hy_repo_create(pRepoData->pszId);
    if(!hRepo)
    {
        dwError = ERROR_TDNF_HAWKEY_FAILED;
        BAIL_ON_TDNF_ERROR(dwError);
    }

    dwError = TDNFInitRepoFromMetaData(hRepo, pRepo);
    BAIL_ON_TDNF_ERROR(dwError);

    *phRepo = hRepo;

cleanup:
    if(pszRepoDataDir)
    {
        g_free(pszRepoDataDir);
    }
    if(pszRepoCacheDir)
    {
        g_free(pszRepoCacheDir);
    }
    if(pResult)
    {
        lr_result_free(pResult);
    }
    if(hLibRepo)
    {
        lr_handle_free(hLibRepo);
    }
    return dwError;

error:
    //If there is an error during init, log the error and
    //remove any cache data that could potentially be corrupt.
    if(pRepoData)
    {
        fprintf(
            stderr,
            "Error: Failed to synchronize cache for repo '%s' from '%s'\n",
            pRepoData->pszName,
            pRepoData->pszBaseUrl);
        if(pTdnf)
        {
            TDNFRepoRemoveCache(pTdnf, pRepoData->pszId);
        }
    }
    if(phRepo)
    {
        *phRepo = NULL;
    }
    if(hRepo)
    {
        hy_repo_free(hRepo);
    }
    goto cleanup;
}
int
main(int argc, const char **argv)
{
    HifSack *sack = hif_sack_new();
    HyRepo repo;
    char *md_repo;
    char *md_primary_xml;
    char *md_filelists;
    char *md_repo_updates;
    char *md_primary_updates_xml;
    char *md_filelists_updates;
    int ret;
    g_autoptr(GError) error = NULL;

    if (!hif_sack_setup(sack, HIF_SACK_SETUP_FLAG_MAKE_CACHE_DIR, NULL))
        return 1;
    if (read_repopaths(&md_repo, &md_primary_xml, &md_filelists, &md_repo_updates,
                       &md_primary_updates_xml, &md_filelists_updates)) {
        fprintf(stderr,
                "This is hawkey testing hack, it needs a readable %s file "
                "containing the following paths on separate lines:\n"
                "<main repomd.xml path>\n"
                "<main primary.xml.gz path>\n"
                "<main filelist.xml.gz path>\n"
                "<updates repomd.xml path>\n"
                "<updates primary.xml.gz path>\n"
                "<updates filelists.xml.gz path>\n", CFG_FILE);
        return 1;
    }

    int load_flags = HIF_SACK_LOAD_FLAG_BUILD_CACHE;
    /* rpmdb */
    repo = hy_repo_create(HY_SYSTEM_REPO_NAME);
    hif_sack_load_system_repo(sack, NULL, load_flags, &error);
    hy_repo_free(repo);

    if (need_filelists(argc, argv))
        load_flags |= HIF_SACK_LOAD_FLAG_USE_FILELISTS;

    /* Fedora repo */
    repo = config_repo("Fedora", md_repo, md_primary_xml, md_filelists);
    ret = hif_sack_load_repo(sack, repo, load_flags, &error);
    assert(ret); /* hif_sack_load_repo() returns TRUE on success */
    (void)ret;
    hy_repo_free(repo);

    /* Fedora updates repo */
    repo = config_repo("updates", md_repo_updates, md_primary_updates_xml,
                       md_filelists_updates);
    ret = hif_sack_load_repo(sack, repo, load_flags, &error);
    assert(ret);
    (void)ret;
    hy_repo_free(repo);

    free(md_repo);
    free(md_primary_xml);
    free(md_filelists);
    free(md_repo_updates);
    free(md_primary_updates_xml);
    free(md_filelists_updates);

    hif_sack_set_installonly(sack, installonly);
    hif_sack_set_installonly_limit(sack, 3);

    if (argc == 2 && !strcmp(argv[1], "-o")) {
        obsoletes(sack);
    } else if (argc == 2) {
        search_and_print(sack, argv[1]);
    } else if (argc == 3 && !strcmp(argv[1], "-f")) {
        search_filter_files(sack, argv[2]);
    } else if (argc == 3 && !strcmp(argv[1], "-r")) {
        search_filter_repos(sack, argv[2]);
    } else if (argc == 3 && !strcmp(argv[1], "-u")) {
        updatables_query_name(sack, argv[2]);
    } else if (argc == 3 && !strcmp(argv[1], "-ul")) {
        update_local(sack, argv[2]);
    } else if (argc == 3 && !strcmp(argv[1], "-ur")) {
        update_remote(sack, argv[2]);
    } else if (argc == 3 && !strcmp(argv[1], "-e")) {
        erase(sack, argv[2]);
    } else if (argc == 3) {
        search_anded(sack, argv[1], argv[2]);
    } else if (argc == 4 && !strcmp(argv[1], "-p")) {
        search_provides(sack, argv[2], argv[3]);
    }

    g_object_unref(sack);
    return 0;
}
uint32_t
TDNFRefreshSack(
    PTDNF pTdnf,
    int nCleanMetadata
    )
{
    uint32_t dwError = 0;
    HyRepo hRepo = NULL;
    int nYumFlags = HY_LOAD_FILELISTS | HY_LOAD_UPDATEINFO;

    if(!pTdnf)
    {
        dwError = ERROR_TDNF_INVALID_PARAMETER;
        BAIL_ON_TDNF_ERROR(dwError);
    }

    if(pTdnf->hSack)
    {
        hy_sack_free(pTdnf->hSack);
        pTdnf->hSack = NULL;
    }

    dwError = TDNFInitSack(pTdnf, &pTdnf->hSack, HY_LOAD_FILELISTS);
    BAIL_ON_TDNF_ERROR(dwError);

    //If there is an empty repo directory, do nothing
    if(pTdnf->pRepos)
    {
        PTDNF_REPO_DATA pTempRepo = pTdnf->pRepos;
        while(pTempRepo)
        {
            if(pTempRepo->nEnabled)
            {
                if(nCleanMetadata)
                {
                    fprintf(stdout,
                            "Refreshing metadata for: '%s'\n",
                            pTempRepo->pszName);
                    dwError = TDNFRepoRemoveCache(pTdnf, pTempRepo->pszId);
                    if(dwError == ERROR_TDNF_FILE_NOT_FOUND)
                    {
                        dwError = 0; //Ignore non existent folders
                    }
                    BAIL_ON_TDNF_ERROR(dwError);
                }

                dwError = TDNFInitRepo(pTdnf, pTempRepo, &hRepo);
                if(dwError)
                {
                    if(pTempRepo->nSkipIfUnavailable)
                    {
                        pTempRepo->nEnabled = 0;
                        fprintf(stdout,
                                "Disabling Repo: '%s'\n",
                                pTempRepo->pszName);
                        dwError = 0;
                    }
                }
                BAIL_ON_TDNF_ERROR(dwError);

                if(pTempRepo->nEnabled)
                {
                    if(pTempRepo->hRepo)
                    {
                        hy_repo_free(pTempRepo->hRepo);
                        pTempRepo->hRepo = NULL;
                    }
                    pTempRepo->hRepo = hRepo;

                    dwError = TDNFLoadYumRepo(pTdnf->hSack, hRepo, nYumFlags);
                    BAIL_ON_TDNF_ERROR(dwError);
                }
            }
            pTempRepo = pTempRepo->pNext;
        }
    }

cleanup:
    return dwError;

error:
    goto cleanup;
}