enum sess_close http_DoConnection(const struct http *hp) { char *p, *q; enum sess_close ret; unsigned u; if (!http_GetHdr(hp, H_Connection, &p)) { if (hp->protover < 11) return (SC_REQ_HTTP10); return (SC_NULL); } ret = SC_NULL; AN(p); for (; *p; p++) { if (vct_issp(*p)) continue; if (*p == ',') continue; for (q = p + 1; *q; q++) if (*q == ',' || vct_issp(*q)) break; u = pdiff(p, q); if (u == 5 && !strncasecmp(p, "close", u)) ret = SC_REQ_CLOSE; u = http_findhdr(hp, u, p); if (u != 0) hp->hdf[u] |= HDF_FILTER; if (!*q) break; p = q; } return (ret); }
// Element-wise difference: c = a - b, delegated to pdiff().
// The output vector is resized to match the inputs; in DEBUG builds a
// size mismatch between a and b raises an error.
void diff(const std::valarray<double>& a, const std::valarray<double>& b,
          std::valarray<double>& c) const
{
    const std::size_t n = a.size();
#ifdef DEBUG
    if (n != b.size())
        ERROR("Vector size mismatch in distance.");
#endif
    c.resize(n);
    pdiff(&a[0], &b[0], &c[0], n);
}
unsigned WS_Reserve(struct ws *ws, unsigned bytes) { unsigned b2; WS_Assert(ws); assert(ws->r == NULL); b2 = PRNDDN(ws->e - ws->f); if (bytes != 0 && bytes < b2) b2 = PRNDUP(bytes); xxxassert(ws->f + b2 <= ws->e); ws->r = ws->f + b2; DSL(DBG_WORKSPACE, 0, "WS_Reserve(%p, %u/%u) = %u", ws, b2, bytes, pdiff(ws->f, ws->r)); WS_Assert(ws); return (pdiff(ws->f, ws->r)); }
unsigned WS_Reserve(struct ws *ws, unsigned bytes) { unsigned b2; WS_Assert(ws); assert(ws->r == NULL); if (bytes == 0) b2 = ws->e - ws->f; else if (bytes > ws->e - ws->f) b2 = ws->e - ws->f; else b2 = bytes; b2 = PRNDDN(b2); xxxassert(ws->f + b2 <= ws->e); ws->r = ws->f + b2; DSL(0x02, SLT_Debug, 0, "WS_Reserve(%p, %u/%u) = %u", ws, b2, bytes, pdiff(ws->f, ws->r)); WS_Assert(ws); return (pdiff(ws->f, ws->r)); }
/*
 * Report how many bytes are still free in the selected workspace.
 * Requires that no reservation is active on it.
 */
VCL_INT
vmod_workspace_free(VRT_CTX, VCL_ENUM which)
{
	struct ws *ws;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	ws = wsfind(ctx, which);
	WS_Assert(ws);
	AZ(ws->r);			/* no outstanding reservation */

	/* Distance from the free pointer to the end of the workspace. */
	return (pdiff(ws->f, ws->e));
}
/*
 * Sanity-check a workspace: log its layout, then assert all structural
 * invariants (aligned pointers, s < e, s <= f <= e, and any active
 * reservation r lying inside (s, e]).
 */
void
WS_Assert(const struct ws *ws)
{
	CHECK_OBJ_NOTNULL(ws, WS_MAGIC);
	/* Debug-log: id, start, bytes used, bytes reserved, total size. */
	DSL(0x02, SLT_Debug, 0, "WS(%p = (%s, %p %u %u %u)",
	    ws, ws->id, ws->s, pdiff(ws->s, ws->f),
	    ws->r == NULL ? 0 : pdiff(ws->f, ws->r),
	    pdiff(ws->s, ws->e));
	assert(ws->s != NULL);
	assert(PAOK(ws->s));		/* start is aligned */
	assert(ws->e != NULL);
	assert(PAOK(ws->e));		/* end is aligned */
	assert(ws->s < ws->e);		/* workspace is non-empty */
	assert(ws->f >= ws->s);		/* free pointer within bounds */
	assert(ws->f <= ws->e);
	assert(PAOK(ws->f));
	if (ws->r) {
		/* Active reservation must lie inside the workspace. */
		assert(ws->r > ws->s);
		assert(ws->r <= ws->e);
		assert(PAOK(ws->r));
	}
}
/*
 * Sanity-check a workspace: log its layout, then assert all structural
 * invariants (aligned pointers, s < e, s <= f <= e, any reservation r
 * inside (s, e]), and finally that the trailing canary byte is intact.
 */
void
WS_Assert(const struct ws *ws)
{
	CHECK_OBJ_NOTNULL(ws, WS_MAGIC);
	/* Debug-log: id, start, bytes used, bytes reserved, total size. */
	DSL(DBG_WORKSPACE, 0, "WS(%p = (%s, %p %u %u %u)",
	    ws, ws->id, ws->s, pdiff(ws->s, ws->f),
	    ws->r == NULL ? 0 : pdiff(ws->f, ws->r),
	    pdiff(ws->s, ws->e));
	assert(ws->s != NULL);
	assert(PAOK(ws->s));		/* start is aligned */
	assert(ws->e != NULL);
	assert(PAOK(ws->e));		/* end is aligned */
	assert(ws->s < ws->e);		/* workspace is non-empty */
	assert(ws->f >= ws->s);		/* free pointer within bounds */
	assert(ws->f <= ws->e);
	assert(PAOK(ws->f));
	if (ws->r) {
		/* Active reservation must lie inside the workspace. */
		assert(ws->r > ws->s);
		assert(ws->r <= ws->e);
		assert(PAOK(ws->r));
	}
	/* Byte just past the end is a canary; 0x15 means no overrun. */
	assert(*ws->e == 0x15);
}
/*
 * Flush the thread-local log buffer into the shared log segment.
 *
 * The statement order here is load-bearing: the payload is copied
 * first, then VWMB() (write memory barrier) ensures it is globally
 * visible before the first word -- which readers treat as the record
 * header -- is stored, publishing the records atomically.
 */
void
VSL_Flush(struct vsl_log *vsl, int overflow)
{
	uint32_t *p;
	unsigned l;

	l = pdiff(vsl->wlb, vsl->wlp);	/* bytes accumulated locally */
	if (l == 0)
		return;

	assert(l >= 8);			/* at least one record header */

	/* Get space in the shared segment (l - 8: header accounted for). */
	p = vsl_get(l - 8, vsl->wlr, overflow);

	memcpy(p + 1, vsl->wlb + 1, l - 4);	/* payload first */
	VWMB();					/* ...visible before header */
	p[0] = vsl->wlb[0];			/* publish: record goes live */
	vsl->wlp = vsl->wlb;			/* reset local buffer */
	vsl->wlr = 0;				/* reset local record count */
}
/*
 * Flush the worker's local log buffer into the shared log segment.
 *
 * The statement order here is load-bearing: the payload is copied
 * first, then VWMB() (write memory barrier) ensures it is globally
 * visible before the first word -- which readers treat as the record
 * header -- is stored, publishing the records atomically.
 */
void
WSL_Flush(struct worker *wrk, int overflow)
{
	uint32_t *p;
	unsigned l;

	l = pdiff(wrk->wlb, wrk->wlp);	/* bytes accumulated locally */
	if (l == 0)
		return;

	assert(l >= 8);			/* at least one record header */

	/* Get space in the shared segment (l - 8: header accounted for). */
	p = vsl_get(l - 8, wrk->wlr, overflow);

	memcpy(p + 1, wrk->wlb + 1, l - 4);	/* payload first */
	VWMB();					/* ...visible before header */
	p[0] = wrk->wlb[0];			/* publish: record goes live */
	wrk->wlp = wrk->wlb;			/* reset local buffer */
	wrk->wlr = 0;				/* reset local record count */
}
/*
 * Perform a three-way merge: files A, B, C, applying the changes
 * between B and C onto A.  Input may be given as one merge file, a
 * file plus a patch, or three separate files.  Returns 0 on a clean
 * merge, 1 if conflicts remain, 2 on any usage or I/O error.
 */
static int do_merge(int argc, char *argv[], int obj, int reverse, int replace,
		    int ignore, int show_wiggles, int quiet)
{
	/* merge three files, A B C, so changed between B and C get made to A */
	struct stream f, flist[3];
	struct file fl[3];
	int i;
	int chunks1 = 0, chunks2 = 0, chunks3 = 0;
	char *replacename = NULL, *orignew = NULL;
	struct csl *csl1, *csl2;
	struct ci ci;
	FILE *outfile = stdout;

	/* First pass: load every named file into flist[]. */
	switch (argc) {
	case 0:
		fprintf(stderr, "%s: no files given for --merge\n", Cmd);
		return 2;
	case 3:
	case 2:
	case 1:
		for (i = 0; i < argc; i++) {
			flist[i] = load_file(argv[i]);
			if (flist[i].body == NULL) {
				fprintf(stderr, "%s: cannot load file '%s' - %s\n",
					Cmd, argv[i], strerror(errno));
				return 2;
			}
		}
		break;
	default:
		fprintf(stderr, "%s: too many files given for --merge\n", Cmd);
		return 2;
	}

	/* Second pass: expand the inputs into the three merge streams. */
	switch (argc) {
	case 1: /* a merge file */
		f = flist[0];
		if (!split_merge(f, &flist[0], &flist[1], &flist[2])) {
			fprintf(stderr, "%s: merge file %s looks bad.\n",
				Cmd, argv[0]);
			return 2;
		}
		break;
	case 2: /* a file and a patch */
		f = flist[1];
		chunks2 = chunks3 = split_patch(f, &flist[1], &flist[2]);
		break;
	case 3: /* three separate files */
		break;
	}
	if (reverse) {
		/* Swap B and C so the patch is applied backwards. */
		f = flist[1];
		flist[1] = flist[2];
		flist[2] = f;
	}

	for (i = 0; i < 3; i++) {
		if (flist[i].body == NULL) {
			fprintf(stderr, "%s: file %d missing\n", Cmd, i);
			return 2;
		}
	}

	if (replace) {
		/* Write to a temp file; on success the original becomes
		 * argv[0].porig and the temp file replaces argv[0]. */
		int fd;
		replacename = xmalloc(strlen(argv[0]) + 20);
		orignew = xmalloc(strlen(argv[0]) + 20);
		strcpy(replacename, argv[0]);
		strcpy(orignew, argv[0]);
		strcat(orignew, ".porig");
		/* NOTE(review): this existence probe leaks the fd when the
		 * .porig file already exists -- confirm and close it. */
		if (open(orignew, O_RDONLY) >= 0 || errno != ENOENT) {
			fprintf(stderr, "%s: %s already exists\n",
				Cmd, orignew);
			return 2;
		}
		strcat(replacename, "XXXXXX");
		fd = mkstemp(replacename);
		if (fd == -1) {
			fprintf(stderr, "%s: could not create temporary file for %s\n",
				Cmd, replacename);
			return 2;
		}
		outfile = fdopen(fd, "w");
	}

	/* Tokenize all three streams by line or by word. */
	if (obj == 'l') {
		fl[0] = split_stream(flist[0], ByLine);
		fl[1] = split_stream(flist[1], ByLine);
		fl[2] = split_stream(flist[2], ByLine);
	} else {
		fl[0] = split_stream(flist[0], ByWord);
		fl[1] = split_stream(flist[1], ByWord);
		fl[2] = split_stream(flist[2], ByWord);
	}

	/* chunks1 is never set above, so pdiff() is used whenever the
	 * B stream came from a patch (chunks2 != 0). */
	if (chunks2 && !chunks1)
		csl1 = pdiff(fl[0], fl[1], chunks2);
	else
		csl1 = diff(fl[0], fl[1]);
	csl2 = diff_patch(fl[1], fl[2]);

	ci = make_merger(fl[0], fl[1], fl[2], csl1, csl2,
			 obj == 'w', ignore, show_wiggles);
	print_merge(outfile, &fl[0], &fl[1], &fl[2],
		    obj == 'w', ci.merger);
	if (!quiet && ci.conflicts)
		fprintf(stderr, "%d unresolved conflict%s found\n",
			ci.conflicts, ci.conflicts == 1 ? "" : "s");
	if (!quiet && ci.ignored)
		fprintf(stderr, "%d already-applied change%s ignored\n",
			ci.ignored, ci.ignored == 1 ? "" : "s");

	if (replace) {
		/* Move the merged output into place, keeping a .porig copy. */
		fclose(outfile);
		if (rename(argv[0], orignew) == 0 &&
		    rename(replacename, argv[0]) == 0)
			/* all ok */;
		else {
			fprintf(stderr, "%s: failed to move new file into place.\n",
				Cmd);
			return 2;
		}
	}
	return (ci.conflicts > 0);
}
/*
 * Produce a word- or line-granularity diff of two streams.  Input may
 * be a single patch file, a file plus a patch (ispatch), or two plain
 * files.  Returns the diff routine's exit status, or 2 on error.
 */
static int do_diff(int argc, char *argv[], int obj, int ispatch,
		   int which, int reverse)
{
	/* create a diff (line or char) of two streams */
	struct stream f, flist[3];
	int chunks1 = 0, chunks2 = 0, chunks3 = 0;
	int exit_status = 0;
	struct file fl[2];
	struct csl *csl;

	switch (argc) {
	case 0:
		fprintf(stderr, "%s: no file given for --diff\n", Cmd);
		return 2;
	case 1:
		/* One argument: it must be a patch; diff its two sides. */
		f = load_file(argv[0]);
		if (f.body == NULL) {
			fprintf(stderr, "%s: cannot load file '%s' - %s\n",
				Cmd, argv[0], strerror(errno));
			return 2;
		}
		chunks1 = chunks2 = split_patch(f, &flist[0], &flist[1]);
		if (!flist[0].body || !flist[1].body) {
			fprintf(stderr, "%s: couldn't parse patch %s\n",
				Cmd, argv[0]);
			return 2;
		}
		break;
	case 2:
		/* Two arguments: a file plus either a patch or a file. */
		flist[0] = load_file(argv[0]);
		if (flist[0].body == NULL) {
			fprintf(stderr, "%s: cannot load file '%s' - %s\n",
				Cmd, argv[0], strerror(errno));
			return 2;
		}
		if (ispatch) {
			f = load_file(argv[1]);
			if (f.body == NULL) {
				fprintf(stderr, "%s: cannot load patch"
					" '%s' - %s\n",
					Cmd, argv[1], strerror(errno));
				return 2;
			}
			/* 'which' selects which side of the patch becomes
			 * the second stream. */
			if (which == '2')
				chunks2 = chunks3 = split_patch(f, &flist[2],
								&flist[1]);
			else
				chunks2 = chunks3 = split_patch(f, &flist[1],
								&flist[2]);
		} else
			flist[1] = load_file(argv[1]);
		if (flist[1].body == NULL) {
			fprintf(stderr, "%s: cannot load file"
				" '%s' - %s\n",
				Cmd, argv[1], strerror(errno));
			return 2;
		}
		break;
	default:
		fprintf(stderr, "%s: too many files given for --diff\n",
			Cmd);
		return 2;
	}
	if (reverse) {
		/* NOTE(review): with two plain files (no patch), flist[2]
		 * is never assigned before this swap -- confirm callers
		 * never pass reverse in that mode. */
		f = flist[1];
		flist[1] = flist[2];
		flist[2] = f;
	}
	fl[0] = split_stream(flist[0], obj == 'l' ? ByLine : ByWord);
	fl[1] = split_stream(flist[1], obj == 'l' ? ByLine : ByWord);

	/* pdiff() is used when only the second stream came from a patch. */
	if (chunks2 && !chunks1)
		csl = pdiff(fl[0], fl[1], chunks2);
	else
		csl = diff_patch(fl[0], fl[1]);

	if (obj == 'l') {
		/* Line mode: element count is already the line count. */
		if (!chunks1)
			printf("@@ -1,%d +1,%d @@\n",
			       fl[0].elcnt, fl[1].elcnt);
		exit_status = do_diff_lines(fl, csl);
	} else {
		if (!chunks1) {
			/* count lines in each file */
			int l1, l2, i;
			l1 = l2 = 0;
			for (i = 0 ; i < fl[0].elcnt ; i++)
				if (ends_line(fl[0].list[i]))
					l1++;
			for (i = 0 ; i < fl[1].elcnt ; i++)
				if (ends_line(fl[1].list[i]))
					l2++;
			printf("@@ -1,%d +1,%d @@\n", l1, l2);
		}
		exit_status = do_diff_words(fl, csl);
	}
	return exit_status;
}
// Delete one old/deleted file version from a directory during
// housekeeping.  Handles reference counts (shared objects only lose a
// reference), repairs patch chains by combining diffs where needed,
// rewrites the directory, and adjusts all block/file accounting.
// Returns the object's remaining reference count (0 when fully deleted).
BackupStoreRefCountDatabase::refcount_t HousekeepStoreAccount::DeleteFile(
	int64_t InDirectory, int64_t ObjectID,
	BackupStoreDirectory &rDirectory,
	const std::string &rDirectoryFilename,
	BackupStoreInfo& rBackupStoreInfo)
{
	// Find the entry inside the directory
	bool wasDeleted = false;
	bool wasOldVersion = false;
	int64_t deletedFileSizeInBlocks = 0;
	// A pointer to an object which requires committing if the directory save goes OK
	std::auto_ptr<RaidFileWrite> padjustedEntry;
	// BLOCK
	{
		BackupStoreRefCountDatabase::refcount_t refs =
			mapNewRefs->GetRefCount(ObjectID);
		BackupStoreDirectory::Entry *pentry =
			rDirectory.FindEntryByID(ObjectID);
		if(pentry == 0)
		{
			// Missing entry indicates corruption; report and bail
			// out without touching anything.
			BOX_ERROR("Housekeeping on account " <<
				BOX_FORMAT_ACCOUNT(mAccountID) << " "
				"found error: object " <<
				BOX_FORMAT_OBJECTID(ObjectID) << " "
				"not found in dir " <<
				BOX_FORMAT_OBJECTID(InDirectory) << ", "
				"indicates logic error/corruption? Run "
				"bbstoreaccounts check <accid> fix");
			mErrorCount++;
			return refs;
		}

		// Record the flags it's got set
		wasDeleted = pentry->IsDeleted();
		wasOldVersion = pentry->IsOld();
		// Check this should be deleted
		if(!wasDeleted && !wasOldVersion)
		{
			// Things changed since we were last around
			return refs;
		}

		// Record size
		deletedFileSizeInBlocks = pentry->GetSizeInBlocks();

		if(refs > 1)
		{
			// Not safe to merge patches if someone else has a
			// reference to this object, so just remove the
			// directory entry and return.
			rDirectory.DeleteEntry(ObjectID);
			if(wasDeleted)
			{
				rBackupStoreInfo.AdjustNumDeletedFiles(-1);
			}

			if(wasOldVersion)
			{
				rBackupStoreInfo.AdjustNumOldFiles(-1);
			}

			mapNewRefs->RemoveReference(ObjectID);
			return refs - 1;
		}

		// If the entry is involved in a chain of patches, it needs to be handled
		// a bit more carefully.
		if(pentry->GetDependsNewer() != 0 &&
			pentry->GetDependsOlder() == 0)
		{
			// This entry is a patch from a newer entry. Just need to update the info on that entry.
			BackupStoreDirectory::Entry *pnewer =
				rDirectory.FindEntryByID(pentry->GetDependsNewer());
			if(pnewer == 0 || pnewer->GetDependsOlder() != ObjectID)
			{
				THROW_EXCEPTION(BackupStoreException,
					PatchChainInfoBadInDirectory);
			}
			// Change the info in the newer entry so that this no longer points to this entry
			pnewer->SetDependsOlder(0);
		}
		else if(pentry->GetDependsOlder() != 0)
		{
			BackupStoreDirectory::Entry *polder =
				rDirectory.FindEntryByID(pentry->GetDependsOlder());
			if(pentry->GetDependsNewer() == 0)
			{
				// There exists an older version which depends on this one. Need to combine the two over that one.
				// Adjust the other entry in the directory
				if(polder == 0 ||
					polder->GetDependsNewer() != ObjectID)
				{
					THROW_EXCEPTION(BackupStoreException,
						PatchChainInfoBadInDirectory);
				}
				// Change the info in the older entry so that this no longer points to this entry
				polder->SetDependsNewer(0);
			}
			else
			{
				// This entry is in the middle of a chain, and two patches need combining.
				// First, adjust the directory entries
				BackupStoreDirectory::Entry *pnewer =
					rDirectory.FindEntryByID(pentry->GetDependsNewer());
				if(pnewer == 0 ||
					pnewer->GetDependsOlder() != ObjectID ||
					polder == 0 ||
					polder->GetDependsNewer() != ObjectID)
				{
					THROW_EXCEPTION(BackupStoreException,
						PatchChainInfoBadInDirectory);
				}
				// Remove the middle entry from the linked list by simply using the values from this entry
				pnewer->SetDependsOlder(pentry->GetDependsOlder());
				polder->SetDependsNewer(pentry->GetDependsNewer());
			}

			// COMMON CODE to both cases

			// Generate the filename of the older version
			std::string objFilenameOlder;
			MakeObjectFilename(pentry->GetDependsOlder(),
				objFilenameOlder);
			// Open it twice (it's the diff)
			std::auto_ptr<RaidFileRead> pdiff(RaidFileRead::Open(
				mStoreDiscSet, objFilenameOlder));
			std::auto_ptr<RaidFileRead> pdiff2(RaidFileRead::Open(
				mStoreDiscSet, objFilenameOlder));
			// Open this file
			std::string objFilename;
			MakeObjectFilename(ObjectID, objFilename);
			std::auto_ptr<RaidFileRead> pobjectBeingDeleted(
				RaidFileRead::Open(mStoreDiscSet, objFilename));
			// And open a write file to overwrite the other directory entry
			padjustedEntry.reset(new RaidFileWrite(mStoreDiscSet,
				objFilenameOlder,
				mapNewRefs->GetRefCount(ObjectID)));
			padjustedEntry->Open(true /* allow overwriting */);

			if(pentry->GetDependsNewer() == 0)
			{
				// There exists an older version which depends on this one. Need to combine the two over that one.
				BackupStoreFile::CombineFile(*pdiff, *pdiff2,
					*pobjectBeingDeleted, *padjustedEntry);
			}
			else
			{
				// This entry is in the middle of a chain, and two patches need combining.
				BackupStoreFile::CombineDiffs(*pobjectBeingDeleted,
					*pdiff, *pdiff2, *padjustedEntry);
			}
			// The file will be committed later when the directory is safely commited.

			// Work out the adjusted size
			int64_t newSize = padjustedEntry->GetDiscUsageInBlocks();
			int64_t sizeDelta = newSize - polder->GetSizeInBlocks();
			mBlocksUsedDelta += sizeDelta;
			if(polder->IsDeleted())
			{
				mBlocksInDeletedFilesDelta += sizeDelta;
			}
			if(polder->IsOld())
			{
				mBlocksInOldFilesDelta += sizeDelta;
			}
			polder->SetSizeInBlocks(newSize);
		}

		// pentry no longer valid
	}

	// Delete it from the directory
	rDirectory.DeleteEntry(ObjectID);

	// Save directory back to disc
	// BLOCK
	{
		RaidFileWrite writeDir(mStoreDiscSet, rDirectoryFilename,
			mapNewRefs->GetRefCount(InDirectory));
		writeDir.Open(true /* allow overwriting */);
		rDirectory.WriteToStream(writeDir);

		// Get the disc usage (must do this before commiting it)
		int64_t new_size = writeDir.GetDiscUsageInBlocks();

		// Commit directory
		writeDir.Commit(BACKUP_STORE_CONVERT_TO_RAID_IMMEDIATELY);

		// Adjust block counts if the directory itself changed in size
		int64_t original_size = rDirectory.GetUserInfo1_SizeInBlocks();
		int64_t adjust = new_size - original_size;
		mBlocksUsedDelta += adjust;
		mBlocksInDirectoriesDelta += adjust;

		UpdateDirectorySize(rDirectory, new_size);
	}

	// Commit any new adjusted entry
	// (only now that the rewritten directory is safely on disc)
	if(padjustedEntry.get() != 0)
	{
		padjustedEntry->Commit(BACKUP_STORE_CONVERT_TO_RAID_IMMEDIATELY);
		padjustedEntry.reset(); // delete it now
	}

	// Drop reference count by one. Must now be zero, to delete the file.
	bool remaining_refs = mapNewRefs->RemoveReference(ObjectID);
	ASSERT(!remaining_refs);

	// Delete from disc
	BOX_TRACE("Removing unreferenced object " <<
		BOX_FORMAT_OBJECTID(ObjectID));
	std::string objFilename;
	MakeObjectFilename(ObjectID, objFilename);
	RaidFileWrite del(mStoreDiscSet, objFilename,
		mapNewRefs->GetRefCount(ObjectID));
	del.Delete();

	// Adjust counts for the file
	++mFilesDeleted;
	mBlocksUsedDelta -= deletedFileSizeInBlocks;
	if(wasDeleted)
	{
		mBlocksInDeletedFilesDelta -= deletedFileSizeInBlocks;
		rBackupStoreInfo.AdjustNumDeletedFiles(-1);
	}
	if(wasOldVersion)
	{
		mBlocksInOldFilesDelta -= deletedFileSizeInBlocks;
		rBackupStoreInfo.AdjustNumOldFiles(-1);
	}

	// Delete the directory?
	// Do this if... dir has zero entries, and is marked as deleted in it's containing directory
	if(rDirectory.GetNumberOfEntries() == 0)
	{
		// Candidate for deletion
		mEmptyDirectories.push_back(InDirectory);
	}

	return 0;
}