void XXObjectRectangle::unionRect(XSWFCONTEXT&cnt,XXVARLIST&list)
{
    // Push a freshly created rectangle onto the stack.  When list[0] is a
    // rectangle object, the new rectangle is set to the bounding box of
    // this rectangle and list[0]; otherwise the newly created rectangle
    // (possibly XNULL if creation failed) is pushed unchanged.
    XXObjectRectangle* pResult = m_pRoot->m_pGlobal->CreateRectangle();
    if (pResult && list.GetSize() > 0 && list[0].IsObject())
    {
        XXObjectRectangle* pOther = (XXObjectRectangle*)list[0].pObject;
        if (pOther->IsObject(XXOBJ_RECTANGLE))
        {
            // Right/bottom edges of both rectangles.
            double myRight     = left + width;
            double myBottom    = top + height;
            double otherRight  = pOther->left + pOther->width;
            double otherBottom = pOther->top + pOther->height;
            // Bounding box: min of left/top edges, max of right/bottom edges.
            double unionLeft   = XMIN(left, pOther->left);
            double unionTop    = XMIN(top, pOther->top);
            double unionRight  = XMAX(myRight, otherRight);
            double unionBottom = XMAX(myBottom, otherBottom);
            pResult->left   = unionLeft;
            pResult->top    = unionTop;
            pResult->width  = unionRight - unionLeft;
            pResult->height = unionBottom - unionTop;
            //if(r>l&&b>t)
            //	pVar->iData32=XTRUE;
        }
    }
    cnt.pStack->Push(pResult);
}
/*
 * Kick off a disk read for this store client.
 *
 * Allocates a fresh stmem node to read into, then issues either a
 * header read (from offset 0, when the swap header size is still
 * unknown) or a body read at the client's copy offset.
 */
static void
storeClientFileRead(store_client * sc)
{
    MemObject *mem = sc->entry->mem_obj;
    assert(sc->new_callback);
    assert(!sc->flags.disk_io_pending);
    sc->flags.disk_io_pending = 1;
    assert(sc->node_ref.node == NULL);  /* We should never, ever have a node here; or we'd leak! */
    stmemNodeRefCreate(&sc->node_ref);  /* Creates an entry with reference count == 1 */
    if (mem->swap_hdr_sz == 0) {
        /* Swap header size still unknown: read from the start of the
         * swap file so storeClientReadHeader can parse it first. */
        storeRead(sc->swapin_sio,
            sc->node_ref.node->data,
            XMIN(SM_PAGE_SIZE, sc->copy_size),
            0,
            storeClientReadHeader,
            sc);
    } else {
        if (sc->entry->swap_status == SWAPOUT_WRITING)
            assert(storeSwapOutObjectBytesOnDisk(mem) > sc->copy_offset);
        /* XXX is this right? Shouldn't we incl. mem->swap_hdr_sz? */
        storeRead(sc->swapin_sio,
            sc->node_ref.node->data,
            XMIN(SM_PAGE_SIZE, sc->copy_size),
            sc->copy_offset + mem->swap_hdr_sz,
            storeClientReadBody,
            sc);
    }
}
// Copy up to l characters of strBuf into this variable and mark it as a
// STRING_REF string.  A fresh buffer is allocated when the variable is
// not already a reference string; otherwise the existing buffer is
// resized via SetLength().  Returns XFALSE only on allocation failure.
XBOOL XXVar::SetString(XPCTSTR strBuf, int l)
{
    if(nType!=XODT_STRING||nStringType!=STRING_REF)
    {
        Release();
        // (l<<1): allocate twice the requested length for growth headroom,
        // plus header and terminator; minimum allocation is 64 bytes.
        int nl=(l<<1)+sizeof(XSTRINGDATA)+1;
        if(nl<64)
            nl=64;
        XSTRINGDATA*pData=AllocBuffer(nl);
        if(pData==XNULL)
            return XFALSE;
        strTxt=(XPTSTR)(pData+1);   // text storage immediately follows the header
        strTxt[l]=0;
    }
    else
    {
        if(!SetLength(l))
            return XFALSE;
    }
    if(strBuf)
    {
        // Never copy past strBuf's actual NUL terminator.
        l=XMIN(l,(int)XString8::SafeStrlen(strBuf));
        XGlobal::Memcpy(strTxt,(void*)strBuf,l);
        strTxt[l]=0;
    }
    else
        l=0;    // NOTE(review): with strBuf==XNULL, strTxt[0] is not
                // explicitly zeroed on the SetLength path -- confirm callers
                // never read the text of a length-0 result.
    GetData()->nLength=l;
    nType=XODT_STRING;
    nStringType=STRING_REF;
    return XTRUE;
}
/*
 * Read up to sz decoded bytes from fd, which carries an adb-encoded
 * stream, decoding into buf.  Returns the number of decoded bytes
 * produced (may be short on EOF); dies on read error.
 */
size_t
read_all_adb_encoded(int fd, void* buf, size_t sz)
{
    char encbuf[4096];
    unsigned state = 0;         /* decoder escape state carried across chunks */
    char* dec = buf;
    char* decend = dec + sz;
    ssize_t ret;
    size_t nr_read = 0;
    while (nr_read < sz) {
        do {
            WITH_IO_SIGNALS_ALLOWED();
            /* Cap the raw read at the remaining decoded space; this
             * presumes decoding never expands the data -- adb_decode's
             * decend bound protects buf regardless. */
            ret = read(fd, encbuf, XMIN(sz - nr_read, sizeof (encbuf)));
        } while (ret == -1 && errno == EINTR);   /* retry interrupted reads */
        if (ret < 0)
            die_errno("read[adbenc]");
        if (ret < 1)
            break;              /* EOF */
        const char* in = encbuf;
        const char* inend = encbuf + ret;
        char* cur_dec = dec;
        adb_decode(&state, &dec, decend, &in, inend);
        nr_read += dec - cur_dec;   /* count decoded, not raw, bytes */
    }
    return nr_read;
}
// Resize the string payload to l characters.  If the current buffer is
// unshared (refcount < 2) and large enough it is adjusted in place;
// otherwise a new, larger buffer is allocated and, when bCopy is set,
// the old contents are copied across.  Returns XFALSE only on
// allocation failure.
XBOOL XXVar::SetLength(int l,XBOOL bCopy)
{
    XSTRINGDATA*pData=GetData();
    int al=l+sizeof(XSTRINGDATA)+1;   // bytes needed: header + text + NUL
    if(pData->nRefs<2&&(int)pData->nMaxLength>=al)
    {
        // Unshared and big enough: adjust the length in place.
        pData->nLength=l;
        strTxt[l]=0;
    }
    else
    {
        int ol=XMIN((int)pData->nLength,l);   // characters worth preserving
        // Over-allocate (needed size plus l again) so repeated growth does
        // not reallocate every time; minimum allocation is 64 bytes.
        int nl=al+l;
        if(nl<64)
            nl=64;
        XSTRINGDATA*pNew=AllocBuffer(nl);
        if(pNew==XNULL)
            return XFALSE;
        pNew->nLength=l;
        XPTSTR strNew=(XPTSTR)(pNew+1);
        if(bCopy&&ol)
        {
            pNew->nLength=l;
            XGlobal::Memcpy(strNew,strTxt,ol);
        }
        else
            pNew->nLength=bCopy?l:0;
        strTxt=strNew;
        // NOTE(review): the new buffer is not NUL-terminated here (see the
        // commented-out line below) -- confirm callers terminate it.
        //strTxt[ol]=0;
        FreeBuffer(pData);
    }
    return XTRUE;
}
/*
 * Read up to sz bytes of adb-encoded data from the channel's fd,
 * decode it, and append the decoded bytes to the channel's ring
 * buffer.  Returns the number of decoded bytes added (0 on immediate
 * EOF); dies only when the very first read fails.
 */
static size_t
channel_read_adb_hack(struct channel* c, size_t sz)
{
    size_t nr_added = 0;
    while (nr_added < sz) {
        char buf[4096];
        size_t to_read = XMIN(sz - nr_added, sizeof (buf));
        ssize_t chunksz = read(c->fdh->fd, buf, to_read);
        if (chunksz < 0 && nr_added == 0)
            die_errno("read");   /* errors after progress just end the loop */
        if (chunksz < 1)
            break;
        /* The writable region of the ring buffer may wrap, so decode into
         * (up to) two iovecs. */
        struct iovec iov[2];
        ringbuf_writable_iov(c->rb, iov, chunksz);
        unsigned state = c->leftover_escape;   /* resume decoder state */
        /* NOTE(review): the updated decoder state is not stored back into
         * c->leftover_escape after the loop -- confirm this is intended. */
        const char* in = buf;
        const char* inend = in + chunksz;
        size_t np = 0;
        for (int i = 0; i < ARRAYSIZE(iov); ++i) {
            char* decstart = iov[i].iov_base;
            char* dec = decstart;
            char* decend = dec + iov[i].iov_len;
            adb_decode(&state, &dec, decend, &in, inend);
            np += (dec - decstart);   /* decoded bytes landed in this iovec */
        }
        ringbuf_note_added(c->rb, np);
        nr_added += np;
    }
    return nr_added;
}
/*
 * Compose two slices: result = s1 applied first, then s2 over s1's
 * output index space.  'result' may alias s1 (the composition is built
 * in a local and copied out last).  Returns NC_EINVALCOORDS when the
 * composed first index falls past the end of s1, else NC_NOERR.
 */
int
dceslicecompose(DCEslice* s1, DCEslice* s2, DCEslice* result)
{
    int err = NC_NOERR;
    size_t lastx = 0;
    DCEslice sr; /* For back compatability so s1 and result can be same object */
#ifdef DEBUG1
    slicedump("compose: s1",s1);
    slicedump("compose: s2",s2);
#endif
    sr.node.sort = CES_SLICE;
    /* Strides multiply: each s2 step covers s2->stride steps of s1. */
    sr.stride = s1->stride * s2->stride;
    sr.first = MAP(s1,s2->first);
    if(sr.first > s1->last)
        return NC_EINVALCOORDS;
    lastx = MAP(s1,s2->last);
    sr.last = XMIN(s1->last,lastx);   /* clamp to s1's extent */
    sr.length = (sr.last + 1) - sr.first;
    sr.declsize = XMAX(s1->declsize,s2->declsize); /* use max declsize */
    /* fill in other fields */
    /* count = ceil(length / stride) */
    sr.count = (sr.length + (sr.stride - 1))/sr.stride;
    *result = sr;
#ifdef DEBUG1
    slicedump("compose: result",result);
#endif
    return err;
}
// Compute the thumb rectangle of the horizontal scroll bar and the
// scale factor 'pp' (in 1/1000ths) used to map scroll position to pixels.
void XHScrollBar::CalcRect(XRect &rect, int &pp)
{
    GetClientRect(rect);
    // Track width: client width minus one bar-height margin on each side
    // (rect.bottom equals the bar height since the client rect starts at 0).
    int w=rect.Width()-(rect.bottom<<1);
    int mw=rect.Height()<<1;   // minimum thumb width: twice the bar height
    // Scale capped at 1000 so the thumb never exceeds the track;
    // a zero range means "full-width thumb".
    pp=m_nRange==0?1000:XMIN(1000,(w-mw)*1000/m_nRange);
    int bx=m_nPos*pp/1000+rect.bottom-1;                         // thumb left edge
    int ex=rect.right-((m_nRange-m_nPos)*pp/1000+rect.bottom)+1; // thumb right edge
    rect=XRect(bx,0,ex,rect.bottom);
}
// Read the next chunk of cached response data (at most MAX_PACK bytes,
// bounded by the bytes still expected per the response info) from the
// cache file into m_inData.  Returns CWAIT_ERROR when the file is not
// open, CWAIT_OK otherwise.
int XCatch::GetData()
{
    if(!m_file.IsValid())
        return CWAIT_ERROR;
    RESPONSEINFO*p=GetResponseInfo();
    // XFALSE presumably means "don't preserve existing contents" -- confirm.
    m_inData.SetSize(MAX_PACK,XFALSE);
    int nSize=XMIN(p->nTotalSize-p->nLength,MAX_PACK);
    int l=m_file.Read(m_inData.GetData(),nSize);
    m_inData.SetSize(l);   // shrink to the byte count actually read
    return CWAIT_OK;
}
/*
 * How many bytes this channel wants to read from its fd right now.
 * Only FROM_FD channels with a live fd handle read; the size is capped
 * by both the ring buffer's free space and the flow-control window.
 */
static size_t
channel_wanted_readsz(struct channel* c)
{
    if (c->dir != CHANNEL_FROM_FD || c->fdh == NULL)
        return 0;

    return XMIN(ringbuf_room(c->rb), c->window);
}
/*
 * How many bytes this channel wants to write to its fd right now.
 * Only TO_FD channels with a live fd handle write; the size is capped
 * by the buffered byte count and by how far the bytes_written counter
 * can still advance without overflowing UINT32_MAX.
 */
static size_t
channel_wanted_writesz(struct channel* c)
{
    if (c->dir != CHANNEL_TO_FD || c->fdh == NULL)
        return 0;

    return XMIN(ringbuf_size(c->rb), UINT32_MAX - c->bytes_written);
}
/* Append incoming data. */
/*
 * Append 'len' bytes of 'data' to the in-memory page chain 'mem'.
 * Any free space in the tail page is filled first; the remainder is
 * stored in newly allocated SM_PAGE_SIZE pages appended to the chain.
 */
void
stmemAppend(mem_hdr * mem, const char *data, int len)
{
    mem_node *p;
    int avail_len;
    int len_to_copy;
    debug(19, 6) ("memAppend: len %d\n", len);
    /* Does the last block still contain empty space?
     * If so, fill out the block before dropping into the
     * allocation loop */
    if (mem->head && mem->tail && (mem->tail->len < SM_PAGE_SIZE)) {
        avail_len = SM_PAGE_SIZE - (mem->tail->len);
        len_to_copy = XMIN(avail_len, len);
        xmemcpy((mem->tail->data + mem->tail->len), data, len_to_copy);
        /* Adjust the ptr and len according to what was deposited in the page */
        data += len_to_copy;
        len -= len_to_copy;
        mem->tail->len += len_to_copy;
    }
    while (len > 0) {
        len_to_copy = XMIN(len, SM_PAGE_SIZE);
        p = xcalloc(1, sizeof(mem_node));
        p->next = NULL;
        p->len = len_to_copy;
        p->data = memAllocate(MEM_STMEM_BUF);
        /* Accounting is per whole page, even for a partly-filled tail. */
        store_mem_size += SM_PAGE_SIZE;
        xmemcpy(p->data, data, len_to_copy);
        if (!mem->head) {
            /* The chain is empty */
            mem->head = mem->tail = p;
        } else {
            /* Append it to existing chain */
            mem->tail->next = p;
            mem->tail = p;
        }
        len -= len_to_copy;
        data += len_to_copy;
    }
}
void XXObjectRectangle::intersects(XSWFCONTEXT&cnt,XXVARLIST&list)
{
    // Push a logic value onto the stack: XTRUE when this rectangle and
    // list[0] (a rectangle object) overlap with positive area, otherwise
    // the default logic value is left unchanged.
    XXVar result;//=XXVar::CreateBool(XFALSE);
    result.ToLogic();
    if (list.GetSize() > 0 && list[0].IsObject())
    {
        XXObjectRectangle* pOther = (XXObjectRectangle*)list[0].pObject;
        if (pOther->IsObject(XXOBJ_RECTANGLE))
        {
            // Intersection box: max of left/top edges, min of right/bottom.
            double interLeft   = XMAX(left, pOther->left);
            double interTop    = XMAX(top, pOther->top);
            double interRight  = XMIN(left + width, pOther->left + pOther->width);
            double interBottom = XMIN(top + height, pOther->top + pOther->height);
            // Overlap exists only when the box has positive width and height.
            if (interRight > interLeft && interBottom > interTop)
                result.iData32 = XTRUE;
        }
    }
    cnt.pStack->Push(result);
}
/*
 * Copy up to 'size' bytes starting at absolute offset 'offset' from the
 * in-memory page chain into 'buf'.  Returns the number of bytes copied,
 * or 0 when the chain is empty or the offset lies beyond it.
 */
ssize_t
stmemCopy(const mem_hdr * mem, squid_off_t offset, char *buf, size_t size)
{
    mem_node *p = mem->head;
    squid_off_t t_off = mem->origin_offset;   /* absolute offset of p's first byte */
    size_t bytes_to_go = size;
    char *ptr_to_buf = NULL;
    int bytes_from_this_packet = 0;
    int bytes_into_this_packet = 0;
    debug(19, 6) ("memCopy: offset %" PRINTF_OFF_T ": size %d\n",
        offset, (int) size);
    if (p == NULL)
        return 0;
    assert(size > 0);
    /* Seek our way into store */
    while ((t_off + p->len) < offset) {
        t_off += p->len;
        if (!p->next) {
            debug(19, 1) ("memCopy: p->next == NULL\n");
            return 0;
        }
        assert(p->next);
        p = p->next;
    }
    /* Start copying begining with this block until
     * we're satiated */
    bytes_into_this_packet = offset - t_off;
    bytes_from_this_packet = XMIN(bytes_to_go,
        p->len - bytes_into_this_packet);
    xmemcpy(buf, p->data + bytes_into_this_packet, bytes_from_this_packet);
    bytes_to_go -= bytes_from_this_packet;
    ptr_to_buf = buf + bytes_from_this_packet;
    p = p->next;
    /* Remaining nodes: copy either the whole node or just what's left. */
    while (p && bytes_to_go > 0) {
        if (bytes_to_go > p->len) {
            xmemcpy(ptr_to_buf, p->data, p->len);
            ptr_to_buf += p->len;
            bytes_to_go -= p->len;
        } else {
            xmemcpy(ptr_to_buf, p->data, bytes_to_go);
            bytes_to_go -= bytes_to_go;   /* i.e. bytes_to_go = 0; loop ends */
        }
        p = p->next;
    }
    return size - bytes_to_go;
}
/*
 * Deliver a completed read (or error, sz < 0) to the store client's
 * registered callback.  Ownership of sc->node_ref, including its
 * reference, is handed to the callback; sc's copy is cleared first so
 * it cannot be released twice.
 */
static void
storeClientCallback(store_client * sc, ssize_t sz)
{
    STNCB *new_callback = sc->new_callback;
    void *cbdata = sc->callback_data;
    mem_node_ref nr;
    assert(sc->new_callback);
    /* Clear callback state before calling out, guarding against re-entry. */
    sc->new_callback = NULL;
    sc->callback_data = NULL;
    nr = sc->node_ref;
    /* XXX this should be a reference; and we should dereference our copy! */
    /* This code "transfers" its ownership (and reference) of the node_ref
     * to the caller. Ugly, but works. */
    sc->node_ref.node = NULL;
    sc->node_ref.offset = -1;
    /* Can't use XMIN here - sz is signed; copy_size isn't; things get messy */
    if (sz < 0)
        new_callback(cbdata, nr, -1);
    else
        new_callback(cbdata, nr, XMIN(sz, sc->copy_size));
    cbdataUnlock(cbdata);
}
/* * When passing on to the next layers, use the ip_len * value for the length, unless the given len happens to * to be less for some reason. Note that ip_len might * be less than len due to Ethernet padding. */ void handle_ipv4(const struct ip *ip, int len, void *userdata) { int offset; int iplen; uint16_t ip_off; if (len < sizeof(*ip)) return; offset = ip->ip_hl << 2; iplen = XMIN(nptohs(&ip->ip_len), len); if (callback_ipv4) if (0 != callback_ipv4(ip, iplen, userdata)) return; ip_off = ntohs(ip->ip_off); if ((ip_off & (IP_OFFMASK | IP_MF)) && _reassemble_fragments) { handle_ipv4_fragment(ip, iplen, userdata); } else if (IPPROTO_UDP == ip->ip_p) { handle_udp((struct udphdr *)((char *)ip + offset), iplen - offset, userdata); } else if (IPPROTO_TCP == ip->ip_p) { handle_tcp((struct tcphdr *)((char *)ip + offset), iplen - offset, userdata); } else if (IPPROTO_GRE == ip->ip_p) { handle_gre((u_char *)ip + offset, iplen - offset, userdata); } }
int ath3k_load_fwfile(struct libusb_device_handle *hdl, const struct ath3k_firmware *fw) { int size, count, sent = 0; int ret, r; count = fw->len; size = XMIN(count, FW_HDR_SIZE); ath3k_debug("%s: file=%s, size=%d\n", __func__, fw->fwname, count); /* * Flip the device over to configuration mode. */ ret = libusb_control_transfer(hdl, LIBUSB_REQUEST_TYPE_VENDOR | LIBUSB_ENDPOINT_OUT, ATH3K_DNLOAD, 0, 0, fw->buf + sent, size, 1000); /* XXX timeout */ if (ret != size) { fprintf(stderr, "Can't switch to config mode; ret=%d\n", ret); return (-1); } sent += size; count -= size; /* Load in the rest of the data */ while (count) { size = XMIN(count, BULK_SIZE); ath3k_debug("%s: transferring %d bytes, offset %d\n", __func__, sent, size); ret = libusb_bulk_transfer(hdl, 0x2, fw->buf + sent, size, &r, 1000); if (ret < 0 || r != size) { fprintf(stderr, "Can't load firmware: err=%s, size=%d\n", libusb_strerror(ret), size); return (-1); } sent += size; count -= size; } return (0); }
/*
 * Completion handler for the initial header read of a swapped-in
 * object.  Unpacks and validates the swap meta data (MD5 key, URLs,
 * vary headers); on success records the swap header size and either
 * hands the client any body bytes already read or schedules a body
 * read via storeClientFileRead().
 */
static void
storeClientReadHeader(void *data, const char *buf, ssize_t len)
{
    static int md5_mismatches = 0;   /* running count, for rate-limited warnings */
    store_client *sc = data;
    StoreEntry *e = sc->entry;
    MemObject *mem = e->mem_obj;
    int swap_hdr_sz = 0;
    size_t body_sz;
    size_t copy_sz;
    tlv *tlv_list;
    tlv *t;
    int swap_object_ok = 1;
    char *new_url = NULL;
    char *new_store_url = NULL;
    assert(sc->flags.disk_io_pending);
    sc->flags.disk_io_pending = 0;
    assert(sc->callback != NULL);
    debug(20, 3) ("storeClientReadHeader: len %d\n", (int) len);
    if (len < 0) {
        /* Disk read failed; propagate the error. */
        debug(20, 3) ("storeClientReadHeader: %s\n", xstrerror());
        storeClientCallback(sc, len);
        return;
    }
    tlv_list = storeSwapMetaUnpack(buf, &swap_hdr_sz);
    if (swap_hdr_sz > len) {
        /* oops, bad disk file? */
        debug(20, 1) ("WARNING: swapfile header too small\n");
        storeClientCallback(sc, -1);
        return;
    }
    if (tlv_list == NULL) {
        debug(20, 1) ("WARNING: failed to unpack meta data\n");
        storeClientCallback(sc, -1);
        return;
    }
    /*
     * Check the meta data and make sure we got the right object.
     */
    for (t = tlv_list; t && swap_object_ok; t = t->next) {
        switch (t->type) {
        case STORE_META_KEY:
            assert(t->length == SQUID_MD5_DIGEST_LENGTH);
            if (!EBIT_TEST(e->flags, KEY_PRIVATE) &&
                memcmp(t->value, e->hash.key, SQUID_MD5_DIGEST_LENGTH)) {
                debug(20, 2) ("storeClientReadHeader: swapin MD5 mismatch\n");
                debug(20, 2) ("\t%s\n", storeKeyText(t->value));
                debug(20, 2) ("\t%s\n", storeKeyText(e->hash.key));
                /* Only warn at power-of-ten counts to avoid log spam. */
                if (isPowTen(++md5_mismatches))
                    debug(20, 1) ("WARNING: %d swapin MD5 mismatches\n",
                        md5_mismatches);
                swap_object_ok = 0;
            }
            break;
        case STORE_META_URL:
            new_url = xstrdup(t->value);
            break;
        case STORE_META_STOREURL:
            new_store_url = xstrdup(t->value);
            break;
        case STORE_META_OBJSIZE:
            break;
        case STORE_META_STD:
        case STORE_META_STD_LFS:
            break;
        case STORE_META_VARY_HEADERS:
            if (mem->vary_headers) {
                /* Stored vary headers must match what we already have. */
                if (strcmp(mem->vary_headers, t->value) != 0)
                    swap_object_ok = 0;
            } else {
                /* Assume the object is OK.. remember the vary request headers */
                mem->vary_headers = xstrdup(t->value);
            }
            break;
        default:
            debug(20, 2) ("WARNING: got unused STORE_META type %d\n",
                t->type);
            break;
        }
    }
    /* Check url / store_url */
    do {
        if (new_url == NULL) {
            debug(20, 1) ("storeClientReadHeader: no URL!\n");
            swap_object_ok = 0;
            break;
        }
        /*
         * If we have a store URL then it must match the requested object URL.
         * The theory is that objects with a store URL have been normalised
         * and thus a direct access which didn't go via the rewrite framework
         * are illegal!
         */
        if (new_store_url) {
            if (NULL == mem->store_url)
                mem->store_url = new_store_url;   /* mem object takes ownership */
            else if (0 == strcasecmp(mem->store_url, new_store_url))
                (void) 0;       /* a match! */
            else {
                debug(20, 1) ("storeClientReadHeader: store URL mismatch\n");
                debug(20, 1) ("\t{%s} != {%s}\n", (char *) new_store_url, mem->store_url);
                swap_object_ok = 0;
                break;
            }
        }
        /* If we have no store URL then the request and the memory URL must match */
        /*
         * if ((!new_store_url) && mem->url && strcasecmp(mem->url, new_url) != 0) {
         *     debug(20, 1) ("storeClientReadHeader: URL mismatch\n");
         *     debug(20, 1) ("\t{%s} != {%s}\n", (char *) new_url, mem->url);
         *     swap_object_ok = 0;
         *     break;
         * }
         */
    } while (0);
    storeSwapTLVFree(tlv_list);
    xfree(new_url);
    /* don't free new_store_url if its owned by the mem object now */
    if (mem->store_url != new_store_url)
        xfree(new_store_url);
    if (!swap_object_ok) {
        storeClientCallback(sc, -1);
        return;
    }
    mem->swap_hdr_sz = swap_hdr_sz;
    mem->object_sz = e->swap_file_sz - swap_hdr_sz;
    /*
     * If our last read got some data the client wants, then give
     * it to them, otherwise schedule another read.
     */
    body_sz = len - swap_hdr_sz;
    if (sc->copy_offset < body_sz) {
        /*
         * we have (part of) what they want
         */
        copy_sz = XMIN(sc->copy_size, body_sz);
        debug(20, 3) ("storeClientReadHeader: copying %d bytes of body\n",
            (int) copy_sz);
        /* Strip the swap header off the front of the buffer. */
        xmemmove(sc->copy_buf, sc->copy_buf + swap_hdr_sz, copy_sz);
        if (sc->copy_offset == 0 && len > 0 && memHaveHeaders(mem) == 0)
            httpReplyParse(mem->reply, sc->copy_buf,
                headersEnd(sc->copy_buf, copy_sz));
        storeClientCallback(sc, copy_sz);
        return;
    }
    /*
     * we don't have what the client wants, but at least we now
     * know the swap header size.
     */
    storeClientFileRead(sc);
}
/*
 * Return the pollfd this channel wants serviced: POLLIN when it can
 * accept more data from its fd, POLLOUT when it has buffered data to
 * write, otherwise an inert entry with fd == -1.
 */
struct pollfd
channel_request_poll(struct channel* c)
{
    if (channel_wanted_readsz(c))
        return (struct pollfd){c->fdh->fd, POLLIN, 0};

    if (channel_wanted_writesz(c))
        return (struct pollfd){c->fdh->fd, POLLOUT, 0};

    return (struct pollfd){-1, 0, 0};
}

/*
 * Queue the iovec array for writing to the channel.  When the ring
 * buffer is empty (and no adb encoding hack is active) a direct
 * writev() is attempted first; whatever the kernel didn't accept is
 * then copied into the ring buffer.
 */
void
channel_write(struct channel* c, const struct iovec* iov, unsigned nio)
{
    assert(c->dir == CHANNEL_TO_FD);

    if (c->fdh == NULL)
        return; // If the stream is closed, just discard

    bool try_direct = !c->always_buffer && ringbuf_size(c->rb) == 0;
    size_t directwrsz = 0;
    size_t totalsz;

    if (c->adb_encoding_hack)
        try_direct = false;   // encoded streams must go through the buffer path

    // If writing directly, would make us overflow the write counter,
    // fall back to buffered IO.
    if (try_direct) {
        totalsz = iovec_sum(iov, nio);
        if (c->track_bytes_written
            && UINT32_MAX - c->bytes_written < totalsz)
        {
            try_direct = false;
        }
    }

    if (try_direct) {
        // If writev fails, just fall back to buffering path
        directwrsz = XMAX(writev(c->fdh->fd, iov, nio), 0);
        if (c->track_bytes_written)
            c->bytes_written += directwrsz;
    }

    // Buffer whatever the direct write didn't cover, skipping the
    // already-written prefix of each iovec.
    for (unsigned i = 0; i < nio; ++i) {
        size_t skip = XMIN(iov[i].iov_len, directwrsz);
        directwrsz -= skip;
        char* b = (char*)iov[i].iov_base + skip;
        size_t blen = iov[i].iov_len - skip;
        ringbuf_copy_in(c->rb, b, blen);
        ringbuf_note_added(c->rb, blen);
    }
}

// Begin channel shutdown process.  Closure is not complete until
// channel_dead_p(c) returns true.
void
channel_close(struct channel* c)
{
    c->pending_close = true;

    // TO_FD channels must drain their buffer before dropping the fd;
    // FROM_FD channels (and drained TO_FD channels) drop it immediately.
    if (c->fdh != NULL
        && ((c->dir == CHANNEL_TO_FD && ringbuf_size(c->rb) == 0)
            || c->dir == CHANNEL_FROM_FD))
    {
        fdh_destroy(c->fdh);
        c->fdh = NULL;
    }
}

/*
 * One poll iteration for a channel: perform any read the channel wants
 * (into the ring buffer), then any write it wants (out of the ring
 * buffer), maintaining the flow-control window and write counter.
 */
static void
poll_channel_1(void* arg)
{
    struct channel* c = arg;
    size_t sz;

    if ((sz = channel_wanted_readsz(c)) > 0) {
        size_t nr_read;
        if (c->adb_encoding_hack)
            nr_read = channel_read_adb_hack(c, sz);
        else
            nr_read = channel_read_1(c, sz);

        assert(nr_read <= c->window);
        if (c->track_window)
            c->window -= nr_read;

        if (nr_read == 0)
            channel_close(c);   // EOF on the underlying fd
    }

    if ((sz = channel_wanted_writesz(c)) > 0) {
        size_t nr_written;
        if (c->adb_encoding_hack)
            nr_written = channel_write_adb_hack(c, sz);
        else
            nr_written = channel_write_1(c, sz);

        assert(nr_written <= UINT32_MAX - c->bytes_written);
        if (c->track_bytes_written)
            c->bytes_written += nr_written;

        // A pending close completes once the buffer drains.
        if (c->pending_close && ringbuf_size(c->rb) == 0)
            channel_close(c);
    }
}

// A channel is dead once its fd is gone, its buffer has drained, and
// its EOF has been sent.
bool
channel_dead_p(struct channel* c)
{
    return (c->fdh == NULL
            && ringbuf_size(c->rb) == 0
            && c->sent_eof == true);
}

/*
 * Poll the channel, trapping IO errors (other than EINTR): on error the
 * channel is closed and the error recorded in c->err.
 */
void
channel_poll(struct channel* c)
{
    struct errinfo ei = { .want_msg = false };
    if (catch_error(poll_channel_1, c, &ei) && ei.err != EINTR) {
        if (c->dir == CHANNEL_TO_FD) {
            // Error writing to fd, so purge buffered bytes we'll
            // never write. By purging, we also make the stream
            // appear writable (because now there's space available),
            // but any writes will actually go into a black hole.
            // This way, if somebody's blocked on being able to write
            // to this stream, he'll get unblocked. This behavior is
            // important when c is TO_PEER and lets us complete an
            // orderly shutdown, flushing any data we've buffered,
            // without adding special logic all over the place to
            // account for this situation.
            ringbuf_note_removed(c->rb, ringbuf_size(c->rb));
        }
        channel_close(c);
        c->err = ei.err;
    }
}
// Driver for the GCMMA (Globally Convergent Method of Moving Asymptotes)
// optimizer.  Sets up the work arrays expected by the Fortran GCMMA
// routines, then runs the outer/inner iteration scheme: each outer
// iteration evaluates the objective/constraints and gradients, builds
// and solves MMA subproblems (inner loop) until the approximation is
// conservative or the inner-iteration limit is reached, and terminates
// on max iterations or relative-change convergence.
void
MAST::GCMMAOptimizationInterface::optimize() {
#if MAST_ENABLE_GCMMA == 1
    // make sure that all processes have the same problem setup
    _feval->sanitize_parallel();

    int
    N                  = _feval->n_vars(),
    M                  = _feval->n_eq() + _feval->n_ineq(),
    n_rel_change_iters = _feval->n_iters_relative_change();

    libmesh_assert_greater(N, 0);

    // Work arrays for the Fortran routines (see the comment block below
    // for the meaning of each).
    std::vector<Real>
    XVAL(N, 0.), XOLD1(N, 0.), XOLD2(N, 0.), XMMA(N, 0.),
    XMIN(N, 0.), XMAX(N, 0.), XLOW(N, 0.), XUPP(N, 0.),
    ALFA(N, 0.), BETA(N, 0.), DF0DX(N, 0.),
    A(M, 0.), B(M, 0.), C(M, 0.), Y(M, 0.), RAA(M, 0.), ULAM(M, 0.),
    FVAL(M, 0.), FAPP(M, 0.), FNEW(M, 0.), FMAX(M, 0.),
    DFDX(M*N, 0.), P(M*N, 0.), Q(M*N, 0.), P0(N, 0.), Q0(N, 0.),
    UU(M, 0.), GRADF(M, 0.), DSRCH(M, 0.), HESSF(M*(M+1)/2, 0.),
    f0_iters(n_rel_change_iters);

    std::vector<int> IYFREE(M, 0);
    std::vector<bool> eval_grads(M, false);

    Real
    ALBEFA = 0.1,
    GHINIT = 0.5,
    GHDECR = 0.7,
    GHINCR = 1.2,
    F0VAL  = 0.,
    F0NEW  = 0.,
    F0APP  = 0.,
    RAA0   = 0.,
    Z      = 0.,
    GEPS   =_feval->tolerance();

    /*C********+*********+*********+*********+*********+*********+*********+
     C
     C  The meaning of some of the scalars and vectors in the program:
     C
     C     N  = Number of variables x_j in the problem.
     C     M  = Number of constraints in the problem (not including
     C          the simple upper and lower bounds on the variables).
     C ALBEFA = Relative spacing between asymptote and mode limit. Lower value
     C          will cause the move limit (alpha,beta) to move closer to asymptote
     C          values (l, u).
     C GHINIT = Initial asymptote setting. For the first two iterations the
     C          asymptotes (l, u) are defined based on offsets from the design
     C          point as this fraction of the design variable bounds, ie.
     C              l_j = x_j^k - GHINIT * (x_j^max - x_j^min)
     C              u_j = x_j^k + GHINIT * (x_j^max - x_j^min)
     C GHDECR = Fraction by which the asymptote is reduced for oscillating
     C          changes in design variables based on three consecutive iterations
     C GHINCR = Fraction by which the asymptote is increased for non-oscillating
     C          changes in design variables based on three consecutive iterations
     C INNMAX = Maximal number of inner iterations within each outer iter.
     C          A reasonable choice is INNMAX=10.
     C  ITER  = Current outer iteration number ( =1 the first iteration).
     C  GEPS  = Tolerance parameter for the constraints.
     C          (Used in the termination criteria for the subproblem.)
     C
     C   XVAL(j) = Current value of the variable x_j.
     C  XOLD1(j) = Value of the variable x_j one iteration ago.
     C  XOLD2(j) = Value of the variable x_j two iterations ago.
     C   XMMA(j) = Optimal value of x_j in the MMA subproblem.
     C   XMIN(j) = Original lower bound for the variable x_j.
     C   XMAX(j) = Original upper bound for the variable x_j.
     C   XLOW(j) = Value of the lower asymptot l_j.
     C   XUPP(j) = Value of the upper asymptot u_j.
     C   ALFA(j) = Lower bound for x_j in the MMA subproblem.
     C   BETA(j) = Upper bound for x_j in the MMA subproblem.
     C   F0VAL   = Value of the objective function f_0(x)
     C   FVAL(i) = Value of the i:th constraint function f_i(x).
     C  DF0DX(j) = Derivative of f_0(x) with respect to x_j.
     C   FMAX(i) = Right hand side of the i:th constraint.
     C   DFDX(k) = Derivative of f_i(x) with respect to x_j,
     C             where k = (j-1)*M + i.
     C      P(k) = Coefficient p_ij in the MMA subproblem, where
     C             k = (j-1)*M + i.
     C      Q(k) = Coefficient q_ij in the MMA subproblem, where
     C             k = (j-1)*M + i.
     C     P0(j) = Coefficient p_0j in the MMA subproblem.
     C     Q0(j) = Coefficient q_0j in the MMA subproblem.
     C      B(i) = Right hand side b_i in the MMA subproblem.
     C     F0APP = Value of the approximating objective function
     C             at the optimal soultion of the MMA subproblem.
     C   FAPP(i) = Value of the approximating i:th constraint function
     C             at the optimal soultion of the MMA subproblem.
     C    RAA0   = Parameter raa_0 in the MMA subproblem.
     C    RAA(i) = Parameter raa_i in the MMA subproblem.
     C      Y(i) = Value of the "artificial" variable y_i.
     C      Z    = Value of the "minimax" variable z.
     C      A(i) = Coefficient a_i for the variable z.
     C      C(i) = Coefficient c_i for the variable y_i.
     C   ULAM(i) = Value of the dual variable lambda_i.
     C  GRADF(i) = Gradient component of the dual objective function.
     C  DSRCH(i) = Search direction component in the dual subproblem.
     C  HESSF(k) = Hessian matrix component of the dual function.
     C IYFREE(i) = 0 for dual variables which are fixed to zero in
     C               the current subspace of the dual subproblem,
     C           = 1 for dual variables which are "free" in
     C               the current subspace of the dual subproblem.
     C
     C********+*********+*********+*********+*********+*********+*********+*/

    /*
     * The USER should now give values to the parameters
     * M, N, GEPS, XVAL (starting point),
     * XMIN, XMAX, FMAX, A and C.
     */
    // _initi(M,N,GEPS,XVAL,XMIN,XMAX,FMAX,A,C);
    // Assumed:  FMAX == A
    _feval->_init_dvar_wrapper(XVAL, XMIN, XMAX);
    // set the value of C[i] to be very large numbers
    Real max_x = 0.;
    for (unsigned int i=0; i<N; i++)
        if (max_x < fabs(XVAL[i]))
            max_x = fabs(XVAL[i]);
    std::fill(C.begin(), C.end(), std::max(1.e0*max_x, _constr_penalty));

    int INNMAX=_max_inner_iters, ITER=0, ITE=0, INNER=0, ICONSE=0;
    /*
     * The outer iterative process starts.
     */
    bool terminate = false, inner_terminate=false;
    while (!terminate) {

        ITER=ITER+1;
        ITE=ITE+1;
        /*
         * The USER should now calculate function values and gradients
         * at XVAL. The result should be put in F0VAL,DF0DX,FVAL,DFDX.
         */
        std::fill(eval_grads.begin(), eval_grads.end(), true);
        _feval->_evaluate_wrapper(XVAL,
                                  F0VAL, true, DF0DX,
                                  FVAL, eval_grads, DFDX);
        if (ITER == 1)
            // output the very first iteration
            _feval->_output_wrapper(0, XVAL, F0VAL, FVAL, true);

        /*
         * RAA0,RAA,XLOW,XUPP,ALFA and BETA are calculated.
         */
        raasta_(&M, &N, &RAA0, &RAA[0], &XMIN[0], &XMAX[0], &DF0DX[0], &DFDX[0]);
        asympg_(&ITER, &M, &N, &ALBEFA, &GHINIT, &GHDECR, &GHINCR,
                &XVAL[0], &XMIN[0], &XMAX[0], &XOLD1[0], &XOLD2[0],
                &XLOW[0], &XUPP[0], &ALFA[0], &BETA[0]);
        /*
         * The inner iterative process starts.
         */

        // write the asymptote data for the inneriterations
        _output_iteration_data(ITER, XVAL, XMIN, XMAX, XLOW, XUPP, ALFA, BETA);

        INNER=0;
        inner_terminate = false;
        while (!inner_terminate) {

            /*
             * The subproblem is generated and solved.
             */
            mmasug_(&ITER, &M, &N, &GEPS, &IYFREE[0], &XVAL[0], &XMMA[0],
                    &XMIN[0], &XMAX[0], &XLOW[0], &XUPP[0], &ALFA[0], &BETA[0],
                    &A[0], &B[0], &C[0], &Y[0], &Z, &RAA0, &RAA[0], &ULAM[0],
                    &F0VAL, &FVAL[0], &F0APP, &FAPP[0], &FMAX[0],
                    &DF0DX[0], &DFDX[0],
                    &P[0], &Q[0], &P0[0], &Q0[0], &UU[0],
                    &GRADF[0], &DSRCH[0], &HESSF[0]);
            /*
             * The USER should now calculate function values at XMMA.
             * The result should be put in F0NEW and FNEW.
             */
            std::fill(eval_grads.begin(), eval_grads.end(), false);
            _feval->_evaluate_wrapper(XMMA,
                                      F0NEW, false, DF0DX,
                                      FNEW, eval_grads, DFDX);

            if (INNER >= INNMAX) {
                libMesh::out
                << "** Max Inner Iter Reached: Terminating! Inner Iter = "
                << INNER << std::endl;
                inner_terminate = true;
            }
            else {
                /*
                 * It is checked if the approximations were conservative.
                 */
                conser_( &M, &ICONSE, &GEPS, &F0NEW, &F0APP, &FNEW[0], &FAPP[0]);
                if (ICONSE == 1) {
                    libMesh::out
                    << "** Conservative Solution: Terminating! Inner Iter = "
                    << INNER << std::endl;
                    inner_terminate = true;
                }
                else {
                    /*
                     * The approximations were not conservative, so RAA0 and RAA
                     * are updated and one more inner iteration is started.
                     */
                    INNER=INNER+1;
                    raaupd_( &M, &N, &GEPS, &XMMA[0], &XVAL[0],
                            &XMIN[0], &XMAX[0], &XLOW[0], &XUPP[0],
                            &F0NEW, &FNEW[0], &F0APP, &FAPP[0],
                            &RAA0, &RAA[0]);
                }
            }
        }

        /*
         * The inner iterative process has terminated, which means
         * that an outer iteration has been completed.
         * The variables are updated so that XVAL stands for the new
         * outer iteration point. The function values are also updated.
         */
        xupdat_( &N, &ITER, &XMMA[0], &XVAL[0], &XOLD1[0], &XOLD2[0]);
        fupdat_( &M, &F0NEW, &FNEW[0], &F0VAL, &FVAL[0]);
        /*
         * The USER may now write the current solution.
         */
        _feval->_output_wrapper(ITER, XVAL, F0VAL, FVAL, true);
        // record objective history in a circular buffer for the
        // relative-change convergence test below
        f0_iters[(ITE-1)%n_rel_change_iters] = F0VAL;

        /*
         * One more outer iteration is started as long as
         * ITE is less than MAXITE:
         */
        if (ITE == _feval->max_iters()) {
            libMesh::out
            << "GCMMA: Reached maximum iterations, terminating! "
            << std::endl;
            terminate = true;
        }

        // relative change in objective
        bool rel_change_conv = true;
        Real f0_curr = f0_iters[n_rel_change_iters-1];

        for (unsigned int i=0; i<n_rel_change_iters-1; i++) {
            // use relative change for large objectives, absolute for tiny ones
            if (f0_curr > sqrt(GEPS))
                rel_change_conv = (rel_change_conv &&
                                   fabs(f0_iters[i]-f0_curr)/fabs(f0_curr) < GEPS);
            else
                rel_change_conv = (rel_change_conv &&
                                   fabs(f0_iters[i]-f0_curr) < GEPS);
        }
        if (rel_change_conv) {
            libMesh::out
            << "GCMMA: Converged relative change tolerance, terminating! "
            << std::endl;
            terminate = true;
        }

    }

#endif //MAST_ENABLE_GCMMA == 1
}
// Pre-layout pass for a table cell: records the cell's requested width
// into pData->setCols (distributed over its column span), advances the
// column cursor, and registers any row span for later rows.
XU32 XDomTD::LayeroutPre(DRAWCONTEXT*pDraw,CELLDATA*pData)
{
    XINT i;
    SpanCol(pData);
    //LAYEROUTDATA margin;
    //PreLayerout(pDraw,pData,margin);
    //	if(pDraw->SETWIDTH==-84)
    //		int a=0;
    int w=FindAttrib(XEAB::WIDTH,0);
    // Column/row spans are clamped to at least 1.
    int cspan=XMAX(FindAttrib(XEAB::COLSPAN,1),1);
    int rspan=XMAX(FindAttrib(XEAB::ROWSPAN,1),1);
    /*if(w==0)
    {
        if(m_childs.GetSize()>0)
            pData->setCols.Add(0);
    }
    */
    //else
    if(w)
    {
        // Ensure setCols has an entry for every column up to the cursor.
        for(i=pData->setCols.GetSize();i<pData->nCol;i++)
            pData->setCols.Add(0);
        // Distribute width w across the cspan columns this cell covers.
        // Negative widths appear to encode a different unit (percentage?)
        // -- TODO confirm, the sign-split logic below depends on it.
        for(i=0;i<cspan;i++)
        {
            XU16 id=pData->nCol+i;
            if(id>=pData->setCols.GetSize())
            {
                // New column: give it an even share of the remaining width.
                pData->setCols.Add(w/(cspan-i));
                w-=w/(cspan-i);
            }
            else if(i+1<cspan)
            {
                // Middle column: deduct what this column already claims,
                // clamped so the remainder keeps its sign.
                if(w>0)
                {
                    if(pData->setCols[id]>=0)
                        w=XMAX(w-pData->setCols[id],0);
                    else
                        w-=w/(cspan-i);
                }
                else
                {
                    if(pData->setCols[id]<0)
                        w=XMIN(w-pData->setCols[id],0);
                    else
                        w-=w/(cspan-i);
                }
            }
            else
            {
                // Last spanned column absorbs the remainder if it widens
                // (or, for negative widths, narrows) the column.
                if(w>0)
                {
                    if(pData->setCols[id]<w)
                        pData->setCols[id]=w;
                }
                else if(pData->setCols[id]>w)
                    pData->setCols[id]=w;
            }
        }
    }
    //	else if(m_childs.GetSize()>0)
    pData->nCol+=cspan;
    if(rspan>1)
    {
        // Record the pending row span: remaining rows, end column, two
        // slots filled in later, and the column span.
        pData->spans.Add(rspan);
        pData->spans.Add(pData->nCol);
        pData->spans.Add(0);
        pData->spans.Add(0);
        pData->spans.Add(cspan);
    }
    //EndLayerout(pDraw,pData);
    return 0;
}
/*
 * Completion handler for the initial header read of a swapped-in
 * object.  Unpacks the swap meta data, sanity-checks the MD5 key and
 * URL, then either delivers body bytes already in the buffer to the
 * client callback or schedules a body read via storeClientFileRead().
 */
static void
storeClientReadHeader(void *data, const char *buf, ssize_t len)
{
    store_client *sc = data;
    StoreEntry *e = sc->entry;
    MemObject *mem = e->mem_obj;
    STCB *callback = sc->callback;
    int swap_hdr_sz = 0;
    size_t body_sz;
    size_t copy_sz;
    tlv *tlv_list;
    tlv *t;
    int swap_object_ok = 1;
    assert(sc->flags.disk_io_pending);
    sc->flags.disk_io_pending = 0;
    assert(sc->callback != NULL);
    debug(20, 3) ("storeClientReadHeader: len %d\n", len);
    if (len < 0) {
        /* Disk read failed; propagate the error to the client. */
        debug(20, 3) ("storeClientReadHeader: %s\n", xstrerror());
        sc->callback = NULL;
        callback(sc->callback_data, sc->copy_buf, len);
        return;
    }
    tlv_list = storeSwapMetaUnpack(buf, &swap_hdr_sz);
    if (swap_hdr_sz > len) {
        /* oops, bad disk file? */
        debug(20, 1) ("WARNING: swapfile header too small\n");
        sc->callback = NULL;
        callback(sc->callback_data, sc->copy_buf, -1);
        return;
    }
    if (tlv_list == NULL) {
        debug(20, 1) ("WARNING: failed to unpack swapfile meta data\n");
        sc->callback = NULL;
        callback(sc->callback_data, sc->copy_buf, -1);
        return;
    }
    /*
     * Check the meta data and make sure we got the right object.
     */
    for (t = tlv_list; t; t = t->next) {
        switch (t->type) {
        case STORE_META_KEY:
            assert(t->length == MD5_DIGEST_CHARS);
            /* NOTE(review): an MD5 mismatch only warns here; it does not
             * clear swap_object_ok -- confirm this is intended. */
            if (memcmp(t->value, e->key, MD5_DIGEST_CHARS))
                debug(20, 1) ("WARNING: swapin MD5 mismatch\n");
            break;
        case STORE_META_URL:
            if (NULL == mem->url)
                (void) 0;       /* can't check */
            else if (0 == strcasecmp(mem->url, t->value))
                (void) 0;       /* a match! */
            else {
                debug(20, 1) ("storeClientReadHeader: URL mismatch\n");
                debug(20, 1) ("\t{%s} != {%s}\n", t->value, mem->url);
                swap_object_ok = 0;
                break;
            }
            break;
        case STORE_META_STD:
            break;
        default:
            debug(20, 1) ("WARNING: got unused STORE_META type %d\n",
                t->type);
            break;
        }
    }
    storeSwapTLVFree(tlv_list);
    if (!swap_object_ok) {
        sc->callback = NULL;
        callback(sc->callback_data, sc->copy_buf, -1);
        return;
    }
    mem->swap_hdr_sz = swap_hdr_sz;
    mem->object_sz = e->swap_file_sz - swap_hdr_sz;
    /*
     * If our last read got some data the client wants, then give
     * it to them, otherwise schedule another read.
     */
    body_sz = len - swap_hdr_sz;
    if (sc->copy_offset < body_sz) {
        /*
         * we have (part of) what they want
         */
        copy_sz = XMIN(sc->copy_size, body_sz);
        debug(20, 3) ("storeClientReadHeader: copying %d bytes of body\n",
            copy_sz);
        /* Strip the swap header off the front of the buffer. */
        xmemmove(sc->copy_buf, sc->copy_buf + swap_hdr_sz, copy_sz);
        if (sc->copy_offset == 0 && len > 0 && mem->reply->sline.status == 0)
            httpReplyParse(mem->reply, sc->copy_buf,
                headersEnd(sc->copy_buf, copy_sz));
        sc->callback = NULL;
        callback(sc->callback_data, sc->copy_buf, copy_sz);
        return;
    }
    /*
     * we don't have what the client wants, but at least we now
     * know the swap header size.
     */
    storeClientFileRead(sc);
}