/*
 * Build a runtime list value from a reversed compile-time constant list.
 * @loc           constant whose ->loc is recorded when save_location is set
 * @csts          constants in REVERSE order; when has_tail, first entry is the tail
 * @has_tail      non-zero if csts starts with an explicit list tail
 * @save_location if true, prepend a location value to the result
 * @fn            fncode compilation context passed through to make_constant
 * Returns the constructed list (may be NULL for an empty list).
 */
static value make_list(constant loc, cstlist csts, int has_tail, bool save_location, fncode fn)
{
  struct list *l;

  /* When a tail is present it is stored as the first csts entry. */
  if (has_tail && csts != NULL)
    {
      l = csts->cst ? make_constant(csts->cst, FALSE, fn) : NULL;
      csts = csts->next;
    }
  else
    l = NULL;

  GCPRO1(l); /* protect l across the allocations below */
  /* Remember that csts is in reverse order ... */
  while (csts)
    {
      value tmp = make_constant(csts->cst, save_location, fn);
      l = alloc_list(tmp, l);
      /* Constant data is shared; freeze each new pair. */
      SET_READONLY(l); SET_IMMUTABLE(l);
      csts = csts->next;
    }
  if (save_location)
    {
      /* Prepend the source location as the first element. */
      value vloc = make_location(&loc->loc);
      l = alloc_list(vloc, l);
      SET_READONLY(l); SET_IMMUTABLE(l);
    }
  GCPOP(1);
  return l;
}
/*
 * Shallow-copy a cons list: a new spine is allocated, CAR values are
 * shared with the original, and a dotted (non-cons) tail is preserved.
 * Returns NULL for a NULL input.
 *
 * Fix: removed the dead `goto exit_nil;` that jumped to the label on the
 * immediately following statement, and flattened the needless else-block
 * after the early return.
 */
Obj copy_list (Obj list)
{
  sint32 length_1;
  Obj new_list, old_cons, old_cdr, new_cons;

  if (list==NULL)
    return (Obj)NULL;

  length_1 = length(list);
  /* Allocate the whole new spine up front. */
  new_list = alloc_list(length_1,1,(Obj)NULL,-1);
  old_cons = list;
  old_cdr = CDR(old_cons);
  new_cons = new_list;
  /* Walk both spines while the old cdr is still a cons (IMMED_TAG==2). */
  while (IMMED_TAG(old_cdr)==2) { /* Consp */
    CAR(new_cons) = CAR(old_cons);
    old_cons = old_cdr;
    old_cdr = CDR(old_cons);
    new_cons = CDR(new_cons);
  }
  /* Final pair: copy its CAR and carry over any dotted tail. */
  CAR(new_cons) = CAR(old_cons);
  CDR(new_cons) = old_cdr;
  return new_list;
}
/*
 * rio_open - ndo_open callback for the dl2k driver.
 * Allocates the descriptor rings, initializes the hardware, installs the
 * shared interrupt handler, then starts the maintenance timer, the TX
 * queue, and device interrupts.
 * Returns 0 on success or the error code of the failing step.
 */
static int rio_open(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	const int irq = np->pdev->irq;
	int i;

	/* RX/TX descriptor lists must exist before hardware init. */
	i = alloc_list(dev);
	if (i)
		return i;

	rio_hw_init(dev);

	i = request_irq(irq, rio_interrupt, IRQF_SHARED, dev->name, dev);
	if (i) {
		/* Unwind: stop the hardware and release the rings. */
		rio_hw_stop(dev);
		free_list(dev);
		return i;
	}

	/* Periodic maintenance timer; first expiry in one second. */
	timer_setup(&np->timer, rio_timer, 0);
	np->timer.expires = jiffies + 1 * HZ;
	add_timer(&np->timer);

	netif_start_queue (dev);

	dl2k_enable_int(np);
	return 0;
}
/*
 * Append `value` to the tail of the chunked finger-tree list `listObj`,
 * returning a freshly allocated list object (the input is not mutated).
 * NOTE(review): `zone` is presumably declared by the PREFUNC parameter
 * macro — confirm against its definition.
 */
static value_t List_append( PREFUNC, value_t listObj, value_t value )
{
	ARGCHECK_2( listObj, value );
	value_t head_chunk = listObj->slots[LIST_HEAD_CHUNK_SLOT];
	value_t cold_storage = listObj->slots[LIST_COLD_STORAGE_SLOT];
	value_t tail_chunk = listObj->slots[LIST_TAIL_CHUNK_SLOT];
	// If our tail chunk has space remaining, append this value to it. If the
	// chunk is full, we will append it to the cold storage list, then create
	// a new, one-element chunk for our tail.
	if (!chunk_can_grow( tail_chunk)) {
		// Following the same amortization logic as our push operation, we will
		// not append the entire full chunk, but only the first three elements.
		// The current tail value will remain on the tail chunk, along with the
		// value we are about to append. This way we always follow an expensive
		// operation with a cheap one, which gives us the amortized O(1)
		// performance which is the whole point of using a finger tree.
		value_t appendable = chunk_chop( zone, tail_chunk );
		cold_storage = METHOD_1( cold_storage, sym_append, appendable );
		tail_chunk = chunk_alloc_1( zone, chunk_tail( zone, tail_chunk ) );
	}
	tail_chunk = chunk_append( zone, tail_chunk, value );
	return alloc_list( zone, head_chunk, cold_storage, tail_chunk );
}
/* Insert a freshly allocated node holding _data right after the list head. */
void PushFront(node_p list, int _data)
{
	node_p fresh;

	alloc_list(&fresh, _data);
	fresh->next = list->next;
	list->next = fresh;
}
/*
 * Push `value` onto the head of the chunked finger-tree list `listObj`,
 * returning a freshly allocated list object (the input is not mutated).
 * NOTE(review): `zone` is presumably declared by the PREFUNC parameter
 * macro — confirm against its definition.
 */
static value_t List_push( PREFUNC, value_t listObj, value_t value )
{
	ARGCHECK_2( listObj, value );
	value_t head_chunk = listObj->slots[LIST_HEAD_CHUNK_SLOT];
	value_t cold_storage = listObj->slots[LIST_COLD_STORAGE_SLOT];
	value_t tail_chunk = listObj->slots[LIST_TAIL_CHUNK_SLOT];
	// If our head chunk has space remaining, push this value onto it. If the
	// chunk is full, we will push it onto the cold storage list and create a
	// new, one-element chunk for the new value, which becomes our head chunk.
	// This way the cold storage is a list of chunks, not a list of values.
	if (!chunk_can_grow( head_chunk )) {
		// Pushing a chunk onto cold storage is a potentially O(log n)
		// operation, as is popping a chunk. Therefore instead of pushing the
		// whole chunk, we will push only the last three elements. Our new
		// head chunk will include the new value and the previous head. This
		// way, an inexpensive pop always follows an expensive push, and vice
		// versa, which is how we get amortized O(1) performance.
		value_t pushable = chunk_pop( zone, head_chunk );
		cold_storage = METHOD_1( cold_storage, sym_push, pushable );
		head_chunk = chunk_alloc_1( zone, chunk_head( head_chunk ) );
	}
	// Push the new value onto our head chunk, which may or may not have been
	// freshly created for the purpose.
	head_chunk = chunk_push( zone, head_chunk, value );
	return alloc_list( zone, head_chunk, cold_storage, tail_chunk );
}
/**
 * @brief Parse a list of the following format:
 * ( *LWS element *( *LWS "," *LWS element ))
 * @param lp pointer to list_t type for returned list. Cannot be NULL.
 * @param p string to parse
 * @return 0 on success, -1 on error (no memory).
 *
 * Note: the list is allocated. Use saslc__list_free() to free it.
 */
int saslc__list_parse(list_t **lp, const char *p)
{
	const char *e, *n;
	list_t *l, *t, **tp;

	l = NULL;      /* head of the list built so far */
	tp = NULL;     /* address of the tail's next pointer, for O(1) append */
	n = p;
	for (;;) {
		p = n;
		p = skip_LWS(p);
		if (*p == '\0')
			break;
		n = next_element(p);
		/* e: one past the element text; drop a trailing ',' separator. */
		e = n > p && n[-1] == ',' ? n - 1 : n;
		/* Trim trailing LWS. NOTE(review): assumes strip_LWS(end, start)
		 * scans backward and returns one past the last non-LWS char —
		 * confirm against its definition. */
		e = strip_LWS(e - 1, p);
		if (e <= p)
			continue; /* empty element (e.g. consecutive commas) */
		t = alloc_list(p, (size_t)(e - p));
		if (t == NULL) {
			/* Out of memory: release the partial list. */
			saslc__list_free(l);
			return -1;
		}
		if (tp != NULL)
			*tp = t;
		else
			l = t; /* first element becomes the head */
		tp = &t->next;
	}
	*lp = l;
	return 0;
}
/* Wrap x in a freshly allocated one-element list value cell. */
cell_t *quote(cell_t *x)
{
  cell_t *cell = alloc_list(1);

  cell->op = OP_value;
  cell->value.type = T_LIST;
  cell->value.ptr[0] = x;
  return cell;
}
/*
 * Build a two-element list: `head` and `tail` each become a one-element
 * chunk, with empty cold storage between them.
 */
value_t AllocTwoItemList( zone_t zone, value_t head, value_t tail )
{
	return alloc_list(
		zone, chunk_alloc_1( zone, head ), &list_empty, chunk_alloc_1( zone, tail ) );
}
/* Allocate a fresh list cell and attach the given node to it. */
t_lst *new_list(t_node *node)
{
	t_lst *cell;

	cell = alloc_list();
	cell->node = node;
	return (cell);
}
/* garp_delay facility function */
/*
 * Ensure the global garp_delay list exists, then append a zeroed-out
 * garp_delay_t entry allocated with MALLOC.
 */
void alloc_garp_delay(void)
{
	/* Create the list lazily on first use (no free/dump callbacks). */
	if (!LIST_EXISTS(garp_delay))
		garp_delay = alloc_list(NULL, NULL);
	list_add(garp_delay, MALLOC(sizeof(garp_delay_t)));
}
/*
 * Build the list `(quote <constant>)` — optionally prefixed by a location
 * value — for the quoted constant `c`. Each freshly consed pair is marked
 * read-only and immutable.
 */
static value make_quote(constant c, bool save_location, fncode fn)
{
  struct list *l;
  value quote;

  /* (<constant>) */
  l = alloc_list(make_constant(c->u.constant, save_location, fn), NULL);
  SET_READONLY(l); SET_IMMUTABLE(l);
  GCPRO1(l); /* protect l across the allocations below */
  /* (quote <constant>) */
  quote = make_gsymbol("quote", fn);
  l = alloc_list(quote, l);
  SET_READONLY(l); SET_IMMUTABLE(l);
  if (save_location)
    {
      /* Prepend the source location as the first element. */
      value loc = make_location(&c->loc);
      l = alloc_list(loc, l);
      SET_READONLY(l); SET_IMMUTABLE(l);
    }
  GCPOP(1);
  return l;
}
/*
 * Allocate and initialize an HTTP/SSL GET checker.
 * @proto selects the protocol: the literal "HTTP_GET" means plain HTTP,
 * anything else means SSL. Defaults: one retry, 3s delay between retries.
 * NOTE(review): MALLOC appears to be a project allocator macro — whether
 * it zeroes or aborts on failure is not visible here.
 */
static http_checker_t *
alloc_http_get(char *proto)
{
	http_checker_t *http_get_chk;

	http_get_chk = (http_checker_t *) MALLOC(sizeof (http_checker_t));
	http_get_chk->arg = (http_t *) MALLOC(sizeof (http_t));
	http_get_chk->proto = (!strcmp(proto, "HTTP_GET")) ? PROTO_HTTP : PROTO_SSL;
	/* URL list with its element free/dump callbacks. */
	http_get_chk->url = alloc_list(free_url, dump_url);
	http_get_chk->nb_get_retry = 1;
	http_get_chk->delay_before_retry = 3 * TIMER_HZ;

	return http_get_chk;
}
/*
 * Lazily create *l (with rt_entry free/dump callbacks), populate it from
 * file_name (at most `max` entries), and optionally add a default entry.
 * No-op if the list already exists; silently returns if allocation fails.
 */
static void
initialise_list(list *l, const char *file_name, const struct rt_entry *default_list, uint32_t max)
{
	if (*l)
		return;

	*l = alloc_list(free_rt_entry, dump_rt_entry);
	if (!*l)
		return;

	read_file(file_name, l, max);
	if (default_list)
		add_default(l, default_list);
}
/* Set instances group pointer */
/*
 * Resolve each instance name in vgroup->iname to a vrrp_t, add it to the
 * group's (lazily created) index_list, and point the instance back at the
 * group. Unknown instance names are silently skipped.
 */
void
vrrp_sync_set_group(vrrp_sgroup_t *vgroup)
{
	vrrp_t *vrrp;
	char *str;
	int i;

	for (i = 0; i < vector_size(vgroup->iname); i++) {
		str = vector_slot(vgroup->iname, i);
		vrrp = vrrp_get_instance(str);
		if (vrrp) {
			/* Create the membership list on first hit. */
			if (LIST_ISEMPTY(vgroup->index_list))
				vgroup->index_list = alloc_list(NULL, NULL);
			list_add(vgroup->index_list, vrrp);
			vrrp->sync = vgroup;
		}
	}
}
/*
 * Begin recording display list `list`. An existing list with the same id
 * is deleted first. mode must be GL_COMPILE or GL_COMPILE_AND_EXECUTE;
 * nested glNewList calls are rejected by the compile_flag assert.
 */
void glNewList(unsigned int list,int mode)
{
  GLList *l;
  GLContext *c=gl_get_context();

  assert(mode == GL_COMPILE || mode == GL_COMPILE_AND_EXECUTE);
  assert(c->compile_flag == 0); /* no nested list compilation */

  /* Redefining an id replaces the previous list. */
  l=find_list(c,list);
  if (l!=NULL)
    delete_list(c,list);
  l=alloc_list(c,list);

  /* Record subsequent ops into the new list's first op buffer. */
  c->current_op_buffer=l->first_op_buffer;
  c->current_op_buffer_index=0;

  c->compile_flag=1;
  c->exec_flag=(mode == GL_COMPILE_AND_EXECUTE);
}
/* Set instances group pointer */
/*
 * Older variant of the sync-group binder: resolve each configured instance
 * name, append the instance to the group's lazily created index_list, and
 * link the instance back to the group. Unknown names are skipped.
 */
void
vrrp_sync_set_group(vrrp_sgroup *vgroup)
{
	vrrp_rt *vrrp;
	char *str;
	int i;

	for (i = 0; i < VECTOR_SIZE(vgroup->iname); i++) {
		str = VECTOR_SLOT(vgroup->iname, i);
		vrrp = vrrp_get_instance(str);
		if (vrrp) {
			/* Create the membership list on first hit. */
			if (LIST_ISEMPTY(vgroup->index_list))
				vgroup->index_list = alloc_list(NULL, NULL);
			list_add(vgroup->index_list, vrrp);
			vrrp->sync = vgroup;
		}
	}
}
/*
 * Remove the last element of the chunked finger-tree list `listObj`,
 * returning a new list object (the input is not mutated).
 * NOTE(review): `zone` is presumably declared by the PREFUNC parameter
 * macro — confirm against its definition.
 */
static value_t List_chop( PREFUNC, value_t listObj )
{
	ARGCHECK_1( listObj );
	value_t head_chunk = listObj->slots[LIST_HEAD_CHUNK_SLOT];
	value_t cold_storage = listObj->slots[LIST_COLD_STORAGE_SLOT];
	value_t tail_chunk = listObj->slots[LIST_TAIL_CHUNK_SLOT];
	// Remove the tail element from this list. If our tail chunk is not minimal,
	// we can just chop off its tail and call it done. Otherwise, this will
	// empty our tail chunk, so we must get a new tail chunk. We will try to
	// get a new chunk from the end of cold storage. If cold storage is empty,
	// we will try to get a single value from the tail of our head chunk. And
	// if even that fails, we will fall back to single-element list mode.
	if (chunk_can_shrink( tail_chunk )) {
		tail_chunk = chunk_chop( zone, tail_chunk );
	}
	else {
		// We are going to throw away the only item left in our tail chunk, so
		// we need a new tail chunk to replace it. If cold storage is not
		// empty, we'll get its tail, which is a whole chunk.
		value_t empty_val = METHOD_0( cold_storage, sym_is_empty );
		if (!BoolFromBoolean( zone, empty_val )) {
			// The cold storage is not empty. yay, get its tail and then chop
			// it off so we don't try to use it twice.
			tail_chunk = METHOD_0( cold_storage, sym_tail );
			cold_storage = METHOD_0( cold_storage, sym_chop );
		}
		else if (chunk_can_shrink( head_chunk )) {
			// Cold storage is empty, but there are still items left on our
			// head chunk. We'll poach one item and paste it on the tail.
			tail_chunk = chunk_alloc_1( zone, chunk_tail( zone, head_chunk ) );
			head_chunk = chunk_chop( zone, head_chunk );
		}
		else {
			// Cold storage is empty, the head chunk is empty, and the tail
			// chunk is already minimal. This means we have only one value left,
			// which means we should drop back to single-item mode.
			return AllocSingleItemList( zone, chunk_tail( zone, head_chunk ) );
		}
	}
	return alloc_list( zone, head_chunk, cold_storage, tail_chunk );
}
/*
 * scamper_fds_init
 *
 * setup the global data structures necessary for scamper to manage a set of
 * file descriptors
 *
 * Chooses the polling backend (kqueue/epoll/poll/select) from compile-time
 * availability and run-time options, then allocates the global fd lists,
 * queues, and the fd splay tree. Returns 0 on success, -1 on failure.
 */
int scamper_fds_init()
{
#ifdef HAVE_GETDTABLESIZE
  scamper_debug(__func__, "fd table size: %d", getdtablesize());
#endif

#ifdef HAVE_POLL
  pollfunc = fds_poll;
#endif

#ifdef HAVE_KQUEUE
  if(scamper_option_kqueue())
    {
      pollfunc = fds_kqueue;
      if(fds_kqueue_init() != 0)
	return -1;
    }
#endif

#ifdef HAVE_EPOLL
  if(scamper_option_epoll())
    {
      pollfunc = fds_epoll;
      if(fds_epoll_init() != 0)
	return -1;
    }
#endif

  /* select is the universal fallback when nothing else was chosen. */
  if(scamper_option_select() || pollfunc == NULL)
    pollfunc = fds_select;

  if((fd_list     = alloc_list("fd_list")) == NULL ||
     (read_fds    = alloc_list("read_fds")) == NULL ||
     (read_queue  = alloc_list("read_queue")) == NULL ||
     (write_fds   = alloc_list("write_fds")) == NULL ||
     (write_queue = alloc_list("write_queue")) == NULL ||
     (refcnt_0    = alloc_list("refcnt_0")) == NULL)
    {
      return -1;
    }

  if((fd_tree = splaytree_alloc(fd_cmp)) == NULL)
    {
      printerror(errno, strerror, __func__, "alloc fd tree failed");
      return -1;
    }

  planetlab = scamper_option_planetlab();

  return 0;
}
/*
 * Remove the first element of the chunked finger-tree list `listObj`,
 * returning a new list object (the input is not mutated).
 * NOTE(review): `zone` is presumably declared by the PREFUNC parameter
 * macro — confirm against its definition.
 */
static value_t List_pop( PREFUNC, value_t listObj )
{
	ARGCHECK_1( listObj );
	value_t head_chunk = listObj->slots[LIST_HEAD_CHUNK_SLOT];
	value_t cold_storage = listObj->slots[LIST_COLD_STORAGE_SLOT];
	value_t tail_chunk = listObj->slots[LIST_TAIL_CHUNK_SLOT];
	// Remove the head element from this list. If our head chunk is not minimal,
	// we can just pop the value from the head chunk and continue on. Otherwise,
	// popping the value from the head chunk will empty it, and then we must
	// pull a new chunk from our cold storage. If the cold storage is empty, we
	// will try to cannibalize an element from the tail chunk. But if the tail
	// chunk is minimal, that means we have only one element left, which means
	// we should drop back down to single-element list mode.
	if (chunk_can_shrink( head_chunk )) {
		head_chunk = chunk_pop( zone, head_chunk );
	}
	else {
		// We need a new head chunk. If cold storage is not empty, get a chunk
		// from cold storage and call it our new head.
		value_t empty_val = METHOD_0( cold_storage, sym_is_empty );
		if (!BoolFromBoolean( zone, empty_val )) {
			// The cold storage is not empty. yay, get a chunk from it.
			head_chunk = METHOD_0( cold_storage, sym_head );
			cold_storage = METHOD_0( cold_storage, sym_pop );
		}
		else if (chunk_can_shrink( tail_chunk )) {
			// Cold storage is empty, but there are still items left on our
			// tail. We'll poach one item from the tail and put it on our head.
			head_chunk = chunk_alloc_1( zone, chunk_head( tail_chunk ) );
			tail_chunk = chunk_pop( zone, tail_chunk );
		}
		else {
			// Cold storage is empty and the tail chunk is already minimal.
			// This means we have only one value left, which means we should
			// drop back to single-item mode.
			return AllocSingleItemList( zone, chunk_head( tail_chunk ) );
		}
	}
	return alloc_list( zone, head_chunk, cold_storage, tail_chunk );
}
/**
 * @brief allocate a new list node for a string and append it to a
 * list
 * @param l the list to append
 * @param p the string
 * @return 0 on success, -1 if allocation fails
 */
int
saslc__list_append(list_t **l, const char *p)
{
	list_t *node, *cur, *tail;

	/* Find the current tail (NULL when the list is empty). */
	tail = NULL;
	cur = *l;
	while (cur != NULL) {
		tail = cur;
		cur = cur->next;
	}

	node = alloc_list(p, strlen(p));
	if (node == NULL)
		return -1;

	if (tail != NULL)
		tail->next = node;
	else
		*l = node;
	return 0;
}
/*
 * Driver for the producer/consumer demo: seeds the shared list head with a
 * dummy node, initializes the two mutexes and the condition variable, runs
 * two consumer and two producer threads to completion, then destroys the
 * synchronization objects. Relies on file-scope globals: head, lock,
 * _lock, cond, consum, product.
 */
int main()
{
	int i=0; /* NOTE(review): unused here — likely leftover */
	/* Sentinel/dummy head node for the shared list. */
	alloc_list(&head,0);
	pthread_mutex_init(&lock,NULL);
	pthread_mutex_init(&_lock,NULL);
	pthread_cond_init(&cond,NULL);
	pthread_t id1,id2,id3,id4;
	pthread_create(&id1,NULL,consum,NULL);
	pthread_create(&id4,NULL,consum,NULL);
	pthread_create(&id2,NULL,product,NULL);
	pthread_create(&id3,NULL,product,NULL);
	pthread_join(id1,NULL);
	pthread_join(id2,NULL);
	pthread_join(id3,NULL);
	pthread_join(id4,NULL);
	pthread_mutex_destroy(&lock);
	pthread_mutex_destroy(&_lock);
	pthread_cond_destroy(&cond);
	return 0;
}
/* Set instances group pointer */
/*
 * Newer sync-group binder: builds vgroup->index_list from the configured
 * instance names, rejecting instances that already belong to another sync
 * group and logging unknown names. If the resulting group has <= 1 member
 * it logs that it will be removed (by the caller) and unlinks the single
 * member, if any.
 */
void
vrrp_sync_set_group(vrrp_sgroup_t *vgroup)
{
	vrrp_t *vrrp;
	char *str;
	unsigned int i;
	vrrp_t *vrrp_last = NULL; /* last instance successfully added */

	/* Can't handle no members of the group */
	if (!vgroup->iname)
		return;

	vgroup->index_list = alloc_list(NULL, NULL);

	for (i = 0; i < vector_size(vgroup->iname); i++) {
		str = vector_slot(vgroup->iname, i);
		vrrp = vrrp_get_instance(str);
		if (vrrp) {
			/* An instance may belong to at most one sync group. */
			if (vrrp->sync)
				log_message(LOG_INFO, "Virtual router %s cannot exist in more than one sync group; ignoring %s", str, vgroup->gname);
			else {
				list_add(vgroup->index_list, vrrp);
				vrrp->sync = vgroup;
				vrrp_last = vrrp;
			}
		}
		else
			log_message(LOG_INFO, "Virtual router %s specified in sync group %s doesn't exist - ignoring", str, vgroup->gname);
	}

	if (LIST_SIZE(vgroup->index_list) <= 1) {
		/* The sync group will be removed by the calling function */
		log_message(LOG_INFO, "Sync group %s has only %d virtual router(s) - removing", vgroup->gname, LIST_SIZE(vgroup->index_list));

		/* If there is only one entry in the group, remove the group from the vrrp entry */
		if (vrrp_last)
			vrrp_last->sync = NULL;
	}
}
/*
 * Build a runtime list value from a reversed compile-time constant list.
 * The first csts entry holds the (possibly NULL) list tail; the remaining
 * entries, being in reverse order, are consed on front-to-back. Each new
 * pair is frozen read-only and immutable. Returns NULL for an empty input.
 */
static value make_list(cstlist csts)
{
  if (csts == NULL)
    return NULL;

  /* the first entry has the list tail */
  struct list *l = csts->cst ? make_constant(csts->cst) : NULL;
  csts = csts->next;
  GCPRO1(l); /* protect l across the allocations below */
  /* Remember that csts is in reverse order ... */
  while (csts)
    {
      value tmp = make_constant(csts->cst);
      assert(immutablep(tmp));
      l = alloc_list(tmp, l);
      l->o.flags |= OBJ_READONLY | OBJ_IMMUTABLE;
      csts = csts->next;
    }
  UNGCPRO();
  return l;
}
/*
 * Find `range` consecutive unused display-list slots, allocate them, and
 * return the id of the first one. Returns 0 if no run of `range` free
 * slots exists.
 */
unsigned int glGenLists(int range)
{
  GLContext *c = gl_get_context();
  int count, i, list;
  GLList **lists;

  lists = c->shared_state.lists;
  count = 0; /* length of the current run of free slots */
  for (i = 0; i < MAX_DISPLAY_LISTS; i++) {
    if (!lists[i]) {
      count++;
      if (count == range) {
        /* Found a full run; its first slot is range-1 back from i. */
        list = i - range + 1;
        /* Reuse of `i` is safe: we return right after this loop. */
        for (i = 0; i < range; i++) {
          alloc_list(c, list + i);
        }
        return list;
      }
    } else {
      count=0; /* run broken by an allocated slot */
    }
  }
  return 0;
}
/* Prepend a node carrying val to list p and return the new head. */
list_t *insert_list(list_t *p, uintptr_t val)
{
	list_t *head = alloc_list(val);

	head->next = p;
	return head;
}
/*
 * rio_open - ndo_open callback (mid-era dl2k, memory-mapped accessors).
 * Installs the IRQ handler, resets and configures the hardware (jumbo
 * frames, DMA polling, coalescing, VLAN), allocates the descriptor rings,
 * starts the maintenance timer, enables TX/RX, and brings up the queue.
 * Returns 0 on success or the request_irq error.
 */
static int rio_open (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	const int irq = np->pdev->irq;
	int i;
	u16 macctrl;

	i = request_irq(irq, rio_interrupt, IRQF_SHARED, dev->name, dev);
	if (i)
		return i;

	/* Reset all logic functions */
	dw16(ASICCtrl + 2, GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset);
	mdelay(10);

	/* DebugCtrl bit 4, 5, 9 must set */
	dw32(DebugCtrl, dr32(DebugCtrl) | 0x0230);

	/* Jumbo frame */
	if (np->jumbo != 0)
		dw16(MaxFrameSize, MAX_JUMBO+14);

	/* Allocate RX/TX descriptor lists. */
	alloc_list (dev);

	/* Get station address */
	for (i = 0; i < 6; i++)
		dw8(StationAddr0 + i, dev->dev_addr[i]);

	set_multicast (dev);
	if (np->coalesce) {
		dw32(RxDMAIntCtrl, np->rx_coalesce | np->rx_timeout << 16);
	}
	/* Set RIO to poll every N*320nsec. */
	dw8(RxDMAPollPeriod, 0x20);
	dw8(TxDMAPollPeriod, 0xff);
	dw8(RxDMABurstThresh, 0x30);
	dw8(RxDMAUrgentThresh, 0x30);
	dw32(RmonStatMask, 0x0007ffff);
	/* clear statistics */
	clear_stats (dev);

	/* VLAN supported */
	if (np->vlan) {
		/* priority field in RxDMAIntCtrl  */
		dw32(RxDMAIntCtrl, dr32(RxDMAIntCtrl) | 0x7 << 10);
		/* VLANId */
		dw16(VLANId, np->vlan);
		/* Length/Type should be 0x8100 */
		dw32(VLANTag, 0x8100 << 16 | np->vlan);
		/* Enable AutoVLANuntagging, but disable AutoVLANtagging.
		   VLAN information tagged by TFC' VID, CFI fields. */
		dw32(MACCtrl, dr32(MACCtrl) | AutoVLANuntagging);
	}

	/* Periodic maintenance timer; first expiry in one second. */
	setup_timer(&np->timer, rio_timer, (unsigned long)dev);
	np->timer.expires = jiffies + 1*HZ;
	add_timer (&np->timer);

	/* Start Tx/Rx */
	dw32(MACCtrl, dr32(MACCtrl) | StatsEnable | RxEnable | TxEnable);

	macctrl = 0;
	macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
	macctrl |= (np->full_duplex) ? DuplexSelect : 0;
	macctrl |= (np->tx_flow) ? TxFlowControlEnable : 0;
	macctrl |= (np->rx_flow) ? RxFlowControlEnable : 0;
	dw16(MACCtrl, macctrl);

	netif_start_queue (dev);

	dl2k_enable_int(np);
	return 0;
}
/* Create the global interface queue with its free/dump callbacks. */
static void
init_if_queue(void)
{
	if_queue = alloc_list(free_if, dump_if);
}
/*
 * rio_open - ndo_open callback (legacy dl2k, port-I/O style accessors).
 * Installs the IRQ handler, resets and configures the hardware (jumbo
 * frames, DMA polling, coalescing, VLAN), allocates the descriptor rings,
 * starts the maintenance timer, enables TX/RX, and brings up the queue.
 * Returns 0 on success or the request_irq error.
 */
static int rio_open (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	long ioaddr = dev->base_addr;
	int i;
	u16 macctrl;

	i = request_irq (dev->irq, &rio_interrupt, IRQF_SHARED, dev->name, dev);
	if (i)
		return i;

	/* Reset all logic functions */
	writew (GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset,
		ioaddr + ASICCtrl + 2);
	mdelay(10);

	/* DebugCtrl bit 4, 5, 9 must set */
	writel (readl (ioaddr + DebugCtrl) | 0x0230, ioaddr + DebugCtrl);

	/* Jumbo frame */
	if (np->jumbo != 0)
		writew (MAX_JUMBO+14, ioaddr + MaxFrameSize);

	/* Allocate RX/TX descriptor lists. */
	alloc_list (dev);

	/* Get station address */
	for (i = 0; i < 6; i++)
		writeb (dev->dev_addr[i], ioaddr + StationAddr0 + i);

	set_multicast (dev);
	if (np->coalesce) {
		writel (np->rx_coalesce | np->rx_timeout << 16,
			ioaddr + RxDMAIntCtrl);
	}
	/* Set RIO to poll every N*320nsec. */
	writeb (0x20, ioaddr + RxDMAPollPeriod);
	writeb (0xff, ioaddr + TxDMAPollPeriod);
	writeb (0x30, ioaddr + RxDMABurstThresh);
	writeb (0x30, ioaddr + RxDMAUrgentThresh);
	writel (0x0007ffff, ioaddr + RmonStatMask);
	/* clear statistics */
	clear_stats (dev);

	/* VLAN supported */
	if (np->vlan) {
		/* priority field in RxDMAIntCtrl  */
		writel (readl(ioaddr + RxDMAIntCtrl) | 0x7 << 10,
			ioaddr + RxDMAIntCtrl);
		/* VLANId */
		writew (np->vlan, ioaddr + VLANId);
		/* Length/Type should be 0x8100 */
		writel (0x8100 << 16 | np->vlan, ioaddr + VLANTag);
		/* Enable AutoVLANuntagging, but disable AutoVLANtagging.
		   VLAN information tagged by TFC' VID, CFI fields. */
		writel (readl (ioaddr + MACCtrl) | AutoVLANuntagging,
			ioaddr + MACCtrl);
	}

	/* Periodic maintenance timer; first expiry in one second. */
	init_timer (&np->timer);
	np->timer.expires = jiffies + 1*HZ;
	np->timer.data = (unsigned long) dev;
	np->timer.function = &rio_timer;
	add_timer (&np->timer);

	/* Start Tx/Rx */
	writel (readl (ioaddr + MACCtrl) | StatsEnable | RxEnable | TxEnable,
		ioaddr + MACCtrl);

	macctrl = 0;
	macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
	macctrl |= (np->full_duplex) ? DuplexSelect : 0;
	macctrl |= (np->tx_flow) ? TxFlowControlEnable : 0;
	macctrl |= (np->rx_flow) ? RxFlowControlEnable : 0;
	writew(macctrl, ioaddr + MACCtrl);

	netif_start_queue (dev);

	/* Enable default interrupts */
	EnableInt ();
	return 0;
}
/*
 * Convert an X text property into a NUL-separated list of strings of
 * encoding `to_type` (multibyte, wide-char, UTF-8, ...). The property's
 * encoding atom selects the source converter; identical from/to types are
 * handled by plain copying. On success *list_ret holds the allocated list
 * and *count_ret the number of strings; the return value is the number of
 * unconvertible characters, or a negative X error code on failure.
 */
static int
_XTextPropertyToTextList(
    XLCd lcd,
    Display *dpy,
    const XTextProperty *text_prop,
    const char *to_type,
    XPointer **list_ret,
    int *count_ret)
{
    XlcConv conv = NULL;
    const char *from_type;
    XPointer from, to, buf;
    char *str_ptr, *last_ptr;
    Atom encoding;
    int from_left, to_left, buf_len, ret, len;
    int unconv_num, nitems = text_prop->nitems;
    Bool is_wide_char = False, do_strcpy = False;

    if (strcmp(XlcNWideChar, to_type) == 0)
	is_wide_char = True;

    /* Empty property: return an empty (NULL) list. */
    if (nitems <= 0) {
	*list_ret = NULL;
	*count_ret = 0;
	return Success;
    }

    if (text_prop->format != 8)
	return XConverterNotFound;

    /* Map the property's encoding atom to a converter source type. */
    encoding = text_prop->encoding;
    if (encoding == XA_STRING)
	from_type = XlcNString;
    else if (encoding == XInternAtom(dpy, "UTF8_STRING", False))
	from_type = XlcNUtf8String;
    else if (encoding == XInternAtom(dpy, "COMPOUND_TEXT", False))
	from_type = XlcNCompoundText;
    else if (encoding == XInternAtom(dpy, XLC_PUBLIC(lcd, encoding_name), False))
	from_type = XlcNMultiByte;
    else
	return XConverterNotFound;

    /* Size the conversion buffer for the worst case of the target type. */
    if (is_wide_char) {
	buf_len = (text_prop->nitems + 1) * sizeof(wchar_t);
    } else {
	if (strcmp(to_type, XlcNUtf8String) == 0)
	    buf_len = text_prop->nitems * 6 + 1;
	else
	    buf_len = text_prop->nitems * XLC_PUBLIC(lcd, mb_cur_max) + 1;
    }
    buf = Xmalloc(buf_len);
    if (buf == NULL)
	return XNoMemory;
    to = buf;
    to_left = buf_len;

    /* can be XlcNMultiByte to XlcNMultiByte, or XlcNUtf8String to XlcNUtf8String */
    if (!strcmp(from_type, to_type)) {
	do_strcpy = True;
    } else {
	conv = _XlcOpenConverter(lcd, from_type, lcd, to_type);
	if (conv == NULL) {
	    Xfree(buf);
	    return XConverterNotFound;
	}
    }

    /* Walk the property, converting each NUL-terminated segment. */
    last_ptr = str_ptr = (char *) text_prop->value;
    unconv_num = *count_ret = 0;
    while (1) {
	if (nitems == 0 || *str_ptr == 0) {
	    from = (XPointer) last_ptr;
	    from_left = str_ptr - last_ptr;
	    last_ptr = str_ptr;
	    if (do_strcpy) {
		/* Same encoding: copy the segment verbatim. */
		len = min(from_left, to_left);
		strncpy(to, from, len);
		from += len;
		to += len;
		from_left -= len;
		to_left -= len;
		ret = 0;
	    } else {
		ret = _XlcConvert(conv, &from, &from_left, &to, &to_left, NULL, 0);
	    }
	    /* NOTE(review): a negative ret retries the same segment
	       without decrementing nitems — confirm this is intended. */
	    if (ret < 0)
		continue;
	    unconv_num += ret;
	    (*count_ret)++;
	    if (nitems == 0)
		break;
	    last_ptr = ++str_ptr;
	    /* Terminate the converted segment in the output buffer. */
	    if (is_wide_char) {
		*((wchar_t *)to) = (wchar_t) 0;
		to += sizeof(wchar_t);
		to_left -= sizeof(wchar_t);
	    } else {
		*((char *)to) = '\0';
		to++;
		to_left--;
	    }
	    if (! do_strcpy)
		_XlcResetConverter(conv);
	} else
	    str_ptr++;
	nitems--;
    }
    if (! do_strcpy)
	_XlcCloseConverter(conv);

    /* Final terminator for the last segment. */
    if (is_wide_char) {
	*((wchar_t *) to) = (wchar_t) 0;
	to_left -= sizeof(wchar_t);
    } else {
	*((char *) to) = '\0';
	to_left--;
    }

    /* Package the converted data into the returned pointer list. */
    *list_ret = alloc_list(is_wide_char, *count_ret, buf_len - to_left);
    if (*list_ret)
	copy_list(is_wide_char, buf, *list_ret, *count_ret);

    Xfree(buf);
    return unconv_num;
}