ot_u8 vworm_format() {
#if (VWORM_SIZE > 0)
    ot_int      i;
    ot_u16*     cursor;
    ot_u8       output = 0;

    /// 1. Load default cursor (using embedded method)
    cursor = (ot_u16*)(OTF_VWORM_START_ADDR);

    /// 2. Format all Blocks, Put Block IDs into Primary Blocks
    for (i=0; i<VWORM_PRIMARY_PAGES; i++) {
        output |= NAND_erase_page(cursor);
        cursor  = PTR_OFFSET(cursor, VWORM_PAGESIZE);
    }
    for (i=0; i<VWORM_FALLOW_PAGES; i++) {
        output |= NAND_erase_page(cursor);
        cursor  = PTR_OFFSET(cursor, VWORM_PAGESIZE);
    }

    return output;
#else
    return 0;
#endif
}
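Note: these snippets come from several unrelated codebases, so PTR_OFFSET is not one macro. In the Dovecot/Pigeonhole, GTK settings and OpenTag examples it acts as a byte-offset pointer helper; in smart_compare() (apparently Unbound's message parser) it assembles a DNS compression-pointer offset from two bytes; in __ptr_invalid() (bcache) it reads the offset field of a bkey pointer. For the byte-offset uses, a minimal hedged sketch along the lines of Dovecot's helper (the exact casts and const variants differ per project):

/* Hedged sketch of a byte-offset PTR_OFFSET; illustrative only, not the
 * verbatim definition from any of the projects quoted here. It advances
 * ptr by offset bytes and yields void *, so the result can be assigned
 * to any object pointer type. */
#define PTR_OFFSET(ptr, offset) \
        ((void *) (((unsigned char *) (ptr)) + (offset)))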
Example #2
/** smart comparison of (compressed, valid) dnames from packet */
static int
smart_compare(sldns_buffer* pkt, uint8_t* dnow, 
	uint8_t* dprfirst, uint8_t* dprlast)
{
	if(LABEL_IS_PTR(*dnow)) {
		/* ptr points to a previous dname */
		uint8_t* p = sldns_buffer_at(pkt, PTR_OFFSET(dnow[0], dnow[1]));
		if( p == dprfirst || p == dprlast )
			return 0;
		/* prev dname is also a ptr, both ptrs are the same. */
		if(LABEL_IS_PTR(*dprlast) &&
			dprlast[0] == dnow[0] && dprlast[1] == dnow[1])
			return 0;
	}
	return dname_pkt_compare(pkt, dnow, dprlast);
}
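In this example, PTR_OFFSET(dnow[0], dnow[1]) is not a byte-offset helper: it combines the two bytes of a DNS name-compression pointer (top two bits set, remaining 14 bits an offset from the packet start) into a buffer position. A hedged sketch of the helpers this relies on, approximating Unbound's definitions:

/* Hedged approximation of the compression-pointer macros assumed above;
 * not verbatim copies. Per RFC 1035, a compression-pointer label begins
 * with the bits 11, followed by a 14-bit offset into the packet. */
#define LABEL_IS_PTR(x)   ( ((x) & 0xc0) == 0xc0 )
#define PTR_OFFSET(x, y)  ( (((x) & 0x3f) << 8) | (y) )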
static const struct mail_index_record *
tview_apply_flag_updates(struct mail_index_view_transaction *tview,
			 struct mail_index_map *map,
			 const struct mail_index_record *rec, uint32_t seq)
{
	struct mail_index_transaction *t = tview->t;
	const struct mail_index_flag_update *updates;
	struct mail_index_record *trec;
	unsigned int idx, count;

	/* see if there are any flag updates */
	if (seq < t->min_flagupdate_seq || seq > t->max_flagupdate_seq ||
	    !array_is_created(&t->updates))
		return rec;

	updates = array_get(&t->updates, &count);
	idx = mail_index_transaction_get_flag_update_pos(t, 0, count, seq);
	if (seq < updates[idx].uid1 || seq > updates[idx].uid2)
		return rec;

	/* yes, we have flag updates. since we can't modify rec directly and
	   we want to be able to handle multiple mail_index_lookup() calls
	   without the second one overriding the first one's data, we'll
	   create a records array and return data from there.

	   it's also possible that the record size increases, so we potentially
	   have to create multiple arrays. they all get eventually freed when
	   the view gets freed. */
	if (map->hdr.record_size > tview->record_size) {
		if (!array_is_created(&tview->all_recs))
			i_array_init(&tview->all_recs, 4);
		tview->recs_count = t->first_new_seq;
		tview->record_size = I_MAX(map->hdr.record_size,
					   tview->view.map->hdr.record_size);
		tview->recs = i_malloc(tview->record_size *
				       tview->recs_count);
		array_append(&tview->all_recs, &tview->recs, 1);
	}
	i_assert(tview->recs_count == t->first_new_seq);
	i_assert(seq > 0 && seq <= tview->recs_count);

	trec = PTR_OFFSET(tview->recs, (seq-1) * tview->record_size);
	memcpy(trec, rec, map->hdr.record_size);
	trec->flags |= updates[idx].add_flags;
	trec->flags &= ~updates[idx].remove_flags;
	return trec;
}
Example #4
int pread_full(int fd, void *data, size_t size, off_t offset)
{
	ssize_t ret;

	while (size > 0) {
		ret = pread(fd, data, size < SSIZE_T_MAX ?
			    size : SSIZE_T_MAX, offset);
		if (ret <= 0)
			return ret;

		data = PTR_OFFSET(data, ret);
		size -= ret;
		offset += ret;
	}

	return 1;
}
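A hedged usage sketch of pread_full(): per the loop above it returns 1 when the whole range was read, 0 if EOF was hit first, and a negative value on a pread() error. The wrapper below is illustrative, not from the original code.

/* Hedged usage sketch: read a fixed-size header from the start of a file.
 * Returns 1 on success, 0 if the file is shorter than hdr_size, -1 on error. */
static int read_file_header(int fd, void *hdr, size_t hdr_size)
{
	int ret = pread_full(fd, hdr, hdr_size, 0);

	if (ret < 0)
		return -1;	/* pread() failed, errno is set */
	if (ret == 0)
		return 0;	/* EOF before hdr_size bytes were available */
	return 1;		/* whole header was read */
}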
static bool _file_lazy_read
(struct sieve_binary_file *file, off_t *offset, void *buffer, size_t size)
{
	struct sieve_instance *svinst = file->svinst;
	int ret;
	void *indata = buffer;
	size_t insize = size;

	*offset = SIEVE_BINARY_ALIGN(*offset);

	/* Seek to the correct position */
	if ( *offset != file->offset &&
		lseek(file->fd, *offset, SEEK_SET) == (off_t) -1 ) {
		sieve_sys_error(svinst, "binary read:"
			"failed to seek(fd, %lld, SEEK_SET) in binary %s: %m",
			(long long) *offset, file->path);
		return FALSE;
	}

	/* Read record into memory */
	while (insize > 0) {
		if ( (ret=read(file->fd, indata, insize)) <= 0 ) {
			if ( ret == 0 )
				sieve_sys_error(svinst,
					"binary read: binary %s is truncated (more data expected)",
					file->path);
			else
				sieve_sys_error(svinst,
					"binary read: failed to read from binary %s: %m", file->path);
			break;
		}

		indata = PTR_OFFSET(indata, ret);
		insize -= ret;
	}

	if ( insize != 0 ) {
		/* Failed to read the whole requested record */
		return FALSE;
	}

	*offset += size;
	file->offset = *offset;

	return TRUE;
}
static buffer_t *_file_memory_load_buffer
	(struct sieve_binary_file *file, off_t *offset, size_t size)
{
	struct _file_memory *fmem = (struct _file_memory *) file;

	*offset = SIEVE_BINARY_ALIGN(*offset);

	if ( (*offset) + size <= fmem->memory_size ) {
		const void *data = PTR_OFFSET(fmem->memory, *offset);
		*offset += size;
		file->offset = *offset;

		return buffer_create_const_data(file->pool, data, size);
	}

	return NULL;
}
static const void *_file_memory_load_data
	(struct sieve_binary_file *file, off_t *offset, size_t size)
{
	struct _file_memory *fmem = (struct _file_memory *) file;

	*offset = SIEVE_BINARY_ALIGN(*offset);

	if ( (*offset) + size <= fmem->memory_size ) {
		const void *data = PTR_OFFSET(fmem->memory, *offset);
		*offset += size;
		file->offset = *offset;

		return data;
	}

	return NULL;
}
Example #8
void Settings::get_colour_from_gui (Builder &builder, int i)
{
  const char *glade_name = colour_selectors[i].glade_name;
  vmml::Vector4f *dest =
      (vmml::Vector4f *)PTR_OFFSET(this, colour_selectors[i].member_offset);
  Gdk::Color c;
  Gtk::ColorButton *w = NULL;
  builder->get_widget (glade_name, w);
  if (!w) return;

  c = w->get_color();
  dest->r = c.get_red_p();
  dest->g = c.get_green_p();
  dest->b = c.get_blue_p();
  dest->a = (float) (w->get_alpha()) / 65535.0;

  m_signal_visual_settings_changed.emit();
}
Example #9
static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
{
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i)) {
			struct cache *ca = PTR_CACHE(c, k, i);
			size_t bucket = PTR_BUCKET_NR(c, k, i);
			size_t r = bucket_remainder(c, PTR_OFFSET(k, i));

			if (KEY_SIZE(k) + r > c->sb.bucket_size ||
			    bucket <  ca->sb.first_bucket ||
			    bucket >= ca->sb.nbuckets)
				return true;
		}

	return false;
}
static ssize_t
mail_transaction_log_file_read_header(struct mail_transaction_log_file *file)
{
	void *dest;
	size_t pos, dest_size;
	ssize_t ret;

	i_assert(file->buffer == NULL && file->mmap_base == NULL);

	memset(&file->hdr, 0, sizeof(file->hdr));
	if (file->last_size < mmap_get_page_size() && file->last_size > 0) {
		/* just read the entire transaction log to memory.
		   note that if some of the data hasn't been fully committed
		   yet (hdr.size=0), the buffer must be truncated later */
		file->buffer = buffer_create_dynamic(default_pool, 4096);
		file->buffer_offset = 0;
		dest_size = file->last_size;
		dest = buffer_append_space_unsafe(file->buffer, dest_size);
	} else {
		/* read only the header */
		dest = &file->hdr;
		dest_size = sizeof(file->hdr);
	}

	/* it's not necessarily an error to read less than wanted header size,
	   since older versions of the log format used smaller headers. */
        pos = 0;
	do {
		ret = pread(file->fd, PTR_OFFSET(dest, pos),
			    dest_size - pos, pos);
		if (ret > 0)
			pos += ret;
	} while (ret > 0 && pos < dest_size);

	if (file->buffer != NULL) {
		buffer_set_used_size(file->buffer, pos);
		memcpy(&file->hdr, file->buffer->data,
		       I_MIN(pos, sizeof(file->hdr)));
	}

	return ret < 0 ? -1 : (ssize_t)pos;
}
static bool _file_memory_load(struct sieve_binary_file *file)
{
	struct _file_memory *fmem = (struct _file_memory *) file;
	int ret;
	size_t size;
	void *indata;

	i_assert(file->fd > 0);

	/* Allocate memory buffer
	 */
	indata = p_malloc(file->pool, file->st.st_size);
	size = file->st.st_size;

	file->offset = 0;
	fmem->memory = indata;
	fmem->memory_size = file->st.st_size;

	/* Return to beginning of the file */
	if ( lseek(file->fd, 0, SEEK_SET) == (off_t) -1 ) {
		sieve_sys_error("failed to seek() in binary %s: %m", file->path);
		return FALSE;
	}

	/* Read the whole file into memory */
	while (size > 0) {
		if ( (ret=read(file->fd, indata, size)) <= 0 ) {
			sieve_sys_error("failed to read from binary %s: %m", file->path);
			break;
		}

		indata = PTR_OFFSET(indata, ret);
		size -= ret;
	}

	if ( size != 0 ) {
		/* Failed to read the whole file */
		return FALSE;
	}

	return TRUE;
}
bool sieve_variable_identifier_is_valid(const char *identifier)
{
	const char *p = identifier;
	size_t plen = strlen(identifier);
	const char *pend;

	if ( plen == 0 || plen >= EXT_VARIABLES_MAX_VARIABLE_NAME_LEN )
		return FALSE;

	pend = PTR_OFFSET(identifier, plen);

	if ( *p == '_' || i_isalpha(*p) ) {
		p++;

		while ( p < pend && (*p == '_' || i_isalnum(*p)) ) {
			p++;
		}
	}

	return ( p == pend );
}
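A hedged usage sketch of the rules the function above implements (the example names are illustrative; overly long names are also rejected by the length check):

/* Hedged usage sketch; illustrative identifiers only. */
#include <assert.h>

static void example_identifier_checks(void)
{
	assert(sieve_variable_identifier_is_valid("from_addr"));   /* '_' or letter first, then letters/digits/'_' */
	assert(!sieve_variable_identifier_is_valid("2nd_field"));  /* may not start with a digit */
	assert(!sieve_variable_identifier_is_valid(""));           /* empty names are rejected */
}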
/* FIXME: Is this even necessary for a file? */
static bool _save_full
(struct sieve_binary *sbin, struct ostream *stream, const void *data, size_t size)
{
	size_t bytes_left = size;
	const void *pdata = data;

	while ( bytes_left > 0 ) {
		ssize_t ret;

		if ( (ret=o_stream_send(stream, pdata, bytes_left)) <= 0 ) {
			sieve_sys_error(sbin->svinst,
				"binary save: failed to write %"PRIuSIZE_T" bytes "
				"to output stream: %s", bytes_left, strerror(stream->stream_errno));
			return FALSE;
		}

		pdata = PTR_OFFSET(pdata, ret);
		bytes_left -= ret;
	}

	return TRUE;
}
Example #14
void Settings::set_to_gui (Builder &builder)
{
  for (uint i = 0; i < G_N_ELEMENTS (settings); i++) {
    const char *glade_name = settings[i].glade_name;

    if (!glade_name)
      continue;

    set_to_gui (builder, i);
  }
  set_shrink_to_gui (builder);

  for (uint i = 0; i < G_N_ELEMENTS (colour_selectors); i++) {
      const char *glade_name = colour_selectors[i].glade_name;
      vmml::Vector4f *src =
        (vmml::Vector4f *) PTR_OFFSET(this, colour_selectors[i].member_offset);
      Gdk::Color c;
      Gtk::ColorButton *w = NULL;
      builder->get_widget (glade_name, w);
      if (w) {
        w->set_use_alpha(true);
        c.set_rgb_p(src->r, src->g, src->b);
        w->set_color(c);
        w->set_alpha(src->a * 65535.0);
      }
  }

  // Set serial speed. Find the row that holds this value
  Gtk::ComboBoxEntry *portspeed = NULL;
  builder->get_widget ("Hardware.SerialSpeed", portspeed);
  if (portspeed) {
    std::ostringstream ostr;
    ostr << Hardware.SerialSpeed;
    Glib::ustring val(ostr.str());
    portspeed->get_entry()->set_text(val);
  }
}
Example #15
static void *t_malloc_real(size_t size, bool permanent)
{
	struct stack_block *block;
	void *ret;
	size_t alloc_size;
#ifdef DEBUG
	bool warn = FALSE;
#endif

	if (unlikely(size == 0 || size > SSIZE_T_MAX))
		i_panic("Trying to allocate %"PRIuSIZE_T" bytes", size);

	if (unlikely(data_stack_frame == 0)) {
		/* kludgy, but allow this before initialization */
		data_stack_init();
	}

	/* allocate only an aligned amount of memory so alignment always
	   comes out properly */
#ifndef DEBUG
	alloc_size = MEM_ALIGN(size);
#else
	alloc_size = MEM_ALIGN(sizeof(size)) + MEM_ALIGN(size + SENTRY_COUNT);
#endif
	data_stack_last_buffer_reset(TRUE);

	/* used for t_try_realloc() */
	current_frame_block->last_alloc_size[frame_pos] = alloc_size;

	if (current_block->left >= alloc_size) {
		/* enough space in current block, use it */
		ret = STACK_BLOCK_DATA(current_block) +
			(current_block->size - current_block->left);

		if (current_block->left - alloc_size <
		    current_block->lowwater) {
			current_block->lowwater =
				current_block->left - alloc_size;
		}
                if (permanent)
			current_block->left -= alloc_size;
	} else {
		/* current block is full, see if we can use the unused_block */
		if (unused_block != NULL && unused_block->size >= alloc_size) {
			block = unused_block;
			unused_block = NULL;
		} else {
			block = mem_block_alloc(alloc_size);
#ifdef DEBUG
			warn = TRUE;
#endif
		}

		block->left = block->size;
		if (block->left - alloc_size < block->lowwater)
			block->lowwater = block->left - alloc_size;
		if (permanent)
			block->left -= alloc_size;
		block->next = NULL;

		current_block->next = block;
		current_block = block;

		ret = STACK_BLOCK_DATA(current_block);
#ifdef DEBUG
		if (warn && getenv("DEBUG_SILENT") == NULL) {
			/* warn after allocation, so if i_warning() wants to
			   allocate more memory we don't go to infinite loop */
			i_warning("Growing data stack with: %"PRIuSIZE_T,
				  block->size);
		}
#endif
	}
#ifdef DEBUG
	memcpy(ret, &size, sizeof(size));
	ret = PTR_OFFSET(ret, MEM_ALIGN(sizeof(size)));
	/* make sure the sentry contains CLEAR_CHRs. it might not if we
	   had used t_buffer_get(). */
	memset(PTR_OFFSET(ret, size), CLEAR_CHR,
	       MEM_ALIGN(size + SENTRY_COUNT) - size);
#endif
        return ret;
}
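t_malloc_real() is the internal allocator behind Dovecot's data stack; callers normally use the frame API rather than calling it directly. A minimal usage sketch, assuming the classic function names (t_push/t_malloc/t_pop), which differ between Dovecot versions (newer code wraps frames in T_BEGIN/T_END):

/* Hedged usage sketch of the data-stack frame API; the names are assumed
 * from the classic Dovecot interface and may not match every version. */
static void example_data_stack_use(void)
{
	t_push();				/* open a stack frame */
	char *tmp = t_malloc(128);		/* transient allocation, valid until the frame is popped */
	memset(tmp, 'x', 128);			/* use the memory within the frame */
	t_pop();				/* releases everything allocated in this frame */
}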
Example #16
static int mcht_matches_match_key
(struct sieve_match_context *mctx, const char *val, size_t val_size,
	const char *key, size_t key_size)
{
	const struct sieve_comparator *cmp = mctx->comparator;
	struct sieve_match_values *mvalues;
	string_t *mvalue = NULL, *mchars = NULL;
	string_t *section, *subsection;
	const char *vend, *kend, *vp, *kp, *wp, *pvp;
	bool backtrack = FALSE; /* TRUE: match of '?'-connected sections failed */
	char wcard = '\0';      /* Current wildcard */
	char next_wcard = '\0'; /* Next wildcard */
	unsigned int key_offset = 0;

	if ( cmp->def == NULL || cmp->def->char_match == NULL )
		return 0;

	/* Key sections */
	section = t_str_new(32);    /* Section (after beginning or *) */
	subsection = t_str_new(32); /* Sub-section (after ?) */

	/* Mark end of value and key */
	vend = (const char *) val + val_size;
	kend = (const char *) key + key_size;

	/* Initialize pointers */
	vp = val;                   /* Value pointer */
	kp = key;                   /* Key pointer */
	wp = key;                   /* Wildcard (key) pointer */

	/* Start match values list if requested */
	if ( (mvalues = sieve_match_values_start(mctx->runenv)) != NULL ) {
		/* Skip ${0} for now; added when match succeeds */
		sieve_match_values_add(mvalues, NULL);

		mvalue = t_str_new(32);     /* Match value (*) */
		mchars = t_str_new(32);     /* Match characters (.?..?.??) */
	}

	/* Match the pattern:
	 *   <pattern> = <section>*<section>*<section>...
	 *   <section> = <sub-section>?<sub-section>?<sub-section>...
	 *
	 * Escape sequences \? and \* need special attention.
	 */

	debug_printf("=== Start ===\n");
	debug_printf("  key:   %s\n", t_strdup_until(key, kend));
	debug_printf("  value: %s\n", t_strdup_until(val, vend));

	/* Loop until either key or value ends */
	while (kp < kend && vp < vend ) {
		const char *needle, *nend;

		if ( !backtrack ) {
			/* Search the next '*' wildcard in the key string */

			wcard = next_wcard;

			/* Find the needle to look for in the string */
			key_offset = 0;
			for (;;) {
				next_wcard = _scan_key_section(section, &wp, kend);

				if ( wcard == '\0' || str_len(section) > 0 )
					break;

				if ( next_wcard == '*' ) {
					break;
				}

				if ( wp < kend )
					wp++;
				else
					break;
				key_offset++;
			}

			debug_printf("found wildcard '%c' at pos [%d]\n",
				next_wcard, (int) (wp-key));

			if ( mvalues != NULL )
				str_truncate(mvalue, 0);
		} else {
			/* Backtracked; '*' wildcard is retained */
			debug_printf("backtracked");
			backtrack = FALSE;
		}

		/* Determine what we are looking for */
		needle = str_c(section);
		nend = PTR_OFFSET(needle, str_len(section));

		debug_printf("  section needle:  '%s'\n", t_strdup_until(needle, nend));
		debug_printf("  section key:     '%s'\n", t_strdup_until(kp, kend));
		debug_printf("  section remnant: '%s'\n", t_strdup_until(wp, kend));
		debug_printf("  value remnant:   '%s'\n", t_strdup_until(vp, vend));
		debug_printf("  key offset:      %d\n", key_offset);

		pvp = vp;
		if ( next_wcard == '\0' ) {
			if ( wcard == '\0' ) {
				/* No current wildcard; match needs to happen right at the beginning */
				debug_printf("next_wcard = NULL && wcard = NUL; needle should be equal to value.\n");

				if ( (vend - vp) != (nend - needle) ||
					!cmp->def->char_match(cmp, &vp, vend, &needle, nend) ) {
					debug_printf("  key not equal to value\n");
					break;
				}

			} else {
				const char *qp, *qend;

				/* No more wildcards; find the needle substring at the end of string */
				debug_printf("next_wcard = NUL; must find needle at end\n");

				/* Check if the value is still large enough */
				if ( vend - str_len(section) < vp ) {
					debug_printf("  wont match: value is too short\n");
					break;
				}

				/* Move value pointer to where the needle should be */
				vp = PTR_OFFSET(vend, -str_len(section));

				/* Record match values */
				qend = vp;
				qp = vp - key_offset;

				if ( mvalues != NULL )
					str_append_n(mvalue, pvp, qp-pvp);

				/* Compare needle to end of value string */
				if ( !cmp->def->char_match(cmp, &vp, vend, &needle, nend) ) {
					debug_printf("  match at end failed\n");
					break;
				}

				/* Add match values */
				if ( mvalues != NULL ) {
					/* Append '*' match value */
					sieve_match_values_add(mvalues, mvalue);

					/* Append any initial '?' match values */
					for ( ; qp < qend; qp++ )
						sieve_match_values_add_char(mvalues, *qp);
				}
			}

			/* Finish match */
			kp = kend;
			vp = vend;

			debug_printf("  matched end of value\n");
			break;
		} else {
			/* Next wildcard found; match needle before next wildcard */

			const char *prv = NULL; /* Stored value pointer for backtrack */
			const char *prk = NULL; /* Stored key pointer for backtrack */
			const char *prw = NULL; /* Stored wildcard pointer for backtrack */
			const char *chars;

			/* Reset '?' match values */
			if ( mvalues != NULL )
				str_truncate(mchars, 0);

			if ( wcard == '\0' ) {
				/* No current wildcard; match needs to happen right at the beginning */
				debug_printf("wcard = NUL; needle should be found at the beginning.\n");
				debug_printf("  begin needle: '%s'\n", t_strdup_until(needle, nend));
				debug_printf("  begin value:  '%s'\n", t_strdup_until(vp, vend));

				if ( !cmp->def->char_match(cmp, &vp, vend, &needle, nend) ) {
					debug_printf("  failed to find needle at beginning\n");
					break;
				}

			} else {
				/* Current wildcard present; match needle between current and next wildcard */
				debug_printf("wcard != NUL; must find needle at an offset (>= %d).\n",
					key_offset);

				/* Match may happen at any offset (>= key offset): find substring */
				vp += key_offset;
				if ( (vp >= vend) || !_string_find(cmp, &vp, vend, &needle, nend) ) {
					debug_printf("  failed to find needle at an offset\n");
					break;
				}

				prv = vp - str_len(section);
				prk = kp;
				prw = wp;

				/* Append match values */
				if ( mvalues != NULL ) {
					const char *qend = vp - str_len(section);
					const char *qp = qend - key_offset;

					/* Append '*' match value */
					str_append_n(mvalue, pvp, qp-pvp);

					/* Append any initial '?' match values (those that caused the key
					 * offset).
					 */
					for ( ; qp < qend; qp++ )
						str_append_c(mchars, *qp);
				}
			}

			/* Update wildcard and key pointers for next wildcard scan */
			if ( wp < kend ) wp++;
			kp = wp;

			/* Scan successive '?' wildcards */
			while ( next_wcard == '?' ) {
				debug_printf("next_wcard = '?'; need to match arbitrary character\n");

				/* Add match value */
				if ( mvalues != NULL )
					str_append_c(mchars, *vp);

				vp++;

				/* Scan for next '?' wildcard */
				next_wcard = _scan_key_section(subsection, &wp, kend);
				debug_printf("found next wildcard '%c' at pos [%d] (fixed match)\n",
					next_wcard, (int) (wp-key));

				/* Determine what we are looking for */
				needle = str_c(subsection);
				nend = PTR_OFFSET(needle, str_len(subsection));

				debug_printf("  sub key:       '%s'\n", t_strdup_until(needle, nend));
				debug_printf("  value remnant: '%s'\n", vp <= vend ? t_strdup_until(vp, vend) : "");

				/* Try matching the needle at fixed position */
				if ( (needle == nend && next_wcard == '\0' && vp < vend ) ||
					!cmp->def->char_match(cmp, &vp, vend, &needle, nend) ) {

					/* Match failed: now we have a problem. We need to backtrack to the previous
					 * '*' wildcard occurrence and start scanning for the next possible match.
					 */

					debug_printf("  failed fixed match\n");

					/* Start backtrack */
					if ( prv != NULL && prv + 1 < vend ) {
						/* Restore pointers */
						vp = prv;
						kp = prk;
						wp = prw;

						/* Skip forward one value character to scan the next possible match */
						if ( mvalues != NULL )
							str_append_c(mvalue, *vp);
						vp++;

						/* Set wildcard state appropriately */
						wcard = '*';
						next_wcard = '?';

						/* Backtrack */
						backtrack = TRUE;

						debug_printf("  BACKTRACK\n");
					}

					/* Break '?' wildcard scanning loop */
					break;
				}

				/* Update wildcard and key pointers for next wildcard scan */
				if ( wp < kend ) wp++;
				kp = wp;
			}

			if ( !backtrack ) {
				unsigned int i;

				if ( next_wcard == '?' ) {
					debug_printf("failed to match '?'\n");
					break;
				}

				if ( mvalues != NULL ) {
					if ( prv != NULL )
						sieve_match_values_add(mvalues, mvalue);

					chars = (const char *) str_data(mchars);

					for ( i = 0; i < str_len(mchars); i++ ) {
						sieve_match_values_add_char(mvalues, chars[i]);
					}
				}

				if ( next_wcard != '*' ) {
					debug_printf("failed to match at end of string\n");
					break;
				}
			}
		}

		/* Check whether string ends in a wildcard
		 * (avoid scanning the rest of the string)
		 */
		if ( kp == kend && next_wcard == '*' ) {
			/* Add the rest of the string as match value */
			if ( mvalues != NULL ) {
				str_truncate(mvalue, 0);
				str_append_n(mvalue, vp, vend-vp);
				sieve_match_values_add(mvalues, mvalue);
			}

			/* Finish match */
			kp = kend;
			vp = vend;

			debug_printf("key ends with '*'\n");
			break;
		}

		debug_printf("== Loop ==\n");
	}

	/* Eat away a trailing series of *s */
	if ( vp == vend ) {
		while ( kp < kend && *kp == '*' ) kp++;
	}

	/* By definition, the match is only successful if both value and key pattern
	 * are exhausted.
	 */

	debug_printf("=== Finish ===\n");
	debug_printf("  result: %s\n", (kp == kend && vp == vend) ? "true" : "false");

	if (kp == kend && vp == vend) {
		/* Activate new match values after successful match */
		if ( mvalues != NULL ) {
			/* Set ${0} */
			string_t *matched = str_new_const(pool_datastack_create(), val, val_size);
			sieve_match_values_set(mvalues, 0, matched);

			/* Commit new match values */
			sieve_match_values_commit(mctx->runenv, &mvalues);
		}
		return 1;
	}

	/* No match; drop collected match values */
	sieve_match_values_abort(&mvalues);
	return 0;
}
static int
mail_transaction_log_file_read_hdr(struct mail_transaction_log_file *file,
				   bool ignore_estale)
{
        struct mail_transaction_log_file *f;
	int ret;

	i_assert(!MAIL_TRANSACTION_LOG_FILE_IN_MEMORY(file));

	if (file->corrupted)
		return 0;

	ret = mail_transaction_log_file_read_header(file);
	if (ret < 0) {
                if (errno != ESTALE || !ignore_estale)
			log_file_set_syscall_error(file, "pread()");
		return -1;
	}
	if (file->hdr.major_version != MAIL_TRANSACTION_LOG_MAJOR_VERSION) {
		/* incompatible version - fix silently */
		return 0;
	}
	if (ret < MAIL_TRANSACTION_LOG_HEADER_MIN_SIZE) {
		mail_transaction_log_file_set_corrupted(file,
			"unexpected end of file while reading header");
		return 0;
	}

	if (file->hdr.minor_version >= 2 || file->hdr.major_version > 1) {
		/* we have compatibility flags */
		enum mail_index_header_compat_flags compat_flags = 0;

#if !WORDS_BIGENDIAN
		compat_flags |= MAIL_INDEX_COMPAT_LITTLE_ENDIAN;
#endif
		if (file->hdr.compat_flags != compat_flags) {
			/* architecture change */
			mail_index_set_error(file->log->index,
					     "Rebuilding index file %s: "
					     "CPU architecture changed",
					     file->log->index->filepath);
			return 0;
		}
	}
	if (file->hdr.hdr_size < MAIL_TRANSACTION_LOG_HEADER_MIN_SIZE) {
		mail_transaction_log_file_set_corrupted(file,
			"Header size too small");
		return 0;
	}
	if (file->hdr.hdr_size < sizeof(file->hdr)) {
		/* @UNSAFE: smaller than we expected - zero out the fields we
		   shouldn't have filled */
		memset(PTR_OFFSET(&file->hdr, file->hdr.hdr_size), 0,
		       sizeof(file->hdr) - file->hdr.hdr_size);
	}

	if (file->hdr.indexid == 0) {
		/* corrupted */
		file->corrupted = TRUE;
		mail_index_set_error(file->log->index,
			"Transaction log file %s: marked corrupted",
			file->filepath);
		return 0;
	}
	if (file->hdr.indexid != file->log->index->indexid) {
		if (file->log->index->indexid != 0 &&
		    !file->log->index->initial_create) {
			/* index file was probably just rebuilt and we don't
			   know about it yet */
			mail_transaction_log_file_set_corrupted(file,
				"indexid changed %u -> %u",
				file->log->index->indexid, file->hdr.indexid);
			return 0;
		}

		/* creating index file. since transaction log is created
		   first, use the indexid in it to create the main index
		   to avoid races. */
		file->log->index->indexid = file->hdr.indexid;
	}

	/* make sure we already don't have a file with the same sequence
	   opened. it shouldn't happen unless the old log file was
	   corrupted. */
	for (f = file->log->files; f != NULL; f = f->next) {
		if (f->hdr.file_seq == file->hdr.file_seq) {
			if (strcmp(f->filepath, f->log->head->filepath) != 0) {
				/* old "f" is the .log.2 */
				return mail_transaction_log_file_fail_dupe(f);
			} else {
				/* new "file" is probably the .log.2 */
				return mail_transaction_log_file_fail_dupe(file);
			}
		}
	}

	file->sync_highest_modseq = file->hdr.initial_modseq;
	return 1;
}
ot_u8 vworm_write(vaddr addr, ot_u16 data) {
#if ((VWORM_SIZE > 0) && (OT_FEATURE_VLNVWRITE == ENABLED))
    ot_int  index;
    ot_int  offset;
    ot_u16  wrtest;
    ot_u16* p_ptr;
    ot_u16* a_ptr;

    SEGFAULT_CHECK(addr, in_vworm, 7, "VLC_445");   //__LINE__      

    /// 1.  Resolve the vaddr directly
    offset  = addr & (VWORM_PAGESIZE-1);
    index   = (addr-VWORM_BASE_VADDR) >> VWORM_PAGESHIFT;
    p_ptr   = PTR_OFFSET(X2table.block[index].primary, offset);
    
    /// 2. No ancillary block, but try a write anyway
    if (X2table.block[index].ancillary == NULL) {
        
        /// 2a. If no 0->1 write requirement, then we're good to go
        if ((data & ~(*p_ptr)) == 0) {
            return vworm_mark_physical(p_ptr, data);
        }

        /// 2b. Attach a fallow block to this one (it becomes the ancillary)
        sub_attach_fallow(&X2table.block[index]);
    }
    
    /// 3. There is ancillary block, so go through the logical write process,
    ///    which is designed to shake a write out of whatever it can get.
    ///    The only bit combination that cannot be managed is [1->0 via 0,0]
    a_ptr   = PTR_OFFSET(X2table.block[index].ancillary, offset);
    wrtest  = ~data & ~(*p_ptr) & ~(*a_ptr);
    
    if (wrtest == 0) {
        ot_u8   test = 0;
    
        /// 3a. Adjust cases where [1->0 via 1,1] or [0->1 via 1,0]
        wrtest  = ~data & *p_ptr & *a_ptr;
        wrtest |= data & *p_ptr & ~(*a_ptr);
        if (wrtest != 0) {
            test |= vworm_mark_physical(p_ptr, *p_ptr ^ wrtest);
        }

        /// 3b. Adjust cases where [0->1 via 0,1]
        wrtest  = data & ~(*p_ptr) & *a_ptr;
        if (wrtest != 0) {
            test |= vworm_mark_physical(a_ptr, *a_ptr ^ wrtest);
        }
        
        return test;
    }
    
    /// 4. Recombine this block, with the exception of the given addr offset, 
    ///    which we will then write to
    else {
        p_ptr = sub_recombine_block(&X2table.block[index], offset, 2);
        return vworm_mark_physical(p_ptr, data);
    }
#else
    return 0;
#endif 
}
ot_u16* sub_recombine_block(block_ptr* block_in, ot_int skip, ot_int span) {
    ot_u8 test = 0;     // must start cleared: write results are OR'ed in below
    ot_int i;
    ot_u16* new_ptr;
    ot_u16* f_ptr;
    ot_u16* p_ptr;
    ot_u16* a_ptr;

    /// 1. Assign pointers
    p_ptr   = block_in->primary;
    a_ptr   = block_in->ancillary;
    new_ptr = X2table.fallow[(VWORM_FALLOW_PAGES-1)];
    f_ptr   = new_ptr;
    
    /// 2. Combine the old blocks into the fallow block
    span+=skip;
    for (i=0; i<OTF_VWORM_PAGESIZE; i+=2) {
        if ((i<skip) || (i>=span)) {
            test |= vworm_mark_physical(f_ptr, ~(*p_ptr ^ *a_ptr));
        }
        f_ptr++;
        p_ptr++;
        a_ptr++;
    }
    
//    i += span;
//    PTR_OFFSET(f_ptr, span);
//    PTR_OFFSET(p_ptr, span);
//    PTR_OFFSET(a_ptr, span);
//    
//    for (; i<VWORM_PAGESIZE; i+=2) {
//        test |= vworm_mark_physical(f_ptr, ~(*p_ptr ^ *a_ptr));
//        f_ptr++;
//        p_ptr++;
//        a_ptr++;
//    }
    
    /// 3. Erase the old blocks
    NAND_erase_page( block_in->primary );
    NAND_erase_page( block_in->ancillary );
    
    /// 4. Make the two erased blocks fallow blocks. If we are in this function,
    /// we can deduce that there is at least one ancillary and one fallow, so we
    /// stop when NULL is discovered or when at the end of the fallows.
#   if (VWORM_FALLOW_PAGES >= 2)
        for (i=(VWORM_FALLOW_PAGES-1); X2table.fallow[i] != NULL; i--) {
            X2table.fallow[i] = X2table.fallow[i-1];
        }
        X2table.fallow[i+1] = block_in->primary;
        X2table.fallow[i]   = block_in->ancillary;
#   else
        X2table.fallow[1]   = block_in->primary;
        X2table.fallow[0]   = block_in->ancillary;
#   endif     
    
    /// 5. Set the primary block to its new position, and ancillary to NULL
    block_in->ancillary = NULL;
    block_in->primary   = new_ptr;
    
    /// 6. return the (physical) skip address
    return PTR_OFFSET(new_ptr, skip);
}
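A hedged reading of the bit-pair scheme implied by vworm_write() and sub_recombine_block() above (inferred from the code, not taken from OpenTag documentation): the recombine step stores ~(primary ^ ancillary), and a freshly attached (erased, all-ones) ancillary leaves the logical value equal to the primary, so the logical bit appears to be ~(primary XOR ancillary), with physical NAND writes only able to clear bits (1 -> 0). Under that reading, the write cases in step 3 work out as follows:

/* Hedged model of the inferred encoding; illustrative only, not OpenTag code.
 * Logical bit = ~(primary ^ ancillary); a physical write may only clear bits.
 *   want 0, stored (1,1): clear the primary bit      -> step 3a, "[1->0 via 1,1]"
 *   want 1, stored (1,0): clear the primary bit      -> step 3a, "[0->1 via 1,0]"
 *   want 1, stored (0,1): clear the ancillary bit    -> step 3b, "[0->1 via 0,1]"
 *   want 0, stored (0,0): impossible in place        -> step 4, recombine the block
 */
static ot_u16 vworm_logical_value(ot_u16 primary, ot_u16 ancillary) {
    return (ot_u16)~(primary ^ ancillary);
}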