/*
 * Build a JSON object describing @msg.
 *
 * Members are added in a fixed order: optional thread info, subject,
 * (headers-only) contacts, date, size, message-id, mailing-list, path,
 * maildir, priority, flags, tags, references, in-reply-to, and finally
 * the file-derived parts when not in headers-only mode.
 *
 * NOTE(review): @docid is currently unused here — confirm whether callers
 * expect it in the output.
 *
 * Returns a newly built JsonNode (caller owns it), or NULL if @msg is
 * NULL or the option combination is invalid (HEADERS_ONLY together with
 * EXTRACT_IMAGES is rejected).
 */
struct _JsonNode*
mu_msg_to_json (MuMsg *msg, unsigned docid, const MuMsgIterThreadInfo *ti,
		MuMsgOptions opts)
{
	JsonBuilder	*builder;
	JsonNode	*root;
	time_t		 date;
	size_t		 msgsize;

	g_return_val_if_fail (msg, NULL);
	g_return_val_if_fail (!((opts & MU_MSG_OPTION_HEADERS_ONLY) &&
				(opts & MU_MSG_OPTION_EXTRACT_IMAGES)), NULL);

	builder = json_builder_new ();
	builder = json_builder_begin_object (builder);

	if (ti)
		add_thread_info (builder, ti);

	add_string_member (builder, "subject", mu_msg_get_subject (msg));

	/* in the no-headers-only case (see below) we get a more complete list
	 * of contacts, so no need to get them here if that's the case */
	if (opts & MU_MSG_OPTION_HEADERS_ONLY)
		add_contacts (builder, msg);

	date = mu_msg_get_date (msg);
	if (date != (time_t)-1)
		add_int_member (builder, "date", date);

	msgsize = mu_msg_get_size (msg);
	if (msgsize != (size_t)-1)
		add_int_member (builder, "size", msgsize);

	add_string_member (builder, "message-id", mu_msg_get_msgid (msg));
	add_string_member (builder, "mailing-list",
			   mu_msg_get_mailing_list (msg));
	add_string_member (builder, "path", mu_msg_get_path (msg));
	add_string_member (builder, "maildir", mu_msg_get_maildir (msg));
	add_string_member (builder, "priority",
			   mu_msg_prio_name (mu_msg_get_prio (msg)));

	add_flags (builder, msg);

	add_list_member (builder, "tags", mu_msg_get_tags (msg));
	add_list_member (builder, "references", mu_msg_get_references (msg));
	add_string_member (builder, "in-reply-to",
			   mu_msg_get_header (msg, "In-Reply-To"));

	/* headers are retrieved from the database, views from the
	 * message file file attr things can only be gotten from the
	 * file (ie., mu view), not from the database (mu find). */
	if (!(opts & MU_MSG_OPTION_HEADERS_ONLY))
		add_file_parts (builder, msg, opts);

	builder = json_builder_end_object (builder);
	root	= json_builder_get_root (builder);
	g_clear_object (&builder);

	return root;
}
/* Run the checkasm suite against every supported CPU capability combination.
 * Starts from the plain C baseline (cpu0 == cpu1 == 0) and incrementally
 * enables each detected feature via add_flags(); cache-line variant bits are
 * masked back out of cpu1 after testing so later feature sets are exercised
 * without them.  Returns 0 if all checks passed, non-zero otherwise. */
static int check_all_flags( void )
{
    int ret = 0;
    int cpu0 = 0, cpu1 = 0;
    /* Detect once up front instead of re-running x264_cpu_detect() (and its
     * CPUID probing) before every feature group; also matches the style of
     * the newer check_all_flags variant in this file. */
    uint32_t cpu_detect_rs = x264_cpu_detect();
#ifdef HAVE_MMX
    if( cpu_detect_rs & X264_CPU_MMXEXT )
    {
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_MMX | X264_CPU_MMXEXT, "MMX" );
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_CACHELINE_64, "MMX Cache64" );
        cpu1 &= ~X264_CPU_CACHELINE_64;
#ifdef ARCH_X86
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_CACHELINE_32, "MMX Cache32" );
        cpu1 &= ~X264_CPU_CACHELINE_32;
#endif
    }
    if( cpu_detect_rs & X264_CPU_SSE2 )
    {
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_SSE | X264_CPU_SSE2 | X264_CPU_SSE2_IS_SLOW, "SSE2Slow" );
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_SSE2_IS_FAST, "SSE2Fast" );
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_CACHELINE_64, "SSE2Fast Cache64" );
    }
    if( cpu_detect_rs & X264_CPU_SSE3 )
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_SSE3 | X264_CPU_CACHELINE_64, "SSE3" );
    if( cpu_detect_rs & X264_CPU_SSSE3 )
    {
        /* drop the Cache64 bit left over from the SSE3 pass */
        cpu1 &= ~X264_CPU_CACHELINE_64;
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_SSSE3, "SSSE3" );
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_CACHELINE_64, "SSSE3 Cache64" );
        ret |= add_flags( &cpu0, &cpu1, X264_CPU_PHADD_IS_FAST, "PHADD" );
    }
#elif ARCH_PPC
    if( cpu_detect_rs & X264_CPU_ALTIVEC )
    {
        fprintf( stderr, "x264: ALTIVEC against C\n" );
        ret = check_all_funcs( 0, X264_CPU_ALTIVEC );
    }
#endif
    return ret;
}
/** Attach a live net6 connection to this (previously known) user.
 *
 * Preconditions, both enforced with std::logic_error:
 *  - the user must not already be flagged CONNECTED;
 *  - user6's name must match this user's stored name (reconnection of
 *    the same identity, not a different one).
 *
 * On success the net6 user is stored by pointer (user6 must outlive this
 * binding -- presumably owned by the session; confirm with callers), the
 * user's colour is updated, and the CONNECTED flag is set.
 */
void obby::user::assign_net6(const net6::user& user6, const colour& colour)
{
	// User must not be already connected
	if(get_flags() & flags::CONNECTED)
		throw std::logic_error("obby::user::assign_net6");

	// Name must be the same
	if(m_name != user6.get_name() )
		throw std::logic_error("obby::user::assign_net6");

	m_user6 = &user6;       // keep a non-owning reference to the connection
	m_colour = colour;
	add_flags(flags::CONNECTED);
}
/* Write error code to exception packet.
 *
 * Rebuilds the packet/crypto headers from @hdr into @buf, appends the
 * 32-bit @code as the response payload, and sets the EXCEPTION flag in
 * the packet header.  On success *olen receives the total packet length
 * and true is returned; false is returned when @buf is too small or the
 * header cannot be reconstructed.  @olen is untouched on failure.
 */
bool framework_write_exception(application_event_result code,
                               const nabto_packet_header *hdr,
                               uint8_t *buf, uint16_t size, uint16_t *olen)
{
    unabto_buffer payload_buf;
    unabto_query_response writer;
    uint16_t header_len;
    uint8_t *end;

    header_len = hdr->hlen + OFS_DATA;

    /* Need room for at least one payload byte beyond the headers. */
    if (size <= header_len) {
        return false;
    }

    /* Rebuild packet header + crypto payload header; bail out if the
     * reconstruction fails or doesn't land exactly where we expected. */
    end = reconstruct_header(buf, hdr);
    if (end == NULL || header_len != (uint16_t)(end - buf)) {
        return false;
    }

    /* Serialize the error code into the remaining space after the headers. */
    unabto_buffer_init(&payload_buf, buf + header_len, (int)(size - header_len));
    unabto_query_response_init(&writer, &payload_buf);
    if (!unabto_query_write_uint32(&writer, code)) {
        return false;
    }

    add_flags(buf, NP_PACKET_HDR_FLAG_EXCEPTION);
    NABTO_LOG_TRACE(("Inserting EXCEPTION %i in buffer: %" PRItext, code, result_s(code)));

    *olen = (uint16_t) (header_len + unabto_query_response_used(&writer));
    return true;
}
/* Exhaustively test every asm code path: start from the plain C baseline
 * (cpu0 == cpu1 == 0) and progressively enable each detected CPU capability
 * via add_flags().  cpu1 accumulates the flag set under test; "speed hack" /
 * variant bits (Cache64, SlowShuffle, SlowCTZ, LZCNT, ...) are masked back
 * out of cpu1 after their run so later combinations are tested without them.
 * Returns 0 on success, non-zero if any check failed.
 * NOTE(review): assumes add_flags() re-runs the test suite with the enlarged
 * flag set and returns non-zero on failure — confirm against its definition.
 */
static int check_all_flags( void )
{
    int ret = 0;
    int cpu0 = 0, cpu1 = 0;
    /* query CPU capabilities once up front */
    uint32_t cpu_detect_rs = cpu_detect();
#if HAVE_MMX
    if( cpu_detect_rs & VSIMD_CPU_MMX2 )
    {
        ret |= add_flags( &cpu0, &cpu1, VSIMD_CPU_MMX | VSIMD_CPU_MMX2, "MMX" );
        ret |= add_flags( &cpu0, &cpu1, VSIMD_CPU_CACHELINE_64, "MMX Cache64" );
        cpu1 &= ~VSIMD_CPU_CACHELINE_64;
#if ARCH_X86
        ret |= add_flags( &cpu0, &cpu1, VSIMD_CPU_CACHELINE_32, "MMX Cache32" );
        cpu1 &= ~VSIMD_CPU_CACHELINE_32;
#endif
        if( cpu_detect_rs & VSIMD_CPU_LZCNT )
        {
            ret |= add_flags( &cpu0, &cpu1, VSIMD_CPU_LZCNT, "MMX LZCNT" );
            cpu1 &= ~VSIMD_CPU_LZCNT;
        }
        ret |= add_flags( &cpu0, &cpu1, VSIMD_CPU_SLOW_CTZ, "MMX SlowCTZ" );
        cpu1 &= ~VSIMD_CPU_SLOW_CTZ;
    }
    if( cpu_detect_rs & VSIMD_CPU_SSE )
        ret |= add_flags( &cpu0, &cpu1, VSIMD_CPU_SSE, "SSE" );
    if( cpu_detect_rs & VSIMD_CPU_SSE2 )
    {
        ret |= add_flags( &cpu0, &cpu1, VSIMD_CPU_SSE2 | VSIMD_CPU_SSE2_IS_SLOW, "SSE2Slow" );
        ret |= add_flags( &cpu0, &cpu1, VSIMD_CPU_SSE2_IS_FAST, "SSE2Fast" );
        ret |= add_flags( &cpu0, &cpu1, VSIMD_CPU_CACHELINE_64, "SSE2Fast Cache64" );
        cpu1 &= ~VSIMD_CPU_CACHELINE_64;
        ret |= add_flags( &cpu0, &cpu1, VSIMD_CPU_SLOW_SHUFFLE, "SSE2 SlowShuffle" );
        cpu1 &= ~VSIMD_CPU_SLOW_SHUFFLE;
        ret |= add_flags( &cpu0, &cpu1, VSIMD_CPU_SLOW_CTZ, "SSE2 SlowCTZ" );
        cpu1 &= ~VSIMD_CPU_SLOW_CTZ;
        if( cpu_detect_rs & VSIMD_CPU_LZCNT )
        {
            ret |= add_flags( &cpu0, &cpu1, VSIMD_CPU_LZCNT, "SSE2 LZCNT" );
            cpu1 &= ~VSIMD_CPU_LZCNT;
        }
    }
    if( cpu_detect_rs & VSIMD_CPU_SSE3 )
    {
        ret |= add_flags( &cpu0, &cpu1, VSIMD_CPU_SSE3 | VSIMD_CPU_CACHELINE_64, "SSE3" );
        cpu1 &= ~VSIMD_CPU_CACHELINE_64;
    }
    if( cpu_detect_rs & VSIMD_CPU_SSSE3 )
    {
        ret |= add_flags( &cpu0, &cpu1, VSIMD_CPU_SSSE3, "SSSE3" );
        ret |= add_flags( &cpu0, &cpu1, VSIMD_CPU_CACHELINE_64, "SSSE3 Cache64" );
        cpu1 &= ~VSIMD_CPU_CACHELINE_64;
        ret |= add_flags( &cpu0, &cpu1, VSIMD_CPU_SLOW_SHUFFLE, "SSSE3 SlowShuffle" );
        cpu1 &= ~VSIMD_CPU_SLOW_SHUFFLE;
        ret |= add_flags( &cpu0, &cpu1, VSIMD_CPU_SLOW_CTZ, "SSSE3 SlowCTZ" );
        cpu1 &= ~VSIMD_CPU_SLOW_CTZ;
        ret |= add_flags( &cpu0, &cpu1, VSIMD_CPU_SLOW_ATOM, "SSSE3 SlowAtom" );
        /* Cache64 is tested again on top of SlowAtom, then both cleared. */
        ret |= add_flags( &cpu0, &cpu1, VSIMD_CPU_CACHELINE_64, "SSSE3 Cache64 SlowAtom" );
        cpu1 &= ~VSIMD_CPU_CACHELINE_64;
        cpu1 &= ~VSIMD_CPU_SLOW_ATOM;
        if( cpu_detect_rs & VSIMD_CPU_LZCNT )
        {
            ret |= add_flags( &cpu0, &cpu1, VSIMD_CPU_LZCNT, "SSSE3 LZCNT" );
            cpu1 &= ~VSIMD_CPU_LZCNT;
        }
    }
    if( cpu_detect_rs & VSIMD_CPU_SSE4 )
        ret |= add_flags( &cpu0, &cpu1, VSIMD_CPU_SSE4, "SSE4" );
    if( cpu_detect_rs & VSIMD_CPU_SSE42 )
        ret |= add_flags( &cpu0, &cpu1, VSIMD_CPU_SSE42, "SSE4.2" );
    if( cpu_detect_rs & VSIMD_CPU_AVX )
        ret |= add_flags( &cpu0, &cpu1, VSIMD_CPU_AVX, "AVX" );
    if( cpu_detect_rs & VSIMD_CPU_XOP )
        ret |= add_flags( &cpu0, &cpu1, VSIMD_CPU_XOP, "XOP" );
    if( cpu_detect_rs & VSIMD_CPU_FMA4 )
    {
        ret |= add_flags( &cpu0, &cpu1, VSIMD_CPU_FMA4, "FMA4" );
        cpu1 &= ~VSIMD_CPU_FMA4;
    }
    if( cpu_detect_rs & VSIMD_CPU_FMA3 )
    {
        ret |= add_flags( &cpu0, &cpu1, VSIMD_CPU_FMA3, "FMA3" );
        cpu1 &= ~VSIMD_CPU_FMA3;
    }
    if( cpu_detect_rs & VSIMD_CPU_AVX2 )
    {
        /* FMA3 is re-enabled here together with AVX2. */
        ret |= add_flags( &cpu0, &cpu1, VSIMD_CPU_FMA3 | VSIMD_CPU_AVX2, "AVX2" );
        if( cpu_detect_rs & VSIMD_CPU_LZCNT )
        {
            ret |= add_flags( &cpu0, &cpu1, VSIMD_CPU_LZCNT, "AVX2 LZCNT" );
            cpu1 &= ~VSIMD_CPU_LZCNT;
        }
    }
    if( cpu_detect_rs & VSIMD_CPU_BMI1 )
    {
        ret |= add_flags( &cpu0, &cpu1, VSIMD_CPU_BMI1, "BMI1" );
        cpu1 &= ~VSIMD_CPU_BMI1;
    }
    if( cpu_detect_rs & VSIMD_CPU_BMI2 )
    {
        ret |= add_flags( &cpu0, &cpu1, VSIMD_CPU_BMI1|VSIMD_CPU_BMI2, "BMI2" );
        cpu1 &= ~(VSIMD_CPU_BMI1|VSIMD_CPU_BMI2);
    }
#elif ARCH_PPC
    if( cpu_detect_rs & VSIMD_CPU_ALTIVEC )
    {
        fprintf( stderr, "x264: ALTIVEC against C\n" );
        ret = check_all_funcs( 0, VSIMD_CPU_ALTIVEC );
    }
#elif ARCH_ARM
    /* NEON needs a special calling-convention shim for the checker. */
    if( cpu_detect_rs & VSIMD_CPU_NEON )
        x264_checkasm_call = x264_checkasm_call_neon;
    if( cpu_detect_rs & VSIMD_CPU_ARMV6 )
        ret |= add_flags( &cpu0, &cpu1, VSIMD_CPU_ARMV6, "ARMv6" );
    if( cpu_detect_rs & VSIMD_CPU_NEON )
        ret |= add_flags( &cpu0, &cpu1, VSIMD_CPU_NEON, "NEON" );
    if( cpu_detect_rs & VSIMD_CPU_FAST_NEON_MRC )
        ret |= add_flags( &cpu0, &cpu1, VSIMD_CPU_FAST_NEON_MRC, "Fast NEON MRC" );
#elif ARCH_AARCH64
    if( cpu_detect_rs & VSIMD_CPU_ARMV8 )
        ret |= add_flags( &cpu0, &cpu1, VSIMD_CPU_ARMV8, "ARMv8" );
    if( cpu_detect_rs & VSIMD_CPU_NEON )
        ret |= add_flags( &cpu0, &cpu1, VSIMD_CPU_NEON, "NEON" );
#elif ARCH_MIPS
    if( cpu_detect_rs & VSIMD_CPU_MSA )
        ret |= add_flags( &cpu0, &cpu1, VSIMD_CPU_MSA, "MSA" );
#endif
    return ret;
}