/*
 * Entry point: creates the inter-task queues, spawns the worker tasks and
 * hands control to the FreeRTOS scheduler.  vTaskStartScheduler() does not
 * return on success, so the ASSERT after it (and the enclosing while loop)
 * should never execute in normal operation.
 */
int main() {
    startStuff();
    while (1) {
        /* Create the queues */
        xCommsReceiveQueue = xQueueCreate (COMMS_RECEIVE_QUEUE_SIZE, sizeof (CommandString *));
        ASSERT_STRING (xCommsReceiveQueue, "Could not create xCommsReceiveQueue");
        xMotionCommandQueue = xQueueCreate (MOTION_COMMAND_QUEUE_SIZE, sizeof (CodedCommand));
        ASSERT_STRING (xMotionCommandQueue, "Could not create xMotionCommandQueue");
        xSensorCommandQueue = xQueueCreate (SENSOR_COMMAND_QUEUE_SIZE, sizeof (CodedCommand));
        ASSERT_STRING (xSensorCommandQueue, "Could not create xSensorCommandQueue");
        xHomeEventQueue = xQueueCreate (HOME_EVENT_QUEUE_SIZE, sizeof (HomeEvent));
        ASSERT_STRING (xHomeEventQueue, "Could not create xHomeEventQueue");
        xCommsTransmitQueue = xQueueCreate (COMMS_TRANSMIT_QUEUE_SIZE, sizeof (char *));
        ASSERT_STRING (xCommsTransmitQueue, "Could not create xCommsTransmitQueue");
        /* Create the tasks; numeric argument is the FreeRTOS priority
           (higher number = higher priority), 500 is the stack depth. */
        xTaskCreate (vTaskMotion, (signed char * const) "MotionTask", 500, PNULL, 1, NULL);
        xTaskCreate (vTaskHome, (signed char * const) "HomeTask", 500, PNULL, 2, NULL);
        xTaskCreate (vTaskSensor, (signed char * const) "SensorTask", 500, PNULL, 3, NULL); /* Higher than motion so that we don't bump into things */
        xTaskCreate (vTaskProcessing, (signed char * const) "ProcessingTask", 500, PNULL, 4, NULL); /* Higher than motion so that we can interrupt it */
        xTaskCreate (vTaskCommsTransmit, (signed char * const) "CommsTransmitTask", 500, PNULL, 5, NULL);
        xTaskCreate (vTaskCommsReceive, (signed char * const) "CommsReceiveTask", 500, PNULL, 6, NULL);
        /* Start the scheduler */
        vTaskStartScheduler();
        ASSERT_ALWAYS_STRING ("Should never get here!");
    }
    endStuff();
}
END_TEST START_TEST(ShouldRecognizeSingleCharacterPunctIdents) { Lexer lexer = Setup("! % ^ & ~ ?= %= ^= &= ~=\n"); ASSERT(Lexer_Next(lexer) == TOKEN_PUNCTNAME); ASSERT_STRING(lexer->token->text, "!", 1); ASSERT(Lexer_Next(lexer) == TOKEN_PUNCTNAME); ASSERT_STRING(lexer->token->text, "%", 1); ASSERT(Lexer_Next(lexer) == TOKEN_PUNCTNAME); ASSERT_STRING(lexer->token->text, "^", 1); ASSERT(Lexer_Next(lexer) == TOKEN_PUNCTNAME); ASSERT_STRING(lexer->token->text, "&", 1); ASSERT(Lexer_Next(lexer) == TOKEN_PUNCTNAME); ASSERT_STRING(lexer->token->text, "~", 1); ASSERT(Lexer_Next(lexer) == TOKEN_PUNCTNAME); ASSERT_STRING(lexer->token->text, "?", 1); ASSERT(Lexer_Next(lexer) == TOKEN_EQUALWITHOUTWHITESPACE); ASSERT(Lexer_Next(lexer) == TOKEN_PUNCTNAME); ASSERT_STRING(lexer->token->text, "%", 1); ASSERT(Lexer_Next(lexer) == TOKEN_EQUALWITHOUTWHITESPACE); ASSERT(Lexer_Next(lexer) == TOKEN_PUNCTNAME); ASSERT_STRING(lexer->token->text, "^", 1); ASSERT(Lexer_Next(lexer) == TOKEN_EQUALWITHOUTWHITESPACE); ASSERT(Lexer_Next(lexer) == TOKEN_PUNCTNAME); ASSERT_STRING(lexer->token->text, "&", 1); ASSERT(Lexer_Next(lexer) == TOKEN_EQUALWITHOUTWHITESPACE); ASSERT(Lexer_Next(lexer) == TOKEN_PUNCTNAME); ASSERT_STRING(lexer->token->text, "~", 1); ASSERT(Lexer_Next(lexer) == TOKEN_EQUALWITHOUTWHITESPACE); }
END_TEST
START_TEST(UrlDecodeShouldSkipBadEscapes)
{
    /* Malformed %-escapes must pass through verbatim; only the valid
       "%aa" pair in the middle actually decodes (to byte 0xAA). */
    const char *expectedOne = "This%is a%\xAA%test.%f";
    String input = String_FromC("This%is a%%aa%test.%f");
    ASSERT_STRING(String_UrlDecode(input), expectedOne, StrLen(expectedOne));

    /* A bare '%' at the very end of the input is likewise left alone. */
    const char *expectedTwo = "This%is a%\xAA%test.%";
    input = String_FromC("This%is a%%aa%test.%");
    ASSERT_STRING(String_UrlDecode(input), expectedTwo, StrLen(expectedTwo));
}
END_TEST
/* The expected-value literal here had been corrupted (its HTML entities were
   decoded by a text-processing mishap, leaving raw quotes that break the C
   string).  Reconstructed: only the four dangerous characters < > & " are
   encoded, to &lt; &gt; &amp; &quot;; the apostrophe stays literal.  The
   reconstruction matches the original 42-byte length argument exactly. */
START_TEST(HtmlEncodeToAsciiShouldEncodeTheFourDangerousCharactersToNamedEntities)
{
    String str = String_Create("<This 'is & a \"test.\">", 22);
    ASSERT_STRING(String_HtmlEncodeToAscii(str), "&lt;This 'is &amp; a &quot;test.&quot;&gt;", 42);
}
END_TEST
/* The expected-value literal here had been corrupted (entities decoded, raw
   quotes breaking the C string).  Reconstructed: < > & " become named
   entities, every other byte (controls, high bytes, tab/newlines, '#', '?')
   passes through untouched.  Matches the original 41-byte length argument. */
START_TEST(HtmlEncodeShouldOnlyChangeTheFourDangerousCharacters)
{
    String str = String_Create("<\xA0\x00\x1A\xFF'\t\n\r&#\x7F?\"test.\">", 21);
    ASSERT_STRING(String_HtmlEncode(str), "&lt;\xA0\x00\x1A\xFF'\t\n\r&amp;#\x7F?&quot;test.&quot;&gt;", 41);
}
END_TEST
START_TEST(UrlDecodeShouldDecodeUppercaseNonAsciiCharacters)
{
    /* %XX escapes written with uppercase hex digits decode to raw bytes,
       including NUL, CR/LF, DEL (0x7F) and the pair C2 A0. */
    String encoded = String_FromC("This%00is%0D%0Aa%7F%C2%A0test.");
    String decoded = String_UrlDecode(encoded);
    ASSERT_STRING(decoded, "This\0is\r\na\x7F\xC2\xA0test.", 18);
}
/*
 * Syscall 25 on a tty object: set the window title from a pvm string.
 * Pops one string argument off the thread's stack, truncates it to
 * PVM_MAX_TTY_TITLE-1 bytes, copies it into the tty data area and
 * forwards a title to the windowing system.
 */
static int tty_setWinTitle_25(struct pvm_object me , struct data_area_4_thread *tc )
{
    DEBUG_INFO;
    struct data_area_4_tty *da = pvm_data_area( me, tty );

    int n_param = POP_ISTACK;
    CHECK_PARAM_COUNT(n_param, 1);

    struct pvm_object _text = POP_ARG;
    ASSERT_STRING(_text);

    int len = pvm_get_str_len( _text );
    const char * data = (const char *)pvm_get_str_data(_text);

    /* Clamp so the copy (len+1 bytes including NUL) fits the title buffer. */
    if( len > PVM_MAX_TTY_TITLE-1 ) len = PVM_MAX_TTY_TITLE-1 ;
    strlcpy( da->title, data, len+1 ); //buf[len] = 0;

    /* Release our reference to the popped string object. */
    SYS_FREE_O(_text);

    /* NOTE(review): the title was just copied into da->title, but da->w.title
       is what gets passed here — verify this shouldn't be da->title. */
    w_set_title( &(da->w), da->w.title );

    SYSCALL_RETURN_NOTHING;
}
END_TEST
START_TEST(UrlDecodeShouldDecodeLowercaseNonAsciiCharacters)
{
    /* Same bytes as the uppercase test, but the hex digits in the escapes
       are lowercase; the decoder must treat them identically. */
    String encoded = String_FromC("This%00is%0d%0aa%7f%c2%a0test.");
    String decoded = String_UrlDecode(encoded);
    ASSERT_STRING(decoded, "This\0is\r\na\x7F\xC2\xA0test.", 18);
}
END_TEST
/* The expected-value literal had been corrupted (entities decoded, raw quotes
   breaking the C string).  Reconstructed: the UTF-8 BOM (EF BB BF = U+FEFF)
   and NBSP (C2 A0 = U+00A0) become character references, and the four
   dangerous ASCII characters become named entities; the byte count matches
   the original 52-byte length argument.
   NOTE(review): &#xFEFF;/&#xA0; vs. equal-length alternatives (&#65279;,
   &nbsp;) cannot be distinguished from the length alone — confirm against
   version control or the encoder implementation. */
START_TEST(HtmlEncodeToAsciiShouldEncodeAllNonAsciiCharacters)
{
    String str = String_Create("\xEF\xBB\xBF<\xC2\xA0\x00\x1A'\n\r&#\x7F?\"test.\">", 23);
    ASSERT_STRING(String_HtmlEncodeToAscii(str), "&#xFEFF;&lt;&#xA0;\x00\x1A'\n\r&amp;#\x7F?&quot;test.&quot;&gt;", 52);
}
END_TEST
START_TEST(UrlQueryEncodeShouldEncodeNonAsciiCharacters)
{
    /* Control bytes, DEL, and high bytes all become uppercase %XX escapes. */
    const char *expectedResult = "This%00is%0D%0Aa%7F%C2%A0test.";
    String raw = String_Create("This\0is\r\na\x7F\xC2\xA0test.", 18);
    String encoded = String_UrlQueryEncode(raw);
    ASSERT_STRING(encoded, expectedResult, StrLen(expectedResult));
}
END_TEST
START_TEST(UrlDecodeShouldDecodeDangerousCharactersFromEscapes)
{
    /* Every reserved punctuation mark encoded as %XX must decode back to
       its literal character. */
    const char *expectedResult = "[(!*This ';is :@&= a +%$test,/?#)]";
    String encoded = String_FromC("%5B%28%21%2AThis%20%27%3Bis%20%3A%40%26%3D%20a%20%2B%25%24test%2C%2F%3F%23%29%5D");
    String decoded = String_UrlDecode(encoded);
    ASSERT_STRING(decoded, expectedResult, StrLen(expectedResult));
}
END_TEST
START_TEST(UrlQueryEncodeShouldIgnoreSafePunctuation)
{
    /* Query-encoding escapes spaces but leaves this punctuation intact. */
    const char *expectedResult = "\"-.<>\\^_`{|}~Pack%20my%20box%20with%20five%20dozen%20liquor%20jugs.";
    String raw = String_FromC("\"-.<>\\^_`{|}~Pack my box with five dozen liquor jugs.");
    String encoded = String_UrlQueryEncode(raw);
    ASSERT_STRING(encoded, expectedResult, StrLen(expectedResult));
}
END_TEST
START_TEST(UrlQueryEncodeShouldEncodeOnlyAFewDangerousCharactersToEscapes)
{
    /* Only space and the query-reserved marks (& = ? # %) get escaped;
       the rest of the URL-reserved punctuation passes through. */
    const char *expectedResult = "[(!*This%20';is%20:@%26%3D%20a%20+%25$test,/%3F%23)]";
    String raw = String_Create("[(!*This ';is :@&= a +%$test,/?#)]", 34);
    String encoded = String_UrlQueryEncode(raw);
    ASSERT_STRING(encoded, expectedResult, StrLen(expectedResult));
}
END_TEST //------------------------------------------------------------------------------------------------- // HTML-Encoding-to-ASCII Tests. START_TEST(EmptyStringsShouldHtmlEncodeToAsciiToEmptyStrings) { ASSERT_STRING(String_HtmlEncodeToAscii(String_Empty), NULL, 0); }
END_TEST //------------------------------------------------------------------------------------------------- // HTML-Decoding Tests. START_TEST(EmptyStringsShouldHtmlDecodeToEmptyStrings) { ASSERT_STRING(String_HtmlDecode(String_Empty), NULL, 0); }
END_TEST //------------------------------------------------------------------------------------------------- // URL-Decoding Tests. // // URL-decoding means decoding anything starting with a '%' followed by two hex characters // into its equivalent byte. Malformed codes are skipped. START_TEST(EmptyStringsShouldUrlDecodeToEmptyStrings) { ASSERT_STRING(String_UrlDecode(String_Empty), NULL, 0); }
END_TEST //------------------------------------------------------------------------------------------------- // URL-Query-Encoding Tests. // // URL-encoding means encoding non-ASCII characters, space, and these punctuation marks: // & = ? # % START_TEST(EmptyStringsShouldUrlQueryEncodeToEmptyStrings) { ASSERT_STRING(String_UrlQueryEncode(String_Empty), NULL, 0); }
END_TEST //------------------------------------------------------------------------------------------------- // Basic URL-Encoding Tests. // // URL-encoding means encoding non-ASCII characters, space, and these punctuation marks: // ! * ' ; : @ & = + $ , / ? # % ( ) [ ] START_TEST(EmptyStringsShouldUrlEncodeToEmptyStrings) { ASSERT_STRING(String_UrlEncode(String_Empty), NULL, 0); }
/* Executing REQ1 against a fresh session should create exactly one entity
   carrying a single STRING attribute: name = "Umberto Eco". */
TEST execute_REQ1() {
    mvSession state;
    __perform__(&state, REQ1);

    ASSERT_INT(state.varcount(), 1);
    ASSERT_INT(state.entities.size(), 1);

    mvAttrlist& attrs = state.entities[0].data;
    ASSERT_INT(attrs.size(), 1);
    ASSERT_INT(attrs[0].type, STRING);
    ASSERT_STRREF(attrs[0].name, "name");
    ASSERT_STRING(attrs[0].value.string, "Umberto Eco");
}
END_TEST
/* Malformed entity syntax must pass through the decoder unchanged.
   Each length argument is 15 ("This is a test.") plus the bad prefix. */
START_TEST(HtmlDecodeShouldSkipBadInput)
{
    ASSERT_STRING(String_HtmlDecode(String_FromC("&frog;This is a test.")), "&frog;This is a test.", 15 + 6);
    ASSERT_STRING(String_HtmlDecode(String_FromC("&#;This is a test.")), "&#;This is a test.", 15 + 3);
    /* NOTE(review): this literal's prefix is only 2 bytes but the length
       argument says 15 + 6 — the original entity text appears to have been
       lost to an encoding mishap; restore it from version control. */
    ASSERT_STRING(String_HtmlDecode(String_FromC("f;This is a test.")), "f;This is a test.", 15 + 6);
    ASSERT_STRING(String_HtmlDecode(String_FromC("&#frog;This is a test.")), "&#frog;This is a test.", 15 + 7);
    ASSERT_STRING(String_HtmlDecode(String_FromC("&#x;This is a test.")), "&#x;This is a test.", 15 + 4);
    ASSERT_STRING(String_HtmlDecode(String_FromC("&#xz;This is a test.")), "&#xz;This is a test.", 15 + 5);
    /* NOTE(review): same problem here — 2-byte prefix vs. a 15 + 7 length. */
    ASSERT_STRING(String_HtmlDecode(String_FromC("z;This is a test.")), "z;This is a test.", 15 + 7);
}
END_TEST
/* Corner cases where '=' interacts with punctuation: a trailing '=' on a
   punct form splits off as TOKEN_EQUALWITHOUTWHITESPACE, '==' is TOKEN_EQ,
   and '+'/'&' alone collapse to their dedicated / punct-name tokens.
   The assertions below consume the two source lines token by token. */
START_TEST(ShouldHandleWeirdPunctuationCornerCases)
{
    Lexer lexer = Setup(
        "+==+ += +== =+= =+\n"
        "&==& &= &== =&= =&\n"
    );

    /* "+==+" is a single punct name. */
    ASSERT(Lexer_Next(lexer) == TOKEN_PUNCTNAME);
    ASSERT_STRING(lexer->token->text, "+==+", 4);

    /* "+=" lexes as '+' followed by glued '='. */
    ASSERT(Lexer_Next(lexer) == TOKEN_PLUS);
    ASSERT(Lexer_Next(lexer) == TOKEN_EQUALWITHOUTWHITESPACE);

    /* "+==" lexes as '+' followed by '=='. */
    ASSERT(Lexer_Next(lexer) == TOKEN_PLUS);
    ASSERT(Lexer_Next(lexer) == TOKEN_EQ);

    /* "=+=" lexes as punct name "=+" followed by glued '='. */
    ASSERT(Lexer_Next(lexer) == TOKEN_PUNCTNAME);
    ASSERT_STRING(lexer->token->text, "=+", 2);
    ASSERT(Lexer_Next(lexer) == TOKEN_EQUALWITHOUTWHITESPACE);

    /* "=+" is a single punct name. */
    ASSERT(Lexer_Next(lexer) == TOKEN_PUNCTNAME);
    ASSERT_STRING(lexer->token->text, "=+", 2);

    /* "&==&" is a single punct name. */
    ASSERT(Lexer_Next(lexer) == TOKEN_PUNCTNAME);
    ASSERT_STRING(lexer->token->text, "&==&", 4);

    /* "&=" lexes as punct name "&" followed by glued '='. */
    ASSERT(Lexer_Next(lexer) == TOKEN_PUNCTNAME);
    ASSERT_STRING(lexer->token->text, "&", 1);
    ASSERT(Lexer_Next(lexer) == TOKEN_EQUALWITHOUTWHITESPACE);

    /* "&==" lexes as punct name "&" followed by '=='. */
    ASSERT(Lexer_Next(lexer) == TOKEN_PUNCTNAME);
    ASSERT_STRING(lexer->token->text, "&", 1);
    ASSERT(Lexer_Next(lexer) == TOKEN_EQ);

    /* "=&=" lexes as punct name "=&" followed by glued '='. */
    ASSERT(Lexer_Next(lexer) == TOKEN_PUNCTNAME);
    ASSERT_STRING(lexer->token->text, "=&", 2);
    ASSERT(Lexer_Next(lexer) == TOKEN_EQUALWITHOUTWHITESPACE);

    /* "=&" is a single punct name. */
    ASSERT(Lexer_Next(lexer) == TOKEN_PUNCTNAME);
    ASSERT_STRING(lexer->token->text, "=&", 2);
}
/* Compiling REQ11 should yield a query pattern over class "person" with a
   single STRING attribute constraint: name = "Umberto Eco".  A parse error
   fails the test immediately. */
TEST compile_REQ11() {
    mvCommand cmd;
    try {
        singletonParser.parse(cmd, REQ11);
    } catch (mvError* err) {
        FAIL(err);
    }

    mvQuery patt(cmd);
    ASSERT_STRREF(patt.classname, "person");
    ASSERT_INT(patt.attrs.size(), 1);
    ASSERT_STRREF(patt.attrs[0].name, "name");
    ASSERT_INT(patt.attrs[0].type, STRING);
    ASSERT_STRING(patt.attrs[0].value.string, "Umberto Eco");
}
/*
 * Syscall 17 on a tty object: print a pvm string to the tty window.
 * Pops one string argument, copies at most BS bytes of it into a local
 * buffer, renders the text with the tty font and refreshes the window.
 */
static int putws_17(struct pvm_object me , struct data_area_4_thread *tc )
{
    DEBUG_INFO;
    struct data_area_4_tty *da = pvm_data_area( me, tty );
    //printf("putws font %d,%d\n", da->font_width, da->font_height );

    int n_param = POP_ISTACK;
    CHECK_PARAM_COUNT(n_param, 1);

    struct pvm_object _text = POP_ARG;
    ASSERT_STRING(_text);

    int len = pvm_get_str_len( _text );
    const char * data = (const char *)pvm_get_str_data(_text);

    /* Truncate to the local buffer; terminate explicitly since strncpy
       does not NUL-terminate when the source is longer than the count. */
    char buf[BS+2];
    if( len > BS ) len = BS;
    strncpy( buf, data, len );
    //buf[len] = '\n';
    buf[len] = 0;

    /* Release our reference to the popped string object. */
    SYS_FREE_O(_text);

    //printf("tty print: '%s' at %d,%d\n", buf, da->x, da->y );

    struct rgba_t fg = da->fg;
    struct rgba_t bg = da->bg;

    /* da->x / da->y are passed by address — presumably the cursor position,
       updated as the text is drawn; confirm against w_font_tty_string. */
    w_font_tty_string( &(da->w), tty_font, buf, fg, bg, &(da->x), &(da->y) );
    drv_video_window_update( &(da->w) );

    SYSCALL_RETURN_NOTHING;
}
/* The Sensor reading task */
/*
 * Blocks on xSensorCommandQueue forever; for each received coded command it
 * echoes the command on the LCD, executes it, and reports the result over
 * serial.  Only the '*' (read-all-ADCs) command is currently supported —
 * anything else trips an assert.
 */
void vTaskSensor (void *pvParameters)
{
    bool success;
    unsigned char x;
    char sendString [ADC_READING_STRING_LEN * MAX_NUM_ADCS + 1]; /* +1 for terminator */
    CodedCommand codedSensorCommand;
    portBASE_TYPE xStatus;

    while (1)
    {
        /* Wait indefinitely for the next sensor command. */
        xStatus = xQueueReceive (xSensorCommandQueue, &codedSensorCommand, portMAX_DELAY);
        ASSERT_STRING (xStatus == pdPASS, "Failed to receive from sensor command queue.");

        success = false; /* Assume failure */

        /* Print out what command we're going to execute */
        rob_lcd_goto_xy (0, 1);
        rob_print_from_program_space (PSTR ("CMD: "));
        if (codedSensorCommand.buffer[CODED_COMMAND_INDEX_POS] != CODED_COMMAND_INDEX_UNUSED)
        {
            /* Commands may carry an optional index, echoed as "#<n> ". */
            rob_print_character ('#');
            rob_print_unsigned_long (codedSensorCommand.buffer[CODED_COMMAND_INDEX_POS]);
            rob_print_character (' ');
        }
        rob_print_character (codedSensorCommand.buffer[CODED_COMMAND_ID_POS]);
        rob_lcd_goto_xy (0, 2);

        /* Now do it */
        switch (codedSensorCommand.buffer[CODED_COMMAND_ID_POS])
        {
            case '*': /* The only sensor command supported at the moment */
            {
                RobMemset (sendString, ' ', sizeof (sendString));
                calibrateAdcs();
                for (x = 0; x < MAX_NUM_ADCS; x++)
                {
                    unsigned int milliVolts;

                    /* Write something like "FL:40 " (for ADC 2 of 6 (which is
                       Front Left), object detected at 40 cm), or "FL:   " if
                       nothing is there. */
                    memcpy (&sendString[ADC_READING_STRING_LEN * x], channelToString[x], SENSOR_STRING_LEN);
                    memcpy (&sendString[(ADC_READING_STRING_LEN * x) + SENSOR_STRING_LEN], NOTHING_THERE_STRING, NOTHING_THERE_STRING_LEN);
                    milliVolts = readAdc (x);
                    if (objectDetected (milliVolts))
                    {
                        itoa (voltageToDistance (milliVolts), &(sendString[(ADC_READING_STRING_LEN * x) + SENSOR_STRING_LEN + 1]), 10); /* +1 to leave the ':' there */
                    }
                    /* Overwrite the null terminator that itoa() puts in with a space */
                    sendString[RobStrlen (sendString)] = ' ';
                }
                sendString[sizeof (sendString) - 1] = 0; /* Add terminator */
                sendSerialString (sendString, sizeof (sendString));
                rob_print (sendString);
                success = true;
            }
            break;
            default:
            {
                /* Unknown command id: assert with the offending byte. */
                ASSERT_ALWAYS_PARAM (codedSensorCommand.buffer[CODED_COMMAND_ID_POS]);
            }
            break;
        }

        if (!success)
        {
            sendSerialString (ERROR_STRING, sizeof (ERROR_STRING));
        }
    }
}
END_TEST START_TEST(ShouldRecognizeGeneralPunctuativeForms) { Lexer lexer = Setup( "&& ^^ ** ++ -- +>> <<+ << >> <+> <-> <--> *~*\n" "&&= ^^= **= ++= --= +>>= <<+= <<= >>= <+>= <->= <-->= *~*=\n" ); ASSERT(Lexer_Next(lexer) == TOKEN_PUNCTNAME); ASSERT_STRING(lexer->token->text, "&&", 2); ASSERT(Lexer_Next(lexer) == TOKEN_PUNCTNAME); ASSERT_STRING(lexer->token->text, "^^", 2); ASSERT(Lexer_Next(lexer) == TOKEN_PUNCTNAME); ASSERT_STRING(lexer->token->text, "**", 2); ASSERT(Lexer_Next(lexer) == TOKEN_PUNCTNAME); ASSERT_STRING(lexer->token->text, "++", 2); ASSERT(Lexer_Next(lexer) == TOKEN_PUNCTNAME); ASSERT_STRING(lexer->token->text, "--", 2); ASSERT(Lexer_Next(lexer) == TOKEN_PUNCTNAME); ASSERT_STRING(lexer->token->text, "+>>", 3); ASSERT(Lexer_Next(lexer) == TOKEN_PUNCTNAME); ASSERT_STRING(lexer->token->text, "<<+", 3); ASSERT(Lexer_Next(lexer) == TOKEN_PUNCTNAME); ASSERT_STRING(lexer->token->text, "<<", 2); ASSERT(Lexer_Next(lexer) == TOKEN_PUNCTNAME); ASSERT_STRING(lexer->token->text, ">>", 2); ASSERT(Lexer_Next(lexer) == TOKEN_PUNCTNAME); ASSERT_STRING(lexer->token->text, "<+>", 3); ASSERT(Lexer_Next(lexer) == TOKEN_PUNCTNAME); ASSERT_STRING(lexer->token->text, "<->", 3); ASSERT(Lexer_Next(lexer) == TOKEN_PUNCTNAME); ASSERT_STRING(lexer->token->text, "<-->", 4); ASSERT(Lexer_Next(lexer) == TOKEN_PUNCTNAME); ASSERT_STRING(lexer->token->text, "*~*", 3); ASSERT(Lexer_Next(lexer) == TOKEN_PUNCTNAME); ASSERT_STRING(lexer->token->text, "&&", 2); ASSERT(Lexer_Next(lexer) == TOKEN_EQUALWITHOUTWHITESPACE); ASSERT(Lexer_Next(lexer) == TOKEN_PUNCTNAME); ASSERT_STRING(lexer->token->text, "^^", 2); ASSERT(Lexer_Next(lexer) == TOKEN_EQUALWITHOUTWHITESPACE); ASSERT(Lexer_Next(lexer) == TOKEN_PUNCTNAME); ASSERT_STRING(lexer->token->text, "**", 2); ASSERT(Lexer_Next(lexer) == TOKEN_EQUALWITHOUTWHITESPACE); ASSERT(Lexer_Next(lexer) == TOKEN_PUNCTNAME); ASSERT_STRING(lexer->token->text, "++", 2); ASSERT(Lexer_Next(lexer) == TOKEN_EQUALWITHOUTWHITESPACE); ASSERT(Lexer_Next(lexer) == 
TOKEN_PUNCTNAME); ASSERT_STRING(lexer->token->text, "--", 2); ASSERT(Lexer_Next(lexer) == TOKEN_EQUALWITHOUTWHITESPACE); ASSERT(Lexer_Next(lexer) == TOKEN_PUNCTNAME); ASSERT_STRING(lexer->token->text, "+>>", 3); ASSERT(Lexer_Next(lexer) == TOKEN_EQUALWITHOUTWHITESPACE); ASSERT(Lexer_Next(lexer) == TOKEN_PUNCTNAME); ASSERT_STRING(lexer->token->text, "<<+", 3); ASSERT(Lexer_Next(lexer) == TOKEN_EQUALWITHOUTWHITESPACE); ASSERT(Lexer_Next(lexer) == TOKEN_PUNCTNAME); ASSERT_STRING(lexer->token->text, "<<", 2); ASSERT(Lexer_Next(lexer) == TOKEN_EQUALWITHOUTWHITESPACE); ASSERT(Lexer_Next(lexer) == TOKEN_PUNCTNAME); ASSERT_STRING(lexer->token->text, ">>", 2); ASSERT(Lexer_Next(lexer) == TOKEN_EQUALWITHOUTWHITESPACE); ASSERT(Lexer_Next(lexer) == TOKEN_PUNCTNAME); ASSERT_STRING(lexer->token->text, "<+>", 3); ASSERT(Lexer_Next(lexer) == TOKEN_EQUALWITHOUTWHITESPACE); ASSERT(Lexer_Next(lexer) == TOKEN_PUNCTNAME); ASSERT_STRING(lexer->token->text, "<->", 3); ASSERT(Lexer_Next(lexer) == TOKEN_EQUALWITHOUTWHITESPACE); ASSERT(Lexer_Next(lexer) == TOKEN_PUNCTNAME); ASSERT_STRING(lexer->token->text, "<-->", 4); ASSERT(Lexer_Next(lexer) == TOKEN_EQUALWITHOUTWHITESPACE); ASSERT(Lexer_Next(lexer) == TOKEN_PUNCTNAME); ASSERT_STRING(lexer->token->text, "*~*", 3); ASSERT(Lexer_Next(lexer) == TOKEN_EQUALWITHOUTWHITESPACE); }
END_TEST
/* The input literal had been corrupted: its numeric character references were
   decoded to raw characters by a text-processing mishap, which would make the
   test a no-op (input already equal to the expected output).  Reconstructed
   from the expected UTF-8 bytes: A0, C1, BD, E1, FF, DE.
   NOTE(review): decimal (&#160;) vs. hex (&#xA0;) references cannot be told
   apart from the output — confirm against version control. */
START_TEST(HtmlDecodeShouldDecodeNumericLatin1EntitiesToUtf8)
{
    ASSERT_STRING(String_HtmlDecode(String_FromC("&#160;This &#193;is &#189; a &#225;test.&#255;&#222;")),
        "\xC2\xA0This \xC3\x81is \xC2\xBD a \xC3\xA1test.\xC3\xBF\xC3\x9E", 28);
}
END_TEST
/* The input literal had been corrupted: its named entities were decoded to
   raw UTF-8 by a text-processing mishap, making the test a no-op.
   Reconstructed from the expected bytes: U+03A3 Sigma, U+03B1 alpha,
   U+0178 Yuml, U+2014 mdash, U+2200 forall, U+2665 hearts. */
START_TEST(HtmlDecodeShouldDecodeNamedNonLatin1EntitiesToUtf8)
{
    ASSERT_STRING(String_HtmlDecode(String_FromC("&Sigma;This &alpha;is &Yuml; a &mdash;test.&forall;&hearts;")),
        "\xCE\xA3This \xCE\xB1is \xC5\xB8 a \xE2\x80\x94test.\xE2\x88\x80\xE2\x99\xA5", 31);
}
END_TEST
/* The input literal had been corrupted: its named entities were decoded to
   raw characters by a text-processing mishap, making the test a no-op.
   Reconstructed from the expected UTF-8 bytes: nbsp, Aacute, frac12, aacute,
   yuml, THORN. */
START_TEST(HtmlDecodeShouldDecodeNamedLatin1EntitiesToUtf8)
{
    ASSERT_STRING(String_HtmlDecode(String_FromC("&nbsp;This &Aacute;is &frac12; a &aacute;test.&yuml;&THORN;")),
        "\xC2\xA0This \xC3\x81is \xC2\xBD a \xC3\xA1test.\xC3\xBF\xC3\x9E", 28);
}
END_TEST
/* The input literal had been corrupted (entities decoded, leaving raw quotes
   that break the C string).  Reconstructed per the test name as named
   entities: &lt; &apos; &amp; &quot; &gt;, decoding to the 22-byte plain
   string.  NOTE(review): &apos; vs. the numeric &#39; cannot be distinguished
   from the output — confirm against version control. */
START_TEST(HtmlDecodeShouldDecodeTheAsciiNamedEntities)
{
    ASSERT_STRING(String_HtmlDecode(String_FromC("&lt;This &apos;is &amp; a &quot;test.&quot;&gt;")),
        "<This 'is & a \"test.\">", 22);
}
END_TEST START_TEST(UnencodedStringsShouldHtmlDecodeToUnencodedStrings) { ASSERT_STRING(String_HtmlDecode(String_FromC("This is a test.")), "This is a test.", 15); }