Code example #1
0
// Generates a token of text from this element, returning only the width.
// @param[out] token_width Set to the pixel width of the generated token.
// @param[in] line_begin Character offset into 'text' at which the token starts.
// @return The result of LastToken() — presumably whether this token is the
//         last one in the text; false if no token could be generated.
bool ElementTextDefault::GenerateToken(float& token_width, int line_begin)
{
	// Bail if we don't have a valid font face, or the offset is past the end
	// of the text.
	FontFaceHandle* font_face_handle = GetFontFaceHandle();
	if (font_face_handle == NULL ||
		line_begin >= (int) text.Length())
		return false;	// was 'return 0'; this function returns bool

	// Determine how we are processing white-space while formatting the text.
	int white_space_property = GetProperty< int >(WHITE_SPACE);
	// Collapse runs of white-space for 'normal', 'nowrap' and 'pre-line'.
	bool collapse_white_space = white_space_property == WHITE_SPACE_NORMAL ||
								white_space_property == WHITE_SPACE_NOWRAP ||
								white_space_property == WHITE_SPACE_PRE_LINE;
	// Honour explicit endline characters for 'pre', 'pre-wrap' and 'pre-line'.
	bool break_at_endline = white_space_property == WHITE_SPACE_PRE ||
							white_space_property == WHITE_SPACE_PRE_WRAP ||
							white_space_property == WHITE_SPACE_PRE_LINE;

	const word* token_begin = text.CString() + line_begin;
	WString token;

	// Build the token and measure it (no preceding character, so kerning
	// against 0).
	BuildToken(token, token_begin, text.CString() + text.Length(), true, collapse_white_space, break_at_endline, GetProperty< int >(TEXT_TRANSFORM));
	token_width = (float) font_face_handle->GetStringWidth(token, 0);

	return LastToken(token_begin, text.CString() + text.Length(), collapse_white_space, break_at_endline);
}
Code example #2
0
File: Tokenizer.c  Project: mihasighi/smtcomp14-sl
//-----------------------------------------------------------------------------
//----Read a quoted token (e.g. 'atom' or "distinct object") whose opening
//----quote has already been consumed.  Only visible characters are legal,
//----and only the quote and backslash may be escaped (by a backslash).
//----Returns a token of the given Type, except that a '...'-quoted regular
//----lower word has its quotes stripped and becomes a lower_word.
TOKEN QuotedToken(READFILE Stream,char OpeningQuote,TokenType Type) {

    static SuperString LocalValue;
    int Index;
//----Set when the character just stored was preceded by an escape; the
//----original code tested LocalValue[Index-1] == '\\' instead, which
//----wrongly treated a closing quote AFTER an escaped backslash (e.g. the
//----input '\\') as still escaped, running past the end of the token.
    int CharWasEscaped;

    Index = 0;
    LocalValue[Index] = OpeningQuote;
    do {
        CharWasEscaped = 0;
        IncrementTokenIndex(Stream,&Index);
        LocalValue[Index] = NextCharacter(Stream);
//----Check legality - only visibles and only quote and escape escaped
        if (LocalValue[Index] < ' ' || LocalValue[Index] > '~') {
            CharacterError(Stream);
        }
        if (LocalValue[Index] == '\\') {
//----Consume the escaped character; it must be the quote or a backslash
            IncrementTokenIndex(Stream,&Index);
            LocalValue[Index] = NextCharacter(Stream);
            CharWasEscaped = 1;
            if (LocalValue[Index] != OpeningQuote && 
LocalValue[Index] != '\\') {
                CharacterError(Stream);
            }
        }
//----Stop only on an unescaped closing quote
    } while (LocalValue[Index] != OpeningQuote || CharWasEscaped);
    IncrementTokenIndex(Stream,&Index);
    LocalValue[Index] = '\0';

//----Strip '' quotes from regular lower words
    if (LocalValue[0] == '\'' && islower(LocalValue[1])) {
        Index = 1;
//----Make sure it's legal without the ''s
        while (isalnum(LocalValue[Index]) || LocalValue[Index] == '_') {
            Index++;
        }
        if (Index == strlen(LocalValue) -1) {
            LocalValue[Index] = '\0';
            return(BuildToken(lower_word,&(LocalValue[1])));
        } 
    }

    return(BuildToken(Type,LocalValue));
}
Code example #3
0
File: Gate.cpp  Project: ooeyusea/hyper_net
// Handles the bind-account acknowledgement: on success, loads the account's
// role list and sends it to the client together with a freshly built login
// token; on any failure, reports an error code back to the client.
// @param kernel   Engine kernel, used for sending and resetting.
// @param nodeType Type of the acknowledging node (unused here).
// @param nodeId   Id of the acknowledging node (unused here).
// @param args     Packed payload: agentId, accountId, errorCode, tokenCount.
void Gate::OnRecvBindAccountAck(IKernel * kernel, s32 nodeType, s32 nodeId, const OArgs & args) {
	s64 agentId = args.GetDataInt64(0);
	s64 accountId = args.GetDataInt64(1);
	s32 errorCode = args.GetDataInt32(2);
	s32 tokenCount = args.GetDataInt32(3);

	// Look the player up once; the original did find() followed by
	// operator[], performing the hash lookup twice.
	auto itr = _players.find(agentId);
	if (itr == _players.end())
		return;

	Player& player = itr->second;
	OASSERT(player.state == ST_AUTHENING && player.accountId == accountId, "wtf");
	// In release builds the assert is gone; bail on a stale/mismatched ack.
	if (player.accountId != accountId)
		return;

	if (errorCode != 0) {
		// Authentication failed upstream; forward the error code as-is.
		olib::Buffer<128> buf;
		buf << errorCode;
		SendToClient(kernel, player.agentId, _loginAckId, buf.Out());
		return;
	}

	// Collect the account's roles into the player record.
	bool ret = _roleMgr->GetRoleList(accountId, [&player](IKernel * kernel, s64 actorId, IRole * role) {
		player.roles.push_back({ actorId, role });
	});

	if (ret) {
		// Build a login token stamped with the current time (seconds) and
		// the server-supplied token count.
		TokenData data = { player.accountId, 0, (s32)(tools::GetTimeMillisecond() / 1000), tokenCount };
		char token[MAX_TOKEN_SIZE + 1];
		BuildToken(token, sizeof(token) - 1, data);

		// Reply: no-error code, token, role count, then each role's id + data.
		olib::Buffer<4096> buf;
		buf << _noError << token << (s32)player.roles.size();
		for (const auto& role : player.roles) {
			buf << role.actorId;
			role.role->Pack(buf);
		}

		SendToClient(kernel, player.agentId, _loginAckId, buf.Out());
	}
	else {
		// NOTE(review): Reset() may remove or alter the _players entry that
		// 'player' refers to; confirm player.agentId is still valid below.
		Reset(kernel, agentId, ST_NONE, node_type::USER);

		olib::Buffer<128> buf;
		buf << _errorLoadRoleListFailed;
		SendToClient(kernel, player.agentId, _loginAckId, buf.Out());
	}
}
Code example #4
0
File: Tokenizer.c  Project: mihasighi/smtcomp14-sl
//-----------------------------------------------------------------------------
//----Read and classify the next token from Stream: skips whitespace,
//----handles block (/*...*/) and line (%, #) comments, quoted tokens,
//----punctuation, connectives, quantifiers, words and numbers.  When a
//----token's end can only be detected by reading one character too far,
//----Stream->Overshot is set so the next call reuses that character.
//----If Stream->NeedNonLogicTokens is set, blank lines and comments are
//----returned as tokens instead of being discarded.
TOKEN GetNextToken(READFILE Stream) {

    int CurrentChar,PreviousChar;
    int Index;
//----static so it doesn't have to get allocated everytime (very often!)
    static SuperString LocalValue;

//DEBUG printf("char was ==%c==\n",CurrentCharacter(Stream));
//----If the previous call overshot, reuse that character rather than
//----consuming a fresh one from the stream.
    if (Stream->Overshot) {
//DEBUG printf("overshot\n");
        CurrentChar = CurrentCharacter(Stream);
    } else {
//DEBUG printf("get next\n");
        CurrentChar = NextCharacter(Stream);
    }
    Stream->Overshot = 0;

//----Skip whitespace
    while (isspace(CurrentChar)) {
        PreviousChar = CurrentChar;
        CurrentChar = NextCharacter(Stream);
//----Check for a blank line, if required
        if (Stream->NeedNonLogicTokens && PreviousChar == '\n' && 
CurrentChar == '\n') {
            return(BuildToken(blank_line_token,""));
        }
    }

//DEBUG printf("char is ==%c==\n",CurrentChar);
//----Dispatch on the first non-whitespace character.
    switch (CurrentChar) {
//----'/' must begin a block comment "/* ... */"; anything else is an error.
        case '/':
            Index = 0;
            LocalValue[Index++] = CurrentChar;
            PreviousChar = CurrentChar;
            CurrentChar = NextCharacter(Stream);
            if (CurrentChar == '*') {
                LocalValue[Index] = CurrentChar;
                while (CurrentChar != EOF && (CurrentChar != '/' || 
PreviousChar != '*')) {
                    PreviousChar = CurrentChar;
                    CurrentChar = NextCharacter(Stream);
                    IncrementTokenIndex(Stream,&Index);
                    LocalValue[Index] = CurrentChar;
                }
                if (CurrentChar == '/') {
//----Add eoln if it's there
                    CurrentChar = NextCharacter(Stream);
                    if (CurrentChar == '\n') {
                        IncrementTokenIndex(Stream,&Index);
                        LocalValue[Index] = CurrentChar;
                    } else {
                        Stream->Overshot = 1;
                    }
                    IncrementTokenIndex(Stream,&Index);
                    LocalValue[Index] = '\0';
                    if (Stream->NeedNonLogicTokens) {
                        return(BuildToken(comment_token,LocalValue));
                    } else {
                        return(GetNextToken(Stream));
                    }
                } else {
                    CharacterError(Stream);
                }
            } else {
                CharacterError(Stream);
            }
            break;
//----'%' and '#' begin line comments running to end-of-line.
        case '%':
        case '#':
            if (Stream->NeedNonLogicTokens) {
                Index = 0;
                do {
                    LocalValue[Index] = CurrentChar;
                    IncrementTokenIndex(Stream,&Index);
                    CurrentChar = NextCharacter(Stream);
                } while (CurrentChar != '\n' && CurrentChar != EOF);
                LocalValue[Index] = '\0';
                Stream->Overshot = 1;
                return(BuildToken(comment_token,LocalValue));
            } else {
//----Discard sequences of comments (recursive approach gave stack overflow)
                do {
                    while (CurrentChar != '\n' && CurrentChar != EOF) {
                        CurrentChar = NextCharacter(Stream);
                    }
                    CurrentChar = NextCharacter(Stream);
                } while (CurrentChar == '%' || CurrentChar == '#');
                Stream->Overshot = 1;
                return(GetNextToken(Stream));
            }
            break;
        case '\'':
            return(QuotedToken(Stream,'\'',lower_word));
            break;
        case '(':
            return(BuildToken(punctuation,"("));
            break;
        case ')':
            return(BuildToken(punctuation,")"));
            break;
        case '[':
            return(BuildToken(punctuation,"["));
            break;
        case ']':
            return(BuildToken(punctuation,"]"));
            break;
//----Multi-character operators: read one character ahead and set Overshot
//----when the lookahead is not part of the operator.
        case '!':
            CurrentChar = NextCharacter(Stream);
            if (CurrentChar == '=') {
                return(BuildToken(lower_word,"!="));
            } else if (CurrentChar == '>') {
                return(BuildToken(quantifier,"!>"));
            } else if (CurrentChar == '!') {
                return(BuildToken(unary_connective,"!!"));
            } else {
                Stream->Overshot = 1;
                return(BuildToken(quantifier,"!"));
            }
            break;
        case '?':
            CurrentChar = NextCharacter(Stream);
            if (CurrentChar == '*') {
                return(BuildToken(quantifier,"?*"));
            } else if (CurrentChar == '?') {
                return(BuildToken(unary_connective,"??"));
            } else {
                Stream->Overshot = 1;
                return(BuildToken(quantifier,"?"));
            }
            break;
        case '^':
            return(BuildToken(quantifier,"^"));
            break;
        case '.':
            return(BuildToken(punctuation,"."));
            break;
        case ':':
            return(BuildToken(punctuation,":"));
            break;
        case ',':
            return(BuildToken(punctuation,","));
            break;
        case '<':
            CurrentChar = NextCharacter(Stream);
            if (CurrentChar == '='){
                CurrentChar = NextCharacter(Stream);
                if (CurrentChar == '>') {
                    return(BuildToken(binary_connective,"<=>"));
                } else {
                    Stream->Overshot = 1;
                    return(BuildToken(binary_connective,"<="));
                }
            } else if (CurrentChar == '~') {
                CurrentChar = NextCharacter(Stream);
                if (CurrentChar == '>') {
                    return(BuildToken(binary_connective,"<~>"));
                } else {
                    CharacterError(Stream);
                }
            } else if (CurrentChar == '<') {
                return(BuildToken(punctuation,"<<"));
            } else {
                CharacterError(Stream);
            }
            break;
        case '=':
            CurrentChar = NextCharacter(Stream);
            if (CurrentChar == '>') {
                return(BuildToken(binary_connective,"=>"));
            } else {
                Stream->Overshot = 1;
                return(BuildToken(lower_word,"="));
            }
            break;
        case '~':
            CurrentChar = NextCharacter(Stream);
            if (CurrentChar == '|') {
                return(BuildToken(binary_connective,"~|"));
            } else if (CurrentChar == '&') {
                return(BuildToken(binary_connective,"~&"));
            } else {
                Stream->Overshot = 1;
                return(BuildToken(unary_connective,"~"));
            }
            break;
        case '+':
            CurrentChar = NextCharacter(Stream);
            if (CurrentChar == '+') {
                return(BuildToken(unary_connective,"++"));
            } else if (NumberToken(Stream,'+',CurrentChar,LocalValue)) {
                return(BuildToken(number,LocalValue));
            } else {
                Stream->Overshot = 1;
                return(BuildToken(binary_connective,"+"));
            }
            break;
        case '-':
            CurrentChar = NextCharacter(Stream);
            if (CurrentChar == '-') {
                CurrentChar = NextCharacter(Stream);
                if (CurrentChar == '>') {
                    return(BuildToken(binary_connective,"-->"));
                } else {
                    Stream->Overshot = 1;
                    return(BuildToken(unary_connective,"--"));
                }
//----Code copied from below for numbers
            } else if (NumberToken(Stream,'-',CurrentChar,LocalValue)) {
                return(BuildToken(number,LocalValue));
            } else {
                Stream->Overshot = 1;
                return(BuildToken(punctuation,"-"));
            }
            break;
        case '"':
            return(QuotedToken(Stream,'"',distinct_object));
            break;
        case '|':
            return(BuildToken(binary_connective,"|"));
            break;
        case '&':
            return(BuildToken(binary_connective,"&"));
            break;
        case '@':
            CurrentChar = NextCharacter(Stream);
            if (CurrentChar == '+') {
                return(BuildToken(quantifier,"@+"));
            } else if (CurrentChar == '-') {
                return(BuildToken(quantifier,"@-"));
            } else {
                Stream->Overshot = 1;
                return(BuildToken(binary_connective,"@"));
            }
            break;
        case '>':
            return(BuildToken(binary_connective,">"));
            break;
        case '*':
            return(BuildToken(binary_connective,"*"));
            break;
        case EOF:
            return(BuildToken(endeof,""));
            break;
//----Anything else is a lower word ($-prefixed or lowercase), an upper
//----word (uppercase), or a number; otherwise it is an error.
        default:
            Index = 0;
            if (CurrentChar == '$' || islower(CurrentChar)) {
                do {
                    LocalValue[Index] = CurrentChar;
                    CurrentChar = NextCharacter(Stream);
                    IncrementTokenIndex(Stream,&Index);
                } while (isalnum(CurrentChar) || CurrentChar=='_' ||
//----Allow second $ for system predicates and functions
(Index == 1 && CurrentChar == '$' && LocalValue[0] == '$'));
                LocalValue[Index] = '\0';
                Stream->Overshot = 1;
//----Ensure $ words have some length
                Index = 0;
                while (LocalValue[Index] == '$') {
                    Index++;
                }
                if (Index > 0 && !islower(LocalValue[Index])) {
                    CharacterError(Stream);
                }
//----Replace equal by = for now (can remove in future)
//----At some point I did comment this out, but it's still needed for
//----reformatting old, e.g., EP proofs.
//                if (!strcmp(LocalValue,"equal")) {
//                    strcpy(LocalValue,"=");
//                }
                return(BuildToken(lower_word,LocalValue));
            } else if (isupper(CurrentChar)) {
                do {
                    LocalValue[Index] = CurrentChar;
                    CurrentChar = NextCharacter(Stream);
                    IncrementTokenIndex(Stream,&Index);
                } while (isalnum(CurrentChar) || (CurrentChar=='_'));
                LocalValue[Index] = '\0';
                Stream->Overshot = 1;
//----Nasty hack to allow end of file to be specified by user on input stream
                if (!strcmp(LocalValue,"EOF__")) {
                    return(BuildToken(endeof,""));
                } else {
                    return(BuildToken(upper_word,LocalValue));
                }
//----Numbers
            } else if (NumberToken(Stream,'\0',CurrentChar,LocalValue)) {
                return(BuildToken(number,LocalValue));
            } else {
                CharacterError(Stream);
            }
            break;
    }
//----Need a default return for the error cases which compiler doesn't get
    return(NULL);
}
Code example #5
0
File: Tokenizer.c  Project: mihasighi/smtcomp14-sl
//-----------------------------------------------------------------------------
//----Make an independent copy of TokenCopy by building a new token with
//----the same kind and name (BuildToken presumably copies the name text;
//----confirm it does not alias the original's buffer).
TOKEN CloneToken(TOKEN TokenCopy) {

    return(BuildToken(TokenCopy->KindToken,TokenCopy->NameToken));
}
Code example #6
0
File: tseum.c  Project: conioh/os-design
//
//  Exercises NtAccessCheck against a discretionary security descriptor
//  built for Fred: two checks that must succeed (by ACE, by ownership)
//  and two that must fail (by ACE, by non-ownership).  Returns TRUE if
//  all four behave as expected, FALSE on the first mismatch.
//
BOOLEAN
TestAccessCheck()
{
    UCHAR AclBuffer[1024];
    SECURITY_DESCRIPTOR SecurityDescriptor;
    PACL Acl;

    UCHAR TokenBuffer[512];
    PACCESS_TOKEN Token;

    NTSTATUS Status;

    //
    //  Build Fred's ACL in the stack buffer.  Pass the buffer's real size
    //  via sizeof rather than repeating the literal 1024, so the two can
    //  never drift apart.
    //

    Acl = (PACL)AclBuffer;
    BuildAcl( Fred, Acl, sizeof(AclBuffer) );

    Token = (PACCESS_TOKEN)TokenBuffer;
    BuildToken( Fred, Token );

    DiscretionarySecurityDescriptor( &SecurityDescriptor, Acl );

    //
    //  Test should be successful based on aces
    //

    if (!NT_SUCCESS(Status = NtAccessCheck( &SecurityDescriptor,
                                         Token,
                                         0x00000001,
                                         NULL ))) {
        DbgPrint("NtAccessCheck Error should allow access : %8lx\n", Status);
        return FALSE;
    }

    //
    //  Test should be successful based on owner
    //

    if (!NT_SUCCESS(Status = NtAccessCheck( &SecurityDescriptor,
                                         Token,
                                         READ_CONTROL,
                                         &FredGuid ))) {
        DbgPrint("NtAccessCheck Error should allow owner : %8lx\n", Status);
        return FALSE;
    }

    //
    //  Test should be unsuccessful based on aces
    //

    if (NT_SUCCESS(Status = NtAccessCheck( &SecurityDescriptor,
                                        Token,
                                        0x0000000f,
                                        &FredGuid ))) {
        DbgPrint("NtAccessCheck Error shouldn't allow access : %8lx\n", Status);
        return FALSE;
    }

    //
    //  Test should be unsuccessful based on non owner
    //

    if (NT_SUCCESS(Status = NtAccessCheck( &SecurityDescriptor,
                                        Token,
                                        READ_CONTROL,
                                        &BarneyGuid ))) {
        DbgPrint("NtAccessCheck Error shouldn't allow non owner access : %8lx\n", Status);
        return FALSE;
    }

    return TRUE;
}
Code example #7
0
// Generates a line of text rendered from this element.
// @param[out] line Receives the generated characters.
// @param[out] line_length Number of source characters consumed into the line.
// @param[out] line_width Pixel width of the generated line.
// @param[in] line_begin Character offset into 'text' at which to start.
// @param[in] maximum_line_width Maximum pixel width; negative disables wrapping.
// @param[in] right_spacing_width Width that must remain free after the last token.
// @param[in] trim_whitespace_prefix True to strip leading collapsible white-space.
// @return True if the remainder of the text fit on this line, false if the
//         line was terminated early (overflow or forced endline).
bool ElementTextDefault::GenerateLine(WString& line, int& line_length, float& line_width, int line_begin, float maximum_line_width, float right_spacing_width, bool trim_whitespace_prefix)
{
	FontFaceHandle* font_face_handle = GetFontFaceHandle();

	// Initialise the output variables.
	line.Clear();
	line_length = 0;
	line_width = 0;

	// Bail if we don't have a valid font face.
	if (font_face_handle == NULL)
		return true;

	// Determine how we are processing white-space while formatting the text.
	int white_space_property = GetProperty< int >(WHITE_SPACE);
	// Collapse runs of white-space for 'normal', 'nowrap' and 'pre-line'.
	bool collapse_white_space = white_space_property == WHITE_SPACE_NORMAL ||
								white_space_property == WHITE_SPACE_NOWRAP ||
								white_space_property == WHITE_SPACE_PRE_LINE;
	// Wrap to the line box only when a maximum width is given and the
	// white-space mode permits wrapping.
	bool break_at_line = maximum_line_width >= 0 &&
						 (white_space_property == WHITE_SPACE_NORMAL ||
						  white_space_property == WHITE_SPACE_PRE_WRAP ||
						  white_space_property == WHITE_SPACE_PRE_LINE);
	// Honour explicit endline characters for 'pre', 'pre-wrap' and 'pre-line'.
	bool break_at_endline = white_space_property == WHITE_SPACE_PRE ||
							white_space_property == WHITE_SPACE_PRE_WRAP ||
							white_space_property == WHITE_SPACE_PRE_LINE;

	// Determine what (if any) text transformation we are putting the characters through.
	int text_transform_property = GetProperty< int >(TEXT_TRANSFORM);

	// Starting at the line_begin character, we generate sections of the text (we'll call them tokens) depending on the
	// white-space parsing parameters. Each section is then appended to the line if it can fit. If not, or if an
	// endline is found (and we're processing them), then the line is ended. kthxbai!

	const word* token_begin = text.CString() + line_begin;
	const word* string_end = text.CString() + text.Length();
	while (token_begin != string_end)
	{
		WString token;
		const word* next_token_begin = token_begin;

		// Generate the next token and determine its pixel-length; kern
		// against the line's current last character, if any.
		bool break_line = BuildToken(token, next_token_begin, string_end, line.Empty() && trim_whitespace_prefix, collapse_white_space, break_at_endline, text_transform_property);
		int token_width = font_face_handle->GetStringWidth(token, line.Empty() ? 0 : line[line.Length() - 1]);

		// If we're breaking to fit a line box, check if the token can fit on the line before we add it.
		if (break_at_line)
		{
			// Reject the token if it overflows the line, or — when it is the
			// final token — if it overflows the line less the reserved right
			// spacing.  Parentheses added around the '&&' term to make the
			// (already-default) precedence explicit and silence -Wparentheses.
			if (!line.Empty() &&
				(line_width + token_width > maximum_line_width ||
				 (LastToken(next_token_begin, string_end, collapse_white_space, break_at_endline) &&
				  line_width + token_width > maximum_line_width - right_spacing_width)))
			{
				return false;
			}
		}

		// The token can fit on the end of the line, so add it onto the end and increment our width and length
		// counters.
		line += token;
		line_length += (next_token_begin - token_begin);
		line_width += token_width;

		// Break out of the loop if an endline was forced.
		if (break_line)
			return false;

		// Set the beginning of the next token.
		token_begin = next_token_begin;
	}

	return true;
}