I tried to continue working on my previous example and expand the rules. My problem is that rules that use ID_IDENTIFIER do not work, although I know the lexer itself is working (verified with unit tests).
Here's the example:
#include <iostream>
#include <string>

#include <boost/spirit/include/qi.hpp>
#include <boost/spirit/include/lex_lexertl.hpp>

namespace qi  = boost::spirit::qi;
namespace lex = boost::spirit::lex;

enum LexerIDs { ID_IDENTIFIER, ID_WHITESPACE, ID_INTEGER, ID_FLOAT, ID_PUNCTUATOR };
template <typename Lexer>
struct custom_lexer : lex::lexer<Lexer>
{
    custom_lexer()
        : identifier    ("[a-zA-Z_][a-zA-Z0-9_]*")
        , white_space   ("[ \\t\\n]+")
        , integer_value ("[1-9][0-9]*")
        , hex_value     ("0[xX][0-9a-fA-F]+")
        , float_value   ("[0-9]*\\.[0-9]+([eE][+-]?[0-9]+)?")
        , float_value2  ("[0-9]+\\.([eE][+-]?[0-9]+)?")
          // [ ] ( ) . &> ** * + - ~ ! / % << >> < > <= >= == != ^ & | ^^ && || ? : ,
        , punctuator    ("\\[|\\]|\\(|\\)|\\.|&>|\\*\\*|\\*|\\+|-|~|!|\\/|%|<<|>>|<|>|<=|>=|==|!=|\\^|&|\\||\\^\\^|&&|\\|\\||\\?|:|,")
    {
        using boost::spirit::lex::_start;
        using boost::spirit::lex::_end;
        this->self.add
            (identifier   , ID_IDENTIFIER)
            /*(white_space , ID_WHITESPACE)*/
            (integer_value, ID_INTEGER)
            (hex_value    , ID_INTEGER)
            (float_value  , ID_FLOAT)
            (float_value2 , ID_FLOAT)
            (punctuator   , ID_PUNCTUATOR);
        this->self("WS") = white_space;
    }
    lex::token_def<std::string> identifier;
    lex::token_def<lex::omit>   white_space;
    lex::token_def<int>         integer_value;
    lex::token_def<int>         hex_value;
    lex::token_def<double>      float_value;
    lex::token_def<double>      float_value2;
    lex::token_def<>            punctuator;
};
template <typename Iterator, typename Skipper>
struct custom_grammar : qi::grammar<Iterator, Skipper>
{
    template <typename TokenDef>
    custom_grammar(const TokenDef& tok) : custom_grammar::base_type(ges)
    {
        ges = qi::token(ID_IDENTIFIER);
        BOOST_SPIRIT_DEBUG_NODE(ges);
    }
    qi::rule<Iterator, Skipper> ges;
};
int main(int argc, char* argv[])
{
    std::string test("testidentifier");

    typedef char const* Iterator;
    typedef lex::lexertl::token<Iterator, lex::omit, boost::mpl::true_> token_type;
    typedef lex::lexertl::lexer<token_type> lexer_type;
    typedef qi::in_state_skipper<custom_lexer<lexer_type>::lexer_def> skipper_type;
    typedef custom_lexer<lexer_type>::iterator_type iterator_type;

    custom_lexer<lexer_type> my_lexer;
    custom_grammar<iterator_type, skipper_type> my_grammar(my_lexer);

    Iterator first = test.c_str();
    Iterator last  = &first[test.size()];

    bool r = lex::tokenize_and_phrase_parse(first, last, my_lexer, my_grammar,
                                            qi::in_state("WS")[my_lexer.self]);

    std::cout << std::boolalpha << r << "\n";
    std::cout << "Remaining unparsed: '" << std::string(first, last) << "'\n";
    return 0;
}
A similar rule using ID_INTEGER matches "1234" just fine.
The problem lies in the way you define your token IDs in your enumeration. Defined this way, ID_IDENTIFIER gets the value 0, and that is not a valid Spirit.Lex token ID. Fortunately, Spirit.Lex provides the constant boost::spirit::lex::min_token_id that you can use to make sure your IDs are valid. Using it, your enum becomes:
enum LexerIDs
{
    ID_IDENTIFIER = boost::spirit::lex::min_token_id + 1,
    ID_WHITESPACE,
    ID_INTEGER,
    ID_FLOAT,
    ID_PUNCTUATOR
};
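
As a quick sanity check (a minimal standalone sketch, not part of your original program), you can print the resulting values to confirm that ID_IDENTIFIER is no longer 0 but sits just above min_token_id:

#include <boost/spirit/include/lex_lexertl.hpp>
#include <iostream>

// Corrected enum: every ID is now at or above boost::spirit::lex::min_token_id.
enum LexerIDs
{
    ID_IDENTIFIER = boost::spirit::lex::min_token_id + 1,
    ID_WHITESPACE, ID_INTEGER, ID_FLOAT, ID_PUNCTUATOR
};

int main()
{
    // ID_IDENTIFIER is offset past min_token_id, so qi::token(ID_IDENTIFIER)
    // now refers to a valid token ID instead of 0.
    std::cout << "min_token_id : " << boost::spirit::lex::min_token_id << "\n";
    std::cout << "ID_IDENTIFIER: " << ID_IDENTIFIER << "\n";
}

With the adjusted enum, the rest of your code (the this->self.add(...) calls and qi::token(ID_IDENTIFIER) in the grammar) can stay exactly as it is.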