// tokenizer.cpp

/**
*    Copyright (C) 2012 10gen Inc.
*
*    This program is free software: you can redistribute it and/or modify
*    it under the terms of the GNU Affero General Public License, version 3,
*    as published by the Free Software Foundation.
*
*    This program is distributed in the hope that it will be useful,
*    but WITHOUT ANY WARRANTY; without even the implied warranty of
*    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
*    GNU Affero General Public License for more details.
*
*    You should have received a copy of the GNU Affero General Public License
*    along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include <cstdlib>
#include <string>

#include "mongo/db/fts/tokenizer.h"
#include "mongo/util/stringutils.h"

namespace mongo {

    namespace fts {

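        // Builds a tokenizer over 'str'. Apostrophes are treated as text only when the
        // language is English; any leading whitespace is consumed up front.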
        Tokenizer::Tokenizer( const string& language, const StringData& str )
            : _pos(0), _raw( str ) {
            _english = language == "english";
            _skipWhitespace();
            _previousWhiteSpace = true;
        }

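        // Returns true while unconsumed characters remain in the input.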
        bool Tokenizer::more() const {
            return _pos < _raw.size();
        }

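        // Returns the next token. Consecutive TEXT characters are folded into a single
        // token, while a DELIMITER is always one character. The returned token records
        // its start offset and whether whitespace preceded it.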
        Token Tokenizer::next() {
            if ( _pos >= _raw.size() )
                return Token( Token::INVALID, "", 0, false );

            unsigned start = _pos++;
            Token::Type type = _type( _raw[start] );
            if ( type == Token::WHITESPACE ) abort(); // can't happen: whitespace is skipped before each token

            if ( type == Token::TEXT )
                while ( _pos < _raw.size() && _type( _raw[_pos] ) == type )
                    _pos++;

            StringData ret = _raw.substr( start, _pos - start );
            bool old = _previousWhiteSpace;
            _previousWhiteSpace = _skipWhitespace();
            return Token( type, ret, start, old );
        }


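        // Advances _pos past any whitespace characters and reports whether any were skipped.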
        bool Tokenizer::_skipWhitespace() {
            unsigned start = _pos;
            while ( _pos < _raw.size() && _type( _raw[_pos] ) == Token::WHITESPACE )
                _pos++;
            return _pos > start;
        }


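        // Classifies a single character as WHITESPACE, DELIMITER, or TEXT. The apostrophe
        // is TEXT for English (to keep contractions intact) and WHITESPACE otherwise.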
        Token::Type Tokenizer::_type( char c ) const {
            switch ( c ) {
            case ' ':
            case '\f':
            case '\v':
            case '\t':
            case '\r':
            case '\n':
                return Token::WHITESPACE;
            case '\'':
                if ( _english )
                    return Token::TEXT;
                else
                    return Token::WHITESPACE;

            case '~':
            case '`':

            case '!':
            case '@':
            case '#':
            case '$':
            case '%':
            case '^':
            case '&':
            case '*':
            case '(':
            case ')':

            case '-':

            case '=':
            case '+':

            case '[':
            case ']':
            case '{':
            case '}':
            case '|':
            case '\\':

            case ';':
            case ':':

            case '"':

            case '<':
            case '>':

            case ',':
            case '.':

            case '/':
            case '?':

                return Token::DELIMITER;
            default:
                return Token::TEXT;
            }
        }

    }  // namespace fts

}  // namespace mongo