Class: Gem::RequestSet::Lockfile::Tokenizer
Relationships & Source Files
Namespace Children
Classes: Token
Inherits: Object
Defined in: lib/rubygems/request_set/lockfile/tokenizer.rb
Constant Summary
- EOF
Class Method Summary
- .from_file(file)
- .new(input, filename = nil, line = 0, pos = 0) ⇒ Tokenizer
Instance Attribute Summary
- #empty? ⇒ Boolean readonly
Instance Method Summary
- #make_parser(set, platforms)
- #next_token (also: #shift)
- #peek
- #shift (alias for #next_token)
- #skip(type)
- #to_a
- #unshift(token)
- #tokenize(input) private
- #token_pos(byte_offset) Internal use only. Calculates the column (by byte) and the line of the current token based on byte_offset.
Constructor Details
.new(input, filename = nil, line = 0, pos = 0) ⇒ Tokenizer
# File 'lib/rubygems/request_set/lockfile/tokenizer.rb', line 14
def initialize(input, filename = nil, line = 0, pos = 0)
  @line     = line
  @line_pos = pos
  @tokens   = []
  @filename = filename
  tokenize input
end
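As a rough usage sketch (the lockfile fragment and variable names below are invented for illustration), a tokenizer can be built straight from a lockfile string and drained token by token:

require "rubygems/request_set/lockfile/tokenizer"

# Hypothetical Gemfile.lock fragment; RubyGems ships with Ruby, so the
# require above is the only setup needed.
lockfile = "GEM\n  remote: https://rubygems.org/\n"

tokenizer = Gem::RequestSet::Lockfile::Tokenizer.new lockfile

until tokenizer.empty?
  token = tokenizer.next_token
  p [token.type, token.value]  # e.g. [:section, "GEM"], [:newline, nil], ...
end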
Class Method Details
.from_file(file)
# File 'lib/rubygems/request_set/lockfile/tokenizer.rb', line 10
def self.from_file(file)
  new File.read(file), file
end
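A minimal sketch of typical use; the path below is hypothetical:

# from_file reads the file and remembers its name for error reporting.
tokenizer = Gem::RequestSet::Lockfile::Tokenizer.from_file "Gemfile.lock"
tokenizer.to_a.first  # => first token as [type, value, column, line]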
Instance Attribute Details
#empty? ⇒ Boolean (readonly)
# File 'lib/rubygems/request_set/lockfile/tokenizer.rb', line 42
def empty?
  @tokens.empty?
end
Instance Method Details
#make_parser(set, platforms)
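The body is collapsed on this page; judging from the RubyGems source it simply hands this tokenizer to a lockfile parser, roughly:

# Sketch only; confirm against lib/rubygems/request_set/lockfile/parser.rb.
def make_parser(set, platforms)
  Gem::RequestSet::Lockfile::Parser.new self, set, platforms, @filename
end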
#next_token Also known as: #shift
# File 'lib/rubygems/request_set/lockfile/tokenizer.rb', line 50
def next_token
  @tokens.shift
end
#peek
# File 'lib/rubygems/request_set/lockfile/tokenizer.rb', line 55
def peek
  @tokens.first || EOF
end
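Note that #peek never returns nil: once the token list is exhausted it returns the EOF sentinel token, so callers can always ask for peek.type. A small sketch:

tokenizer = Gem::RequestSet::Lockfile::Tokenizer.new ""
tokenizer.peek.type  # => :EOF (the sentinel constant, not nil)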
#shift
Alias for #next_token.
# File 'lib/rubygems/request_set/lockfile/tokenizer.rb', line 53
alias_method :shift, :next_token
#skip(type)
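The body is collapsed on this page; in the RubyGems source it discards consecutive tokens of the given type (the parser uses it, for example, to skip runs of :newline tokens), along the lines of:

# Sketch only; the exact body should be checked against the source file.
def skip(type)
  @tokens.shift while !@tokens.empty? && peek.type == type
end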
#to_a
# File 'lib/rubygems/request_set/lockfile/tokenizer.rb', line 26
def to_a
  @tokens.map {|token| [token.type, token.value, token.column, token.line] }
end
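This flattened form mirrors the fields of the Token struct and is convenient in tests. For a hypothetical one-line input the result looks like:

Gem::RequestSet::Lockfile::Tokenizer.new("GEM\n").to_a
# => [[:section, "GEM", 0, 0], [:newline, nil, 3, 0]]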
#token_pos(byte_offset)
This method is for internal use only.
Calculates the column (by byte) and the line of the current token based on byte_offset.
# File 'lib/rubygems/request_set/lockfile/tokenizer.rb', line 38
def token_pos(byte_offset) # :nodoc:
  [byte_offset - @line_pos, @line]
end
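A worked example with invented numbers: if the current line began at byte 40 (@line_pos == 40) and the tokenizer is on line 3 (@line == 3), a token starting at byte 47 is reported at byte-column 7 of line 3:

# Illustrative values only.
tokenizer.token_pos(47)  # => [47 - 40, 3]  ==  [7, 3]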
#tokenize(input) (private)
# File 'lib/rubygems/request_set/lockfile/tokenizer.rb', line 61
def tokenize(input)
  require "strscan"
  s = StringScanner.new input

  until s.eos? do
    pos = s.pos

    pos = s.pos if leading_whitespace = s.scan(/ +/)

    if s.scan(/[<|=>]{7}/)
      message = "your #{@filename} contains merge conflict markers"
      column, line = token_pos pos

      raise Gem::RequestSet::Lockfile::ParseError.new message, column, line, @filename
    end

    @tokens <<
      if s.scan(/\r?\n/)
        token = Token.new(:newline, nil, *token_pos(pos))
        @line_pos = s.pos
        @line += 1
        token
      elsif s.scan(/[A-Z]+/)
        if leading_whitespace
          text = s.matched
          text += s.scan(/[^\s)]*/).to_s # in case of no match
          Token.new(:text, text, *token_pos(pos))
        else
          Token.new(:section, s.matched, *token_pos(pos))
        end
      elsif s.scan(/([a-z]+):\s/)
        s.pos -= 1 # rewind for possible newline
        Token.new(:entry, s[1], *token_pos(pos))
      elsif s.scan(/\(/)
        Token.new(:l_paren, nil, *token_pos(pos))
      elsif s.scan(/\)/)
        Token.new(:r_paren, nil, *token_pos(pos))
      elsif s.scan(/<=|>=|=|~>|<|>|!=/)
        Token.new(:requirement, s.matched, *token_pos(pos))
      elsif s.scan(/,/)
        Token.new(:comma, nil, *token_pos(pos))
      elsif s.scan(/!/)
        Token.new(:bang, nil, *token_pos(pos))
      elsif s.scan(/[^\s),!]*/)
        Token.new(:text, s.matched, *token_pos(pos))
      else
        raise "BUG: can't create token for: #{s.string[s.pos..-1].inspect}"
      end
  end

  @tokens
end
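In short, the scanner walks the input left to right and emits one token per match: an uppercase run with no leading whitespace becomes a :section token (indented uppercase text is treated as :text), a lowercase word followed by a colon becomes an :entry, parentheses, commas, bangs and version operators get their own token types, and anything else falls through to :text. A sketch of the output for a hypothetical fragment:

Gem::RequestSet::Lockfile::Tokenizer.new("GEM\n  rake (13.0.6)\n").to_a.map { |t| t.first(2) }
# => [[:section, "GEM"], [:newline, nil],
#     [:text, "rake"], [:l_paren, nil], [:text, "13.0.6"], [:r_paren, nil],
#     [:newline, nil]]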
#unshift(token)
# File 'lib/rubygems/request_set/lockfile/tokenizer.rb', line 46
def unshift(token)
  @tokens.unshift token
end
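This is the push-back primitive for one-token lookahead: a caller can shift a token, inspect it, and return it to the front of the queue if it is not ready to consume it. A small sketch:

tokenizer = Gem::RequestSet::Lockfile::Tokenizer.new "GEM\n"
token = tokenizer.shift    # take the :section token
tokenizer.unshift token    # put it back
tokenizer.peek.type        # => :section again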