Class: Gem::RequestSet::Lockfile::Tokenizer
Relationships & Source Files
Inherits: Object
Defined in: lib/rubygems/request_set/lockfile/tokenizer.rb
Constant Summary
Class Method Summary
- .from_file(file)
Instance Attribute Summary
- #empty? ⇒ Boolean readonly
Instance Method Summary
- #make_parser(set, platforms)
- #next_token (also: #shift)
- #peek
- #shift
  Alias for #next_token.
- #skip(type)
- #to_a
- #unshift(token)
- #tokenize(input) private
- #token_pos(byte_offset)
  Internal use only. Calculates the column (by byte) and the line of the current token based on byte_offset.
Constructor Details
.new(input, filename = nil, line = 0, pos = 0) ⇒ Tokenizer
# File 'lib/rubygems/request_set/lockfile/tokenizer.rb', line 12
def initialize(input, filename = nil, line = 0, pos = 0)
  @line     = line
  @line_pos = pos
  @tokens   = []
  @filename = filename
  tokenize input
end
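The constructor tokenizes its input eagerly, so a new instance is ready to be read immediately. A minimal usage sketch follows; the input string, the filename argument, and the require line are illustrative assumptions, with require 'rubygems/request_set' being one way to load the lockfile classes:

  require 'rubygems/request_set'

  tokenizer = Gem::RequestSet::Lockfile::Tokenizer.new "GEM\n", 'Gem.lock'
  tokenizer.to_a   # => [[:section, "GEM", 0, 0], [:newline, nil, 3, 0]]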
Class Method Details
.from_file(file)
# File 'lib/rubygems/request_set/lockfile/tokenizer.rb', line 8
def self.from_file(file)
  new File.read(file), file
end
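.from_file is a convenience constructor: it reads the file and records its path as the filename, which later appears in parse error messages. A hypothetical call, assuming a 'Gem.lock' file exists in the current directory:

  tokenizer = Gem::RequestSet::Lockfile::Tokenizer.from_file 'Gem.lock'
  tokenizer.empty?   # => false, unless the file produced no tokens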
Instance Attribute Details
#empty? ⇒ Boolean (readonly)
# File 'lib/rubygems/request_set/lockfile/tokenizer.rb', line 40
def empty?
  @tokens.empty?
end
Instance Method Details
#make_parser(set, platforms)
#next_token Also known as: #shift
# File 'lib/rubygems/request_set/lockfile/tokenizer.rb', line 48
def next_token
  @tokens.shift
end
#peek
# File 'lib/rubygems/request_set/lockfile/tokenizer.rb', line 53
def peek
  @tokens.first || EOF
end
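A sketch of how #peek and #next_token work together; the "GEM\n" input is illustrative, and EOF is the sentinel token returned once the queue is exhausted:

  tokenizer = Gem::RequestSet::Lockfile::Tokenizer.new "GEM\n"

  tokenizer.peek        # the :section token, not consumed
  tokenizer.next_token  # consumes and returns the :section token
  tokenizer.next_token  # consumes the :newline token
  tokenizer.empty?      # => true
  tokenizer.peek        # => the EOF sentinel token, never nil
  tokenizer.next_token  # => nil once the queue is empty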
#shift
Alias for #next_token.
# File 'lib/rubygems/request_set/lockfile/tokenizer.rb', line 51
alias :shift :next_token
#skip(type)
#to_a
# File 'lib/rubygems/request_set/lockfile/tokenizer.rb', line 24
def to_a
  @tokens.map {|token| [token.type, token.value, token.column, token.line] }
end
#token_pos(byte_offset)
This method is for internal use only.
Calculates the column (by byte) and the line of the current token based on byte_offset.
# File 'lib/rubygems/request_set/lockfile/tokenizer.rb', line 36
def token_pos(byte_offset) # :nodoc:
  [byte_offset - @line_pos, @line]
end
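Here @line_pos is the byte offset at which the current line began (updated in #tokenize whenever a newline token is emitted) and @line counts the newlines seen so far. For instance, while scanning "GEM\n  rake\n" the second line starts at byte 4, so a token beginning at byte 6 yields token_pos(6) == [2, 1]: byte column 2 on line 1.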
#tokenize(input) (private)
# File 'lib/rubygems/request_set/lockfile/tokenizer.rb', line 59
def tokenize(input)
  require 'strscan'
  s = StringScanner.new input

  until s.eos? do
    pos = s.pos

    pos = s.pos if leading_whitespace = s.scan(/ +/)

    if s.scan(/[<|=>]{7}/)
      message = "your #{@filename} contains merge conflict markers"
      column, line = token_pos pos

      raise Gem::RequestSet::Lockfile::ParseError.new message, column, line, @filename
    end

    @tokens <<
      case
      when s.scan(/\r?\n/) then
        token = Token.new(:newline, nil, *token_pos(pos))
        @line_pos = s.pos
        @line += 1
        token
      when s.scan(/[A-Z]+/) then
        if leading_whitespace
          text = s.matched
          text += s.scan(/[^\s)]*/).to_s # in case of no match
          Token.new(:text, text, *token_pos(pos))
        else
          Token.new(:section, s.matched, *token_pos(pos))
        end
      when s.scan(/([a-z]+):\s/) then
        s.pos -= 1 # rewind for possible newline
        Token.new(:entry, s[1], *token_pos(pos))
      when s.scan(/\(/) then
        Token.new(:l_paren, nil, *token_pos(pos))
      when s.scan(/\)/) then
        Token.new(:r_paren, nil, *token_pos(pos))
      when s.scan(/<=|>=|=|~>|<|>|!=/) then
        Token.new(:requirement, s.matched, *token_pos(pos))
      when s.scan(/,/) then
        Token.new(:comma, nil, *token_pos(pos))
      when s.scan(/!/) then
        Token.new(:bang, nil, *token_pos(pos))
      when s.scan(/[^\s),!]*/) then
        Token.new(:text, s.matched, *token_pos(pos))
      else
        raise "BUG: can't create token for: #{s.string[s.pos..-1].inspect}"
      end
  end

  @tokens
end
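To illustrate the token categories, here is the stream produced for a single dependency line; this is a sketch worked through the scanner rules above, not output copied from the docs. Note that leading whitespace downgrades an uppercase word from :section to :text, which is how indented names are told apart from section headers such as GEM or PLATFORMS.

  Gem::RequestSet::Lockfile::Tokenizer.new("  rake (= 10.3.2)\n").to_a
  # => [[:text, "rake", 2, 0],
  #     [:l_paren, nil, 7, 0],
  #     [:requirement, "=", 8, 0],
  #     [:text, "10.3.2", 10, 0],
  #     [:r_paren, nil, 16, 0],
  #     [:newline, nil, 17, 0]]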
#unshift(token)
# File 'lib/rubygems/request_set/lockfile/tokenizer.rb', line 44
def unshift(token)
  @tokens.unshift token
end
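A small push-back sketch with an illustrative input: a consumed token can be returned to the front of the queue, which is handy for one-token look-ahead.

  tokenizer = Gem::RequestSet::Lockfile::Tokenizer.new "GEM\n"
  token = tokenizer.shift   # consume the :section token
  tokenizer.unshift token   # put it back at the front
  tokenizer.peek.value      # => "GEM" again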