- const test = require('tape')
-
- const Lexer = require('../src/lexer')
- const tt = require('../src/tokenTypes')
-
- test('lexes simple template correctly', t => {
-   t.plan(4)
-   const lexer = new Lexer()
-   let tokens = lexer.scan(
-     '(div :class "foobar" (p "Lorem ipsum dolor sit amet"))',
-   ).tokens
-   t.deepEqual(tokens.map(token => token.type), [
-     tt.OPAREN,
-     tt.LITERAL,
-     tt.ATTRIBUTE,
-     tt.QUOTE,
-     tt.LITERAL,
-     tt.QUOTE,
-     tt.OPAREN,
-     tt.LITERAL,
-     tt.QUOTE,
-     tt.LITERAL,
-     tt.QUOTE,
-     tt.CPAREN,
-     tt.CPAREN,
-     tt.EOF,
-   ])
-   t.equal(tokens[1].value, 'div') // tag name literal
-   t.equal(tokens[2].value, 'class') // attribute name, without the leading colon
-   t.equal(tokens[9].value, 'Lorem ipsum dolor sit amet') // quoted string contents
- })
-
- test('keeps track of line numbers', t => {
-   t.plan(2)
-   const lexer = new Lexer()
-   let tokens = lexer.scan(`(
-     (div :class "foobar"
-       (p :class "bazquux"))
-   `).tokens
-   t.equal(tokens[2].line, 2) // the `div` literal sits on line 2
-   t.equal(tokens[7].line, 3) // the opening paren of `(p ...)` sits on line 3
- })
-
- test('multiple identifiers in a row are kept separate', t => {
-   t.plan(2)
-   const lexer = new Lexer()
-   let tokens = lexer.scan(`(test test test)`).tokens
-   t.deepEqual(
-     tokens.map(token => token.type),
-     [
-       tt.OPAREN,
-       tt.LITERAL,
-       tt.LITERAL,
-       tt.LITERAL,
-       tt.CPAREN,
-       tt.EOF,
-     ]
-   )
-   tokens = lexer.scan(`(test "test" test test)`).tokens
-   t.deepEqual(
-     tokens.map(token => token.type),
-     [
-       tt.OPAREN,
-       tt.LITERAL,
-       tt.QUOTE,
-       tt.LITERAL,
-       tt.QUOTE,
-       tt.LITERAL,
-       tt.LITERAL,
-       tt.CPAREN,
-       tt.EOF,
-     ]
-   )
- })