Enhanced functionality. Add //line annotation Ruby script.

Beoran, 3 years ago
parent commit 090a4fe987
17 changed files with 3246 additions and 1 deletion
  1. .gitignore (+5 -0)
  2. README.md (+106 -1)
  3. bad.ll1 (+12 -0)
  4. generate.go (+6 -0)
  5. grammar.go (+1 -0)
  6. liner (+84 -0)
  7. ll1.debug.tpl (+7 -0)
  8. ll1.dot.tpl (+12 -0)
  9. ll1.go (+671 -0)
  10. ll1.ll1 (+76 -0)
  11. ll1.parser.go.lined.tpl (+363 -0)
  12. ll1.parser.go.tpl (+335 -0)
  13. ll1_parser.go (+648 -0)
  14. main.go (+347 -0)
  15. muesli.ll1 (+19 -0)
  16. parser.go (+309 -0)
  17. template_functions.go (+245 -0)

+ 5 - 0
.gitignore

@@ -24,3 +24,8 @@ _testmain.go
 *.test
 *.prof
 
+# the ll1 binary itself
+ll1
+
+# Ignore lined templates
+*.lined.tpl

+ 106 - 1
README.md

@@ -1,3 +1,108 @@
 # ll1
 
-ll1 is a tool that reads in an LL(1) grammar description and checks it, and that, with the help of Go templates can generate code or reports based on those grammars.
+ll1 is a tool to parse and check LL(1) specifications, and to generate 
+code or reports from these specifications using Go templates. 
+An ll1 specification must contain the definition of an LL(1) grammar, and 
+may optionally also specify a lexer for that grammar.
+
+# Usage
+
+ll1 [options] input_file.ll1 [template_file.ext*]
+
+The [options] are:
+
+    -append file
+        Name of output file to append. Takes precedence over -out.
+    -define definition
+    	Add a definition for the template, in the form of key:value or 
+        []key:value. Keys that start with [] are arrays and can be 
+        appended to by specifying the same definition key again.
+        Non-array keys will be overwritten if they are specified again. 
+    -help
+        Shows the help page.
+    -out file
+        Name of output file to overwrite. 
+    -template file
+    	Template file to expand. This may be repeated to make use 
+        of several templates to generate one output file.
+    -verbose
+    	Be more verbose. Shows the scanned tokens as well.
+
+The names of template files may be given with the -t option, or after the 
+ll1 input file.
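+
+For example, this repository's own generate.go regenerates the ll1 parser 
+with:
+
+    ll1 -d []Import:text/scanner -d []Import:unicode -d LexerType:scanner.Scanner -o ll1_parser.go ll1.ll1 ll1.parser.go.lined.tpl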
+
+# Syntax
+
+The syntax of an ll1 specification, expressed in ll1's own notation, is: 
+    
+    Specification -> Grammar OptLexer.
+    Grammar -> Rules.
+    Rules -> Rule OptRules .
+    OptRules -> dot Rules | epsilon.
+    Rule -> Name arrow Definition Template.
+    Name -> ruleName .
+    Template -> rawString | epsilon .
+    // Alternates consist of sequences.
+    Definition -> Alternates . 
+    Alternates -> Sequence OptSequences .
+    OptSequences -> or Alternates | epsilon.
+    Sequence -> Element OptElements . 
+    OptElements -> Element OptElements | epsilon .
+    Element -> Parenthesis .
+    Element -> Name .
+    Element -> literal .
+    Parenthesis -> '(' Definition ')' .
+    OptLexer -> LexerTerminal OptLexerTerminals | epsilon .
+    LexerTerminal -> terminalName arrow LexerDefinition Template .
+    LexerDefinition -> LexerAlternates . 
+    LexerAlternates -> LexerPattern OptLexerMatches .
+    OptLexerMatches -> or LexerPattern | epsilon.
+    LexerPattern -> literal .
+    // Lexer specification starts here:
+    dot          -> '.'
+    or           -> '|'
+    literal      -> characterLiteral | stringLiteral
+    ruleName     -> "re:[[:isUpper:]][[:isAlNum:]]"
+    terminalName -> "re:[[:isLower:]][[:isAlNum:]]"
+    epsilon      -> "epsilon" | 'ε'
+    arrow        -> "->" | '→'
+
+The syntax of an ll1 grammar has the following elements:  
+  - //comment : Line comments start with //, /*block comments*/ are C-like
+  - RuleName  : names that start with an upper case letter are 
+                rule names or nonterminals defined by the grammar.
+  - terminal  : names that start with a lower case letter are names of 
+                terminals that the lexer produces.
+  - 'l'       : single quoted strings are rune literals that the lexer produces.
+  - "literal" : double quoted strings are rune literals that the lexer produces.
+  - arrow     : a literal -> or → that separates a rule's name from its 
+                definition.
+  - epsilon   : a literal "epsilon" or 'ε', which indicates the empty rule.
+                This is used in conjunction with alternates to make a rule 
+                optional.
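+
+For example, the following rule from ll1.ll1 combines alternates with 
+epsilon to make the rule optional:
+
+    OptRules -> '.' Rules | epsilon.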
+
+# Templates
+
+If no templates are given, ll1 simply checks the grammar and outputs a 
+simple text report to the output file.
+
+If a template is given, it will be expanded and output to the output file. 
+
+Inside the template the following variables are available: 
+  - .Grammar: contains the .Rules of the grammar.
+  - .InName: contains the name of the ll1 input file.
+  - .OutName: contains the name of the output file specified with -a or -o.
+  - .Templates: contains the names of the templates read.
+  - .Definitions: contains the keys of the available definitions.
+  - All other variables defined with -d
+    
+Inside the ll1 templates, the following template functions are available:
+  - Most functions from the strings package (see go doc strings).
+  - CompileRegexp compiles a regular expression with the regexp package, 
+    which can then be used as such.
+  - ToString converts anything that isn't a string to a string.
+  - NewMap creates a map from its arguments, which alternate between string 
+    keys and interface{} values. This is handy to pass multiple arguments 
+    to a sub-template.
+  - NewList creates a list from the given arguments.
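+
+For example, a minimal template that reports each rule with its first and 
+follow sets (a trimmed-down version of the included ll1.debug.tpl):
+
+    {{ range .Grammar.Rules -}}
+    {{ .Name }} ~ {{ .First }} ~ {{ .Follow }}
+    {{ end }}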

+ 12 - 0
bad.ll1

@@ -0,0 +1,12 @@
+/*
+This grammar for ll1 contains 
+grammar errors (but not parse errors) that the ll1 tool should detect.
+*/
+
+Grammar -> BadRecursion | BadMutualRecursion  | UndefinedNonterminal .
+BadRecursion -> epsilon | BadRecursion | epsilon .
+BadMutualRecursion -> epsilon | BadMutualRecursion2 | epsilon .
+BadMutualRecursion2 -> epsilon | BadMutualRecursion | epsilon .
+UndefinedNonterminal -> IAmUndefined .
+UndefinedNonterminalRecursive ->  UndefinedNonterminal .
+

+ 6 - 0
generate.go

@@ -0,0 +1,6 @@
+package main
+
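+// go generate runs the directives below in order: remove the previously 
+// generated parser, annotate the parser template with //line directives 
+// using the liner script, then run ll1 itself to regenerate ll1_parser.go.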
+//go:generate /bin/sh -c "rm ll1_parser.go || true"
+//go:generate ./liner -s -o ll1.parser.go.lined.tpl ll1.parser.go.tpl
+//go:generate ./ll1 -d []Import:text/scanner -d []Import:unicode  -d LexerType:scanner.Scanner -o ll1_parser.go ll1.ll1 ll1.parser.go.lined.tpl
+

+ 1 - 0
grammar.go

@@ -0,0 +1 @@
+package main

+ 84 - 0
liner

@@ -0,0 +1,84 @@
+#!/usr/bin/env ruby
+# This script reads in a file and annotates it with 
+# line number directives. Existing line annotations are updated.
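+#
+# Example usage (as in this repository's generate.go):
+#
+#   ./liner -s -o ll1.parser.go.lined.tpl ll1.parser.go.tpl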
+
+require 'optparse'
+
+@options = {}
+
+OptionParser.new do |opts|
+  opts.on("-o", "--out FILENAME", "Output FILENAME to write to.") do | v |
+    @options[:out_file] = v
+  end
+  opts.on("-i", "--in FILENAME", "Input FILENAME to read.") do | v |
+    @options[:in_file] = v
+  end
+  opts.on("-a", "--annotate REGEXP", "REGEXP, if matched, marker is written.") do | v |
+    @options[:annotate_re] = Regexp.new(v)
+  end
+  opts.on("-f", "--format FORMAT", "printf FORMAT of marker, must contain %s and %d of.") do | v |
+    @options[:marker_format] = v
+  end
+  opts.on("-m", "--marker REGEXP", "REGEXP, if matched, marker is updated.") do | v |
+    @options[:marker_re] = Regexp.new(v)
+  end
+  opts.on("-s", "--skip", "If set skipsthe markers in the line count") do | v |
+    @options[:skip] = true
+  end
+end.parse!
+
+file_name = @options[:in_file] || ARGV[0]
+
+if file_name == nil
+    puts "liner: liner [options] file_to_annotate"
+    exit 1
+end
+
+annotate_re      = @options[:annotate_re] || %r{^(func|type|const)}
+marker_format    = @options[:marker_format] || "//line %s:%d\n"
+marker_re        = @options[:marker_re] || %r{^//line [^:]+:[0-9]+$}
+out_name         = @options[:out_file]
+
+lines = nil
+File.open(file_name) do |file|
+    lines = file.readlines
+end
+
+marker_before    = false
+
+out = $stdout
+if out_name 
+  out = File.open(out_name, "w+")
+  if out == nil 
+    puts "liner: cannot open output file #{out_name}"
+    exit 2
+  end
+end
+
+line_number = 0
+lines.each do |line| 
+    line_number += 1
+    marker_match       = marker_re.match(line)
+    if marker_match   != nil
+        # Update existing marker
+        out.printf(marker_format, file_name, line_number)
+        marker_before  = true
+        # Skip read line in output
+        next        
+    end
+    
+    if marker_before
+        marker_before  = false
+    else 
+        annotate_match = annotate_re.match(line)
+        if annotate_match
+          out.printf(marker_format, file_name, line_number)
+          line_number += 1 unless @options[:skip]
+        end
+    end
+    out.puts(line)
+end
+
+if out != $stdout
+  out.close
+end

+ 7 - 0
ll1.debug.tpl

@@ -0,0 +1,7 @@
+Rule ~ Definition ~ First ~ Follow ~ Template
+----------------~----------------~----------------~----------------~----------------
+{{ range .Grammar.Rules -}}
+{{ .Name }}~{{ .Definition }}~{{ .First }}~{{ .Follow }}~{{ .Template }}
+{{ end }}
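+{{- /* Pipe the output through: column -s '~' -t  to align the columns. */ -}}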
+
+

+ 12 - 0
ll1.dot.tpl

@@ -0,0 +1,12 @@
+
+digraph {{ .Top.Name }} {
+	{{ range .Rules }}	
+		{{ .Name }} -> 
+		{{- range .Definition.Sequences }}
+			{{ . }}
+		{{ end -}}
+		;
+	{{ end }}
+}
+
+

+ 671 - 0
ll1.go

@@ -0,0 +1,671 @@
+package main
+
+// Use the Go scanner so we don't need to write a lexer (yet).
+import "text/scanner"
+
+import "fmt"
+import "unicode"
+import "sort"
+import "strings"
+
+
+/*
+The meta-grammar for ll1 is: 
+Grammar -> Rules .
+Rules -> Rule eor Rules |  .
+Rule -> Name arrow Definition. 
+Name -> identifier .
+Definition -> Alternates . // Alternates have a higher priority than sequences.
+Alternates -> Sequence or Alternates | .
+Sequence -> Element Sequence | . 
+Element -> Parenthesis | Terminal | Literal .
+Parenthesis -> '(' Definition ')' .
+Terminal -> identifier .
+*/
+
+
+type Token struct {
+    Kind rune
+    Value string
+    Position scanner.Position
+}
+
+func (t Token) String() string {
+    return fmt.Sprintf("token: %s %s %s", t.Position, scanner.TokenString(t.Kind), t.Value)
+}
+
+func (t Token) ShortString() string {
+    return t.Value
+}
+
+func (t Token) MakeError(mesg string) error {
+    return fmt.Errorf("%s: error at %s: <%s>: %s", t.Position, scanner.TokenString(t.Kind), t.Value, mesg)    
+}
+
+
+/*
+Check if a Grammar is LL(1):
+
+A predictive parser constructs a parsing table for the class of grammars 
+called LL(1). The first 'L' means the input is scanned from left to right, 
+the second 'L' refers to leftmost derivation, and the '1' is the number of 
+lookahead tokens.
+
+A grammar is LL(1) if it satisfies the following rules for every production 
+A → α | β, for all α, β:
+
+    1. α and β do not derive the same terminal.
+    2. At most one of α and β can derive ε.
+    3. If α derives ε, then FIRST(β) should not contain any terminal that 
+       is in FOLLOW(A), and vice versa.
+
+The first two conditions are equivalent to FIRST(α) ∩ FIRST(β) = ∅. 
+The third condition is equivalent to FOLLOW(A) ∩ FIRST(β) = ∅ if α 
+derives ε, and FOLLOW(A) ∩ FIRST(α) = ∅ if β derives ε.
+
+Note: a grammar with left recursion is not LL(1). For example, for 
+A → Aβ the parser cannot determine deterministically how many times the 
+production A → Aβ must be used.
+*/
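+// For example, A -> a B | a C is not LL(1): FIRST(a B) ∩ FIRST(a C) = {a} ≠ ∅.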
+
+
+type element struct {
+    token Token
+}
+
+func (element ) isElement() {
+}
+
+func (e element) String() string {
+    return e.token.ShortString()
+}
+
+func (e element) Check(g *Grammar, r * Rule) error {
+    return nil
+}
+
+func (e element) Token() Token {
+    return e.token
+}
+
+func (e element) FirstSet(g *Grammar) (Set, error) {
+    res := make(Set)
+    res.Add(e)
+    return res, nil
+}
+
+func (e element) FollowSet(g *Grammar, r *Rule) (Set, error) {
+    // most elements, i.e. terminals and epsilon have no follow set
+    // so return an empty set
+    res := make(Set)
+    return res, nil
+}
+
+// Epsilon is the empty rule.
+type Epsilon struct {
+    element
+}
+
+func (e Epsilon) String() string {
+    return "ε"
+}
+
+// End corresponds to EOF, the end of the input.
+type End struct {
+    element
+}
+
+func (e End) String() string {
+    return "$"
+}
+
+
+type Terminal struct {
+    element
+}
+
+func (t Terminal) String() string {
+    return t.Token().Value
+}
+
+type Nonterminal struct {
+    element
+    *Rule
+}
+
+func (n Nonterminal) String() string {
+    return n.Token().Value
+}
+
+// Nonterminals can cause recursion
+func (n Nonterminal) Check(g *Grammar, r *Rule) error {
+    found, err := g.Lookup(n)
+    if err != nil {
+        r.Undefined = true
+        return err
+    }
+    if  r.Equals(*found) {
+        r.Recursive = true
+        return n.Token().MakeError("left recursive")
+    }
+    // still need to check recursively as well
+    if r.Depth <  16 { 
+        r.Depth ++ 
+        return found.Definition.Check(g, r)
+    }
+    return n.Token().MakeError("left recursive or recursion too deep")
+}    
+                   
+func (a Alternates) Check(g *Grammar, r *Rule) error {
+    for _, s := range a.Sequences {
+        // check Leftmost element for left recursion
+        if (len(s.Elements) > 0) {
+            e := s.Elements[0]
+            err := e.Check(g, r)
+            if err != nil {
+                return err
+            }
+        }
+    }
+    return nil
+}
+
+type Literal struct {
+    Terminal
+}
+
+func (l Literal) String() string {
+    return "\"" + l.Token().Value + "\""
+}
+
+// Element can be one of Terminal, Nonterminal, Literal, or Alternates 
+// in case of a sub-rule.
+type Element interface {
+    isElement()
+    Token() Token
+    String() string
+    Check(g *Grammar, r * Rule) error
+    FirstSet(g *Grammar) (Set, error)
+    FollowSet(g *Grammar, r *Rule) (Set, error)
+} 
+
+type Sequence struct {
+    Elements []Element
+}
+
+func (s Sequence) String() string {
+    sep := ""
+    res := ""
+    for _, seq := range s.Elements {
+        res = res + sep + seq.String()
+        sep = "   "
+    }
+    return res
+}
+
+type Alternates struct {
+    element
+    Sequences []Sequence
+}
+
+func (a Alternates) String() string {
+    sep := ""
+    res := "("
+    for _, seq := range a.Sequences {
+        res = res + sep + seq.String()
+        sep = " | "
+    }
+    return res + ")"
+}
+
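+// Set is a set of grammar elements, keyed by their token's string value.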
+type Set map[string]Element
+
+func IsEpsilon(e Element) bool {
+    _, ok := e.(Epsilon)
+    return ok
+}
+
+func IsNonterminal(e Element) bool {
+    _, ok := e.(Nonterminal)
+    return ok
+}
+
+
+// whether an element set is nullable (i.e. contains epsilon)
+func (s Set) IsNullable() bool {
+    for _, e := range s {
+        if IsEpsilon(e) {
+            return true
+        }
+    }
+    return false
+}
+
+func (s Set) Contains(e Element) bool {
+    name := e.Token().Value
+    v, ok := s[name]
+    return v!= nil && ok
+}
+
+func (s * Set) Add(e Element) bool {
+    name := e.Token().Value
+    v, ok := (*s)[name]
+    if v!= nil || ok {
+        return false
+    }
+    (*s)[e.Token().Value] = e
+    return true
+}
+
+func (s Set) UnionWithoutEpsilon(s2 Set) Set {
+    res := make(Set)
+    if s != nil {
+        for _, v := range s {
+            if !IsEpsilon(v) {
+                res.Add(v)
+            }
+        }
+    }
+    if s2 != nil {
+        for _, v := range s2 {
+            if !IsEpsilon(v) {
+                res.Add(v)
+            }
+        }
+    }
+    return res
+}
+
+func (s Set) Union(s2 Set) Set {
+    res := make(Set)
+    for _, v := range s {
+        res.Add(v)
+    }
+    for _, v := range s2 {
+        res.Add(v)
+    }
+    return res
+}
+
+func (s Set) Intersect(s2 Set) Set {
+    res := make(Set)
+    for _, v := range s {
+        if s2.Contains(v) { 
+            res.Add(v)
+        }
+    }
+    return res
+}
+
+func (s Set) String() string {
+    if len(s) == 0 {
+        return "∅"
+    }
+    aid := []string{}
+    for _, v := range s {
+        aid = append(aid, v.String())
+    }
+    sort.Strings(aid)
+    return strings.Join(aid, " ")
+}
+
+// Definition of a rule is nothing but a set of alternates,
+// where alternates can contain sequences and parenthesis.
+type Definition = Alternates
+
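+// Rule is a single named rule of an ll1 grammar, together with the results
+// of analyzing it.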
+type Rule struct {
+    Name string
+    Definition
+    // Template for code generation for this rule.
+    Template string
+    // First set of the rule
+    First Set
+    // Follow set of the rule
+    Follow Set
+    // Nullable or not
+    Nullable bool
+    // Realizable or not
+    Realizable bool
+    // Recursive (i.e. LEFT-recursive, which LL(1) disallows) 
+    Recursive bool
+    // Undefined nonterminals in this rule
+    Undefined bool
+    // Depth is the depth of (mutual) recursion of a rule. Limited to 16.
+    Depth int
+}
+
+func (r Rule) String() string {
+    return r.Name  + " → " + r.Definition.String()
+}
+ 
+func (r Rule) IsTerminal() bool {
+    return !unicode.IsUpper([]rune(r.Name)[0])
+}
+ 
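+// Grammar is a complete ll1 grammar: a top rule plus all rules, and the
+// terminals, nonterminals and literals that occur in them.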
+type Grammar struct {
+    Top     *Rule
+    Rules   []*Rule
+    // Unique terminals in all Rules
+    Terminals map[string]Terminal
+    // Unique nonterminals in all Rules
+    NonTerminal map[string]Rule
+    // Unique literals in all Rules
+    Literals map[string]Literal
+    // Whether or not the grammar is LL(1). Only valid 
+    // after running Check()
+    LL1 bool
+}
+
+func (g Grammar) String() string {
+    res := "Top → " + g.Top.Name + "\n"
+    for _, r := range g.Rules {
+        res = res + r.String() + "\n"
+    }
+    return res
+}
+
+
+func (g Grammar) Lookup(nt Nonterminal) (*Rule, error) {
+    for _, r := range g.Rules {
+        if r.Name == nt.Token().Value {
+            return r, nil
+        }
+    }
+    return nil, nt.Token().MakeError("undefined nonterminal")
+}
+
+func (n Nonterminal) FirstSet(g *Grammar) (Set, error) {
+    found, err := g.Lookup(n)
+    if err != nil {
+        return Set{}, err
+    }
+    return found.Definition.FirstSet(g)
+}    
+
+func (n Nonterminal) FollowSet(g *Grammar, r *Rule) (Set, error) {
+    found, err := g.Lookup(n)
+    if err != nil {
+        return Set{}, err
+    }
+    return found.Definition.FollowSet(g, r)
+}    
+
+func (s Sequence) FirstSet(g *Grammar) (Set, error) {
+    res := make(Set)
+    for _, elt := range s.Elements {
+        fs, err := elt.FirstSet(g)
+        if err != nil {
+            return res, err
+        }
+        res = res.UnionWithoutEpsilon(fs)
+        if !fs.IsNullable() {
+            return res, nil
+        }        
+    }
+    // If we get here, all sequence elements were nullable. 
+    // Add an epsilon to the set.
+    res.Add(Epsilon{})
+    return res, nil 
+}
+
+// FollowSet returns the follow set of this Sequence for rule r in grammar g.
+func (seq Sequence) FollowSet(g *Grammar, r *Rule) (Set, error) {
+    res := make(Set)
+    // If in one of the sequences of one of the rules of the grammar
+    // the original rule is referred to by a nonterminal
+    // then, the follow set is the first set of the following. 
+    // If that contains epsilon, go on to the following and so on 
+    for i := 1 ; i < len(seq.Elements) ; i++ {
+        elt := seq.Elements[i]
+        if nt, ok := elt.(Nonterminal) ; ok {
+            if nt.Token().Value != r.Name {
+               continue 
+            }
+            first, err := nt.FirstSet(g)
+            if err != nil {
+                    return res, err
+            }
+            for _, e := range first {
+                fos, err := e.FollowSet(g, r)
+                if err != nil {
+                    return res, err
+                }            
+                res = res.UnionWithoutEpsilon(fos)
+            }            
+            fmt.Printf("Found follow candidate: %v at %d, %d\n", nt, i, len(seq.Elements))
+            for j := i + 1 ; j < len(seq.Elements) ; j ++ {
+                next := seq.Elements[j]
+                first, err := next.FirstSet(g)
+                if err != nil {
+                    return res, err
+                }
+                // union first sets until one is not nullable
+                res = res.UnionWithoutEpsilon(first)
+                if !first.IsNullable() {
+                    break
+                }
+            }
+        }
+    }
+    return res, nil
+}
+
+                   
+func (a Alternates) FirstSet(g *Grammar) (Set, error) {
+    res := make(Set)
+    for _, s := range a.Sequences {
+        // Check Leftmost element
+        if (len(s.Elements) > 0) {
+            e := s.Elements[0]
+            fs, err := e.FirstSet(g)
+            if err != nil {
+                return res, err
+            }
+            res = res.Union(fs)
+        }
+    }
+    return res, nil
+}
+
+func (a Alternates) FollowSet(g *Grammar, r *Rule) (Set, error) {
+    res := make(Set)
+    for _, s := range a.Sequences {
+        fos, err := s.FollowSet(g, r)
+        if err != nil {
+            return res, err
+        }
+        res = res.UnionWithoutEpsilon(fos)
+    }
+    return res, nil
+}
+
+// Rules are compared by name
+func (r Rule) Equals(o Rule) bool {
+    return r.Name == o.Name
+}
+
+func (r *Rule) Check(g *Grammar) error { 
+    err :=  r.Definition.Check(g, r)
+    if err != nil { 
+        return err
+    }
+    // Now calculate the first and follow set
+    fs, err := r.Definition.FirstSet(g)    
+    r.First = r.First.Union(fs)
+    return err
+}
+
+/*
+Computing the Follow-sets for the nonterminals in a grammar can be done as follows:
+
+    initialize Fo(S) with { $ } and every other Fo(Ai) with the empty set
+    if there is a rule of the form Aj → wAiw' , then
+        if the terminal a is in Fi(w' ), then add a to Fo(Ai)
+        if ε is in Fi(w' ), then add Fo(Aj) to Fo(Ai)
+        if w' has length 0, then add Fo(Aj) to Fo(Ai)
+    repeat step 2 until all Fo sets stay the same.
+
+This provides the least fixed point solution to the following system: 
+*/
+
+/*
+ *   function makeFollowSets() {
+    followSets[rules[0].left].push(END_MARKER);
+
+    let isSetChanged;
+
+    do {
+      isSetChanged = false;
+
+      rules.forEach(({ left, right }) => {
+        right.forEach((item, index) => {
+          if (!isNonterminal(item)) return;
+
+          let set = followSets[item];
+
+          set = union(
+            set,
+            index + 1 < right.length
+              ? collectSet(set, right.slice(index + 1), followSets[left])
+              : followSets[left],
+          );
+
+          if (followSets[item].length !== set.length) {
+            followSets[item] = set;
+            isSetChanged = true;
+          }
+        });
+      });
+    } while (isSetChanged);
+
+    return followSets;
+  }
+*/
+
+// FollowSet calculates the follow set of the rule in the grammar.
+func (r1 *Rule) FollowSet(g *Grammar) (Set, error) {
+    // Top level has empty follow set
+    if r1.Equals(*g.Top) {
+        return make(Set), nil 
+    }
+    
+    set := make(Set)
+        
+    for _, r2 := range g.Rules {
+        fos, err := r2.Definition.FollowSet(g, r1)
+        if err != nil {
+            return set, err
+        }
+        set = set.UnionWithoutEpsilon(fos)
+        if !r1.Equals(*r2) {
+            fos, err := r1.Definition.FollowSet(g, r2)
+            if err != nil {
+                return set, err
+            }
+            set = set.UnionWithoutEpsilon(fos)        
+        }         
+    }
+    return set, nil
+}
+
+func (g * Grammar) CalculateFollowSets() error {
+    g.Top.Follow = make(Set)
+    g.Top.Follow.Add(End{})
+    for _, r := range g.Rules {                        
+        if !r.Equals(*g.Top) {
+            r.Follow = make(Set)
+        }
+    }   
+    changed := false
+    for  {
+        changed = false
+        for _, r := range g.Rules {
+            for _, seq := range r.Definition.Sequences {
+                for index, elt := range seq.Elements {
+                    nt, ok := elt.(Nonterminal) 
+                    if !ok {
+                        continue
+                    }
+                    found, err := g.Lookup(nt)
+                    if err != nil {
+                        return err
+                    }
+                    originalLength := len(found.Follow)
+                    // if w' has length 0, then add Fo(Aj) to Fo(Ai)
+                    if index + 1  == len(seq.Elements) {
+                        found.Follow = found.Follow.UnionWithoutEpsilon(r.Follow)
+                    }  else {
+                        j := 1
+                        for ; index + j < len(seq.Elements); j++ { 
+                            next := seq.Elements[index + j]
+                            // if the terminal a is in Fi(w' ), then add a to Fo(Ai)
+                            first, err := next.FirstSet(g)
+                            if err != nil {
+                                return err
+                            }
+                            found.Follow = found.Follow.UnionWithoutEpsilon(first)
+                            // if ε is in Fi(w' ), then add Fo(Aj) to Fo(Ai)
+                            if first.IsNullable() {
+                                found.Follow = found.Follow.UnionWithoutEpsilon(r.Follow)
+                            }  else {
+                                break; // stop adding if the i+j'th element
+                                // is not nullable
+                            }
+                        }
+                        if index + j ==  len(seq.Elements) {
+                            found.Follow = found.Follow.UnionWithoutEpsilon(r.Follow)
+                        }
+                    }
+                    changed = changed || (originalLength != len(found.Follow))
+                }
+            }
+        }
+        if !changed {
+                break
+        }
+    }
+    return nil
+}
+
+
+func (r *Rule) FirstSet(g *Grammar) (Set, error) {
+    return r.Definition.FirstSet(g)
+}
+
+func (g *Grammar) Check() []error {
+    errs := []error{}
+    // Check for left recursion
+    for i, rule := range g.Rules {
+        err := rule.Check(g)
+        if err != nil {
+            errs = append(errs, err)
+        }
+        // Store modifications by check to rule.
+        g.Rules[i] = rule
+    }
+        
+    for _, rule := range g.Rules {
+        var err error
+        tok := rule.Token()
+        switch {
+            case rule.Recursive: 
+                err = tok.MakeError("left recursive here")
+            case rule.Undefined:
+                err = tok.MakeError("has undefined elements")
+            default:
+                err = nil
+        }
+        if err != nil {
+            errs = append(errs, err)
+        }
+    }
+    
+    err := g.CalculateFollowSets()
+    if err != nil {
+        errs = append(errs, err)
+    }
+
+        
+    return errs
+}
+
+
+
+
+

+ 76 - 0
ll1.ll1

@@ -0,0 +1,76 @@
+/*
+This is the grammar for LL1 itself. The lexer is the Go language scanner,
+so comments are ignored.
+*/
+
+Grammar -> Rules `` .
+Rules -> Rule OptRules .
+OptRules -> '.' Rules | epsilon.
+Rule -> Name arrow Definition Template.
+Name -> identifier .
+Template -> rawString | epsilon .
+// Alternates consist of sequences.
+Definition -> Alternates . 
+Alternates -> Sequence OptSequences .
+OptSequences -> '|' Alternates | epsilon.
+Sequence -> Element OptElements . 
+OptElements -> Element OptElements | epsilon.
+Element -> Parenthesis .
+Element -> Name .
+Element -> literal /*| Rule */.
+Parenthesis -> '(' Definition ')' .
+// Lexer specifications. These are rules that have a name that 
+// begins with a lower case letter
+dot          -> '.' .
+or           -> '|' .
+identifier   -> ruleName | terminalName .
+ruleName     -> "re:[[:isUpper:]][[:isAlNum:]]" . 
+terminalName -> "re:[[:isLower:]][[:isAlNum:]]" .
+epsilon      -> "epsilon" | 'ε' . 
+arrow        -> "->" | '→' .
+literal -> stringLiteral | charLiteral .
+stringLiteral 	-> "re:\"[^\"]+\"" .
+charLiteral 	-> "re:'[^']+'" .
+rawString 		-> "re:`[^`]+`" .
+whiteSpace 		-> "re:[:isblank:]+" .
+lineComment 	-> "re://[^\n]*\n" .
+handCoded		-> "code:" `
+// Not implemented
+` .
+
+/*
+Earlier analysis output (Rule, First, Follow, Nullable, Realizable):
+
+Grammar        identifier            ∅                                  no   yes
+Rules          identifier            ∅                                  no   yes
+OptRules       .                     ∅                                  yes  yes
+Rule           identifier            .                                  no   yes
+Name           identifier            ) literal ( identifier | arrow .   no   yes
+Definition     literal ( identifier  ) .                                no   yes
+Alternates     literal ( identifier  ) .                                no   yes
+OptSequences   |                     ) .                                yes  yes
+Sequence       literal ( identifier  ) | .                              no   yes
+OptElements    literal ( identifier  ) | .                              yes  yes
+Element        literal ( identifier  ) literal ( identifier | .         no   yes
+Parenthesis    (                     ) literal ( identifier | .         no   yes
+
+
+Output of:
+./ll1 ll1.ll1 -t ll1.debug.tpl | column -e -s '~' -t -x
+
+Rule               Definition                      First                   Follow
+----------------  ----------------                ----------------        ----------------
+Grammar           (Rules)                         identifier              $
+Rules             (Rule   OptRules)               identifier              $
+OptRules          ("."   Rules | ε)               . ε                     $
+Rule              (Name   arrow   Definition)     identifier              . ε
+Name              (identifier)                    identifier              ( arrow identifier literal ε
+Definition        (Alternates)                    ( identifier literal    ) . ε
+Alternates        (Sequence   OptSequences)       ( identifier literal    ) . ε
+OptSequences      ("|"   Alternates | ε)          | ε                     ) . ε
+Sequence          (Element   OptElements)         ( identifier literal    | ε
+OptElements       (Element   OptElements | ε)     ( identifier literal ε  | ε
+Element           (Parenthesis | Name | literal)  ( identifier literal    ( identifier literal ε
+Parenthesis       ("("   Definition   ")")        (                       ( identifier literal ε
+
+
+
+*/

+ 363 - 0
ll1.parser.go.lined.tpl

@@ -0,0 +1,363 @@
+{{- /* This template generates a recursive descent parser based on the */ -}}
+{{- /* information about the LL(1) grammar processed by the ll1 tool.  */ -}}
+
+/* 
+ * {{.OutName}}: Parser for the {{.Grammar.Top.Name}} grammar. 
+ * Generated by the ll1 tool from {{.InName}} at {{Now}}.
+ * Based on template: {{.Templates}}
+ * Uses a scanner
+ * 
+ * Available definition keys at template expansion: 
+ * {{.Definitions}}
+ * 
+ * DO NOT EDIT. 
+ */
+package {{ .Package }}
+
+
+{{ range .Import }}
+import "{{.}}"
+{{ end }}
+import "io"
+import "os"
+import "fmt"
+
+{{$prefix := .Prefix }}
+
+{{- $Parser := ( printf "%s%s" $prefix "Parser") -}}
+{{- $ParserError := ( printf "%s%s" $prefix "ParserError") -}}
+{{- $Lexer := ( printf "%s%s" $prefix "Lexer") -}}
+{{- $TokenKind := ( printf "%s%s" $prefix "TokenKind") -}}
+{{- $Position := ( printf "%s%s" $prefix "Position") -}}
+{{- $Token := ( printf "%s%s" $prefix "Token") -}}
+{{- $Value := ( printf "%s%s" $prefix "Value") -}}
+
+
+// {{$Value}}  is the lexical value of a lexer token. 
+{{if .ValueType }}
+
+//line ll1.parser.go.tpl:39
+type {{$Value}} = {{.ValueType}}
+{{ else }}
+// This is based on strings as a default.
+
+//line ll1.parser.go.tpl:43
+type {{$Value}} = string
+{{ end }}
+
+
+{{if eq .LexerType "scanner.Scanner"}}
+// {{$Position}} is a position within a source file. Since the lexer is based on 
+// text/scanner, we use that package's Position.
+
+//line ll1.parser.go.tpl:51
+type {{$Position}} = scanner.Position
+{{else}}
+// {{$Position}} is a position within a source file. 
+
+//line ll1.parser.go.tpl:55
+type {{$Position}} struct {
+	Filename string // filename, if any
+	Offset   int    // byte offset, starting at 0
+	Line     int    // line number, starting at 1
+	Column   int    // column number, starting at 1 (character count per line)
+}
+{{end}}
+
+
+// {{$TokenKind}} is the kind or type of a token.
+// This has rune as the underlying type so one-character tokens can be easily 
+// supported. EOF will be 65535 (i.e., -1 cast to rune). Non-character token 
+// kinds will start from 65533 down (i.e -3, -4, -5, etc).
+
+//line ll1.parser.go.tpl:69
+type {{$TokenKind}} rune
+
+
+// No{{$TokenKind}} means "no token kind" i.e. no token. 
+
+//line ll1.parser.go.tpl:74
+const No{{$TokenKind}} {{$TokenKind}} = {{$TokenKind}}(0)
+// {{$TokenKind}}EOF means the end of the input. 
+
+//line ll1.parser.go.tpl:77
+const {{$TokenKind}}EOF {{$TokenKind}} = {{$TokenKind}}(-1)
+// {{$TokenKind}}Error means a parsing or lexing error was encountered. 
+
+//line ll1.parser.go.tpl:80
+const {{$TokenKind}}Error {{$TokenKind}} = {{$TokenKind}}(-2)
+
+
+
+// Convert token kind to a string representation
+
+//line ll1.parser.go.tpl:86
+func (tk {{$TokenKind}}) String() string {
+    {{if eq .LexerType "scanner.Scanner"}}
+        return scanner.TokenString(rune(tk))
+    {{else}}
+    switch (tk) {
+        case No{{$TokenKind}}: return "NoToken"
+        case {{$TokenKind}}EOF: return "EOF"
+        {{ range .Grammar.Rules -}}
+        {{- $ruleName := .Name -}}
+        {{- if .IsTerminal -}}
+        {{- $TokenKindName := ( printf "%s%s" $TokenKind $ruleName) -}}    
+        case {{$TokenKindName}}: return "{{$TokenKindName}}"
+        {{end}}
+        {{end}}
+        default:
+            return fmt.Sprintf("TokenKind(%d)", int(tk))
+    }
+    {{end}}
+}
+
+
+// {{$Token}} is the result of a single lexical analysis step by the lexer.
+
+//line ll1.parser.go.tpl:109
+type {{$Token}} struct {
+	{{$Position}}   // Position in the source where the token was found.
+	{{$TokenKind}}  // Type of the token
+	{{$Value}}      // Value of the token
+}
+
+
+// Make{{$Token}} makes a token with the given position, type and value.
+
+//line ll1.parser.go.tpl:118
+func Make{{$Token}}(pos {{$Position}}, typ {{$TokenKind}}, val {{$Value}}) {{$Token}} {
+    return {{$Token}}{ pos, typ, val}
+}
+
+// {{$Lexer}} performs the lexical analysis of the input.
+
+//line ll1.parser.go.tpl:124
+type {{$Lexer}} struct {
+    // Embed {{.LexerType}}
+    {{.LexerType}}
+    Filename string    
+}
+
+{{if eq .LexerType "scanner.Scanner"}}
+// New{{$Lexer}}FromReader creates a new lexer for the given parser and input.
+
+//line ll1.parser.go.tpl:133
+func New{{$Lexer}}FromReader(parser *{{$Parser}}, reader io.Reader, filename string) *{{$Lexer}} {
+    lexer := &{{$Lexer}}{}
+    lexer.Filename = filename
+    lexer.Scanner.Init(reader)
+    lexer.Scanner.Mode = scanner.GoTokens
+    lexer.Scanner.Error = func (s *scanner.Scanner, msg string) {
+        parser.Panicf("%s: scanner error: %s, %s", s.Position, s.TokenText(), msg)
+    }
+    // XXX: needs to be generated from the identifier rule in the syntax!
+    lexer.Scanner.IsIdentRune = func(ch rune, i int) bool {
+        if i == 0 {
+            return unicode.IsLetter(ch)
+        }    
+        return unicode.IsLetter(ch) || 
+               unicode.IsNumber(ch) || 
+                          ch == '_' || 
+                          ch == '-'
+    }
+    return lexer
+}
+
+
+//line ll1.parser.go.tpl:155
+func (lex *{{$Lexer}}) Lex() {{$Token}} {
+    scanned := lex.Scanner.Scan()
+    pos := lex.Scanner.Position
+    pos.Filename = lex.Filename    
+    value := lex.Scanner.TokenText()
+    // Get rid of the quotes
+    if scanned == scanner.Char || 
+       scanned == scanner.String || 
+       scanned == scanner.RawString {
+           value = value[1:len(value) - 1]
+    }
+    token := {{$Token}} { 
+        {{$TokenKind}}: {{$TokenKind}}(scanned), 
+        {{$Value}}: value, 
+        {{$Position}}: pos,
+    }
+    return token
+}
+{{else}}
+// Please provide the following functions:
+//
+// * Your own lexer creation function with the following signature: 
+// New{{$Lexer}}FromReader(parser *{{$Parser}}, reader io.Reader, filename string) *{{$Lexer}} 
+//
+// * Your own lexing function with the type
+// func (lex *{{$Lexer}}) Lex() {{$Token}}
+{{end}}
+
+
+
+// {{$Parser}} parses the input and returns a parse tree, 
+// based on the rules in {{.InName}}
+
+//line ll1.parser.go.tpl:188
+type {{$Parser}} struct {
+    reader io.Reader
+    lexer *{{$Lexer}}
+    current {{$Token}}
+    Errors []{{$ParserError}}
+    Filename string
+    Debug io.Writer
+}
+
+
+//line ll1.parser.go.tpl:198
+func New{{$Parser}}FromReader(reader io.Reader, filename string, debug bool) *{{$Parser}} {
+    parser := &{{$Parser}}{}
+    parser.lexer = New{{$Lexer}}FromReader(parser, reader, filename)
+    parser.Filename = filename
+    
+    parser.current.{{$TokenKind}} = No{{$TokenKind}}
+    parser.Debug = nil
+    if debug {
+        parser.Debug = os.Stderr
+    }
+    return parser
+}
+
+
+// Advances the parser. Returns the current token /after/ advancing.
+
+//line ll1.parser.go.tpl:214
+func (p *{{$Parser}}) Advance() {{$Token}} {
+	token := p.lexer.Lex()
+    p.Debugf("Lexed token: %v", token)
+	p.current = token
+    return token
+}
+
+// {{$ParserError}} is an error encountered during parsing or lexing. 
+// The parser may panic with this type on errors that would prevent the parser 
+// from making progress.
+
+//line ll1.parser.go.tpl:225
+type {{$ParserError}} struct {
+    *{{$Parser}} // Parser that had the error.
+    *{{$Token}}  // Token at which the error was found
+    Chain error  // underlying error
+}
+
+
+//line ll1.parser.go.tpl:232
+func (pe {{$ParserError}}) Error() string {
+    // XXX will need to be improved
+	return pe.Chain.Error()
+}
+
+
+//line ll1.parser.go.tpl:238
+func (parser *{{$Parser}}) Errorf(message string, args ...interface{}) {{$ParserError}} {
+	err := fmt.Errorf(message, args...)
+    pe := {{$ParserError}} { 
+        {{$Parser}}: parser, 
+        {{$Token}}: &parser.current, 
+        Chain: err,
+    }
+    parser.Errors = append(parser.Errors, pe)
+    return pe
+}
+
+
+//line ll1.parser.go.tpl:250
+func (parser *{{$Parser}}) Panicf(message string, args ...interface{})  {
+    pe := parser.Errorf(message, args...)
+    panic(pe)
+}
+
+
+
+//line ll1.parser.go.tpl:257
+func (p *{{$Parser}}) Debugf(message string, args ...interface{})  {
+    if p.Debug != nil {
+        fmt.Fprintf(p.Debug, message, args...)
+    }
+}
+
+/* Looks at the current token and advances the lexer if the token is of any of
+the token kinds given in kinds. In this case it will return the accepted
+token and advance the parser. Otherwise, it will call parser.Panicf.*/
+
+//line ll1.parser.go.tpl:267
+func (parser *{{$Parser}}) Require(kinds ...{{$TokenKind}}) {{$Token}} {
+    parser.Debugf("Require: %v\n", kinds)
+	if parser.current.{{$TokenKind}} == {{$TokenKind}}(0) {
+		parser.Advance()
+	}
+	
+	expected := ""
+	sep := ""
+	for _, kind := range kinds {
+		if kind == parser.current.{{$TokenKind}} {
+			accepted := parser.current
+			parser.Advance()
+			return accepted
+		}
+		expected = fmt.Sprintf("%s%s%s", expected, sep, kind.String())
+		sep = ", "
+	}
+	
+	parser.Panicf("error: expected one of the following: %s", expected)
+	return {{$Token}}{}
+}
+
+//line ll1.parser.go.tpl:288
+func (parser *{{$Parser}}) NextIs(kinds ...{{$TokenKind}}) bool {
+    parser.Debugf("NextIs: %v\n", kinds)
+    if (parser.current.{{$TokenKind}} == 0) {
+		parser.Advance()
+	}    
+    for _, kind := range kinds {
+        if kind == parser.current.{{$TokenKind}} {
+            return true
+        }
+    }
+    return false
+}
+
+{{ $tokenKindValue := 2 }}
+
+{{ range .Grammar.Rules -}}
+{{- $ruleName := .Name -}}
+{{ if .Template }}
+// Expanded from template of rule {{$ruleName}}
+{{ .Template }}
+{{ end }}
+{{- $terminal := .IsTerminal -}}
+{{- if $terminal -}}
+{{- $TokenKindName := ( printf "%s%s" $TokenKind $ruleName) -}}
+
+//line ll1.parser.go.tpl:313
+const {{$TokenKindName}} {{$TokenKind}} = {{$TokenKind}}(-{{$tokenKindValue}})
+{{ $tokenKindValue = (iadd $tokenKindValue 1) }}
+
+//line ll1.parser.go.tpl:316
+func ( *{{$Lexer}}) Lex{{$TokenKindName}}() ({{$TokenKind}}, error) {
+    result := {{$TokenKindName}}
+    return result, nil
+}
+
+{{ else }}
+{{ $RuleType := ( printf "%s%s" $prefix $ruleName) }}
+
+//line ll1.parser.go.tpl:324
+type {{$RuleType}} struct {    
+}
+
+
+//line ll1.parser.go.tpl:328
+func ( *{{$Parser}}) Parse{{$RuleType}}() ({{$RuleType}}, error) {
+    result := {{$RuleType}} {}
+    return result, nil 
+}
+{{end}}
+
+{{ end }}
+

+ 335 - 0
ll1.parser.go.tpl

@@ -0,0 +1,335 @@
+{{- /* This template generates a recursive descent parser based on the */ -}}
+{{- /* information about the LL(1) grammar processed by the ll1 tool.  */ -}}
+
+/* 
+ * {{.OutName}}: Parser for the {{.Grammar.Top.Name}} grammar. 
+ * Generated by the ll1 tool from {{.InName}} at {{Now}}.
+ * Based on template: {{.Templates}}
+ * Uses a scanner
+ * 
+ * Available definition keys at template expansion: 
+ * {{.Definitions}}
+ * 
+ * DO NOT EDIT. 
+ */
+package {{ .Package }}
+
+
+{{ range .Import }}
+import "{{.}}"
+{{ end }}
+import "io"
+import "os"
+import "fmt"
+
+{{$prefix := .Prefix }}
+
+{{- $Parser := ( printf "%s%s" $prefix "Parser") -}}
+{{- $ParserError := ( printf "%s%s" $prefix "ParserError") -}}
+{{- $Lexer := ( printf "%s%s" $prefix "Lexer") -}}
+{{- $TokenKind := ( printf "%s%s" $prefix "TokenKind") -}}
+{{- $Position := ( printf "%s%s" $prefix "Position") -}}
+{{- $Token := ( printf "%s%s" $prefix "Token") -}}
+{{- $Value := ( printf "%s%s" $prefix "Value") -}}
+
+
+// {{$Value}}  is the lexical value of a lexer token. 
+{{if .ValueType }}
+
+type {{$Value}} = {{.ValueType}}
+{{ else }}
+// This is based on strings as a default.
+
+type {{$Value}} = string
+{{ end }}
+
+
+{{if eq .LexerType "scanner.Scanner"}}
+// {{$Position}} is a position within a source file. Since the lexer is based on 
+// text/scanner, we use that package's Position.
+
+type {{$Position}} = scanner.Position
+{{else}}
+// {{$Position}} is a position within a source file. 
+
+type {{$Position}} struct {
+	Filename string // filename, if any
+	Offset   int    // byte offset, starting at 0
+	Line     int    // line number, starting at 1
+	Column   int    // column number, starting at 1 (character count per line)
+}
+{{end}}
+
+
+// {{$TokenKind}} is the kind or type of a token.
+// This has rune as the underlying type so one-character tokens can be easily 
+// supported. EOF will be 65535 (i.e., -1 cast to rune). Non-character token 
+// kinds will start from 65533 down (i.e -3, -4, -5, etc).
+
+type {{$TokenKind}} rune
+
+
+// No{{$TokenKind}} means "no token kind" i.e. no token. 
+
+const No{{$TokenKind}} {{$TokenKind}} = {{$TokenKind}}(0)
+// {{$TokenKind}}EOF means the end of the input. 
+
+const {{$TokenKind}}EOF {{$TokenKind}} = {{$TokenKind}}(-1)
+// {{$TokenKind}}Error means a parsing or lexing error was encountered. 
+
+const {{$TokenKind}}Error {{$TokenKind}} = {{$TokenKind}}(-2)
+
+
+
+// Convert token kind to a string representation
+
+func (tk {{$TokenKind}}) String() string {
+    {{if eq .LexerType "scanner.Scanner"}}
+        return scanner.TokenString(rune(tk))
+    {{else}}
+    switch (tk) {
+        case No{{$TokenKind}}: return "NoToken"
+        case {{$TokenKind}}EOF: return "EOF"
+        {{ range .Grammar.Rules -}}
+        {{- $ruleName := .Name -}}
+        {{- if .IsTerminal -}}
+        {{- $TokenKindName := ( printf "%s%s" $TokenKind $ruleName) -}}    
+        case {{$TokenKindName}}: return "{{$TokenKindName}}"
+        {{end}}
+        {{end}}
+        default:
+            return fmt.Sprintf("TokenKind(%d)", int(tk))
+    }
+    {{end}}
+}
+
+
+// {{$Token}} is the result of a single lexical analysis step by the lexer.
+
+type {{$Token}} struct {
+	{{$Position}}   // Position in the source where the token was found.
+	{{$TokenKind}}  // Type of the token
+	{{$Value}}      // Value of the token
+}
+
+
+// Make{{$Token}} makes a token with the given position, type and value.
+
+func Make{{$Token}}(pos {{$Position}}, typ {{$TokenKind}}, val {{$Value}}) {{$Token}} {
+    return {{$Token}}{ pos, typ, val}
+}
+
+// {{$Lexer}} performs the lexical analysis of the input.
+
+type {{$Lexer}} struct {
+    // Embed {{.LexerType}}
+    {{.LexerType}}
+    Filename string    
+}
+
+{{if eq .LexerType "scanner.Scanner"}}
+// New{{$Lexer}}FromReader creates a new lexer for the given parser and input.
+
+func New{{$Lexer}}FromReader(parser *{{$Parser}}, reader io.Reader, filename string) *{{$Lexer}} {
+    lexer := &{{$Lexer}}{}
+    lexer.Filename = filename
+    lexer.Scanner.Init(reader)
+    lexer.Scanner.Mode = scanner.GoTokens
+    lexer.Scanner.Error = func (s *scanner.Scanner, msg string) {
+        parser.Panicf("%s: scanner error: %s, %s", s.Position, s.TokenText(), msg)
+    }
+    // XXX: needs to be generated from the identifier rule in the syntax!
+    lexer.Scanner.IsIdentRune = func(ch rune, i int) bool {
+        if i == 0 {
+            return unicode.IsLetter(ch)
+        }    
+        return unicode.IsLetter(ch) || 
+               unicode.IsNumber(ch) || 
+                          ch == '_' || 
+                          ch == '-'
+    }
+    return lexer
+}
+
+
+func (lex *{{$Lexer}}) Lex() {{$Token}} {
+    scanned := lex.Scanner.Scan()
+    pos := lex.Scanner.Position
+    pos.Filename = lex.Filename    
+    value := lex.Scanner.TokenText()
+    // Get rid of the quotes
+    if scanned == scanner.Char || 
+       scanned == scanner.String || 
+       scanned == scanner.RawString {
+           value = value[1:len(value) - 1]
+    }
+    token := {{$Token}} { 
+        {{$TokenKind}}: {{$TokenKind}}(scanned), 
+        {{$Value}}: value, 
+        {{$Position}}: pos,
+    }
+    return token
+}
+{{else}}
+// Please provide the following functions:
+//
+// * Your own lexer creation function with the following signature: 
+// New{{$Lexer}}FromReader(parser *{{$Parser}}, reader io.Reader, filename string) *{{$Lexer}} 
+//
+// * Your own lexing function with the type
+// func (lex *{{$Lexer}}) Lex() {{$Token}}
+{{end}}
+
+
+
+// {{$Parser}} parses the input and returns a parse tree, 
+// based on the rules in {{.InName}}
+
+type {{$Parser}} struct {
+    reader io.Reader
+    lexer *{{$Lexer}}
+    current {{$Token}}
+    Errors []{{$ParserError}}
+    Filename string
+    Debug io.Writer
+}
+
+
+func New{{$Parser}}FromReader(reader io.Reader, filename string, debug bool) *{{$Parser}} {
+    parser := &{{$Parser}}{}
+    parser.lexer = New{{$Lexer}}FromReader(parser, reader, filename)
+    parser.Filename = filename
+    
+    parser.current.{{$TokenKind}} = No{{$TokenKind}}
+    parser.Debug = nil
+    if debug {
+        parser.Debug = os.Stderr
+    }
+    return parser
+}
+
+
+// Advances the parser. Returns the current token /after/ advancing.
+
+func (p *{{$Parser}}) Advance() {{$Token}} {
+	token := p.lexer.Lex()
+    p.Debugf("Lexed token: %v", token)
+	p.current = token
+    return token
+}
+
+// {{$ParserError}} is an error encountered during parsing or lexing. 
+// The parser may panic with this type on errors that would prevent the parser 
+// from making progress.
+
+type {{$ParserError}} struct {
+    *{{$Parser}} // Parser that had the error.
+    *{{$Token}}  // Token at which the error was found
+    Chain error  // underlying error
+}
+
+
+func (pe {{$ParserError}}) Error() string {
+    // XXX will need to be improved
+	return pe.Chain.Error()
+}
+
+
+func (parser *{{$Parser}}) Errorf(message string, args ...interface{}) {{$ParserError}} {
+	err := fmt.Errorf(message, args...)
+    pe := {{$ParserError}} { 
+        {{$Parser}}: parser, 
+        {{$Token}}: &parser.current, 
+        Chain: err,
+    }
+    parser.Errors = append(parser.Errors, pe)
+    return pe
+}
+
+
+func (parser *{{$Parser}}) Panicf(message string, args ...interface{})  {
+    pe := parser.Errorf(message, args...)
+    panic(pe)
+}
+
+
+
+func (p *{{$Parser}}) Debugf(message string, args ...interface{})  {
+    if p.Debug != nil {
+        fmt.Fprintf(p.Debug, message, args...)
+    }
+}
+
+/* Looks at the current token and advances the lexer if the token is of any of
+the token kinds given in kinds. In this case it will return the accepted
+token and advance the parser. Otherwise, it will call parser.Panicf.*/
+
+func (parser *{{$Parser}}) Require(kinds ...{{$TokenKind}}) {{$Token}} {
+    parser.Debugf("Require: %v\n", kinds)
+	if parser.current.{{$TokenKind}} == {{$TokenKind}}(0) {
+		parser.Advance()
+	}
+	
+	expected := ""
+	sep := ""
+	for _, kind := range kinds {
+		if kind == parser.current.{{$TokenKind}} {
+			accepted := parser.current
+			parser.Advance()
+			return accepted
+		}
+		expected = fmt.Sprintf("%s%s%s", expected, sep, kind.String())
+		sep = ", "
+	}
+	
+	parser.Panicf("error: expected one of the following: %s", expected)
+	return {{$Token}}{}
+}
+
+func (parser *{{$Parser}}) NextIs(kinds ...{{$TokenKind}}) bool {
+    parser.Debugf("NextIs: %v\n", kinds)
+    if (parser.current.{{$TokenKind}} == 0) {
+		parser.Advance()
+	}    
+    for _, kind := range kinds {
+        if kind == parser.current.{{$TokenKind}} {
+            return true
+        }
+    }
+    return false
+}
+
+{{ $tokenKindValue := 2 }}
+
+{{ range .Grammar.Rules -}}
+{{- $ruleName := .Name -}}
+{{ if .Template }}
+// Expanded from template of rule {{$ruleName}}
+{{ .Template }}
+{{ end }}
+{{- $terminal := .IsTerminal -}}
+{{- if $terminal -}}
+{{- $TokenKindName := ( printf "%s%s" $TokenKind $ruleName) -}}
+
+const {{$TokenKindName}} {{$TokenKind}} = {{$TokenKind}}(-{{$tokenKindValue}})
+{{ $tokenKindValue = (iadd $tokenKindValue 1) }}
+
+func ( *{{$Lexer}}) Lex{{$TokenKindName}}() ({{$TokenKind}}, error) {
+    result := {{$TokenKindName}}
+    return result, nil
+}
+
+{{ else }}
+{{ $RuleType := ( printf "%s%s" $prefix $ruleName) }}
+
+type {{$RuleType}} struct {    
+}
+
+
+func ( *{{$Parser}}) Parse{{$RuleType}}() ({{$RuleType}}, error) {
+    result := {{$RuleType}} {}
+    return result, nil 
+}
+{{end}}
+
+{{ end }}
+

+ 648 - 0
ll1_parser.go

@@ -0,0 +1,648 @@
+/* 
+ * ll1_parser.go: Parser for the Grammar grammar. 
+ * Generated by the ll1 tool from ll1.ll1 at 2020-08-28 18:00:14.138349709 +0200 CEST m=+0.001367333.
+ * Based on template: ll1.parser.go.lined.tpl
+ * Uses a scanner
+ * 
+ * Available definition keys at template expansion: 
+ * [Grammar Import InName LexerType OutName Package Parser Prefix Templates]
+ * 
+ * DO NOT EDIT. 
+ */
+package main
+
+
+
+import "text/scanner"
+
+import "unicode"
+
+import "io"
+import "os"
+import "fmt"
+
+// Ll1Value  is the lexical value of a lexer token. 
+
+// This is based on strings as a default.
+
+//line ll1.parser.go.tpl:43
+type Ll1Value = string
+
+
+
+
+// Ll1Position is a position within a source file. Since the lexer is based on 
+// text/scanner, we use that package's Position.
+
+//line ll1.parser.go.tpl:51
+type Ll1Position = scanner.Position
+
+
+
+// Ll1TokenKind is the kind or type of a token.
+// This has rune as the underlying type so one-character tokens can be easily 
+// supported. EOF will be 65535 (i.e., -1 cast to rune). Non-character token 
+// kinds will start from 65533 down (i.e -3, -4, -5, etc).
+
+//line ll1.parser.go.tpl:69
+type Ll1TokenKind rune
+
+
+// NoLl1TokenKind means "no token kind" i.e. no token. 
+
+//line ll1.parser.go.tpl:74
+const NoLl1TokenKind Ll1TokenKind = Ll1TokenKind(0)
+// Ll1TokenKindEOF means the end of the input. 
+
+//line ll1.parser.go.tpl:77
+const Ll1TokenKindEOF Ll1TokenKind = Ll1TokenKind(-1)
+// Ll1TokenKindError means a parsing or lexing error was encountered. 
+
+//line ll1.parser.go.tpl:80
+const Ll1TokenKindError Ll1TokenKind = Ll1TokenKind(-2)
+
+
+
+// Convert token kind to a string representation
+
+//line ll1.parser.go.tpl:86
+func (tk Ll1TokenKind) String() string {
+    
+        return scanner.TokenString(rune(tk))
+    
+}
+
+
+// Ll1Token is the result of a single lexical analysis step by the lexer.
+
+//line ll1.parser.go.tpl:109
+type Ll1Token struct {
+	Ll1Position   // Position in the source where the token was found.
+	Ll1TokenKind  // Type of the token
+	Ll1Value      // Value of the token
+}
+
+
+// MakeLl1Token makes a token with the given position, type and value.
+
+//line ll1.parser.go.tpl:118
+func MakeLl1Token(pos Ll1Position, typ Ll1TokenKind, val Ll1Value) Ll1Token {
+    return Ll1Token{ pos, typ, val}
+}
+
+// Ll1Lexer performs the lexical analysis of the input.
+
+//line ll1.parser.go.tpl:124
+type Ll1Lexer struct {
+    // Embed scanner.Scanner
+    scanner.Scanner
+    Filename string    
+}
+
+
+// NewLl1LexerFromReader creates a new lexer for the given parser and input.
+
+//line ll1.parser.go.tpl:133
+func NewLl1LexerFromReader(parser *Ll1Parser, reader io.Reader, filename string) *Ll1Lexer {
+    lexer := &Ll1Lexer{}
+    lexer.Filename = filename
+    lexer.Scanner.Init(reader)
+    lexer.Scanner.Mode = scanner.GoTokens
+    lexer.Scanner.Error = func (s *scanner.Scanner, msg string) {
+        parser.Panicf("%s: scanner error: %s, %s", s.Position, s.TokenText(), msg)
+    }
+    // XXX: needs to be generated from the identifier rule in the syntax!
+    lexer.Scanner.IsIdentRune = func(ch rune, i int) bool {
+        if i == 0 {
+            return unicode.IsLetter(ch)
+        }    
+        return unicode.IsLetter(ch) || 
+               unicode.IsNumber(ch) || 
+                          ch == '_' || 
+                          ch == '-'
+    }
+    return lexer
+}
+
+
+//line ll1.parser.go.tpl:155
+func (lex *Ll1Lexer) Lex() Ll1Token {
+    scanned := lex.Scanner.Scan()
+    pos := lex.Scanner.Position
+    pos.Filename = lex.Filename    
+    value := lex.Scanner.TokenText()
+    // Get rid of the quotes
+    if scanned == scanner.Char || 
+       scanned == scanner.String || 
+       scanned == scanner.RawString {
+           value = value[1:len(value) - 1]
+    }
+    token := Ll1Token { 
+        Ll1TokenKind: Ll1TokenKind(scanned), 
+        Ll1Value: value, 
+        Ll1Position: pos,
+    }
+    return token
+}
+
+
+
+
+// Ll1Parser parses the input and returns a parse tree, 
+// based on the rules in ll1.ll1
+
+//line ll1.parser.go.tpl:188
+type Ll1Parser struct {
+    reader io.Reader
+    lexer *Ll1Lexer
+    current Ll1Token
+    Errors []Ll1ParserError
+    Filename string
+    Debug io.Writer
+}
+
+
+//line ll1.parser.go.tpl:198
+func NewLl1ParserFromReader(reader io.Reader, filename string, debug bool) *Ll1Parser {
+    parser := &Ll1Parser{}
+    parser.lexer = NewLl1LexerFromReader(parser, reader, filename)
+    parser.Filename = filename
+    
+    parser.current.Ll1TokenKind = NoLl1TokenKind
+    parser.Debug = nil
+    if debug {
+        parser.Debug = os.Stderr
+    }
+    return parser
+}
+
+
+// Advances the parser. Returns the current token /after/ advancing.
+
+//line ll1.parser.go.tpl:214
+func (p *Ll1Parser) Advance() Ll1Token {
+	token := p.lexer.Lex()
+    p.Debugf("Lexed token: %v", token)
+	p.current = token
+    return token
+}
+
+// Ll1ParserError is an error encountered during parsing or lexing. 
+// The parser may panic with this type on errors that would prevent the parser 
+// from making progress.
+
+//line ll1.parser.go.tpl:225
+type Ll1ParserError struct {
+    *Ll1Parser // Parser that had the error.
+    *Ll1Token  // Token at which the error was found
+    Chain error  // underlying error
+}
+
+
+//line ll1.parser.go.tpl:232
+func (pe Ll1ParserError) Error() string {
+    // XXX will need to be improved
+	return pe.Chain.Error()
+}
+
+
+//line ll1.parser.go.tpl:238
+func (parser *Ll1Parser) Errorf(message string, args ...interface{}) Ll1ParserError {
+	err := fmt.Errorf(message, args...)
+    pe := Ll1ParserError { 
+        Ll1Parser: parser, 
+        Ll1Token: &parser.current, 
+        Chain: err,
+    }
+    parser.Errors = append(parser.Errors, pe)
+    return pe
+}
+
+
+//line ll1.parser.go.tpl:250
+func (parser *Ll1Parser) Panicf(message string, args ...interface{})  {
+    pe := parser.Errorf(message, args...)
+    panic(pe)
+}
+
+
+
+//line ll1.parser.go.tpl:257
+func (p *Ll1Parser) Debugf(message string, args ...interface{})  {
+    if p.Debug != nil {
+        fmt.Fprintf(p.Debug, message, args...)
+    }
+}
+
+/* Looks at the current token and advances the lexer if the token is of any of
+the token kinds given in kinds. In this case it will return the accepted
+token and advance the parser. Otherwise, it will call parser.Panicf.*/
+
+//line ll1.parser.go.tpl:267
+func (parser *Ll1Parser) Require(kinds ...Ll1TokenKind) Ll1Token {
+    parser.Debugf("Require: %v\n", kinds)
+	if parser.current.Ll1TokenKind == NoLl1TokenKind {
+		parser.Advance()
+	}
+	
+	expected := ""
+	sep := ""
+	for _, kind := range kinds {
+		if kind == parser.current.Ll1TokenKind {
+			accepted := parser.current
+			parser.Advance()
+			return accepted
+		}
+		expected = fmt.Sprintf("%s%s%s", expected, sep, kind.String())
+		sep = ", "
+	}
+	
+	parser.Panicf("error: expected one of the following: %s", expected)
+	return Ll1Token{}
+}
+
+//line ll1.parser.go.tpl:288
+func (parser *Ll1Parser) NextIs(kinds ...Ll1TokenKind) bool {
+    parser.Debugf("NextIs: %v\n", kinds)
+    if parser.current.Ll1TokenKind == NoLl1TokenKind {
+		parser.Advance()
+	}    
+    for _, kind := range kinds {
+        if kind == parser.current.Ll1TokenKind {
+            return true
+        }
+    }
+    return false
+}
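+
+/* A minimal usage sketch (hand-written, not generated from the grammar):
+NextIs looks ahead, Require consumes. For a rule Template -> rawString | epsilon
+one could write:
+
+    func (parser *Ll1Parser) parseTemplate() string {
+        if parser.NextIs(Ll1TokenKindrawString) {
+            return parser.Require(Ll1TokenKindrawString).Ll1Value
+        }
+        return "" // epsilon: the template is optional
+    }
+*/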
+
+
+
+
+
+
+//line ll1.parser.go.tpl:324
+type Ll1Grammar struct {    
+}
+
+
+//line ll1.parser.go.tpl:328
+func ( *Ll1Parser) ParseLl1Grammar() (Ll1Grammar, error) {
+    result := Ll1Grammar {}
+    return result, nil 
+}
+
+
+
+
+
+//line ll1.parser.go.tpl:324
+type Ll1Rules struct {    
+}
+
+
+//line ll1.parser.go.tpl:328
+func ( *Ll1Parser) ParseLl1Rules() (Ll1Rules, error) {
+    result := Ll1Rules {}
+    return result, nil 
+}
+
+
+
+
+
+//line ll1.parser.go.tpl:324
+type Ll1OptRules struct {    
+}
+
+
+//line ll1.parser.go.tpl:328
+func ( *Ll1Parser) ParseLl1OptRules() (Ll1OptRules, error) {
+    result := Ll1OptRules {}
+    return result, nil 
+}
+
+
+
+
+
+//line ll1.parser.go.tpl:324
+type Ll1Rule struct {    
+}
+
+
+//line ll1.parser.go.tpl:328
+func ( *Ll1Parser) ParseLl1Rule() (Ll1Rule, error) {
+    result := Ll1Rule {}
+    return result, nil 
+}
+
+
+
+
+
+//line ll1.parser.go.tpl:324
+type Ll1Name struct {    
+}
+
+
+//line ll1.parser.go.tpl:328
+func ( *Ll1Parser) ParseLl1Name() (Ll1Name, error) {
+    result := Ll1Name {}
+    return result, nil 
+}
+
+
+
+
+
+//line ll1.parser.go.tpl:324
+type Ll1Template struct {    
+}
+
+
+//line ll1.parser.go.tpl:328
+func ( *Ll1Parser) ParseLl1Template() (Ll1Template, error) {
+    result := Ll1Template {}
+    return result, nil 
+}
+
+
+
+
+
+//line ll1.parser.go.tpl:324
+type Ll1Definition struct {    
+}
+
+
+//line ll1.parser.go.tpl:328
+func ( *Ll1Parser) ParseLl1Definition() (Ll1Definition, error) {
+    result := Ll1Definition {}
+    return result, nil 
+}
+
+
+
+
+
+//line ll1.parser.go.tpl:324
+type Ll1Alternates struct {    
+}
+
+
+//line ll1.parser.go.tpl:328
+func ( *Ll1Parser) ParseLl1Alternates() (Ll1Alternates, error) {
+    result := Ll1Alternates {}
+    return result, nil 
+}
+
+
+
+
+
+//line ll1.parser.go.tpl:324
+type Ll1OptSequences struct {    
+}
+
+
+//line ll1.parser.go.tpl:328
+func ( *Ll1Parser) ParseLl1OptSequences() (Ll1OptSequences, error) {
+    result := Ll1OptSequences {}
+    return result, nil 
+}
+
+
+
+
+
+//line ll1.parser.go.tpl:324
+type Ll1Sequence struct {    
+}
+
+
+//line ll1.parser.go.tpl:328
+func ( *Ll1Parser) ParseLl1Sequence() (Ll1Sequence, error) {
+    result := Ll1Sequence {}
+    return result, nil 
+}
+
+
+
+
+
+//line ll1.parser.go.tpl:324
+type Ll1OptElements struct {    
+}
+
+
+//line ll1.parser.go.tpl:328
+func ( *Ll1Parser) ParseLl1OptElements() (Ll1OptElements, error) {
+    result := Ll1OptElements {}
+    return result, nil 
+}
+
+
+
+
+
+//line ll1.parser.go.tpl:324
+type Ll1Element struct {    
+}
+
+
+//line ll1.parser.go.tpl:328
+func ( *Ll1Parser) ParseLl1Element() (Ll1Element, error) {
+    result := Ll1Element {}
+    return result, nil 
+}
+
+
+
+
+
+//line ll1.parser.go.tpl:324
+type Ll1Parenthesis struct {    
+}
+
+
+//line ll1.parser.go.tpl:328
+func ( *Ll1Parser) ParseLl1Parenthesis() (Ll1Parenthesis, error) {
+    result := Ll1Parenthesis {}
+    return result, nil 
+}
+
+
+//line ll1.parser.go.tpl:313
+const Ll1TokenKinddot Ll1TokenKind = Ll1TokenKind(-2)
+
+
+//line ll1.parser.go.tpl:316
+func ( *Ll1Lexer) LexLl1TokenKinddot() (Ll1TokenKind, error) {
+    result := Ll1TokenKinddot
+    return result, nil
+}
+
+
+
+//line ll1.parser.go.tpl:313
+const Ll1TokenKindor Ll1TokenKind = Ll1TokenKind(-3)
+
+
+//line ll1.parser.go.tpl:316
+func ( *Ll1Lexer) LexLl1TokenKindor() (Ll1TokenKind, error) {
+    result := Ll1TokenKindor
+    return result, nil
+}
+
+
+
+//line ll1.parser.go.tpl:313
+const Ll1TokenKindidentifier Ll1TokenKind = Ll1TokenKind(-4)
+
+
+//line ll1.parser.go.tpl:316
+func ( *Ll1Lexer) LexLl1TokenKindidentifier() (Ll1TokenKind, error) {
+    result := Ll1TokenKindidentifier
+    return result, nil
+}
+
+
+
+//line ll1.parser.go.tpl:313
+const Ll1TokenKindruleName Ll1TokenKind = Ll1TokenKind(-5)
+
+
+//line ll1.parser.go.tpl:316
+func ( *Ll1Lexer) LexLl1TokenKindruleName() (Ll1TokenKind, error) {
+    result := Ll1TokenKindruleName
+    return result, nil
+}
+
+
+
+//line ll1.parser.go.tpl:313
+const Ll1TokenKindterminalName Ll1TokenKind = Ll1TokenKind(-6)
+
+
+//line ll1.parser.go.tpl:316
+func ( *Ll1Lexer) LexLl1TokenKindterminalName() (Ll1TokenKind, error) {
+    result := Ll1TokenKindterminalName
+    return result, nil
+}
+
+
+
+//line ll1.parser.go.tpl:313
+const Ll1TokenKindepsilon Ll1TokenKind = Ll1TokenKind(-7)
+
+
+//line ll1.parser.go.tpl:316
+func ( *Ll1Lexer) LexLl1TokenKindepsilon() (Ll1TokenKind, error) {
+    result := Ll1TokenKindepsilon
+    return result, nil
+}
+
+
+
+//line ll1.parser.go.tpl:313
+const Ll1TokenKindarrow Ll1TokenKind = Ll1TokenKind(-8)
+
+
+//line ll1.parser.go.tpl:316
+func ( *Ll1Lexer) LexLl1TokenKindarrow() (Ll1TokenKind, error) {
+    result := Ll1TokenKindarrow
+    return result, nil
+}
+
+
+
+//line ll1.parser.go.tpl:313
+const Ll1TokenKindliteral Ll1TokenKind = Ll1TokenKind(-9)
+
+
+//line ll1.parser.go.tpl:316
+func ( *Ll1Lexer) LexLl1TokenKindliteral() (Ll1TokenKind, error) {
+    result := Ll1TokenKindliteral
+    return result, nil
+}
+
+
+
+//line ll1.parser.go.tpl:313
+const Ll1TokenKindstringLiteral Ll1TokenKind = Ll1TokenKind(-10)
+
+
+//line ll1.parser.go.tpl:316
+func ( *Ll1Lexer) LexLl1TokenKindstringLiteral() (Ll1TokenKind, error) {
+    result := Ll1TokenKindstringLiteral
+    return result, nil
+}
+
+
+
+//line ll1.parser.go.tpl:313
+const Ll1TokenKindcharLiteral Ll1TokenKind = Ll1TokenKind(-11)
+
+
+//line ll1.parser.go.tpl:316
+func ( *Ll1Lexer) LexLl1TokenKindcharLiteral() (Ll1TokenKind, error) {
+    result := Ll1TokenKindcharLiteral
+    return result, nil
+}
+
+
+
+//line ll1.parser.go.tpl:313
+const Ll1TokenKindrawString Ll1TokenKind = Ll1TokenKind(-12)
+
+
+//line ll1.parser.go.tpl:316
+func ( *Ll1Lexer) LexLl1TokenKindrawString() (Ll1TokenKind, error) {
+    result := Ll1TokenKindrawString
+    return result, nil
+}
+
+
+
+//line ll1.parser.go.tpl:313
+const Ll1TokenKindwhiteSpace Ll1TokenKind = Ll1TokenKind(-13)
+
+
+//line ll1.parser.go.tpl:316
+func ( *Ll1Lexer) LexLl1TokenKindwhiteSpace() (Ll1TokenKind, error) {
+    result := Ll1TokenKindwhiteSpace
+    return result, nil
+}
+
+
+
+//line ll1.parser.go.tpl:313
+const Ll1TokenKindlineComment Ll1TokenKind = Ll1TokenKind(-14)
+
+
+//line ll1.parser.go.tpl:316
+func ( *Ll1Lexer) LexLl1TokenKindlineComment() (Ll1TokenKind, error) {
+    result := Ll1TokenKindlineComment
+    return result, nil
+}
+
+
+
+
+// Expanded from template of rule handCoded
+
+// Not implemented
+
+//line ll1.parser.go.tpl:313
+const Ll1TokenKindhandCoded Ll1TokenKind = Ll1TokenKind(-15)
+
+
+//line ll1.parser.go.tpl:316
+func ( *Ll1Lexer) LexLl1TokenKindhandCoded() (Ll1TokenKind, error) {
+    result := Ll1TokenKindhandCoded
+    return result, nil
+}
+
+
+
+
+

+ 347 - 0
main.go

@@ -0,0 +1,347 @@
+// ll1 is a tool to parse LL1 grammar definitions and then to use them 
+// to generate code or reports using Go templates. 
+// Inside the ll1 templates, the following template functions are available:
+// - Most functions from the strings package (see go doc strings).
+// - CompileRegexp compiles a regexp package regexp which can be used as such.
+// - ToString converts anything that isn't a string to a string.
+// - NewMap creates a map based on its arguments, which alternate between 
+//   string keys and interface{} values. This is handy to pass multiple 
+//   arguments to a sub-template.
+// - NewList creates a list from the given arguments.
+package main
+
+import "flag"
+import "os"
+import "strings"
+import "text/template"
+import "fmt"
+import "path"
+import "sort"
+
+func showUsage() {    
+    fmt.Fprintf(flag.CommandLine.Output(), 
+                "%s: %s [options] input_file.ll1 [template_file.ext*]\n",
+                os.Args[0], os.Args[0])
+    
+    fmt.Fprintf(flag.CommandLine.Output(), 
+                "\n  [options] may be one of the following:\n\n")
+	flag.PrintDefaults()
+    fmt.Fprintf(flag.CommandLine.Output(), "\n")
+}
+
+const helpText = `
+ll1 is a tool to parse and check LL(1) specifications, and to generate 
+code or reports using Go templates based on these specifications. 
+ll1 specifications must contain a definition for an ll1 grammar, and 
+may optionally also specify a lexer for that grammar.
+
+Usage: 
+    ll1 [options] input_file.ll1 [template_file.ext*]
+
+The [options] are:
+
+    -a file
+        Name of output file to append. Takes precedence over -o.
+    -d definition
+    	Add a definition for the template, in the form of key:value or 
+        []key:value. Keys that start with a [] are arrays and can be 
+        concatenated to by specifying the same definition key again.
+        Non array keys will be overwritten if they are specified again. 
+    -D
+        Show debug info. Shows the scanned tokens as well.
+    -h, -help
+        Shows the help page.
+    -l format
+        Inject line directives with the given format before template expansion.
+    -o file
+        Name of output file to overwrite. 
+    -t file
+    	Template file to expand. This may be repeated to make use 
+        of several templates to generate one output file.
+    -v
+    	Be more verbose.
+
+The names of template files may be given with the -t option, or after the 
+ll1 input file.
+
+The syntax of an LL1 grammar itself is: 
+    
+    Specification -> Grammar OptLexer.
+    Grammar -> Rules.
+    Rules -> Rule OptRules .
+    OptRules -> dot Rules | epsilon.
+    Rule -> Name arrow Definition Template.
+    Name -> ruleName .
+    Template -> rawString | epsilon .
+    // Alternates consist of sequences.
+    Definition -> Alternates . 
+    Alternates -> Sequence OptSequences .
+    OptSequences -> or Alternates | epsilon.
+    Sequence -> Element OptElements . 
+    OptElements -> Element OptElements | epsilon .
+    Element -> Parenthesis .
+    Element -> Name .
+    Element -> literal .
+    Parenthesis -> '(' Definition ')' .
+    OptLexer -> LexerTerminal OptLexerTerminals | epsilon .
+    LexerTerminal -> terminalName arrow LexerDefinition Template .
+    LexerDefinition -> LexerAlternates . 
+    LexerAlternates -> LexerPattern OptLexerMatches .
+    OptLexerMatches -> or LexerPattern | epsilon.
+    LexerPattern -> literal .
+    OptElements -> Element OptElements | epsilon .
+    Element -> Parenthesis .
+    Element -> Name .
+    Element -> literal /*| Rule */.
+    // Lexer specification starts here:
+    dot          -> '.'
+    or           -> '|'
+    literal      -> characterLiteral | stringLiteral
+    ruleName     -> "re:[[:isUpper:]][[:isAlNum]]"
+    terminalName -> "re:[[:isLower:]][[:isAlNum]]"
+    epsilon      -> "epsilon" | 'ε'
+    arrow        -> "->" | '→'
+
+The syntax of an ll1 grammar has the following elements:  
+  - //comment : Line comments start with //, /*block comments*/ are C-like.
+  - RuleName  : names that start with an upper case letter are 
+                rule names or nonterminals defined by the grammar.
+  - terminal  : names that start with a lower case letter are names of 
+                terminals that the lexer produces.
+  - 'l'       : single quoted strings are rune literals that the lexer produces.
+  - "literal" : double quoted strings are string literals that the lexer produces.
+  - arrow     : a literal -> or → as a separator.
+  - epsilon   : a literal "epsilon" or 'ε', which indicates the empty rule.
+                This is used in conjunction with alternates to make a rule 
+                optional.
+
+If no templates are given, ll1 simply checks the grammar and outputs a 
+simple text report to the output file.
+
+If a template is given, it will be expanded and output to the output file. 
+
+Inside the template the following variables are available: 
+  - .Grammar: contains the .Rules of the grammar.
+  - .InName: contains the name of the ll1 input file.
+  - .OutName: contains the name of the output file specified with -a or -o.
+  - .Templates: contains the names of the templates read.
+  - .Definitions: contains the keys of the available definitions.
+  - All other variables defined with -d
+    
+Inside the ll1 templates, the following template functions are available:
+  - Most functions from the strings package (see go doc strings).
+  - CompileRegexp compiles a regexp package regexp which can be used as such.
+  - ToString converts anything that isn't a string to a string.
+  - NewMap creates a map based on its arguments, which alternate between 
+    string keys and interface{} values. This is handy to pass multiple 
+    arguments to a sub-template.
+  - NewList creates a list from the given arguments.
+`
+
+func showHelp() {    
+    fmt.Fprintf(flag.CommandLine.Output(), "\n%s\n", helpText)
+}
+
+type arrayFlags []string
+
+func (i *arrayFlags) String() string {
+	return "my string representation"
+}
+
+func (i *arrayFlags) Set(value string) error {
+	*i = append(*i, value)
+	return nil
+}
+
+// The prefix for array definitions
+const definitionArrayPrefix = "[]"
+
+type definitionMap map[string]interface{}
+
+func (i *definitionMap) Set(in string) error {
+	parts := strings.SplitN(in, ":", 2)
+	if len(parts) < 2 {
+		return fmt.Errorf("Could not split definition on ':' for %s ", in)
+	}
+	key := parts[0]
+	value := parts[1]
+
+	if strings.HasPrefix(key, definitionArrayPrefix) {
+		key = strings.TrimPrefix(key, definitionArrayPrefix)
+		existing, exists := (*i)[key]
+		if !exists {
+			slice := make([]string, 0)
+			slice = append(slice, value)
+			(*i)[key] = slice
+		} else {
+			slice, isSlice := existing.([]string)
+			if isSlice {
+				slice = append(slice, value)
+				(*i)[key] = slice
+			} else {
+				return fmt.Errorf("Cannot mix array and non array definitions: %s -> %s:\n", key, value)
+			}
+		}
+	} else {
+		(*i)[key] = value
+	}
+	return nil
+}
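+
+// For example, hypothetical flags like
+//     -d Package:parser -d []Imports:fmt -d []Imports:io
+// result in the definitions {Package:"parser", Imports:["fmt", "io"]}.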
+
+func (i *definitionMap) String() string {
+	s := "{"
+	for k, v := range *i {
+        switch vv := v.(type) {
+            case string:  s = s + fmt.Sprintf("%s:%s;", k, vv)            
+            case []string: s = s + fmt.Sprintf("%s:%v;", k, vv)
+            default: s = s + fmt.Sprintf("%s:<omitted>;", k)
+        }
+	}
+    s = s + "}"
+	return s
+}
+
+func (i *definitionMap) Keys() []string {
+    res := []string{}
+	for k := range *i {
+           res = append(res, k) 
+	}
+    sort.Strings(res)
+	return res
+}
+
+
+// LL1 contains the options and variables of the ll1 program.
+type Ll1 struct { 
+    definitions definitionMap
+    templateNames arrayFlags
+    outName string
+    usedName string
+    appendName string
+    help bool
+    verbose bool
+    debug bool
+    lineFormat string
+    fout *os.File
+    parser *Parser
+    grammar *Grammar
+    tmpl *template.Template
+}
+
+func main() {
+    var err error
+    flag.Usage = showUsage
+    ll1 := Ll1{}
+    
+	// Set up a few default definitions
+    ll1.definitions = make(definitionMap)
+    ll1.definitions["Package"] = "main"
+    ll1.definitions["Prefix"] = "Ll1"
+    
+	flag.BoolVar(&ll1.verbose,      "v", false, "Be more verbose. ")
+    flag.BoolVar(&ll1.debug,        "D", false, "Show debug info. Shows the scanned tokens as well.")
+	flag.Var(&ll1.templateNames,    "t", "Template `file` to expand.")
+	flag.StringVar(&ll1.outName,    "o", "", "Name of output `file` to overwrite.")
+	flag.StringVar(&ll1.appendName, "a", "", "Name of output `file` to append.")
+    flag.StringVar(&ll1.lineFormat, "l", "", "Inject line directives before template epansion.")
+	flag.Var(&ll1.definitions,      "d", "Add a `definition` for the template, in the form of key:value or []key:value.")
+    flag.BoolVar(&ll1.help,         "h", false, "Shows the help page.")
+    flag.BoolVar(&ll1.help,         "help", false, "Shows the help page.")
+	flag.Parse()
+    
+    if ll1.help {
+        showUsage()
+        showHelp()
+        os.Exit(1)
+    }
+    
+    if len(flag.Args()) < 1 {
+        showUsage()
+        os.Exit(1)
+    }
+    
+    ll1Name := flag.Arg(0)
+    
+    // other file names after the first are templates.
+    for i := 1; i < len(flag.Args()); i++ {
+        ll1.templateNames.Set(flag.Arg(i))
+    }
+    
+    
+    // Parse grammar
+    ll1.parser, ll1.grammar, err = ParseFile(ll1Name, ll1.debug)
+    if err != nil {        
+        fmt.Fprintf(os.Stderr, "%v\n", err)
+        for _, e := range ll1.parser.Errors {
+            fmt.Fprintf(os.Stderr, "%v\n", e)
+        }
+        os.Exit(2)
+    }
+    
+    // Check grammar and report errors
+    errs := ll1.grammar.Check()
+    if len(errs) > 0 { 
+        for _, err := range errs {
+            fmt.Fprintf(os.Stderr, "%v\n", err)
+        }
+        os.Exit(5)
+    }
+    
+    // If no templates are given, output just a grammar report.
+    // Note that no output file has been opened at this point, so the
+    // report goes to standard output.
+    if len(ll1.templateNames) < 1 {
+        fmt.Fprintf(os.Stdout, "Grammar:\n%v\n", ll1.grammar)
+        os.Exit(0)
+    }
+ 
+    // parse the templates
+	if len(ll1.templateNames) > 0 {
+        name := path.Base(ll1.templateNames[0])
+        if ll1.verbose {
+            fmt.Printf("Parsing templates: %s\n", name)
+        }
+                
+        ll1.tmpl, err = template.New(name).Funcs(templateFunctionMap).ParseFiles([]string(ll1.templateNames)...)
+        if err != nil {
+            fmt.Fprintf(os.Stderr, "%s: template parsing error: %s\n", name, err)
+            os.Exit(7)
+        }
+    }
+
+    // Determine output file
+	ll1.usedName = ll1.outName
+	
+	if ll1.appendName != "" {
+		ll1.fout, err = os.OpenFile(ll1.appendName, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "Could not append to output file %s: %s\n", ll1.appendName, err)
+            os.Exit(9)
+		} else {
+			ll1.usedName = ll1.appendName
+		}
+        defer ll1.fout.Close()
+	} else if ll1.outName == "" {
+		ll1.fout = os.Stdout
+	} else {
+		ll1.fout, err = os.Create(ll1.outName)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "Could not open output file %s: %s\n", ll1.outName, err)
+            os.Exit(9)
+		}
+        defer ll1.fout.Close()
+	}
+    
+    // Set up the template definitions
+    ll1.definitions["InName"] = ll1Name
+	ll1.definitions["OutName"] = ll1.usedName
+	ll1.definitions["Templates"] = strings.Join(ll1.templateNames, "\n")
+    ll1.definitions["Grammar"] = ll1.grammar
+    ll1.definitions["Parser"] = ll1.parser
+    ll1.definitions["Definitions"] = ll1.definitions.Keys()
+
+    // And execute the template, generating output
+	err = ll1.tmpl.Execute(ll1.fout, ll1.definitions)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "%s: template execution error\n", err)
+        if ll1.usedName != "" {
+            os.Remove(ll1.usedName)
+        }
+        os.Exit(10)
+	}
+}
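+
+// Example invocation (hypothetical file names):
+//
+//     ll1 -o ll1_parser.go -d Package:main ll1.ll1 ll1.parser.go.tpl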
+

+ 19 - 0
muesli.ll1

@@ -0,0 +1,19 @@
+PROGRAM -> STATEMENTS.
+STATEMENTS -> STATEMENT eos STATEMENTS|.
+STATEMENT -> EXPRESSION OPERATION |.
+OPERATION -> operator STATEMENT |.
+EXPRESSION -> COMMAND | EVALUATION | SENDER .
+COMMAND -> NAME PARAMETERS .
+PARAMETERS -> PARAMETER PARAMETERS |.
+EVALUATION -> LITERAL | BLOCK | GETTER | SETTER | LIST | PARENTHESIS | TAKER | MAKER . 
+PARAMETER -> EVALUATION | NAME .
+PARENTHESIS -> openparen STATEMENT closeparen.
+BLOCK -> openblock STATEMENTS closeblock.
+LIST -> openlist PARAMETER closelist.
+LITERAL -> string | int | float.
+NAME -> word | symbol | type.
+SETTER -> set PARAMETER PARAMETER.
+GETTER -> get PARAMETER.
+TAKER -> take NAME NAME.
+MAKER -> make NAME PARAMETER.
+SENDER -> send NAME NAME PARAMETERS.

+ 309 - 0
parser.go

@@ -0,0 +1,309 @@
+package main
+
+import "text/scanner"
+import "fmt"
+import "io"
+import "os"
+import "unicode"
+
+// This parser is for parsing LL1 grammars, not Beo. 
+type Parser struct {
+    reader io.Reader
+    scanner scanner.Scanner
+    current Token
+    Errors []ParserError
+    Filename string
+    Debug io.Writer
+}
+
+
+func NewParserFromReader(reader io.Reader, filename string, debug bool) *Parser {
+    parser := &Parser{}
+    parser.reader = reader
+    parser.scanner.Init(parser.reader)
+    parser.Filename = filename
+    parser.current.Kind = 0
+    parser.Debug = nil
+    if debug {
+        parser.Debug = os.Stderr
+    }
+    parser.scanner.Mode = scanner.GoTokens
+    parser.scanner.Error = func(s *scanner.Scanner, msg string) {
+        parser.Panicf("%s: scanner error: %s, %s", s.Position, s.TokenText(), msg)
+    }
+    parser.scanner.IsIdentRune = func(ch rune, i int) bool {
+        if i == 0 {
+            return unicode.IsLetter(ch)
+        }    
+        return unicode.IsLetter(ch) || 
+               unicode.IsNumber(ch) || 
+                          ch == '_' || 
+                          ch == '-'
+    }
+    
+    return parser
+}
+
+func (p *Parser) Lex() Token {
+    scanned := p.scanner.Scan()
+    pos := p.scanner.Position
+    pos.Filename = p.Filename    
+    value := p.scanner.TokenText()
+    // Get rid of the quotes
+    if scanned == scanner.Char || 
+       scanned == scanner.String || 
+       scanned == scanner.RawString {
+           value = value[1:len(value) - 1]
+    }
+    token := Token {
+        scanned,
+        value, 
+        pos,
+    }
+    p.Debugf("%s: Lexed: %s\n", p.Filename, token)
+    return token
+}
+
+func (p *Parser) Advance() {
+	token := p.Lex()
+	p.current = token
+}
+
+// panic with this type on errors that would prevent the parser 
+// from making progress.
+type ParserError struct {
+    *Parser
+    *Token
+    Chain error
+}
+
+func (pe ParserError) Error() string {
+	return pe.Token.MakeError(pe.Chain.Error()).Error()
+}
+
+func (parser *Parser) Errorf(message string, args ...interface{}) ParserError {
+	err := fmt.Errorf(message, args...)
+    pe := ParserError { Parser: parser, Token:&parser.current, Chain: err }
+    parser.Errors = append(parser.Errors, pe)
+    return pe
+}
+
+func (parser *Parser) Panicf(message string, args ...interface{})  {
+    pe := parser.Errorf(message, args...)
+    panic(pe)
+}
+
+
+func (p *Parser) Debugf(message string, args ...interface{})  {
+    if p.Debug != nil {
+        fmt.Fprintf(p.Debug, message, args...)
+    }
+}
+
+
+/* Looks at the current token and advances the lexer if the token is of any of
+the token kinds given in kinds. In this case it will return the accepted
+token and advance the parser. Otherwise, it will call parser.Panicf.*/
+func (parser *Parser) Require(kinds ...rune) Token {
+    parser.Debugf("Require: %v\n", kinds)
+	if parser.current.Kind == 0 {
+		parser.Advance()
+	}
+	
+	expected := ""
+	sep := ""
+	for _, kind := range kinds {
+		if kind == parser.current.Kind {
+			accepted := parser.current
+			parser.Advance()
+			return accepted
+		}
+		expected = fmt.Sprintf("%s%s%s", expected, sep, scanner.TokenString(kind))
+		sep = ", "
+	}
+	
+	parser.Panicf("error: expected one of the following: %s", expected)
+	return Token{}
+}
+
+
+func (parser *Parser) NextIs(kinds ...rune) bool {
+    parser.Debugf("NextIs: %v\n", kinds)
+    if (parser.current.Kind == 0) {
+		parser.Advance()
+	}    
+    for _, kind := range kinds {
+        if kind == parser.current.Kind  {
+            return true
+        }
+    }
+    return false
+}
+
+/*
+Sequence -> Element Sequence | . 
+Element -> Parenthesis | Name | Literal .
+Parenthesis -> '(' Definition ')' .
+*/
+
+func (p *Parser) NextIsElement() bool {
+    return p.NextIs('(', scanner.Ident, scanner.Char, scanner.String)
+}
+
+func (p *Parser) ParseElement() (Element, error) {      
+    switch {
+        case p.NextIs('('):
+            return p.ParseParenthesis()
+        
+        case p.NextIs(scanner.Ident):
+            ident := p.Require(scanner.Ident)
+            if ident.Value == "epsilon" || ident.Value == "ε" {
+                ident.Value = "ε"
+                return Epsilon{ element { token: ident } }, nil
+            }
+            // Upper case means a nonterminal, otherwise terminal
+            if unicode.IsUpper(([]rune(ident.Value))[0]) {
+                return Nonterminal{ element { token: ident }, nil }, nil
+            }
+            return Terminal{ element { token: ident } }, nil
+        case p.NextIs(scanner.Char, scanner.String):
+            literal := p.Require(scanner.Char, scanner.String)            
+            return Literal{ Terminal { element { token: literal } } }, nil
+        default:
+            p.Panicf("error: unexpected for grammar Element: %s", p.current)
+    }
+    return nil, nil
+}
+
+func (p *Parser) ParseSequence() (*Sequence, error) {   
+    sequence  := &Sequence{}
+    for p.NextIsElement() {
+        element, err := p.ParseElement()
+        p.Debugf("Sequence parsed element: %v\n", element)
+        if err != nil {
+            return sequence, err
+        }
+        sequence.Elements = append(sequence.Elements, element)
+    }
+    return sequence, nil
+}
+
+
+func (p *Parser) ParseParenthesis() (*Definition, error) {   
+    // Parse the sub definition
+    p.Require('(')
+    sub, err := p.ParseDefinition()
+    p.Require(')')
+    return sub, err
+}
+
+func (p *Parser) ParseAlternates() (*Definition, error) {
+    alternates := &Alternates{}
+    for p.NextIsElement() { 
+        sequence, err := p.ParseSequence()
+        if err != nil {
+            return alternates, err
+        }
+        alternates.Sequences = append(alternates.Sequences, *sequence)
+        if !p.NextIs('.', scanner.RawString, scanner.EOF, ')') {
+            p.Require('|')
+        }
+    }
+    return alternates, nil
+}
+
+
+func (p *Parser) ParseDefinition() (*Definition, error) {
+    return p.ParseAlternates()
+    
+    
+    /*if p.NextIs('(') {
+        sub, err := p.ParseParenthesis()
+        if err != nil {
+            return nil, err
+        }
+    }*/
+}
+
+func (p *Parser) ParseRule() (*Rule, error) {
+    rule := &Rule{}
+    name := p.Require(scanner.Ident)
+    // XXX Require  or → or -> separator. 
+    // This is a hack, needs to be moved to a lexer.
+    if p.NextIs('→') {
+        p.Require('→')
+    } else {
+        p.Require('-')
+        p.Require('>')
+    }
+    definition, err := p.ParseDefinition()
+    if err != nil {
+        return rule, err
+    }
+    if p.NextIs(scanner.RawString) {
+        rs := p.Require(scanner.RawString)
+        rule.Template = rs.Value
+    }    
+    // require '.' terminator
+    p.Require('.')
+    rule.Name = name.Value
+    rule.Definition = *definition
+    return rule, nil
+}
+
+func (p *Parser) ParseRules() ([]*Rule, error) {
+    var rules []*Rule
+    for p.current.Kind != scanner.EOF {
+        rule, err := p.ParseRule()
+        if err != nil {
+            return rules, err
+        }
+        appended := false
+        for i, existing := range(rules) {
+            if existing.Name == rule.Name {
+                // Add to the existing definition instead.
+               existing.Definition.Sequences = append(existing.Definition.Sequences, rule.Definition.Sequences...)
+               rules[i] = existing
+               appended = true
+               break
+            }            
+        }
+        if !appended {
+            rules = append(rules, rule)
+        }
+    }    
+    return rules, nil
+}
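+
+// Note that ParseRules merges rules with the same name, so, for example,
+//     Element -> Parenthesis .
+//     Element -> Name .
+// end up as the single rule Element -> Parenthesis | Name .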
+
+func (p *Parser) Parse() (g *Grammar, e error) {
+    defer func() {
+        thrown := recover() 
+        if thrown != nil {   
+            g = nil         
+            e = thrown.(error)
+            return
+        }
+    }()
+    p.Advance()
+    rules, err := p.ParseRules()
+    if err != nil {
+        return nil, err
+    }
+    if len(rules) < 1 {
+        return nil, nil
+    }    
+    grammar := &Grammar {
+        Top: rules[0],
+        Rules: rules,
+    }
+    
+    return grammar, nil
+}
+
+func ParseFile(filename string, debug bool) (*Parser, *Grammar, error) {
+    file, err := os.Open(filename)
+    if err != nil {
+        return nil, nil, err
+    }
+    defer file.Close()
+    parser := NewParserFromReader(file, filename, debug)
+    grammar, err := parser.Parse()
+    return parser, grammar, err
+}
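+
+// A minimal usage sketch (assuming a grammar file "grammar.ll1" exists):
+//
+//     parser, grammar, err := ParseFile("grammar.ll1", false)
+//     if err != nil {
+//         // report err and parser.Errors here
+//     }
+//     errs := grammar.Check()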
+

+ 245 - 0
template_functions.go

@@ -0,0 +1,245 @@
+package main
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+	"text/template"
+    "time"
+)
+
+
+func templateNewMap(args ...interface{}) (interface{}, error) {
+	result := map[string]interface{}{}
+	if len(args)%2 != 0 {
+		return nil, fmt.Errorf("Map: expected an even amount of key/value arguments.")
+	}
+	for i := 1; i < len(args); i += 2 {
+		key, ok := args[i-1].(string)
+		if !ok {
+			return nil, fmt.Errorf("Map: key %v must be a string.", args[i-1])
+		}
+		result[key] = args[i]
+	}
+
+	return result, nil
+}
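+
+// Inside a template, NewMap can pass several values to a sub-template, e.g.
+// (a sketch; .InName is one of the predefined template variables):
+//
+//     {{ template "header" NewMap "Name" .InName "Year" 2021 }}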
+
+func templateNewList(args ...interface{}) interface{} {
+	return args
+}
+
+
+func templateToString(v interface{}) string {
+	return fmt.Sprintf("%s", v)
+}
+
+func templateCompileRegexp(reAny interface{}) (interface {}, error) {
+    reStr, ok := reAny.(string)
+    if !ok {
+			return nil, fmt.Errorf("CompileRegexp: %v must be string.", reAny)
+	}
+    re, err := regexp.Compile(reStr)
+    return re, err
+}
+
+
+/* 
+func (re *Regexp) ExpandString(dst []byte, template string, src string, match []int) []byte
+    ExpandString is like Expand but the template and source are strings. It
+    appends to and returns a byte slice in order to give the calling code
+    control over allocation.
+
+func (re *Regexp) FindAllString(s string, n int) []string
+    FindAllString is the 'All' version of FindString; it returns a slice of all
+    successive matches of the expression, as defined by the 'All' description in
+    the package comment. A return value of nil indicates no match.
+
+func (re *Regexp) FindAllStringIndex(s string, n int) [][]int
+    FindAllStringIndex is the 'All' version of FindStringIndex; it returns a
+    slice of all successive matches of the expression, as defined by the 'All'
+    description in the package comment. A return value of nil indicates no
+    match.
+
+func (re *Regexp) FindAllStringSubmatch(s string, n int) [][]string
+    FindAllStringSubmatch is the 'All' version of FindStringSubmatch; it returns
+    a slice of all successive matches of the expression, as defined by the 'All'
+    description in the package comment. A return value of nil indicates no
+    match.
+
+func (re *Regexp) FindAllStringSubmatchIndex(s string, n int) [][]int
+    FindAllStringSubmatchIndex is the 'All' version of FindStringSubmatchIndex;
+    it returns a slice of all successive matches of the expression, as defined
+    by the 'All' description in the package comment. A return value of nil
+    indicates no match.
+
+func (re *Regexp) FindString(s string) string
+    FindString returns a string holding the text of the leftmost match in s of
+    the regular expression. If there is no match, the return value is an empty
+    string, but it will also be empty if the regular expression successfully
+    matches an empty string. Use FindStringIndex or FindStringSubmatch if it is
+    necessary to distinguish these cases.
+
+func (re *Regexp) FindStringIndex(s string) (loc []int)
+    FindStringIndex returns a two-element slice of integers defining the
+    location of the leftmost match in s of the regular expression. The match
+    itself is at s[loc[0]:loc[1]]. A return value of nil indicates no match.
+
+func (re *Regexp) FindStringSubmatch(s string) []string
+    FindStringSubmatch returns a slice of strings holding the text of the
+    leftmost match of the regular expression in s and the matches, if any, of
+    its subexpressions, as defined by the 'Submatch' description in the package
+    comment. A return value of nil indicates no match.
+
+func (re *Regexp) FindStringSubmatchIndex(s string) []int
+    FindStringSubmatchIndex returns a slice holding the index pairs identifying
+    the leftmost match of the regular expression in s and the matches, if any,
+    of its subexpressions, as defined by the 'Submatch' and 'Index' descriptions
+    in the package comment. A return value of nil indicates no match.
+
+func (re *Regexp) LiteralPrefix() (prefix string, complete bool)
+    LiteralPrefix returns a literal string that must begin any match of the
+    regular expression re. It returns the boolean true if the literal string
+    comprises the entire regular expression.
+    
+func (re *Regexp) MatchString(s string) bool
+    MatchString reports whether the string s contains any match of the regular
+    expression re.
+
+ func (re *Regexp) Split(s string, n int) []string
+    Split slices s into substrings separated by the expression and returns a
+    slice of the substrings between those expression matches.
+
+    The slice returned by this method consists of all the substrings of s not
+    contained in the slice returned by FindAllString. When called on an
+    expression that contains no metacharacters, it is equivalent to
+    strings.SplitN.
+
+    Example:
+
+        s := regexp.MustCompile("a*").Split("abaabaccadaaae", 5)
+        // s: ["", "b", "b", "c", "cadaaae"]
+
+    The count determines the number of substrings to return:
+
+        n > 0: at most n substrings; the last substring will be the unsplit remainder.
+        n == 0: the result is nil (zero substrings)
+        n < 0: all substrings
+
+func (re *Regexp) String() string
+    String returns the source text used to compile the regular expression.
+
+func (re *Regexp) SubexpNames() []string
+    SubexpNames returns the names of the parenthesized subexpressions in this
+    Regexp. The name for the first sub-expression is names[1], so that if m is a
+    match slice, the name for m[i] is SubexpNames()[i]. Since the Regexp as a
+    whole cannot be named, names[0] is always the empty string. The slice should
+    not be modified.
+
+*/
+
+func iadd(i1, i2 int)  int {
+    return  i1 + i2
+}
+
+func isub(i1, i2 int)  int {
+    return  i1 - i2
+}
+
+func imul(i1, i2 int)  int {
+    return  i1 * i2
+}
+
+func idiv(i1, i2 int)  int {
+    return  i1 / i2
+}
+
+func fadd(i1, i2 float64)  float64 {
+    return  i1 + i2
+}
+
+func fsub(i1, i2 float64)  float64 {
+    return  i1 - i2
+}
+
+func fmul(i1, i2 float64)  float64 {
+    return  i1 * i2
+}
+
+func fdiv(i1, i2 float64)  float64 {
+    return  i1 / i2
+}
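+
+// These helpers enable simple arithmetic inside templates, for example
+// (assuming a hypothetical integer variable .Depth):
+//
+//     {{ imul 4 (iadd .Depth 1) }}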
+
+    
+
+var templateFunctionMap template.FuncMap = template.FuncMap{
+	"CompileRegexp":        templateCompileRegexp,
+    "Compare":              strings.Compare,
+	"Contains":             strings.Contains,
+	"ContainsAny":          strings.ContainsAny,
+	"ContainsRune":         strings.ContainsRune,
+	"Count":                strings.Count,
+	"EqualFold":            strings.EqualFold,
+	"Fields":               strings.Fields,
+	"FieldsFunc":           strings.FieldsFunc,
+	"HasPrefix":            strings.HasPrefix,
+	"HasSuffix":            strings.HasSuffix,
+	"Index":                strings.Index,
+	"IndexAny":             strings.IndexAny,
+	"IndexByte":            strings.IndexByte,
+	"IndexFunc":            strings.IndexFunc,
+	"IndexRune":            strings.IndexRune,
+	"Join":                 strings.Join,
+	"LastIndex":            strings.LastIndex,
+	"LastIndexAny":         strings.LastIndexAny,
+	"LastIndexByte":        strings.LastIndexByte,
+	"LastIndexFunc":        strings.LastIndexFunc,
+	"Map":                  strings.Map,    
+	"MatchString":          regexp.MatchString,
+	"NewMap":               templateNewMap,
+	"NewList":              templateNewList,
+    "Now":                  time.Now,
+	"Repeat":               strings.Repeat,
+	"Replace":              strings.Replace,
+	"ReplaceAll":           strings.ReplaceAll,
+	"Split":                strings.Split,
+	"SplitAfter":           strings.SplitAfter,
+	"SplitAfterN":          strings.SplitAfterN,
+	"SplitN":               strings.SplitN,
+	"Title":                strings.Title,
+	"ToLower":              strings.ToLower,
+	"ToLowerSpecial":       strings.ToLowerSpecial,
+	"ToString":             templateToString,
+	"ToTitle":              strings.ToTitle,
+	"ToTitleSpecial":       strings.ToTitleSpecial,
+	"ToUpper":              strings.ToUpper,
+	"ToUpperSpecial":       strings.ToUpperSpecial,
+	"Trim":                 strings.Trim,
+	"TrimFunc":             strings.TrimFunc,
+	"TrimLeft":             strings.TrimLeft,
+	"TrimLeftFunc":         strings.TrimLeftFunc,
+	"TrimPrefix":           strings.TrimPrefix,
+	"TrimRight":            strings.TrimRight,
+	"TrimRightFunc":        strings.TrimRightFunc,
+	"TrimSpace":            strings.TrimSpace,
+	"TrimSuffix":           strings.TrimSuffix,
+    "iadd":                 iadd,
+    "isub":                 isub,
+    "imul":                 imul,
+    "idiv":                 idiv,
+    "fadd":                 fadd,
+    "fsub":                 fsub,
+    "fmul":                 fmul,
+    "fdiv":                 fdiv,
+}
+
+