github.com/caddyserver/caddy/v2/caddyconfig/caddyfile
No package summary is available.
Package
Files: 7. Third party imports: 1. Imports from organisation: 0. Tests: 0. Benchmarks: 0.
Constants
// MatcherNameCtxKey is a key for the Dispenser's context map
// (see SetContext/GetContext); presumably it carries the current
// matcher's name to unmarshalers — confirm at call sites.
const MatcherNameCtxKey = "matcher_name"
Vars
// Regular expressions recognizing import-args placeholders in tokens.
var (
	argsRegexpIndexDeprecated = regexp.MustCompile(`args\.(.+)`)  // legacy `args.N` form (deprecated per its name)
	argsRegexpIndex           = regexp.MustCompile(`args\[(.+)]`) // current `args[N]` form
)
var (
	// spanOpen and spanClose delimit a `{$ENV_VAR}` placeholder span.
	spanOpen, spanClose = []byte{'{', '$'}, []byte{'}'}
	// envVarDefaultDelimiter separates an env var name from its
	// default value inside a placeholder, e.g. {$VAR:default}.
	envVarDefaultDelimiter = ":"
)
Interface guard
// Interface guard: compile-time check that *Adapter implements caddyconfig.Adapter.
var _ caddyconfig.Adapter = (*Adapter)(nil)
// heredocMarkerRegexp validates heredoc marker names: one or more
// ASCII letters, digits, underscores, or hyphens.
var heredocMarkerRegexp = regexp.MustCompile("^[A-Za-z0-9_-]+$")
Types
Adapter
Adapter adapts Caddyfile to Caddy JSON.
// Adapter adapts Caddyfile to Caddy JSON.
type Adapter struct {
	// ServerType evaluates parsed server blocks into a *caddy.Config
	// (see Adapt); it must be non-nil for Adapt to succeed.
	ServerType ServerType
}
Dispenser
Dispenser is a type that dispenses tokens, similarly to a lexer, except that it can do so with some notion of structure. An empty Dispenser is invalid; call NewDispenser to make a proper instance.
// Dispenser is a type that dispenses tokens, similarly to a lexer,
// except that it can do so with some notion of structure. An empty
// Dispenser is invalid; call NewDispenser to make a proper instance.
type Dispenser struct {
	tokens  []Token // the tokens to dispense
	cursor  int     // index of the current token; -1 before the first Next()
	nesting int     // current brace-nesting depth (maintained by NextBlock)

	// A map of arbitrary context data that can be used
	// to pass through some information to unmarshalers.
	context map[string]any
}
Segment
Segment is a list of tokens which begins with a directive and ends at the end of the directive (either at the end of the line, or at the end of a block it opens).
// Segment is a list of tokens which begins with a directive and ends
// at the end of the directive (either at the end of the line, or at
// the end of a block it opens).
type Segment []Token
ServerBlock
ServerBlock associates any number of keys from the head of the server block with tokens, which are grouped by segments.
// ServerBlock associates any number of keys from the head of the
// server block with tokens, which are grouped by segments.
type ServerBlock struct {
	HasBraces    bool      // whether the block was written with enclosing curly braces
	Keys         []Token   // the address/key tokens at the head of the block
	Segments     []Segment // the block's directives, one segment per directive
	IsNamedRoute bool      // whether this block defines a named route — confirm semantics with the parser
}
ServerType
ServerType is a type that can evaluate a Caddyfile and set up a caddy config.
// ServerType is a type that can evaluate a Caddyfile and set up a caddy config.
type ServerType interface {
	// Setup takes the server blocks which contain tokens,
	// as well as options (e.g. CLI flags) and creates a
	// Caddy config, along with any warnings or an error.
	Setup([]ServerBlock, map[string]any) (*caddy.Config, []caddyconfig.Warning, error)
}
Unmarshaler
Unmarshaler is a type that can unmarshal Caddyfile tokens to
set itself up for a JSON encoding. The goal of an unmarshaler
is not to set itself up for actual use, but to set itself up for
being marshaled into JSON. Caddyfile-unmarshaled values will not
be used directly; they will be encoded as JSON and then used from
that. Implementations may be able to support multiple segments
(instances of their directive or batch of tokens); typically this
means wrapping parsing logic in a loop: for d.Next() { ... }
.
More commonly, only a single segment is supported, so a simple
d.Next()
at the start should be used to consume the module
identifier token (directive name, etc).
// Unmarshaler is a type that can unmarshal Caddyfile tokens to set
// itself up for a JSON encoding. The goal of an unmarshaler is not to
// set itself up for actual use, but for being marshaled into JSON.
// Implementations that support multiple segments typically wrap their
// parsing logic in a loop: for d.Next() { ... }. More commonly, a
// single d.Next() at the start consumes the module identifier token.
type Unmarshaler interface {
	// UnmarshalCaddyfile configures the receiver from d's tokens.
	UnmarshalCaddyfile(d *Dispenser) error
}
lexer, Token
This type doesn't have documentation.
type (
	// lexer is a utility which can get values, token by
	// token, from a Reader. A token is a word, and tokens
	// are separated by whitespace. A word can be enclosed
	// in quotes if it contains whitespace.
	lexer struct {
		reader       *bufio.Reader
		token        Token // the most recently scanned token
		line         int   // current line number in the input
		skippedLines int   // lines skipped during scanning — confirm exact meaning in lexer.next
	}

	// Token represents a single parsable unit.
	Token struct {
		File          string   // name of the file the token came from
		imports       []string // chain of import statements that produced this token
		Line          int      // line number within File
		Text          string   // the token's text (without enclosing quotes)
		wasQuoted     rune     // enclosing quote character, if any
		heredocMarker string   // heredoc delimiter, if the token was a heredoc
		snippetName   string   // snippet the token was expanded from, if any
	}
)
adjacency
This type doesn't have documentation.
// adjacency maps a node in the import graph to the nodes it points to.
type adjacency map[string][]string
importGraph
This type doesn't have documentation.
// importGraph records which files import which other files, so that
// import cycles can be detected (see addEdge/willCycle).
type importGraph struct {
	nodes map[string]struct{} // set of known files/nodes
	edges adjacency           // directed edges: importer -> imported
}
parser
This type doesn't have documentation.
// parser groups tokens from a Dispenser into server blocks.
type parser struct {
	*Dispenser
	block           ServerBlock // current server block being parsed
	eof             bool        // if we encounter a valid EOF in a hard place
	definedSnippets map[string][]Token
	nesting         int
	importGraph     importGraph
}
Functions
func Format
Format formats the input Caddyfile to a standard, nice-looking appearance. It works by reading each rune of the input and taking control over all the bracing and whitespace that is written; otherwise, words, comments, placeholders, and escaped characters are all treated literally and written as they appear in the input.
// Format formats the input Caddyfile to a standard, nice-looking
// appearance. It works by reading each rune of the input and taking
// control over all the bracing and whitespace that is written; otherwise,
// words, comments, placeholders, and escaped characters are all treated
// literally and written as they appear in the input.
func Format(input []byte) []byte {
	input = bytes.TrimSpace(input)

	out := new(bytes.Buffer)
	rdr := bytes.NewReader(input)

	// three-phase heredoc state: closed -> opening (reading the
	// marker after "<<") -> opened (copying the body verbatim)
	type heredocState int

	const (
		heredocClosed  heredocState = 0
		heredocOpening heredocState = 1
		heredocOpened  heredocState = 2
	)

	var (
		last             rune // the last character that was written to the result
		space            = true // whether current/previous character was whitespace (beginning of input counts as space)
		beginningOfLine  = true // whether we are at beginning of line
		openBrace        bool // whether current word/token is or started with open curly brace
		openBraceWritten bool // if openBrace, whether that brace was written or not
		openBraceSpace   bool // whether there was a non-newline space before open brace

		newLines int // count of newlines consumed

		comment bool // whether we're in a comment
		quoted  bool // whether we're in a quoted segment
		escaped bool // whether current char is escaped

		heredoc              heredocState // whether we're in a heredoc
		heredocEscaped       bool         // whether heredoc is escaped
		heredocMarker        []rune       // the marker after "<<", e.g. EOF
		heredocClosingMarker []rune       // sliding window used to spot the closing marker

		nesting int // indentation level
	)

	write := func(ch rune) {
		out.WriteRune(ch)
		last = ch
	}
	indent := func() {
		for tabs := nesting; tabs > 0; tabs-- {
			write('\t')
		}
	}
	nextLine := func() {
		write('\n')
		beginningOfLine = true
	}

	for {
		ch, _, err := rdr.ReadRune()
		if err != nil {
			if err == io.EOF {
				break
			}
			// rdr reads from an in-memory buffer; any non-EOF error is a bug
			panic(err)
		}

		// detect whether we have the start of a heredoc
		if !quoted && !(heredoc != heredocClosed || heredocEscaped) &&
			space && last == '<' && ch == '<' {
			write(ch)
			heredoc = heredocOpening
			space = false
			continue
		}

		if heredoc == heredocOpening {
			if ch == '\n' {
				if len(heredocMarker) > 0 && heredocMarkerRegexp.MatchString(string(heredocMarker)) {
					heredoc = heredocOpened
				} else {
					// invalid marker; treat "<<" as a regular token
					heredocMarker = nil
					heredoc = heredocClosed
					nextLine()
					continue
				}
				write(ch)
				continue
			}
			if unicode.IsSpace(ch) {
				// a space means it's just a regular token and not a heredoc
				heredocMarker = nil
				heredoc = heredocClosed
			} else {
				heredocMarker = append(heredocMarker, ch)
				write(ch)
				continue
			}
		}

		// if we're in a heredoc, all characters are read&write as-is
		if heredoc == heredocOpened {
			heredocClosingMarker = append(heredocClosingMarker, ch)
			if len(heredocClosingMarker) > len(heredocMarker)+1 { // We assert that the heredocClosingMarker is followed by a unicode.Space
				heredocClosingMarker = heredocClosingMarker[1:]
			}
			// check if we're done
			if unicode.IsSpace(ch) && slices.Equal(heredocClosingMarker[:len(heredocClosingMarker)-1], heredocMarker) {
				heredocMarker = nil
				heredocClosingMarker = nil
				heredoc = heredocClosed
			} else {
				write(ch)
				if ch == '\n' {
					heredocClosingMarker = heredocClosingMarker[:0]
				}
				continue
			}
		}

		if last == '<' && space {
			space = false
		}

		if comment {
			if ch == '\n' {
				comment = false
				space = true
				nextLine()
				continue
			} else {
				write(ch)
				continue
			}
		}

		if !escaped && ch == '\\' {
			if space {
				write(' ')
				space = false
			}
			write(ch)
			escaped = true
			continue
		}

		if escaped {
			// an escaped '<' must not later be mistaken for a heredoc start
			if ch == '<' {
				heredocEscaped = true
			}
			write(ch)
			escaped = false
			continue
		}

		if quoted {
			if ch == '"' {
				quoted = false
			}
			write(ch)
			continue
		}

		if space && ch == '"' {
			quoted = true
		}

		if unicode.IsSpace(ch) {
			space = true
			heredocEscaped = false
			if ch == '\n' {
				newLines++
			}
			continue
		}
		spacePrior := space
		space = false

		//////////////////////////////////////////////////////////
		// I find it helpful to think of the formatting loop in two
		// main sections; by the time we reach this point, we
		// know we are in a "regular" part of the file: we know
		// the character is not a space, not in a literal segment
		// like a comment or quoted, it's not escaped, etc.
		//////////////////////////////////////////////////////////

		if ch == '#' {
			comment = true
		}

		if openBrace && spacePrior && !openBraceWritten {
			if nesting == 0 && last == '}' {
				nextLine()
				nextLine()
			}

			openBrace = false
			if beginningOfLine {
				indent()
			} else if !openBraceSpace {
				write(' ')
			}
			write('{')
			openBraceWritten = true
			nextLine()
			newLines = 0
			// prevent infinite nesting from ridiculous inputs (issue #4169)
			if nesting < 10 {
				nesting++
			}
		}

		switch {
		case ch == '{':
			openBrace = true
			openBraceWritten = false
			openBraceSpace = spacePrior && !beginningOfLine
			if openBraceSpace {
				write(' ')
			}
			continue

		case ch == '}' && (spacePrior || !openBrace):
			if last != '\n' {
				nextLine()
			}
			if nesting > 0 {
				nesting--
			}
			indent()
			write('}')
			newLines = 0
			continue
		}

		if newLines > 2 {
			newLines = 2
		}
		for i := 0; i < newLines; i++ {
			nextLine()
		}
		newLines = 0
		if beginningOfLine {
			indent()
		}
		if nesting == 0 && last == '}' && beginningOfLine {
			nextLine()
			nextLine()
		}

		if !beginningOfLine && spacePrior {
			write(' ')
		}

		if openBrace && !openBraceWritten {
			write('{')
			openBraceWritten = true
		}

		if spacePrior && ch == '<' {
			space = true
		}

		write(ch)

		beginningOfLine = false
	}

	// the Caddyfile does not need any leading or trailing spaces, but...
	trimmedResult := bytes.TrimSpace(out.Bytes())

	// ...Caddyfiles should, however, end with a newline because
	// newlines are significant to the syntax of the file
	return append(trimmedResult, '\n')
}
Cognitive complexity: 95, Cyclomatic complexity: 64
func FormattingDifference
FormattingDifference returns a warning and true if the formatted version is any different from the input; empty warning and false otherwise. TODO: also perform this check on imported files
// FormattingDifference returns a warning and true if the formatted
// version is any different from the input; empty warning and false
// otherwise.
// TODO: also perform this check on imported files
func FormattingDifference(filename string, body []byte) (caddyconfig.Warning, bool) {
	// replace windows-style newlines to normalize comparison;
	// ReplaceAll is the idiomatic form of Replace(..., -1)
	normalizedBody := bytes.ReplaceAll(body, []byte("\r\n"), []byte("\n"))

	formatted := Format(normalizedBody)
	if bytes.Equal(formatted, normalizedBody) {
		return caddyconfig.Warning{}, false
	}

	// find the line on which the first difference occurs
	line := 1
	for i, ch := range normalizedBody {
		if i >= len(formatted) || ch != formatted[i] {
			break
		}
		if ch == '\n' {
			line++
		}
	}

	return caddyconfig.Warning{
		File:    filename,
		Line:    line,
		Message: "Caddyfile input is not formatted; run 'caddy fmt --overwrite' to fix inconsistencies",
	}, true
}
Cognitive complexity: 11
, Cyclomatic complexity: 6
func NewDispenser
NewDispenser returns a Dispenser filled with the given tokens.
// NewDispenser returns a Dispenser filled with the given tokens.
// The cursor starts before the first token, so the first call to
// Next advances to it.
func NewDispenser(tokens []Token) *Dispenser {
	d := new(Dispenser)
	d.tokens = tokens
	d.cursor = -1
	return d
}
Cognitive complexity: 1
, Cyclomatic complexity: 1
func NewTestDispenser
NewTestDispenser parses input into tokens and creates a new Dispenser for test purposes only; any errors are fatal.
// NewTestDispenser parses input into tokens and creates a new
// Dispenser for test purposes only; any errors are fatal.
func NewTestDispenser(input string) *Dispenser {
	toks, err := allTokens("Testfile", []byte(input))
	if err != nil && err != io.EOF {
		log.Fatalf("getting all tokens from input: %v", err)
	}
	return NewDispenser(toks)
}
Cognitive complexity: 2
, Cyclomatic complexity: 3
func Parse
Parse parses the input just enough to group tokens, in order, by server block. No further parsing is performed. Server blocks are returned in the order in which they appear. Directives that do not appear in validDirectives will cause an error. If you do not want to check for valid directives, pass in nil instead.
Environment variables in {$ENVIRONMENT_VARIABLE} notation will be replaced before parsing begins.
// Parse parses the input just enough to group tokens, in order, by
// server block; no further parsing is performed. Server blocks are
// returned in the order in which they appear. Environment variables
// in {$ENVIRONMENT_VARIABLE} notation are replaced before parsing.
func Parse(filename string, input []byte) ([]ServerBlock, error) {
	// parsing must remain read-only, but env var expansion mutates the
	// underlying array, so tokenize a private copy of the input (#4422)
	buf := make([]byte, len(input))
	copy(buf, input)

	tokens, err := allTokens(filename, buf)
	if err != nil {
		return nil, err
	}

	graph := importGraph{
		nodes: make(map[string]struct{}),
		edges: make(adjacency),
	}
	p := parser{
		Dispenser:   NewDispenser(tokens),
		importGraph: graph,
	}
	return p.parseAll()
}
Cognitive complexity: 5
, Cyclomatic complexity: 2
func Tokenize
Tokenize takes bytes as input and lexes it into
a list of tokens that can be parsed as a Caddyfile.
Also takes a filename to fill the token's File as
the source of the tokens, which is important to
determine relative paths for import
directives.
// Tokenize takes bytes as input and lexes it into a list of tokens
// that can be parsed as a Caddyfile. The filename is recorded on each
// token's File field, which matters for resolving relative paths in
// import directives.
func Tokenize(input []byte, filename string) ([]Token, error) {
	var l lexer
	if err := l.load(bytes.NewReader(input)); err != nil {
		return nil, err
	}

	var result []Token
	for {
		ok, err := l.next()
		if err != nil {
			return nil, err
		}
		if !ok {
			// no more tokens in the input
			break
		}
		l.token.File = filename
		result = append(result, l.token)
	}
	return result, nil
}
Cognitive complexity: 9
, Cyclomatic complexity: 5
func UnmarshalModule
UnmarshalModule instantiates a module with the given ID and invokes UnmarshalCaddyfile on the new value using the immediate next segment of d as input. In other words, d's next token should be the first token of the module's Caddyfile input.
This function is used when the next segment of Caddyfile tokens belongs to another Caddy module. The returned value is often type-asserted to the module's associated type for practical use when setting up a config.
// UnmarshalModule instantiates a module with the given ID and invokes
// UnmarshalCaddyfile on the new value using the immediate next segment
// of d as input; d's next token should be the first token of the
// module's Caddyfile input. The returned value is often type-asserted
// to the module's associated type when setting up a config.
func UnmarshalModule(d *Dispenser, moduleID string) (Unmarshaler, error) {
	mod, err := caddy.GetModule(moduleID)
	if err != nil {
		return nil, d.Errf("getting module named '%s': %v", moduleID, err)
	}

	inst := mod.New()
	unm, ok := inst.(Unmarshaler)
	if !ok {
		return nil, d.Errf("module %s is not a Caddyfile unmarshaler; is %T", mod.ID, inst)
	}

	if err := unm.UnmarshalCaddyfile(d.NewFromNextSegment()); err != nil {
		return nil, err
	}
	return unm, nil
}
Cognitive complexity: 6
, Cyclomatic complexity: 4
func (*Dispenser) AllArgs
AllArgs is like Args, but if there are more argument tokens available than there are targets, false is returned. The number of available argument tokens must match the number of targets exactly to return true.
// AllArgs is like Args, but returns false when there are more argument
// tokens available than targets; the count must match exactly.
func (d *Dispenser) AllArgs(targets ...*string) bool {
	if ok := d.Args(targets...); !ok {
		return false
	}
	// a leftover argument means the counts did not match
	if d.NextArg() {
		d.Prev()
		return false
	}
	return true
}
Cognitive complexity: 4
, Cyclomatic complexity: 3
func (*Dispenser) ArgErr
ArgErr returns an argument error, meaning that another argument was expected but not found. In other words, a line break or open curly brace was encountered instead of an argument.
// ArgErr returns an argument error, meaning that another argument was
// expected but not found: a line break or open curly brace was
// encountered instead.
func (d *Dispenser) ArgErr() error {
	val := d.Val()
	if val == "{" {
		return d.Err("unexpected token '{', expecting argument")
	}
	return d.Errf("wrong argument count or unexpected line ending after '%s'", val)
}
Cognitive complexity: 2
, Cyclomatic complexity: 2
func (*Dispenser) Args
Args is a convenience function that loads the next arguments (tokens on the same line) into an arbitrary number of strings pointed to in targets. If there are not enough argument tokens available to fill targets, false is returned and the remaining targets are left unchanged. If all the targets are filled, then true is returned.
// Args loads the next arguments (tokens on the same line) into the
// strings pointed to by targets. If there are not enough argument
// tokens to fill all targets, false is returned and the remaining
// targets are left unchanged; otherwise true is returned.
func (d *Dispenser) Args(targets ...*string) bool {
	for _, target := range targets {
		if !d.NextArg() {
			return false
		}
		*target = d.Val()
	}
	return true
}
Cognitive complexity: 4
, Cyclomatic complexity: 3
func (*Dispenser) CountRemainingArgs
CountRemainingArgs counts the amount of remaining arguments (tokens on the same line) without consuming the tokens.
// CountRemainingArgs counts the remaining arguments (tokens on the
// same line) without consuming them: it walks forward, then rewinds
// the cursor by the same number of steps.
func (d *Dispenser) CountRemainingArgs() int {
	var n int
	for d.NextArg() {
		n++
	}
	for back := n; back > 0; back-- {
		d.Prev()
	}
	return n
}
Cognitive complexity: 4
, Cyclomatic complexity: 3
func (*Dispenser) Delete
Delete deletes the current token and returns the updated slice of tokens. The cursor is not advanced to the next token. Because deletion modifies the underlying slice, this method should only be called if you have access to the original slice of tokens and/or are using the slice of tokens outside this Dispenser instance. If you do not re-assign the slice with the return value of this method, inconsistencies in the token array will become apparent (or worse, hide from you like they did me for 3 and a half freaking hours late one night).
// Delete deletes the current token and returns the updated slice of
// tokens; the cursor is not advanced. Because deletion modifies the
// underlying slice, re-assign the return value if you use the token
// slice outside this Dispenser, or the arrays will be inconsistent.
func (d *Dispenser) Delete() []Token {
	if d.cursor >= 0 && d.cursor < len(d.tokens) {
		d.tokens = append(d.tokens[:d.cursor], d.tokens[d.cursor+1:]...)
		d.cursor--
	}
	return d.tokens
}
Cognitive complexity: 2
, Cyclomatic complexity: 3
func (*Dispenser) DeleteN
DeleteN is the same as Delete, but can delete many tokens at once. If there aren't N tokens available to delete, none are deleted.
// DeleteN is the same as Delete, but removes the current token and the
// (amount-1) tokens before it in one operation. If there aren't that
// many tokens available behind the cursor, none are deleted.
func (d *Dispenser) DeleteN(amount int) []Token {
	if amount > 0 && d.cursor >= amount-1 && d.cursor < len(d.tokens) {
		start := d.cursor - (amount - 1)
		d.tokens = append(d.tokens[:start], d.tokens[d.cursor+1:]...)
		d.cursor -= amount
	}
	return d.tokens
}
Cognitive complexity: 2
, Cyclomatic complexity: 4
func (*Dispenser) EOFErr
EOFErr returns an error indicating that the dispenser reached the end of the input when searching for the next token.
// EOFErr returns an error indicating that the dispenser reached the
// end of the input while searching for the next token.
func (d *Dispenser) EOFErr() error {
	return d.Err("unexpected EOF")
}
Cognitive complexity: 0
, Cyclomatic complexity: 1
func (*Dispenser) Err
Err generates a custom parse-time error with a message of msg.
// Err generates a custom parse-time error carrying the message msg,
// annotated with the current file and line via WrapErr.
func (d *Dispenser) Err(msg string) error {
	err := errors.New(msg)
	return d.WrapErr(err)
}
Cognitive complexity: 0
, Cyclomatic complexity: 1
func (*Dispenser) Errf
Errf is like Err, but for formatted error messages
// Errf is like Err, but for formatted error messages.
func (d *Dispenser) Errf(format string, args ...any) error {
	err := fmt.Errorf(format, args...)
	return d.WrapErr(err)
}
Cognitive complexity: 0
, Cyclomatic complexity: 1
func (*Dispenser) File
File gets the filename where the current token originated.
// File gets the filename where the current token originated, or the
// empty string if no token is loaded.
func (d *Dispenser) File() string {
	if d.cursor >= 0 && d.cursor < len(d.tokens) {
		return d.tokens[d.cursor].File
	}
	return ""
}
Cognitive complexity: 2
, Cyclomatic complexity: 3
func (*Dispenser) GetContext
GetContext gets the value of a key in the context map.
// GetContext gets the value of a key in the context map, or nil if the
// map has not been initialized or the key is absent.
func (d *Dispenser) GetContext(key string) any {
	if d.context != nil {
		return d.context[key]
	}
	return nil
}
Cognitive complexity: 2
, Cyclomatic complexity: 2
func (*Dispenser) GetContextString
GetContextString gets the value of a key in the context map as a string, or an empty string if the key does not exist.
// GetContextString gets the value of a key in the context map as a
// string, or an empty string if the key does not exist (or its value
// is not a string).
func (d *Dispenser) GetContextString(key string) string {
	if d.context == nil {
		return ""
	}
	val, ok := d.context[key].(string)
	if !ok {
		return ""
	}
	return val
}
Cognitive complexity: 4
, Cyclomatic complexity: 3
func (*Dispenser) Line
Line gets the line number of the current token. If there is no token loaded, it returns 0.
// Line gets the line number of the current token, or 0 if no token is
// loaded.
func (d *Dispenser) Line() int {
	if d.cursor >= 0 && d.cursor < len(d.tokens) {
		return d.tokens[d.cursor].Line
	}
	return 0
}
Cognitive complexity: 2
, Cyclomatic complexity: 3
func (*Dispenser) Nesting
Nesting returns the current nesting level. Necessary if using NextBlock()
// Nesting returns the current brace-nesting level; needed as the
// argument to NextBlock when iterating a block.
func (d *Dispenser) Nesting() int {
	return d.nesting
}
Cognitive complexity: 0
, Cyclomatic complexity: 1
func (*Dispenser) NewFromNextSegment
NewFromNextSegment returns a new dispenser with a copy of the tokens from the current token until the end of the "directive" whether that be to the end of the line or the end of a block that starts at the end of the line; in other words, until the end of the segment.
// NewFromNextSegment returns a new dispenser with a copy of the tokens
// from the current token until the end of the directive's segment
// (end of line, or end of a block opened at the end of the line).
func (d *Dispenser) NewFromNextSegment() *Dispenser {
	seg := d.NextSegment()
	return NewDispenser(seg)
}
Cognitive complexity: 0
, Cyclomatic complexity: 1
func (*Dispenser) Next
Next loads the next token. Returns true if a token was loaded; false otherwise. If false, all tokens have been consumed.
// Next loads the next token. It returns true if a token was loaded;
// false means all tokens have been consumed.
func (d *Dispenser) Next() bool {
	if d.cursor >= len(d.tokens)-1 {
		return false
	}
	d.cursor++
	return true
}
Cognitive complexity: 2
, Cyclomatic complexity: 2
func (*Dispenser) NextArg
NextArg loads the next token if it is on the same line and if it is not a block opening (open curly brace). Returns true if an argument token was loaded; false otherwise. If false, all tokens on the line have been consumed except for potentially a block opening. It handles imported tokens correctly.
// NextArg loads the next token if it is on the same line and is not a
// block opening (open curly brace). Returns true if an argument token
// was loaded; false means the line's arguments are exhausted (except
// for possibly a block opening). It handles imported tokens correctly.
func (d *Dispenser) NextArg() bool {
	if !d.nextOnSameLine() {
		return false
	}
	if d.Val() != "{" {
		return true
	}
	// roll back; a block opening is not an argument
	d.cursor--
	return false
}
Cognitive complexity: 4
, Cyclomatic complexity: 3
func (*Dispenser) NextBlock
NextBlock can be used as the condition of a for loop to load the next token as long as it opens a block or is already in a block nested more than initialNestingLevel. In other words, a loop over NextBlock() will iterate all tokens in the block assuming the next token is an open curly brace, until the matching closing brace. The open and closing brace tokens for the outer-most block will be consumed internally and omitted from the iteration.
Proper use of this method looks like this:
for nesting := d.Nesting(); d.NextBlock(nesting); {
}
However, in simple cases where it is known that the Dispenser is new and has not already traversed state by a loop over NextBlock(), this will do:
for d.NextBlock(0) {
}
As with other token parsing logic, a loop over NextBlock() should be contained within a loop over Next(), as it is usually prudent to skip the initial token.
// NextBlock can be used as the condition of a for loop to load the
// next token as long as it opens a block or is already in a block
// nested more than initialNestingLevel. A loop over NextBlock() will
// iterate all tokens in the block (assuming the next token is an open
// curly brace) until the matching closing brace; the outer-most
// block's braces are consumed internally and omitted from iteration.
//
// Proper use looks like:
//
//	for nesting := d.Nesting(); d.NextBlock(nesting); {
//	}
//
// or, when the Dispenser is known to be fresh:
//
//	for d.NextBlock(0) {
//	}
func (d *Dispenser) NextBlock(initialNestingLevel int) bool {
	if d.nesting > initialNestingLevel {
		// already inside the block: advance and track brace depth
		if !d.Next() {
			return false // should be EOF error
		}
		if d.Val() == "}" && !d.nextOnSameLine() {
			d.nesting--
		} else if d.Val() == "{" && !d.nextOnSameLine() {
			d.nesting++
		}
		return d.nesting > initialNestingLevel
	}
	if !d.nextOnSameLine() { // block must open on same line
		return false
	}
	if d.Val() != "{" {
		d.cursor-- // roll back if not opening brace
		return false
	}
	d.Next() // consume open curly brace
	if d.Val() == "}" {
		return false // open and then closed right away
	}
	d.nesting++
	return true
}
Cognitive complexity: 14
, Cyclomatic complexity: 10
func (*Dispenser) NextLine
NextLine loads the next token only if it is not on the same line as the current token, and returns true if a token was loaded; false otherwise. If false, there is not another token or it is on the same line. It handles imported tokens correctly.
// NextLine loads the next token only if it is not on the same line as
// the current token, and returns true if a token was loaded; false
// means there is no other token or it is on the same line. It handles
// imported tokens correctly.
func (d *Dispenser) NextLine() bool {
	// before the first token, loading always succeeds
	if d.cursor < 0 {
		d.cursor++
		return true
	}
	if d.cursor >= len(d.tokens)-1 {
		return false
	}
	if !isNextOnNewLine(d.tokens[d.cursor], d.tokens[d.cursor+1]) {
		return false
	}
	d.cursor++
	return true
}
Cognitive complexity: 6
, Cyclomatic complexity: 4
func (*Dispenser) NextSegment
NextSegment returns a copy of the tokens from the current token until the end of the line or block that starts at the end of the line.
// NextSegment returns a copy of the tokens from the current token
// until the end of the line or the end of the block that starts at
// the end of the line, including the surrounding curly braces.
func (d *Dispenser) NextSegment() Segment {
	tkns := Segment{d.Token()}
	// gather arguments on the same line
	for d.NextArg() {
		tkns = append(tkns, d.Token())
	}
	var openedBlock bool
	for nesting := d.Nesting(); d.NextBlock(nesting); {
		if !openedBlock {
			// because NextBlock() consumes the initial open
			// curly brace, we rewind here to append it, since
			// our case is special in that we want the new
			// dispenser to have all the tokens including
			// surrounding curly braces
			d.Prev()
			tkns = append(tkns, d.Token())
			d.Next()
			openedBlock = true
		}
		tkns = append(tkns, d.Token())
	}
	if openedBlock {
		// include closing brace
		tkns = append(tkns, d.Token())

		// do not consume the closing curly brace; the
		// next iteration of the enclosing loop will
		// call Next() and consume it
	}
	return tkns
}
Cognitive complexity: 9
, Cyclomatic complexity: 5
func (*Dispenser) Prev
Prev moves to the previous token. It does the inverse of Next(), except this function may decrement the cursor to -1 so that the next call to Next() points to the first token; this allows dispensing to "start over". This method returns true if the cursor ends up pointing to a valid token.
// Prev moves to the previous token. Unlike the inverse of Next(), it
// may decrement the cursor to -1 so that the next call to Next()
// points at the first token again, allowing dispensing to start over.
// It returns true if the cursor ends up at a valid token.
func (d *Dispenser) Prev() bool {
	if d.cursor <= -1 {
		return false
	}
	d.cursor--
	return d.cursor > -1
}
Cognitive complexity: 2
, Cyclomatic complexity: 2
func (*Dispenser) RemainingArgs
RemainingArgs loads any more arguments (tokens on the same line) into a slice and returns them. Open curly brace tokens also indicate the end of arguments, and the curly brace is not included in the return value nor is it loaded.
// RemainingArgs loads any more arguments (tokens on the same line)
// into a slice and returns them. An open curly brace ends the
// arguments; it is neither returned nor loaded.
func (d *Dispenser) RemainingArgs() []string {
	var collected []string
	for d.NextArg() {
		collected = append(collected, d.Val())
	}
	return collected
}
Cognitive complexity: 2
, Cyclomatic complexity: 2
func (*Dispenser) RemainingArgsRaw
RemainingArgsRaw loads any more arguments (tokens on the same line, retaining quotes) into a slice and returns them. Open curly brace tokens also indicate the end of arguments, and the curly brace is not included in the return value nor is it loaded.
// RemainingArgsRaw is like RemainingArgs but retains quotes around
// each argument (see ValRaw). An open curly brace ends the arguments;
// it is neither returned nor loaded.
func (d *Dispenser) RemainingArgsRaw() []string {
	var collected []string
	for d.NextArg() {
		collected = append(collected, d.ValRaw())
	}
	return collected
}
Cognitive complexity: 2
, Cyclomatic complexity: 2
func (*Dispenser) Reset
Reset sets d's cursor to the beginning, as if this was a new and unused dispenser.
// Reset rewinds d's cursor to the beginning and clears the nesting
// level, as if this was a new and unused dispenser.
func (d *Dispenser) Reset() {
	d.nesting = 0
	d.cursor = -1
}
Cognitive complexity: 0
, Cyclomatic complexity: 1
func (*Dispenser) ScalarVal
ScalarVal gets value of the current token, converted to the closest scalar type. If there is no token loaded, it returns nil.
// ScalarVal gets the value of the current token, converted to the
// closest scalar type: quoted tokens stay strings; otherwise int,
// float64, and bool parses are tried in that order, falling back to
// the raw text. If there is no token loaded, it returns nil.
func (d *Dispenser) ScalarVal() any {
	if d.cursor < 0 || d.cursor >= len(d.tokens) {
		return nil
	}
	quote := d.tokens[d.cursor].wasQuoted
	text := d.tokens[d.cursor].Text

	if quote > 0 {
		return text // string literal
	}
	if num, err := strconv.Atoi(text); err == nil {
		return num
	}
	if num, err := strconv.ParseFloat(text, 64); err == nil {
		return num
	}
	// fix: previously this shadowed the predeclared identifier `bool`
	if b, err := strconv.ParseBool(text); err == nil {
		return b
	}
	return text
}
Cognitive complexity: 10
, Cyclomatic complexity: 7
func (*Dispenser) SetContext
SetContext sets a key-value pair in the context map.
// SetContext sets a key-value pair in the context map, lazily
// allocating the map on first use.
func (d *Dispenser) SetContext(key string, value any) {
	if d.context == nil {
		d.context = map[string]any{}
	}
	d.context[key] = value
}
Cognitive complexity: 2
, Cyclomatic complexity: 2
func (*Dispenser) SyntaxErr
SyntaxErr creates a generic syntax error which explains what was found and what was expected.
// SyntaxErr creates a generic syntax error which explains what was
// found and what was expected, with the file:line position and the
// token's import chain. Uses fmt.Errorf directly instead of the
// redundant Sprintf+errors.New pair; the message is unchanged.
func (d *Dispenser) SyntaxErr(expected string) error {
	return fmt.Errorf("syntax error: unexpected token '%s', expecting '%s', at %s:%d import chain: ['%s']", d.Val(), expected, d.File(), d.Line(), strings.Join(d.Token().imports, "','"))
}
Cognitive complexity: 0
, Cyclomatic complexity: 1
func (*Dispenser) Token
Token returns the current token.
// Token returns the current token, or the zero Token if no token is
// loaded.
func (d *Dispenser) Token() Token {
	if d.cursor >= 0 && d.cursor < len(d.tokens) {
		return d.tokens[d.cursor]
	}
	return Token{}
}
Cognitive complexity: 3
, Cyclomatic complexity: 3
func (*Dispenser) Val
Val gets the text of the current token. If there is no token loaded, it returns empty string.
// Val gets the text of the current token, or the empty string if no
// token is loaded.
func (d *Dispenser) Val() string {
	if d.cursor >= 0 && d.cursor < len(d.tokens) {
		return d.tokens[d.cursor].Text
	}
	return ""
}
Cognitive complexity: 2
, Cyclomatic complexity: 3
func (*Dispenser) ValRaw
ValRaw gets the raw text of the current token (including quotes). If the token was a heredoc, then the delimiter is not included, because that is not relevant to any unmarshaling logic at this time. If there is no token loaded, it returns empty string.
// ValRaw gets the raw text of the current token, restoring enclosing
// quotes. For heredocs the delimiter is not included, because that is
// not relevant to any unmarshaling logic at this time. If there is no
// token loaded, it returns the empty string.
func (d *Dispenser) ValRaw() string {
	if d.cursor < 0 || d.cursor >= len(d.tokens) {
		return ""
	}
	tok := d.tokens[d.cursor]
	if q := tok.wasQuoted; q > 0 && q != '<' {
		// string literal: put the quotes back on
		return string(q) + tok.Text + string(q)
	}
	return tok.Text
}
Cognitive complexity: 4
, Cyclomatic complexity: 5
func (*Dispenser) WrapErr
WrapErr takes an existing error and adds the Caddyfile file and line number.
// WrapErr takes an existing error and annotates it with the Caddyfile
// file and line number; the import chain is appended when present.
func (d *Dispenser) WrapErr(err error) error {
	imports := d.Token().imports
	if len(imports) == 0 {
		return fmt.Errorf("%w, at %s:%d", err, d.File(), d.Line())
	}
	return fmt.Errorf("%w, at %s:%d import chain ['%s']", err, d.File(), d.Line(), strings.Join(imports, "','"))
}
Cognitive complexity: 2
, Cyclomatic complexity: 2
func (Adapter) Adapt
Adapt converts the Caddyfile config in body to Caddy JSON.
// Adapt converts the Caddyfile config in body to Caddy JSON.
// Pipeline: validate inputs -> Parse into server blocks -> let the
// ServerType build the config -> lint formatting -> marshal to JSON.
func (a Adapter) Adapt(body []byte, options map[string]any) ([]byte, []caddyconfig.Warning, error) {
	if a.ServerType == nil {
		return nil, nil, fmt.Errorf("no server type")
	}
	if options == nil {
		options = make(map[string]any)
	}

	// the filename is used for error/warning positions; default it
	filename, _ := options["filename"].(string)
	if filename == "" {
		filename = "Caddyfile"
	}

	serverBlocks, err := Parse(filename, body)
	if err != nil {
		return nil, nil, err
	}

	cfg, warnings, err := a.ServerType.Setup(serverBlocks, options)
	if err != nil {
		return nil, warnings, err
	}

	// lint check: see if input was properly formatted; sometimes messy files parse
	// successfully but result in logical errors (the Caddyfile is a bad format, I'm sorry)
	if warning, different := FormattingDifference(filename, body); different {
		warnings = append(warnings, warning)
	}

	result, err := json.Marshal(cfg)

	return result, warnings, err
}
Cognitive complexity: 12
, Cyclomatic complexity: 7
func (Segment) Directive
Directive returns the directive name for the segment. The directive name is the text of the first token.
// Directive returns the directive name for the segment: the text of
// its first token, or the empty string for an empty segment.
func (s Segment) Directive() string {
	if len(s) == 0 {
		return ""
	}
	return s[0].Text
}
Cognitive complexity: 2
, Cyclomatic complexity: 2
func (ServerBlock) DispenseDirective
DispenseDirective returns a dispenser that contains all the tokens in the server block.
// DispenseDirective returns a dispenser containing the tokens of every
// segment in the server block whose directive name is dir.
func (sb ServerBlock) DispenseDirective(dir string) *Dispenser {
	var matched []Token
	for _, seg := range sb.Segments {
		if len(seg) == 0 || seg[0].Text != dir {
			continue
		}
		matched = append(matched, seg...)
	}
	return NewDispenser(matched)
}
Cognitive complexity: 5
, Cyclomatic complexity: 4
func (ServerBlock) GetKeysText
// GetKeysText returns the text of each of the server block's keys,
// in order. The result is never nil (empty blocks yield an empty,
// non-nil slice, preserving the original behavior).
func (sb ServerBlock) GetKeysText() []string {
	// pre-size: exactly one entry per key
	res := make([]string, 0, len(sb.Keys))
	for _, k := range sb.Keys {
		res = append(res, k.Text)
	}
	return res
}
Cognitive complexity: 4
, Cyclomatic complexity: 2
func (Token) Clone
Clone returns a deep copy of the token.
// Clone returns a deep copy of the token: the imports slice gets its
// own backing array so the copy is independent of the original.
func (t Token) Clone() Token {
	importsCopy := append([]string{}, t.imports...)
	clone := t
	clone.imports = importsCopy
	return clone
}
Cognitive complexity: 2
, Cyclomatic complexity: 1
func (Token) NumLineBreaks
NumLineBreaks counts how many line breaks are in the token text.
// NumLineBreaks counts how many line breaks are in the token text.
func (t Token) NumLineBreaks() int {
	n := strings.Count(t.Text, "\n")
	if t.wasQuoted == '<' {
		// heredocs have an extra linebreak because the opening
		// delimiter is on its own line and is not included in the
		// token Text itself, and the trailing newline is removed.
		n += 2
	}
	return n
}
Cognitive complexity: 2
, Cyclomatic complexity: 2
func (Token) Quoted
Quoted returns true if the token was enclosed in quotes (i.e. double quotes, backticks, or heredoc).
// Quoted reports whether the token was enclosed in quotes (double
// quotes, backticks, or heredoc); wasQuoted holds the enclosing quote
// character, or zero when unquoted.
func (t Token) Quoted() bool {
	return t.wasQuoted != 0
}
Cognitive complexity: 0
, Cyclomatic complexity: 1
Private functions
func allTokens
allTokens lexes the entire input, but does not parse it. It returns all the tokens from the input, unstructured and in order. It may mutate input as it expands env vars.
allTokens (filename string, input []byte) ([]Token, error)
func makeArgsReplacer
makeArgsReplacer prepares a Replacer which can replace non-variadic args placeholders in imported tokens.
makeArgsReplacer (args []string) *caddy.Replacer
References: strconv.Atoi, strconv.Itoa, strings.Contains.
func parseVariadic
parseVariadic determines if the token is a variadic placeholder, and if so, determines the index range (start/end) of args to use. Returns a boolean signaling whether a variadic placeholder was found, and the start and end indices.
parseVariadic (token Token, argCount int) (bool, int, int)
References: strconv.Atoi, strconv.Itoa, strings.Contains, strings.Cut, strings.HasPrefix, strings.HasSuffix, strings.TrimPrefix, strings.TrimSuffix, zap.String, zap.Strings.
func replaceEnvVars
replaceEnvVars replaces all occurrences of environment variables. It mutates the underlying array and returns the updated slice.
replaceEnvVars (input []byte) []byte
References: bytes.Index, os.LookupEnv, strings.SplitN.
func isNewLine
isNewLine determines whether the current token is on a different line (higher line number) than the previous token. It handles imported tokens correctly. If there isn't a previous token, it returns true.
isNewLine () bool
func isNextOnNewLine
isNextOnNewLine determines whether the current token is on a different line (higher line number) than the next token. It handles imported tokens correctly. If there isn't a next token, it returns true.
isNextOnNewLine () bool
func nextOnSameLine
nextOnSameLine advances the cursor if the next token is on the same line of the same file.
nextOnSameLine () bool
func addEdge
addEdge (from,to string) error
References: fmt.Errorf.
func addEdges
addEdges (from string, tos []string) error
func addNode
addNode (name string)
func addNodes
addNodes (names []string)
func areConnected
areConnected (from,to string) bool
References: slices.Contains.
func exists
exists (key string) bool
func removeNode
removeNode (name string)
func removeNodes
removeNodes (names []string)
func willCycle
willCycle (from,to string) bool
func finalizeHeredoc
finalizeHeredoc takes the runes read as the heredoc text and the marker, and processes the text to strip leading whitespace, returning the final value without the leading whitespace.
finalizeHeredoc (val []rune, marker string) ([]rune, error)
References: fmt.Errorf, strings.Index, strings.LastIndex, strings.ReplaceAll, strings.Split.
func load
load prepares the lexer to scan an input for tokens. It discards any leading byte order mark.
load (input io.Reader) error
References: bufio.NewReader.
func next
next loads the next token into the lexer. A token is delimited by whitespace, unless the token starts with a quote character (") in which case the token goes until the closing quote (the enclosing quotes are not included). Inside quoted strings, quotes may be escaped with a preceding \ character. No other chars may be escaped. The rest of the line is skipped if a "#" character is read in. Returns true if a token was loaded; false otherwise.
next () (bool, error)
References: fmt.Errorf, io.EOF, unicode.IsSpace.
func addresses
addresses () error
References: strings.Contains, strings.HasPrefix, strings.HasSuffix.
func begin
begin () error
func blockContents
blockContents () error
func blockTokens
blockTokens reads and stores everything in a block for later replay.
blockTokens (retainCurlies bool) ([]Token, error)
func closeCurlyBrace
closeCurlyBrace expects the current token to be a closing curly brace. This acts like an assertion because it returns an error if the token is not a closing curly brace. It does NOT advance the token.
closeCurlyBrace () error
func directive
directive collects tokens until the directive's scope closes (either end of line or end of curly brace block). It expects the currently-loaded token to be a directive (or } that ends a server block). The collected tokens are loaded into the current server block for later use by directive setup functions.
directive () error
func directives
directives parses through all the lines for directives and it expects the next token to be the first directive. It goes until EOF or closing curly brace which ends the server block.
directives () error
func doImport
doImport swaps out the import directive and its argument (a total of 2 tokens) with the tokens in the specified file or globbing pattern. When the function returns, the cursor is on the token before where the import directive was. In other words, call Next() to access the first token that was imported.
doImport (nesting int) error
References: filepath.Dir, filepath.Glob, filepath.IsAbs, filepath.Join, filepath.Separator, fmt.Sprintf, strings.Contains, strings.ContainsAny, strings.Count, strings.HasPrefix, strings.HasSuffix, strings.Split, strings.TrimPrefix, strings.TrimSuffix, zap.String.
func doSingleImport
doSingleImport lexes the individual file at importFile and returns its tokens or an error, if any.
doSingleImport (importFile string) ([]Token, error)
References: io.ReadAll, os.Open, strings.TrimSpace, zap.String.
func isNamedRoute
isNamedRoute () (bool, string)
References: strings.HasPrefix, strings.HasSuffix, strings.TrimSuffix.
func isSnippet
isSnippet () (bool, string)
References: strings.HasPrefix, strings.HasSuffix, strings.TrimSuffix.
func openCurlyBrace
openCurlyBrace expects the current token to be an opening curly brace. This acts like an assertion because it returns an error if the token is not an opening curly brace. It does NOT advance the token.
openCurlyBrace () error
func parseAll
parseAll () ([]ServerBlock, error)
func parseOne
parseOne () error
Tests
Files: 4. Third party imports: 0. Imports from organisation: 0. Tests: 21. Benchmarks: 0.