github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy
Package reverseproxy implements a highly configurable, production-ready reverse proxy for the Caddy HTTP app.
Package
Files: 13. Third party imports: 11. Imports from organisation: 0. Tests: 0. Benchmarks: 0.
Constants
const (
// defaultBufferSize is the size, in bytes, of the buffers used
// when streaming data between the client and the backend.
defaultBufferSize = 32 * 1024
// wordSize is the machine word size in bytes on the current
// platform (4 on 32-bit, 8 on 64-bit).
wordSize = int(unsafe.Sizeof(uintptr(0)))
)
// dialInfoVarKey is the key used for the variable that holds
// the dial info for the upstream connection.
const dialInfoVarKey = "reverse_proxy.dial_info"
// matcherPrefix is the sigil that marks a named response matcher
// in the Caddyfile (e.g. "@accel").
const matcherPrefix = "@"
// proxyHandleResponseContextCtxKey is the context key for the active proxy handler
// so that handle_response routes can inherit some config options
// from the proxy handler.
const proxyHandleResponseContextCtxKey caddy.CtxKey = "reverse_proxy_handle_response_context"
// proxyProtocolInfoVarKey is the key used for the variable that holds
// the proxy protocol info for the upstream connection.
const proxyProtocolInfoVarKey = "reverse_proxy.proxy_protocol_info"
Vars
// Interface guards: compile-time assertions that these types
// implement caddyfile.Unmarshaler.
var (
_ caddyfile.Unmarshaler = (*Handler)(nil)
_ caddyfile.Unmarshaler = (*HTTPTransport)(nil)
_ caddyfile.Unmarshaler = (*SRVUpstreams)(nil)
_ caddyfile.Unmarshaler = (*AUpstreams)(nil)
_ caddyfile.Unmarshaler = (*MultiUpstreams)(nil)
)
var (
// srvs caches SRV lookup results, keyed by lookup name;
// guarded by srvsMu.
srvs = make(map[string]srvLookup)
srvsMu sync.RWMutex
// aAaaa caches A/AAAA lookup results, keyed by domain name;
// guarded by aAaaaMu.
aAaaa = make(map[string]aLookup)
aAaaaMu sync.RWMutex
)
bufPool is used for buffering requests and responses.
// bufPool is used for buffering requests and responses.
var bufPool = sync.Pool{
New: func() any {
return new(bytes.Buffer)
},
}
errNoUpstream occurs when there are no upstreams available.
// errNoUpstream occurs when there are no upstreams available.
var errNoUpstream = fmt.Errorf("no upstreams available")
Hop-by-hop headers. These are removed when sent to the backend. As of RFC 7230, hop-by-hop headers are required to appear in the Connection header field. These are the headers defined by the obsoleted RFC 2616 (section 13.5.1) and are used for backward compatibility.
// hopHeaders are hop-by-hop headers, removed before the request is
// sent to the backend. As of RFC 7230, hop-by-hop headers are required
// to appear in the Connection header field; the list below is the set
// defined by the obsoleted RFC 2616 (section 13.5.1), kept for
// backward compatibility.
var hopHeaders = []string{
"Alt-Svc",
"Connection",
"Proxy-Connection", // non-standard but still sent by libcurl and rejected by e.g. google
"Keep-Alive",
"Proxy-Authenticate",
"Proxy-Authorization",
"Te", // canonicalized version of "TE"
"Trailer", // not Trailers per URL above; https://www.rfc-editor.org/errata_search.php?eid=4522
"Transfer-Encoding",
"Upgrade",
}
hosts is the global repository for hosts that are currently in use by active configuration(s). This allows the state of remote hosts to be preserved through config reloads.
// hosts is the global repository for hosts that are currently in use
// by active configuration(s). This allows the state of remote hosts
// to be preserved through config reloads.
var hosts = caddy.NewUsagePool()
// reverseProxyMetrics holds the package's Prometheus metrics;
// the init field ensures they are registered at most once.
var reverseProxyMetrics = struct {
init sync.Once
upstreamsHealthy *prometheus.GaugeVec
logger *zap.Logger
}{}
// streamingBufPool recycles fixed-size byte buffers (of
// defaultBufferSize) used when streaming data between the
// client and the backend.
var streamingBufPool = sync.Pool{
New: func() any {
// The Pool's New function should generally only return pointer
// types, since a pointer can be put into the return interface
// value without an allocation
// - (from the package docs)
b := make([]byte, defaultBufferSize)
return &b
},
}
Mapping of the canonical form of the headers to the RFC 6455 form, i.e. "WebSocket" with uppercase 'S'.
// websocketHeaderMapping maps the canonical form of the
// Sec-WebSocket-* headers to their RFC 6455 form, i.e.
// "WebSocket" with uppercase 'S'.
var websocketHeaderMapping = map[string]string{
"Sec-Websocket-Accept": "Sec-WebSocket-Accept",
"Sec-Websocket-Extensions": "Sec-WebSocket-Extensions",
"Sec-Websocket-Key": "Sec-WebSocket-Key",
"Sec-Websocket-Protocol": "Sec-WebSocket-Protocol",
"Sec-Websocket-Version": "Sec-WebSocket-Version",
}
Types
AUpstreams
AUpstreams provides upstreams from A/AAAA lookups. Results are cached and refreshed at the configured refresh interval.
type AUpstreams struct {
// The domain name to look up.
Name string `json:"name,omitempty"`
// The port to use with the upstreams. Default: 80
Port string `json:"port,omitempty"`
// The interval at which to refresh the A lookup.
// Results are cached between lookups. Default: 1m
Refresh caddy.Duration `json:"refresh,omitempty"`
// Configures the DNS resolver used to resolve the
// domain name to A records.
Resolver *UpstreamResolver `json:"resolver,omitempty"`
// If Resolver is configured, how long to wait before
// timing out trying to connect to the DNS server.
DialTimeout caddy.Duration `json:"dial_timeout,omitempty"`
// If Resolver is configured, how long to wait before
// spawning an RFC 6555 Fast Fallback connection.
// A negative value disables this.
FallbackDelay caddy.Duration `json:"dial_fallback_delay,omitempty"`
// The IP versions to resolve for. By default, both
// "ipv4" and "ipv6" will be enabled, which
// correspond to A and AAAA records respectively.
Versions *IPVersions `json:"versions,omitempty"`
// resolver is the concrete DNS resolver built from the Resolver
// configuration — presumably at provision time; verify in Provision.
resolver *net.Resolver
// logger is the structured logger for this upstream source.
logger *zap.Logger
}
ActiveHealthChecks
ActiveHealthChecks holds configuration related to active health checks (that is, health checks which occur in a background goroutine independently).
type ActiveHealthChecks struct {
// Deprecated: Use 'uri' instead. This field will be removed. TODO: remove this field
Path string `json:"path,omitempty"`
// The URI (path and query) to use for health checks
URI string `json:"uri,omitempty"`
// The host:port to use (if different from the upstream's dial address)
// for health checks. This should be used in tandem with `health_header` and
// `{http.reverse_proxy.active.target_upstream}`. This can be helpful when
// creating an intermediate service to do a more thorough health check.
// If upstream is set, the active health check port is ignored.
Upstream string `json:"upstream,omitempty"`
// The port to use (if different from the upstream's dial
// address) for health checks. If active upstream is set,
// this value is ignored.
Port int `json:"port,omitempty"`
// HTTP headers to set on health check requests.
Headers http.Header `json:"headers,omitempty"`
// The HTTP method to use for health checks (default "GET").
Method string `json:"method,omitempty"`
// The body to send with the health check request.
Body string `json:"body,omitempty"`
// Whether to follow HTTP redirects in response to active health checks (default off).
FollowRedirects bool `json:"follow_redirects,omitempty"`
// How frequently to perform active health checks (default 30s).
Interval caddy.Duration `json:"interval,omitempty"`
// How long to wait for a response from a backend before
// considering it unhealthy (default 5s).
Timeout caddy.Duration `json:"timeout,omitempty"`
// Number of consecutive health check passes before marking
// a previously unhealthy backend as healthy again (default 1).
Passes int `json:"passes,omitempty"`
// Number of consecutive health check failures before marking
// a previously healthy backend as unhealthy (default 1).
Fails int `json:"fails,omitempty"`
// The maximum response body to download from the backend
// during a health check.
MaxSize int64 `json:"max_size,omitempty"`
// The HTTP status code to expect from a healthy backend.
ExpectStatus int `json:"expect_status,omitempty"`
// A regular expression against which to match the response
// body of a healthy backend.
ExpectBody string `json:"expect_body,omitempty"`
// uri is the parsed form of URI.
uri *url.URL
// httpClient is the client used to perform the health check requests.
httpClient *http.Client
// bodyRegexp is the compiled form of ExpectBody.
bodyRegexp *regexp.Regexp
// logger is the structured logger for health check events.
logger *zap.Logger
}
CircuitBreaker
CircuitBreaker is a type that can act as an early-warning system for the health checker when backends are getting overloaded. This interface is still experimental and is subject to change.
type CircuitBreaker interface {
// OK reports whether the backend is currently considered
// healthy enough to receive requests.
OK() bool
// RecordMetric records the outcome of a roundtrip (status code
// and latency) to inform future OK decisions.
RecordMetric(statusCode int, latency time.Duration)
}
ClientIPHashSelection
ClientIPHashSelection is a policy that selects a host based on hashing the client IP of the request, as determined by the HTTP app's trusted proxies settings.
// ClientIPHashSelection selects a host by hashing the client IP of the request.
type ClientIPHashSelection struct{}
CookieHashSelection
CookieHashSelection is a policy that selects a host based on a given cookie name.
type CookieHashSelection struct {
// The HTTP cookie name whose value is to be hashed and used for upstream selection.
Name string `json:"name,omitempty"`
// Secret to hash (Hmac256) chosen upstream in cookie
Secret string `json:"secret,omitempty"`
// The cookie's Max-Age before it expires. Default is no expiry.
MaxAge caddy.Duration `json:"max_age,omitempty"`
// The fallback policy to use if the cookie is not present. Defaults to `random`.
FallbackRaw json.RawMessage `json:"fallback,omitempty" caddy:"namespace=http.reverse_proxy.selection_policies inline_key=policy"`
// fallback is the loaded module decoded from FallbackRaw.
fallback Selector
}
CopyResponseHandler
CopyResponseHandler is a special HTTP handler which may only be used within reverse_proxy's handle_response routes, to copy the proxy response. EXPERIMENTAL, subject to change.
type CopyResponseHandler struct {
// To write the upstream response's body but with a different
// status code, set this field to the desired status code.
StatusCode caddyhttp.WeakString `json:"status_code,omitempty"`
// ctx is the Caddy context this handler was provisioned with.
ctx caddy.Context
}
CopyResponseHeadersHandler
CopyResponseHeadersHandler is a special HTTP handler which may only be used within reverse_proxy's handle_response routes, to copy headers from the proxy response. EXPERIMENTAL; subject to change.
type CopyResponseHeadersHandler struct {
// A list of header fields to copy from the response.
// Cannot be defined at the same time as Exclude.
Include []string `json:"include,omitempty"`
// A list of header fields to skip copying from the response.
// Cannot be defined at the same time as Include.
Exclude []string `json:"exclude,omitempty"`
// includeMap is Include as a set, for O(1) membership tests.
includeMap map[string]struct{}
// excludeMap is Exclude as a set, for O(1) membership tests.
excludeMap map[string]struct{}
// ctx is the Caddy context this handler was provisioned with.
ctx caddy.Context
}
DialError
DialError is an error that specifically occurs in a call to Dial or DialContext.
// DialError is an error that specifically occurs in a call to Dial or DialContext.
type DialError struct{ error }
DialInfo
DialInfo contains information needed to dial a connection to an upstream host. This information may be different than that which is represented in a URL (for example, unix sockets don't have a host that can be represented in a URL, but they certainly have a network name and address).
// DialInfo contains the information needed to dial a connection
// to an upstream host (network, address, and its components).
type DialInfo struct {
// Upstream is the Upstream associated with
// this DialInfo. It may be nil.
Upstream *Upstream
// The network to use. This should be one of
// the values that is accepted by net.Dial:
// https://golang.org/pkg/net/#Dial
Network string
// The address to dial. Follows the same
// semantics and rules as net.Dial.
Address string
// Host and Port are components of Address.
Host, Port string
}
FirstSelection
FirstSelection is a policy that selects the first available host.
// FirstSelection is a policy that selects the first available host.
type FirstSelection struct{}
HTTPTransport
HTTPTransport is essentially a configuration wrapper for http.Transport. It defines a JSON structure useful when configuring the HTTP transport for Caddy's reverse proxy. It builds its http.Transport at Provision.
type HTTPTransport struct {
// TODO: It's possible that other transports (like fastcgi) might be
// able to borrow/use at least some of these config fields; if so,
// maybe move them into a type called CommonTransport and embed it?
// Configures the DNS resolver used to resolve the IP address of upstream hostnames.
Resolver *UpstreamResolver `json:"resolver,omitempty"`
// Configures TLS to the upstream. Setting this to an empty struct
// is sufficient to enable TLS with reasonable defaults.
TLS *TLSConfig `json:"tls,omitempty"`
// Configures HTTP Keep-Alive (enabled by default). Should only be
// necessary if rigorous testing has shown that tuning this helps
// improve performance.
KeepAlive *KeepAlive `json:"keep_alive,omitempty"`
// Whether to enable compression to upstream. Default: true
Compression *bool `json:"compression,omitempty"`
// Maximum number of connections per host. Default: 0 (no limit)
MaxConnsPerHost int `json:"max_conns_per_host,omitempty"`
// If non-empty, which PROXY protocol version to send when
// connecting to an upstream. Default: off.
ProxyProtocol string `json:"proxy_protocol,omitempty"`
// URL to the server that the HTTP transport will use to proxy
// requests to the upstream. See http.Transport.Proxy for
// information regarding supported protocols. This value takes
// precedence over `HTTP_PROXY`, etc.
//
// Providing a value to this parameter results in
// requests flowing through the reverse_proxy in the following
// way:
//
// User Agent ->
// reverse_proxy ->
// forward_proxy_url -> upstream
//
// Default: http.ProxyFromEnvironment
ForwardProxyURL string `json:"forward_proxy_url,omitempty"`
// How long to wait before timing out trying to connect to
// an upstream. Default: `3s`.
DialTimeout caddy.Duration `json:"dial_timeout,omitempty"`
// How long to wait before spawning an RFC 6555 Fast Fallback
// connection. A negative value disables this. Default: `300ms`.
FallbackDelay caddy.Duration `json:"dial_fallback_delay,omitempty"`
// How long to wait for reading response headers from server. Default: No timeout.
ResponseHeaderTimeout caddy.Duration `json:"response_header_timeout,omitempty"`
// The length of time to wait for a server's first response
// headers after fully writing the request headers if the
// request has a header "Expect: 100-continue". Default: No timeout.
ExpectContinueTimeout caddy.Duration `json:"expect_continue_timeout,omitempty"`
// The maximum bytes to read from response headers. Default: `10MiB`.
MaxResponseHeaderSize int64 `json:"max_response_header_size,omitempty"`
// The size of the write buffer in bytes. Default: `4KiB`.
WriteBufferSize int `json:"write_buffer_size,omitempty"`
// The size of the read buffer in bytes. Default: `4KiB`.
ReadBufferSize int `json:"read_buffer_size,omitempty"`
// The maximum time to wait for next read from backend. Default: no timeout.
ReadTimeout caddy.Duration `json:"read_timeout,omitempty"`
// The maximum time to wait for next write to backend. Default: no timeout.
WriteTimeout caddy.Duration `json:"write_timeout,omitempty"`
// The versions of HTTP to support. As a special case, "h2c"
// can be specified to use H2C (HTTP/2 over Cleartext) to the
// upstream (this feature is experimental and subject to
// change or removal). Default: ["1.1", "2"]
//
// EXPERIMENTAL: "3" enables HTTP/3, but it must be the only
// version specified if enabled. Additionally, HTTPS must be
// enabled to the upstream as HTTP/3 requires TLS. Subject
// to change or removal while experimental.
Versions []string `json:"versions,omitempty"`
// Specify the address to bind to when connecting to an upstream. In other words,
// it is the address the upstream sees as the remote address.
LocalAddress string `json:"local_address,omitempty"`
// The pre-configured underlying HTTP transport.
Transport *http.Transport `json:"-"`
// h2cTransport is the alternate transport used when "h2c"
// is enabled in Versions.
h2cTransport *http2.Transport
h3Transport *http3.Transport // TODO: EXPERIMENTAL (May 2024)
}
Handler
Handler implements a highly configurable and production-ready reverse proxy.
Upon proxying, this module sets the following placeholders (which can be used both within and after this handler; for example, in response headers):
Placeholder | Description
------------|-------------
{http.reverse_proxy.upstream.address} | The full address to the upstream as given in the config
{http.reverse_proxy.upstream.hostport} | The host:port of the upstream
{http.reverse_proxy.upstream.host} | The host of the upstream
{http.reverse_proxy.upstream.port} | The port of the upstream
{http.reverse_proxy.upstream.requests} | The approximate current number of requests to the upstream
{http.reverse_proxy.upstream.max_requests} | The maximum approximate number of requests allowed to the upstream
{http.reverse_proxy.upstream.fails} | The number of recent failed requests to the upstream
{http.reverse_proxy.upstream.latency} | How long it took the proxy upstream to write the response header.
{http.reverse_proxy.upstream.latency_ms} | Same as 'latency', but in milliseconds.
{http.reverse_proxy.upstream.duration} | Time spent proxying to the upstream, including writing response body to client.
{http.reverse_proxy.upstream.duration_ms} | Same as 'upstream.duration', but in milliseconds.
{http.reverse_proxy.duration} | Total time spent proxying, including selecting an upstream, retries, and writing response.
{http.reverse_proxy.duration_ms} | Same as 'duration', but in milliseconds.
{http.reverse_proxy.retries} | The number of retries actually performed to communicate with an upstream.
type Handler struct {
// Configures the method of transport for the proxy. A transport
// is what performs the actual "round trip" to the backend.
// The default transport is plaintext HTTP.
TransportRaw json.RawMessage `json:"transport,omitempty" caddy:"namespace=http.reverse_proxy.transport inline_key=protocol"`
// A circuit breaker may be used to relieve pressure on a backend
// that is beginning to exhibit symptoms of stress or latency.
// By default, there is no circuit breaker.
CBRaw json.RawMessage `json:"circuit_breaker,omitempty" caddy:"namespace=http.reverse_proxy.circuit_breakers inline_key=type"`
// Load balancing distributes load/requests between backends.
LoadBalancing *LoadBalancing `json:"load_balancing,omitempty"`
// Health checks update the status of backends, whether they are
// up or down. Down backends will not be proxied to.
HealthChecks *HealthChecks `json:"health_checks,omitempty"`
// Upstreams is the static list of backends to proxy to.
Upstreams UpstreamPool `json:"upstreams,omitempty"`
// A module for retrieving the list of upstreams dynamically. Dynamic
// upstreams are retrieved at every iteration of the proxy loop for
// each request (i.e. before every proxy attempt within every request).
// Active health checks do not work on dynamic upstreams, and passive
// health checks are only effective on dynamic upstreams if the proxy
// server is busy enough that concurrent requests to the same backends
// are continuous. Instead of health checks for dynamic upstreams, it
// is recommended that the dynamic upstream module only return available
// backends in the first place.
DynamicUpstreamsRaw json.RawMessage `json:"dynamic_upstreams,omitempty" caddy:"namespace=http.reverse_proxy.upstreams inline_key=source"`
// Adjusts how often to flush the response buffer. By default,
// no periodic flushing is done. A negative value disables
// response buffering, and flushes immediately after each
// write to the client. This option is ignored when the upstream's
// response is recognized as a streaming response, or if its
// content length is -1; for such responses, writes are flushed
// to the client immediately.
FlushInterval caddy.Duration `json:"flush_interval,omitempty"`
// A list of IP ranges (supports CIDR notation) from which
// X-Forwarded-* header values should be trusted. By default,
// no proxies are trusted, so existing values will be ignored
// when setting these headers. If the proxy is trusted, then
// existing values will be used when constructing the final
// header values.
TrustedProxies []string `json:"trusted_proxies,omitempty"`
// Headers manipulates headers between Caddy and the backend.
// By default, all headers are passed-thru without changes,
// with the exceptions of special hop-by-hop headers.
//
// X-Forwarded-For, X-Forwarded-Proto and X-Forwarded-Host
// are also set implicitly.
Headers *headers.Handler `json:"headers,omitempty"`
// If nonzero, the entire request body up to this size will be read
// and buffered in memory before being proxied to the backend. This
// should be avoided if at all possible for performance reasons, but
// could be useful if the backend is intolerant of read latency or
// chunked encodings.
RequestBuffers int64 `json:"request_buffers,omitempty"`
// If nonzero, the entire response body up to this size will be read
// and buffered in memory before being proxied to the client. This
// should be avoided if at all possible for performance reasons, but
// could be useful if the backend has tighter memory constraints.
ResponseBuffers int64 `json:"response_buffers,omitempty"`
// If nonzero, streaming requests such as WebSockets will be
// forcibly closed at the end of the timeout. Default: no timeout.
StreamTimeout caddy.Duration `json:"stream_timeout,omitempty"`
// If nonzero, streaming requests such as WebSockets will not be
// closed when the proxy config is unloaded, and instead the stream
// will remain open until the delay is complete. In other words,
// enabling this prevents streams from closing when Caddy's config
// is reloaded. Enabling this may be a good idea to avoid a thundering
// herd of reconnecting clients which had their connections closed
// by the previous config closing. Default: no delay.
StreamCloseDelay caddy.Duration `json:"stream_close_delay,omitempty"`
// If configured, rewrites the copy of the upstream request.
// Allows changing the request method and URI (path and query).
// Since the rewrite is applied to the copy, it does not persist
// past the reverse proxy handler.
// If the method is changed to `GET` or `HEAD`, the request body
// will not be copied to the backend. This allows a later request
// handler -- either in a `handle_response` route, or after -- to
// read the body.
// By default, no rewrite is performed, and the method and URI
// from the incoming request is used as-is for proxying.
Rewrite *rewrite.Rewrite `json:"rewrite,omitempty"`
// List of handlers and their associated matchers to evaluate
// after successful roundtrips. The first handler that matches
// the response from a backend will be invoked. The response
// body from the backend will not be written to the client;
// it is up to the handler to finish handling the response.
// If passive health checks are enabled, any errors from the
// handler chain will not affect the health status of the
// backend.
//
// Three new placeholders are available in this handler chain:
// - `{http.reverse_proxy.status_code}` The status code from the response
// - `{http.reverse_proxy.status_text}` The status text from the response
// - `{http.reverse_proxy.header.*}` The headers from the response
HandleResponse []caddyhttp.ResponseHandler `json:"handle_response,omitempty"`
// If set, the proxy will write very detailed logs about its
// inner workings. Enable this only when debugging, as it
// will produce a lot of output.
//
// EXPERIMENTAL: This feature is subject to change or removal.
VerboseLogs bool `json:"verbose_logs,omitempty"`
Transport http.RoundTripper `json:"-"`
CB CircuitBreaker `json:"-"`
DynamicUpstreams UpstreamSource `json:"-"`
// Holds the parsed CIDR ranges from TrustedProxies
trustedProxies []netip.Prefix
// Holds the named response matchers from the Caddyfile while adapting
responseMatchers map[string]caddyhttp.ResponseMatcher
// Holds the handle_response Caddyfile tokens while adapting
handleResponseSegments []*caddyfile.Dispenser
// Stores upgraded requests (hijacked connections) for proper cleanup
connections map[io.ReadWriteCloser]openConnection
// Timer used to defer closing of the tracked connections
// (see StreamCloseDelay) -- assumption from field docs; confirm in code.
connectionsCloseTimer *time.Timer
// connectionsMu guards connections and connectionsCloseTimer
connectionsMu *sync.Mutex
// ctx is the Caddy context this handler was provisioned with.
ctx caddy.Context
// logger is the structured logger for this handler.
logger *zap.Logger
// events is the Caddy events app, used to emit events.
events *caddyevents.App
}
HeaderHashSelection
HeaderHashSelection is a policy that selects a host based on a given request header.
type HeaderHashSelection struct {
// The HTTP header field whose value is to be hashed and used for upstream selection.
Field string `json:"field,omitempty"`
// The fallback policy to use if the header is not present. Defaults to `random`.
FallbackRaw json.RawMessage `json:"fallback,omitempty" caddy:"namespace=http.reverse_proxy.selection_policies inline_key=policy"`
// fallback is the loaded module decoded from FallbackRaw.
fallback Selector
}
HealthChecks
HealthChecks configures active and passive health checks.
// HealthChecks configures active and passive health checks.
type HealthChecks struct {
// Active health checks run in the background on a timer. To
// minimally enable active health checks, set either path or
// port (or both). Note that active health check status
// (healthy/unhealthy) is stored per-proxy-handler, not
// globally; this allows different handlers to use different
// criteria to decide what defines a healthy backend.
//
// Active health checks do not run for dynamic upstreams.
Active *ActiveHealthChecks `json:"active,omitempty"`
// Passive health checks monitor proxied requests for errors or timeouts.
// To minimally enable passive health checks, specify at least an empty
// config object with fail_duration > 0. Passive health check state is
// shared (stored globally), so a failure from one handler will be counted
// by all handlers; but the tolerances or standards for what defines
// healthy/unhealthy backends is configured per-proxy-handler.
//
// Passive health checks technically do operate on dynamic upstreams,
// but are only effective for very busy proxies where the list of
// upstreams is mostly stable. This is because the shared/global
// state of upstreams is cleaned up when the upstreams are no longer
// used. Since dynamic upstreams are allocated dynamically at each
// request (specifically, each iteration of the proxy loop per request),
// they are also cleaned up after every request. Thus, if there is a
// moment when no requests are actively referring to a particular
// upstream host, the passive health check state will be reset because
// it will be garbage-collected. It is usually better for the dynamic
// upstream module to only return healthy, available backends instead.
Passive *PassiveHealthChecks `json:"passive,omitempty"`
}
Host
Host is the basic, in-memory representation of the state of a remote host. Its fields are accessed atomically and Host values must not be copied.
// Host is the basic, in-memory representation of the state of a remote
// host. Its fields are accessed atomically and Host values must not be
// copied.
type Host struct {
numRequests int64 // must be 64-bit aligned on 32-bit systems (see https://golang.org/pkg/sync/atomic/#pkg-note-BUG)
// fails counts recent failed requests (passive health checking).
fails int64
// activePasses counts consecutive active health check passes.
activePasses int64
// activeFails counts consecutive active health check failures.
activeFails int64
}
IPHashSelection
IPHashSelection is a policy that selects a host based on hashing the remote IP of the request.
// IPHashSelection selects a host by hashing the remote IP of the request.
type IPHashSelection struct{}
IPVersions
IPVersions controls which IP address families (IPv4/A records, IPv6/AAAA records) are enabled for resolution.
// IPVersions controls which IP address families are enabled
// for upstream resolution.
type IPVersions struct {
// IPv4 enables resolution of IPv4 (A record) addresses.
IPv4 *bool `json:"ipv4,omitempty"`
// IPv6 enables resolution of IPv6 (AAAA record) addresses.
IPv6 *bool `json:"ipv6,omitempty"`
}
KeepAlive
KeepAlive holds configuration pertaining to HTTP Keep-Alive.
// KeepAlive holds configuration pertaining to HTTP Keep-Alive.
type KeepAlive struct {
// Whether HTTP Keep-Alive is enabled. Default: `true`
Enabled *bool `json:"enabled,omitempty"`
// How often to probe for liveness. Default: `30s`.
ProbeInterval caddy.Duration `json:"probe_interval,omitempty"`
// Maximum number of idle connections. Default: `0`, which means no limit.
MaxIdleConns int `json:"max_idle_conns,omitempty"`
// Maximum number of idle connections per host. Default: `32`.
MaxIdleConnsPerHost int `json:"max_idle_conns_per_host,omitempty"`
// How long connections should be kept alive when idle. Default: `2m`.
IdleConnTimeout caddy.Duration `json:"idle_timeout,omitempty"`
}
LeastConnSelection
LeastConnSelection is a policy that selects the host with the least active requests. If multiple hosts have the same fewest number, one is chosen randomly. The term "conn" or "connection" is used in this policy name due to its similar meaning in other software, but our load balancer actually counts active requests rather than connections, since these days requests are multiplexed onto shared connections.
// LeastConnSelection selects the host with the fewest active requests,
// breaking ties randomly.
type LeastConnSelection struct{}
LoadBalancing
LoadBalancing has parameters related to load balancing.
type LoadBalancing struct {
// A selection policy is how to choose an available backend.
// The default policy is random selection.
SelectionPolicyRaw json.RawMessage `json:"selection_policy,omitempty" caddy:"namespace=http.reverse_proxy.selection_policies inline_key=policy"`
// How many times to retry selecting available backends for each
// request if the next available host is down. If try_duration is
// also configured, then retries may stop early if the duration
// is reached. By default, retries are disabled (zero).
Retries int `json:"retries,omitempty"`
// How long to try selecting available backends for each request
// if the next available host is down. Clients will wait for up
// to this long while the load balancer tries to find an available
// upstream host. If retries is also configured, tries may stop
// early if the maximum retries is reached. By default, retries
// are disabled (zero duration).
TryDuration caddy.Duration `json:"try_duration,omitempty"`
// How long to wait between selecting the next host from the pool.
// Default is 250ms if try_duration is enabled, otherwise zero. Only
// relevant when a request to an upstream host fails. Be aware that
// setting this to 0 with a non-zero try_duration can cause the CPU
// to spin if all backends are down and latency is very low.
TryInterval caddy.Duration `json:"try_interval,omitempty"`
// A list of matcher sets that restricts with which requests retries are
// allowed. A request must match any of the given matcher sets in order
// to be retried if the connection to the upstream succeeded but the
// subsequent round-trip failed. If the connection to the upstream failed,
// a retry is always allowed. If unspecified, only GET requests will be
// allowed to be retried. Note that a retry is done with the next available
// host according to the load balancing policy.
RetryMatchRaw caddyhttp.RawMatcherSets `json:"retry_match,omitempty" caddy:"namespace=http.matchers"`
// SelectionPolicy is the loaded module decoded from SelectionPolicyRaw.
SelectionPolicy Selector `json:"-"`
// RetryMatch is the loaded matcher sets decoded from RetryMatchRaw.
RetryMatch caddyhttp.MatcherSets `json:"-"`
}
MultiUpstreams
MultiUpstreams is a single dynamic upstream source that aggregates the results of multiple dynamic upstream sources. All configured sources will be queried in order, with their results appended to the end of the list. Errors returned from individual sources will be logged and the next source will continue to be invoked.
This module makes it easy to implement redundant cluster failovers, especially in conjunction with the `first` load balancing policy: if the first source returns an error or no upstreams, the second source's upstreams will be used naturally.
type MultiUpstreams struct {
// The list of upstream source modules to get upstreams from.
// They will be queried in order, with their results appended
// in the order they are returned.
SourcesRaw []json.RawMessage `json:"sources,omitempty" caddy:"namespace=http.reverse_proxy.upstreams inline_key=source"`
// sources is the loaded modules decoded from SourcesRaw.
sources []UpstreamSource
// logger is the structured logger for this upstream source.
logger *zap.Logger
}
PassiveHealthChecks
PassiveHealthChecks holds configuration related to passive health checks (that is, health checks which occur during the normal flow of request proxying).
type PassiveHealthChecks struct {
// How long to remember a failed request to a backend. A duration > 0
// enables passive health checking. Default is 0.
FailDuration caddy.Duration `json:"fail_duration,omitempty"`
// The number of failed requests within the FailDuration window to
// consider a backend as "down". Must be >= 1; default is 1. Requires
// that FailDuration be > 0.
MaxFails int `json:"max_fails,omitempty"`
// Limits the number of simultaneous requests to a backend by
// marking the backend as "down" if it has this many concurrent
// requests or more.
UnhealthyRequestCount int `json:"unhealthy_request_count,omitempty"`
// Count the request as failed if the response comes back with
// one of these status codes.
UnhealthyStatus []int `json:"unhealthy_status,omitempty"`
// Count the request as failed if the response takes at least this
// long to receive.
UnhealthyLatency caddy.Duration `json:"unhealthy_latency,omitempty"`
// logger is the structured logger for passive health check events.
logger *zap.Logger
}
ProxyProtocolInfo
ProxyProtocolInfo contains information needed to write proxy protocol to a connection to an upstream host.
// ProxyProtocolInfo contains information needed to write PROXY
// protocol headers to a connection to an upstream host.
type ProxyProtocolInfo struct {
// AddrPort is the client address and port to advertise.
AddrPort netip.AddrPort
}
QueryHashSelection
QueryHashSelection is a policy that selects a host based on a given request query parameter.
type QueryHashSelection struct {
// The query key whose value is to be hashed and used for upstream selection.
Key string `json:"key,omitempty"`
// The fallback policy to use if the query key is not present. Defaults to `random`.
FallbackRaw json.RawMessage `json:"fallback,omitempty" caddy:"namespace=http.reverse_proxy.selection_policies inline_key=policy"`
// fallback is the loaded module decoded from FallbackRaw.
fallback Selector
}
RandomChoiceSelection
RandomChoiceSelection is a policy that selects two or more available hosts at random, then chooses the one with the least load.
// RandomChoiceSelection picks two or more hosts at random, then
// chooses the one with the least load.
type RandomChoiceSelection struct {
// The size of the sub-pool created from the larger upstream pool. The default value
// is 2 and the maximum at selection time is the size of the upstream pool.
Choose int `json:"choose,omitempty"`
}
RandomSelection
RandomSelection is a policy that selects an available host at random.
// RandomSelection is a policy that selects an available host at random.
type RandomSelection struct{}
RoundRobinSelection
RoundRobinSelection is a policy that selects a host based on round-robin ordering.
// RoundRobinSelection is a policy that selects
// a host based on round-robin ordering.
type RoundRobinSelection struct {
	// robin is the rolling selection counter; presumably accessed
	// atomically by Select — TODO confirm.
	robin uint32
}
SRVUpstreams
SRVUpstreams provides upstreams from SRV lookups. The lookup DNS name can be configured either by its individual parts (that is, specifying the service, protocol, and name separately) to form the standard "_service._proto.name" domain, or the domain can be specified directly in name by leaving service and proto empty. See RFC 2782.
Lookups are cached and refreshed at the configured refresh interval.
Returned upstreams are sorted by priority and weight.
// SRVUpstreams provides upstreams from SRV lookups. The lookup DNS
// name can be configured either by its individual parts (that is,
// specifying the service, protocol, and name separately) to form
// the standard "_service._proto.name" domain, or the domain can be
// specified directly in name by leaving service and proto empty.
// See RFC 2782.
//
// Lookups are cached and refreshed at the configured refresh interval.
//
// Returned upstreams are sorted by priority and weight.
type SRVUpstreams struct {
	// The service label.
	Service string `json:"service,omitempty"`

	// The protocol label; either tcp or udp.
	Proto string `json:"proto,omitempty"`

	// The name label; or, if service and proto are
	// empty, the entire domain name to look up.
	Name string `json:"name,omitempty"`

	// The interval at which to refresh the SRV lookup.
	// Results are cached between lookups. Default: 1m
	Refresh caddy.Duration `json:"refresh,omitempty"`

	// If > 0 and there is an error with the lookup,
	// continue to use the cached results for up to
	// this long before trying again, (even though they
	// are stale) instead of returning an error to the
	// client. Default: 0s.
	GracePeriod caddy.Duration `json:"grace_period,omitempty"`

	// Configures the DNS resolver used to resolve the
	// SRV address to SRV records.
	Resolver *UpstreamResolver `json:"resolver,omitempty"`

	// If Resolver is configured, how long to wait before
	// timing out trying to connect to the DNS server.
	DialTimeout caddy.Duration `json:"dial_timeout,omitempty"`

	// If Resolver is configured, how long to wait before
	// spawning an RFC 6555 Fast Fallback connection.
	// A negative value disables this.
	FallbackDelay caddy.Duration `json:"dial_fallback_delay,omitempty"`

	// resolver is the net.Resolver built from the Resolver
	// config, used to perform the SRV lookups.
	resolver *net.Resolver

	// logger for lookup diagnostics.
	logger *zap.Logger
}
Selector
Selector selects an available upstream from the pool.
// Selector selects an available upstream from the pool.
type Selector interface {
	// Select returns the chosen upstream for the given request;
	// presumably it returns nil when no upstream is available —
	// TODO confirm nil contract against callers.
	Select(UpstreamPool, *http.Request, http.ResponseWriter) *Upstream
}
TLSConfig
TLSConfig holds configuration related to the TLS configuration for the transport/client.
// TLSConfig holds configuration related to the
// TLS configuration for the transport/client.
type TLSConfig struct {
	// Certificate authority module which provides the certificate pool of trusted certificates
	CARaw json.RawMessage `json:"ca,omitempty" caddy:"namespace=tls.ca_pool.source inline_key=provider"`

	// Deprecated: Use the `ca` field with the `tls.ca_pool.source.inline` module instead.
	// Optional list of base64-encoded DER-encoded CA certificates to trust.
	RootCAPool []string `json:"root_ca_pool,omitempty"`

	// Deprecated: Use the `ca` field with the `tls.ca_pool.source.file` module instead.
	// List of PEM-encoded CA certificate files to add to the same trust
	// store as RootCAPool (or root_ca_pool in the JSON).
	RootCAPEMFiles []string `json:"root_ca_pem_files,omitempty"`

	// PEM-encoded client certificate filename to present to servers.
	ClientCertificateFile string `json:"client_certificate_file,omitempty"`

	// PEM-encoded key to use with the client certificate.
	ClientCertificateKeyFile string `json:"client_certificate_key_file,omitempty"`

	// If specified, Caddy will use and automate a client certificate
	// with this subject name.
	ClientCertificateAutomate string `json:"client_certificate_automate,omitempty"`

	// If true, TLS verification of server certificates will be disabled.
	// This is insecure and may be removed in the future. Do not use this
	// option except in testing or local development environments.
	InsecureSkipVerify bool `json:"insecure_skip_verify,omitempty"`

	// The duration to allow a TLS handshake to a server. Default: No timeout.
	HandshakeTimeout caddy.Duration `json:"handshake_timeout,omitempty"`

	// The server name used when verifying the certificate received in the TLS
	// handshake. By default, this will use the upstream address' host part.
	// You only need to override this if your upstream address does not match the
	// certificate the upstream is likely to use. For example if the upstream
	// address is an IP address, then you would need to configure this to the
	// hostname being served by the upstream server. Currently, this does not
	// support placeholders because the TLS config is not provisioned on each
	// connection, so a static value must be used.
	ServerName string `json:"server_name,omitempty"`

	// TLS renegotiation level. TLS renegotiation is the act of performing
	// subsequent handshakes on a connection after the first.
	// The level can be:
	//  - "never": (the default) disables renegotiation.
	//  - "once": allows a remote server to request renegotiation once per connection.
	//  - "freely": allows a remote server to repeatedly request renegotiation.
	Renegotiation string `json:"renegotiation,omitempty"`

	// Skip TLS ports specifies a list of upstream ports on which TLS should not be
	// attempted even if it is configured. Handy when using dynamic upstreams that
	// return HTTP and HTTPS endpoints too.
	// When specified, TLS will automatically be configured on the transport.
	// The value can be a list of any valid tcp port numbers, default empty.
	ExceptPorts []string `json:"except_ports,omitempty"`

	// The list of elliptic curves to support. Caddy's
	// defaults are modern and secure.
	Curves []string `json:"curves,omitempty"`
}
TLSTransport
TLSTransport is implemented by transports that are capable of using TLS.
// TLSTransport is implemented by transports
// that are capable of using TLS.
type TLSTransport interface {
	// TLSEnabled returns true if the transport
	// has TLS enabled, false otherwise.
	TLSEnabled() bool

	// EnableTLS enables TLS within the transport
	// if it is not already, using the provided
	// value as a basis for the TLS config.
	EnableTLS(base *TLSConfig) error
}
URIHashSelection
URIHashSelection is a policy that selects a host by hashing the request URI.
// URIHashSelection is a policy that selects a host by hashing the request URI.
type URIHashSelection struct{}
Upstream
Upstream bridges this proxy's configuration to the state of the backend host it is correlated with. Upstream values must not be copied.
// Upstream bridges this proxy's configuration to the
// state of the backend host it is correlated with.
// Upstream values must not be copied.
type Upstream struct {
	*Host `json:"-"`

	// The [network address](/docs/conventions#network-addresses)
	// to dial to connect to the upstream. Must represent precisely
	// one socket (i.e. no port ranges). A valid network address
	// either has a host and port or is a unix socket address.
	//
	// Placeholders may be used to make the upstream dynamic, but be
	// aware of the health check implications of this: a single
	// upstream that represents numerous (perhaps arbitrary) backends
	// can be considered down if one or enough of the arbitrary
	// backends is down. Also be aware of open proxy vulnerabilities.
	Dial string `json:"dial,omitempty"`

	// The maximum number of simultaneous requests to allow to
	// this upstream. If set, overrides the global passive health
	// check UnhealthyRequestCount value.
	MaxRequests int `json:"max_requests,omitempty"`

	// TODO: This could be really useful, to bind requests
	// with certain properties to specific backends
	// HeaderAffinity string
	// IPAffinity string

	// activeHealthCheckPort presumably overrides the port used for
	// active health checks on this upstream — TODO confirm.
	activeHealthCheckPort int
	// activeHealthCheckUpstream presumably overrides the address
	// dialed for active health checks — TODO confirm.
	activeHealthCheckUpstream string
	// healthCheckPolicy holds the passive health check config
	// applied to this upstream.
	healthCheckPolicy *PassiveHealthChecks
	// cb is this upstream's circuit breaker, if configured.
	cb        CircuitBreaker
	unhealthy int32 // accessed atomically; status from active health checker
}
UpstreamPool
UpstreamPool is a collection of upstreams.
// UpstreamPool is a collection of upstreams.
type UpstreamPool []*Upstream
UpstreamResolver
UpstreamResolver holds the set of addresses of DNS resolvers of upstream addresses
// UpstreamResolver holds the set of addresses of DNS resolvers of
// upstream addresses.
type UpstreamResolver struct {
	// The addresses of DNS resolvers to use when looking up the addresses of proxy upstreams.
	// It accepts [network addresses](/docs/conventions#network-addresses)
	// with port range of only 1. If the host is an IP address, it will be dialed directly to resolve the upstream server.
	// If the host is not an IP address, the addresses are resolved using the [name resolution convention](https://golang.org/pkg/net/#hdr-Name_Resolution) of the Go standard library.
	// If the array contains more than 1 resolver address, one is chosen at random.
	Addresses []string `json:"addresses,omitempty"`

	// netAddrs is Addresses parsed into caddy.NetworkAddress values
	// (see ParseAddresses); one is picked at random per lookup.
	netAddrs []caddy.NetworkAddress
}
UpstreamSource
UpstreamSource gets the list of upstreams that can be used when proxying a request. Returned upstreams will be load balanced and health-checked. This should be a very fast function -- instant if possible -- and the return value must be as stable as possible. In other words, the list of upstreams should ideally not change much across successive calls. If the list of upstreams changes or the ordering is not stable, load balancing will suffer. This function may be called during each retry, multiple times per request, and as such, needs to be instantaneous. The returned slice will not be modified.
// UpstreamSource gets the list of upstreams that can be used when
// proxying a request. Returned upstreams will be load balanced and
// health-checked. This should be a very fast function -- instant
// if possible -- and the return value must be as stable as possible.
// In other words, the list of upstreams should ideally not change much
// across successive calls. If the list of upstreams changes or the
// ordering is not stable, load balancing will suffer. This function
// may be called during each retry, multiple times per request, and as
// such, needs to be instantaneous. The returned slice will not be
// modified.
type UpstreamSource interface {
	GetUpstreams(*http.Request) ([]*Upstream, error)
}
WeightedRoundRobinSelection
WeightedRoundRobinSelection is a policy that selects a host based on weighted round-robin ordering.
// WeightedRoundRobinSelection is a policy that selects
// a host based on weighted round-robin ordering.
type WeightedRoundRobinSelection struct {
	// The weight of each upstream in order,
	// corresponding with the list of upstreams configured.
	Weights []int `json:"weights,omitempty"`

	// index is the rolling position within the weighted
	// sequence; presumably accessed atomically — TODO confirm.
	index uint32
	// totalWeight caches the sum of Weights.
	totalWeight int
}
aLookup
aLookup caches the upstreams resolved from an A/AAAA DNS lookup, along with the time the results were obtained.
// aLookup is a cache entry for an A/AAAA lookup: the upstreams
// that were resolved and when they were obtained (so staleness
// can be checked against the configured refresh interval).
type aLookup struct {
	aUpstreams AUpstreams
	freshness  time.Time
	upstreams  []Upstream
}
adminUpstreams
adminUpstreams is a module that provides the /reverse_proxy/upstreams endpoint for the Caddy admin API. This allows for checking the health of configured reverse proxy upstreams in the pool.
// adminUpstreams is a module that provides the
// /reverse_proxy/upstreams endpoint for the Caddy admin
// API. This allows for checking the health of configured
// reverse proxy upstreams in the pool.
type adminUpstreams struct{}
bodyReadCloser
bodyReadCloser is a reader that, upon closing, will return its buffer to the pool and close the underlying body reader.
// bodyReadCloser is a reader that, upon closing, will return
// its buffer to the pool and close the underlying body reader.
type bodyReadCloser struct {
	io.Reader

	// buf is returned to the shared pool on Close.
	buf *bytes.Buffer
	// body is the underlying body, closed on Close.
	body io.ReadCloser
}
h2ReadWriteCloser
This type doesn't have documentation.
// h2ReadWriteCloser glues a request body reader and a response
// writer into a single io.ReadWriteCloser; presumably used for
// bidirectional streaming over HTTP/2 — TODO confirm usage site.
type h2ReadWriteCloser struct {
	io.ReadCloser
	http.ResponseWriter
}
handleResponseContext
handleResponseContext carries some contextual information about the current proxy handling.
// handleResponseContext carries some contextual
// information about the current proxy handling.
type handleResponseContext struct {
	// handler is the active proxy handler instance, so that
	// routes like copy_response may inherit some config
	// options and have access to handler methods.
	handler *Handler

	// response is the actual response received from the proxy
	// roundtrip, to potentially be copied if a copy_response
	// handler is in the handle_response routes.
	response *http.Response

	// start is the time just before the proxy roundtrip was
	// performed, used for logging.
	start time.Time

	// logger is the prepared logger which is used to write logs
	// with the request, duration, and selected upstream attached.
	logger *zap.Logger

	// isFinalized is whether the response has been finalized,
	// i.e. copied and closed, to make sure that it doesn't
	// happen twice.
	isFinalized bool
}
maxLatencyWriter
This type doesn't have documentation.
// maxLatencyWriter wraps a writer and rate-limits flushes:
// at most one flush per latency interval (a negative latency
// means flush immediately after every write).
type maxLatencyWriter struct {
	dst          io.Writer
	flush        func() error
	latency      time.Duration // non-zero; negative means to flush immediately
	mu           sync.Mutex    // protects t, flushPending, and dst.Flush
	t            *time.Timer   // delayed-flush timer, created lazily
	flushPending bool          // whether a flush is already scheduled
	logger       *zap.Logger
}
metricsUpstreamsHealthyUpdater
This type doesn't have documentation.
// metricsUpstreamsHealthyUpdater updates the healthy-upstreams
// metrics for the wrapped handler's upstreams — TODO confirm
// exact metric names against its methods.
type metricsUpstreamsHealthyUpdater struct {
	handler *Handler
}
openConnection
openConnection maps an open connection to an optional function for graceful close.
// openConnection maps an open connection to an
// optional function for graceful close.
type openConnection struct {
	conn          io.ReadWriteCloser
	gracefulClose func() error // may be nil when no graceful close is needed
}
parsedAddr
parsedAddr holds the components of a parsed upstream dial address.
// parsedAddr holds the components of a parsed upstream dial
// address (see parseUpstreamDialAddress).
type parsedAddr struct {
	network, scheme, host, port string
	valid                       bool // whether parsing succeeded
}
roundtripSucceededError
roundtripSucceededError is an error type that is returned if the roundtrip succeeded, but an error occurred after-the-fact.
// roundtripSucceededError is an error type that is returned if the
// roundtrip succeeded, but an error occurred after-the-fact.
type roundtripSucceededError struct{ error }
srvLookup
srvLookup caches the upstreams resolved from an SRV DNS lookup, along with the time the results were obtained.
// srvLookup is a cache entry for an SRV lookup: the upstreams
// that were resolved and when they were obtained (so staleness
// can be checked against the configured refresh interval).
type srvLookup struct {
	srvUpstreams SRVUpstreams
	freshness    time.Time
	upstreams    []Upstream
}
switchProtocolCopier
switchProtocolCopier exists so goroutines proxying data back and forth have nice names in stacks.
// switchProtocolCopier exists so goroutines proxying data
// back and forth have nice names in stacks.
type switchProtocolCopier struct {
	// user is the client connection; backend is the upstream.
	user, backend io.ReadWriteCloser
	// wg tracks the two copy goroutines so the caller can wait
	// for both directions to finish.
	wg *sync.WaitGroup
}
tcpRWTimeoutConn
tcpRWTimeoutConn enforces read/write timeouts for a TCP connection. If it fails to set deadlines, the error is logged but does not abort the read/write attempt (ignoring the error is consistent with what the standard library does: https://github.com/golang/go/blob/c5da4fb7ac5cb7434b41fc9a1df3bee66c7f1a4d/src/net/http/server.go#L981-L986)
// tcpRWTimeoutConn enforces read/write timeouts for a TCP connection.
// If it fails to set deadlines, the error is logged but does not abort
// the read/write attempt (ignoring the error is consistent with what
// the standard library does:
// https://github.com/golang/go/blob/c5da4fb7ac5cb7434b41fc9a1df3bee66c7f1a4d/src/net/http/server.go#L981-L986)
type tcpRWTimeoutConn struct {
	*net.TCPConn

	// zero means the corresponding deadline is not enforced
	readTimeout, writeTimeout time.Duration
	logger                    *zap.Logger
}
upstreamStatus
upstreamStatus holds the status of a particular upstream
// upstreamStatus holds the status of a particular upstream,
// serialized for the admin API's upstreams endpoint.
type upstreamStatus struct {
	Address     string `json:"address"`
	NumRequests int    `json:"num_requests"`
	Fails       int    `json:"fails"`
}
Functions
func GetDialInfo
GetDialInfo gets the upstream dialing info out of the context, and returns true if there was a valid value; false otherwise.
// GetDialInfo gets the upstream dialing info out of the context,
// and returns true if there was a valid value; false otherwise.
func GetDialInfo(ctx context.Context) (DialInfo, bool) {
	if di, ok := caddyhttp.GetVar(ctx, dialInfoVarKey).(DialInfo); ok {
		return di, true
	}
	return DialInfo{}, false
}
Cognitive complexity: 0
, Cyclomatic complexity: 1
func (*ActiveHealthChecks) IsEnabled
IsEnabled checks if the active health checks have the minimum config necessary to be enabled.
// IsEnabled checks if the active health checks have
// the minimum config necessary to be enabled.
func (a *ActiveHealthChecks) IsEnabled() bool {
	switch {
	case a.Path != "":
		return true
	case a.URI != "":
		return true
	case a.Port != 0:
		return true
	default:
		return false
	}
}
Cognitive complexity: 0
, Cyclomatic complexity: 3
func (*CopyResponseHandler) Provision
Provision ensures that h is set up properly before use.
// Provision ensures that h is set up properly before use.
func (h *CopyResponseHandler) Provision(ctx caddy.Context) error {
	// retain the caddy context for use while handling requests
	h.ctx = ctx

	return nil
}
Cognitive complexity: 0
, Cyclomatic complexity: 1
func (*CopyResponseHeadersHandler) Validate
Validate ensures the h's configuration is valid.
// Validate ensures the h's configuration is valid:
// the include and exclude lists are mutually exclusive.
func (h *CopyResponseHeadersHandler) Validate() error {
	if len(h.Include) != 0 && len(h.Exclude) != 0 {
		return fmt.Errorf("cannot define both 'exclude' and 'include' lists at the same time")
	}

	return nil
}
Cognitive complexity: 2
, Cyclomatic complexity: 3
func (*HTTPTransport) EnableTLS
EnableTLS enables TLS on the transport.
// EnableTLS enables TLS on the transport by installing
// the given config as-is.
func (h *HTTPTransport) EnableTLS(base *TLSConfig) error {
	h.TLS = base

	return nil
}
Cognitive complexity: 0
, Cyclomatic complexity: 1
func (*HTTPTransport) NewTransport
NewTransport builds a standard-lib-compatible http.Transport value from h.
// NewTransport builds a standard-lib-compatible http.Transport value
// from h. It fills in defaults (keep-alive, dial timeout), builds the
// dialer (local address binding, custom DNS resolver, PROXY protocol
// header, read/write timeouts), configures any forward proxy, and
// wires up HTTP/2, HTTP/3, or H2C support as requested by h.Versions.
func (h *HTTPTransport) NewTransport(caddyCtx caddy.Context) (*http.Transport, error) {
	// Set keep-alive defaults if it wasn't otherwise configured
	if h.KeepAlive == nil {
		h.KeepAlive = &KeepAlive{
			ProbeInterval:       caddy.Duration(30 * time.Second),
			IdleConnTimeout:     caddy.Duration(2 * time.Minute),
			MaxIdleConnsPerHost: 32, // seems about optimal, see #2805
		}
	}

	// Set a relatively short default dial timeout.
	// This is helpful to make load-balancer retries more speedy.
	if h.DialTimeout == 0 {
		h.DialTimeout = caddy.Duration(3 * time.Second)
	}

	dialer := &net.Dialer{
		Timeout:       time.Duration(h.DialTimeout),
		FallbackDelay: time.Duration(h.FallbackDelay),
	}

	// bind outgoing connections to a specific local address, if configured
	if h.LocalAddress != "" {
		netaddr, err := caddy.ParseNetworkAddressWithDefaults(h.LocalAddress, "tcp", 0)
		if err != nil {
			return nil, err
		}
		if netaddr.PortRangeSize() > 1 {
			return nil, fmt.Errorf("local_address must be a single address, not a port range")
		}
		switch netaddr.Network {
		case "tcp", "tcp4", "tcp6":
			dialer.LocalAddr, err = net.ResolveTCPAddr(netaddr.Network, netaddr.JoinHostPort(0))
			if err != nil {
				return nil, err
			}
		case "unix", "unixgram", "unixpacket":
			dialer.LocalAddr, err = net.ResolveUnixAddr(netaddr.Network, netaddr.JoinHostPort(0))
			if err != nil {
				return nil, err
			}
		case "udp", "udp4", "udp6":
			return nil, fmt.Errorf("local_address must be a TCP address, not a UDP address")
		default:
			return nil, fmt.Errorf("unsupported network")
		}
	}

	// install a custom DNS resolver on the dialer, if configured;
	// one of the configured resolver addresses is chosen at random
	// for each lookup
	if h.Resolver != nil {
		err := h.Resolver.ParseAddresses()
		if err != nil {
			return nil, err
		}
		d := &net.Dialer{
			Timeout:       time.Duration(h.DialTimeout),
			FallbackDelay: time.Duration(h.FallbackDelay),
		}
		dialer.Resolver = &net.Resolver{
			PreferGo: true,
			Dial: func(ctx context.Context, _, _ string) (net.Conn, error) {
				//nolint:gosec
				addr := h.Resolver.netAddrs[weakrand.Intn(len(h.Resolver.netAddrs))]
				return d.DialContext(ctx, addr.Network, addr.JoinHostPort(0))
			},
		}
	}

	dialContext := func(ctx context.Context, network, address string) (net.Conn, error) {
		// For unix socket upstreams, we need to recover the dial info from
		// the request's context, because the Host on the request's URL
		// will have been modified by directing the request, overwriting
		// the unix socket filename.
		// Also, we need to avoid overwriting the address at this point
		// when not necessary, because http.ProxyFromEnvironment may have
		// modified the address according to the user's env proxy config.
		if dialInfo, ok := GetDialInfo(ctx); ok {
			if strings.HasPrefix(dialInfo.Network, "unix") {
				network = dialInfo.Network
				address = dialInfo.Address
			}
		}

		conn, err := dialer.DialContext(ctx, network, address)
		if err != nil {
			// identify this error as one that occurred during
			// dialing, which can be important when trying to
			// decide whether to retry a request
			return nil, DialError{err}
		}

		// write the PROXY protocol header immediately after connecting,
		// if configured; it must be the very first bytes on the wire
		if h.ProxyProtocol != "" {
			proxyProtocolInfo, ok := caddyhttp.GetVar(ctx, proxyProtocolInfoVarKey).(ProxyProtocolInfo)
			if !ok {
				return nil, fmt.Errorf("failed to get proxy protocol info from context")
			}
			var proxyv byte
			switch h.ProxyProtocol {
			case "v1":
				proxyv = 1
			case "v2":
				proxyv = 2
			default:
				return nil, fmt.Errorf("unexpected proxy protocol version")
			}

			// The src and dst have to be of the same address family. As we don't know the original
			// dst address (it's kind of impossible to know) and this address is generally of very
			// little interest, we just set it to all zeros.
			var destAddr net.Addr
			switch {
			case proxyProtocolInfo.AddrPort.Addr().Is4():
				destAddr = &net.TCPAddr{
					IP: net.IPv4zero,
				}
			case proxyProtocolInfo.AddrPort.Addr().Is6():
				destAddr = &net.TCPAddr{
					IP: net.IPv6zero,
				}
			default:
				return nil, fmt.Errorf("unexpected remote addr type in proxy protocol info")
			}
			sourceAddr := &net.TCPAddr{
				IP:   proxyProtocolInfo.AddrPort.Addr().AsSlice(),
				Port: int(proxyProtocolInfo.AddrPort.Port()),
				Zone: proxyProtocolInfo.AddrPort.Addr().Zone(),
			}
			header := proxyproto.HeaderProxyFromAddrs(proxyv, sourceAddr, destAddr)

			// retain the log message structure
			switch h.ProxyProtocol {
			case "v1":
				caddyCtx.Logger().Debug("sending proxy protocol header v1", zap.Any("header", header))
			case "v2":
				caddyCtx.Logger().Debug("sending proxy protocol header v2", zap.Any("header", header))
			}

			_, err = header.WriteTo(conn)
			if err != nil {
				// identify this error as one that occurred during
				// dialing, which can be important when trying to
				// decide whether to retry a request
				return nil, DialError{err}
			}
		}

		// if read/write timeouts are configured and this is a TCP connection,
		// enforce the timeouts by wrapping the connection with our own type
		if tcpConn, ok := conn.(*net.TCPConn); ok && (h.ReadTimeout > 0 || h.WriteTimeout > 0) {
			conn = &tcpRWTimeoutConn{
				TCPConn:      tcpConn,
				readTimeout:  time.Duration(h.ReadTimeout),
				writeTimeout: time.Duration(h.WriteTimeout),
				logger:       caddyCtx.Logger(),
			}
		}

		return conn, nil
	}

	// negotiate any HTTP/SOCKS proxy for the HTTP transport
	var proxy func(*http.Request) (*url.URL, error)
	if h.ForwardProxyURL != "" {
		pUrl, err := url.Parse(h.ForwardProxyURL)
		if err != nil {
			return nil, fmt.Errorf("failed to parse transport proxy url: %v", err)
		}
		caddyCtx.Logger().Info("setting transport proxy url", zap.String("url", h.ForwardProxyURL))
		proxy = http.ProxyURL(pUrl)
	} else {
		proxy = http.ProxyFromEnvironment
	}

	rt := &http.Transport{
		Proxy:                  proxy,
		DialContext:            dialContext,
		MaxConnsPerHost:        h.MaxConnsPerHost,
		ResponseHeaderTimeout:  time.Duration(h.ResponseHeaderTimeout),
		ExpectContinueTimeout:  time.Duration(h.ExpectContinueTimeout),
		MaxResponseHeaderBytes: h.MaxResponseHeaderSize,
		WriteBufferSize:        h.WriteBufferSize,
		ReadBufferSize:         h.ReadBufferSize,
	}

	if h.TLS != nil {
		rt.TLSHandshakeTimeout = time.Duration(h.TLS.HandshakeTimeout)
		var err error
		rt.TLSClientConfig, err = h.TLS.MakeTLSClientConfig(caddyCtx)
		if err != nil {
			return nil, fmt.Errorf("making TLS client config: %v", err)
		}
	}

	if h.KeepAlive != nil {
		dialer.KeepAlive = time.Duration(h.KeepAlive.ProbeInterval)
		if h.KeepAlive.Enabled != nil {
			rt.DisableKeepAlives = !*h.KeepAlive.Enabled
		}
		rt.MaxIdleConns = h.KeepAlive.MaxIdleConns
		rt.MaxIdleConnsPerHost = h.KeepAlive.MaxIdleConnsPerHost
		rt.IdleConnTimeout = time.Duration(h.KeepAlive.IdleConnTimeout)
	}

	// The proxy protocol header can only be sent once right after opening the connection.
	// So single connection must not be used for multiple requests, which can potentially
	// come from different clients.
	if !rt.DisableKeepAlives && h.ProxyProtocol != "" {
		caddyCtx.Logger().Warn("disabling keepalives, they are incompatible with using PROXY protocol")
		rt.DisableKeepAlives = true
	}

	if h.Compression != nil {
		rt.DisableCompression = !*h.Compression
	}

	if slices.Contains(h.Versions, "2") {
		if err := http2.ConfigureTransport(rt); err != nil {
			return nil, err
		}
	}

	// configure HTTP/3 transport if enabled; however, this does not
	// automatically fall back to lower versions like most web browsers
	// do (that'd add latency and complexity, besides, we expect that
	// site owners control the backends), so it must be exclusive
	if len(h.Versions) == 1 && h.Versions[0] == "3" {
		h.h3Transport = new(http3.Transport)
		if h.TLS != nil {
			var err error
			h.h3Transport.TLSClientConfig, err = h.TLS.MakeTLSClientConfig(caddyCtx)
			if err != nil {
				return nil, fmt.Errorf("making TLS client config for HTTP/3 transport: %v", err)
			}
		}
	} else if len(h.Versions) > 1 && slices.Contains(h.Versions, "3") {
		return nil, fmt.Errorf("if HTTP/3 is enabled to the upstream, no other HTTP versions are supported")
	}

	// if h2c is enabled, configure its transport (std lib http.Transport
	// does not support "HTTP/2 over cleartext TCP")
	if slices.Contains(h.Versions, "h2c") {
		// crafting our own http2.Transport doesn't allow us to utilize
		// most of the customizations/preferences on the http.Transport,
		// because, for some reason, only http2.ConfigureTransport()
		// is allowed to set the unexported field that refers to a base
		// http.Transport config; oh well
		h2t := &http2.Transport{
			// kind of a hack, but for plaintext/H2C requests, pretend to dial TLS
			DialTLSContext: func(ctx context.Context, network, address string, _ *tls.Config) (net.Conn, error) {
				return dialContext(ctx, network, address)
			},
			AllowHTTP: true,
		}
		if h.Compression != nil {
			h2t.DisableCompression = !*h.Compression
		}
		h.h2cTransport = h2t
	}

	return rt, nil
}
Cognitive complexity: 98
, Cyclomatic complexity: 51
func (*HTTPTransport) RoundTrip
RoundTrip implements http.RoundTripper.
// RoundTrip implements http.RoundTripper.
func (h *HTTPTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	// replace the TLS server name with any placeholder value
	// from the request, then make sure the URL has a scheme
	repl := req.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
	transport := h.replaceTLSServername(repl)
	transport.SetScheme(req)

	switch {
	// use HTTP/3 if enabled (TODO: This is EXPERIMENTAL)
	case h.h3Transport != nil:
		return h.h3Transport.RoundTrip(req)

	// if H2C ("HTTP/2 over cleartext") is enabled and the upstream request
	// is HTTP without TLS, use the alternate H2C-capable transport instead
	case h.h2cTransport != nil && req.URL.Scheme == "http":
		// There is no dedicated DisableKeepAlives field in *http2.Transport.
		// This is an alternative way to disable keep-alive.
		req.Close = h.Transport.DisableKeepAlives
		return h.h2cTransport.RoundTrip(req)
	}

	return transport.Transport.RoundTrip(req)
}
Cognitive complexity: 4
, Cyclomatic complexity: 4
func (*HTTPTransport) SetScheme
SetScheme ensures that the outbound request req has the scheme set in its URL; the underlying http.Transport requires a scheme to be set.
This method may be used by other transport modules that wrap/use this one.
// SetScheme ensures that the outbound request req has the scheme set
// in its URL; the underlying http.Transport requires a scheme to be set.
//
// This method may be used by other transport modules that wrap/use this one.
func (h *HTTPTransport) SetScheme(req *http.Request) {
	// never override a scheme that is already present
	if req.URL.Scheme != "" {
		return
	}

	scheme := "http"
	if h.shouldUseTLS(req) {
		scheme = "https"
	}
	req.URL.Scheme = scheme
}
Cognitive complexity: 6
, Cyclomatic complexity: 3
func (*Handler) FinalizeUnmarshalCaddyfile
FinalizeUnmarshalCaddyfile finalizes the Caddyfile parsing which requires having an httpcaddyfile.Helper to function, to parse subroutes.
// FinalizeUnmarshalCaddyfile finalizes the Caddyfile parsing which
// requires having an httpcaddyfile.Helper to function, to parse subroutes.
// Each "handle_response" segment collected during UnmarshalCaddyfile is
// parsed into a ResponseHandler; handlers without a matcher are then
// moved to the end so matched handlers are tried first.
func (h *Handler) FinalizeUnmarshalCaddyfile(helper httpcaddyfile.Helper) error {
	for _, d := range h.handleResponseSegments {
		// consume the "handle_response" token
		d.Next()
		args := d.RemainingArgs()

		// TODO: Remove this check at some point in the future
		if len(args) == 2 {
			return d.Errf("configuring 'handle_response' for status code replacement is no longer supported. Use 'replace_status' instead.")
		}

		if len(args) > 1 {
			return d.Errf("too many arguments for 'handle_response': %s", args)
		}

		// an optional single argument is a reference to a named
		// response matcher defined earlier with the "@" prefix
		var matcher *caddyhttp.ResponseMatcher
		if len(args) == 1 {
			// the first arg should always be a matcher.
			if !strings.HasPrefix(args[0], matcherPrefix) {
				return d.Errf("must use a named response matcher, starting with '@'")
			}

			foundMatcher, ok := h.responseMatchers[args[0]]
			if !ok {
				return d.Errf("no named response matcher defined with name '%s'", args[0][1:])
			}
			matcher = &foundMatcher
		}

		// parse the block as routes
		handler, err := httpcaddyfile.ParseSegmentAsSubroute(helper.WithDispenser(d.NewFromNextSegment()))
		if err != nil {
			return err
		}
		subroute, ok := handler.(*caddyhttp.Subroute)
		if !ok {
			return helper.Errf("segment was not parsed as a subroute")
		}
		h.HandleResponse = append(
			h.HandleResponse,
			caddyhttp.ResponseHandler{
				Match:  matcher,
				Routes: subroute.Routes,
			},
		)
	}

	// move the handle_response entries without a matcher to the end.
	// we can't use sort.SliceStable because it will reorder the rest of the
	// entries which may be undesirable because we don't have a good
	// heuristic to use for sorting.
	withoutMatchers := []caddyhttp.ResponseHandler{}
	withMatchers := []caddyhttp.ResponseHandler{}
	for _, hr := range h.HandleResponse {
		if hr.Match == nil {
			withoutMatchers = append(withoutMatchers, hr)
		} else {
			withMatchers = append(withMatchers, hr)
		}
	}
	h.HandleResponse = append(withMatchers, withoutMatchers...)

	// clean up the bits we only needed for adapting
	h.handleResponseSegments = nil
	h.responseMatchers = nil

	return nil
}
Cognitive complexity: 27
, Cyclomatic complexity: 11
func (*Handler) UnmarshalCaddyfile
UnmarshalCaddyfile sets up the handler from Caddyfile tokens. Syntax:
reverse_proxy [<matcher>] [<upstreams...>] {
# backends
to <upstreams...>
dynamic <name> [...]
# load balancing
lb_policy <name> [<options...>]
lb_retries <retries>
lb_try_duration <duration>
lb_try_interval <interval>
lb_retry_match <request-matcher>
# active health checking
health_uri <uri>
health_port <port>
health_interval <interval>
health_passes <num>
health_fails <num>
health_timeout <duration>
health_status <status>
health_body <regexp>
health_method <value>
health_request_body <value>
health_follow_redirects
health_headers {
<field> [<values...>]
}
# passive health checking
fail_duration <duration>
max_fails <num>
unhealthy_status <status>
unhealthy_latency <duration>
unhealthy_request_count <num>
# streaming
flush_interval <duration>
request_buffers <size>
response_buffers <size>
stream_timeout <duration>
stream_close_delay <duration>
verbose_logs
# request manipulation
trusted_proxies [private_ranges] <ranges...>
header_up [+|-]<field> [<value|regexp> [<replacement>]]
header_down [+|-]<field> [<value|regexp> [<replacement>]]
method <method>
rewrite <to>
# round trip
transport <name> {
...
}
# optionally intercept responses from upstream
@name {
status <code...>
header <field> [<value>]
}
replace_status [<matcher>] <status_code>
handle_response [<matcher>] {
<directives...>
# special directives only available in handle_response
copy_response [<matcher>] [<status>] {
status <status>
}
copy_response_headers [<matcher>] {
include <fields...>
exclude <fields...>
}
}
}
Proxy upstream addresses should be network dial addresses such
as host:port
, or a URL such as scheme://host:port
. Scheme
and port may be inferred from other parts of the address/URL; if
either are missing, defaults to HTTP.
The FinalizeUnmarshalCaddyfile method should be called after this to finalize parsing of "handle_response" blocks, if possible.
func (h *Handler) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
// currently, all backends must use the same scheme/protocol (the
// underlying JSON does not yet support per-backend transports)
var commonScheme string
// we'll wait until the very end of parsing before
// validating and encoding the transport
var transport http.RoundTripper
var transportModuleName string
// collect the response matchers defined as subdirectives
// prefixed with "@" for use with "handle_response" blocks
h.responseMatchers = make(map[string]caddyhttp.ResponseMatcher)
// appendUpstream creates an upstream for address and adds
// it to the list.
appendUpstream := func(address string) error {
pa, err := parseUpstreamDialAddress(address)
if err != nil {
return d.WrapErr(err)
}
// the underlying JSON does not yet support different
// transports (protocols or schemes) to each backend,
// so we remember the last one we see and compare them
switch pa.scheme {
case "wss":
return d.Errf("the scheme wss:// is only supported in browsers; use https:// instead")
case "ws":
return d.Errf("the scheme ws:// is only supported in browsers; use http:// instead")
case "https", "http", "h2c", "":
// Do nothing or handle the valid schemes
default:
return d.Errf("unsupported URL scheme %s://", pa.scheme)
}
if commonScheme != "" && pa.scheme != commonScheme {
return d.Errf("for now, all proxy upstreams must use the same scheme (transport protocol); expecting '%s://' but got '%s://'",
commonScheme, pa.scheme)
}
commonScheme = pa.scheme
// if the port of upstream address contains a placeholder, only wrap it with the `Upstream` struct,
// delaying actual resolution of the address until request time.
if pa.replaceablePort() {
h.Upstreams = append(h.Upstreams, &Upstream{Dial: pa.dialAddr()})
return nil
}
parsedAddr, err := caddy.ParseNetworkAddress(pa.dialAddr())
if err != nil {
return d.WrapErr(err)
}
if pa.isUnix() || !pa.rangedPort() {
// unix networks don't have ports
h.Upstreams = append(h.Upstreams, &Upstream{
Dial: pa.dialAddr(),
})
} else {
// expand a port range into multiple upstreams
for i := parsedAddr.StartPort; i <= parsedAddr.EndPort; i++ {
h.Upstreams = append(h.Upstreams, &Upstream{
Dial: caddy.JoinNetworkAddress("", parsedAddr.Host, fmt.Sprint(i)),
})
}
}
return nil
}
d.Next() // consume the directive name
for _, up := range d.RemainingArgs() {
err := appendUpstream(up)
if err != nil {
return fmt.Errorf("parsing upstream '%s': %w", up, err)
}
}
for d.NextBlock(0) {
// if the subdirective has an "@" prefix then we
// parse it as a response matcher for use with "handle_response"
if strings.HasPrefix(d.Val(), matcherPrefix) {
err := caddyhttp.ParseNamedResponseMatcher(d.NewFromNextSegment(), h.responseMatchers)
if err != nil {
return err
}
continue
}
switch d.Val() {
case "to":
args := d.RemainingArgs()
if len(args) == 0 {
return d.ArgErr()
}
for _, up := range args {
err := appendUpstream(up)
if err != nil {
return fmt.Errorf("parsing upstream '%s': %w", up, err)
}
}
case "dynamic":
if !d.NextArg() {
return d.ArgErr()
}
if h.DynamicUpstreams != nil {
return d.Err("dynamic upstreams already specified")
}
dynModule := d.Val()
modID := "http.reverse_proxy.upstreams." + dynModule
unm, err := caddyfile.UnmarshalModule(d, modID)
if err != nil {
return err
}
source, ok := unm.(UpstreamSource)
if !ok {
return d.Errf("module %s (%T) is not an UpstreamSource", modID, unm)
}
h.DynamicUpstreamsRaw = caddyconfig.JSONModuleObject(source, "source", dynModule, nil)
case "lb_policy":
if !d.NextArg() {
return d.ArgErr()
}
if h.LoadBalancing != nil && h.LoadBalancing.SelectionPolicyRaw != nil {
return d.Err("load balancing selection policy already specified")
}
name := d.Val()
modID := "http.reverse_proxy.selection_policies." + name
unm, err := caddyfile.UnmarshalModule(d, modID)
if err != nil {
return err
}
sel, ok := unm.(Selector)
if !ok {
return d.Errf("module %s (%T) is not a reverseproxy.Selector", modID, unm)
}
if h.LoadBalancing == nil {
h.LoadBalancing = new(LoadBalancing)
}
h.LoadBalancing.SelectionPolicyRaw = caddyconfig.JSONModuleObject(sel, "policy", name, nil)
case "lb_retries":
if !d.NextArg() {
return d.ArgErr()
}
tries, err := strconv.Atoi(d.Val())
if err != nil {
return d.Errf("bad lb_retries number '%s': %v", d.Val(), err)
}
if h.LoadBalancing == nil {
h.LoadBalancing = new(LoadBalancing)
}
h.LoadBalancing.Retries = tries
case "lb_try_duration":
if !d.NextArg() {
return d.ArgErr()
}
if h.LoadBalancing == nil {
h.LoadBalancing = new(LoadBalancing)
}
dur, err := caddy.ParseDuration(d.Val())
if err != nil {
return d.Errf("bad duration value %s: %v", d.Val(), err)
}
h.LoadBalancing.TryDuration = caddy.Duration(dur)
case "lb_try_interval":
if !d.NextArg() {
return d.ArgErr()
}
if h.LoadBalancing == nil {
h.LoadBalancing = new(LoadBalancing)
}
dur, err := caddy.ParseDuration(d.Val())
if err != nil {
return d.Errf("bad interval value '%s': %v", d.Val(), err)
}
h.LoadBalancing.TryInterval = caddy.Duration(dur)
case "lb_retry_match":
matcherSet, err := caddyhttp.ParseCaddyfileNestedMatcherSet(d)
if err != nil {
return d.Errf("failed to parse lb_retry_match: %v", err)
}
if h.LoadBalancing == nil {
h.LoadBalancing = new(LoadBalancing)
}
h.LoadBalancing.RetryMatchRaw = append(h.LoadBalancing.RetryMatchRaw, matcherSet)
case "health_uri":
if !d.NextArg() {
return d.ArgErr()
}
if h.HealthChecks == nil {
h.HealthChecks = new(HealthChecks)
}
if h.HealthChecks.Active == nil {
h.HealthChecks.Active = new(ActiveHealthChecks)
}
h.HealthChecks.Active.URI = d.Val()
case "health_path":
if !d.NextArg() {
return d.ArgErr()
}
if h.HealthChecks == nil {
h.HealthChecks = new(HealthChecks)
}
if h.HealthChecks.Active == nil {
h.HealthChecks.Active = new(ActiveHealthChecks)
}
h.HealthChecks.Active.Path = d.Val()
caddy.Log().Named("config.adapter.caddyfile").Warn("the 'health_path' subdirective is deprecated, please use 'health_uri' instead!")
case "health_upstream":
if !d.NextArg() {
return d.ArgErr()
}
if h.HealthChecks == nil {
h.HealthChecks = new(HealthChecks)
}
if h.HealthChecks.Active == nil {
h.HealthChecks.Active = new(ActiveHealthChecks)
}
_, port, err := net.SplitHostPort(d.Val())
if err != nil {
return d.Errf("health_upstream is malformed '%s': %v", d.Val(), err)
}
_, err = strconv.Atoi(port)
if err != nil {
return d.Errf("bad port number '%s': %v", d.Val(), err)
}
h.HealthChecks.Active.Upstream = d.Val()
case "health_port":
if !d.NextArg() {
return d.ArgErr()
}
if h.HealthChecks == nil {
h.HealthChecks = new(HealthChecks)
}
if h.HealthChecks.Active == nil {
h.HealthChecks.Active = new(ActiveHealthChecks)
}
if h.HealthChecks.Active.Upstream != "" {
return d.Errf("the 'health_port' subdirective is ignored if 'health_upstream' is used!")
}
portNum, err := strconv.Atoi(d.Val())
if err != nil {
return d.Errf("bad port number '%s': %v", d.Val(), err)
}
h.HealthChecks.Active.Port = portNum
case "health_headers":
healthHeaders := make(http.Header)
for nesting := d.Nesting(); d.NextBlock(nesting); {
key := d.Val()
values := d.RemainingArgs()
if len(values) == 0 {
values = append(values, "")
}
healthHeaders[key] = append(healthHeaders[key], values...)
}
if h.HealthChecks == nil {
h.HealthChecks = new(HealthChecks)
}
if h.HealthChecks.Active == nil {
h.HealthChecks.Active = new(ActiveHealthChecks)
}
h.HealthChecks.Active.Headers = healthHeaders
case "health_method":
if !d.NextArg() {
return d.ArgErr()
}
if h.HealthChecks == nil {
h.HealthChecks = new(HealthChecks)
}
if h.HealthChecks.Active == nil {
h.HealthChecks.Active = new(ActiveHealthChecks)
}
h.HealthChecks.Active.Method = d.Val()
case "health_request_body":
if !d.NextArg() {
return d.ArgErr()
}
if h.HealthChecks == nil {
h.HealthChecks = new(HealthChecks)
}
if h.HealthChecks.Active == nil {
h.HealthChecks.Active = new(ActiveHealthChecks)
}
h.HealthChecks.Active.Body = d.Val()
case "health_interval":
if !d.NextArg() {
return d.ArgErr()
}
if h.HealthChecks == nil {
h.HealthChecks = new(HealthChecks)
}
if h.HealthChecks.Active == nil {
h.HealthChecks.Active = new(ActiveHealthChecks)
}
dur, err := caddy.ParseDuration(d.Val())
if err != nil {
return d.Errf("bad interval value %s: %v", d.Val(), err)
}
h.HealthChecks.Active.Interval = caddy.Duration(dur)
case "health_timeout":
if !d.NextArg() {
return d.ArgErr()
}
if h.HealthChecks == nil {
h.HealthChecks = new(HealthChecks)
}
if h.HealthChecks.Active == nil {
h.HealthChecks.Active = new(ActiveHealthChecks)
}
dur, err := caddy.ParseDuration(d.Val())
if err != nil {
return d.Errf("bad timeout value %s: %v", d.Val(), err)
}
h.HealthChecks.Active.Timeout = caddy.Duration(dur)
case "health_status":
if !d.NextArg() {
return d.ArgErr()
}
if h.HealthChecks == nil {
h.HealthChecks = new(HealthChecks)
}
if h.HealthChecks.Active == nil {
h.HealthChecks.Active = new(ActiveHealthChecks)
}
val := d.Val()
if len(val) == 3 && strings.HasSuffix(val, "xx") {
val = val[:1]
}
statusNum, err := strconv.Atoi(val)
if err != nil {
return d.Errf("bad status value '%s': %v", d.Val(), err)
}
h.HealthChecks.Active.ExpectStatus = statusNum
case "health_body":
if !d.NextArg() {
return d.ArgErr()
}
if h.HealthChecks == nil {
h.HealthChecks = new(HealthChecks)
}
if h.HealthChecks.Active == nil {
h.HealthChecks.Active = new(ActiveHealthChecks)
}
h.HealthChecks.Active.ExpectBody = d.Val()
case "health_follow_redirects":
if d.NextArg() {
return d.ArgErr()
}
if h.HealthChecks == nil {
h.HealthChecks = new(HealthChecks)
}
if h.HealthChecks.Active == nil {
h.HealthChecks.Active = new(ActiveHealthChecks)
}
h.HealthChecks.Active.FollowRedirects = true
case "health_passes":
if !d.NextArg() {
return d.ArgErr()
}
if h.HealthChecks == nil {
h.HealthChecks = new(HealthChecks)
}
if h.HealthChecks.Active == nil {
h.HealthChecks.Active = new(ActiveHealthChecks)
}
passes, err := strconv.Atoi(d.Val())
if err != nil {
return d.Errf("invalid passes count '%s': %v", d.Val(), err)
}
h.HealthChecks.Active.Passes = passes
case "health_fails":
if !d.NextArg() {
return d.ArgErr()
}
if h.HealthChecks == nil {
h.HealthChecks = new(HealthChecks)
}
if h.HealthChecks.Active == nil {
h.HealthChecks.Active = new(ActiveHealthChecks)
}
fails, err := strconv.Atoi(d.Val())
if err != nil {
return d.Errf("invalid fails count '%s': %v", d.Val(), err)
}
h.HealthChecks.Active.Fails = fails
case "max_fails":
if !d.NextArg() {
return d.ArgErr()
}
if h.HealthChecks == nil {
h.HealthChecks = new(HealthChecks)
}
if h.HealthChecks.Passive == nil {
h.HealthChecks.Passive = new(PassiveHealthChecks)
}
maxFails, err := strconv.Atoi(d.Val())
if err != nil {
return d.Errf("invalid maximum fail count '%s': %v", d.Val(), err)
}
h.HealthChecks.Passive.MaxFails = maxFails
case "fail_duration":
if !d.NextArg() {
return d.ArgErr()
}
if h.HealthChecks == nil {
h.HealthChecks = new(HealthChecks)
}
if h.HealthChecks.Passive == nil {
h.HealthChecks.Passive = new(PassiveHealthChecks)
}
dur, err := caddy.ParseDuration(d.Val())
if err != nil {
return d.Errf("bad duration value '%s': %v", d.Val(), err)
}
h.HealthChecks.Passive.FailDuration = caddy.Duration(dur)
case "unhealthy_request_count":
if !d.NextArg() {
return d.ArgErr()
}
if h.HealthChecks == nil {
h.HealthChecks = new(HealthChecks)
}
if h.HealthChecks.Passive == nil {
h.HealthChecks.Passive = new(PassiveHealthChecks)
}
maxConns, err := strconv.Atoi(d.Val())
if err != nil {
return d.Errf("invalid maximum connection count '%s': %v", d.Val(), err)
}
h.HealthChecks.Passive.UnhealthyRequestCount = maxConns
case "unhealthy_status":
args := d.RemainingArgs()
if len(args) == 0 {
return d.ArgErr()
}
if h.HealthChecks == nil {
h.HealthChecks = new(HealthChecks)
}
if h.HealthChecks.Passive == nil {
h.HealthChecks.Passive = new(PassiveHealthChecks)
}
for _, arg := range args {
if len(arg) == 3 && strings.HasSuffix(arg, "xx") {
arg = arg[:1]
}
statusNum, err := strconv.Atoi(arg)
if err != nil {
return d.Errf("bad status value '%s': %v", d.Val(), err)
}
h.HealthChecks.Passive.UnhealthyStatus = append(h.HealthChecks.Passive.UnhealthyStatus, statusNum)
}
case "unhealthy_latency":
if !d.NextArg() {
return d.ArgErr()
}
if h.HealthChecks == nil {
h.HealthChecks = new(HealthChecks)
}
if h.HealthChecks.Passive == nil {
h.HealthChecks.Passive = new(PassiveHealthChecks)
}
dur, err := caddy.ParseDuration(d.Val())
if err != nil {
return d.Errf("bad duration value '%s': %v", d.Val(), err)
}
h.HealthChecks.Passive.UnhealthyLatency = caddy.Duration(dur)
case "flush_interval":
if !d.NextArg() {
return d.ArgErr()
}
if fi, err := strconv.Atoi(d.Val()); err == nil {
h.FlushInterval = caddy.Duration(fi)
} else {
dur, err := caddy.ParseDuration(d.Val())
if err != nil {
return d.Errf("bad duration value '%s': %v", d.Val(), err)
}
h.FlushInterval = caddy.Duration(dur)
}
case "request_buffers", "response_buffers":
subdir := d.Val()
if !d.NextArg() {
return d.ArgErr()
}
val := d.Val()
var size int64
if val == "unlimited" {
size = -1
} else {
usize, err := humanize.ParseBytes(val)
if err != nil {
return d.Errf("invalid byte size '%s': %v", val, err)
}
size = int64(usize)
}
if d.NextArg() {
return d.ArgErr()
}
if subdir == "request_buffers" {
h.RequestBuffers = size
} else if subdir == "response_buffers" {
h.ResponseBuffers = size
}
case "stream_timeout":
if !d.NextArg() {
return d.ArgErr()
}
if fi, err := strconv.Atoi(d.Val()); err == nil {
h.StreamTimeout = caddy.Duration(fi)
} else {
dur, err := caddy.ParseDuration(d.Val())
if err != nil {
return d.Errf("bad duration value '%s': %v", d.Val(), err)
}
h.StreamTimeout = caddy.Duration(dur)
}
case "stream_close_delay":
if !d.NextArg() {
return d.ArgErr()
}
if fi, err := strconv.Atoi(d.Val()); err == nil {
h.StreamCloseDelay = caddy.Duration(fi)
} else {
dur, err := caddy.ParseDuration(d.Val())
if err != nil {
return d.Errf("bad duration value '%s': %v", d.Val(), err)
}
h.StreamCloseDelay = caddy.Duration(dur)
}
case "trusted_proxies":
for d.NextArg() {
if d.Val() == "private_ranges" {
h.TrustedProxies = append(h.TrustedProxies, internal.PrivateRangesCIDR()...)
continue
}
h.TrustedProxies = append(h.TrustedProxies, d.Val())
}
case "header_up":
var err error
if h.Headers == nil {
h.Headers = new(headers.Handler)
}
if h.Headers.Request == nil {
h.Headers.Request = new(headers.HeaderOps)
}
args := d.RemainingArgs()
switch len(args) {
case 1:
err = headers.CaddyfileHeaderOp(h.Headers.Request, args[0], "", nil)
case 2:
// some lint checks, I guess
if strings.EqualFold(args[0], "host") && (args[1] == "{hostport}" || args[1] == "{http.request.hostport}") {
caddy.Log().Named("caddyfile").Warn("Unnecessary header_up Host: the reverse proxy's default behavior is to pass headers to the upstream")
}
if strings.EqualFold(args[0], "x-forwarded-for") && (args[1] == "{remote}" || args[1] == "{http.request.remote}" || args[1] == "{remote_host}" || args[1] == "{http.request.remote.host}") {
caddy.Log().Named("caddyfile").Warn("Unnecessary header_up X-Forwarded-For: the reverse proxy's default behavior is to pass headers to the upstream")
}
if strings.EqualFold(args[0], "x-forwarded-proto") && (args[1] == "{scheme}" || args[1] == "{http.request.scheme}") {
caddy.Log().Named("caddyfile").Warn("Unnecessary header_up X-Forwarded-Proto: the reverse proxy's default behavior is to pass headers to the upstream")
}
if strings.EqualFold(args[0], "x-forwarded-host") && (args[1] == "{host}" || args[1] == "{http.request.host}" || args[1] == "{hostport}" || args[1] == "{http.request.hostport}") {
caddy.Log().Named("caddyfile").Warn("Unnecessary header_up X-Forwarded-Host: the reverse proxy's default behavior is to pass headers to the upstream")
}
err = headers.CaddyfileHeaderOp(h.Headers.Request, args[0], args[1], nil)
case 3:
err = headers.CaddyfileHeaderOp(h.Headers.Request, args[0], args[1], &args[2])
default:
return d.ArgErr()
}
if err != nil {
return d.Err(err.Error())
}
case "header_down":
var err error
if h.Headers == nil {
h.Headers = new(headers.Handler)
}
if h.Headers.Response == nil {
h.Headers.Response = &headers.RespHeaderOps{
HeaderOps: new(headers.HeaderOps),
}
}
args := d.RemainingArgs()
switch len(args) {
case 1:
err = headers.CaddyfileHeaderOp(h.Headers.Response.HeaderOps, args[0], "", nil)
case 2:
err = headers.CaddyfileHeaderOp(h.Headers.Response.HeaderOps, args[0], args[1], nil)
case 3:
err = headers.CaddyfileHeaderOp(h.Headers.Response.HeaderOps, args[0], args[1], &args[2])
default:
return d.ArgErr()
}
if err != nil {
return d.Err(err.Error())
}
case "method":
if !d.NextArg() {
return d.ArgErr()
}
if h.Rewrite == nil {
h.Rewrite = &rewrite.Rewrite{}
}
h.Rewrite.Method = d.Val()
if d.NextArg() {
return d.ArgErr()
}
case "rewrite":
if !d.NextArg() {
return d.ArgErr()
}
if h.Rewrite == nil {
h.Rewrite = &rewrite.Rewrite{}
}
h.Rewrite.URI = d.Val()
if d.NextArg() {
return d.ArgErr()
}
case "transport":
if !d.NextArg() {
return d.ArgErr()
}
if h.TransportRaw != nil {
return d.Err("transport already specified")
}
transportModuleName = d.Val()
modID := "http.reverse_proxy.transport." + transportModuleName
unm, err := caddyfile.UnmarshalModule(d, modID)
if err != nil {
return err
}
rt, ok := unm.(http.RoundTripper)
if !ok {
return d.Errf("module %s (%T) is not a RoundTripper", modID, unm)
}
transport = rt
case "handle_response":
// delegate the parsing of handle_response to the caller,
// since we need the httpcaddyfile.Helper to parse subroutes.
// See h.FinalizeUnmarshalCaddyfile
h.handleResponseSegments = append(h.handleResponseSegments, d.NewFromNextSegment())
case "replace_status":
args := d.RemainingArgs()
if len(args) != 1 && len(args) != 2 {
return d.Errf("must have one or two arguments: an optional response matcher, and a status code")
}
responseHandler := caddyhttp.ResponseHandler{}
if len(args) == 2 {
if !strings.HasPrefix(args[0], matcherPrefix) {
return d.Errf("must use a named response matcher, starting with '@'")
}
foundMatcher, ok := h.responseMatchers[args[0]]
if !ok {
return d.Errf("no named response matcher defined with name '%s'", args[0][1:])
}
responseHandler.Match = &foundMatcher
responseHandler.StatusCode = caddyhttp.WeakString(args[1])
} else if len(args) == 1 {
responseHandler.StatusCode = caddyhttp.WeakString(args[0])
}
// make sure there's no block, cause it doesn't make sense
if nesting := d.Nesting(); d.NextBlock(nesting) {
return d.Errf("cannot define routes for 'replace_status', use 'handle_response' instead.")
}
h.HandleResponse = append(
h.HandleResponse,
responseHandler,
)
case "verbose_logs":
if h.VerboseLogs {
return d.Err("verbose_logs already specified")
}
h.VerboseLogs = true
default:
return d.Errf("unrecognized subdirective %s", d.Val())
}
}
// if the scheme inferred from the backends' addresses is
// HTTPS, we will need a non-nil transport to enable TLS,
// or if H2C, to set the transport versions.
if (commonScheme == "https" || commonScheme == "h2c") && transport == nil {
transport = new(HTTPTransport)
transportModuleName = "http"
}
// verify transport configuration, and finally encode it
if transport != nil {
if te, ok := transport.(TLSTransport); ok {
if commonScheme == "https" && !te.TLSEnabled() {
err := te.EnableTLS(new(TLSConfig))
if err != nil {
return err
}
}
if commonScheme == "http" && te.TLSEnabled() {
return d.Errf("upstream address scheme is HTTP but transport is configured for HTTP+TLS (HTTPS)")
}
if te, ok := transport.(*HTTPTransport); ok && commonScheme == "h2c" {
te.Versions = []string{"h2c", "2"}
}
} else if commonScheme == "https" {
return d.Errf("upstreams are configured for HTTPS but transport module does not support TLS: %T", transport)
}
// no need to encode empty default transport
if !reflect.DeepEqual(transport, new(HTTPTransport)) {
h.TransportRaw = caddyconfig.JSONModuleObject(transport, "protocol", transportModuleName, nil)
}
}
return nil
}
Cognitive complexity: 400
, Cyclomatic complexity: 236
func (*Host) Fails
Fails returns the number of recent failures with the upstream.
// Fails returns the number of recent failures with the upstream.
func (h *Host) Fails() int {
	n := atomic.LoadInt64(&h.fails)
	return int(n)
}
Cognitive complexity: 0
, Cyclomatic complexity: 1
func (*Host) NumRequests
NumRequests returns the number of active requests to the upstream.
// NumRequests returns the number of active requests to the upstream.
func (h *Host) NumRequests() int {
	n := atomic.LoadInt64(&h.numRequests)
	return int(n)
}
Cognitive complexity: 0
, Cyclomatic complexity: 1
func (*TLSConfig) MakeTLSClientConfig
MakeTLSClientConfig returns a tls.Config usable by a client to a backend. If there is no custom TLS configuration, a nil config may be returned.
// MakeTLSClientConfig returns a tls.Config usable by a client to a backend.
// If there is no custom TLS configuration, a nil config may be returned.
func (t *TLSConfig) MakeTLSClientConfig(ctx caddy.Context) (*tls.Config, error) {
	cfg := new(tls.Config)

	// client auth: a certificate file and its key file must be
	// specified together, or not at all
	if t.ClientCertificateFile != "" && t.ClientCertificateKeyFile == "" {
		return nil, fmt.Errorf("client_certificate_file specified without client_certificate_key_file")
	}
	if t.ClientCertificateFile == "" && t.ClientCertificateKeyFile != "" {
		return nil, fmt.Errorf("client_certificate_key_file specified without client_certificate_file")
	}
	if t.ClientCertificateFile != "" && t.ClientCertificateKeyFile != "" {
		cert, err := tls.LoadX509KeyPair(t.ClientCertificateFile, t.ClientCertificateKeyFile)
		if err != nil {
			return nil, fmt.Errorf("loading client certificate key pair: %v", err)
		}
		cfg.Certificates = []tls.Certificate{cert}
	}

	// automated (managed) client certificate: ask the tls app to manage
	// the cert, then select a matching one lazily at handshake time
	if t.ClientCertificateAutomate != "" {
		// TODO: use or enable ctx.IdentityCredentials() ...
		tlsAppIface, err := ctx.App("tls")
		if err != nil {
			return nil, fmt.Errorf("getting tls app: %v", err)
		}
		tlsApp := tlsAppIface.(*caddytls.TLS)
		err = tlsApp.Manage([]string{t.ClientCertificateAutomate})
		if err != nil {
			return nil, fmt.Errorf("managing client certificate: %v", err)
		}
		cfg.GetClientCertificate = func(cri *tls.CertificateRequestInfo) (*tls.Certificate, error) {
			certs := caddytls.AllMatchingCertificates(t.ClientCertificateAutomate)
			var err error
			for _, cert := range certs {
				certCertificate := cert.Certificate // avoid taking address of iteration variable (gosec warning)
				err = cri.SupportsCertificate(&certCertificate)
				if err == nil {
					// NOTE(review): returns the address of the loop variable's field
					// even though the copy above was made to satisfy gosec; presumably
					// safe with per-iteration loop variables, but inconsistent —
					// consider returning &certCertificate. TODO confirm intent.
					return &cert.Certificate, nil
				}
			}
			// if no cert matched, surface the last SupportsCertificate error,
			// or a generic "not found" error if there were no candidates at all
			if err == nil {
				err = fmt.Errorf("no client certificate found for automate name: %s", t.ClientCertificateAutomate)
			}
			return nil, err
		}
	}

	// trusted root CAs (legacy inline/file-based configuration)
	if len(t.RootCAPool) > 0 || len(t.RootCAPEMFiles) > 0 {
		ctx.Logger().Warn("root_ca_pool and root_ca_pem_files are deprecated. Use one of the tls.ca_pool.source modules instead")
		rootPool := x509.NewCertPool()
		for _, encodedCACert := range t.RootCAPool {
			caCert, err := decodeBase64DERCert(encodedCACert)
			if err != nil {
				return nil, fmt.Errorf("parsing CA certificate: %v", err)
			}
			rootPool.AddCert(caCert)
		}
		for _, pemFile := range t.RootCAPEMFiles {
			pemData, err := os.ReadFile(pemFile)
			if err != nil {
				return nil, fmt.Errorf("failed reading ca cert: %v", err)
			}
			rootPool.AppendCertsFromPEM(pemData)
		}
		cfg.RootCAs = rootPool
	}

	// CA module-based root pool; mutually exclusive with the legacy options above
	if t.CARaw != nil {
		if len(t.RootCAPool) > 0 || len(t.RootCAPEMFiles) > 0 {
			return nil, fmt.Errorf("conflicting config for Root CA pool")
		}
		caRaw, err := ctx.LoadModule(t, "CARaw")
		if err != nil {
			return nil, fmt.Errorf("failed to load ca module: %v", err)
		}
		ca, ok := caRaw.(caddytls.CA)
		if !ok {
			return nil, fmt.Errorf("CA module '%s' is not a certificate pool provider", ca)
		}
		cfg.RootCAs = ca.CertPool()
	}

	// Renegotiation (empty string means "never")
	switch t.Renegotiation {
	case "never", "":
		cfg.Renegotiation = tls.RenegotiateNever
	case "once":
		cfg.Renegotiation = tls.RenegotiateOnceAsClient
	case "freely":
		cfg.Renegotiation = tls.RenegotiateFreelyAsClient
	default:
		return nil, fmt.Errorf("invalid TLS renegotiation level: %v", t.Renegotiation)
	}

	// override for the server name used to verify the TLS handshake
	cfg.ServerName = t.ServerName

	// throw all security out the window
	cfg.InsecureSkipVerify = t.InsecureSkipVerify

	// curve preferences, deduplicated while preserving configured order
	curvesAdded := make(map[tls.CurveID]struct{})
	for _, curveName := range t.Curves {
		curveID := caddytls.SupportedCurves[curveName]
		if _, ok := curvesAdded[curveID]; !ok {
			curvesAdded[curveID] = struct{}{}
			cfg.CurvePreferences = append(cfg.CurvePreferences, curveID)
		}
	}

	// only return a config if it's not empty
	if reflect.DeepEqual(cfg, new(tls.Config)) {
		return nil, nil
	}

	return cfg, nil
}
Cognitive complexity: 59
, Cyclomatic complexity: 32
func (*Upstream) Available
Available returns true if the remote host is available to receive requests. This is the method that should be used by selection policies, etc. to determine if a backend should be able to be sent a request.
// Available returns true if the remote host is available to receive requests.
// This is the method that should be used by selection policies, etc. to
// determine if a backend should be able to be sent a request: the host must
// be healthy and must not be at its request capacity.
func (u *Upstream) Available() bool {
	if !u.Healthy() {
		return false
	}
	return !u.Full()
}
Cognitive complexity: 0
, Cyclomatic complexity: 2
func (*Upstream) Full
Full returns true if the remote host cannot receive more requests at this time.
// Full returns true if the remote host cannot receive more
// requests at this time. A MaxRequests of zero (or less) means
// there is no limit, so the host is never considered full.
func (u *Upstream) Full() bool {
	if u.MaxRequests <= 0 {
		return false
	}
	return u.Host.NumRequests() >= u.MaxRequests
}
Cognitive complexity: 0
, Cyclomatic complexity: 2
func (*Upstream) Healthy
Healthy returns true if the remote host is currently known to be healthy or "up". It consults the circuit breaker, if any.
// Healthy returns true if the remote host is currently known to be
// healthy or "up". It consults the circuit breaker, if any.
func (u *Upstream) Healthy() bool {
	// basic health flag first; everything else only matters if this passes
	if !u.healthy() {
		return false
	}
	// passive health check policy: too many recent failures means unhealthy
	if u.healthCheckPolicy != nil && u.Host.Fails() >= u.healthCheckPolicy.MaxFails {
		return false
	}
	// circuit breaker has the final say
	if u.cb != nil && !u.cb.OK() {
		return false
	}
	return true
}
Cognitive complexity: 4
, Cyclomatic complexity: 5
func (*Upstream) String
(pointer receiver necessary to avoid a race condition, since copying the Upstream reads the 'unhealthy' field which is accessed atomically)
func (u *Upstream) String() string { return u.Dial }
Cognitive complexity: 0
, Cyclomatic complexity: 1
func (*UpstreamResolver) ParseAddresses
ParseAddresses parses all the configured network addresses and ensures they're ready to be used.
// ParseAddresses parses all the configured network addresses
// and ensures they're ready to be used. Addresses without an
// explicit network or port default to UDP port 53.
func (u *UpstreamResolver) ParseAddresses() error {
	for _, addrStr := range u.Addresses {
		parsed, err := caddy.ParseNetworkAddressWithDefaults(addrStr, "udp", 53)
		if err != nil {
			return err
		}
		// a resolver entry must name exactly one address (no port ranges)
		if parsed.PortRangeSize() != 1 {
			return fmt.Errorf("resolver address must have exactly one address; cannot call %v", parsed)
		}
		u.netAddrs = append(u.netAddrs, parsed)
	}
	return nil
}
Cognitive complexity: 7
, Cyclomatic complexity: 4
func (*metricsUpstreamsHealthyUpdater) Init
// Init starts a background goroutine that refreshes the upstream-health
// metrics immediately and then every 10 seconds, until the handler's
// context is canceled.
func (m *metricsUpstreamsHealthyUpdater) Init() {
	go func() {
		// don't let a panic in the updater crash the process;
		// log the error with a stack trace instead
		defer func() {
			if err := recover(); err != nil {
				if c := reverseProxyMetrics.logger.Check(zapcore.ErrorLevel, "upstreams healthy metrics updater panicked"); c != nil {
					c.Write(
						zap.Any("error", err),
						zap.ByteString("stack", debug.Stack()),
					)
				}
			}
		}()

		// update once right away so metrics aren't empty until the first tick
		m.update()

		ticker := time.NewTicker(10 * time.Second)
		for {
			select {
			case <-ticker.C:
				m.update()
			case <-m.handler.ctx.Done():
				// handler is shutting down; stop the ticker and exit the goroutine
				ticker.Stop()
				return
			}
		}
	}()
}
Cognitive complexity: 11
, Cyclomatic complexity: 6
func (*tcpRWTimeoutConn) Read
// Read applies the configured read timeout as a fresh deadline
// before delegating to the underlying TCP connection.
func (c *tcpRWTimeoutConn) Read(b []byte) (int, error) {
	if c.readTimeout > 0 {
		deadline := time.Now().Add(c.readTimeout)
		if err := c.TCPConn.SetReadDeadline(deadline); err != nil {
			// deadline setting is best-effort; log and proceed with the read
			if ce := c.logger.Check(zapcore.ErrorLevel, "failed to set read deadline"); ce != nil {
				ce.Write(zap.Error(err))
			}
		}
	}
	return c.TCPConn.Read(b)
}
Cognitive complexity: 6
, Cyclomatic complexity: 4
func (*tcpRWTimeoutConn) Write
// Write applies the configured write timeout as a fresh deadline
// before delegating to the underlying TCP connection.
func (c *tcpRWTimeoutConn) Write(b []byte) (int, error) {
	if c.writeTimeout > 0 {
		deadline := time.Now().Add(c.writeTimeout)
		if err := c.TCPConn.SetWriteDeadline(deadline); err != nil {
			// deadline setting is best-effort; log and proceed with the write
			if ce := c.logger.Check(zapcore.ErrorLevel, "failed to set write deadline"); ce != nil {
				ce.Write(zap.Error(err))
			}
		}
	}
	return c.TCPConn.Write(b)
}
Cognitive complexity: 6
, Cyclomatic complexity: 4
func (CopyResponseHandler) ServeHTTP
ServeHTTP implements the Handler interface.
// ServeHTTP implements the Handler interface. It copies the upstream
// response to the client, optionally overriding the status code, and
// marks the response as finalized so the proxy doesn't write it twice.
func (h CopyResponseHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request, _ caddyhttp.Handler) error {
	repl := req.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
	hrc, ok := req.Context().Value(proxyHandleResponseContextCtxKey).(*handleResponseContext)

	// don't allow this to be used outside of handle_response routes
	// (the context value is only set by the proxy handler there)
	if !ok {
		return caddyhttp.Error(http.StatusInternalServerError,
			fmt.Errorf("cannot use 'copy_response' outside of reverse_proxy's handle_response routes"))
	}

	// allow a custom status code to be written; otherwise the
	// status code from the upstream response is written
	if codeStr := h.StatusCode.String(); codeStr != "" {
		// the status code may contain placeholders; expand before parsing
		intVal, err := strconv.Atoi(repl.ReplaceAll(codeStr, ""))
		if err != nil {
			return caddyhttp.Error(http.StatusInternalServerError, err)
		}
		hrc.response.StatusCode = intVal
	}

	// make sure the reverse_proxy handler doesn't try to call
	// finalizeResponse again after we've already done it here.
	hrc.isFinalized = true

	// write the response
	return hrc.handler.finalizeResponse(rw, req, hrc.response, repl, hrc.start, hrc.logger)
}
Cognitive complexity: 6
, Cyclomatic complexity: 4
func (HTTPTransport) Cleanup
Cleanup implements caddy.CleanerUpper and closes any idle connections.
// Cleanup implements caddy.CleanerUpper and closes any idle connections.
func (h HTTPTransport) Cleanup() error {
	if h.Transport != nil {
		h.Transport.CloseIdleConnections()
	}
	return nil
}
Cognitive complexity: 2
, Cyclomatic complexity: 2
func (HTTPTransport) TLSEnabled
TLSEnabled returns true if TLS is enabled.
// TLSEnabled returns true if TLS is enabled, i.e. a TLS
// configuration has been set on this transport.
func (h HTTPTransport) TLSEnabled() bool {
	return h.TLS != nil
}
Cognitive complexity: 0
, Cyclomatic complexity: 1
func (RandomSelection) Select
Select returns an available host, if any.
// Select returns an available host chosen uniformly at random
// from the pool, if any; the request and response writer are
// not consulted by this policy.
func (r RandomSelection) Select(pool UpstreamPool, request *http.Request, _ http.ResponseWriter) *Upstream {
	return selectRandomHost(pool)
}
Cognitive complexity: 0
, Cyclomatic complexity: 1
func (SRVUpstreams) GetUpstreams
// GetUpstreams returns upstreams by looking up SRV records for the
// configured (placeholder-expanded) service/proto/name. Results are
// cached in the package-level srvs map and reused while fresh; stale
// entries are refreshed here under a write lock (double-checked).
func (su SRVUpstreams) GetUpstreams(r *http.Request) ([]*Upstream, error) {
	suAddr, service, proto, name := su.expandedAddr(r)

	// first, use a cheap read-lock to return a cached result quickly
	srvsMu.RLock()
	cached := srvs[suAddr]
	srvsMu.RUnlock()
	if cached.isFresh() {
		return allNew(cached.upstreams), nil
	}

	// otherwise, obtain a write-lock to update the cached value
	srvsMu.Lock()
	defer srvsMu.Unlock()

	// check to see if it's still stale, since we're now in a different
	// lock from when we first checked freshness; another goroutine might
	// have refreshed it in the meantime before we re-obtained our lock
	cached = srvs[suAddr]
	if cached.isFresh() {
		return allNew(cached.upstreams), nil
	}

	if c := su.logger.Check(zapcore.DebugLevel, "refreshing SRV upstreams"); c != nil {
		c.Write(
			zap.String("service", service),
			zap.String("proto", proto),
			zap.String("name", name),
		)
	}

	_, records, err := su.resolver.LookupSRV(r.Context(), service, proto, name)
	if err != nil {
		// From LookupSRV docs: "If the response contains invalid names, those records are filtered
		// out and an error will be returned alongside the remaining results, if any." Thus, we
		// only return an error if no records were also returned.
		if len(records) == 0 {
			if su.GracePeriod > 0 {
				// lookup failed entirely, but a grace period is configured:
				// keep serving the stale cached result until it expires
				if c := su.logger.Check(zapcore.ErrorLevel, "SRV lookup failed; using previously cached"); c != nil {
					c.Write(zap.Error(err))
				}
				// extend the entry's freshness so it stays "fresh" for
				// (GracePeriod - Refresh) from now
				cached.freshness = time.Now().Add(time.Duration(su.GracePeriod) - time.Duration(su.Refresh))
				srvs[suAddr] = cached
				return allNew(cached.upstreams), nil
			}
			return nil, err
		}
		// partial results: log the filtering error but proceed with what we got
		if c := su.logger.Check(zapcore.WarnLevel, "SRV records filtered"); c != nil {
			c.Write(zap.Error(err))
		}
	}

	// build one upstream per SRV record (target:port)
	upstreams := make([]Upstream, len(records))
	for i, rec := range records {
		if c := su.logger.Check(zapcore.DebugLevel, "discovered SRV record"); c != nil {
			c.Write(
				zap.String("target", rec.Target),
				zap.Uint16("port", rec.Port),
				zap.Uint16("priority", rec.Priority),
				zap.Uint16("weight", rec.Weight),
			)
		}
		addr := net.JoinHostPort(rec.Target, strconv.Itoa(int(rec.Port)))
		upstreams[i] = Upstream{Dial: addr}
	}

	// before adding a new one to the cache (as opposed to replacing stale one), make room if cache is full
	// (cache is capped at 100 entries; an arbitrary entry is evicted via map iteration order)
	if cached.freshness.IsZero() && len(srvs) >= 100 {
		for randomKey := range srvs {
			delete(srvs, randomKey)
			break
		}
	}

	srvs[suAddr] = srvLookup{
		srvUpstreams: su,
		freshness:    time.Now(),
		upstreams:    upstreams,
	}

	return allNew(upstreams), nil
}
Cognitive complexity: 28
, Cyclomatic complexity: 14
func (adminUpstreams) CaddyModule
CaddyModule returns the Caddy module information.
// CaddyModule returns the Caddy module information.
func (adminUpstreams) CaddyModule() caddy.ModuleInfo {
	info := caddy.ModuleInfo{
		ID:  "admin.api.reverse_proxy",
		New: func() caddy.Module { return new(adminUpstreams) },
	}
	return info
}
Cognitive complexity: 2
, Cyclomatic complexity: 1
func (adminUpstreams) Routes
Routes returns a route for the /reverse_proxy/upstreams endpoint.
// Routes returns a route for the /reverse_proxy/upstreams endpoint.
func (al adminUpstreams) Routes() []caddy.AdminRoute {
	upstreamsRoute := caddy.AdminRoute{
		Pattern: "/reverse_proxy/upstreams",
		Handler: caddy.AdminHandlerFunc(al.handleUpstreams),
	}
	return []caddy.AdminRoute{upstreamsRoute}
}
Cognitive complexity: 2
, Cyclomatic complexity: 1
func (bodyReadCloser) Close
// Close returns the buffer to the pool for reuse, then closes
// the underlying body, if there is one.
func (brc bodyReadCloser) Close() error {
	bufPool.Put(brc.buf)
	if brc.body == nil {
		return nil
	}
	return brc.body.Close()
}
Cognitive complexity: 2
, Cyclomatic complexity: 2
Private functions
func allHeaderValues
allHeaderValues gets all values for a given header field, joined by a comma and space if more than one is set. If the header field is nil, then the omit is true, meaning some earlier logic in the server wanted to prevent this header from getting written at all. If the header is empty, then ok is false. Callers should still check that the value is not empty (the header field may be set but have an empty value).
allHeaderValues (h http.Header, field string) (string, bool, bool)
References: http.CanonicalHeaderKey, strings.Join.
func allNew
allNew (upstreams []Upstream) []*Upstream
func asciiEqualFold
asciiEqualFold is strings.EqualFold, ASCII only. It reports whether s and t are equal, ASCII-case-insensitively.
asciiEqualFold (s,t string) bool
func asciiIsPrint
asciiIsPrint returns whether s is ASCII and printable according to https://tools.ietf.org/html/rfc20#section-4.2.
asciiIsPrint (s string) bool
func asciiLower
asciiLower returns the ASCII lowercase version of b.
asciiLower (b byte) byte
func cloneRequest
cloneRequest makes a semi-deep clone of origReq.
Most of this code is borrowed from the Go stdlib reverse proxy, but we make a shallow-ish clone the request (deep clone only the headers and URL) so we can avoid manipulating the original request when using it to proxy upstream. This prevents request corruption and data races.
cloneRequest (origReq *http.Request) *http.Request
References: http.Request, url.URL, url.Userinfo.
func cmdReverseProxy
cmdReverseProxy (fs caddycmd.Flags) (int, error)
References: caddyconfig.JSON, caddyconfig.JSONModuleObject, caddyhttp.App, caddyhttp.AutoHTTPSConfig, caddyhttp.DefaultHTTPPort, caddyhttp.DefaultHTTPSPort, caddyhttp.MatchHost, caddyhttp.Route, caddyhttp.RouteList, caddyhttp.Server, caddyhttp.ServerLogConfig, caddytls.AutomationConfig, caddytls.AutomationPolicy, caddytls.TLS, fmt.Errorf, fmt.Sprint, headers.Handler, headers.HeaderOps, headers.RespHeaderOps, http.Header, httpcaddyfile.ParseAddress, json.RawMessage, strconv.Itoa, strings.Cut, strings.TrimSpace, zap.DebugLevel, zap.String, zap.Strings.
func copyHeader
copyHeader (dst,src http.Header)
func decodeBase64DERCert
decodeBase64DERCert base64-decodes, then DER-decodes, certStr.
decodeBase64DERCert (certStr string) (*x509.Certificate, error)
References: base64.StdEncoding, x509.ParseCertificate.
func hash
hash calculates a fast hash based on s.
hash (s string) uint64
func hashCookie
hashCookie hashes (HMAC-SHA256) some data with the secret.
hashCookie (secret string, data string) (string, error)
References: hex.EncodeToString, hmac.New, sha256.New.
func hostByHashing
hostByHashing returns an available host from pool based on a hashable string s.
hostByHashing (pool []*Upstream, s string) *Upstream
func init
init ()
func initReverseProxyMetrics
initReverseProxyMetrics (handler *Handler)
References: promauto.NewGaugeVec, prometheus.GaugeOpts.
func isWebsocket
isWebsocket returns true if r looks to be an upgrade request for WebSockets. It is a fairly naive check.
isWebsocket (r *http.Request) bool
References: httpguts.HeaderValuesContainsToken.
func lastHeaderValue
lastHeaderValue gets the last value for a given header field if more than one is set. If the header field is nil, then omit is true, meaning some earlier logic in the server wanted to prevent this header from getting written at all. If the header is empty, then ok is false. Callers should still check that the value is not empty (the header field may be set but have an empty value).
lastHeaderValue (h http.Header, field string) (string, bool, bool)
References: http.CanonicalHeaderKey.
func leastRequests
leastRequests returns the host with the least number of active requests to it. If more than one host has the same least number of active requests, then one of those is chosen at random.
leastRequests (upstreams []*Upstream) *Upstream
References: weakrand.Intn.
func loadFallbackPolicy
loadFallbackPolicy (d *caddyfile.Dispenser) (json.RawMessage, error)
References: caddyconfig.JSONModuleObject, caddyfile.UnmarshalModule.
func maskBytes
Copied from https://github.com/gorilla/websocket/blob/v1.5.0/mask.go
maskBytes (key [4]byte, pos int, b []byte) int
References: unsafe.Pointer.
func newMaskKey
Copied from https://github.com/gorilla/websocket/blob/v1.5.0/conn.go#L184
newMaskKey () [4]byte
References: weakrand.Uint32.
func newMetricsUpstreamsHealthyUpdater
newMetricsUpstreamsHealthyUpdater (handler *Handler) *metricsUpstreamsHealthyUpdater
func normalizeWebsocketHeaders
normalizeWebsocketHeaders ensures we use the standard casing as per RFC 6455, i.e. "WebSocket" with an uppercase 'S'. Most servers don't care about this difference (they read headers case-insensitively), but some do, so this maximizes compatibility with upstreams.
See https://github.com/caddyserver/caddy/pull/6621
normalizeWebsocketHeaders (header http.Header)
func parseCaddyfile
parseCaddyfile (h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error)
func parseCopyResponseCaddyfile
parseCopyResponseCaddyfile (h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error)
func parseCopyResponseHeadersCaddyfile
parseCopyResponseHeadersCaddyfile (h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error)
func parseUpstreamDialAddress
parseUpstreamDialAddress parses configuration inputs for the dial address, including support for a scheme in front as a shortcut for the port number, and a network type, for example 'unix' to dial a unix socket.
parseUpstreamDialAddress (upstreamAddr string) (parsedAddr, error)
References: fmt.Errorf, strings.Contains, strings.Count, strings.LastIndex, strings.ReplaceAll, url.Parse.
func removeConnectionHeaders
removeConnectionHeaders removes hop-by-hop headers listed in the "Connection" header of h. See RFC 7230, section 6.1
removeConnectionHeaders (h http.Header)
References: strings.Split, textproto.TrimString.
func resolveIpVersion
resolveIpVersion (versions *IPVersions) string
func selectRandomHost
selectRandomHost returns a random available host
selectRandomHost (pool []*Upstream) *Upstream
References: weakrand.Int.
func statusError
statusError returns an error value that has a status code.
statusError (err error) error
References: caddyhttp.Error, context.Canceled, errors.Is, http.StatusBadGateway, http.StatusGatewayTimeout, net.Error, strings.Contains.
func upgradeType
upgradeType (h http.Header) string
References: httpguts.HeaderValuesContainsToken, strings.ToLower.
func writeCloseControl
writeCloseControl sends a best-effort Close control message to the given WebSocket connection. Thanks to @pascaldekloe who provided inspiration from his simple implementation of this I was able to learn from at: github.com/pascaldekloe/websocket. Further work for handling masking taken from github.com/gorilla/websocket.
writeCloseControl (conn io.Writer, isClient bool) error
func replaceTLSServername
replaceTLSServername checks the TLS servername to see if it needs replacing. If it does need replacing, it creates a new cloned HTTPTransport object to avoid any races, does the replacing of the TLS servername on that, and returns the new object. If no replacement is necessary, it returns the original.
replaceTLSServername (repl *caddy.Replacer) *HTTPTransport
References: strings.Contains.
func shouldUseTLS
shouldUseTLS returns true if TLS should be used for req.
shouldUseTLS (req *http.Request) bool
func activeHealthChecker
activeHealthChecker runs active health checks on a regular basis and blocks until h.HealthChecks.Active.stopChan is closed.
activeHealthChecker ()
References: debug.Stack, time.Duration, time.NewTicker, zap.Any, zap.ByteString, zapcore.ErrorLevel.
func cleanupConnections
cleanupConnections closes hijacked connections. Depending on the value of StreamCloseDelay it does that either immediately or sets up a timer that will do that later.
cleanupConnections () error
References: time.AfterFunc, time.Duration, zap.Duration, zap.Error, zapcore.DebugLevel, zapcore.ErrorLevel.
func closeConnections
closeConnections immediately closes all hijacked connections (both to client and backend).
closeConnections () error
func countFailure
countFailure is used with passive health checks. It remembers 1 failure for upstream for the configured duration. If passive health checks are disabled or failure expiry is 0, this is a no-op.
countFailure (upstream *Upstream)
References: debug.Stack, time.Duration, time.NewTimer, zap.Any, zap.ByteString, zap.Error, zap.String, zapcore.ErrorLevel.
func doActiveHealthCheck
doActiveHealthCheck performs a health check to upstream which can be reached at address hostAddr. The actual address for the request will be built according to active health checker config. The health status of the host will be updated according to whether it passes the health check. An error is returned only if the health check fails to occur or if marking the host's health status fails.
doActiveHealthCheck (dialInfo DialInfo, hostAddr string, networkAddr string, upstream *Upstream) error
References: caddyhttp.OriginalRequestCtxKey, caddyhttp.StatusCodeMatches, caddyhttp.VarsCtxKey, context.WithValue, fmt.Errorf, http.NewRequestWithContext, io.Copy, io.Discard, io.LimitReader, io.ReadAll, io.Reader, net.JoinHostPort, net.SplitHostPort, slices.Contains, strconv.Itoa, strings.NewReader, url.URL, zap.Error, zap.Int, zap.String, zapcore.ErrorLevel, zapcore.InfoLevel.
func doActiveHealthCheckForAllHosts
doActiveHealthCheckForAllHosts immediately performs a health check for all upstream hosts configured by h.
doActiveHealthCheckForAllHosts ()
References: debug.Stack, zap.Any, zap.ByteString, zap.Error, zap.String, zapcore.ErrorLevel.
func finalizeResponse
finalizeResponse prepares and copies the response.
finalizeResponse (rw http.ResponseWriter, req *http.Request, res *http.Response, repl *caddy.Replacer, start time.Time, logger *zap.Logger) error
References: http.ErrAbortHandler, http.NewResponseController, http.StatusSwitchingProtocols, http.TrailerPrefix, strings.Join, sync.WaitGroup, time.Since, zap.Error, zapcore.DebugLevel, zapcore.WarnLevel.
func handleUpgradeResponse
handleUpgradeResponse (logger *zap.Logger, wg *sync.WaitGroup, rw http.ResponseWriter, req *http.Request, res *http.Response)
References: bufio.NewReadWriter, bufio.NewReaderSize, bufio.NewWriterSize, bufio.ReadWriter, caddyhttp.GetVar, errors.Is, fmt.Sprintf, http.ErrNotSupported, http.NewResponseController, http.StatusOK, io.ReadCloser, io.ReadWriteCloser, time.Duration, time.NewTimer, time.Now, time.Since, time.Time, zap.DebugLevel, zap.Duration, zap.Error, zap.ErrorLevel, zap.Int, zap.String, zap.Time, zapcore.DebugLevel.
func proxyLoopIteration
proxyLoopIteration implements an iteration of the proxy loop. Despite the enormous amount of local state that has to be passed in, we brought this into its own method so that we could run defer more easily. It returns true when the loop is done and should break; false otherwise. The error value returned should be assigned to the proxyErr value for the next iteration of the loop (or the error handled after break).
proxyLoopIteration (r *http.Request, origReq *http.Request, w http.ResponseWriter, proxyErr error, start time.Time, retries int, repl *caddy.Replacer, reqHeader http.Header, reqHost string, next caddyhttp.Handler) (bool, error)
References: caddyhttp.Error, caddyhttp.SetVar, context.Canceled, errors.Is, fmt.Errorf, http.Header, http.StatusServiceUnavailable, zap.Error, zap.Int, zap.String, zapcore.DebugLevel, zapcore.ErrorLevel.
func registerConnection
registerConnection holds onto conn so it can be closed in the event of a server shutdown. This is useful because hijacked connections or connections dialed to backends don't close when server is shut down. The caller should call the returned delete() function when the connection is done to remove it from memory.
registerConnection (conn io.ReadWriteCloser, gracefulClose func() error) func()
func reverseProxy
reverseProxy performs a round-trip to the given backend and processes the response with the client. (This method is mostly the beginning of what was borrowed from the net/http/httputil package in the Go standard library which was used as the foundation.)
reverseProxy (rw http.ResponseWriter, req *http.Request, origReq *http.Request, repl *caddy.Replacer, di DialInfo, next caddyhttp.Handler) error
References: caddyhttp.Error, caddyhttp.LoggableHTTPHeader, caddyhttp.LoggableHTTPRequest, caddyhttp.Server, caddyhttp.ServerCtxKey, caddyhttp.StatusCodeMatches, context.WithValue, http.Header, http.StatusInternalServerError, httptrace.ClientTrace, httptrace.WithClientTrace, strconv.Atoi, strings.Join, sync.Mutex, textproto.MIMEHeader, time.Duration, time.Now, time.Since, zap.Duration, zap.Error, zap.Int, zap.Object, zap.String, zapcore.DebugLevel.
func activeHealthFails
activeHealthFails returns the number of consecutive active health check failures with the upstream.
activeHealthFails () int
References: atomic.LoadInt64.
func activeHealthPasses
activeHealthPasses returns the number of consecutive active health check passes with the upstream.
activeHealthPasses () int
References: atomic.LoadInt64.
func countFail
countFail mutates the recent failures count by delta. It returns an error if the adjustment fails.
countFail (delta int) error
References: atomic.AddInt64, fmt.Errorf.
func countHealthFail
countHealthFail mutates the recent failures count by delta. It returns an error if the adjustment fails.
countHealthFail (delta int) error
References: atomic.AddInt64, fmt.Errorf.
func countHealthPass
countHealthPass mutates the recent passes count by delta. It returns an error if the adjustment fails.
countHealthPass (delta int) error
References: atomic.AddInt64, fmt.Errorf.
func countRequest
countRequest mutates the active request count by delta. It returns an error if the adjustment fails.
countRequest (delta int) error
References: atomic.AddInt64, fmt.Errorf.
func resetHealth
resetHealth resets the health check counters.
resetHealth ()
References: atomic.StoreInt64.
func fillDialInfo
fillDialInfo returns a filled DialInfo for upstream u, using the request context. Note that the returned value is not a pointer.
fillDialInfo (r *http.Request) (DialInfo, error)
References: fmt.Errorf, strconv.Itoa.
func fillHost
fillHost ()
func healthy
healthy returns true if the upstream is not actively marked as unhealthy. (This returns the status only from the "active" health checks.)
healthy () bool
References: atomic.LoadInt32.
func setHealthy
setHealthy sets the upstream as healthy or unhealthy and returns true if the new value is different. This sets the status only for the "active" health checks.
setHealthy (healthy bool) bool
References: atomic.CompareAndSwapInt32.
func delayedFlush
delayedFlush ()
func stop
stop ()
func update
update ()
References: prometheus.Labels.
func addForwardedHeaders
addForwardedHeaders adds the de-facto standard X-Forwarded-* headers to the request before it is sent upstream.
These headers are security sensitive, so care is taken to only use existing values for these headers from the incoming request if the client IP is trusted (i.e. coming from a trusted proxy sitting in front of this server). If the request didn't have the headers at all, then they will be added with the values that we can glean from the request.
addForwardedHeaders (req *http.Request) error
References: caddyhttp.GetVar, caddyhttp.TrustedProxyVarKey, fmt.Errorf, net.SplitHostPort, netip.ParseAddr, strings.Cut.
func bufferedBody
bufferedBody reads originalBody into a buffer with a maximum size of limit (-1 for unlimited), then returns a reader for the buffer along with how many bytes were buffered. Always close the return value when done with it, just as if it were the original body! If limit is 0 (which it shouldn't be), this function returns its input; i.e. it is a no-op, for safety.
bufferedBody (originalBody io.ReadCloser, limit int64) (io.ReadCloser, int64)
References: bytes.Buffer, io.Copy, io.CopyN, io.EOF, io.MultiReader.
func copyBuffer
copyBuffer returns any write errors or non-EOF read errors, and the amount of bytes written.
copyBuffer (dst io.Writer, src io.Reader, buf []byte, logger *zap.Logger) (int64, error)
References: context.Canceled, fmt.Errorf, io.EOF, io.ErrShortWrite, zap.Error, zap.Int, zap.Int64, zapcore.DebugLevel, zapcore.ErrorLevel.
func copyResponse
copyResponse (dst http.ResponseWriter, src io.Reader, flushInterval time.Duration, logger *zap.Logger) error
References: http.NewResponseController, io.Writer, time.AfterFunc, zap.Logger, zap.NewNop.
func directRequest
directRequest modifies only req.URL so that it points to the upstream in the given DialInfo. It must modify ONLY the request URL.
directRequest (req *http.Request, di DialInfo)
func flushInterval
flushInterval returns the p.FlushInterval value, conditionally overriding its value for a specific request/response.
flushInterval (req *http.Request, res *http.Response) time.Duration
References: mime.ParseMediaType, time.Duration.
func isBidirectionalStream
isBidirectionalStream returns whether we should work in bi-directional stream mode.
See https://github.com/caddyserver/caddy/pull/3620 for discussion of nuances.
isBidirectionalStream (req *http.Request, res *http.Response) bool
func prepareRequest
prepareRequest clones req so that it can be safely modified without changing the original request or introducing data races. It then modifies it so that it is ready to be proxied, except for directing to a specific upstream. This method adjusts headers and other relevant properties of the cloned request and should be done just once (before proxying) regardless of proxy retries. This assumes that no mutations of the cloned request are performed by h during or after proxying.
prepareRequest (req *http.Request, repl *caddy.Replacer) (*http.Request, error)
References: caddyhttp.ClientIPVarKey, caddyhttp.GetVar, caddyhttp.SetVar, httpguts.HeaderValuesContainsToken, netip.AddrPortFrom, netip.ParseAddr, netip.ParseAddrPort, strconv.FormatInt.
func provisionUpstream
provisionUpstream (upstream *Upstream)
func tryAgain
tryAgain takes the time that the handler was initially invoked, the amount of retries already performed, as well as any error currently obtained, and the request being tried, and returns true if another attempt should be made at proxying the request. If true is returned, it has already blocked long enough before the next retry (i.e. no more sleeping is needed). If false is returned, the handler should stop trying to proxy the request.
tryAgain (ctx caddy.Context, start time.Time, retries int, proxyErr error, req *http.Request, logger *zap.Logger) bool
References: caddyhttp.HandlerError, errors.Is, time.Duration, time.NewTimer, time.Since, zap.Error.
func expandedAddr
expandedAddr expands placeholders in the configured SRV domain labels. The return values are: addr, the RFC 2782 representation of the SRV domain; service, the service; proto, the protocol; and name, the name. If su.Service and su.Proto are empty, name will be returned as addr instead.
expandedAddr (r *http.Request) string
func formattedAddr
formattedAddr returns the RFC 2782 representation of the SRV domain, in the form "_service._proto.name".
formattedAddr (service,proto,name string) string
References: fmt.Sprintf.
func handleUpstreams
handleUpstreams reports the status of the reverse proxy upstream pool.
handleUpstreams (w http.ResponseWriter, r *http.Request) error
References: fmt.Errorf, http.MethodGet, http.StatusInternalServerError, http.StatusMethodNotAllowed, json.NewEncoder.
func dialAddr
dialAddr () string
References: net.JoinHostPort, strings.Contains.
func isUnix
isUnix () bool
func rangedPort
rangedPort () bool
References: strings.Contains.
func replaceablePort
replaceablePort () bool
References: strings.Contains.
func isFresh
isFresh () bool
References: time.Duration, time.Since.
func copyFromBackend
copyFromBackend (errc chan<- error)
References: io.Copy.
func copyToBackend
copyToBackend (errc chan<- error)
References: io.Copy.
Tests
Files: 6. Third party imports: 0. Imports from organisation: 0. Tests: 20. Benchmarks: 0.