Merge pull request #3267 from weaveworks/pool-gzwriters

Re-use gzip writers in a pool
Bryan Boreham authored on 2018-07-13 16:09:24 +01:00, committed by GitHub
52 changed files with 761 additions and 4324 deletions
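The core change: instead of allocating a fresh gzip.Writer for every encoded report, a writer is taken from a sync.Pool, Reset onto the destination, and put back after use; on the HTTP side the hand-rolled Ghost gzip middleware is replaced by github.com/NYTimes/gziphandler, which maintains its own per-level writer pools (see the vendored gzip.go below). A minimal, hedged sketch of the pooling pattern (illustrative names, not the exact Scope code):

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"sync"
)

// Pool of reusable gzip writers. gzip.NewWriterLevel only returns an error
// for an invalid level, which cannot happen with DefaultCompression.
var gzipWriterPool = &sync.Pool{
	New: func() interface{} {
		w, _ := gzip.NewWriterLevel(nil, gzip.DefaultCompression)
		return w
	},
}

// compress gzips src into a fresh buffer using a pooled writer.
func compress(src []byte) (*bytes.Buffer, error) {
	buf := &bytes.Buffer{}
	gz := gzipWriterPool.Get().(*gzip.Writer)
	gz.Reset(buf)                // re-point the reused writer at the new destination
	defer gzipWriterPool.Put(gz) // return the writer for the next caller
	if _, err := gz.Write(src); err != nil {
		return nil, err
	}
	// Close flushes the remaining data and the gzip trailer into buf;
	// without it the output would be truncated.
	if err := gz.Close(); err != nil {
		return nil, err
	}
	return buf, nil
}

func main() {
	out, err := compress([]byte("hello, gzip writer pool"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("compressed to %d bytes\n", out.Len())
}
```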

View File

@@ -11,7 +11,7 @@ import (
"sync"
"time"
"github.com/PuerkitoBio/ghost/handlers"
"github.com/NYTimes/gziphandler"
log "github.com/Sirupsen/logrus"
"github.com/gorilla/mux"
"github.com/ugorji/go/codec"
@@ -86,32 +86,29 @@ func matchURL(r *http.Request, pattern string) (map[string]string, bool) {
 	return vars, true
 }
 
-func gzipHandler(h http.HandlerFunc) http.HandlerFunc {
-	return handlers.GZIPHandlerFunc(h, nil)
+func gzipHandler(h http.HandlerFunc) http.Handler {
+	return gziphandler.GzipHandler(h)
 }
 
 // RegisterTopologyRoutes registers the various topology routes with a http mux.
 func RegisterTopologyRoutes(router *mux.Router, r Reporter, capabilities map[string]bool) {
 	get := router.Methods("GET").Subrouter()
-	get.HandleFunc("/api",
+	get.Handle("/api",
 		gzipHandler(requestContextDecorator(apiHandler(r, capabilities))))
-	get.HandleFunc("/api/topology",
+	get.Handle("/api/topology",
 		gzipHandler(requestContextDecorator(topologyRegistry.makeTopologyList(r))))
-	get.
-		HandleFunc("/api/topology/{topology}",
-			gzipHandler(requestContextDecorator(topologyRegistry.captureRenderer(r, handleTopology)))).
+	get.Handle("/api/topology/{topology}",
+		gzipHandler(requestContextDecorator(topologyRegistry.captureRenderer(r, handleTopology)))).
 		Name("api_topology_topology")
-	get.
-		HandleFunc("/api/topology/{topology}/ws",
-			requestContextDecorator(captureReporter(r, handleWebsocket))). // NB not gzip!
+	get.Handle("/api/topology/{topology}/ws",
+		requestContextDecorator(captureReporter(r, handleWebsocket))). // NB not gzip!
 		Name("api_topology_topology_ws")
-	get.
-		MatcherFunc(URLMatcher("/api/topology/{topology}/{id}")).HandlerFunc(
+	get.MatcherFunc(URLMatcher("/api/topology/{topology}/{id}")).Handler(
 		gzipHandler(requestContextDecorator(topologyRegistry.captureRenderer(r, handleNode)))).
 		Name("api_topology_topology_id")
-	get.HandleFunc("/api/report",
+	get.Handle("/api/report",
 		gzipHandler(requestContextDecorator(makeRawReportHandler(r))))
-	get.HandleFunc("/api/probes",
+	get.Handle("/api/probes",
 		gzipHandler(requestContextDecorator(makeProbeHandler(r))))
 }
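Since gziphandler.GzipHandler returns an http.Handler rather than a handler func, the routes above switch from HandleFunc to Handle. A hedged sketch of the same wiring in isolation (the route and handler body are placeholders, not Scope's):

```go
package main

import (
	"log"
	"net/http"

	"github.com/NYTimes/gziphandler"
	"github.com/gorilla/mux"
)

// gzipHandler wraps a handler func so its response body is gzipped when the
// client sends Accept-Encoding: gzip; http.HandlerFunc satisfies http.Handler.
func gzipHandler(h http.HandlerFunc) http.Handler {
	return gziphandler.GzipHandler(h)
}

func main() {
	router := mux.NewRouter()
	get := router.Methods("GET").Subrouter()

	// Handle (not HandleFunc), because gzipHandler now returns an http.Handler.
	get.Handle("/api", gzipHandler(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		w.Write([]byte(`{"ok":true}`))
	}))

	log.Fatal(http.ListenAndServe(":8080", router))
}
```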

View File

@@ -2,7 +2,6 @@
 package main
 
 import (
-	"compress/gzip"
 	"flag"
 	"log"
@@ -20,7 +19,7 @@ func main() {
 	if err != nil {
 		log.Fatal(err)
 	}
-	if err = rpt.WriteToFile(flag.Arg(1), gzip.DefaultCompression); err != nil {
+	if err = rpt.WriteToFile(flag.Arg(1)); err != nil {
 		log.Fatal(err)
 	}
 }

View File

@@ -39,11 +39,10 @@ func (StdoutPublisher) Publish(rep Report) error {
 // WriteBinary writes a Report as a gzipped msgpack into a bytes.Buffer
 func (rep Report) WriteBinary() (*bytes.Buffer, error) {
 	w := &bytes.Buffer{}
-	gzwriter, err := gzip.NewWriterLevel(w, gzip.DefaultCompression)
-	if err != nil {
-		return nil, err
-	}
-	if err = codec.NewEncoder(gzwriter, &codec.MsgpackHandle{}).Encode(&rep); err != nil {
+	gzwriter := gzipWriterPool.Get().(*gzip.Writer)
+	gzwriter.Reset(w)
+	defer gzipWriterPool.Put(gzwriter)
+	if err := codec.NewEncoder(gzwriter, &codec.MsgpackHandle{}).Encode(&rep); err != nil {
 		return nil, err
 	}
 	gzwriter.Close() // otherwise the content won't get flushed to the output stream
@@ -61,10 +60,14 @@ func (c byteCounter) Read(p []byte) (n int, err error) {
 	return n, err
 }
 
-// buffer pool to reduce garbage-collection
+// buffer pools to reduce garbage-collection
 var bufferPool = &sync.Pool{
 	New: func() interface{} { return new(bytes.Buffer) },
 }
+
+var gzipWriterPool = &sync.Pool{
+	// NewWriterLevel() only errors if the compression level is invalid, which can't happen here
+	New: func() interface{} { w, _ := gzip.NewWriterLevel(nil, gzip.DefaultCompression); return w },
+}
 
 // ReadBinary reads bytes into a Report.
 //
@@ -169,7 +172,7 @@ func MakeFromFile(path string) (rpt Report, _ error) {
 // WriteToFile writes a Report to a file. The encoding is determined
 // by the file extension (".msgpack" or ".json", with an optional
 // ".gz").
-func (rep *Report) WriteToFile(path string, compressionLevel int) error {
+func (rep *Report) WriteToFile(path string) error {
 	f, err := os.Create(path)
 	if err != nil {
 		return err
@@ -186,10 +189,9 @@ func (rep *Report) WriteToFile(path string, compressionLevel int) error {
 	defer bufwriter.Flush()
 	w = bufwriter
 	if gzipped {
-		gzwriter, err := gzip.NewWriterLevel(w, compressionLevel)
-		if err != nil {
-			return err
-		}
+		gzwriter := gzipWriterPool.Get().(*gzip.Writer)
+		gzwriter.Reset(w)
+		defer gzipWriterPool.Put(gzwriter)
 		defer gzwriter.Close()
 		w = gzwriter
 	}
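The same pool serves the file-writing path; because defers run last-in-first-out, the gzip writer is closed (flushing its trailer) before it is returned to the pool and before the buffered writer flushes to disk. A rough, hedged sketch of that shape with a hypothetical helper and simplified error handling:

```go
package main

import (
	"bufio"
	"compress/gzip"
	"os"
	"sync"
)

var gzipWriterPool = &sync.Pool{
	New: func() interface{} {
		w, _ := gzip.NewWriterLevel(nil, gzip.DefaultCompression)
		return w
	},
}

// writeGzipped is a hypothetical helper mirroring the gzipped branch of
// WriteToFile. Defers run last-in-first-out, so the order at return is:
// gzwriter.Close (writes the gzip trailer), Put (back to the pool),
// bufwriter.Flush (to the file), then f.Close.
func writeGzipped(path string, payload []byte) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()

	bufwriter := bufio.NewWriter(f)
	defer bufwriter.Flush()

	gzwriter := gzipWriterPool.Get().(*gzip.Writer)
	gzwriter.Reset(bufwriter)
	defer gzipWriterPool.Put(gzwriter)
	defer gzwriter.Close()

	_, err = gzwriter.Write(payload)
	return err
}

func main() {
	if err := writeGzipped("example.gz", []byte("hello")); err != nil {
		panic(err)
	}
}
```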

vendor/github.com/NYTimes/gziphandler/LICENSE (generated, vendored, new file; 201 lines)
View File

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2016-2017 The New York Times Company
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/github.com/NYTimes/gziphandler/gzip.go (generated, vendored, new file; 485 lines)
View File

@@ -0,0 +1,485 @@
package gziphandler
import (
"bufio"
"compress/gzip"
"fmt"
"io"
"mime"
"net"
"net/http"
"strconv"
"strings"
"sync"
)
const (
vary = "Vary"
acceptEncoding = "Accept-Encoding"
contentEncoding = "Content-Encoding"
contentType = "Content-Type"
contentLength = "Content-Length"
)
type codings map[string]float64
const (
// DefaultQValue is the default qvalue to assign to an encoding if no explicit qvalue is set.
// This is actually kind of ambiguous in RFC 2616, so hopefully it's correct.
// The examples seem to indicate that it is.
DefaultQValue = 1.0
// 1500 bytes is the MTU size for the internet since that is the largest size allowed at the network layer.
// If you take a file that is 1300 bytes and compress it to 800 bytes, it's still transmitted in that same 1500 byte packet regardless, so you've gained nothing.
// That being the case, you should restrict the gzip compression to files with a size greater than a single packet; 1400 bytes (1.4KB) is a safe value.
DefaultMinSize = 1400
)
// gzipWriterPools stores a sync.Pool for each compression level for reuse of
// gzip.Writers. Use poolIndex to convert a compression level to an index into
// gzipWriterPools.
var gzipWriterPools [gzip.BestCompression - gzip.BestSpeed + 2]*sync.Pool
func init() {
for i := gzip.BestSpeed; i <= gzip.BestCompression; i++ {
addLevelPool(i)
}
addLevelPool(gzip.DefaultCompression)
}
// poolIndex maps a compression level to its index into gzipWriterPools. It
// assumes that level is a valid gzip compression level.
func poolIndex(level int) int {
// gzip.DefaultCompression == -1, so we need to treat it special.
if level == gzip.DefaultCompression {
return gzip.BestCompression - gzip.BestSpeed + 1
}
return level - gzip.BestSpeed
}
func addLevelPool(level int) {
gzipWriterPools[poolIndex(level)] = &sync.Pool{
New: func() interface{} {
// NewWriterLevel only returns error on a bad level, we are guaranteeing
// that this will be a valid level so it is okay to ignore the returned
// error.
w, _ := gzip.NewWriterLevel(nil, level)
return w
},
}
}
// GzipResponseWriter provides an http.ResponseWriter interface, which gzips
// bytes before writing them to the underlying response. This doesn't close the
// writers, so don't forget to do that.
// It can be configured to skip responses smaller than minSize.
type GzipResponseWriter struct {
http.ResponseWriter
index int // Index for gzipWriterPools.
gw *gzip.Writer
code int // Saves the WriteHeader value.
minSize int // Specifies the minimum response size to gzip. If the response length is bigger than this value, it is compressed.
buf []byte // Holds the first part of the write before reaching the minSize or the end of the write.
contentTypes []parsedContentType // Only compress if the response is one of these content-types. All are accepted if empty.
}
type GzipResponseWriterWithCloseNotify struct {
*GzipResponseWriter
}
func (w GzipResponseWriterWithCloseNotify) CloseNotify() <-chan bool {
return w.ResponseWriter.(http.CloseNotifier).CloseNotify()
}
// Write appends data to the gzip writer.
func (w *GzipResponseWriter) Write(b []byte) (int, error) {
// If content type is not set.
if _, ok := w.Header()[contentType]; !ok {
// Infer it from the uncompressed body.
w.Header().Set(contentType, http.DetectContentType(b))
}
// GZIP responseWriter is initialized. Use the GZIP responseWriter.
if w.gw != nil {
n, err := w.gw.Write(b)
return n, err
}
// Save the write into a buffer for later use in GZIP responseWriter (if content is long enough) or at close with regular responseWriter.
// On the first write, w.buf changes from nil to a valid slice
w.buf = append(w.buf, b...)
// If the global writes are bigger than the minSize and we're about to write
// a response containing a content type we want to handle, enable
// compression.
if len(w.buf) >= w.minSize && handleContentType(w.contentTypes, w) && w.Header().Get(contentEncoding) == "" {
err := w.startGzip()
if err != nil {
return 0, err
}
}
return len(b), nil
}
// startGzip initializes any GZIP-specific information.
func (w *GzipResponseWriter) startGzip() error {
// Set the GZIP header.
w.Header().Set(contentEncoding, "gzip")
// if the Content-Length is already set, then calls to Write on gzip
// will fail to set the Content-Length header since its already set
// See: https://github.com/golang/go/issues/14975.
w.Header().Del(contentLength)
// Write the header to gzip response.
if w.code != 0 {
w.ResponseWriter.WriteHeader(w.code)
}
// Initialize the GZIP response.
w.init()
// Flush the buffer into the gzip response.
n, err := w.gw.Write(w.buf)
// This should never happen (per io.Writer docs), but if the write didn't
// accept the entire buffer but returned no specific error, we have no clue
// what's going on, so abort just to be safe.
if err == nil && n < len(w.buf) {
return io.ErrShortWrite
}
w.buf = nil
return err
}
// WriteHeader just saves the response code until close or GZIP effective writes.
func (w *GzipResponseWriter) WriteHeader(code int) {
if w.code == 0 {
w.code = code
}
}
// init grabs a new gzip writer from the gzipWriterPool and writes the correct
// content encoding header.
func (w *GzipResponseWriter) init() {
// Bytes written during ServeHTTP are redirected to this gzip writer
// before being written to the underlying response.
gzw := gzipWriterPools[w.index].Get().(*gzip.Writer)
gzw.Reset(w.ResponseWriter)
w.gw = gzw
}
// Close will close the gzip.Writer and will put it back in the gzipWriterPool.
func (w *GzipResponseWriter) Close() error {
if w.gw == nil {
// Gzip not triggered yet, write out regular response.
if w.code != 0 {
w.ResponseWriter.WriteHeader(w.code)
}
if w.buf != nil {
_, writeErr := w.ResponseWriter.Write(w.buf)
// Returns the error if any at write.
if writeErr != nil {
return fmt.Errorf("gziphandler: write to regular responseWriter at close gets error: %q", writeErr.Error())
}
}
return nil
}
err := w.gw.Close()
gzipWriterPools[w.index].Put(w.gw)
w.gw = nil
return err
}
// Flush flushes the underlying *gzip.Writer and then the underlying
// http.ResponseWriter if it is an http.Flusher. This makes GzipResponseWriter
// an http.Flusher.
func (w *GzipResponseWriter) Flush() {
if w.gw == nil {
// Only flush once startGzip has been called.
//
// Flush is thus a no-op until the written body
// exceeds minSize.
return
}
w.gw.Flush()
if fw, ok := w.ResponseWriter.(http.Flusher); ok {
fw.Flush()
}
}
// Hijack implements http.Hijacker. If the underlying ResponseWriter is a
// Hijacker, its Hijack method is returned. Otherwise an error is returned.
func (w *GzipResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
if hj, ok := w.ResponseWriter.(http.Hijacker); ok {
return hj.Hijack()
}
return nil, nil, fmt.Errorf("http.Hijacker interface is not supported")
}
// verify Hijacker interface implementation
var _ http.Hijacker = &GzipResponseWriter{}
// MustNewGzipLevelHandler behaves just like NewGzipLevelHandler except that in
// an error case it panics rather than returning an error.
func MustNewGzipLevelHandler(level int) func(http.Handler) http.Handler {
wrap, err := NewGzipLevelHandler(level)
if err != nil {
panic(err)
}
return wrap
}
// NewGzipLevelHandler returns a wrapper function (often known as middleware)
// which can be used to wrap an HTTP handler to transparently gzip the response
// body if the client supports it (via the Accept-Encoding header). Responses will
// be encoded at the given gzip compression level. An error will be returned only
// if an invalid gzip compression level is given, so if one can ensure the level
// is valid, the returned error can be safely ignored.
func NewGzipLevelHandler(level int) (func(http.Handler) http.Handler, error) {
return NewGzipLevelAndMinSize(level, DefaultMinSize)
}
// NewGzipLevelAndMinSize behaves like NewGzipLevelHandler except it lets the caller
// specify the minimum response size required before compression is applied.
func NewGzipLevelAndMinSize(level, minSize int) (func(http.Handler) http.Handler, error) {
return GzipHandlerWithOpts(CompressionLevel(level), MinSize(minSize))
}
func GzipHandlerWithOpts(opts ...option) (func(http.Handler) http.Handler, error) {
c := &config{
level: gzip.DefaultCompression,
minSize: DefaultMinSize,
}
for _, o := range opts {
o(c)
}
if err := c.validate(); err != nil {
return nil, err
}
return func(h http.Handler) http.Handler {
index := poolIndex(c.level)
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Add(vary, acceptEncoding)
if acceptsGzip(r) {
gw := &GzipResponseWriter{
ResponseWriter: w,
index: index,
minSize: c.minSize,
contentTypes: c.contentTypes,
}
defer gw.Close()
if _, ok := w.(http.CloseNotifier); ok {
gwcn := GzipResponseWriterWithCloseNotify{gw}
h.ServeHTTP(gwcn, r)
} else {
h.ServeHTTP(gw, r)
}
} else {
h.ServeHTTP(w, r)
}
})
}, nil
}
// Parsed representation of one of the inputs to ContentTypes.
// See https://golang.org/pkg/mime/#ParseMediaType
type parsedContentType struct {
mediaType string
params map[string]string
}
// equals returns whether this content type matches another content type.
func (pct parsedContentType) equals(mediaType string, params map[string]string) bool {
if pct.mediaType != mediaType {
return false
}
// if pct has no params, don't care about other's params
if len(pct.params) == 0 {
return true
}
// if pct has any params, they must be identical to other's.
if len(pct.params) != len(params) {
return false
}
for k, v := range pct.params {
if w, ok := params[k]; !ok || v != w {
return false
}
}
return true
}
// Used for functional configuration.
type config struct {
minSize int
level int
contentTypes []parsedContentType
}
func (c *config) validate() error {
if c.level != gzip.DefaultCompression && (c.level < gzip.BestSpeed || c.level > gzip.BestCompression) {
return fmt.Errorf("invalid compression level requested: %d", c.level)
}
if c.minSize < 0 {
return fmt.Errorf("minimum size must be more than zero")
}
return nil
}
type option func(c *config)
func MinSize(size int) option {
return func(c *config) {
c.minSize = size
}
}
func CompressionLevel(level int) option {
return func(c *config) {
c.level = level
}
}
// ContentTypes specifies a list of content types to compare
// the Content-Type header to before compressing. If none
// match, the response will be returned as-is.
//
// Content types are compared in a case-insensitive, whitespace-ignored
// manner.
//
// A MIME type without any other directive will match a content type
// that has the same MIME type, regardless of that content type's other
// directives. I.e., "text/html" will match both "text/html" and
// "text/html; charset=utf-8".
//
// A MIME type with any other directive will only match a content type
// that has the same MIME type and other directives. I.e.,
// "text/html; charset=utf-8" will only match "text/html; charset=utf-8".
//
// By default, responses are gzipped regardless of
// Content-Type.
func ContentTypes(types []string) option {
return func(c *config) {
c.contentTypes = []parsedContentType{}
for _, v := range types {
mediaType, params, err := mime.ParseMediaType(v)
if err == nil {
c.contentTypes = append(c.contentTypes, parsedContentType{mediaType, params})
}
}
}
}
// GzipHandler wraps an HTTP handler, to transparently gzip the response body if
// the client supports it (via the Accept-Encoding header). This will compress at
// the default compression level.
func GzipHandler(h http.Handler) http.Handler {
wrapper, _ := NewGzipLevelHandler(gzip.DefaultCompression)
return wrapper(h)
}
// acceptsGzip returns true if the given HTTP request indicates that it will
// accept a gzipped response.
func acceptsGzip(r *http.Request) bool {
acceptedEncodings, _ := parseEncodings(r.Header.Get(acceptEncoding))
return acceptedEncodings["gzip"] > 0.0
}
// returns true if we've been configured to compress the specific content type.
func handleContentType(contentTypes []parsedContentType, w http.ResponseWriter) bool {
// If contentTypes is empty we handle all content types.
if len(contentTypes) == 0 {
return true
}
ct := w.Header().Get(contentType)
mediaType, params, err := mime.ParseMediaType(ct)
if err != nil {
return false
}
for _, c := range contentTypes {
if c.equals(mediaType, params) {
return true
}
}
return false
}
// parseEncodings attempts to parse a list of codings, per RFC 2616, as might
// appear in an Accept-Encoding header. It returns a map of content-codings to
// quality values, and an error containing the errors encountered. It's probably
// safe to ignore those, because silently ignoring errors is how the internet
// works.
//
// See: http://tools.ietf.org/html/rfc2616#section-14.3.
func parseEncodings(s string) (codings, error) {
c := make(codings)
var e []string
for _, ss := range strings.Split(s, ",") {
coding, qvalue, err := parseCoding(ss)
if err != nil {
e = append(e, err.Error())
} else {
c[coding] = qvalue
}
}
// TODO (adammck): Use a proper multi-error struct, so the individual errors
// can be extracted if anyone cares.
if len(e) > 0 {
return c, fmt.Errorf("errors while parsing encodings: %s", strings.Join(e, ", "))
}
return c, nil
}
// parseCoding parses a single coding (content-coding with an optional qvalue),
// as might appear in an Accept-Encoding header. It attempts to forgive minor
// formatting errors.
func parseCoding(s string) (coding string, qvalue float64, err error) {
for n, part := range strings.Split(s, ";") {
part = strings.TrimSpace(part)
qvalue = DefaultQValue
if n == 0 {
coding = strings.ToLower(part)
} else if strings.HasPrefix(part, "q=") {
qvalue, err = strconv.ParseFloat(strings.TrimPrefix(part, "q="), 64)
if qvalue < 0.0 {
qvalue = 0.0
} else if qvalue > 1.0 {
qvalue = 1.0
}
}
}
if coding == "" {
err = fmt.Errorf("empty content-coding")
}
return
}
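For reference, a hedged example of using the vendored package's functional-options constructor shown above; the level, minimum size, and content types are illustrative values only:

```go
package main

import (
	"log"
	"net/http"

	"github.com/NYTimes/gziphandler"
)

func main() {
	// Build gzip middleware: level 6, only compress responses of 860+ bytes,
	// and only for the listed content types. An error is returned only for an
	// invalid compression level.
	wrap, err := gziphandler.GzipHandlerWithOpts(
		gziphandler.CompressionLevel(6),
		gziphandler.MinSize(860),
		gziphandler.ContentTypes([]string{"application/json", "text/html"}),
	)
	if err != nil {
		log.Fatal(err)
	}

	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		w.Write([]byte(`{"hello":"world"}`))
	})

	http.Handle("/", wrap(hello))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```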

vendor/github.com/NYTimes/gziphandler/gzip_go18.go (generated, vendored, new file; 43 lines)
View File

@@ -0,0 +1,43 @@
// +build go1.8
package gziphandler
import "net/http"
// Push initiates an HTTP/2 server push.
// Push returns ErrNotSupported if the client has disabled push or if push
// is not supported on the underlying connection.
func (w *GzipResponseWriter) Push(target string, opts *http.PushOptions) error {
pusher, ok := w.ResponseWriter.(http.Pusher)
if ok && pusher != nil {
return pusher.Push(target, setAcceptEncodingForPushOptions(opts))
}
return http.ErrNotSupported
}
// setAcceptEncodingForPushOptions sets "Accept-Encoding" : "gzip" for PushOptions without overriding existing headers.
func setAcceptEncodingForPushOptions(opts *http.PushOptions) *http.PushOptions {
if opts == nil {
opts = &http.PushOptions{
Header: http.Header{
acceptEncoding: []string{"gzip"},
},
}
return opts
}
if opts.Header == nil {
opts.Header = http.Header{
acceptEncoding: []string{"gzip"},
}
return opts
}
if encoding := opts.Header.Get(acceptEncoding); encoding == "" {
opts.Header.Add(acceptEncoding, "gzip")
return opts
}
return opts
}
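Under HTTP/2 the wrapper forwards Push to the underlying writer and injects Accept-Encoding: gzip into the push options, so pushed resources can also be served compressed. A small, hedged sketch (the resource path and certificate files are placeholders):

```go
package main

import (
	"log"
	"net/http"

	"github.com/NYTimes/gziphandler"
)

func main() {
	page := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// The gzip response writer implements http.Pusher by delegating to the
		// underlying writer (see Push above); on HTTP/1.x the inner writer is
		// not a Pusher and Push returns http.ErrNotSupported.
		if pusher, ok := w.(http.Pusher); ok {
			// nil options: the wrapper fills in Accept-Encoding: gzip.
			pusher.Push("/static/app.css", nil)
		}
		w.Header().Set("Content-Type", "text/html")
		w.Write([]byte("<html><body>hello</body></html>"))
	})

	http.Handle("/", gziphandler.GzipHandler(page))
	// HTTP/2 (and therefore server push) requires TLS with net/http;
	// the certificate paths are placeholders.
	log.Fatal(http.ListenAndServeTLS(":8443", "cert.pem", "key.pem", nil))
}
```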

View File

@@ -1,12 +0,0 @@
Copyright (c) 2013, Martin Angers
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -1,83 +0,0 @@
# Ghost
Ghost is a web development library loosely inspired by node's [Connect library][connect]. It provides a number of simple, single-responsibility HTTP handlers that can be combined to build a full-featured web server, and a generic template engine integration interface.
It stays close to the metal, not abstracting Go's standard library away. As a matter of fact, any stdlib handler can be used with Ghost's handlers; they simply are `net/http.Handler`s.
## Installation and documentation
`go get github.com/PuerkitoBio/ghost`
[API reference][godoc]
*Status* : Still under development, things will change.
## Example
See the /ghostest directory for a complete working example of a website built with Ghost. It shows all handlers and template support of Ghost.
## Handlers
Ghost offers the following handlers:
* BasicAuthHandler : basic authentication support.
* ContextHandler : key-value map provider for the duration of the request.
* FaviconHandler : simple and efficient favicon renderer.
* GZIPHandler : gzip-compresser for the body of the response.
* LogHandler : fully customizable request logger.
* PanicHandler : panic-catching handler to control the error response.
* SessionHandler : store-agnostic server-side session provider.
* StaticHandler : convenience handler that wraps a call to `net/http.ServeFile`.
Two stores are provided for the session persistence, `MemoryStore`, an in-memory map that is not suited for production environment, and `RedisStore`, a more robust and scalable [redigo][]-based Redis store. Because of the generic `SessionStore` interface, custom stores can easily be created as needed.
The `handlers` package also offers the `ChainableHandler` interface, which supports combining HTTP handlers in a sequential fashion, and the `ChainHandlers()` function that creates a new handler from the sequential combination of any number of handlers.
As a convenience, all functions that take a `http.Handler` as argument also have a corresponding function with the `Func` suffix that take a `http.HandlerFunc` instead as argument. This saves the type-cast when a simple handler function is passed (for example, `SessionHandler()` and `SessionHandlerFunc()`).
### Handlers Design
The HTTP handlers such as Basic Auth and Context need to store some state information to provide their functionality. Instead of using variables and a mutex to control shared access, Ghost augments the `http.ResponseWriter` interface that is part of the Handler's `ServeHTTP()` function signature. Because this instance is unique for each request and is not shared, there is no locking involved to access the state information.
However, when combining such handlers, Ghost needs a way to move through the chain of augmented ResponseWriters. This is why these *augmented writers* need to implement the `WrapWriter` interface. A single method is required, `WrappedWriter() http.ResponseWriter`, which returns the wrapped ResponseWriter.
And to get back a specific augmented writer, the `GetResponseWriter()` function is provided. It takes a ResponseWriter and a predicate function as argument, and returns the requested specific writer using the *comma-ok* pattern. Example, for the session writer:
```Go
func getSessionWriter(w http.ResponseWriter) (*sessResponseWriter, bool) {
ss, ok := GetResponseWriter(w, func(tst http.ResponseWriter) bool {
_, ok := tst.(*sessResponseWriter)
return ok
})
if ok {
return ss.(*sessResponseWriter), true
}
return nil, false
}
```
Ghost does not provide a muxer, there are already many great ones available, but I would recommend Go's native `http.ServeMux` or [pat][] because it has great features and plays well with Ghost's design. Gorilla's muxer is very popular, but since it depends on Gorilla's (mutex-based) context provider, this is redundant with Ghost's context.
## Templates
Ghost supports the following template engines:
* Go's native templates (needs work, at the moment does not work with nested templates)
* [Amber][]
TODO : Go's mustache implementation.
### Templates Design
The template engines can be registered much in the same way as database drivers, just by importing for side effects (using `_ "import/path"`). The `init()` function of the template engine's package registers the template compiler with the correct file extension, and the engine can be used.
## License
The [BSD 3-Clause license][lic].
[connect]: https://github.com/senchalabs/connect
[godoc]: http://godoc.org/github.com/PuerkitoBio/ghost
[lic]: http://opensource.org/licenses/BSD-3-Clause
[redigo]: https://github.com/garyburd/redigo
[pat]: https://github.com/bmizerany/pat
[amber]: https://github.com/eknkc/amber

View File

@@ -1,12 +0,0 @@
package ghost
import (
"log"
)
// Logging function, defaults to Go's native log.Printf function. The idea to use
// this instead of a *log.Logger struct is that it can be set to any of log.{Printf,Fatalf, Panicf},
// but also to more flexible userland loggers like SeeLog (https://github.com/cihub/seelog).
// It could be set, for example, to SeeLog's Debugf function. Any function with the
// signature func(fmt string, params ...interface{}).
var LogFn = log.Printf

View File

@@ -1,22 +0,0 @@
<html>
<head>
<title>Ghost Test</title>
<link type="text/css" rel="stylesheet" href="/public/styles.css">
<link type="text/css" rel="stylesheet" href="/public/bootstrap-combined.min.css">
</head>
<body>
<h1>Welcome to Ghost Test</h1>
<img src="/public/logo.png" alt="peace" />
<ol>
<li><a href="/session">Session</a></li>
<li><a href="/session/auth">Authenticated Session</a></li>
<li><a href="/context">Chained Context</a></li>
<li><a href="/panic">Panic</a></li>
<li><a href="/public/styles.css">Styles.css</a></li>
<li><a href="/public/jquery-2.0.0.min.js">JQuery</a></li>
<li><a href="/public/logo.png">Logo</a></li>
</ol>
<script src="/public/jquery-2.0.0.min.js"></script>
</body>
</html>

View File

@@ -1,169 +0,0 @@
// Ghostest is an interactive end-to-end Web site application to test
// the ghost packages. It serves the following URLs, with the specified
// features (handlers):
//
// / : panic;log;gzip;static; -> serve file index.html
// /public/styles.css : panic;log;gzip;StripPrefix;FileServer; -> serve directory public/
// /public/script.js : panic;log;gzip;StripPrefix;FileServer; -> serve directory public/
// /public/logo.png : panic;log;gzip;StripPrefix;FileServer; -> serve directory public/
// /session : panic;log;gzip;session;context;Custom; -> serve dynamic Go template
// /session/auth : panic;log;gzip;session;context;basicAuth;Custom; -> serve dynamic template
// /panic : panic;log;gzip;Custom; -> panics
// /context : panic;log;gzip;context;Custom1;Custom2; -> serve dynamic Amber template
package main
import (
"log"
"net/http"
"time"
"github.com/PuerkitoBio/ghost/handlers"
"github.com/PuerkitoBio/ghost/templates"
_ "github.com/PuerkitoBio/ghost/templates/amber"
_ "github.com/PuerkitoBio/ghost/templates/gotpl"
"github.com/bmizerany/pat"
)
const (
sessionPageTitle = "Session Page"
sessionPageAuthTitle = "Authenticated Session Page"
sessionPageKey = "txt"
contextPageKey = "time"
sessionExpiration = 10 // Session expires after 10 seconds
)
var (
// Create the common session store and secret
memStore = handlers.NewMemoryStore(1)
secret = "testimony of the ancients"
)
// The struct used to pass data to the session template.
type sessionPageInfo struct {
SessionID string
Title string
Text string
}
// Authenticate the Basic Auth credentials.
func authenticate(u, p string) (interface{}, bool) {
if u == "user" && p == "pwd" {
return u + p, true
}
return nil, false
}
// Handle the session page requests.
func sessionPageRenderer(w handlers.GhostWriter, r *http.Request) {
var (
txt interface{}
data sessionPageInfo
title string
)
ssn := w.Session()
if r.Method == "GET" {
txt = ssn.Data[sessionPageKey]
} else {
txt = r.FormValue(sessionPageKey)
ssn.Data[sessionPageKey] = txt
}
if r.URL.Path == "/session/auth" {
title = sessionPageAuthTitle
} else {
title = sessionPageTitle
}
if txt != nil {
data = sessionPageInfo{ssn.ID(), title, txt.(string)}
} else {
data = sessionPageInfo{ssn.ID(), title, "[nil]"}
}
err := templates.Render("templates/session.tmpl", w, data)
if err != nil {
panic(err)
}
}
// Prepare the context value for the chained handlers context page.
func setContext(w handlers.GhostWriter, r *http.Request) {
w.Context()[contextPageKey] = time.Now().String()
}
// Retrieve the context value and render the chained handlers context page.
func renderContextPage(w handlers.GhostWriter, r *http.Request) {
err := templates.Render("templates/amber/context.amber",
w, &struct{ Val string }{w.Context()[contextPageKey].(string)})
if err != nil {
panic(err)
}
}
// Prepare the web server and kick it off.
func main() {
// Blank the default logger's prefixes
log.SetFlags(0)
// Compile the dynamic templates (native Go templates and Amber
// templates are both registered via the for-side-effects-only imports)
err := templates.CompileDir("./templates/")
if err != nil {
panic(err)
}
// Set the simple routes for static files
mux := pat.New()
mux.Get("/", handlers.StaticFileHandler("./index.html"))
mux.Get("/public/", http.StripPrefix("/public/", http.FileServer(http.Dir("./public/"))))
// Set the more complex routes for session handling and dynamic page (same
// handler is used for both GET and POST).
ssnOpts := handlers.NewSessionOptions(memStore, secret)
ssnOpts.CookieTemplate.MaxAge = sessionExpiration
hSsn := handlers.SessionHandler(
handlers.ContextHandlerFunc(
handlers.GhostHandlerFunc(sessionPageRenderer),
1),
ssnOpts)
mux.Get("/session", hSsn)
mux.Post("/session", hSsn)
hAuthSsn := handlers.BasicAuthHandler(hSsn, authenticate, "")
mux.Get("/session/auth", hAuthSsn)
mux.Post("/session/auth", hAuthSsn)
// Set the handler for the chained context route
mux.Get("/context", handlers.ContextHandler(handlers.ChainHandlerFuncs(
handlers.GhostHandlerFunc(setContext),
handlers.GhostHandlerFunc(renderContextPage)),
1))
// Set the panic route, which simply panics
mux.Get("/panic", http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
panic("explicit panic")
}))
// Combine the top level handlers, that wrap around the muxer.
// Panic is the outermost, so that any panic is caught and responded to with a code 500.
// Log is next, so that every request is logged along with the URL, status code and response time.
// GZIP is then applied, so that content is compressed.
// Finally, the muxer finds the specific handler that applies to the route.
h := handlers.FaviconHandler(
handlers.PanicHandler(
handlers.LogHandler(
handlers.GZIPHandler(
mux,
nil),
handlers.NewLogOptions(nil, handlers.Ltiny)),
nil),
"./public/favicon.ico",
48*time.Hour)
// Assign the combined handler to the server.
http.Handle("/", h)
// Start it up.
if err := http.ListenAndServe(":9000", nil); err != nil {
panic(err)
}
}

File diff suppressed because one or more lines are too long (removed file)

Binary file not shown (removed image, 1.4 KiB)

File diff suppressed because one or more lines are too long (removed file)

Binary file not shown (removed image, 20 KiB)

View File

@@ -1,3 +0,0 @@
body {
background-color: silver;
}

View File

@@ -1,8 +0,0 @@
!!! 5
html
head
title Chained Context
link[type="text/css"][rel="stylesheet"][href="/public/bootstrap-combined.min.css"]
body
h1 Chained Context
h2 Value found: #{Val}

View File

@@ -1,28 +0,0 @@
<html>
<head>
<title>{{ .Title }}</title>
<link type="text/css" rel="stylesheet" href="/public/styles.css">
<link type="text/css" rel="stylesheet" href="/public/bootstrap-combined.min.css">
</head>
<body>
<h1>Session: {{ .SessionID }}</h1>
<ol>
<li><a href="/">Home</a></li>
<li><a href="/session">Session</a></li>
<li><a href="/session/auth">Authenticated Session</a></li>
<li><a href="/context">Chained Context</a></li>
<li><a href="/panic">Panic</a></li>
<li><a href="/public/styles.css">Styles.css</a></li>
<li><a href="/public/jquery-2.0.0.min.js">JQuery</a></li>
<li><a href="/public/logo.png">Logo</a></li>
</ol>
<h2>Current Value: {{ .Text }}</h2>
<form method="POST">
<input type="text" name="txt" placeholder="some value to save to session"></input>
<button type="submit">Submit</button>
</form>
<script src="/public/jquery-2.0.0.min.js"></script>
</body>
</html>

View File

@@ -1,123 +0,0 @@
package handlers
// Inspired by node.js' Connect library implementation of the basicAuth middleware.
// https://github.com/senchalabs/connect
import (
"bytes"
"encoding/base64"
"fmt"
"net/http"
"strings"
)
// Internal writer that keeps track of the currently authenticated user.
type userResponseWriter struct {
http.ResponseWriter
user interface{}
userName string
}
// Implement the WrapWriter interface.
func (this *userResponseWriter) WrappedWriter() http.ResponseWriter {
return this.ResponseWriter
}
// Writes an unauthorized response to the client, specifying the expected authentication
// information.
func Unauthorized(w http.ResponseWriter, realm string) {
w.Header().Set("Www-Authenticate", fmt.Sprintf(`Basic realm="%s"`, realm))
w.WriteHeader(http.StatusUnauthorized)
w.Write([]byte("Unauthorized"))
}
// Writes a bad request response to the client, with an optional message.
func BadRequest(w http.ResponseWriter, msg string) {
w.WriteHeader(http.StatusBadRequest)
if msg == "" {
msg = "Bad Request"
}
w.Write([]byte(msg))
}
// BasicAuthHandlerFunc is the same as BasicAuthHandler, it is just a convenience
// signature that accepts a func(http.ResponseWriter, *http.Request) instead of
// a http.Handler interface. It saves the boilerplate http.HandlerFunc() cast.
func BasicAuthHandlerFunc(h http.HandlerFunc,
authFn func(string, string) (interface{}, bool), realm string) http.HandlerFunc {
return BasicAuthHandler(h, authFn, realm)
}
// Returns a Basic Authentication handler, protecting the wrapped handler from
// being accessed if the authentication function is not successful.
func BasicAuthHandler(h http.Handler,
authFn func(string, string) (interface{}, bool), realm string) http.HandlerFunc {
if realm == "" {
realm = "Authorization Required"
}
return func(w http.ResponseWriter, r *http.Request) {
// Self-awareness
if _, ok := GetUser(w); ok {
h.ServeHTTP(w, r)
return
}
authInfo := r.Header.Get("Authorization")
if authInfo == "" {
// No authorization info, return 401
Unauthorized(w, realm)
return
}
parts := strings.Split(authInfo, " ")
if len(parts) != 2 {
BadRequest(w, "Bad authorization header")
return
}
scheme := parts[0]
creds, err := base64.StdEncoding.DecodeString(parts[1])
if err != nil {
BadRequest(w, "Bad credentials encoding")
return
}
index := bytes.Index(creds, []byte(":"))
if scheme != "Basic" || index < 0 {
BadRequest(w, "Bad authorization header")
return
}
user, pwd := string(creds[:index]), string(creds[index+1:])
udata, ok := authFn(user, pwd)
if ok {
// Save user data and continue
uw := &userResponseWriter{w, udata, user}
h.ServeHTTP(uw, r)
} else {
Unauthorized(w, realm)
}
}
}
// Return the currently authenticated user. This is the same data that was returned
// by the authentication function passed to BasicAuthHandler.
func GetUser(w http.ResponseWriter) (interface{}, bool) {
usr, ok := GetResponseWriter(w, func(tst http.ResponseWriter) bool {
_, ok := tst.(*userResponseWriter)
return ok
})
if ok {
return usr.(*userResponseWriter).user, true
}
return nil, false
}
// Return the currently authenticated user name. This is the user name that was
// authenticated for the current request.
func GetUserName(w http.ResponseWriter) (string, bool) {
usr, ok := GetResponseWriter(w, func(tst http.ResponseWriter) bool {
_, ok := tst.(*userResponseWriter)
return ok
})
if ok {
return usr.(*userResponseWriter).userName, true
}
return "", false
}

View File

@@ -1,62 +0,0 @@
package handlers
import (
"fmt"
"net/http"
"net/http/httptest"
"testing"
)
func TestUnauth(t *testing.T) {
h := BasicAuthHandler(StaticFileHandler("./testdata/script.js"), func(u, pwd string) (interface{}, bool) {
if u == "me" && pwd == "you" {
return u, true
}
return nil, false
}, "foo")
s := httptest.NewServer(h)
defer s.Close()
res, err := http.Get(s.URL)
if err != nil {
panic(err)
}
assertStatus(http.StatusUnauthorized, res.StatusCode, t)
assertHeader("Www-Authenticate", `Basic realm="foo"`, res, t)
}
func TestGzippedAuth(t *testing.T) {
h := GZIPHandler(BasicAuthHandler(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
usr, ok := GetUser(w)
if assertTrue(ok, "expected authenticated user, got false", t) {
assertTrue(usr.(string) == "meyou", fmt.Sprintf("expected user data to be 'meyou', got '%s'", usr), t)
}
usr, ok = GetUserName(w)
if assertTrue(ok, "expected authenticated user name, got false", t) {
assertTrue(usr == "me", fmt.Sprintf("expected user name to be 'me', got '%s'", usr), t)
}
w.Header().Set("Content-Type", "text/plain")
w.Write([]byte(usr.(string)))
}), func(u, pwd string) (interface{}, bool) {
if u == "me" && pwd == "you" {
return u + pwd, true
}
return nil, false
}, ""), nil)
s := httptest.NewServer(h)
defer s.Close()
req, err := http.NewRequest("GET", "http://me:you@"+s.URL[7:], nil)
if err != nil {
panic(err)
}
req.Header.Set("Accept-Encoding", "gzip")
res, err := http.DefaultClient.Do(req)
if err != nil {
panic(err)
}
assertStatus(http.StatusOK, res.StatusCode, t)
assertGzippedBody([]byte("me"), res, t)
}

View File

@@ -1,63 +0,0 @@
package handlers
import (
"net/http"
)
// ChainableHandler is a valid Handler interface, and adds the possibility to
// chain other handlers.
type ChainableHandler interface {
http.Handler
Chain(http.Handler) ChainableHandler
ChainFunc(http.HandlerFunc) ChainableHandler
}
// Default implementation of a simple ChainableHandler
type chainHandler struct {
http.Handler
}
func (this *chainHandler) ChainFunc(h http.HandlerFunc) ChainableHandler {
return this.Chain(h)
}
// Implementation of the ChainableHandler interface, calls the chained handler
// after the current one (sequential).
func (this *chainHandler) Chain(h http.Handler) ChainableHandler {
return &chainHandler{
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Add the chained handler after the call to this handler
this.ServeHTTP(w, r)
h.ServeHTTP(w, r)
}),
}
}
// Convert a standard http handler to a chainable handler interface.
func NewChainableHandler(h http.Handler) ChainableHandler {
return &chainHandler{
h,
}
}
// Helper function to chain multiple handler functions in a single call.
func ChainHandlerFuncs(h ...http.HandlerFunc) ChainableHandler {
return &chainHandler{
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
for _, v := range h {
v(w, r)
}
}),
}
}
// Helper function to chain multiple handlers in a single call.
func ChainHandlers(h ...http.Handler) ChainableHandler {
return &chainHandler{
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
for _, v := range h {
v.ServeHTTP(w, r)
}
}),
}
}

View File

@@ -1,73 +0,0 @@
package handlers
import (
"bytes"
"net/http"
"testing"
)
func TestChaining(t *testing.T) {
var buf bytes.Buffer
a := func(w http.ResponseWriter, r *http.Request) {
buf.WriteRune('a')
}
b := func(w http.ResponseWriter, r *http.Request) {
buf.WriteRune('b')
}
c := func(w http.ResponseWriter, r *http.Request) {
buf.WriteRune('c')
}
f := NewChainableHandler(http.HandlerFunc(a)).Chain(http.HandlerFunc(b)).Chain(http.HandlerFunc(c))
f.ServeHTTP(nil, nil)
if buf.String() != "abc" {
t.Errorf("expected 'abc', got %s", buf.String())
}
}
func TestChainingWithHelperFunc(t *testing.T) {
var buf bytes.Buffer
a := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
buf.WriteRune('a')
})
b := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
buf.WriteRune('b')
})
c := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
buf.WriteRune('c')
})
d := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
buf.WriteRune('d')
})
f := ChainHandlers(a, b, c, d)
f.ServeHTTP(nil, nil)
if buf.String() != "abcd" {
t.Errorf("expected 'abcd', got %s", buf.String())
}
}
func TestChainingMixed(t *testing.T) {
var buf bytes.Buffer
a := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
buf.WriteRune('a')
})
b := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
buf.WriteRune('b')
})
c := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
buf.WriteRune('c')
})
d := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
buf.WriteRune('d')
})
f := NewChainableHandler(a).Chain(ChainHandlers(b, c)).Chain(d)
f.ServeHTTP(nil, nil)
if buf.String() != "abcd" {
t.Errorf("expected 'abcd', got %s", buf.String())
}
}

View File

@@ -1,55 +0,0 @@
package handlers
import (
"net/http"
)
// Structure that holds the context map and exposes the ResponseWriter interface.
type contextResponseWriter struct {
http.ResponseWriter
m map[interface{}]interface{}
}
// Implement the WrapWriter interface.
func (this *contextResponseWriter) WrappedWriter() http.ResponseWriter {
return this.ResponseWriter
}
// ContextHandlerFunc is the same as ContextHandler, it is just a convenience
// signature that accepts a func(http.ResponseWriter, *http.Request) instead of
// a http.Handler interface. It saves the boilerplate http.HandlerFunc() cast.
func ContextHandlerFunc(h http.HandlerFunc, cap int) http.HandlerFunc {
return ContextHandler(h, cap)
}
// ContextHandler gives a context storage that lives only for the duration of
// the request, with no locking involved.
func ContextHandler(h http.Handler, cap int) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
if _, ok := GetContext(w); ok {
// Self-awareness, context handler is already set up
h.ServeHTTP(w, r)
return
}
// Create the context-providing ResponseWriter replacement.
ctxw := &contextResponseWriter{
w,
make(map[interface{}]interface{}, cap),
}
// Call the wrapped handler with the context-aware writer
h.ServeHTTP(ctxw, r)
}
}
// Helper function to retrieve the context map from the ResponseWriter interface.
func GetContext(w http.ResponseWriter) (map[interface{}]interface{}, bool) {
ctxw, ok := GetResponseWriter(w, func(tst http.ResponseWriter) bool {
_, ok := tst.(*contextResponseWriter)
return ok
})
if ok {
return ctxw.(*contextResponseWriter).m, true
}
return nil, false
}

View File

@@ -1,83 +0,0 @@
package handlers
import (
"fmt"
"net/http"
"net/http/httptest"
"testing"
)
func TestContext(t *testing.T) {
key := "key"
val := 10
body := "this is the output"
h2 := wrappedHandler(t, key, val, body)
// Create the context handler with a wrapped handler
h := ContextHandler(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
ctx, _ := GetContext(w)
assertTrue(ctx != nil, "expected context to be non-nil", t)
assertTrue(len(ctx) == 0, fmt.Sprintf("expected context to be empty, got %d", len(ctx)), t)
ctx[key] = val
h2.ServeHTTP(w, r)
}), 2)
s := httptest.NewServer(h)
defer s.Close()
// First call
res, err := http.DefaultClient.Get(s.URL)
if err != nil {
panic(err)
}
res.Body.Close()
// Second call, context should be cleaned at start
res, err = http.DefaultClient.Get(s.URL)
if err != nil {
panic(err)
}
assertStatus(http.StatusOK, res.StatusCode, t)
assertBody([]byte(body), res, t)
}
func TestWrappedContext(t *testing.T) {
key := "key"
val := 10
body := "this is the output"
h2 := wrappedHandler(t, key, val, body)
h := ContextHandler(LogHandler(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
ctx, _ := GetContext(w)
if !assertTrue(ctx != nil, "expected context to be non-nil", t) {
panic("ctx is nil")
}
assertTrue(len(ctx) == 0, fmt.Sprintf("expected context to be empty, got %d", len(ctx)), t)
ctx[key] = val
h2.ServeHTTP(w, r)
}), NewLogOptions(nil, "%s", "url")), 2)
s := httptest.NewServer(h)
defer s.Close()
res, err := http.DefaultClient.Get(s.URL)
if err != nil {
panic(err)
}
assertStatus(http.StatusOK, res.StatusCode, t)
assertBody([]byte(body), res, t)
}
func wrappedHandler(t *testing.T, k, v interface{}, body string) http.Handler {
return http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
ctx, _ := GetContext(w)
ac := ctx[k]
assertTrue(ac == v, fmt.Sprintf("expected value to be %v, got %v", v, ac), t)
// Actually write something
_, err := w.Write([]byte(body))
if err != nil {
panic(err)
}
})
}

View File

@@ -1,29 +0,0 @@
// Package handlers defines reusable handler components that focus on offering
// a single well-defined feature. Note that any http.Handler implementation
// can be used with Ghost's chainable or wrappable handlers design.
//
// Go's standard library provides a number of such useful handlers in net/http:
//
// - FileServer(http.FileSystem)
// - NotFoundHandler()
// - RedirectHandler(string, int)
// - StripPrefix(string, http.Handler)
// - TimeoutHandler(http.Handler, time.Duration, string)
//
// This package adds the following list of handlers:
//
// - BasicAuthHandler(http.Handler, func(string, string) (interface{}, bool), string)
// a Basic Authentication handler.
// - ContextHandler(http.Handler, int) : a volatile storage map valid only
// for the duration of the request, with no locking required.
// - FaviconHandler(http.Handler, string, time.Duration) : an efficient favicon
// handler.
// - GZIPHandler(http.Handler) : compress the content of the body if the client
// accepts gzip compression.
// - LogHandler(http.Handler, *LogOptions) : customizable request logger.
// - PanicHandler(http.Handler) : handle panics gracefully so that the client
// receives a response (status code 500).
// - SessionHandler(http.Handler, *SessionOptions) : a cookie-based, store-agnostic
// persistent session handler.
// - StaticFileHandler(string) : serve the contents of a specific file.
package handlers
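
The chainable design described above composes by simple wrapping; a minimal sketch of a chain built from the handlers listed in this comment (the inner handler and listen address are illustrative):

package main

import (
	"net/http"

	"github.com/PuerkitoBio/ghost/handlers"
)

func main() {
	app := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "text/plain")
		w.Write([]byte("hello"))
	})
	// The outermost handler runs first; each one wraps the next.
	chain := handlers.PanicHandler(
		handlers.LogHandler(
			handlers.GZIPHandler(app, nil),
			handlers.NewLogOptions(nil, handlers.Ltiny)),
		nil)
	http.ListenAndServe(":8080", chain)
}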

View File

@@ -1,71 +0,0 @@
package handlers
import (
"crypto/md5"
"io/ioutil"
"net/http"
"strconv"
"time"
"github.com/PuerkitoBio/ghost"
)
// FaviconHandlerFunc is the same as FaviconHandler; it is just a convenience
// signature that accepts a func(http.ResponseWriter, *http.Request) instead of
// an http.Handler interface. It saves the boilerplate http.HandlerFunc() cast.
func FaviconHandlerFunc(h http.HandlerFunc, path string, maxAge time.Duration) http.HandlerFunc {
return FaviconHandler(h, path, maxAge)
}
// Efficient favicon handler, mostly a port of node's Connect library implementation
// of the favicon middleware.
// https://github.com/senchalabs/connect
func FaviconHandler(h http.Handler, path string, maxAge time.Duration) http.HandlerFunc {
var buf []byte
var hash string
return func(w http.ResponseWriter, r *http.Request) {
var err error
if r.URL.Path == "/favicon.ico" {
if buf == nil {
// Read from file and cache
ghost.LogFn("ghost.favicon : serving from %s", path)
buf, err = ioutil.ReadFile(path)
if err != nil {
ghost.LogFn("ghost.favicon : error reading file : %s", err)
http.NotFound(w, r)
return
}
hash = hashContent(buf)
}
writeHeaders(w.Header(), buf, maxAge, hash)
writeBody(w, r, buf)
} else {
h.ServeHTTP(w, r)
}
}
}
// Write the content of the favicon, or respond with a 404 not found
// in case of error (hardly a critical error).
func writeBody(w http.ResponseWriter, r *http.Request, buf []byte) {
_, err := w.Write(buf)
if err != nil {
ghost.LogFn("ghost.favicon : error writing response : %s", err)
http.NotFound(w, r)
}
}
// Correctly set the http headers.
func writeHeaders(hdr http.Header, buf []byte, maxAge time.Duration, hash string) {
hdr.Set("Content-Type", "image/x-icon")
hdr.Set("Content-Length", strconv.Itoa(len(buf)))
hdr.Set("Etag", hash)
hdr.Set("Cache-Control", "public, max-age="+strconv.Itoa(int(maxAge.Seconds())))
}
// Get the MD5 hash of the content.
func hashContent(buf []byte) string {
h := md5.New()
return string(h.Sum(buf))
}
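
Worth noting: hash.Hash.Sum(buf) appends the digest of whatever was written to the hash (here, nothing) onto buf, so hashContent above effectively returns the icon bytes plus the MD5 of empty input rather than a digest of the icon. A conventional strong ETag would hash the contents and hex-encode the result; a minimal sketch, independent of this package:

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

// etagFor returns a hex-encoded MD5 digest of the file contents,
// suitable as a strong ETag header value.
func etagFor(buf []byte) string {
	sum := md5.Sum(buf)
	return `"` + hex.EncodeToString(sum[:]) + `"`
}

func main() {
	fmt.Println(etagFor([]byte("favicon bytes")))
}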

View File

@@ -1,72 +0,0 @@
package handlers
import (
"net/http"
"net/http/httptest"
"os"
"testing"
"time"
)
func TestFavicon(t *testing.T) {
s := httptest.NewServer(FaviconHandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("ok"))
}, "./testdata/favicon.ico", time.Second))
defer s.Close()
res, err := http.Get(s.URL + "/favicon.ico")
if err != nil {
panic(err)
}
defer res.Body.Close()
assertStatus(http.StatusOK, res.StatusCode, t)
assertHeader("Content-Type", "image/x-icon", res, t)
assertHeader("Cache-Control", "public, max-age=1", res, t)
assertHeader("Content-Length", "1406", res, t)
}
func TestFaviconInvalidPath(t *testing.T) {
s := httptest.NewServer(FaviconHandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("ok"))
}, "./testdata/xfavicon.ico", time.Second))
defer s.Close()
res, err := http.Get(s.URL + "/favicon.ico")
if err != nil {
panic(err)
}
defer res.Body.Close()
assertStatus(http.StatusNotFound, res.StatusCode, t)
}
func TestFaviconFromCache(t *testing.T) {
s := httptest.NewServer(FaviconHandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("ok"))
}, "./testdata/favicon.ico", time.Second))
defer s.Close()
res, err := http.Get(s.URL + "/favicon.ico")
if err != nil {
panic(err)
}
defer res.Body.Close()
// Rename the file temporarily
err = os.Rename("./testdata/favicon.ico", "./testdata/xfavicon.ico")
if err != nil {
panic(err)
}
defer os.Rename("./testdata/xfavicon.ico", "./testdata/favicon.ico")
res, err = http.Get(s.URL + "/favicon.ico")
if err != nil {
panic(err)
}
defer res.Body.Close()
assertStatus(http.StatusOK, res.StatusCode, t)
assertHeader("Content-Type", "image/x-icon", res, t)
assertHeader("Cache-Control", "public, max-age=1", res, t)
assertHeader("Content-Length", "1406", res, t)
}

View File

@@ -1,75 +0,0 @@
package handlers
import (
"net/http"
)
// Interface giving easy access to the most common augmented features.
type GhostWriter interface {
http.ResponseWriter
UserName() string
User() interface{}
Context() map[interface{}]interface{}
Session() *Session
}
// Internal implementation of the GhostWriter interface.
type ghostWriter struct {
http.ResponseWriter
userName string
user interface{}
ctx map[interface{}]interface{}
ssn *Session
}
func (this *ghostWriter) UserName() string {
return this.userName
}
func (this *ghostWriter) User() interface{} {
return this.user
}
func (this *ghostWriter) Context() map[interface{}]interface{} {
return this.ctx
}
func (this *ghostWriter) Session() *Session {
return this.ssn
}
// Convenience handler that wraps a custom function with direct access to the
// authenticated user, context and session on the writer.
func GhostHandlerFunc(h func(w GhostWriter, r *http.Request)) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
if gw, ok := getGhostWriter(w); ok {
// Self-awareness
h(gw, r)
return
}
uid, _ := GetUserName(w)
usr, _ := GetUser(w)
ctx, _ := GetContext(w)
ssn, _ := GetSession(w)
gw := &ghostWriter{
w,
uid,
usr,
ctx,
ssn,
}
h(gw, r)
}
}
// Check the writer chain to find a ghostWriter.
func getGhostWriter(w http.ResponseWriter) (*ghostWriter, bool) {
gw, ok := GetResponseWriter(w, func(tst http.ResponseWriter) bool {
_, ok := tst.(*ghostWriter)
return ok
})
if ok {
return gw.(*ghostWriter), true
}
return nil, false
}
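
A minimal sketch of GhostHandlerFunc in use, assuming the Context/Session handlers are installed further out in the chain (server wiring and key names are illustrative):

package main

import (
	"fmt"
	"net/http"

	"github.com/PuerkitoBio/ghost/handlers"
)

func main() {
	h := handlers.GhostHandlerFunc(func(w handlers.GhostWriter, r *http.Request) {
		// Per-request context and session are available without extra lookups,
		// provided the corresponding handlers wrap this one.
		if ctx := w.Context(); ctx != nil {
			ctx["visited"] = true
		}
		if ssn := w.Session(); ssn != nil {
			ssn.Data["last"] = r.URL.Path
		}
		fmt.Fprintf(w, "hello %s", w.UserName())
	})
	http.ListenAndServe(":8080", h)
}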

View File

@@ -1,168 +0,0 @@
package handlers
import (
"compress/gzip"
"io"
"net/http"
)
// Thanks to Andrew Gerrand for inspiration:
// https://groups.google.com/d/msg/golang-nuts/eVnTcMwNVjM/4vYU8id9Q2UJ
//
// Also, node's Connect library implementation of the compress middleware:
// https://github.com/senchalabs/connect/blob/master/lib/middleware/compress.js
//
// And StackOverflow's explanation of Vary: Accept-Encoding header:
// http://stackoverflow.com/questions/7848796/what-does-varyaccept-encoding-mean
// Internal gzip writer that both writes the response body in gzipped format
// and exposes the rest of the ResponseWriter interface for header manipulation.
type gzipResponseWriter struct {
io.Writer
http.ResponseWriter
r *http.Request // Keep a hold of the Request, for the filter function
filtered bool // Has the request been run through the filter function?
dogzip bool // Should we do GZIP compression for this request?
filterFn func(http.ResponseWriter, *http.Request) bool
}
// Make sure the filter function is applied.
func (w *gzipResponseWriter) applyFilter() {
if !w.filtered {
if w.dogzip = w.filterFn(w, w.r); w.dogzip {
setGzipHeaders(w.Header())
}
w.filtered = true
}
}
// Unambiguous Write() implementation (otherwise both ResponseWriter and Writer
// want to claim this method).
func (w *gzipResponseWriter) Write(b []byte) (int, error) {
w.applyFilter()
if w.dogzip {
// Write compressed
return w.Writer.Write(b)
}
// Write uncompressed
return w.ResponseWriter.Write(b)
}
// Intercept the WriteHeader call to correctly set the GZIP headers.
func (w *gzipResponseWriter) WriteHeader(code int) {
w.applyFilter()
w.ResponseWriter.WriteHeader(code)
}
// Implement WrapWriter interface
func (w *gzipResponseWriter) WrappedWriter() http.ResponseWriter {
return w.ResponseWriter
}
var (
defaultFilterTypes = [...]string{
"text",
"javascript",
"json",
}
)
// Default filter to check if the response should be GZIPped.
// By default, all text (html, css, xml, ...), javascript and json
// content types are candidates for GZIP.
func defaultFilter(w http.ResponseWriter, r *http.Request) bool {
hdr := w.Header()
for _, tp := range defaultFilterTypes {
ok := HeaderMatch(hdr, "Content-Type", HmContains, tp)
if ok {
return true
}
}
return false
}
// GZIPHandlerFunc is the same as GZIPHandler; it is just a convenience
// signature that accepts a func(http.ResponseWriter, *http.Request) instead of
// an http.Handler interface. It saves the boilerplate http.HandlerFunc() cast.
func GZIPHandlerFunc(h http.HandlerFunc, filterFn func(http.ResponseWriter, *http.Request) bool) http.HandlerFunc {
return GZIPHandler(h, filterFn)
}
// Gzip compression HTTP handler. If the client supports it, it compresses the response
// written by the wrapped handler. The filter function is called when the response is about
// to be written to determine if compression should be applied. If this argument is nil,
// the default filter will GZIP only content types containing /json|text|javascript/.
func GZIPHandler(h http.Handler, filterFn func(http.ResponseWriter, *http.Request) bool) http.HandlerFunc {
if filterFn == nil {
filterFn = defaultFilter
}
return func(w http.ResponseWriter, r *http.Request) {
if _, ok := getGzipWriter(w); ok {
// Self-awareness, gzip handler is already set up
h.ServeHTTP(w, r)
return
}
hdr := w.Header()
setVaryHeader(hdr)
// Do nothing on a HEAD request
if r.Method == "HEAD" {
h.ServeHTTP(w, r)
return
}
if !acceptsGzip(r.Header) {
// No gzip support from the client, return uncompressed
h.ServeHTTP(w, r)
return
}
// Prepare a gzip response container
gz := gzip.NewWriter(w)
gzw := &gzipResponseWriter{
Writer: gz,
ResponseWriter: w,
r: r,
filterFn: filterFn,
}
h.ServeHTTP(gzw, r)
// Iff the handler completed successfully (no panic) and GZIP was indeed used, close the gzip writer,
// which seems to generate a Write to the underlying writer.
if gzw.dogzip {
gz.Close()
}
}
}
// Add the vary by "accept-encoding" header if it is not already set.
func setVaryHeader(hdr http.Header) {
if !HeaderMatch(hdr, "Vary", HmContains, "accept-encoding") {
hdr.Add("Vary", "Accept-Encoding")
}
}
// Checks if the client accepts GZIP-encoded responses.
func acceptsGzip(hdr http.Header) bool {
ok := HeaderMatch(hdr, "Accept-Encoding", HmContains, "gzip")
if !ok {
ok = HeaderMatch(hdr, "Accept-Encoding", HmEquals, "*")
}
return ok
}
func setGzipHeaders(hdr http.Header) {
// The content-type will be explicitly set somewhere down the path of handlers
hdr.Set("Content-Encoding", "gzip")
hdr.Del("Content-Length")
}
// Helper function to retrieve the gzip writer.
func getGzipWriter(w http.ResponseWriter) (*gzipResponseWriter, bool) {
gz, ok := GetResponseWriter(w, func(tst http.ResponseWriter) bool {
_, ok := tst.(*gzipResponseWriter)
return ok
})
if ok {
return gz.(*gzipResponseWriter), true
}
return nil, false
}
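
Note that GZIPHandler above calls gzip.NewWriter on every compressed request, allocating a fresh compressor each time; this PR moves to an approach that re-uses gzip writers. A minimal sketch of pooling writers with sync.Pool, in the spirit of the change but not the actual replacement code (names and the Accept-Encoding check are simplified):

package middleware

import (
	"compress/gzip"
	"io/ioutil"
	"net/http"
	"strings"
	"sync"
)

var gzPool = sync.Pool{
	New: func() interface{} { return gzip.NewWriter(ioutil.Discard) },
}

// pooledGzipWriter forwards body writes to the gzip writer while keeping the
// original ResponseWriter for headers and status codes.
type pooledGzipWriter struct {
	http.ResponseWriter
	gz *gzip.Writer
}

func (w *pooledGzipWriter) Write(b []byte) (int, error) { return w.gz.Write(b) }

func withGzip(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
			h.ServeHTTP(w, r)
			return
		}
		gz := gzPool.Get().(*gzip.Writer)
		gz.Reset(w) // point the recycled compressor at this response
		defer func() {
			gz.Close() // flush the gzip trailer
			gzPool.Put(gz)
		}()
		w.Header().Set("Content-Encoding", "gzip")
		w.Header().Add("Vary", "Accept-Encoding")
		w.Header().Del("Content-Length")
		// A fuller version would also filter by content type, as filterFn does above.
		h.ServeHTTP(&pooledGzipWriter{ResponseWriter: w, gz: gz}, r)
	})
}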

View File

@@ -1,178 +0,0 @@
package handlers
import (
"net/http"
"net/http/httptest"
"testing"
)
func TestGzipped(t *testing.T) {
body := "This is the body"
headers := []string{"gzip", "*", "gzip, deflate, sdch"}
h := GZIPHandler(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/plain")
_, err := w.Write([]byte(body))
if err != nil {
panic(err)
}
}), nil)
s := httptest.NewServer(h)
defer s.Close()
for _, hdr := range headers {
t.Logf("running with Accept-Encoding header %s", hdr)
req, err := http.NewRequest("GET", s.URL, nil)
if err != nil {
panic(err)
}
req.Header.Set("Accept-Encoding", hdr)
res, err := http.DefaultClient.Do(req)
if err != nil {
panic(err)
}
assertStatus(http.StatusOK, res.StatusCode, t)
assertHeader("Content-Encoding", "gzip", res, t)
assertGzippedBody([]byte(body), res, t)
}
}
func TestNoGzip(t *testing.T) {
body := "This is the body"
h := GZIPHandler(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/plain")
_, err := w.Write([]byte(body))
if err != nil {
panic(err)
}
}), nil)
s := httptest.NewServer(h)
defer s.Close()
req, err := http.NewRequest("GET", s.URL, nil)
if err != nil {
panic(err)
}
res, err := http.DefaultClient.Do(req)
if err != nil {
panic(err)
}
assertStatus(http.StatusOK, res.StatusCode, t)
assertHeader("Content-Encoding", "", res, t)
assertBody([]byte(body), res, t)
}
func TestGzipOuterPanic(t *testing.T) {
msg := "ko"
h := PanicHandler(
GZIPHandler(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
panic(msg)
}), nil), nil)
s := httptest.NewServer(h)
defer s.Close()
req, err := http.NewRequest("GET", s.URL, nil)
if err != nil {
panic(err)
}
res, err := http.DefaultClient.Do(req)
if err != nil {
panic(err)
}
assertStatus(http.StatusInternalServerError, res.StatusCode, t)
assertHeader("Content-Encoding", "", res, t)
assertBody([]byte(msg+"\n"), res, t)
}
func TestNoGzipOnFilter(t *testing.T) {
body := "This is the body"
h := GZIPHandler(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "x/x")
_, err := w.Write([]byte(body))
if err != nil {
panic(err)
}
}), nil)
s := httptest.NewServer(h)
defer s.Close()
req, err := http.NewRequest("GET", s.URL, nil)
if err != nil {
panic(err)
}
req.Header.Set("Accept-Encoding", "gzip")
res, err := http.DefaultClient.Do(req)
if err != nil {
panic(err)
}
assertStatus(http.StatusOK, res.StatusCode, t)
assertHeader("Content-Encoding", "", res, t)
assertBody([]byte(body), res, t)
}
func TestNoGzipOnCustomFilter(t *testing.T) {
body := "This is the body"
h := GZIPHandler(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/plain")
_, err := w.Write([]byte(body))
if err != nil {
panic(err)
}
}), func(w http.ResponseWriter, r *http.Request) bool {
return false
})
s := httptest.NewServer(h)
defer s.Close()
req, err := http.NewRequest("GET", s.URL, nil)
if err != nil {
panic(err)
}
req.Header.Set("Accept-Encoding", "gzip")
res, err := http.DefaultClient.Do(req)
if err != nil {
panic(err)
}
assertStatus(http.StatusOK, res.StatusCode, t)
assertHeader("Content-Encoding", "", res, t)
assertBody([]byte(body), res, t)
}
func TestGzipOnCustomFilter(t *testing.T) {
body := "This is the body"
h := GZIPHandler(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "x/x")
_, err := w.Write([]byte(body))
if err != nil {
panic(err)
}
}), func(w http.ResponseWriter, r *http.Request) bool {
return true
})
s := httptest.NewServer(h)
defer s.Close()
req, err := http.NewRequest("GET", s.URL, nil)
if err != nil {
panic(err)
}
req.Header.Set("Accept-Encoding", "gzip")
res, err := http.DefaultClient.Do(req)
if err != nil {
panic(err)
}
assertStatus(http.StatusOK, res.StatusCode, t)
assertHeader("Content-Encoding", "gzip", res, t)
assertGzippedBody([]byte(body), res, t)
}

View File

@@ -1,50 +0,0 @@
package handlers
import (
"net/http"
"strings"
)
// Kind of match to apply to the header check.
type HeaderMatchType int
const (
HmEquals HeaderMatchType = iota
HmStartsWith
HmEndsWith
HmContains
)
// Check if the specified header matches the test string, applying the header match type
// specified.
func HeaderMatch(hdr http.Header, nm string, matchType HeaderMatchType, test string) bool {
// First get the header value
val := hdr[http.CanonicalHeaderKey(nm)]
if len(val) == 0 {
return false
}
// Prepare the match test
test = strings.ToLower(test)
for _, v := range val {
v = strings.Trim(strings.ToLower(v), " \n\t")
switch matchType {
case HmEquals:
if v == test {
return true
}
case HmStartsWith:
if strings.HasPrefix(v, test) {
return true
}
case HmEndsWith:
if strings.HasSuffix(v, test) {
return true
}
case HmContains:
if strings.Contains(v, test) {
return true
}
}
}
return false
}
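
A small usage sketch of HeaderMatch, mirroring how defaultFilter in the gzip handler decides whether a Content-Type is compressible (the header values are illustrative):

package main

import (
	"fmt"
	"net/http"

	"github.com/PuerkitoBio/ghost/handlers"
)

func main() {
	hdr := http.Header{}
	hdr.Set("Content-Type", "application/json; charset=utf-8")
	// true: the value contains "json" (matching is case-insensitive).
	fmt.Println(handlers.HeaderMatch(hdr, "Content-Type", handlers.HmContains, "json"))
	// false: the value does not start with "text".
	fmt.Println(handlers.HeaderMatch(hdr, "Content-Type", handlers.HmStartsWith, "text"))
}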

View File

@@ -1,231 +0,0 @@
package handlers
// Inspired by node's Connect library implementation of the logging middleware
// https://github.com/senchalabs/connect
import (
"fmt"
"net/http"
"regexp"
"strings"
"time"
"github.com/PuerkitoBio/ghost"
)
const (
// Predefined logging formats that can be passed as format string.
Ldefault = "_default_"
Lshort = "_short_"
Ltiny = "_tiny_"
)
var (
// Token parser for request and response headers
rxHeaders = regexp.MustCompile(`^(req|res)\[([^\]]+)\]$`)
// Lookup table for predefined formats
predefFormats = map[string]struct {
fmt string
toks []string
}{
Ldefault: {
`%s - - [%s] "%s %s HTTP/%s" %d %s "%s" "%s"`,
[]string{"remote-addr", "date", "method", "url", "http-version", "status", "res[Content-Length]", "referrer", "user-agent"},
},
Lshort: {
`%s - %s %s HTTP/%s %d %s - %.3f s`,
[]string{"remote-addr", "method", "url", "http-version", "status", "res[Content-Length]", "response-time"},
},
Ltiny: {
`%s %s %d %s - %.3f s`,
[]string{"method", "url", "status", "res[Content-Length]", "response-time"},
},
}
)
// Augmented ResponseWriter implementation that captures the status code for the logger.
type statusResponseWriter struct {
http.ResponseWriter
code int
oriURL string
}
// Intercept the WriteHeader call to save the status code.
func (this *statusResponseWriter) WriteHeader(code int) {
this.code = code
this.ResponseWriter.WriteHeader(code)
}
// Intercept the Write call to save the default status code.
func (this *statusResponseWriter) Write(data []byte) (int, error) {
if this.code == 0 {
this.code = http.StatusOK
}
return this.ResponseWriter.Write(data)
}
// Implement the WrapWriter interface.
func (this *statusResponseWriter) WrappedWriter() http.ResponseWriter {
return this.ResponseWriter
}
// LogHandler options
type LogOptions struct {
LogFn func(string, ...interface{}) // Defaults to ghost.LogFn if nil
Format string
Tokens []string
CustomTokens map[string]func(http.ResponseWriter, *http.Request) string
Immediate bool
DateFormat string
}
// Create a new LogOptions struct. The DateFormat defaults to time.RFC3339.
func NewLogOptions(l func(string, ...interface{}), ft string, tok ...string) *LogOptions {
return &LogOptions{
LogFn: l,
Format: ft,
Tokens: tok,
CustomTokens: make(map[string]func(http.ResponseWriter, *http.Request) string),
DateFormat: time.RFC3339,
}
}
// LogHandlerFunc is the same as LogHandler; it is just a convenience
// signature that accepts a func(http.ResponseWriter, *http.Request) instead of
// an http.Handler interface. It saves the boilerplate http.HandlerFunc() cast.
func LogHandlerFunc(h http.HandlerFunc, opts *LogOptions) http.HandlerFunc {
return LogHandler(h, opts)
}
// Create a handler that logs every request it receives.
func LogHandler(h http.Handler, opts *LogOptions) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
if _, ok := getStatusWriter(w); ok {
// Self-awareness, logging handler already set up
h.ServeHTTP(w, r)
return
}
// Save the response start time
st := time.Now()
// Call the wrapped handler, with the augmented ResponseWriter to handle the status code
stw := &statusResponseWriter{w, 0, ""}
// Log immediately if requested, otherwise on exit
if opts.Immediate {
logRequest(stw, r, st, opts)
} else {
// Store original URL, may get modified by handlers (i.e. StripPrefix)
stw.oriURL = r.URL.String()
defer logRequest(stw, r, st, opts)
}
h.ServeHTTP(stw, r)
}
}
func getIpAddress(r *http.Request) string {
hdr := r.Header
hdrRealIp := hdr.Get("X-Real-Ip")
hdrForwardedFor := hdr.Get("X-Forwarded-For")
if hdrRealIp == "" && hdrForwardedFor == "" {
return r.RemoteAddr
}
if hdrForwardedFor != "" {
// X-Forwarded-For is potentially a list of addresses separated with ","
part := strings.Split(hdrForwardedFor, ",")[0]
return strings.TrimSpace(part) + ":0"
}
return hdrRealIp
}
// Check if the specified token is a predefined one, and if so return its current value.
func getPredefinedTokenValue(t string, w *statusResponseWriter, r *http.Request,
st time.Time, opts *LogOptions) (interface{}, bool) {
switch t {
case "http-version":
return fmt.Sprintf("%d.%d", r.ProtoMajor, r.ProtoMinor), true
case "response-time":
return time.Now().Sub(st).Seconds(), true
case "remote-addr":
return getIpAddress(r), true
case "date":
return time.Now().Format(opts.DateFormat), true
case "method":
return r.Method, true
case "url":
if w.oriURL != "" {
return w.oriURL, true
}
return r.URL.String(), true
case "referrer", "referer":
return r.Referer(), true
case "user-agent":
return r.UserAgent(), true
case "status":
return w.code, true
}
// Handle special cases for header
mtch := rxHeaders.FindStringSubmatch(t)
if len(mtch) > 2 {
if mtch[1] == "req" {
return r.Header.Get(mtch[2]), true
} else {
// This only works for headers explicitly set via the Header() map of
// the writer, not those added by the http package under the covers.
return w.Header().Get(mtch[2]), true
}
}
return nil, false
}
// Do the actual logging.
func logRequest(w *statusResponseWriter, r *http.Request, st time.Time, opts *LogOptions) {
var (
fn func(string, ...interface{})
ok bool
format string
toks []string
)
// If no specific log function, use the default one from the ghost package
if opts.LogFn == nil {
fn = ghost.LogFn
} else {
fn = opts.LogFn
}
// If this is a predefined format, use it instead
if v, ok := predefFormats[opts.Format]; ok {
format = v.fmt
toks = v.toks
} else {
format = opts.Format
toks = opts.Tokens
}
args := make([]interface{}, len(toks))
for i, t := range toks {
if args[i], ok = getPredefinedTokenValue(t, w, r, st, opts); !ok {
if f, ok := opts.CustomTokens[t]; ok && f != nil {
args[i] = f(w, r)
} else {
args[i] = "?"
}
}
}
fn(format, args...)
}
// Helper function to retrieve the status writer.
func getStatusWriter(w http.ResponseWriter) (*statusResponseWriter, bool) {
st, ok := GetResponseWriter(w, func(tst http.ResponseWriter) bool {
_, ok := tst.(*statusResponseWriter)
return ok
})
if ok {
return st.(*statusResponseWriter), true
}
return nil, false
}
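
A minimal sketch of configuring LogHandler with a mix of predefined and custom tokens (the format string and the "client" token name are illustrative):

package main

import (
	"log"
	"net/http"

	"github.com/PuerkitoBio/ghost/handlers"
)

func main() {
	// Log the method, the URL and a custom token resolved per request.
	opts := handlers.NewLogOptions(log.Printf, "%s %s from=%s", "method", "url", "client")
	opts.CustomTokens["client"] = func(w http.ResponseWriter, r *http.Request) string {
		return r.RemoteAddr
	}
	h := handlers.LogHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	}), opts)
	http.ListenAndServe(":8080", h)
}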

View File

@@ -1,217 +0,0 @@
package handlers
import (
"bytes"
"fmt"
"log"
"net/http"
"net/http/httptest"
"regexp"
"testing"
"time"
)
type testCase struct {
tok string
fmt string
rx *regexp.Regexp
}
func TestLog(t *testing.T) {
log.SetFlags(0)
now := time.Now()
formats := []testCase{
testCase{"remote-addr",
"%s",
regexp.MustCompile(`^127\.0\.0\.1:\d+\n$`),
},
testCase{"date",
"%s",
regexp.MustCompile(`^` + fmt.Sprintf("%04d-%02d-%02d", now.Year(), now.Month(), now.Day()) + `\n$`),
},
testCase{"method",
"%s",
regexp.MustCompile(`^GET\n$`),
},
testCase{"url",
"%s",
regexp.MustCompile(`^/\n$`),
},
testCase{"http-version",
"%s",
regexp.MustCompile(`^1\.1\n$`),
},
testCase{"status",
"%d",
regexp.MustCompile(`^200\n$`),
},
testCase{"referer",
"%s",
regexp.MustCompile(`^http://www\.test\.com\n$`),
},
testCase{"referrer",
"%s",
regexp.MustCompile(`^http://www\.test\.com\n$`),
},
testCase{"user-agent",
"%s",
regexp.MustCompile(`^Go \d+\.\d+ package http\n$`),
},
testCase{"bidon",
"%s",
regexp.MustCompile(`^\?\n$`),
},
testCase{"response-time",
"%.3f",
regexp.MustCompile(`^0\.1\d\d\n$`),
},
testCase{"req[Accept-Encoding]",
"%s",
regexp.MustCompile(`^gzip\n$`),
},
testCase{"res[blah]",
"%s",
regexp.MustCompile(`^$`),
},
testCase{"tiny",
Ltiny,
regexp.MustCompile(`^GET / 200 - 0\.1\d\d s\n$`),
},
testCase{"short",
Lshort,
regexp.MustCompile(`^127\.0\.0\.1:\d+ - GET / HTTP/1\.1 200 - 0\.1\d\d s\n$`),
},
testCase{"default",
Ldefault,
regexp.MustCompile(`^127\.0\.0\.1:\d+ - - \[\d{4}-\d{2}-\d{2}\] "GET / HTTP/1\.1" 200 "http://www\.test\.com" "Go \d+\.\d+ package http"\n$`),
},
testCase{"res[Content-Type]",
"%s",
regexp.MustCompile(`^text/plain\n$`),
},
}
for _, tc := range formats {
testLogCase(tc, t)
}
}
func testLogCase(tc testCase, t *testing.T) {
buf := bytes.NewBuffer(nil)
log.SetOutput(buf)
opts := NewLogOptions(log.Printf, tc.fmt, tc.tok)
opts.DateFormat = "2006-01-02"
h := LogHandler(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
time.Sleep(100 * time.Millisecond)
w.Header().Set("Content-Type", "text/plain")
w.WriteHeader(200)
w.Write([]byte("body"))
}), opts)
s := httptest.NewServer(h)
defer s.Close()
t.Logf("running %s...", tc.tok)
req, err := http.NewRequest("GET", s.URL, nil)
if err != nil {
panic(err)
}
req.Header.Set("Referer", "http://www.test.com")
req.Header.Set("Accept-Encoding", "gzip")
res, err := http.DefaultClient.Do(req)
if err != nil {
panic(err)
}
assertStatus(http.StatusOK, res.StatusCode, t)
ac := buf.String()
assertTrue(tc.rx.MatchString(ac), fmt.Sprintf("expected log to match '%s', got '%s'", tc.rx.String(), ac), t)
}
func TestForwardedFor(t *testing.T) {
rx := regexp.MustCompile(`^1\.1\.1\.1:0 - - \[\d{4}-\d{2}-\d{2}\] "GET / HTTP/1\.1" 200 "http://www\.test\.com" "Go \d+\.\d+ package http"\n$`)
buf := bytes.NewBuffer(nil)
log.SetOutput(buf)
opts := NewLogOptions(log.Printf, Ldefault)
opts.DateFormat = "2006-01-02"
h := LogHandler(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
time.Sleep(100 * time.Millisecond)
w.Header().Set("Content-Type", "text/plain")
w.WriteHeader(200)
w.Write([]byte("body"))
}), opts)
s := httptest.NewServer(h)
defer s.Close()
t.Logf("running ForwardedFor...")
req, err := http.NewRequest("GET", s.URL, nil)
if err != nil {
panic(err)
}
req.Header.Set("Referer", "http://www.test.com")
req.Header.Set("X-Forwarded-For", "1.1.1.1")
req.Header.Set("Accept-Encoding", "gzip")
res, err := http.DefaultClient.Do(req)
if err != nil {
panic(err)
}
assertStatus(http.StatusOK, res.StatusCode, t)
ac := buf.String()
assertTrue(rx.MatchString(ac), fmt.Sprintf("expected log to match '%s', got '%s'", rx.String(), ac), t)
}
func TestImmediate(t *testing.T) {
buf := bytes.NewBuffer(nil)
log.SetFlags(0)
log.SetOutput(buf)
opts := NewLogOptions(nil, Ltiny)
opts.Immediate = true
h := LogHandler(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
time.Sleep(100 * time.Millisecond)
w.WriteHeader(200)
w.Write([]byte("body"))
}), opts)
s := httptest.NewServer(h)
defer s.Close()
res, err := http.Get(s.URL)
if err != nil {
panic(err)
}
assertStatus(http.StatusOK, res.StatusCode, t)
ac := buf.String()
// Since it is Immediate logging, status is still 0 and response time is less than 100ms
rx := regexp.MustCompile(`GET / 0 - 0\.0\d\d s\n`)
assertTrue(rx.MatchString(ac), fmt.Sprintf("expected log to match '%s', got '%s'", rx.String(), ac), t)
}
func TestCustom(t *testing.T) {
buf := bytes.NewBuffer(nil)
log.SetFlags(0)
log.SetOutput(buf)
opts := NewLogOptions(nil, "%s %s", "method", "custom")
opts.CustomTokens["custom"] = func(w http.ResponseWriter, r *http.Request) string {
return "toto"
}
h := LogHandler(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
time.Sleep(100 * time.Millisecond)
w.WriteHeader(200)
w.Write([]byte("body"))
}), opts)
s := httptest.NewServer(h)
defer s.Close()
res, err := http.Get(s.URL)
if err != nil {
panic(err)
}
assertStatus(http.StatusOK, res.StatusCode, t)
ac := buf.String()
rx := regexp.MustCompile(`GET toto`)
assertTrue(rx.MatchString(ac), fmt.Sprintf("expected log to match '%s', got '%s'", rx.String(), ac), t)
}

View File

@@ -1,57 +0,0 @@
package handlers
import (
"fmt"
"net/http"
)
// Augmented response writer to hold the panic data (can be anything, not necessarily an error
// interface).
type errResponseWriter struct {
http.ResponseWriter
perr interface{}
}
// Implement the WrapWriter interface.
func (this *errResponseWriter) WrappedWriter() http.ResponseWriter {
return this.ResponseWriter
}
// PanicHandlerFunc is the same as PanicHandler; it is just a convenience
// signature that accepts a func(http.ResponseWriter, *http.Request) instead of
// an http.Handler interface. It saves the boilerplate http.HandlerFunc() cast.
func PanicHandlerFunc(h http.HandlerFunc, errH http.HandlerFunc) http.HandlerFunc {
return PanicHandler(h, errH)
}
// Calls the wrapped handler and on panic calls the specified error handler. If the error handler is nil,
// responds with a 500 error message.
func PanicHandler(h http.Handler, errH http.Handler) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
defer func() {
if err := recover(); err != nil {
if errH != nil {
ew := &errResponseWriter{w, err}
errH.ServeHTTP(ew, r)
} else {
http.Error(w, fmt.Sprintf("%s", err), http.StatusInternalServerError)
}
}
}()
// Call the protected handler
h.ServeHTTP(w, r)
}
}
// Helper function to retrieve the panic error, if any.
func GetPanicError(w http.ResponseWriter) (interface{}, bool) {
er, ok := GetResponseWriter(w, func(tst http.ResponseWriter) bool {
_, ok := tst.(*errResponseWriter)
return ok
})
if ok {
return er.(*errResponseWriter).perr, true
}
return nil, false
}
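
A minimal sketch of supplying a custom error handler so panics become a JSON 500 response via GetPanicError (the payload shape is illustrative):

package main

import (
	"encoding/json"
	"fmt"
	"net/http"

	"github.com/PuerkitoBio/ghost/handlers"
)

func main() {
	app := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		panic("boom")
	})
	errH := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		perr, _ := handlers.GetPanicError(w)
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusInternalServerError)
		json.NewEncoder(w).Encode(map[string]string{"error": fmt.Sprint(perr)})
	})
	http.ListenAndServe(":8080", handlers.PanicHandler(app, errH))
}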

View File

@@ -1,62 +0,0 @@
package handlers
import (
"net/http"
"net/http/httptest"
"testing"
)
func TestPanic(t *testing.T) {
h := PanicHandler(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
panic("test")
}), nil)
s := httptest.NewServer(h)
defer s.Close()
res, err := http.Get(s.URL)
if err != nil {
panic(err)
}
assertStatus(http.StatusInternalServerError, res.StatusCode, t)
}
func TestNoPanic(t *testing.T) {
h := PanicHandler(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
}), nil)
s := httptest.NewServer(h)
defer s.Close()
res, err := http.Get(s.URL)
if err != nil {
panic(err)
}
assertStatus(http.StatusOK, res.StatusCode, t)
}
func TestPanicCustom(t *testing.T) {
h := PanicHandler(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
panic("ok")
}),
http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
err, ok := GetPanicError(w)
if !ok {
panic("no panic error found")
}
w.WriteHeader(501)
w.Write([]byte(err.(string)))
}))
s := httptest.NewServer(h)
defer s.Close()
res, err := http.Get(s.URL)
if err != nil {
panic(err)
}
assertStatus(501, res.StatusCode, t)
assertBody([]byte("ok"), res, t)
}

View File

@@ -1,135 +0,0 @@
package handlers
import (
"encoding/json"
"errors"
"time"
"github.com/garyburd/redigo/redis"
)
var (
ErrNoKeyPrefix = errors.New("cannot get session keys without a key prefix")
)
type RedisStoreOptions struct {
Network string
Address string
ConnectTimeout time.Duration
ReadTimeout time.Duration
WriteTimeout time.Duration
Database int // Redis database to use for session keys
KeyPrefix string // If set, keys will be KeyPrefix:SessionID (colon added)
BrowserSessServerTTL time.Duration // Defaults to 2 days
}
type RedisStore struct {
opts *RedisStoreOptions
conn redis.Conn
}
// Create a redis session store with the specified options.
func NewRedisStore(opts *RedisStoreOptions) *RedisStore {
var err error
rs := &RedisStore{opts, nil}
rs.conn, err = redis.DialTimeout(opts.Network, opts.Address, opts.ConnectTimeout,
opts.ReadTimeout, opts.WriteTimeout)
if err != nil {
panic(err)
}
return rs
}
// Get the session from the store.
func (this *RedisStore) Get(id string) (*Session, error) {
key := id
if this.opts.KeyPrefix != "" {
key = this.opts.KeyPrefix + ":" + id
}
b, err := redis.Bytes(this.conn.Do("GET", key))
if err != nil {
return nil, err
}
var sess Session
err = json.Unmarshal(b, &sess)
if err != nil {
return nil, err
}
return &sess, nil
}
// Save the session into the store.
func (this *RedisStore) Set(sess *Session) error {
b, err := json.Marshal(sess)
if err != nil {
return err
}
key := sess.ID()
if this.opts.KeyPrefix != "" {
key = this.opts.KeyPrefix + ":" + sess.ID()
}
ttl := sess.MaxAge()
if ttl == 0 {
// Browser session, set to specified TTL
ttl = this.opts.BrowserSessServerTTL
if ttl == 0 {
ttl = 2 * 24 * time.Hour // Default to 2 days
}
}
_, err = this.conn.Do("SETEX", key, int(ttl.Seconds()), b)
if err != nil {
return err
}
return nil
}
// Delete the session from the store.
func (this *RedisStore) Delete(id string) error {
key := id
if this.opts.KeyPrefix != "" {
key = this.opts.KeyPrefix + ":" + id
}
_, err := this.conn.Do("DEL", key)
if err != nil {
return err
}
return nil
}
// Clear all sessions from the store. Requires the use of a key
// prefix in the store options, otherwise the method refuses to delete all keys.
func (this *RedisStore) Clear() error {
vals, err := this.getSessionKeys()
if err != nil {
return err
}
if len(vals) > 0 {
this.conn.Send("MULTI")
for _, v := range vals {
this.conn.Send("DEL", v)
}
_, err = this.conn.Do("EXEC")
if err != nil {
return err
}
}
return nil
}
// Get the number of session keys in the store. Requires the use of a
// key prefix in the store options, otherwise returns -1 (cannot tell
// session keys from other keys).
func (this *RedisStore) Len() int {
vals, err := this.getSessionKeys()
if err != nil {
return -1
}
return len(vals)
}
func (this *RedisStore) getSessionKeys() ([]interface{}, error) {
if this.opts.KeyPrefix != "" {
return redis.Values(this.conn.Do("KEYS", this.opts.KeyPrefix+":*"))
}
return nil, ErrNoKeyPrefix
}
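
A minimal sketch of building a RedisStore and plugging it into the session handler; note that NewRedisStore panics if it cannot reach Redis (the address, prefix and secret are illustrative):

package main

import (
	"net/http"
	"time"

	"github.com/PuerkitoBio/ghost/handlers"
)

func main() {
	store := handlers.NewRedisStore(&handlers.RedisStoreOptions{
		Network:              "tcp",
		Address:              ":6379",
		Database:             1,
		KeyPrefix:            "sess", // keys become "sess:<session id>"
		BrowserSessServerTTL: 24 * time.Hour,
	})
	opts := handlers.NewSessionOptions(store, "a long random secret")
	h := handlers.SessionHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if ssn, ok := handlers.GetSession(w); ok {
			ssn.Data["hits"] = 1
		}
		w.Write([]byte("ok"))
	}), opts)
	http.ListenAndServe(":8080", h)
}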

View File

@@ -1,30 +0,0 @@
package handlers
import (
"net/http"
)
// This interface can be implemented by an augmented ResponseWriter, so that
// it doesn't hide other augmented writers in the chain.
type WrapWriter interface {
http.ResponseWriter
WrappedWriter() http.ResponseWriter
}
// Helper function to retrieve a specific ResponseWriter.
func GetResponseWriter(w http.ResponseWriter,
predicate func(http.ResponseWriter) bool) (http.ResponseWriter, bool) {
for {
// Check if this writer is the one we're looking for
if w != nil && predicate(w) {
return w, true
}
// If it is a WrapWriter, move back the chain of wrapped writers
ww, ok := w.(WrapWriter)
if !ok {
return nil, false
}
w = ww.WrappedWriter()
}
}
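
Any augmented writer that implements WrapWriter stays transparent to the helpers above; a minimal sketch of such a writer (a byte-counting decorator, purely illustrative):

package middleware

import (
	"net/http"

	"github.com/PuerkitoBio/ghost/handlers"
)

// countingWriter augments the ResponseWriter to count bytes written while
// still exposing the writer it wraps.
type countingWriter struct {
	http.ResponseWriter
	n int64
}

func (c *countingWriter) Write(b []byte) (int, error) {
	n, err := c.ResponseWriter.Write(b)
	c.n += int64(n)
	return n, err
}

// WrappedWriter implements handlers.WrapWriter, so helpers such as
// GetContext or GetSession can still walk past this writer.
func (c *countingWriter) WrappedWriter() http.ResponseWriter {
	return c.ResponseWriter
}

var _ handlers.WrapWriter = (*countingWriter)(nil)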

View File

@@ -1,52 +0,0 @@
package handlers
import (
"fmt"
"net/http"
"testing"
)
type baseWriter struct{}
func (b *baseWriter) Write(data []byte) (int, error) { return 0, nil }
func (b *baseWriter) WriteHeader(code int) {}
func (b *baseWriter) Header() http.Header { return nil }
func TestNilWriter(t *testing.T) {
rw, ok := GetResponseWriter(nil, func(w http.ResponseWriter) bool {
return true
})
assertTrue(rw == nil, "expected nil, got non-nil", t)
assertTrue(!ok, "expected false, got true", t)
}
func TestBaseWriter(t *testing.T) {
bw := &baseWriter{}
rw, ok := GetResponseWriter(bw, func(w http.ResponseWriter) bool {
return true
})
assertTrue(rw == bw, fmt.Sprintf("expected %#v, got %#v", bw, rw), t)
assertTrue(ok, "expected true, got false", t)
}
func TestWrappedWriter(t *testing.T) {
bw := &baseWriter{}
ctx := &contextResponseWriter{bw, nil}
rw, ok := GetResponseWriter(ctx, func(w http.ResponseWriter) bool {
_, ok := w.(*baseWriter)
return ok
})
assertTrue(rw == bw, fmt.Sprintf("expected %#v, got %#v", bw, rw), t)
assertTrue(ok, "expected true, got false", t)
}
func TestWrappedNotFoundWriter(t *testing.T) {
bw := &baseWriter{}
ctx := &contextResponseWriter{bw, nil}
rw, ok := GetResponseWriter(ctx, func(w http.ResponseWriter) bool {
_, ok := w.(*statusResponseWriter)
return ok
})
assertTrue(rw == nil, fmt.Sprintf("expected nil, got %#v", rw), t)
assertTrue(!ok, "expected false, got true", t)
}

View File

@@ -1,321 +0,0 @@
package handlers
import (
"encoding/json"
"errors"
"hash/crc32"
"net/http"
"strings"
"time"
"github.com/PuerkitoBio/ghost"
"github.com/gorilla/securecookie"
"github.com/nu7hatch/gouuid"
)
const defaultCookieName = "ghost.sid"
var (
ErrSessionSecretMissing = errors.New("session secret is missing")
ErrNoSessionID = errors.New("session ID could not be generated")
)
// The Session holds the data map that persists for the duration of the session.
// The information stored in this map should be marshalable for the target Session store
// format (i.e. json, sql, gob, etc. depending on how the store persists the data).
type Session struct {
isNew bool // keep private, not saved to JSON, will be false once read from the store
internalSession
}
// Use a separate private struct to hold the private fields of the Session,
// although those fields are exposed (public). This is a trick to simplify
// JSON encoding.
type internalSession struct {
Data map[string]interface{} // JSON cannot marshal a map[interface{}]interface{}
ID string
Created time.Time
MaxAge time.Duration
}
// Create a new Session instance. It panics in the unlikely event that a new random ID cannot be generated.
func newSession(maxAge int) *Session {
uid, err := uuid.NewV4()
if err != nil {
panic(ErrNoSessionID)
}
return &Session{
true, // is new
internalSession{
make(map[string]interface{}),
uid.String(),
time.Now(),
time.Duration(maxAge) * time.Second,
},
}
}
// Gets the ID of the session.
func (ø *Session) ID() string {
return ø.internalSession.ID
}
// Get the max age duration
func (ø *Session) MaxAge() time.Duration {
return ø.internalSession.MaxAge
}
// Get the creation time of the session.
func (ø *Session) Created() time.Time {
return ø.internalSession.Created
}
// Is this a new Session (created by the current request)
func (ø *Session) IsNew() bool {
return ø.isNew
}
// TODO : Resets the max age property of the session to its original value (sliding expiration).
func (ø *Session) resetMaxAge() {
}
// Marshal the session to JSON.
func (ø *Session) MarshalJSON() ([]byte, error) {
return json.Marshal(ø.internalSession)
}
// Unmarshal the JSON into the internal session struct.
func (ø *Session) UnmarshalJSON(b []byte) error {
return json.Unmarshal(b, &ø.internalSession)
}
// Options object for the session handler. It specifies the Session store to use for
// persistence, the template for the session cookie (name, path, maxage, etc.),
// whether or not the proxy should be trusted to determine if the connection is secure,
// and the required secret to sign the session cookie.
type SessionOptions struct {
Store SessionStore
CookieTemplate http.Cookie
TrustProxy bool
Secret string
}
// Create a new SessionOptions struct, using default cookie and proxy values.
func NewSessionOptions(store SessionStore, secret string) *SessionOptions {
return &SessionOptions{
Store: store,
Secret: secret,
}
}
// The augmented ResponseWriter struct for the session handler. It holds the current
// Session object and Session store, as well as flags and function to send the actual
// session cookie at the end of the request.
type sessResponseWriter struct {
http.ResponseWriter
sess *Session
sessStore SessionStore
sessSent bool
sendCookieFn func()
}
// Implement the WrapWriter interface.
func (ø *sessResponseWriter) WrappedWriter() http.ResponseWriter {
return ø.ResponseWriter
}
// Intercept the Write() method to add the Set-Cookie header before it's too late.
func (ø *sessResponseWriter) Write(data []byte) (int, error) {
if !ø.sessSent {
ø.sendCookieFn()
ø.sessSent = true
}
return ø.ResponseWriter.Write(data)
}
// Intercept the WriteHeader() method to add the Set-Cookie header before it's too late.
func (ø *sessResponseWriter) WriteHeader(code int) {
if !ø.sessSent {
ø.sendCookieFn()
ø.sessSent = true
}
ø.ResponseWriter.WriteHeader(code)
}
// SessionHandlerFunc is the same as SessionHandler; it is just a convenience
// signature that accepts a func(http.ResponseWriter, *http.Request) instead of
// an http.Handler interface. It saves the boilerplate http.HandlerFunc() cast.
func SessionHandlerFunc(h http.HandlerFunc, opts *SessionOptions) http.HandlerFunc {
return SessionHandler(h, opts)
}
// Create a Session handler to offer the Session behaviour to the specified handler.
func SessionHandler(h http.Handler, opts *SessionOptions) http.HandlerFunc {
// Make sure the required cookie fields are set
if opts.CookieTemplate.Name == "" {
opts.CookieTemplate.Name = defaultCookieName
}
if opts.CookieTemplate.Path == "" {
opts.CookieTemplate.Path = "/"
}
// Secret is required
if opts.Secret == "" {
panic(ErrSessionSecretMissing)
}
// Return the actual handler
return func(w http.ResponseWriter, r *http.Request) {
if _, ok := getSessionWriter(w); ok {
// Self-awareness
h.ServeHTTP(w, r)
return
}
if strings.Index(r.URL.Path, opts.CookieTemplate.Path) != 0 {
// Session does not apply to this path
h.ServeHTTP(w, r)
return
}
// Create a new Session or retrieve the existing session based on the
// session cookie received.
var sess *Session
var ckSessId string
exCk, err := r.Cookie(opts.CookieTemplate.Name)
if err != nil {
sess = newSession(opts.CookieTemplate.MaxAge)
ghost.LogFn("ghost.session : error getting session cookie : %s", err)
} else {
ckSessId, err = parseSignedCookie(exCk, opts.Secret)
if err != nil {
sess = newSession(opts.CookieTemplate.MaxAge)
ghost.LogFn("ghost.session : error parsing signed cookie : %s", err)
} else if ckSessId == "" {
sess = newSession(opts.CookieTemplate.MaxAge)
ghost.LogFn("ghost.session : no existing session ID")
} else {
// Get the session
sess, err = opts.Store.Get(ckSessId)
if err != nil {
sess = newSession(opts.CookieTemplate.MaxAge)
ghost.LogFn("ghost.session : error getting session from store : %s", err)
} else if sess == nil {
sess = newSession(opts.CookieTemplate.MaxAge)
ghost.LogFn("ghost.session : nil session")
}
}
}
// Save the original hash of the session, used to detect whether the contents
// changed while handling the request and therefore need to be
// saved back to the store.
oriHash := hash(sess)
// Create the augmented ResponseWriter.
srw := &sessResponseWriter{w, sess, opts.Store, false, func() {
// This function is called when the header is about to be written, so that
// the session cookie is correctly set.
// Check if the connection is secure
proto := strings.Trim(strings.ToLower(r.Header.Get("X-Forwarded-Proto")), " ")
tls := r.TLS != nil || (strings.HasPrefix(proto, "https") && opts.TrustProxy)
if opts.CookieTemplate.Secure && !tls {
ghost.LogFn("ghost.session : secure cookie on a non-secure connection, cookie not sent")
return
}
if !sess.IsNew() {
// If this is not a new session, no need to send back the cookie
// TODO : Handle expires?
return
}
// Send the session cookie
ck := opts.CookieTemplate
ck.Value = sess.ID()
err := signCookie(&ck, opts.Secret)
if err != nil {
ghost.LogFn("ghost.session : error signing cookie : %s", err)
return
}
http.SetCookie(w, &ck)
}}
// Call wrapped handler
h.ServeHTTP(srw, r)
// TODO : Expiration management? srw.sess.resetMaxAge()
// Do not save if content is the same, unless session is new (to avoid
// creating a new session and sending a cookie on each successive request).
if newHash := hash(sess); !sess.IsNew() && oriHash == newHash && newHash != 0 {
// No changes to the session, no need to save
ghost.LogFn("ghost.session : no changes to save to store")
return
}
err = opts.Store.Set(sess)
if err != nil {
ghost.LogFn("ghost.session : error saving session to store : %s", err)
}
}
}
// Helper function to retrieve the session for the current request.
func GetSession(w http.ResponseWriter) (*Session, bool) {
ss, ok := getSessionWriter(w)
if ok {
return ss.sess, true
}
return nil, false
}
// Helper function to retrieve the session store
func GetSessionStore(w http.ResponseWriter) (SessionStore, bool) {
ss, ok := getSessionWriter(w)
if ok {
return ss.sessStore, true
}
return nil, false
}
// Internal helper function to retrieve the session writer object.
func getSessionWriter(w http.ResponseWriter) (*sessResponseWriter, bool) {
ss, ok := GetResponseWriter(w, func(tst http.ResponseWriter) bool {
_, ok := tst.(*sessResponseWriter)
return ok
})
if ok {
return ss.(*sessResponseWriter), true
}
return nil, false
}
// Parse a signed cookie and return the cookie value
func parseSignedCookie(ck *http.Cookie, secret string) (string, error) {
var val string
sck := securecookie.New([]byte(secret), nil)
err := sck.Decode(ck.Name, ck.Value, &val)
if err != nil {
return "", err
}
return val, nil
}
// Sign the specified cookie's value
func signCookie(ck *http.Cookie, secret string) error {
sck := securecookie.New([]byte(secret), nil)
enc, err := sck.Encode(ck.Name, ck.Value)
if err != nil {
return err
}
ck.Value = enc
return nil
}
// Compute a CRC32 hash of the session's JSON-encoded contents.
func hash(s *Session) uint32 {
data, err := json.Marshal(s)
if err != nil {
ghost.LogFn("ghost.session : error hash : %s", err)
return 0 // 0 is always treated as "modified" session content
}
return crc32.ChecksumIEEE(data)
}
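
A minimal sketch of wiring SessionHandler with the in-memory store and a customised cookie template (the cookie name, max age and secret are illustrative):

package main

import (
	"fmt"
	"net/http"

	"github.com/PuerkitoBio/ghost/handlers"
)

func main() {
	opts := handlers.NewSessionOptions(handlers.NewMemoryStore(100), "a long random secret")
	opts.CookieTemplate.Name = "myapp.sid"
	opts.CookieTemplate.MaxAge = 3600 // one hour; 0 would mean a browser-session cookie
	opts.CookieTemplate.HttpOnly = true

	h := handlers.SessionHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ssn, ok := handlers.GetSession(w)
		if !ok {
			http.Error(w, "no session", http.StatusInternalServerError)
			return
		}
		hits, _ := ssn.Data["hits"].(int)
		ssn.Data["hits"] = hits + 1
		fmt.Fprintf(w, "visit #%d for session %s\n", hits+1, ssn.ID())
	}), opts)
	http.ListenAndServe(":8080", h)
}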

View File

@@ -1,258 +0,0 @@
package handlers
import (
"fmt"
"io/ioutil"
"net/http"
"net/http/cookiejar"
"net/http/httptest"
"testing"
"time"
)
var (
store SessionStore
secret = "butchered at birth"
)
func TestSession(t *testing.T) {
stores := map[string]SessionStore{
"memory": NewMemoryStore(1),
"redis": NewRedisStore(&RedisStoreOptions{
Network: "tcp",
Address: ":6379",
Database: 1,
KeyPrefix: "sess",
}),
}
for k, v := range stores {
t.Logf("testing session with %s store\n", k)
store = v
t.Log("SessionExists")
testSessionExists(t)
t.Log("SessionPersists")
testSessionPersists(t)
t.Log("SessionExpires")
testSessionExpires(t)
t.Log("SessionBeforeExpires")
testSessionBeforeExpires(t)
t.Log("PanicIfNoSecret")
testPanicIfNoSecret(t)
t.Log("InvalidPath")
testInvalidPath(t)
t.Log("ValidSubPath")
testValidSubPath(t)
t.Log("SecureOverHttp")
testSecureOverHttp(t)
}
}
func setupTest(f func(w http.ResponseWriter, r *http.Request), ckPath string, secure bool, maxAge int) *httptest.Server {
opts := NewSessionOptions(store, secret)
if ckPath != "" {
opts.CookieTemplate.Path = ckPath
}
opts.CookieTemplate.Secure = secure
opts.CookieTemplate.MaxAge = maxAge
h := SessionHandler(http.HandlerFunc(f), opts)
return httptest.NewServer(h)
}
func doRequest(u string, newJar bool) *http.Response {
var err error
if newJar {
http.DefaultClient.Jar, err = cookiejar.New(new(cookiejar.Options))
if err != nil {
panic(err)
}
}
res, err := http.Get(u)
if err != nil {
panic(err)
}
return res
}
func testSessionExists(t *testing.T) {
s := setupTest(func(w http.ResponseWriter, r *http.Request) {
ssn, ok := GetSession(w)
if assertTrue(ok, "expected session to be non-nil, got nil", t) {
ssn.Data["foo"] = "bar"
assertTrue(ssn.Data["foo"] == "bar", fmt.Sprintf("expected ssn[foo] to be 'bar', got %v", ssn.Data["foo"]), t)
}
w.Write([]byte("ok"))
}, "", false, 0)
defer s.Close()
res := doRequest(s.URL, true)
assertStatus(http.StatusOK, res.StatusCode, t)
assertBody([]byte("ok"), res, t)
assertTrue(len(res.Cookies()) == 1, fmt.Sprintf("expected response to have 1 cookie, got %d", len(res.Cookies())), t)
}
func testSessionPersists(t *testing.T) {
cnt := 0
s := setupTest(func(w http.ResponseWriter, r *http.Request) {
ssn, ok := GetSession(w)
if !ok {
panic("session not found!")
}
if cnt == 0 {
ssn.Data["foo"] = "bar"
w.Write([]byte("ok"))
cnt++
} else {
w.Write([]byte(ssn.Data["foo"].(string)))
}
}, "", false, 0)
defer s.Close()
// 1st call, set the session value
res := doRequest(s.URL, true)
assertStatus(http.StatusOK, res.StatusCode, t)
assertBody([]byte("ok"), res, t)
// 2nd call, get the session value
res = doRequest(s.URL, false)
assertStatus(http.StatusOK, res.StatusCode, t)
assertBody([]byte("bar"), res, t)
assertTrue(len(res.Cookies()) == 0, fmt.Sprintf("expected 2nd response to have 0 cookie, got %d", len(res.Cookies())), t)
}
func testSessionExpires(t *testing.T) {
cnt := 0
s := setupTest(func(w http.ResponseWriter, r *http.Request) {
ssn, ok := GetSession(w)
if !ok {
panic("session not found!")
}
if cnt == 0 {
w.Write([]byte(ssn.ID()))
cnt++
} else {
w.Write([]byte(ssn.ID()))
}
}, "", false, 1) // Expire in 1 second
defer s.Close()
// 1st call, set the session value
res := doRequest(s.URL, true)
assertStatus(http.StatusOK, res.StatusCode, t)
id1, err := ioutil.ReadAll(res.Body)
if err != nil {
panic(err)
}
res.Body.Close()
time.Sleep(1001 * time.Millisecond)
// 2nd call, get the session value
res = doRequest(s.URL, false)
assertStatus(http.StatusOK, res.StatusCode, t)
id2, err := ioutil.ReadAll(res.Body)
if err != nil {
panic(err)
}
res.Body.Close()
sid1, sid2 := string(id1), string(id2)
assertTrue(len(res.Cookies()) == 1, fmt.Sprintf("expected 2nd response to have 1 cookie, got %d", len(res.Cookies())), t)
assertTrue(sid1 != sid2, "expected session IDs to be different, got same", t)
}
func testSessionBeforeExpires(t *testing.T) {
s := setupTest(func(w http.ResponseWriter, r *http.Request) {
ssn, ok := GetSession(w)
if !ok {
panic("session not found!")
}
w.Write([]byte(ssn.ID()))
}, "", false, 1) // Expire in 1 second
defer s.Close()
// 1st call, set the session value
res := doRequest(s.URL, true)
assertStatus(http.StatusOK, res.StatusCode, t)
id1, err := ioutil.ReadAll(res.Body)
if err != nil {
panic(err)
}
res.Body.Close()
time.Sleep(500 * time.Millisecond)
// 2nd call, get the session value
res = doRequest(s.URL, false)
assertStatus(http.StatusOK, res.StatusCode, t)
id2, err := ioutil.ReadAll(res.Body)
if err != nil {
panic(err)
}
res.Body.Close()
sid1, sid2 := string(id1), string(id2)
assertTrue(len(res.Cookies()) == 0, fmt.Sprintf("expected 2nd response to have no cookie, got %d", len(res.Cookies())), t)
assertTrue(sid1 == sid2, "expected session IDs to be the same, got different", t)
}
func testPanicIfNoSecret(t *testing.T) {
defer assertPanic(t)
SessionHandler(http.NotFoundHandler(), NewSessionOptions(nil, ""))
}
func testInvalidPath(t *testing.T) {
s := setupTest(func(w http.ResponseWriter, r *http.Request) {
_, ok := GetSession(w)
assertTrue(!ok, "expected session to be nil, got non-nil", t)
w.Write([]byte("ok"))
}, "/foo", false, 0)
defer s.Close()
res := doRequest(s.URL, true)
assertStatus(http.StatusOK, res.StatusCode, t)
assertBody([]byte("ok"), res, t)
assertTrue(len(res.Cookies()) == 0, fmt.Sprintf("expected response to have no cookie, got %d", len(res.Cookies())), t)
}
func testValidSubPath(t *testing.T) {
s := setupTest(func(w http.ResponseWriter, r *http.Request) {
_, ok := GetSession(w)
assertTrue(ok, "expected session to be non-nil, got nil", t)
w.Write([]byte("ok"))
}, "/foo", false, 0)
defer s.Close()
res := doRequest(s.URL+"/foo/bar", true)
assertStatus(http.StatusOK, res.StatusCode, t)
assertBody([]byte("ok"), res, t)
assertTrue(len(res.Cookies()) == 1, fmt.Sprintf("expected response to have 1 cookie, got %d", len(res.Cookies())), t)
}
func testSecureOverHttp(t *testing.T) {
s := setupTest(func(w http.ResponseWriter, r *http.Request) {
_, ok := GetSession(w)
assertTrue(ok, "expected session to be non-nil, got nil", t)
w.Write([]byte("ok"))
}, "", true, 0)
defer s.Close()
res := doRequest(s.URL, true)
assertStatus(http.StatusOK, res.StatusCode, t)
assertBody([]byte("ok"), res, t)
assertTrue(len(res.Cookies()) == 0, fmt.Sprintf("expected response to have no cookie, got %d", len(res.Cookies())), t)
}
// TODO : commented, certificate problem
func xtestSecureOverHttps(t *testing.T) {
opts := NewSessionOptions(store, secret)
opts.CookieTemplate.Secure = true
h := SessionHandler(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
_, ok := GetSession(w)
assertTrue(ok, "expected session to be non-nil, got nil", t)
w.Write([]byte("ok"))
}), opts)
s := httptest.NewTLSServer(h)
defer s.Close()
res := doRequest(s.URL, true)
assertStatus(http.StatusOK, res.StatusCode, t)
assertBody([]byte("ok"), res, t)
assertTrue(len(res.Cookies()) == 1, fmt.Sprintf("expected response to have 1 cookie, got %d", len(res.Cookies())), t)
}

View File

@@ -1,90 +0,0 @@
package handlers
import (
"sync"
"time"
)
// SessionStore interface, must be implemented by any store to be used
// for session storage.
type SessionStore interface {
Get(id string) (*Session, error) // Get the session from the store
Set(sess *Session) error // Save the session in the store
Delete(id string) error // Delete the session from the store
Clear() error // Delete all sessions from the store
Len() int // Get the number of sessions in the store
}
// In-memory implementation of a session store. Not recommended for production
// use.
type MemoryStore struct {
l sync.RWMutex
m map[string]*Session
capc int
}
// Create a new memory store.
func NewMemoryStore(capc int) *MemoryStore {
m := &MemoryStore{}
m.capc = capc
m.newMap()
return m
}
// Get the number of sessions saved in the store.
func (this *MemoryStore) Len() int {
return len(this.m)
}
// Get the requested session from the store.
func (this *MemoryStore) Get(id string) (*Session, error) {
this.l.RLock()
defer this.l.RUnlock()
return this.m[id], nil
}
// Save the session to the store.
func (this *MemoryStore) Set(sess *Session) error {
this.l.Lock()
defer this.l.Unlock()
this.m[sess.ID()] = sess
if sess.IsNew() {
// The memory store never marshals the session (which would drop the unexported
// isNew flag), so reset it here or it would stay true forever.
sess.isNew = false
// Expire in the given time. If the maxAge is 0 (which means browser-session lifetime),
// expire in a reasonable delay, 2 days. The weird case of a negative maxAge will
// cause the immediate Delete call.
wait := sess.MaxAge()
if wait == 0 {
wait = 2 * 24 * time.Hour
}
go func() {
// Clear the session after the specified delay
<-time.After(wait)
this.Delete(sess.ID())
}()
}
return nil
}
// Delete the specified session ID from the store.
func (this *MemoryStore) Delete(id string) error {
this.l.Lock()
defer this.l.Unlock()
delete(this.m, id)
return nil
}
// Clear all sessions from the store.
func (this *MemoryStore) Clear() error {
this.l.Lock()
defer this.l.Unlock()
this.newMap()
return nil
}
// Re-create the internal map, dropping all existing sessions.
func (this *MemoryStore) newMap() {
this.m = make(map[string]*Session, this.capc)
}
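
Because SessionStore is a small interface, stores compose easily; a minimal sketch of a decorator that logs operations against any underlying store (the type name and log messages are illustrative):

package stores

import (
	"log"

	"github.com/PuerkitoBio/ghost/handlers"
)

// loggingStore decorates any SessionStore with operation logging.
type loggingStore struct {
	next handlers.SessionStore
}

func (s *loggingStore) Get(id string) (*handlers.Session, error) {
	log.Printf("session get %s", id)
	return s.next.Get(id)
}

func (s *loggingStore) Set(sess *handlers.Session) error {
	log.Printf("session set %s", sess.ID())
	return s.next.Set(sess)
}

func (s *loggingStore) Delete(id string) error {
	log.Printf("session delete %s", id)
	return s.next.Delete(id)
}

func (s *loggingStore) Clear() error { return s.next.Clear() }
func (s *loggingStore) Len() int     { return s.next.Len() }

var _ handlers.SessionStore = (*loggingStore)(nil)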

View File

@@ -1,13 +0,0 @@
package handlers
import (
"net/http"
)
// StaticFileHandler, unlike net/http.FileServer, serves the contents of a specific
// file when it is called.
func StaticFileHandler(path string) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
http.ServeFile(w, r, path)
}
}

View File

@@ -1,46 +0,0 @@
package handlers
import (
"net/http"
"net/http/httptest"
"testing"
)
func TestServeFile(t *testing.T) {
h := StaticFileHandler("./testdata/styles.css")
s := httptest.NewServer(h)
defer s.Close()
res, err := http.Get(s.URL)
if err != nil {
panic(err)
}
assertStatus(http.StatusOK, res.StatusCode, t)
assertHeader("Content-Type", "text/css; charset=utf-8", res, t)
assertHeader("Content-Encoding", "", res, t)
assertBody([]byte(`* {
background-color: white;
}`), res, t)
}
func TestGzippedFile(t *testing.T) {
h := GZIPHandler(StaticFileHandler("./testdata/styles.css"), nil)
s := httptest.NewServer(h)
defer s.Close()
req, err := http.NewRequest("GET", s.URL, nil)
if err != nil {
panic(err)
}
req.Header.Set("Accept-Encoding", "*")
res, err := http.DefaultClient.Do(req)
if err != nil {
panic(err)
}
assertStatus(http.StatusOK, res.StatusCode, t)
assertHeader("Content-Encoding", "gzip", res, t)
assertHeader("Content-Type", "text/css; charset=utf-8", res, t)
assertGzippedBody([]byte(`* {
background-color: white;
}`), res, t)
}

Binary file not shown.

(binary image removed, 1.4 KiB)

View File

@@ -1 +0,0 @@
var a = 0;

View File

@@ -1,3 +0,0 @@
* {
background-color: white;
}

View File

@@ -1,68 +0,0 @@
package handlers
import (
"bytes"
"compress/gzip"
"io"
"io/ioutil"
"net/http"
"testing"
)
func assertTrue(cond bool, msg string, t *testing.T) bool {
if !cond {
t.Error(msg)
return false
}
return true
}
func assertStatus(ex, ac int, t *testing.T) {
if ex != ac {
t.Errorf("expected status code to be %d, got %d", ex, ac)
}
}
func assertBody(ex []byte, res *http.Response, t *testing.T) {
buf, err := ioutil.ReadAll(res.Body)
if err != nil {
panic(err)
}
defer res.Body.Close()
if !bytes.Equal(ex, buf) {
t.Errorf("expected body to be '%s' (%d), got '%s' (%d)", ex, len(ex), buf, len(buf))
}
}
func assertGzippedBody(ex []byte, res *http.Response, t *testing.T) {
gr, err := gzip.NewReader(res.Body)
if err != nil {
panic(err)
}
defer res.Body.Close()
buf := bytes.NewBuffer(nil)
_, err = io.Copy(buf, gr)
if err != nil {
panic(err)
}
if !bytes.Equal(ex, buf.Bytes()) {
t.Errorf("expected unzipped body to be '%s' (%d), got '%s' (%d)", ex, len(ex), buf.Bytes(), buf.Len())
}
}
func assertHeader(hName, ex string, res *http.Response, t *testing.T) {
hVal, ok := res.Header[hName]
if (!ok || len(hVal) == 0) && len(ex) > 0 {
t.Errorf("expected header %s to be %s, was not set", hName, ex)
} else if len(hVal) > 0 && hVal[0] != ex {
t.Errorf("expected header %s to be %s, got %s", hName, ex, hVal)
}
}
func assertPanic(t *testing.T) {
if err := recover(); err == nil {
t.Error("expected a panic, got none")
}
}

File diff suppressed because one or more lines are too long

View File

@@ -1,38 +0,0 @@
package amber
import (
"github.com/PuerkitoBio/ghost/templates"
"github.com/eknkc/amber"
)
// The template compiler for Amber templates.
type AmberCompiler struct {
Options amber.Options
c *amber.Compiler
}
// Create a new Amber compiler with the specified Amber-specific options.
func NewAmberCompiler(opts amber.Options) *AmberCompiler {
return &AmberCompiler{
opts,
nil,
}
}
// Implementation of the TemplateCompiler interface.
func (this *AmberCompiler) Compile(f string) (templates.Templater, error) {
// amber.CompileFile creates a new compiler each time. To limit the number
// of allocations, reuse a compiler.
if this.c == nil {
this.c = amber.New()
}
this.c.Options = this.Options
if err := this.c.ParseFile(f); err != nil {
return nil, err
}
return this.c.Compile()
}
func init() {
templates.Register(".amber", NewAmberCompiler(amber.DefaultOptions))
}

View File

@@ -1,19 +0,0 @@
package gotpl
import (
"html/template"
"github.com/PuerkitoBio/ghost/templates"
)
// The template compiler for native Go templates.
type GoTemplateCompiler struct{}
// Implementation of the TemplateCompiler interface.
func (this *GoTemplateCompiler) Compile(f string) (templates.Templater, error) {
return template.ParseFiles(f)
}
func init() {
templates.Register(".tmpl", new(GoTemplateCompiler))
}

View File

@@ -1,129 +0,0 @@
package templates

import (
	"errors"
	"io"
	"net/http"
	"os"
	"path"
	"path/filepath"
	"sync"

	"github.com/PuerkitoBio/ghost"
)

var (
	ErrTemplateNotExist = errors.New("template does not exist")
	ErrDirNotExist      = errors.New("directory does not exist")

	compilers = make(map[string]TemplateCompiler)

	// The mutex guards the templaters map
	mu         sync.RWMutex
	templaters = make(map[string]Templater)
)

// Defines the interface that the template compiler must return. The Go native
// templates implement this interface.
type Templater interface {
	Execute(wr io.Writer, data interface{}) error
}

// The interface that a template engine must implement to be used by Ghost.
type TemplateCompiler interface {
	Compile(fileName string) (Templater, error)
}

// TODO : How to manage Go nested templates?
// TODO : Support Go's port of the mustache template?

// Register a template compiler for the specified extension. Extensions are case-sensitive.
// The extension must start with a dot (it is compared to the result of path.Ext() on a
// given file name).
//
// Registering is not thread-safe. Compilers should be registered before the http server
// is started.
// Compiling templates, on the other hand, is thread-safe.
func Register(ext string, c TemplateCompiler) {
	if c == nil {
		panic("ghost: Register TemplateCompiler is nil")
	}
	if _, dup := compilers[ext]; dup {
		panic("ghost: Register called twice for extension " + ext)
	}
	compilers[ext] = c
}

// Compile all templates that have a matching compiler (based on their extension) in the
// specified directory.
func CompileDir(dir string) error {
	mu.Lock()
	defer mu.Unlock()
	return filepath.Walk(dir, func(path string, fi os.FileInfo, err error) error {
		if fi == nil {
			return ErrDirNotExist
		}
		if !fi.IsDir() {
			err = compileTemplate(path, dir)
			if err != nil {
				ghost.LogFn("ghost.templates : error compiling template %s : %s", path, err)
				return err
			}
		}
		return nil
	})
}

// Compile a single template file, using the specified base directory. The base
// directory is used to set the name of the template (the part of the path relative to this
// base directory is used as the name of the template).
func Compile(path, base string) error {
	mu.Lock()
	defer mu.Unlock()
	return compileTemplate(path, base)
}

// Compile the specified template file if there is a matching compiler.
func compileTemplate(p, base string) error {
	ext := path.Ext(p)
	c, ok := compilers[ext]
	// Ignore file if no template compiler exist for this extension
	if ok {
		t, err := c.Compile(p)
		if err != nil {
			return err
		}
		key, err := filepath.Rel(base, p)
		if err != nil {
			return err
		}
		ghost.LogFn("ghost.templates : storing template for file %s", key)
		templaters[key] = t
	}
	return nil
}

// Execute the template.
func Execute(tplName string, w io.Writer, data interface{}) error {
	mu.RLock()
	t, ok := templaters[tplName]
	mu.RUnlock()
	if !ok {
		return ErrTemplateNotExist
	}
	return t.Execute(w, data)
}

// Render is the same as Execute, except that it takes a http.ResponseWriter
// instead of a generic io.Writer, and sets the Content-Type to text/html.
func Render(tplName string, w http.ResponseWriter, data interface{}) (err error) {
	w.Header().Set("Content-Type", "text/html")
	defer func() {
		if err != nil {
			w.Header().Del("Content-Type")
		}
	}()
	return Execute(tplName, w, data)
}
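
An aside on the API being removed: the doc comments above describe the intended flow, register compilers (the amber and gotpl packages do this in their init functions), compile a directory once at startup, then execute or render templates by their path relative to that directory. A minimal sketch of that flow follows; the import path of the gotpl package, the ./templates directory, the index.tmpl name and the data map are all assumed for illustration, not taken from this repository.

package main

import (
	"log"
	"net/http"

	"github.com/PuerkitoBio/ghost/templates"
	_ "github.com/PuerkitoBio/ghost/templates/gotpl" // registers the ".tmpl" compiler via init()
)

func main() {
	// Compile every file with a registered compiler under ./templates.
	// The path relative to this directory becomes the template name.
	if err := templates.CompileDir("./templates"); err != nil {
		log.Fatal(err)
	}

	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// Render looks up the compiled template by name, sets
		// Content-Type: text/html and executes it with the given data.
		if err := templates.Render("index.tmpl", w, map[string]string{"Title": "hello"}); err != nil {
			log.Printf("render failed: %v", err)
		}
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}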

vendor/manifest (vendored, 11 lines changed)
View File

@@ -34,11 +34,12 @@
"path": "/statsd"
},
{
"importpath": "github.com/PuerkitoBio/ghost",
"repository": "https://github.com/PuerkitoBio/ghost",
"vcs": "",
"revision": "a0146f2f931611b8bfe40f07018c97a7c881c76a",
"branch": "master"
"importpath": "github.com/NYTimes/gziphandler",
"repository": "https://github.com/NYTimes/gziphandler",
"vcs": "git",
"revision": "5032c8878b9dd46cfe8c625c0d9b9f258a560ee8",
"branch": "master",
"notests": true
},
{
"importpath": "github.com/PuerkitoBio/purell",