Mirror of https://github.com/WiiLink24/wfc-server.git, synced 2026-03-21 17:44:58 -05:00.
584 lines · 16 KiB · Go
package nhttp
|
|
|
|
import (
|
|
"bufio"
|
|
"errors"
|
|
"fmt"
|
|
"golang.org/x/net/http/httpguts"
|
|
"io"
|
|
"log"
|
|
_http "net/http"
|
|
"net/textproto"
|
|
"strconv"
|
|
"strings"
|
|
"sync"
|
|
"sync/atomic"
|
|
"time"
|
|
)
|
|
|
|
// NoBody is an io.ReadCloser with no bytes. Read always returns EOF
|
|
// and Close always returns nil. It can be used in an outgoing client
|
|
// request to explicitly signal that a request has zero bytes.
|
|
// An alternative, however, is to simply set Request.Body to nil.
|
|
var NoBody = noBody{}
|
|
|
|
type noBody struct{}
|
|
|
|
func (noBody) Read([]byte) (int, error) { return 0, io.EOF }
|
|
func (noBody) Close() error { return nil }
|
|
func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil }
|
|
|
|
// atomicBool is a boolean flag safe for concurrent use. It is backed by an
// int32 manipulated through sync/atomic: zero means false, non-zero true,
// so the zero value is a usable "false" flag.
type atomicBool int32

// isSet reports whether the flag is currently true.
func (b *atomicBool) isSet() bool {
	return atomic.LoadInt32((*int32)(b)) != 0
}

// setTrue atomically sets the flag to true.
func (b *atomicBool) setTrue() {
	atomic.StoreInt32((*int32)(b), 1)
}

// setFalse atomically sets the flag to false.
func (b *atomicBool) setFalse() {
	atomic.StoreInt32((*int32)(b), 0)
}
|
|
|
|
var textprotoReaderPool sync.Pool
|
|
|
|
func newTextprotoReader(br *bufio.Reader) *textproto.Reader {
|
|
if v := textprotoReaderPool.Get(); v != nil {
|
|
tr := v.(*textproto.Reader)
|
|
tr.R = br
|
|
return tr
|
|
}
|
|
return textproto.NewReader(br)
|
|
}
|
|
|
|
func putTextprotoReader(r *textproto.Reader) {
|
|
r.R = nil
|
|
textprotoReaderPool.Put(r)
|
|
}
|
|
|
|
// foreachHeaderElement splits v according to the "#rule" construction in
// RFC 7230 section 7 and invokes fn once per non-empty, trimmed element.
func foreachHeaderElement(v string, fn func(string)) {
	v = textproto.TrimString(v)
	switch {
	case v == "":
		// Nothing to report.
	case !strings.Contains(v, ","):
		// Single element; skip the Split allocation.
		fn(v)
	default:
		for _, part := range strings.Split(v, ",") {
			if part = textproto.TrimString(part); part != "" {
				fn(part)
			}
		}
	}
}
|
|
|
|
var (
|
|
bufioReaderPool sync.Pool
|
|
bufioWriter2kPool sync.Pool
|
|
bufioWriter4kPool sync.Pool
|
|
)
|
|
|
|
var copyBufPool = sync.Pool{
|
|
New: func() any {
|
|
b := make([]byte, 32*1024)
|
|
return &b
|
|
},
|
|
}
|
|
|
|
func bufioWriterPool(size int) *sync.Pool {
|
|
switch size {
|
|
case 2 << 10:
|
|
return &bufioWriter2kPool
|
|
case 4 << 10:
|
|
return &bufioWriter4kPool
|
|
}
|
|
return nil
|
|
}
|
|
|
|
func newBufioWriterSize(w io.Writer, size int) *bufio.Writer {
|
|
pool := bufioWriterPool(size)
|
|
if pool != nil {
|
|
if v := pool.Get(); v != nil {
|
|
bw := v.(*bufio.Writer)
|
|
bw.Reset(w)
|
|
return bw
|
|
}
|
|
}
|
|
return bufio.NewWriterSize(w, size)
|
|
}
|
|
|
|
func newBufioReader(r io.Reader) *bufio.Reader {
|
|
if v := bufioReaderPool.Get(); v != nil {
|
|
br := v.(*bufio.Reader)
|
|
br.Reset(r)
|
|
return br
|
|
}
|
|
// Note: if this reader size is ever changed, update
|
|
// TestHandlerBodyClose's assumptions.
|
|
return bufio.NewReader(r)
|
|
}
|
|
|
|
func putBufioReader(br *bufio.Reader) {
|
|
br.Reset(nil)
|
|
bufioReaderPool.Put(br)
|
|
}
|
|
|
|
func putBufioWriter(bw *bufio.Writer) {
|
|
bw.Reset(nil)
|
|
if pool := bufioWriterPool(bw.Available()); pool != nil {
|
|
pool.Put(bw)
|
|
}
|
|
}
|
|
|
|
// Write writes p as part of the response body, emitting any pending
// response header first. For HEAD requests the bytes are counted but
// discarded. When chunked transfer encoding is in use, each call is framed
// as one chunk: hex size line, data, then CRLF. Any write error closes the
// underlying connection, since the byte stream is no longer in a state a
// client could parse.
func (cw *chunkWriter) Write(p []byte) (n int, err error) {
	if !cw.wroteHeader {
		cw.writeHeader(nil)
	}
	if cw.res.req.Method == "HEAD" {
		// Eat writes.
		return len(p), nil
	}
	if cw.chunking {
		// Chunk-size line must precede the data bytes.
		_, err = fmt.Fprintf(cw.res.conn.bufw, "%x\r\n", len(p))
		if err != nil {
			cw.res.conn.rwc.Close()
			return
		}
	}
	n, err = cw.res.conn.bufw.Write(p)
	if cw.chunking && err == nil {
		// Chunk terminator after the data bytes.
		_, err = cw.res.conn.bufw.Write(crlf)
	}
	if err != nil {
		cw.res.conn.rwc.Close()
	}
	return
}
|
|
|
|
// flush writes any pending response header and flushes the connection's
// buffered writer to the wire.
func (cw *chunkWriter) flush() {
	if !cw.wroteHeader {
		cw.writeHeader(nil)
	}
	cw.res.conn.bufw.Flush()
}
|
|
|
|
// close finishes the response body. The header is written if it has not
// been already; in chunked mode the zero-length terminating chunk, any
// declared trailers, and the final blank line are appended, in that order.
func (cw *chunkWriter) close() {
	if !cw.wroteHeader {
		cw.writeHeader(nil)
	}
	if cw.chunking {
		bw := cw.res.conn.bufw // conn's bufio writer
		// zero chunk to mark EOF
		bw.WriteString("0\r\n")
		if trailers := cw.res.finalTrailers(); trailers != nil {
			trailers.Write(bw) // the writer handles noting errors
		}
		// final blank line after the trailers (whether
		// present or not)
		bw.WriteString("\r\n")
	}
}
|
|
|
|
// expectContinueReader wraps an io.ReadCloser and, on the first Read,
// sends an HTTP/1.1 "100 Continue" interim response to the client if the
// response side still permits one.
type expectContinueReader struct {
	resp       *response
	readCloser io.ReadCloser // the wrapped request body
	closed     atomicBool    // set once Close has been called
	sawEOF     atomicBool    // set once the wrapped body returns io.EOF
}
|
|
|
|
// Read sends the deferred "100 Continue" response the first time it is
// called (if still permitted), then delegates to the wrapped body. Reading
// after Close is an error; io.EOF from the wrapped body is recorded in
// sawEOF so the server can tell whether the body was fully consumed.
func (ecr *expectContinueReader) Read(p []byte) (n int, err error) {
	if ecr.closed.isSet() {
		return 0, errors.New("nhttp: invalid Read on closed Body")
	}
	w := ecr.resp
	// Fast check without the lock, then re-check canWriteContinue under
	// writeContinueMu: permission to write the interim response can be
	// withdrawn concurrently, so only write/flush while it still holds.
	if !w.wroteContinue && w.canWriteContinue.isSet() {
		w.wroteContinue = true
		w.writeContinueMu.Lock()
		if w.canWriteContinue.isSet() {
			w.conn.bufw.WriteString("HTTP/1.1 100 Continue\r\n\r\n")
			w.conn.bufw.Flush()
			w.canWriteContinue.setFalse()
		}
		w.writeContinueMu.Unlock()
	}
	n, err = ecr.readCloser.Read(p)
	if err == io.EOF {
		ecr.sawEOF.setTrue()
	}
	return
}
|
|
|
|
// Close marks the reader closed (subsequent Reads fail) and closes the
// wrapped body.
func (ecr *expectContinueReader) Close() error {
	ecr.closed.setTrue()
	return ecr.readCloser.Close()
}
|
|
|
|
// writeHeader finalizes the header sent to the client and writes it
// to cw.res.conn.bufw.
//
// p is not written by writeHeader, but is the first chunk of the body
// that will be written. It is sniffed for a Content-Type if none is
// set explicitly. It's also used to set the Content-Length, if the
// total body size was small and the handler has already finished
// running.
func (cw *chunkWriter) writeHeader(p []byte) {
	if cw.wroteHeader {
		return
	}
	cw.wroteHeader = true

	w := cw.res
	keepAlivesEnabled := w.conn.server.doKeepAlives()
	isHEAD := w.req.Method == "HEAD"

	// header is written out to w.conn.buf below. Depending on the
	// state of the handler, we either own the map or not. If we
	// don't own it, the exclude map is created lazily for
	// WriteSubset to remove headers. The setHeader struct holds
	// headers we need to add.
	header := cw.header
	owned := header != nil
	if !owned {
		header = w.handlerHeader
	}
	var excludeHeader map[string]bool
	// delHeader removes key either by mutating the owned map or by
	// recording it in excludeHeader for WriteSubset to skip later.
	delHeader := func(key string) {
		if owned {
			header.Del(key)
			return
		}
		if _, ok := header[key]; !ok {
			return
		}
		if excludeHeader == nil {
			excludeHeader = make(map[string]bool)
		}
		excludeHeader[key] = true
	}
	var setHeader extraHeader

	// Don't write out the fake "Trailer:foo" keys. See TrailerPrefix.
	trailers := false
	for k := range cw.header {
		if strings.HasPrefix(k, TrailerPrefix) {
			if excludeHeader == nil {
				excludeHeader = make(map[string]bool)
			}
			excludeHeader[k] = true
			trailers = true
		}
	}
	for _, v := range cw.header["Trailer"] {
		trailers = true
		foreachHeaderElement(v, cw.res.declareTrailer)
	}

	te := header.Get("Transfer-Encoding")
	hasTE := te != ""

	// If the handler is done but never sent a Content-Length
	// response header and this is our first (and last) write, set
	// it, even to zero. This helps HTTP/1.0 clients keep their
	// "keep-alive" connections alive.
	// Exceptions: 304/204/1xx responses never get Content-Length, and if
	// it was a HEAD request, we don't know the difference between
	// 0 actual bytes and 0 bytes because the handler noticed it
	// was a HEAD request and chose not to write anything. So for
	// HEAD, the handler should either write the Content-Length or
	// write non-zero bytes. If it's actually 0 bytes and the
	// handler never looked at the Request.Method, we just don't
	// send a Content-Length header.
	// Further, we don't send an automatic Content-Length if they
	// set a Transfer-Encoding, because they're generally incompatible.
	if w.handlerDone.isSet() && !trailers && !hasTE && bodyAllowedForStatus(w.status) && header.Get("Content-Length") == "" && (!isHEAD || len(p) > 0) {
		w.contentLength = int64(len(p))
		setHeader.contentLength = strconv.AppendInt(cw.res.clenBuf[:0], int64(len(p)), 10)
	}

	// If this was an HTTP/1.0 request with keep-alive and we sent a
	// Content-Length back, we can make this a keep-alive response ...
	if w.wants10KeepAlive && keepAlivesEnabled {
		sentLength := header.Get("Content-Length") != ""
		if sentLength && header.Get("Connection") == "keep-alive" {
			w.closeAfterReply = false
		}
	}

	// Check for an explicit (and valid) Content-Length header.
	hasCL := w.contentLength != -1

	if w.wants10KeepAlive && (isHEAD || hasCL || !bodyAllowedForStatus(w.status)) {
		_, connectionHeaderSet := header["Connection"]
		if !connectionHeaderSet {
			setHeader.connection = "keep-alive"
		}
	} else if !w.req.ProtoAtLeast(1, 1) || w.wantsClose {
		w.closeAfterReply = true
	}

	if header.Get("Connection") == "close" || !keepAlivesEnabled {
		w.closeAfterReply = true
	}

	// If the client wanted a 100-continue but we never sent it to
	// them (or, more strictly: we never finished reading their
	// request body), don't reuse this connection because it's now
	// in an unknown state: we might be sending this response at
	// the same time the client is now sending its request body
	// after a timeout. (Some HTTP clients send Expect:
	// 100-continue but knowing that some servers don't support
	// it, the clients set a timer and send the body later anyway)
	// If we haven't seen EOF, we can't skip over the unread body
	// because we don't know if the next bytes on the wire will be
	// the body-following-the-timer or the subsequent request.
	// See Issue 11549.
	if ecr, ok := w.req.Body.(*expectContinueReader); ok && !ecr.sawEOF.isSet() {
		w.closeAfterReply = true
	}

	// Per RFC 2616, we should consume the request body before
	// replying, if the handler hasn't already done so. But we
	// don't want to do an unbounded amount of reading here for
	// DoS reasons, so we only try up to a threshold.
	// TODO(bradfitz): where does RFC 2616 say that? See Issue 15527
	// about HTTP/1.x Handlers concurrently reading and writing, like
	// HTTP/2 handlers can do. Maybe this code should be relaxed?
	if w.req.ContentLength != 0 && !w.closeAfterReply {
		var discard, tooBig bool

		switch bdy := w.req.Body.(type) {
		case *expectContinueReader:
			if bdy.resp.wroteContinue {
				discard = true
			}
		case *body:
			bdy.mu.Lock()
			switch {
			case bdy.closed:
				if !bdy.sawEOF {
					// Body was closed in handler with non-EOF error.
					w.closeAfterReply = true
				}
			case bdy.unreadDataSizeLocked() >= 256<<10:
				tooBig = true
			default:
				discard = true
			}
			bdy.mu.Unlock()
		default:
			discard = true
		}

		if discard {
			// Drain at most 256 KiB + 1; a successful full read of
			// that much means the body exceeds the threshold.
			_, err := io.CopyN(io.Discard, w.reqBody, 256<<10+1)
			switch err {
			case nil:
				// There must be even more data left over.
				tooBig = true
			case _http.ErrBodyReadAfterClose:
				// Body was already consumed and closed.
			case io.EOF:
				// The remaining body was just consumed, close it.
				err = w.reqBody.Close()
				if err != nil {
					w.closeAfterReply = true
				}
			default:
				// Some other kind of error occurred, like a read timeout, or
				// corrupt chunked encoding. In any case, whatever remains
				// on the wire must not be parsed as another HTTP request.
				w.closeAfterReply = true
			}
		}

		if tooBig {
			w.requestTooLarge()
			delHeader("Connection")
			setHeader.connection = "close"
		}
	}

	code := w.status
	if bodyAllowedForStatus(code) {
		// If no content type, apply sniffing algorithm to body.
		_, haveType := header["Content-Type"]

		// If the Content-Encoding was set and is non-blank,
		// we shouldn't sniff the body. See Issue 31753.
		ce := header.Get("Content-Encoding")
		hasCE := len(ce) > 0
		if !hasCE && !haveType && !hasTE && len(p) > 0 {
			setHeader.contentType = _http.DetectContentType(p)
		}
	} else {
		for _, k := range suppressedHeaders(code) {
			delHeader(k)
		}
	}

	//if !header.has("Date") {
	// NOTE(review): unlike upstream net/http, the Date header is set
	// unconditionally here — the has("Date") guard above is commented
	// out, so a handler-supplied Date would be duplicated. Confirm this
	// is intentional in this fork.
	setHeader.date = appendTime(cw.res.dateBuf[:0], time.Now())
	//}

	if hasCL && hasTE && te != "identity" {
		// TODO: return an error if WriteHeader gets a return parameter
		// For now just ignore the Content-Length.
		log.Printf("http: WriteHeader called with both Transfer-Encoding of %q and a Content-Length of %d\n",
			te, w.contentLength)
		delHeader("Content-Length")
		hasCL = false
	}

	if w.req.Method == "HEAD" || !bodyAllowedForStatus(code) || code == _http.StatusNoContent {
		// Response has no body.
		delHeader("Transfer-Encoding")
	} else if hasCL {
		// Content-Length has been provided, so no chunking is to be done.
		delHeader("Transfer-Encoding")
	} else if w.req.ProtoAtLeast(1, 1) {
		// HTTP/1.1 or greater: Transfer-Encoding has been set to identity, and no
		// content-length has been provided. The connection must be closed after the
		// reply is written, and no chunking is to be done. This is the setup
		// recommended in the Server-Sent Events candidate recommendation 11,
		// section 8.
		if hasTE && te == "identity" {
			cw.chunking = false
			w.closeAfterReply = true
			delHeader("Transfer-Encoding")
		} else {
			// HTTP/1.1 or greater: use chunked transfer encoding
			// to avoid closing the connection at EOF.
			cw.chunking = true
			setHeader.transferEncoding = "chunked"
			if hasTE && te == "chunked" {
				// We will send the chunked Transfer-Encoding header later.
				delHeader("Transfer-Encoding")
			}
		}
	} else {
		// HTTP version < 1.1: cannot do chunked transfer
		// encoding and we don't know the Content-Length so
		// signal EOF by closing connection.
		w.closeAfterReply = true
		delHeader("Transfer-Encoding") // in case already set
	}

	// Cannot use Content-Length with non-identity Transfer-Encoding.
	if cw.chunking {
		delHeader("Content-Length")
	}
	if !w.req.ProtoAtLeast(1, 0) {
		return
	}

	// Only override the Connection header if it is not a successful
	// protocol switch response and if KeepAlives are not enabled.
	// See https://golang.org/issue/36381.
	delConnectionHeader := w.closeAfterReply &&
		(!keepAlivesEnabled || !hasToken(cw.header.Get("Connection"), "close")) &&
		!isProtocolSwitchResponse(w.status, header)
	if delConnectionHeader {
		delHeader("Connection")
		if w.req.ProtoAtLeast(1, 1) {
			setHeader.connection = "close"
		}
	}

	// Status line first, then handler headers (minus exclusions), then
	// the computed extras, then the blank line ending the header block.
	writeStatusLine(w.conn.bufw, w.req.ProtoAtLeast(1, 1), code, w.statusBuf[:])
	cw.header.WriteSubset(w.conn.bufw, excludeHeader)
	setHeader.Write(w.conn.bufw)
	w.conn.bufw.Write(crlf)
}
|
|
|
|
// extraHeader is the set of headers sometimes added by chunkWriter.writeHeader.
// This type is used to avoid extra allocations from cloning and/or populating
// the response Header map and all its 1-element slices.
type extraHeader struct {
	contentType      string // written if non-empty
	connection       string // written if non-empty
	transferEncoding string // written if non-empty
	date             []byte // written if not nil
	contentLength    []byte // written if not nil
}
|
|
|
|
// Sorted the same as extraHeader.Write's loop.
var extraHeaderKeys = [][]byte{
	[]byte("Content-Type"),
	[]byte("Connection"),
	[]byte("Transfer-Encoding"),
}
|
|
|
|
// Pre-rendered "Key: " prefixes for the byte-valued fields written by
// extraHeader.Write.
var (
	headerContentLength = []byte("Content-Length: ")
	headerDate          = []byte("Date: ")
)
|
|
|
|
// Write writes the headers described in h to w.
|
|
//
|
|
// This method has a value receiver, despite the somewhat large size
|
|
// of h, because it prevents an allocation. The escape analysis isn't
|
|
// smart enough to realize this function doesn't mutate h.
|
|
func (h extraHeader) Write(w *bufio.Writer) {
|
|
if h.date != nil {
|
|
w.Write(headerDate)
|
|
w.Write(h.date)
|
|
w.Write(crlf)
|
|
}
|
|
if h.contentLength != nil {
|
|
w.Write(headerContentLength)
|
|
w.Write(h.contentLength)
|
|
w.Write(crlf)
|
|
}
|
|
for i, v := range []string{h.contentType, h.connection, h.transferEncoding} {
|
|
if v != "" {
|
|
w.Write(extraHeaderKeys[i])
|
|
w.Write(colonSpace)
|
|
w.WriteString(v)
|
|
w.Write(crlf)
|
|
}
|
|
}
|
|
}
|
|
|
|
// declareTrailer is called for each Trailer header when the
|
|
// response header is written. It notes that a header will need to be
|
|
// written in the trailers at the end of the response.
|
|
func (w *response) declareTrailer(k string) {
|
|
k = CanonicalHeaderKey(k)
|
|
if !httpguts.ValidTrailerHeader(k) {
|
|
// Forbidden by RFC 7230, section 4.1.2
|
|
return
|
|
}
|
|
w.trailers = append(w.trailers, k)
|
|
}
|
|
|
|
// unreadDataSizeLocked returns the number of bytes of unread input.
|
|
// It returns -1 if unknown.
|
|
// b.mu must be held.
|
|
func (b *body) unreadDataSizeLocked() int64 {
|
|
if lr, ok := b.src.(*io.LimitedReader); ok {
|
|
return lr.N
|
|
}
|
|
return -1
|
|
}
|
|
|
|
// requestTooLarge is called by maxBytesReader when too much input has
|
|
// been read from the client.
|
|
func (w *response) requestTooLarge() {
|
|
w.closeAfterReply = true
|
|
w.requestBodyLimitHit = true
|
|
if !w.wroteHeader {
|
|
w.Header().Set("Connection", "close")
|
|
}
|
|
}
|
|
|
|
// isProtocolSwitchResponse reports whether the response code and
|
|
// response header indicate a successful protocol upgrade response.
|
|
func isProtocolSwitchResponse(code int, h _http.Header) bool {
|
|
return code == _http.StatusSwitchingProtocols && isProtocolSwitchHeader(h)
|
|
}
|
|
|
|
// isProtocolSwitchHeader reports whether the request or response header
|
|
// is for a protocol switch.
|
|
func isProtocolSwitchHeader(h _http.Header) bool {
|
|
return h.Get("Upgrade") != "" &&
|
|
httpguts.HeaderValuesContainsToken(h["Connection"], "Upgrade")
|
|
}
|